diff --git a/.github/workflows/build_and_test.yml b/.github/workflows/build_and_test.yml index 10feacf5ef4..19fdaec07e0 100644 --- a/.github/workflows/build_and_test.yml +++ b/.github/workflows/build_and_test.yml @@ -1,17 +1,15 @@ -name: Build +name: Build and smoke test on: pull_request: - branches: [ master, rc/* ] - types: [opened, ready_for_review] - push: + branches: [master, rc/*] workflow_dispatch: jobs: build: strategy: matrix: - runs-on: [ubuntu-latest] + runs-on: [ubuntu-latest, macos-latest, macos-13-xlarge] runs-on: ${{ matrix.runs-on }} name: Build steps: @@ -28,12 +26,23 @@ jobs: run: | go get -v -t -d ./... if [ -f Gopkg.toml ]; then - curl https://raw.githubusercontent.com/golang/dep/master/install.sh | sh - dep ensure + curl https://raw.githubusercontent.com/golang/dep/master/install.sh | sh + dep ensure fi + - name: Build run: | cd ${GITHUB_WORKSPACE}/cmd/node && go build . + cd ${GITHUB_WORKSPACE}/cmd/seednode && go build . cd ${GITHUB_WORKSPACE}/cmd/keygenerator && go build . cd ${GITHUB_WORKSPACE}/cmd/logviewer && go build . cd ${GITHUB_WORKSPACE}/cmd/termui && go build . + + # On GitHub, we only run the short tests, and we only run them for some OS/ARCH combinations. + - name: Run tests + run: | + GOOS=$(go env GOOS) + + if [[ "$GOOS" == darwin ]]; then + go test -short -v ./... + fi diff --git a/.github/workflows/create_release.yml b/.github/workflows/create_release.yml index 9916e67d744..ca13a9f0313 100644 --- a/.github/workflows/create_release.yml +++ b/.github/workflows/create_release.yml @@ -15,7 +15,7 @@ jobs: build: strategy: matrix: - runs-on: [ubuntu-latest] # TODO add macos-latest when builds are possible on macs + runs-on: [ubuntu-latest, macos-latest, macos-13-xlarge] runs-on: ${{ matrix.runs-on }} name: Build steps: @@ -45,21 +45,23 @@ jobs: GOOS=$(go env GOOS) GOARCH=$(go env GOARCH) GOPATH=$(go env GOPATH) - ARCHIVE="multiversx_""$APP_VER_SHORT""_""$GOOS""_""$GOARCH"".tgz" + ARCHIVE="multiversx_""$APP_VER_SHORT""_""$GOOS""_""$GOARCH"".zip" BUILD_DIR=${GITHUB_WORKSPACE}/build - WASM_VERSION=$(cat go.mod | grep mx-chain-vm-v | sort -n | tail -n -1| awk -F '/' '{print$3}'| sed 's/ /@/g') - WASMER_DIR=${GOPATH}/pkg/mod/github.com/multiversx/${WASM_VERSION}/wasmer + VM_GO_VERSION=$(cat go.mod | grep mx-chain-vm-go | sort -n | tail -n -1| awk -F '/' '{print$3}'| sed 's/ /@/g') + VM_GO_DIR=${GOPATH}/pkg/mod/github.com/multiversx/${VM_GO_VERSION} echo "GOOS=${GOOS}" >> $GITHUB_ENV echo "GOARCH=${GOARCH}" >> $GITHUB_ENV echo "ARCHIVE=${ARCHIVE}" >> $GITHUB_ENV echo "BUILD_DIR=${BUILD_DIR}" >> $GITHUB_ENV - echo "WASMER_DIR=${WASMER_DIR}" >> $GITHUB_ENV + echo "VM_GO_VERSION=${VM_GO_VERSION}" >> $GITHUB_ENV + echo "VM_GO_DIR=${VM_GO_DIR}" >> $GITHUB_ENV - name: Build run: | mkdir -p ${BUILD_DIR} cd ${GITHUB_WORKSPACE}/cmd/node && go build -o "${BUILD_DIR}/node" -a -ldflags="-X main.appVersion=${APP_VER}" + cd ${GITHUB_WORKSPACE}/cmd/seednode && go build -o "${BUILD_DIR}/seednode" -a -ldflags="-X main.appVersion=${APP_VER}" cd ${GITHUB_WORKSPACE}/cmd/keygenerator && go build -o "${BUILD_DIR}/keygenerator" -a -ldflags="-X main.appVersion=${APP_VER}" cd ${GITHUB_WORKSPACE}/cmd/logviewer && go build -o "${BUILD_DIR}/logviewer" -a -ldflags="-X main.appVersion=${APP_VER}" cd ${GITHUB_WORKSPACE}/cmd/termui && go build -o "${BUILD_DIR}/termui" -a -ldflags="-X main.appVersion=${APP_VER}" @@ -69,24 +71,68 @@ jobs: cd ${GITHUB_WORKSPACE} if [[ "$GOOS" == linux && "$GOARCH" == amd64 ]]; then - cp -f ${WASMER_DIR}/libwasmer_linux_amd64.so ${BUILD_DIR}; + cp --verbose 
--no-preserve=mode,ownership ${VM_GO_DIR}/wasmer2/libvmexeccapi.so ${BUILD_DIR}/libvmexeccapi.so + cp --verbose --no-preserve=mode,ownership ${VM_GO_DIR}/wasmer/libwasmer_linux_amd64.so ${BUILD_DIR}/libwasmer_linux_amd64.so fi + + # Actually, there's no runner for this combination (as of March 2024). if [[ "$GOOS" == linux && "$GOARCH" == arm64 ]]; then - cp -f ${WASMER_DIR}/libwasmer_linux_arm64.so ${BUILD_DIR}; + cp --verbose --no-preserve=mode,ownership ${VM_GO_DIR}/wasmer2/libvmexeccapi_arm.so ${BUILD_DIR}/libvmexeccapi_arm.so + cp --verbose --no-preserve=mode,ownership ${VM_GO_DIR}/wasmer/libwasmer_linux_arm64_shim.so ${BUILD_DIR}/libwasmer_linux_arm64_shim.so fi + if [[ "$GOOS" == darwin && "$GOARCH" == amd64 ]]; then - cp -f ${WASMER_DIR}/libwasmer_darwin_amd64.dylib ${BUILD_DIR}; + cp -v ${VM_GO_DIR}/wasmer2/libvmexeccapi.dylib ${BUILD_DIR}/libvmexeccapi.dylib + cp -v ${VM_GO_DIR}/wasmer/libwasmer_darwin_amd64.dylib ${BUILD_DIR}/libwasmer_darwin_amd64.dylib + fi + + if [[ "$GOOS" == darwin && "$GOARCH" == arm64 ]]; then + cp -v ${VM_GO_DIR}/wasmer2/libvmexeccapi_arm.dylib ${BUILD_DIR}/libvmexeccapi_arm.dylib + cp -v ${VM_GO_DIR}/wasmer/libwasmer_darwin_arm64_shim.dylib ${BUILD_DIR}/libwasmer_darwin_arm64_shim.dylib fi - cd ${BUILD_DIR} - tar czvf "${GITHUB_WORKSPACE}/${ARCHIVE}" * - stat ${GITHUB_WORKSPACE}/${ARCHIVE} + if [[ "$GOOS" == linux ]]; then + patchelf --set-rpath "\$ORIGIN" ${BUILD_DIR}/node + patchelf --set-rpath "\$ORIGIN" ${BUILD_DIR}/seednode + + ldd ${BUILD_DIR}/node + ldd ${BUILD_DIR}/seednode + fi + + if [[ "$GOOS" == darwin ]]; then + install_name_tool -add_rpath "@loader_path" ${BUILD_DIR}/node + install_name_tool -add_rpath "@loader_path" ${BUILD_DIR}/seednode + + otool -L ${BUILD_DIR}/node + otool -L ${BUILD_DIR}/seednode + fi + + - name: Smoke test + run: | + # Remove all downloaded Go packages, so that we can test the binary's independence from them (think of Wasmer libraries). + sudo rm -rf ${GOPATH}/pkg/mod + + # Test binaries in different current directories. 
+ cd ${BUILD_DIR} && ./node --version + cd ${GITHUB_WORKSPACE} && ${BUILD_DIR}/node --version + cd / && ${BUILD_DIR}/node --version + + cd ${BUILD_DIR} && ./seednode --version + cd ${GITHUB_WORKSPACE} && ${BUILD_DIR}/seednode --version + cd / && ${BUILD_DIR}/seednode --version + + - name: Package build output + run: | + sudo chown -R $USER: ${BUILD_DIR} + chmod -R 755 ${BUILD_DIR} + ls -al ${BUILD_DIR} + zip -r -j ${ARCHIVE} ${BUILD_DIR} - name: Save artifacts uses: actions/upload-artifact@v3 with: name: ${{ env.ARCHIVE }} - path: ${{ github.workspace }}/${{ env.ARCHIVE }} + path: ${{ env.ARCHIVE }} if-no-files-found: error release: @@ -113,6 +159,6 @@ jobs: run: | gh release create --draft --notes="Release draft from Github Actions" vNext sleep 10 - for i in $(find ./assets -name '*.tgz' -type f); do + for i in $(find ./assets -name '*.zip' -type f); do gh release upload vNext ${i} done diff --git a/api/groups/nodeGroup.go b/api/groups/nodeGroup.go index fd61f481c39..e7025c033d9 100644 --- a/api/groups/nodeGroup.go +++ b/api/groups/nodeGroup.go @@ -28,6 +28,7 @@ const ( bootstrapStatusPath = "/bootstrapstatus" connectedPeersRatingsPath = "/connected-peers-ratings" managedKeys = "/managed-keys" + loadedKeys = "/loaded-keys" managedKeysCount = "/managed-keys/count" eligibleManagedKeys = "/managed-keys/eligible" waitingManagedKeys = "/managed-keys/waiting" @@ -44,6 +45,7 @@ type nodeFacadeHandler interface { GetConnectedPeersRatingsOnMainNetwork() (string, error) GetManagedKeysCount() int GetManagedKeys() []string + GetLoadedKeys() []string GetEligibleManagedKeys() ([]string, error) GetWaitingManagedKeys() ([]string, error) GetWaitingEpochsLeftForPublicKey(publicKey string) (uint32, error) @@ -129,6 +131,11 @@ func NewNodeGroup(facade nodeFacadeHandler) (*nodeGroup, error) { Method: http.MethodGet, Handler: ng.managedKeys, }, + { + Path: loadedKeys, + Method: http.MethodGet, + Handler: ng.loadedKeys, + }, { Path: eligibleManagedKeys, Method: http.MethodGet, @@ -411,6 +418,19 @@ func (ng *nodeGroup) managedKeys(c *gin.Context) { ) } +// loadedKeys returns all keys loaded by the current node +func (ng *nodeGroup) loadedKeys(c *gin.Context) { + keys := ng.getFacade().GetLoadedKeys() + c.JSON( + http.StatusOK, + shared.GenericAPIResponse{ + Data: gin.H{"loadedKeys": keys}, + Error: "", + Code: shared.ReturnCodeSuccess, + }, + ) +} + // managedKeysEligible returns the node's eligible managed keys func (ng *nodeGroup) managedKeysEligible(c *gin.Context) { keys, err := ng.getFacade().GetEligibleManagedKeys() diff --git a/api/groups/nodeGroup_test.go b/api/groups/nodeGroup_test.go index 6aa00d91693..4bc6e6c738e 100644 --- a/api/groups/nodeGroup_test.go +++ b/api/groups/nodeGroup_test.go @@ -81,6 +81,13 @@ type managedKeysResponse struct { generalResponse } +type loadedKeysResponse struct { + Data struct { + LoadedKeys []string `json:"loadedKeys"` + } `json:"data"` + generalResponse +} + type managedEligibleKeysResponse struct { Data struct { Keys []string `json:"eligibleKeys"` @@ -764,6 +771,36 @@ func TestNodeGroup_ManagedKeys(t *testing.T) { assert.Equal(t, providedKeys, response.Data.ManagedKeys) } +func TestNodeGroup_LoadedKeys(t *testing.T) { + t.Parallel() + + providedKeys := []string{ + "pk1", + "pk2", + } + facade := mock.FacadeStub{ + GetLoadedKeysCalled: func() []string { + return providedKeys + }, + } + + nodeGroup, err := groups.NewNodeGroup(&facade) + require.NoError(t, err) + + ws := startWebServer(nodeGroup, "node", getNodeRoutesConfig()) + + req, _ := http.NewRequest("GET", 
"/node/loaded-keys", nil) + resp := httptest.NewRecorder() + ws.ServeHTTP(resp, req) + + response := &loadedKeysResponse{} + loadResponse(resp.Body, response) + + assert.Equal(t, http.StatusOK, resp.Code) + assert.Equal(t, "", response.Error) + assert.Equal(t, providedKeys, response.Data.LoadedKeys) +} + func TestNodeGroup_ManagedKeysEligible(t *testing.T) { t.Parallel() @@ -1046,6 +1083,7 @@ func getNodeRoutesConfig() config.ApiRoutesConfig { {Name: "/connected-peers-ratings", Open: true}, {Name: "/managed-keys/count", Open: true}, {Name: "/managed-keys", Open: true}, + {Name: "/loaded-keys", Open: true}, {Name: "/managed-keys/eligible", Open: true}, {Name: "/managed-keys/waiting", Open: true}, {Name: "/waiting-epochs-left/:key", Open: true}, diff --git a/api/groups/transactionGroup.go b/api/groups/transactionGroup.go index c2b47bf7a87..3c62221d121 100644 --- a/api/groups/transactionGroup.go +++ b/api/groups/transactionGroup.go @@ -745,6 +745,10 @@ func validateQuery(sender, fields string, lastNonce, nonceGaps bool) error { return errors.ErrEmptySenderToGetNonceGaps } + if fields == "*" { + return nil + } + if fields != "" { return validateFields(fields) } diff --git a/api/groups/transactionGroup_test.go b/api/groups/transactionGroup_test.go index 1f8f6bffbd4..22085956fe9 100644 --- a/api/groups/transactionGroup_test.go +++ b/api/groups/transactionGroup_test.go @@ -704,6 +704,7 @@ func TestTransactionGroup_getTransactionsPool(t *testing.T) { t.Run("fields + nonce gaps", testTxPoolWithInvalidQuery("?fields=sender,receiver&nonce-gaps=true", apiErrors.ErrFetchingNonceGapsCannotIncludeFields)) t.Run("fields has spaces", testTxPoolWithInvalidQuery("?fields=sender ,receiver", apiErrors.ErrInvalidFields)) t.Run("fields has numbers", testTxPoolWithInvalidQuery("?fields=sender1", apiErrors.ErrInvalidFields)) + t.Run("fields + wild card", testTxPoolWithInvalidQuery("?fields=sender,receiver,*", apiErrors.ErrInvalidFields)) t.Run("GetTransactionsPool error should error", func(t *testing.T) { t.Parallel() @@ -816,8 +817,7 @@ func TestTransactionGroup_getTransactionsPool(t *testing.T) { t.Parallel() expectedSender := "sender" - providedFields := "sender,receiver" - query := "?by-sender=" + expectedSender + "&fields=" + providedFields + query := "?by-sender=" + expectedSender + "&fields=*" expectedResp := &common.TransactionsPoolForSenderApiResponse{ Transactions: []common.Transaction{ { diff --git a/api/groups/validatorGroup.go b/api/groups/validatorGroup.go index f2c206b34f3..1120ae4186d 100644 --- a/api/groups/validatorGroup.go +++ b/api/groups/validatorGroup.go @@ -10,13 +10,18 @@ import ( "github.com/multiversx/mx-chain-core-go/data/validator" "github.com/multiversx/mx-chain-go/api/errors" "github.com/multiversx/mx-chain-go/api/shared" + "github.com/multiversx/mx-chain-go/common" ) -const statisticsPath = "/statistics" +const ( + statisticsPath = "/statistics" + auctionPath = "/auction" +) // validatorFacadeHandler defines the methods to be implemented by a facade for validator requests type validatorFacadeHandler interface { ValidatorStatisticsApi() (map[string]*validator.ValidatorStatistics, error) + AuctionListApi() ([]*common.AuctionListValidatorAPIResponse, error) IsInterfaceNil() bool } @@ -43,6 +48,11 @@ func NewValidatorGroup(facade validatorFacadeHandler) (*validatorGroup, error) { Method: http.MethodGet, Handler: ng.statistics, }, + { + Path: auctionPath, + Method: http.MethodGet, + Handler: ng.auction, + }, } ng.endpoints = endpoints @@ -74,6 +84,31 @@ func (vg *validatorGroup) 
statistics(c *gin.Context) { ) } +// auction will return the list of the validators in the auction list +func (vg *validatorGroup) auction(c *gin.Context) { + valStats, err := vg.getFacade().AuctionListApi() + if err != nil { + c.JSON( + http.StatusBadRequest, + shared.GenericAPIResponse{ + Data: nil, + Error: err.Error(), + Code: shared.ReturnCodeRequestError, + }, + ) + return + } + + c.JSON( + http.StatusOK, + shared.GenericAPIResponse{ + Data: gin.H{"auctionList": valStats}, + Error: "", + Code: shared.ReturnCodeSuccess, + }, + ) +} + func (vg *validatorGroup) getFacade() validatorFacadeHandler { vg.mutFacade.RLock() defer vg.mutFacade.RUnlock() diff --git a/api/groups/validatorGroup_test.go b/api/groups/validatorGroup_test.go index 0bb20a869cd..0bbd1ebf742 100644 --- a/api/groups/validatorGroup_test.go +++ b/api/groups/validatorGroup_test.go @@ -12,6 +12,7 @@ import ( "github.com/multiversx/mx-chain-go/api/groups" "github.com/multiversx/mx-chain-go/api/mock" "github.com/multiversx/mx-chain-go/api/shared" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/config" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -34,11 +35,18 @@ func TestNewValidatorGroup(t *testing.T) { } // ValidatorStatisticsResponse is the response for the validator statistics endpoint. -type ValidatorStatisticsResponse struct { +type validatorStatisticsResponse struct { Result map[string]*validator.ValidatorStatistics `json:"statistics"` Error string `json:"error"` } +type auctionListResponse struct { + Data struct { + Result []*common.AuctionListValidatorAPIResponse `json:"auctionList"` + } `json:"data"` + Error string +} + func TestValidatorStatistics_ErrorWhenFacadeFails(t *testing.T) { t.Parallel() @@ -60,7 +68,7 @@ func TestValidatorStatistics_ErrorWhenFacadeFails(t *testing.T) { resp := httptest.NewRecorder() ws.ServeHTTP(resp, req) - response := ValidatorStatisticsResponse{} + response := validatorStatisticsResponse{} loadResponse(resp.Body, &response) assert.Equal(t, http.StatusBadRequest, resp.Code) @@ -97,7 +105,7 @@ func TestValidatorStatistics_ReturnsSuccessfully(t *testing.T) { response := shared.GenericAPIResponse{} loadResponse(resp.Body, &response) - validatorStatistics := ValidatorStatisticsResponse{} + validatorStatistics := validatorStatisticsResponse{} mapResponseData := response.Data.(map[string]interface{}) mapResponseDataBytes, _ := json.Marshal(mapResponseData) _ = json.Unmarshal(mapResponseDataBytes, &validatorStatistics) @@ -147,14 +155,13 @@ func TestValidatorGroup_UpdateFacade(t *testing.T) { require.NoError(t, err) ws := startWebServer(validatorGroup, "validator", getValidatorRoutesConfig()) - req, _ := http.NewRequest("GET", "/validator/statistics", nil) resp := httptest.NewRecorder() ws.ServeHTTP(resp, req) response := shared.GenericAPIResponse{} loadResponse(resp.Body, &response) - validatorStatistics := ValidatorStatisticsResponse{} + validatorStatistics := validatorStatisticsResponse{} mapResponseData := response.Data.(map[string]interface{}) mapResponseDataBytes, _ := json.Marshal(mapResponseData) _ = json.Unmarshal(mapResponseDataBytes, &validatorStatistics) @@ -191,12 +198,71 @@ func TestValidatorGroup_IsInterfaceNil(t *testing.T) { require.False(t, validatorGroup.IsInterfaceNil()) } +func TestAuctionList_ErrorWhenFacadeFails(t *testing.T) { + t.Parallel() + + errStr := "error in facade" + facade := mock.FacadeStub{ + AuctionListHandler: func() ([]*common.AuctionListValidatorAPIResponse, error) { + return nil, 
errors.New(errStr) + }, + } + + validatorGroup, err := groups.NewValidatorGroup(&facade) + require.NoError(t, err) + + ws := startWebServer(validatorGroup, "validator", getValidatorRoutesConfig()) + req, _ := http.NewRequest("GET", "/validator/auction", nil) + resp := httptest.NewRecorder() + ws.ServeHTTP(resp, req) + + response := auctionListResponse{} + loadResponse(resp.Body, &response) + + assert.Equal(t, http.StatusBadRequest, resp.Code) + assert.Contains(t, response.Error, errStr) +} + +func TestAuctionList_ReturnsSuccessfully(t *testing.T) { + t.Parallel() + + auctionListToReturn := []*common.AuctionListValidatorAPIResponse{ + { + Owner: "owner", + NumStakedNodes: 4, + TotalTopUp: "1234", + TopUpPerNode: "4321", + QualifiedTopUp: "4444", + }, + } + facade := mock.FacadeStub{ + AuctionListHandler: func() ([]*common.AuctionListValidatorAPIResponse, error) { + return auctionListToReturn, nil + }, + } + + validatorGroup, err := groups.NewValidatorGroup(&facade) + require.NoError(t, err) + + ws := startWebServer(validatorGroup, "validator", getValidatorRoutesConfig()) + req, _ := http.NewRequest("GET", "/validator/auction", nil) + resp := httptest.NewRecorder() + ws.ServeHTTP(resp, req) + + response := auctionListResponse{} + loadResponse(resp.Body, &response) + + assert.Equal(t, http.StatusOK, resp.Code) + assert.Equal(t, response.Data.Result, auctionListToReturn) +} + func getValidatorRoutesConfig() config.ApiRoutesConfig { return config.ApiRoutesConfig{ APIPackages: map[string]config.APIPackageConfig{ "validator": { Routes: []config.RouteConfig{ {Name: "/statistics", Open: true}, + {Name: "/auction", Open: true}, }, }, }, diff --git a/api/mock/facadeStub.go b/api/mock/facadeStub.go index 50572622897..e40645c1ac3 100644 --- a/api/mock/facadeStub.go +++ b/api/mock/facadeStub.go @@ -91,10 +91,12 @@ type FacadeStub struct { IsDataTrieMigratedCalled func(address string, options api.AccountQueryOptions) (bool, error) GetManagedKeysCountCalled func() int GetManagedKeysCalled func() []string + GetLoadedKeysCalled func() []string GetEligibleManagedKeysCalled func() ([]string, error) GetWaitingManagedKeysCalled func() ([]string, error) GetWaitingEpochsLeftForPublicKeyCalled func(publicKey string) (uint32, error) P2PPrometheusMetricsEnabledCalled func() bool + AuctionListHandler func() ([]*common.AuctionListValidatorAPIResponse, error) } // GetTokenSupply - @@ -195,12 +197,20 @@ func (f *FacadeStub) PprofEnabled() bool { // GetHeartbeats returns the slice of heartbeat info func (f *FacadeStub) GetHeartbeats() ([]data.PubKeyHeartbeat, error) { - return f.GetHeartbeatsHandler() + if f.GetHeartbeatsHandler != nil { + return f.GetHeartbeatsHandler() + } + + return nil, nil } // GetBalance is the mock implementation of a handler's GetBalance method func (f *FacadeStub) GetBalance(address string, options api.AccountQueryOptions) (*big.Int, api.BlockInfo, error) { - return f.GetBalanceCalled(address, options) + if f.GetBalanceCalled != nil { + return f.GetBalanceCalled(address, options) + } + + return nil, api.BlockInfo{}, nil } // GetValueForKey is the mock implementation of a handler's GetValueForKey method @@ -285,7 +295,11 @@ func (f *FacadeStub) GetAllIssuedESDTs(tokenType string) ([]string, error) { // GetAccount - func (f *FacadeStub) GetAccount(address string, options api.AccountQueryOptions) (api.AccountResponse, api.BlockInfo, error) { - return f.GetAccountCalled(address, options) + if f.GetAccountCalled != nil { + return f.GetAccountCalled(address, options) + } + + return 
api.AccountResponse{}, api.BlockInfo{}, nil } // GetAccounts - @@ -299,72 +313,137 @@ func (f *FacadeStub) GetAccounts(addresses []string, options api.AccountQueryOpt // CreateTransaction is mock implementation of a handler's CreateTransaction method func (f *FacadeStub) CreateTransaction(txArgs *external.ArgsCreateTransaction) (*transaction.Transaction, []byte, error) { - return f.CreateTransactionHandler(txArgs) + if f.CreateTransactionHandler != nil { + return f.CreateTransactionHandler(txArgs) + } + + return nil, nil, nil } // GetTransaction is the mock implementation of a handler's GetTransaction method func (f *FacadeStub) GetTransaction(hash string, withResults bool) (*transaction.ApiTransactionResult, error) { - return f.GetTransactionHandler(hash, withResults) + if f.GetTransactionHandler != nil { + return f.GetTransactionHandler(hash, withResults) + } + + return nil, nil } // SimulateTransactionExecution is the mock implementation of a handler's SimulateTransactionExecution method func (f *FacadeStub) SimulateTransactionExecution(tx *transaction.Transaction) (*txSimData.SimulationResultsWithVMOutput, error) { - return f.SimulateTransactionExecutionHandler(tx) + if f.SimulateTransactionExecutionHandler != nil { + return f.SimulateTransactionExecutionHandler(tx) + } + + return nil, nil } // SendBulkTransactions is the mock implementation of a handler's SendBulkTransactions method func (f *FacadeStub) SendBulkTransactions(txs []*transaction.Transaction) (uint64, error) { - return f.SendBulkTransactionsHandler(txs) + if f.SendBulkTransactionsHandler != nil { + return f.SendBulkTransactionsHandler(txs) + } + + return 0, nil } // ValidateTransaction - func (f *FacadeStub) ValidateTransaction(tx *transaction.Transaction) error { - return f.ValidateTransactionHandler(tx) + if f.ValidateTransactionHandler != nil { + return f.ValidateTransactionHandler(tx) + } + + return nil } // ValidateTransactionForSimulation - func (f *FacadeStub) ValidateTransactionForSimulation(tx *transaction.Transaction, bypassSignature bool) error { - return f.ValidateTransactionForSimulationHandler(tx, bypassSignature) + if f.ValidateTransactionForSimulationHandler != nil { + return f.ValidateTransactionForSimulationHandler(tx, bypassSignature) + } + + return nil } // ValidatorStatisticsApi is the mock implementation of a handler's ValidatorStatisticsApi method func (f *FacadeStub) ValidatorStatisticsApi() (map[string]*validator.ValidatorStatistics, error) { - return f.ValidatorStatisticsHandler() + if f.ValidatorStatisticsHandler != nil { + return f.ValidatorStatisticsHandler() + } + + return nil, nil +} + +// AuctionListApi is the mock implementation of a handler's AuctionListApi method +func (f *FacadeStub) AuctionListApi() ([]*common.AuctionListValidatorAPIResponse, error) { + if f.AuctionListHandler != nil { + return f.AuctionListHandler() + } + + return nil, nil } // ExecuteSCQuery is a mock implementation. 
func (f *FacadeStub) ExecuteSCQuery(query *process.SCQuery) (*vm.VMOutputApi, api.BlockInfo, error) { - return f.ExecuteSCQueryHandler(query) + if f.ExecuteSCQueryHandler != nil { + return f.ExecuteSCQueryHandler(query) + } + + return nil, api.BlockInfo{}, nil } // StatusMetrics is the mock implementation for the StatusMetrics func (f *FacadeStub) StatusMetrics() external.StatusMetricsHandler { - return f.StatusMetricsHandler() + if f.StatusMetricsHandler != nil { + return f.StatusMetricsHandler() + } + + return nil } // GetTotalStakedValue - func (f *FacadeStub) GetTotalStakedValue() (*api.StakeValues, error) { - return f.GetTotalStakedValueHandler() + if f.GetTotalStakedValueHandler != nil { + return f.GetTotalStakedValueHandler() + } + + return nil, nil } // GetDirectStakedList - func (f *FacadeStub) GetDirectStakedList() ([]*api.DirectStakedValue, error) { - return f.GetDirectStakedListHandler() + if f.GetDirectStakedListHandler != nil { + return f.GetDirectStakedListHandler() + } + + return nil, nil } // GetDelegatorsList - func (f *FacadeStub) GetDelegatorsList() ([]*api.Delegator, error) { - return f.GetDelegatorsListHandler() + if f.GetDelegatorsListHandler != nil { + return f.GetDelegatorsListHandler() + } + + return nil, nil } // ComputeTransactionGasLimit - func (f *FacadeStub) ComputeTransactionGasLimit(tx *transaction.Transaction) (*transaction.CostResponse, error) { - return f.ComputeTransactionGasLimitHandler(tx) + if f.ComputeTransactionGasLimitHandler != nil { + return f.ComputeTransactionGasLimitHandler(tx) + } + + return nil, nil } // NodeConfig - func (f *FacadeStub) NodeConfig() map[string]interface{} { - return f.NodeConfigCalled() + if f.NodeConfigCalled != nil { + return f.NodeConfigCalled() + } + + return nil } // EncodeAddressPubkey - @@ -382,17 +461,29 @@ func (f *FacadeStub) DecodeAddressPubkey(pk string) ([]byte, error) { // GetQueryHandler - func (f *FacadeStub) GetQueryHandler(name string) (debug.QueryHandler, error) { - return f.GetQueryHandlerCalled(name) + if f.GetQueryHandlerCalled != nil { + return f.GetQueryHandlerCalled(name) + } + + return nil, nil } // GetPeerInfo - func (f *FacadeStub) GetPeerInfo(pid string) ([]core.QueryP2PPeerInfo, error) { - return f.GetPeerInfoCalled(pid) + if f.GetPeerInfoCalled != nil { + return f.GetPeerInfoCalled(pid) + } + + return nil, nil } // GetConnectedPeersRatingsOnMainNetwork - func (f *FacadeStub) GetConnectedPeersRatingsOnMainNetwork() (string, error) { - return f.GetConnectedPeersRatingsOnMainNetworkCalled() + if f.GetConnectedPeersRatingsOnMainNetworkCalled != nil { + return f.GetConnectedPeersRatingsOnMainNetworkCalled() + } + + return "", nil } // GetEpochStartDataAPI - @@ -402,12 +493,20 @@ func (f *FacadeStub) GetEpochStartDataAPI(epoch uint32) (*common.EpochStartDataA // GetBlockByNonce - func (f *FacadeStub) GetBlockByNonce(nonce uint64, options api.BlockQueryOptions) (*api.Block, error) { - return f.GetBlockByNonceCalled(nonce, options) + if f.GetBlockByNonceCalled != nil { + return f.GetBlockByNonceCalled(nonce, options) + } + + return nil, nil } // GetBlockByHash - func (f *FacadeStub) GetBlockByHash(hash string, options api.BlockQueryOptions) (*api.Block, error) { - return f.GetBlockByHashCalled(hash, options) + if f.GetBlockByHashCalled != nil { + return f.GetBlockByHashCalled(hash, options) + } + + return nil, nil } // GetBlockByRound - @@ -596,6 +695,14 @@ func (f *FacadeStub) GetManagedKeys() []string { return make([]string, 0) } +// GetLoadedKeys - +func (f *FacadeStub) GetLoadedKeys() []string { 
+ if f.GetLoadedKeysCalled != nil { + return f.GetLoadedKeysCalled() + } + return make([]string, 0) +} + // GetEligibleManagedKeys - func (f *FacadeStub) GetEligibleManagedKeys() ([]string, error) { if f.GetEligibleManagedKeysCalled != nil { diff --git a/api/shared/interface.go b/api/shared/interface.go index 9be6e66c7b8..4b775ebdd39 100644 --- a/api/shared/interface.go +++ b/api/shared/interface.go @@ -115,6 +115,7 @@ type FacadeHandler interface { ComputeTransactionGasLimit(tx *transaction.Transaction) (*transaction.CostResponse, error) EncodeAddressPubkey(pk []byte) (string, error) ValidatorStatisticsApi() (map[string]*validator.ValidatorStatistics, error) + AuctionListApi() ([]*common.AuctionListValidatorAPIResponse, error) ExecuteSCQuery(*process.SCQuery) (*vm.VMOutputApi, api.BlockInfo, error) DecodeAddressPubkey(pk string) ([]byte, error) RestApiInterface() string @@ -130,6 +131,7 @@ type FacadeHandler interface { IsDataTrieMigrated(address string, options api.AccountQueryOptions) (bool, error) GetManagedKeysCount() int GetManagedKeys() []string + GetLoadedKeys() []string GetEligibleManagedKeys() ([]string, error) GetWaitingManagedKeys() ([]string, error) GetWaitingEpochsLeftForPublicKey(publicKey string) (uint32, error) diff --git a/cmd/node/config/api.toml b/cmd/node/config/api.toml index 2c7fb1d7889..a10ec049554 100644 --- a/cmd/node/config/api.toml +++ b/cmd/node/config/api.toml @@ -43,6 +43,9 @@ # /node/managed-keys will return the keys managed by the node { Name = "/managed-keys", Open = true }, + # /node/loaded-keys will return the keys loaded by the node + { Name = "/loaded-keys", Open = true }, + # /node/managed-keys/count will return the number of keys managed by the node { Name = "/managed-keys/count", Open = true }, @@ -170,7 +173,10 @@ [APIPackages.validator] Routes = [ # /validator/statistics will return a list of validators statistics for all validators - { Name = "/statistics", Open = true } + { Name = "/statistics", Open = true }, + + # /validator/auction will return a list of nodes that are in the auction list + { Name = "/auction", Open = true }, ] [APIPackages.vm-values] diff --git a/cmd/node/config/config.toml b/cmd/node/config/config.toml index 57fee3a8778..b6c11452a64 100644 --- a/cmd/node/config/config.toml +++ b/cmd/node/config/config.toml @@ -492,6 +492,7 @@ [Antiflood] Enabled = true NumConcurrentResolverJobs = 50 + NumConcurrentResolvingTrieNodesJobs = 3 [Antiflood.FastReacting] IntervalInSeconds = 1 ReservedPercent = 20.0 @@ -620,6 +621,7 @@ Type = "json" [EpochStartConfig] + GenesisEpoch = 0 MinRoundsBetweenEpochs = 20 RoundsPerEpoch = 200 # Min and Max ShuffledOutRestartThreshold represents the minimum and maximum duration of an epoch (in percentage) after a node which @@ -659,6 +661,7 @@ PeerStatePruningEnabled = true MaxStateTrieLevelInMemory = 5 MaxPeerTrieLevelInMemory = 5 + StateStatisticsEnabled = false [BlockSizeThrottleConfig] MinSizeInBytes = 104857 # 104857 is 10% from 1MB diff --git a/cmd/node/config/enableEpochs.toml b/cmd/node/config/enableEpochs.toml index a720846ce34..29aaf825438 100644 --- a/cmd/node/config/enableEpochs.toml +++ b/cmd/node/config/enableEpochs.toml @@ -90,9 +90,6 @@ # ValidatorToDelegationEnableEpoch represents the epoch when the validator-to-delegation feature will be enabled ValidatorToDelegationEnableEpoch = 1 - # WaitingListFixEnableEpoch represents the epoch when the 6 epoch waiting list fix is enabled - WaitingListFixEnableEpoch = 1000000 - # IncrementSCRNonceInMultiTransferEnableEpoch represents the epoch when the 
fix for preventing the generation of the same SCRs # is enabled. The fix is done by adding an extra increment. IncrementSCRNonceInMultiTransferEnableEpoch = 1 @@ -106,9 +103,6 @@ # ESDTTransferRoleEnableEpoch represents the epoch when esdt transfer role set is enabled ESDTTransferRoleEnableEpoch = 1 - # BuiltInFunctionOnMetaEnableEpoch represents the epoch when built in function processing on metachain is enabled - BuiltInFunctionOnMetaEnableEpoch = 1000000 - # ComputeRewardCheckpointEnableEpoch represents the epoch when compute rewards checkpoint epoch is enabled ComputeRewardCheckpointEnableEpoch = 1 @@ -293,6 +287,24 @@ # CurrentRandomnessOnSortingEnableEpoch represents the epoch when the current randomness on sorting is enabled CurrentRandomnessOnSortingEnableEpoch = 4 + # StakeLimitsEnableEpoch represents the epoch when stake limits on validators are enabled + # Should have the same value as StakingV4Step1EnableEpoch that triggers the automatic unstake operations for the queue nodes + StakeLimitsEnableEpoch = 4 + + # StakingV4Step1EnableEpoch represents the epoch when staking v4 is initialized. This is the epoch in which + # all nodes from staking queue are moved in the auction list + StakingV4Step1EnableEpoch = 4 + + # StakingV4Step2EnableEpoch represents the epoch when staking v4 is enabled. Should have a greater value than StakingV4Step1EnableEpoch. + # From this epoch, all shuffled out nodes are moved to auction nodes. No auction nodes selection is done yet. + StakingV4Step2EnableEpoch = 5 + + # StakingV4Step3EnableEpoch represents the epoch in which selected nodes from auction will be distributed to waiting list + StakingV4Step3EnableEpoch = 6 + + # AlwaysMergeContextsInEEIEnableEpoch represents the epoch in which the EEI will always merge the contexts + AlwaysMergeContextsInEEIEnableEpoch = 4 + # DynamicESDTEnableEpoch represents the epoch when dynamic NFT feature is enabled DynamicESDTEnableEpoch = 4 @@ -304,8 +316,13 @@ # MaxNodesChangeEnableEpoch holds configuration for changing the maximum number of nodes and the enabling epoch MaxNodesChangeEnableEpoch = [ - { EpochEnable = 0, MaxNumNodes = 48, NodesToShufflePerShard = 4 }, # 4 shuffled out keys / shard will not be reached normally - { EpochEnable = 1, MaxNumNodes = 64, NodesToShufflePerShard = 2 } + { EpochEnable = 0, MaxNumNodes = 48, NodesToShufflePerShard = 4 }, # 4 shuffled out keys / shard will not be reached normally + { EpochEnable = 1, MaxNumNodes = 64, NodesToShufflePerShard = 2 }, + # Staking v4 configuration, where: + # - Enable epoch = StakingV4Step3EnableEpoch + # - NodesToShufflePerShard = same as previous entry in MaxNodesChangeEnableEpoch + # - MaxNumNodes = (MaxNumNodesFromPreviousEpochEnable - (numOfShards+1)*NodesToShufflePerShard) + { EpochEnable = 6, MaxNumNodes = 56, NodesToShufflePerShard = 2 }, ] [GasSchedule] diff --git a/cmd/node/config/fullArchiveP2P.toml b/cmd/node/config/fullArchiveP2P.toml index 7be85d0e3f5..927eb0eb431 100644 --- a/cmd/node/config/fullArchiveP2P.toml +++ b/cmd/node/config/fullArchiveP2P.toml @@ -23,10 +23,11 @@ [Node.Transports.TCP] ListenAddress = "/ip4/0.0.0.0/tcp/%d" # TCP listen address PreventPortReuse = false - [Node.ResourceLimiter] - Type = "default autoscale" #available options "default autoscale", "infinite", "default with manual scale". 
- ManualSystemMemoryInMB = 0 # not taken into account if the type is not "default with manual scale" - ManualMaximumFD = 0 # not taken into account if the type is not "default with manual scale" + + [Node.ResourceLimiter] + Type = "default autoscale" #available options "default autoscale", "infinite", "default with manual scale". + ManualSystemMemoryInMB = 0 # not taken into account if the type is not "default with manual scale" + ManualMaximumFD = 0 # not taken into account if the type is not "default with manual scale" # P2P peer discovery section diff --git a/cmd/node/config/p2p.toml b/cmd/node/config/p2p.toml index 57ec120ff40..5d08f4be919 100644 --- a/cmd/node/config/p2p.toml +++ b/cmd/node/config/p2p.toml @@ -23,10 +23,11 @@ [Node.Transports.TCP] ListenAddress = "/ip4/0.0.0.0/tcp/%d" # TCP listen address PreventPortReuse = false - [Node.ResourceLimiter] - Type = "default autoscale" #available options "default autoscale", "infinite", "default with manual scale". - ManualSystemMemoryInMB = 0 # not taken into account if the type is not "default with manual scale" - ManualMaximumFD = 0 # not taken into account if the type is not "default with manual scale" + + [Node.ResourceLimiter] + Type = "default autoscale" #available options "default autoscale", "infinite", "default with manual scale". + ManualSystemMemoryInMB = 0 # not taken into account if the type is not "default with manual scale" + ManualMaximumFD = 0 # not taken into account if the type is not "default with manual scale" # P2P peer discovery section diff --git a/cmd/node/config/systemSmartContractsConfig.toml b/cmd/node/config/systemSmartContractsConfig.toml index fc898335f79..372cd0eba03 100644 --- a/cmd/node/config/systemSmartContractsConfig.toml +++ b/cmd/node/config/systemSmartContractsConfig.toml @@ -11,6 +11,8 @@ MaxNumberOfNodesForStake = 64 UnJailValue = "2500000000000000000" #0.1% of genesis node price ActivateBLSPubKeyMessageVerification = false + StakeLimitPercentage = 1.0 #fraction of value 1 - 100%, for the time being no stake limit + NodeLimitPercentage = 0.1 #fraction of value 0.1 - 10% [ESDTSystemSCConfig] BaseIssuingCost = "5000000000000000000" #5 eGLD @@ -39,3 +41,10 @@ [DelegationSystemSCConfig] MinServiceFee = 0 MaxServiceFee = 10000 + +# Changing this config is not backwards compatible +[SoftAuctionConfig] + TopUpStep = "10000000000000000000" # 10 EGLD + MinTopUp = "1000000000000000000" # 1 EGLD should be minimum + MaxTopUp = "32000000000000000000000000" # 32 mil EGLD + MaxNumberOfIterations = 100000 # 100k max number of iterations for soft auction config diff --git a/cmd/node/flags.go b/cmd/node/flags.go index 7f610b8d130..72c86c04f96 100644 --- a/cmd/node/flags.go +++ b/cmd/node/flags.go @@ -633,7 +633,8 @@ func applyCompatibleConfigs(log logger.Logger, configs *config.Configs) error { isInHistoricalBalancesMode := operationmodes.SliceContainsElement(operationModes, operationmodes.OperationModeHistoricalBalances) if isInHistoricalBalancesMode { - processHistoricalBalancesMode(log, configs) + // TODO move all operation modes settings in the common/operationmodes package and add tests + operationmodes.ProcessHistoricalBalancesMode(log, configs) } isInDbLookupExtensionMode := operationmodes.SliceContainsElement(operationModes, operationmodes.OperationModeDbLookupExtension) @@ -649,28 +650,6 @@ func applyCompatibleConfigs(log logger.Logger, configs *config.Configs) error { return nil } -func processHistoricalBalancesMode(log logger.Logger, configs *config.Configs) { - 
configs.GeneralConfig.StoragePruning.Enabled = true - configs.GeneralConfig.StoragePruning.ValidatorCleanOldEpochsData = false - configs.GeneralConfig.StoragePruning.ObserverCleanOldEpochsData = false - configs.GeneralConfig.GeneralSettings.StartInEpochEnabled = false - configs.GeneralConfig.StoragePruning.AccountsTrieCleanOldEpochsData = false - configs.GeneralConfig.StateTriesConfig.AccountsStatePruningEnabled = false - configs.GeneralConfig.DbLookupExtensions.Enabled = true - configs.PreferencesConfig.Preferences.FullArchive = true - - log.Warn("the node is in historical balances mode! Will auto-set some config values", - "StoragePruning.Enabled", configs.GeneralConfig.StoragePruning.Enabled, - "StoragePruning.ValidatorCleanOldEpochsData", configs.GeneralConfig.StoragePruning.ValidatorCleanOldEpochsData, - "StoragePruning.ObserverCleanOldEpochsData", configs.GeneralConfig.StoragePruning.ObserverCleanOldEpochsData, - "StoragePruning.AccountsTrieCleanOldEpochsData", configs.GeneralConfig.StoragePruning.AccountsTrieCleanOldEpochsData, - "GeneralSettings.StartInEpochEnabled", configs.GeneralConfig.GeneralSettings.StartInEpochEnabled, - "StateTriesConfig.AccountsStatePruningEnabled", configs.GeneralConfig.StateTriesConfig.AccountsStatePruningEnabled, - "DbLookupExtensions.Enabled", configs.GeneralConfig.DbLookupExtensions.Enabled, - "Preferences.FullArchive", configs.PreferencesConfig.Preferences.FullArchive, - ) -} - func processDbLookupExtensionMode(log logger.Logger, configs *config.Configs) { configs.GeneralConfig.DbLookupExtensions.Enabled = true configs.GeneralConfig.StoragePruning.Enabled = true diff --git a/cmd/node/main.go b/cmd/node/main.go index 289800252f5..c7cc3c1085c 100644 --- a/cmd/node/main.go +++ b/cmd/node/main.go @@ -47,10 +47,13 @@ VERSION: // appVersion should be populated at build time using ldflags // Usage examples: // linux/mac: -// go build -v -ldflags="-X main.appVersion=$(git describe --tags --long --dirty)" +// +// go build -v -ldflags="-X main.appVersion=$(git describe --tags --long --dirty)" +// // windows: -// for /f %i in ('git describe --tags --long --dirty') do set VERS=%i -// go build -v -ldflags="-X main.appVersion=%VERS%" +// +// for /f %i in ('git describe --tags --long --dirty') do set VERS=%i +// go build -v -ldflags="-X main.appVersion=%VERS%" var appVersion = common.UnVersionedAppString func main() { diff --git a/cmd/seednode/config/p2p.toml b/cmd/seednode/config/p2p.toml index cd98c9e6798..41db3a2196f 100644 --- a/cmd/seednode/config/p2p.toml +++ b/cmd/seednode/config/p2p.toml @@ -22,10 +22,11 @@ [Node.Transports.TCP] ListenAddress = "/ip4/0.0.0.0/tcp/%d" # TCP listen address PreventPortReuse = true # seeder nodes will need to enable this option - [Node.ResourceLimiter] - Type = "default with manual scale" - ManualSystemMemoryInMB = 65536 # pretend that the host running the seeder has more RAM so it can handle more connections - ManualMaximumFD = 1048576 + + [Node.ResourceLimiter] + Type = "default with manual scale" + ManualSystemMemoryInMB = 65536 # pretend that the host running the seeder has more RAM so it can handle more connections + ManualMaximumFD = 1048576 # P2P peer discovery section diff --git a/cmd/termui/presenter/presenterStatusHandler.go b/cmd/termui/presenter/presenterStatusHandler.go index 6ad88f98e4d..1722eedbcb4 100644 --- a/cmd/termui/presenter/presenterStatusHandler.go +++ b/cmd/termui/presenter/presenterStatusHandler.go @@ -6,7 +6,7 @@ import ( "sync" ) -//maxLogLines is used to specify how many lines of logs need to store in 
slice +// maxLogLines is used to specify how many lines of logs need to store in slice var maxLogLines = 100 // PresenterStatusHandler is the AppStatusHandler impl that is able to process and store received data diff --git a/cmd/termui/view/termuic/interface.go b/cmd/termui/view/termuic/interface.go index ecc3e618da6..63384792e6b 100644 --- a/cmd/termui/view/termuic/interface.go +++ b/cmd/termui/view/termuic/interface.go @@ -1,6 +1,6 @@ package termuic -//TermuiRender defines the actions which should be handled by a render +// TermuiRender defines the actions which should be handled by a render type TermuiRender interface { // RefreshData method is used to refresh data that are displayed on a grid RefreshData(numMillisecondsRefreshTime int) diff --git a/cmd/termui/view/termuic/termuiRenders/drawableContainer.go b/cmd/termui/view/termuic/termuiRenders/drawableContainer.go index 4964c9d6a85..f21472b2185 100644 --- a/cmd/termui/view/termuic/termuiRenders/drawableContainer.go +++ b/cmd/termui/view/termuic/termuiRenders/drawableContainer.go @@ -17,7 +17,7 @@ type DrawableContainer struct { maxHeight int } -//NewDrawableContainer method is used to return a new NewDrawableContainer structure +// NewDrawableContainer method is used to return a new NewDrawableContainer structure func NewDrawableContainer() *DrawableContainer { dc := DrawableContainer{} return &dc diff --git a/common/constants.go b/common/constants.go index 971dda11dca..d70d000dca5 100644 --- a/common/constants.go +++ b/common/constants.go @@ -43,6 +43,14 @@ const NewList PeerType = "new" // MetachainTopicIdentifier is the identifier used in topics to define the metachain shard ID const MetachainTopicIdentifier = "META" // TODO - move this to mx-chain-core-go and change wherever we use the string value +// AuctionList represents the list of peers which don't participate in consensus yet, but will be selected +// based on their top up stake +const AuctionList PeerType = "auction" + +// SelectedFromAuctionList represents the list of peers which have been selected from AuctionList based on +// their top up to be distributed on the WaitingList in the next epoch +const SelectedFromAuctionList PeerType = "selectedFromAuction" + // CombinedPeerType - represents the combination of two peerTypes const CombinedPeerType = "%s (%s)" @@ -508,12 +516,6 @@ const ( // MetricESDTTransferRoleEnableEpoch represents the epoch when the ESDT transfer role feature is enabled MetricESDTTransferRoleEnableEpoch = "erd_esdt_transfer_role_enable_epoch" - // MetricBuiltInFunctionOnMetaEnableEpoch represents the epoch when the builtin functions on metachain are enabled - MetricBuiltInFunctionOnMetaEnableEpoch = "erd_builtin_function_on_meta_enable_epoch" - - // MetricWaitingListFixEnableEpoch represents the epoch when the waiting list fix is enabled - MetricWaitingListFixEnableEpoch = "erd_waiting_list_fix_enable_epoch" - // MetricMaxNodesChangeEnableEpoch holds configuration for changing the maximum number of nodes and the enabling epoch MetricMaxNodesChangeEnableEpoch = "erd_max_nodes_change_enable_epoch" @@ -932,7 +934,6 @@ const ( ESDTMultiTransferFlag core.EnableEpochFlag = "ESDTMultiTransferFlag" GlobalMintBurnFlag core.EnableEpochFlag = "GlobalMintBurnFlag" ESDTTransferRoleFlag core.EnableEpochFlag = "ESDTTransferRoleFlag" - BuiltInFunctionOnMetaFlag core.EnableEpochFlag = "BuiltInFunctionOnMetaFlag" ComputeRewardCheckpointFlag core.EnableEpochFlag = "ComputeRewardCheckpointFlag" SCRSizeInvariantCheckFlag core.EnableEpochFlag = "SCRSizeInvariantCheckFlag" 
BackwardCompSaveKeyValueFlag core.EnableEpochFlag = "BackwardCompSaveKeyValueFlag" @@ -973,7 +974,6 @@ const ( SendAlwaysFlag core.EnableEpochFlag = "SendAlwaysFlag" ValueLengthCheckFlag core.EnableEpochFlag = "ValueLengthCheckFlag" CheckTransferFlag core.EnableEpochFlag = "CheckTransferFlag" - TransferToMetaFlag core.EnableEpochFlag = "TransferToMetaFlag" ESDTNFTImprovementV1Flag core.EnableEpochFlag = "ESDTNFTImprovementV1Flag" ChangeDelegationOwnerFlag core.EnableEpochFlag = "ChangeDelegationOwnerFlag" RefactorPeersMiniBlocksFlag core.EnableEpochFlag = "RefactorPeersMiniBlocksFlag" @@ -1000,11 +1000,17 @@ const ( ScToScLogEventFlag core.EnableEpochFlag = "ScToScLogEventFlag" BlockGasAndFeesReCheckFlag core.EnableEpochFlag = "BlockGasAndFeesReCheckFlag" BalanceWaitingListsFlag core.EnableEpochFlag = "BalanceWaitingListsFlag" - WaitingListFixFlag core.EnableEpochFlag = "WaitingListFixFlag" NFTStopCreateFlag core.EnableEpochFlag = "NFTStopCreateFlag" FixGasRemainingForSaveKeyValueFlag core.EnableEpochFlag = "FixGasRemainingForSaveKeyValueFlag" IsChangeOwnerAddressCrossShardThroughSCFlag core.EnableEpochFlag = "IsChangeOwnerAddressCrossShardThroughSCFlag" CurrentRandomnessOnSortingFlag core.EnableEpochFlag = "CurrentRandomnessOnSortingFlag" + StakeLimitsFlag core.EnableEpochFlag = "StakeLimitsFlag" + StakingV4Step1Flag core.EnableEpochFlag = "StakingV4Step1Flag" + StakingV4Step2Flag core.EnableEpochFlag = "StakingV4Step2Flag" + StakingV4Step3Flag core.EnableEpochFlag = "StakingV4Step3Flag" + StakingQueueFlag core.EnableEpochFlag = "StakingQueueFlag" + StakingV4StartedFlag core.EnableEpochFlag = "StakingV4StartedFlag" + AlwaysMergeContextsInEEIFlag core.EnableEpochFlag = "AlwaysMergeContextsInEEIFlag" DynamicESDTFlag core.EnableEpochFlag = "DynamicEsdtFlag" // all new flags must be added to createAllFlagsMap method, as part of enableEpochsHandler allFlagsDefined ) diff --git a/common/dtos.go b/common/dtos.go index e7876a9131b..50cf1109017 100644 --- a/common/dtos.go +++ b/common/dtos.go @@ -75,3 +75,19 @@ type EpochStartDataAPI struct { type AlteredAccountsForBlockAPIResponse struct { Accounts []*alteredAccount.AlteredAccount `json:"accounts"` } + +// AuctionNode holds data needed for a node in auction to respond to API calls +type AuctionNode struct { + BlsKey string `json:"blsKey"` + Qualified bool `json:"qualified"` +} + +// AuctionListValidatorAPIResponse holds the data needed for an auction node validator for responding to API calls +type AuctionListValidatorAPIResponse struct { + Owner string `json:"owner"` + NumStakedNodes int64 `json:"numStakedNodes"` + TotalTopUp string `json:"totalTopUp"` + TopUpPerNode string `json:"topUpPerNode"` + QualifiedTopUp string `json:"qualifiedTopUp"` + Nodes []*AuctionNode `json:"nodes"` +} diff --git a/common/enablers/enableEpochsHandler.go b/common/enablers/enableEpochsHandler.go index fd1ddd87d99..ea440d30b34 100644 --- a/common/enablers/enableEpochsHandler.go +++ b/common/enablers/enableEpochsHandler.go @@ -275,18 +275,6 @@ func (handler *enableEpochsHandler) createAllFlagsMap() { }, activationEpoch: handler.enableEpochsConfig.ESDTTransferRoleEnableEpoch, }, - common.BuiltInFunctionOnMetaFlag: { - isActiveInEpoch: func(epoch uint32) bool { - return epoch >= handler.enableEpochsConfig.BuiltInFunctionOnMetaEnableEpoch - }, - activationEpoch: handler.enableEpochsConfig.BuiltInFunctionOnMetaEnableEpoch, - }, - common.TransferToMetaFlag: { - isActiveInEpoch: func(epoch uint32) bool { - return epoch >= 
handler.enableEpochsConfig.BuiltInFunctionOnMetaEnableEpoch - }, - activationEpoch: handler.enableEpochsConfig.BuiltInFunctionOnMetaEnableEpoch, - }, common.ComputeRewardCheckpointFlag: { isActiveInEpoch: func(epoch uint32) bool { return epoch >= handler.enableEpochsConfig.ComputeRewardCheckpointEnableEpoch @@ -677,12 +665,6 @@ func (handler *enableEpochsHandler) createAllFlagsMap() { }, activationEpoch: handler.enableEpochsConfig.BalanceWaitingListsEnableEpoch, }, - common.WaitingListFixFlag: { - isActiveInEpoch: func(epoch uint32) bool { - return epoch >= handler.enableEpochsConfig.WaitingListFixEnableEpoch - }, - activationEpoch: handler.enableEpochsConfig.WaitingListFixEnableEpoch, - }, common.NFTStopCreateFlag: { isActiveInEpoch: func(epoch uint32) bool { return epoch >= handler.enableEpochsConfig.NFTStopCreateEnableEpoch @@ -707,6 +689,48 @@ func (handler *enableEpochsHandler) createAllFlagsMap() { }, activationEpoch: handler.enableEpochsConfig.CurrentRandomnessOnSortingEnableEpoch, }, + common.StakeLimitsFlag: { + isActiveInEpoch: func(epoch uint32) bool { + return epoch >= handler.enableEpochsConfig.StakeLimitsEnableEpoch + }, + activationEpoch: handler.enableEpochsConfig.StakeLimitsEnableEpoch, + }, + common.StakingV4Step1Flag: { + isActiveInEpoch: func(epoch uint32) bool { + return epoch == handler.enableEpochsConfig.StakingV4Step1EnableEpoch + }, + activationEpoch: handler.enableEpochsConfig.StakingV4Step1EnableEpoch, + }, + common.StakingV4Step2Flag: { + isActiveInEpoch: func(epoch uint32) bool { + return epoch >= handler.enableEpochsConfig.StakingV4Step2EnableEpoch + }, + activationEpoch: handler.enableEpochsConfig.StakingV4Step2EnableEpoch, + }, + common.StakingV4Step3Flag: { + isActiveInEpoch: func(epoch uint32) bool { + return epoch >= handler.enableEpochsConfig.StakingV4Step3EnableEpoch + }, + activationEpoch: handler.enableEpochsConfig.StakingV4Step3EnableEpoch, + }, + common.StakingQueueFlag: { + isActiveInEpoch: func(epoch uint32) bool { + return epoch < handler.enableEpochsConfig.StakingV4Step1EnableEpoch + }, + activationEpoch: handler.enableEpochsConfig.StakingV4Step1EnableEpoch, + }, + common.StakingV4StartedFlag: { + isActiveInEpoch: func(epoch uint32) bool { + return epoch >= handler.enableEpochsConfig.StakingV4Step1EnableEpoch + }, + activationEpoch: handler.enableEpochsConfig.StakingV4Step1EnableEpoch, + }, + common.AlwaysMergeContextsInEEIFlag: { + isActiveInEpoch: func(epoch uint32) bool { + return epoch >= handler.enableEpochsConfig.AlwaysMergeContextsInEEIEnableEpoch + }, + activationEpoch: handler.enableEpochsConfig.AlwaysMergeContextsInEEIEnableEpoch, + }, common.DynamicESDTFlag: { isActiveInEpoch: func(epoch uint32) bool { return epoch >= handler.enableEpochsConfig.DynamicESDTEnableEpoch @@ -781,6 +805,16 @@ func (handler *enableEpochsHandler) GetCurrentEpoch() uint32 { return currentEpoch } +// StakingV4Step2EnableEpoch returns the epoch when stakingV4 becomes active +func (handler *enableEpochsHandler) StakingV4Step2EnableEpoch() uint32 { + return handler.enableEpochsConfig.StakingV4Step2EnableEpoch +} + +// StakingV4Step1EnableEpoch returns the epoch when stakingV4 phase1 becomes active +func (handler *enableEpochsHandler) StakingV4Step1EnableEpoch() uint32 { + return handler.enableEpochsConfig.StakingV4Step1EnableEpoch +} + // IsInterfaceNil returns true if there is no value under the interface func (handler *enableEpochsHandler) IsInterfaceNil() bool { return handler == nil diff --git a/common/enablers/enableEpochsHandler_test.go 
b/common/enablers/enableEpochsHandler_test.go index c31f240436a..89289ac628e 100644 --- a/common/enablers/enableEpochsHandler_test.go +++ b/common/enablers/enableEpochsHandler_test.go @@ -45,12 +45,10 @@ func createEnableEpochsConfig() config.EnableEpochs { SaveJailedAlwaysEnableEpoch: 27, ReDelegateBelowMinCheckEnableEpoch: 28, ValidatorToDelegationEnableEpoch: 29, - WaitingListFixEnableEpoch: 30, IncrementSCRNonceInMultiTransferEnableEpoch: 31, ESDTMultiTransferEnableEpoch: 32, GlobalMintBurnDisableEpoch: 33, ESDTTransferRoleEnableEpoch: 34, - BuiltInFunctionOnMetaEnableEpoch: 35, ComputeRewardCheckpointEnableEpoch: 36, SCRSizeInvariantCheckEnableEpoch: 37, BackwardCompSaveKeyValueEnableEpoch: 38, @@ -111,7 +109,12 @@ func createEnableEpochsConfig() config.EnableEpochs { FixGasRemainingForSaveKeyValueBuiltinFunctionEnableEpoch: 93, ChangeOwnerAddressCrossShardThroughSCEnableEpoch: 94, CurrentRandomnessOnSortingEnableEpoch: 95, - DynamicESDTEnableEpoch: 96, + StakeLimitsEnableEpoch: 96, + StakingV4Step1EnableEpoch: 97, + StakingV4Step2EnableEpoch: 98, + StakingV4Step3EnableEpoch: 99, + AlwaysMergeContextsInEEIEnableEpoch: 100, + DynamicESDTEnableEpoch: 101, } } @@ -190,6 +193,20 @@ func TestEnableEpochsHandler_IsFlagEnabled(t *testing.T) { handler.EpochConfirmed(cfg.SetGuardianEnableEpoch+1, 0) require.True(t, handler.IsFlagEnabled(common.SetGuardianFlag)) + handler.EpochConfirmed(cfg.StakingV4Step1EnableEpoch-1, 0) + require.True(t, handler.IsFlagEnabled(common.StakingQueueFlag)) + handler.EpochConfirmed(cfg.StakingV4Step1EnableEpoch, 0) + require.False(t, handler.IsFlagEnabled(common.StakingQueueFlag)) + handler.EpochConfirmed(cfg.StakingV4Step1EnableEpoch+1, 0) + require.False(t, handler.IsFlagEnabled(common.StakingQueueFlag)) + + handler.EpochConfirmed(cfg.StakingV4Step1EnableEpoch-1, 0) + require.False(t, handler.IsFlagEnabled(common.StakingV4StartedFlag)) + handler.EpochConfirmed(cfg.StakingV4Step1EnableEpoch, 0) + require.True(t, handler.IsFlagEnabled(common.StakingV4StartedFlag)) + handler.EpochConfirmed(cfg.StakingV4Step1EnableEpoch+1, 0) + require.True(t, handler.IsFlagEnabled(common.StakingV4StartedFlag)) + handler.EpochConfirmed(math.MaxUint32, 0) require.True(t, handler.IsFlagEnabled(common.SCDeployFlag)) require.True(t, handler.IsFlagEnabled(common.BuiltInFunctionsFlag)) @@ -228,7 +245,6 @@ func TestEnableEpochsHandler_IsFlagEnabled(t *testing.T) { require.True(t, handler.IsFlagEnabled(common.ESDTMultiTransferFlag)) require.False(t, handler.IsFlagEnabled(common.GlobalMintBurnFlag)) // < require.True(t, handler.IsFlagEnabled(common.ESDTTransferRoleFlag)) - require.True(t, handler.IsFlagEnabled(common.BuiltInFunctionOnMetaFlag)) require.True(t, handler.IsFlagEnabled(common.ComputeRewardCheckpointFlag)) require.True(t, handler.IsFlagEnabled(common.SCRSizeInvariantCheckFlag)) require.False(t, handler.IsFlagEnabled(common.BackwardCompSaveKeyValueFlag)) // < @@ -269,7 +285,6 @@ func TestEnableEpochsHandler_IsFlagEnabled(t *testing.T) { require.True(t, handler.IsFlagEnabled(common.SendAlwaysFlag)) require.True(t, handler.IsFlagEnabled(common.ValueLengthCheckFlag)) require.True(t, handler.IsFlagEnabled(common.CheckTransferFlag)) - require.True(t, handler.IsFlagEnabled(common.TransferToMetaFlag)) require.True(t, handler.IsFlagEnabled(common.ESDTNFTImprovementV1Flag)) require.True(t, handler.IsFlagEnabled(common.ChangeDelegationOwnerFlag)) require.True(t, handler.IsFlagEnabled(common.RefactorPeersMiniBlocksFlag)) @@ -296,11 +311,17 @@ func TestEnableEpochsHandler_IsFlagEnabled(t 
*testing.T) { require.True(t, handler.IsFlagEnabled(common.ScToScLogEventFlag)) require.True(t, handler.IsFlagEnabled(common.BlockGasAndFeesReCheckFlag)) require.True(t, handler.IsFlagEnabled(common.BalanceWaitingListsFlag)) - require.True(t, handler.IsFlagEnabled(common.WaitingListFixFlag)) require.True(t, handler.IsFlagEnabled(common.NFTStopCreateFlag)) require.True(t, handler.IsFlagEnabled(common.FixGasRemainingForSaveKeyValueFlag)) require.True(t, handler.IsFlagEnabled(common.IsChangeOwnerAddressCrossShardThroughSCFlag)) require.True(t, handler.IsFlagEnabled(common.CurrentRandomnessOnSortingFlag)) + require.True(t, handler.IsFlagEnabled(common.StakeLimitsFlag)) + require.False(t, handler.IsFlagEnabled(common.StakingV4Step1Flag)) + require.True(t, handler.IsFlagEnabled(common.StakingV4Step2Flag)) + require.True(t, handler.IsFlagEnabled(common.StakingV4Step3Flag)) + require.False(t, handler.IsFlagEnabled(common.StakingQueueFlag)) + require.True(t, handler.IsFlagEnabled(common.StakingV4StartedFlag)) + require.True(t, handler.IsFlagEnabled(common.AlwaysMergeContextsInEEIFlag)) require.True(t, handler.IsFlagEnabled(common.DynamicESDTFlag)) } @@ -341,7 +362,6 @@ func TestEnableEpochsHandler_GetActivationEpoch(t *testing.T) { require.Equal(t, cfg.ESDTMultiTransferEnableEpoch, handler.GetActivationEpoch(common.ESDTMultiTransferFlag)) require.Equal(t, cfg.GlobalMintBurnDisableEpoch, handler.GetActivationEpoch(common.GlobalMintBurnFlag)) require.Equal(t, cfg.ESDTTransferRoleEnableEpoch, handler.GetActivationEpoch(common.ESDTTransferRoleFlag)) - require.Equal(t, cfg.BuiltInFunctionOnMetaEnableEpoch, handler.GetActivationEpoch(common.BuiltInFunctionOnMetaFlag)) require.Equal(t, cfg.ComputeRewardCheckpointEnableEpoch, handler.GetActivationEpoch(common.ComputeRewardCheckpointFlag)) require.Equal(t, cfg.SCRSizeInvariantCheckEnableEpoch, handler.GetActivationEpoch(common.SCRSizeInvariantCheckFlag)) require.Equal(t, cfg.BackwardCompSaveKeyValueEnableEpoch, handler.GetActivationEpoch(common.BackwardCompSaveKeyValueFlag)) @@ -382,7 +402,6 @@ func TestEnableEpochsHandler_GetActivationEpoch(t *testing.T) { require.Equal(t, cfg.ESDTMetadataContinuousCleanupEnableEpoch, handler.GetActivationEpoch(common.SendAlwaysFlag)) require.Equal(t, cfg.OptimizeNFTStoreEnableEpoch, handler.GetActivationEpoch(common.ValueLengthCheckFlag)) require.Equal(t, cfg.OptimizeNFTStoreEnableEpoch, handler.GetActivationEpoch(common.CheckTransferFlag)) - require.Equal(t, cfg.BuiltInFunctionOnMetaEnableEpoch, handler.GetActivationEpoch(common.TransferToMetaFlag)) require.Equal(t, cfg.ESDTMultiTransferEnableEpoch, handler.GetActivationEpoch(common.ESDTNFTImprovementV1Flag)) require.Equal(t, cfg.ESDTMetadataContinuousCleanupEnableEpoch, handler.GetActivationEpoch(common.ChangeDelegationOwnerFlag)) require.Equal(t, cfg.RefactorPeersMiniBlocksEnableEpoch, handler.GetActivationEpoch(common.RefactorPeersMiniBlocksFlag)) @@ -409,11 +428,17 @@ func TestEnableEpochsHandler_GetActivationEpoch(t *testing.T) { require.Equal(t, cfg.ScToScLogEventEnableEpoch, handler.GetActivationEpoch(common.ScToScLogEventFlag)) require.Equal(t, cfg.BlockGasAndFeesReCheckEnableEpoch, handler.GetActivationEpoch(common.BlockGasAndFeesReCheckFlag)) require.Equal(t, cfg.BalanceWaitingListsEnableEpoch, handler.GetActivationEpoch(common.BalanceWaitingListsFlag)) - require.Equal(t, cfg.WaitingListFixEnableEpoch, handler.GetActivationEpoch(common.WaitingListFixFlag)) require.Equal(t, cfg.NFTStopCreateEnableEpoch, handler.GetActivationEpoch(common.NFTStopCreateFlag)) 
require.Equal(t, cfg.ChangeOwnerAddressCrossShardThroughSCEnableEpoch, handler.GetActivationEpoch(common.IsChangeOwnerAddressCrossShardThroughSCFlag)) require.Equal(t, cfg.FixGasRemainingForSaveKeyValueBuiltinFunctionEnableEpoch, handler.GetActivationEpoch(common.FixGasRemainingForSaveKeyValueFlag)) require.Equal(t, cfg.CurrentRandomnessOnSortingEnableEpoch, handler.GetActivationEpoch(common.CurrentRandomnessOnSortingFlag)) + require.Equal(t, cfg.StakeLimitsEnableEpoch, handler.GetActivationEpoch(common.StakeLimitsFlag)) + require.Equal(t, cfg.StakingV4Step1EnableEpoch, handler.GetActivationEpoch(common.StakingV4Step1Flag)) + require.Equal(t, cfg.StakingV4Step2EnableEpoch, handler.GetActivationEpoch(common.StakingV4Step2Flag)) + require.Equal(t, cfg.StakingV4Step3EnableEpoch, handler.GetActivationEpoch(common.StakingV4Step3Flag)) + require.Equal(t, cfg.StakingV4Step1EnableEpoch, handler.GetActivationEpoch(common.StakingQueueFlag)) + require.Equal(t, cfg.StakingV4Step1EnableEpoch, handler.GetActivationEpoch(common.StakingV4StartedFlag)) + require.Equal(t, cfg.AlwaysMergeContextsInEEIEnableEpoch, handler.GetActivationEpoch(common.AlwaysMergeContextsInEEIFlag)) require.Equal(t, cfg.DynamicESDTEnableEpoch, handler.GetActivationEpoch(common.DynamicESDTFlag)) } diff --git a/common/interface.go b/common/interface.go index 38efb0a082b..73238c66e8c 100644 --- a/common/interface.go +++ b/common/interface.go @@ -223,17 +223,17 @@ type StateStatisticsHandler interface { Reset() ResetSnapshot() - IncrCache() + IncrementCache() Cache() uint64 - IncrSnapshotCache() + IncrementSnapshotCache() SnapshotCache() uint64 - IncrPersister(epoch uint32) + IncrementPersister(epoch uint32) Persister(epoch uint32) uint64 - IncrSnapshotPersister(epoch uint32) + IncrementSnapshotPersister(epoch uint32) SnapshotPersister(epoch uint32) uint64 - IncrTrie() + IncrementTrie() Trie() uint64 ProcessingStats() []string @@ -314,6 +314,7 @@ type ManagedPeersHolder interface { IncrementRoundsWithoutReceivedMessages(pkBytes []byte) ResetRoundsWithoutReceivedMessages(pkBytes []byte, pid core.PeerID) GetManagedKeysByCurrentNode() map[string]crypto.PrivateKey + GetLoadedKeysByCurrentNode() [][]byte IsKeyManagedByCurrentNode(pkBytes []byte) bool IsKeyRegistered(pkBytes []byte) bool IsPidManagedByCurrentNode(pid core.PeerID) bool @@ -343,6 +344,7 @@ type StateSyncNotifierSubscriber interface { type ManagedPeersMonitor interface { GetManagedKeysCount() int GetManagedKeys() [][]byte + GetLoadedKeys() [][]byte GetEligibleManagedKeys() ([][]byte, error) GetWaitingManagedKeys() ([][]byte, error) IsInterfaceNil() bool diff --git a/common/operationmodes/historicalBalances.go b/common/operationmodes/historicalBalances.go new file mode 100644 index 00000000000..da3cfe98dde --- /dev/null +++ b/common/operationmodes/historicalBalances.go @@ -0,0 +1,41 @@ +package operationmodes + +import ( + "github.com/multiversx/mx-chain-go/config" + logger "github.com/multiversx/mx-chain-logger-go" +) + +// ProcessHistoricalBalancesMode will process the provided flags for the historical balances +func ProcessHistoricalBalancesMode(log logger.Logger, configs *config.Configs) { + configs.GeneralConfig.StoragePruning.Enabled = true + configs.GeneralConfig.StoragePruning.ValidatorCleanOldEpochsData = false + configs.GeneralConfig.StoragePruning.ObserverCleanOldEpochsData = false + configs.GeneralConfig.GeneralSettings.StartInEpochEnabled = false + configs.GeneralConfig.StoragePruning.AccountsTrieCleanOldEpochsData = false + 
configs.GeneralConfig.StateTriesConfig.AccountsStatePruningEnabled = false + configs.GeneralConfig.DbLookupExtensions.Enabled = true + configs.PreferencesConfig.Preferences.FullArchive = true + + log.Warn("the node is in historical balances mode! Will auto-set some config values", + "StoragePruning.Enabled", configs.GeneralConfig.StoragePruning.Enabled, + "StoragePruning.ValidatorCleanOldEpochsData", configs.GeneralConfig.StoragePruning.ValidatorCleanOldEpochsData, + "StoragePruning.ObserverCleanOldEpochsData", configs.GeneralConfig.StoragePruning.ObserverCleanOldEpochsData, + "StoragePruning.AccountsTrieCleanOldEpochsData", configs.GeneralConfig.StoragePruning.AccountsTrieCleanOldEpochsData, + "GeneralSettings.StartInEpochEnabled", configs.GeneralConfig.GeneralSettings.StartInEpochEnabled, + "StateTriesConfig.AccountsStatePruningEnabled", configs.GeneralConfig.StateTriesConfig.AccountsStatePruningEnabled, + "DbLookupExtensions.Enabled", configs.GeneralConfig.DbLookupExtensions.Enabled, + "Preferences.FullArchive", configs.PreferencesConfig.Preferences.FullArchive, + ) +} + +// IsInHistoricalBalancesMode returns true if the configuration provided denotes a historical balances mode +func IsInHistoricalBalancesMode(configs *config.Configs) bool { + return configs.GeneralConfig.StoragePruning.Enabled && + !configs.GeneralConfig.StoragePruning.ValidatorCleanOldEpochsData && + !configs.GeneralConfig.StoragePruning.ObserverCleanOldEpochsData && + !configs.GeneralConfig.GeneralSettings.StartInEpochEnabled && + !configs.GeneralConfig.StoragePruning.AccountsTrieCleanOldEpochsData && + !configs.GeneralConfig.StateTriesConfig.AccountsStatePruningEnabled && + configs.GeneralConfig.DbLookupExtensions.Enabled && + configs.PreferencesConfig.Preferences.FullArchive +} diff --git a/common/operationmodes/historicalBalances_test.go b/common/operationmodes/historicalBalances_test.go new file mode 100644 index 00000000000..d06061c3027 --- /dev/null +++ b/common/operationmodes/historicalBalances_test.go @@ -0,0 +1,141 @@ +package operationmodes + +import ( + "testing" + + "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/testscommon" + "github.com/stretchr/testify/assert" +) + +func TestProcessHistoricalBalancesMode(t *testing.T) { + t.Parallel() + + cfg := &config.Configs{ + GeneralConfig: &config.Config{}, + PreferencesConfig: &config.Preferences{}, + } + ProcessHistoricalBalancesMode(&testscommon.LoggerStub{}, cfg) + + assert.True(t, cfg.GeneralConfig.StoragePruning.Enabled) + assert.False(t, cfg.GeneralConfig.StoragePruning.ValidatorCleanOldEpochsData) + assert.False(t, cfg.GeneralConfig.StoragePruning.ObserverCleanOldEpochsData) + assert.False(t, cfg.GeneralConfig.GeneralSettings.StartInEpochEnabled) + assert.False(t, cfg.GeneralConfig.StoragePruning.AccountsTrieCleanOldEpochsData) + assert.False(t, cfg.GeneralConfig.StateTriesConfig.AccountsStatePruningEnabled) + assert.True(t, cfg.GeneralConfig.DbLookupExtensions.Enabled) + assert.True(t, cfg.PreferencesConfig.Preferences.FullArchive) +} + +func TestIsInHistoricalBalancesMode(t *testing.T) { + t.Parallel() + + t.Run("empty configs should return false", func(t *testing.T) { + t.Parallel() + + cfg := &config.Configs{ + GeneralConfig: &config.Config{}, + PreferencesConfig: &config.Preferences{}, + } + assert.False(t, IsInHistoricalBalancesMode(cfg)) + }) + t.Run("storage pruning disabled should return false", func(t *testing.T) { + t.Parallel() + + cfg := &config.Configs{ + GeneralConfig: &config.Config{}, + PreferencesConfig: 
&config.Preferences{}, + } + ProcessHistoricalBalancesMode(&testscommon.LoggerStub{}, cfg) + cfg.GeneralConfig.StoragePruning.Enabled = false + assert.False(t, IsInHistoricalBalancesMode(cfg)) + }) + t.Run("validator clean old epoch data enabled should return false", func(t *testing.T) { + t.Parallel() + + cfg := &config.Configs{ + GeneralConfig: &config.Config{}, + PreferencesConfig: &config.Preferences{}, + } + ProcessHistoricalBalancesMode(&testscommon.LoggerStub{}, cfg) + cfg.GeneralConfig.StoragePruning.ValidatorCleanOldEpochsData = true + assert.False(t, IsInHistoricalBalancesMode(cfg)) + }) + t.Run("observer clean old epoch data enabled should return false", func(t *testing.T) { + t.Parallel() + + cfg := &config.Configs{ + GeneralConfig: &config.Config{}, + PreferencesConfig: &config.Preferences{}, + } + ProcessHistoricalBalancesMode(&testscommon.LoggerStub{}, cfg) + cfg.GeneralConfig.StoragePruning.ObserverCleanOldEpochsData = true + assert.False(t, IsInHistoricalBalancesMode(cfg)) + }) + t.Run("start in epoch enabled should return false", func(t *testing.T) { + t.Parallel() + + cfg := &config.Configs{ + GeneralConfig: &config.Config{}, + PreferencesConfig: &config.Preferences{}, + } + ProcessHistoricalBalancesMode(&testscommon.LoggerStub{}, cfg) + cfg.GeneralConfig.GeneralSettings.StartInEpochEnabled = true + assert.False(t, IsInHistoricalBalancesMode(cfg)) + }) + t.Run("accounts trie clean old epoch data enabled should return false", func(t *testing.T) { + t.Parallel() + + cfg := &config.Configs{ + GeneralConfig: &config.Config{}, + PreferencesConfig: &config.Preferences{}, + } + ProcessHistoricalBalancesMode(&testscommon.LoggerStub{}, cfg) + cfg.GeneralConfig.StoragePruning.AccountsTrieCleanOldEpochsData = true + assert.False(t, IsInHistoricalBalancesMode(cfg)) + }) + t.Run("accounts state pruning enabled should return false", func(t *testing.T) { + t.Parallel() + + cfg := &config.Configs{ + GeneralConfig: &config.Config{}, + PreferencesConfig: &config.Preferences{}, + } + ProcessHistoricalBalancesMode(&testscommon.LoggerStub{}, cfg) + cfg.GeneralConfig.StateTriesConfig.AccountsStatePruningEnabled = true + assert.False(t, IsInHistoricalBalancesMode(cfg)) + }) + t.Run("db lookup extension disabled should return false", func(t *testing.T) { + t.Parallel() + + cfg := &config.Configs{ + GeneralConfig: &config.Config{}, + PreferencesConfig: &config.Preferences{}, + } + ProcessHistoricalBalancesMode(&testscommon.LoggerStub{}, cfg) + cfg.GeneralConfig.DbLookupExtensions.Enabled = false + assert.False(t, IsInHistoricalBalancesMode(cfg)) + }) + t.Run("not a full archive node should return false", func(t *testing.T) { + t.Parallel() + + cfg := &config.Configs{ + GeneralConfig: &config.Config{}, + PreferencesConfig: &config.Preferences{}, + } + ProcessHistoricalBalancesMode(&testscommon.LoggerStub{}, cfg) + cfg.PreferencesConfig.Preferences.FullArchive = false + assert.False(t, IsInHistoricalBalancesMode(cfg)) + }) + t.Run("with historical balances config should return true", func(t *testing.T) { + t.Parallel() + + cfg := &config.Configs{ + GeneralConfig: &config.Config{}, + PreferencesConfig: &config.Preferences{}, + } + ProcessHistoricalBalancesMode(&testscommon.LoggerStub{}, cfg) + assert.True(t, IsInHistoricalBalancesMode(cfg)) + }) + +} diff --git a/common/operationmodes/operationmodes.go b/common/operationmodes/operationmodes.go index 70aed256f4b..1ae6a6fad70 100644 --- a/common/operationmodes/operationmodes.go +++ b/common/operationmodes/operationmodes.go @@ -5,6 +5,7 @@ import ( 
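Taken together, ProcessHistoricalBalancesMode and IsInHistoricalBalancesMode are meant to be used as a pair: the first forces the pruning, db-lookup and full-archive settings, the second recognizes exactly that combination. A small usage sketch of the two exported functions (the logger stub name is the one used in the tests above):

package main

import (
	"fmt"

	"github.com/multiversx/mx-chain-go/common/operationmodes"
	"github.com/multiversx/mx-chain-go/config"
	"github.com/multiversx/mx-chain-go/testscommon"
)

func main() {
	cfgs := &config.Configs{
		GeneralConfig:     &config.Config{},
		PreferencesConfig: &config.Preferences{},
	}

	// Force the historical balances settings, then check that the detector agrees.
	operationmodes.ProcessHistoricalBalancesMode(&testscommon.LoggerStub{}, cfgs)
	fmt.Println(operationmodes.IsInHistoricalBalancesMode(cfgs)) // true

	// Flipping any of the forced values breaks the detection.
	cfgs.GeneralConfig.DbLookupExtensions.Enabled = false
	fmt.Println(operationmodes.IsInHistoricalBalancesMode(cfgs)) // false
}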
"strings" ) +// constants that define the operation mode of the node const ( OperationModeFullArchive = "full-archive" OperationModeDbLookupExtension = "db-lookup-extension" diff --git a/common/statistics/disabled/stateStatistics.go b/common/statistics/disabled/stateStatistics.go index d10d310129a..c3bdf12420d 100644 --- a/common/statistics/disabled/stateStatistics.go +++ b/common/statistics/disabled/stateStatistics.go @@ -19,8 +19,8 @@ func (s *stateStatistics) Reset() { func (s *stateStatistics) ResetSnapshot() { } -// IncrCache does nothing -func (s *stateStatistics) IncrCache() { +// IncrementCache does nothing +func (s *stateStatistics) IncrementCache() { } // Cache returns zero @@ -28,8 +28,8 @@ func (s *stateStatistics) Cache() uint64 { return 0 } -// IncrSnapshotCache does nothing -func (ss *stateStatistics) IncrSnapshotCache() { +// IncrementSnapshotCache does nothing +func (ss *stateStatistics) IncrementSnapshotCache() { } // SnapshotCache returns the number of cached operations @@ -37,8 +37,8 @@ func (ss *stateStatistics) SnapshotCache() uint64 { return 0 } -// IncrPersister does nothing -func (s *stateStatistics) IncrPersister(epoch uint32) { +// IncrementPersister does nothing +func (s *stateStatistics) IncrementPersister(epoch uint32) { } // Persister returns zero @@ -46,8 +46,8 @@ func (s *stateStatistics) Persister(epoch uint32) uint64 { return 0 } -// IncrSnapshotPersister does nothing -func (ss *stateStatistics) IncrSnapshotPersister(epoch uint32) { +// IncrementSnapshotPersister does nothing +func (ss *stateStatistics) IncrementSnapshotPersister(epoch uint32) { } // SnapshotPersister returns the number of persister operations @@ -55,8 +55,8 @@ func (ss *stateStatistics) SnapshotPersister(epoch uint32) uint64 { return 0 } -// IncrTrie does nothing -func (s *stateStatistics) IncrTrie() { +// IncrementTrie does nothing +func (s *stateStatistics) IncrementTrie() { } // Trie returns zero diff --git a/common/statistics/disabled/stateStatistics_test.go b/common/statistics/disabled/stateStatistics_test.go index 7d17aa689d1..725ec3ee6a1 100644 --- a/common/statistics/disabled/stateStatistics_test.go +++ b/common/statistics/disabled/stateStatistics_test.go @@ -31,12 +31,12 @@ func TestStateStatistics_MethodsShouldNotPanic(t *testing.T) { stats.ResetSnapshot() stats.ResetAll() - stats.IncrCache() - stats.IncrSnapshotCache() - stats.IncrSnapshotCache() - stats.IncrPersister(1) - stats.IncrSnapshotPersister(1) - stats.IncrTrie() + stats.IncrementCache() + stats.IncrementSnapshotCache() + stats.IncrementSnapshotCache() + stats.IncrementPersister(1) + stats.IncrementSnapshotPersister(1) + stats.IncrementTrie() require.Equal(t, uint64(0), stats.Cache()) require.Equal(t, uint64(0), stats.SnapshotCache()) diff --git a/common/statistics/osLevel/memStats_test.go b/common/statistics/osLevel/memStats_test.go index 99724172e67..ff42ad516c2 100644 --- a/common/statistics/osLevel/memStats_test.go +++ b/common/statistics/osLevel/memStats_test.go @@ -3,12 +3,17 @@ package osLevel import ( + "runtime" "testing" "github.com/stretchr/testify/assert" ) func TestReadCurrentMemStats(t *testing.T) { + if runtime.GOOS == "darwin" { + t.Skip("skipping test on darwin") + } + t.Parallel() memStats, err := ReadCurrentMemStats() diff --git a/common/statistics/stateStatistics.go b/common/statistics/stateStatistics.go index c41040ab933..474dc6d47d1 100644 --- a/common/statistics/stateStatistics.go +++ b/common/statistics/stateStatistics.go @@ -51,8 +51,8 @@ func (ss *stateStatistics) ResetSnapshot() { 
ss.mutPersisters.Unlock() } -// IncrCache will increment cache counter -func (ss *stateStatistics) IncrCache() { +// IncrementCache will increment cache counter +func (ss *stateStatistics) IncrementCache() { atomic.AddUint64(&ss.numCache, 1) } @@ -61,8 +61,8 @@ func (ss *stateStatistics) Cache() uint64 { return atomic.LoadUint64(&ss.numCache) } -// IncrSnapshotCache will increment snapshot cache counter -func (ss *stateStatistics) IncrSnapshotCache() { +// IncrementSnapshotCache will increment snapshot cache counter +func (ss *stateStatistics) IncrementSnapshotCache() { atomic.AddUint64(&ss.numSnapshotCache, 1) } @@ -71,8 +71,8 @@ func (ss *stateStatistics) SnapshotCache() uint64 { return atomic.LoadUint64(&ss.numSnapshotCache) } -// IncrPersister will increment persister counter -func (ss *stateStatistics) IncrPersister(epoch uint32) { +// IncrementPersister will increment persister counter +func (ss *stateStatistics) IncrementPersister(epoch uint32) { ss.mutPersisters.Lock() defer ss.mutPersisters.Unlock() @@ -87,8 +87,8 @@ func (ss *stateStatistics) Persister(epoch uint32) uint64 { return ss.numPersister[epoch] } -// IncrSnapshotPersister will increment snapshot persister counter -func (ss *stateStatistics) IncrSnapshotPersister(epoch uint32) { +// IncrementSnapshotPersister will increment snapshot persister counter +func (ss *stateStatistics) IncrementSnapshotPersister(epoch uint32) { ss.mutPersisters.Lock() defer ss.mutPersisters.Unlock() @@ -103,8 +103,8 @@ func (ss *stateStatistics) SnapshotPersister(epoch uint32) uint64 { return ss.numSnapshotPersister[epoch] } -// IncrTrie will increment trie counter -func (ss *stateStatistics) IncrTrie() { +// IncrementTrie will increment trie counter +func (ss *stateStatistics) IncrementTrie() { atomic.AddUint64(&ss.numTrie, 1) } diff --git a/common/statistics/stateStatistics_test.go b/common/statistics/stateStatistics_test.go index e1beaf9d35b..674b3d8ea6b 100644 --- a/common/statistics/stateStatistics_test.go +++ b/common/statistics/stateStatistics_test.go @@ -27,11 +27,11 @@ func TestStateStatistics_Processing(t *testing.T) { assert.Equal(t, uint64(0), ss.Trie()) - ss.IncrTrie() - ss.IncrTrie() + ss.IncrementTrie() + ss.IncrementTrie() assert.Equal(t, uint64(2), ss.Trie()) - ss.IncrTrie() + ss.IncrementTrie() assert.Equal(t, uint64(3), ss.Trie()) ss.Reset() @@ -47,11 +47,11 @@ func TestStateStatistics_Processing(t *testing.T) { assert.Equal(t, uint64(0), ss.Persister(epoch)) - ss.IncrPersister(epoch) - ss.IncrPersister(epoch) + ss.IncrementPersister(epoch) + ss.IncrementPersister(epoch) assert.Equal(t, uint64(2), ss.Persister(epoch)) - ss.IncrPersister(epoch) + ss.IncrementPersister(epoch) assert.Equal(t, uint64(3), ss.Persister(epoch)) ss.Reset() @@ -65,11 +65,11 @@ func TestStateStatistics_Processing(t *testing.T) { assert.Equal(t, uint64(0), ss.Cache()) - ss.IncrCache() - ss.IncrCache() + ss.IncrementCache() + ss.IncrementCache() assert.Equal(t, uint64(2), ss.Cache()) - ss.IncrCache() + ss.IncrementCache() assert.Equal(t, uint64(3), ss.Cache()) ss.Reset() @@ -89,11 +89,11 @@ func TestStateStatistics_Snapshot(t *testing.T) { assert.Equal(t, uint64(0), ss.SnapshotPersister(epoch)) - ss.IncrSnapshotPersister(epoch) - ss.IncrSnapshotPersister(epoch) + ss.IncrementSnapshotPersister(epoch) + ss.IncrementSnapshotPersister(epoch) assert.Equal(t, uint64(2), ss.SnapshotPersister(epoch)) - ss.IncrSnapshotPersister(epoch) + ss.IncrementSnapshotPersister(epoch) assert.Equal(t, uint64(3), ss.SnapshotPersister(epoch)) ss.ResetSnapshot() @@ -107,11 
+107,11 @@ func TestStateStatistics_Snapshot(t *testing.T) { assert.Equal(t, uint64(0), ss.Cache()) - ss.IncrSnapshotCache() - ss.IncrSnapshotCache() + ss.IncrementSnapshotCache() + ss.IncrementSnapshotCache() assert.Equal(t, uint64(2), ss.SnapshotCache()) - ss.IncrSnapshotCache() + ss.IncrementSnapshotCache() assert.Equal(t, uint64(3), ss.SnapshotCache()) ss.ResetSnapshot() @@ -144,11 +144,11 @@ func TestStateStatistics_ConcurrenyOperations(t *testing.T) { case 0: ss.Reset() case 1: - ss.IncrCache() + ss.IncrementCache() case 2: - ss.IncrPersister(epoch) + ss.IncrementPersister(epoch) case 3: - ss.IncrTrie() + ss.IncrementTrie() case 7: _ = ss.Cache() case 8: diff --git a/common/validatorInfo/validatorInfoUtils.go b/common/validatorInfo/validatorInfoUtils.go index e6cf36ba52a..20f4e97897a 100644 --- a/common/validatorInfo/validatorInfoUtils.go +++ b/common/validatorInfo/validatorInfoUtils.go @@ -6,41 +6,41 @@ import ( ) // WasActiveInCurrentEpoch returns true if the node was active in current epoch -func WasActiveInCurrentEpoch(valInfo *state.ValidatorInfo) bool { +func WasActiveInCurrentEpoch(valInfo state.ValidatorInfoHandler) bool { if valInfo == nil { return false } - active := valInfo.LeaderFailure > 0 || valInfo.LeaderSuccess > 0 || valInfo.ValidatorSuccess > 0 || valInfo.ValidatorFailure > 0 + active := valInfo.GetLeaderFailure() > 0 || valInfo.GetLeaderSuccess() > 0 || valInfo.GetValidatorSuccess() > 0 || valInfo.GetValidatorFailure() > 0 return active } // WasLeavingEligibleInCurrentEpoch returns true if the validator was eligible in the epoch but has done an unstake. -func WasLeavingEligibleInCurrentEpoch(valInfo *state.ValidatorInfo) bool { +func WasLeavingEligibleInCurrentEpoch(valInfo state.ValidatorInfoHandler) bool { if valInfo == nil { return false } - return valInfo.List == string(common.LeavingList) && WasActiveInCurrentEpoch(valInfo) + return valInfo.GetList() == string(common.LeavingList) && WasActiveInCurrentEpoch(valInfo) } // WasJailedEligibleInCurrentEpoch returns true if the validator was jailed in the epoch but also active/eligible due to not enough -//nodes in shard. -func WasJailedEligibleInCurrentEpoch(valInfo *state.ValidatorInfo) bool { +// nodes in shard. 
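The validatorInfoUtils helpers are being switched from the concrete *state.ValidatorInfo to the state.ValidatorInfoHandler interface, so callers only depend on the getters they actually read. A simplified, self-contained sketch of that pattern (the local interface and stub below are an illustrative subset defined only for this example; the real interface lives in the state package):

package main

import "fmt"

// validatorInfoHandler lists only the getters used by these helpers.
type validatorInfoHandler interface {
	GetList() string
	GetLeaderSuccess() uint32
	GetLeaderFailure() uint32
	GetValidatorSuccess() uint32
	GetValidatorFailure() uint32
}

type validatorInfoStub struct {
	list          string
	leaderSuccess uint32
}

func (v *validatorInfoStub) GetList() string             { return v.list }
func (v *validatorInfoStub) GetLeaderSuccess() uint32    { return v.leaderSuccess }
func (v *validatorInfoStub) GetLeaderFailure() uint32    { return 0 }
func (v *validatorInfoStub) GetValidatorSuccess() uint32 { return 0 }
func (v *validatorInfoStub) GetValidatorFailure() uint32 { return 0 }

// wasActiveInCurrentEpoch mirrors the refactored helper: any leader or validator
// activity counter above zero means the node was active in the epoch.
func wasActiveInCurrentEpoch(valInfo validatorInfoHandler) bool {
	if valInfo == nil {
		return false
	}
	return valInfo.GetLeaderFailure() > 0 || valInfo.GetLeaderSuccess() > 0 ||
		valInfo.GetValidatorSuccess() > 0 || valInfo.GetValidatorFailure() > 0
}

func main() {
	fmt.Println(wasActiveInCurrentEpoch(&validatorInfoStub{list: "eligible", leaderSuccess: 2})) // true
	fmt.Println(wasActiveInCurrentEpoch(&validatorInfoStub{list: "waiting"}))                    // false
}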
+func WasJailedEligibleInCurrentEpoch(valInfo state.ValidatorInfoHandler) bool { if valInfo == nil { return false } - return valInfo.List == string(common.JailedList) && WasActiveInCurrentEpoch(valInfo) + return valInfo.GetList() == string(common.JailedList) && WasActiveInCurrentEpoch(valInfo) } // WasEligibleInCurrentEpoch returns true if the validator was eligible for consensus in current epoch -func WasEligibleInCurrentEpoch(valInfo *state.ValidatorInfo) bool { +func WasEligibleInCurrentEpoch(valInfo state.ValidatorInfoHandler) bool { if valInfo == nil { return false } - wasEligibleInShard := valInfo.List == string(common.EligibleList) || + wasEligibleInShard := valInfo.GetList() == string(common.EligibleList) || WasLeavingEligibleInCurrentEpoch(valInfo) || WasJailedEligibleInCurrentEpoch(valInfo) diff --git a/config/config.go b/config/config.go index 1a4f5a625c1..472378d49fd 100644 --- a/config/config.go +++ b/config/config.go @@ -95,6 +95,7 @@ type EpochStartConfig struct { MinNumConnectedPeersToStart int MinNumOfPeersToConsiderBlockValid int ExtraDelayForRequestBlockInfoInMilliseconds int + GenesisEpoch uint32 } // BlockSizeThrottleConfig will hold the configuration for adaptive block size throttle @@ -366,15 +367,16 @@ type TxAccumulatorConfig struct { // AntifloodConfig will hold all p2p antiflood parameters type AntifloodConfig struct { - Enabled bool - NumConcurrentResolverJobs int32 - OutOfSpecs FloodPreventerConfig - FastReacting FloodPreventerConfig - SlowReacting FloodPreventerConfig - PeerMaxOutput AntifloodLimitsConfig - Cache CacheConfig - Topic TopicAntifloodConfig - TxAccumulator TxAccumulatorConfig + Enabled bool + NumConcurrentResolverJobs int32 + NumConcurrentResolvingTrieNodesJobs int32 + OutOfSpecs FloodPreventerConfig + FastReacting FloodPreventerConfig + SlowReacting FloodPreventerConfig + PeerMaxOutput AntifloodLimitsConfig + Cache CacheConfig + Topic TopicAntifloodConfig + TxAccumulator TxAccumulatorConfig } // FloodPreventerConfig will hold all flood preventer parameters diff --git a/config/configChecker.go b/config/configChecker.go new file mode 100644 index 00000000000..11ddc7eff9a --- /dev/null +++ b/config/configChecker.go @@ -0,0 +1,103 @@ +package config + +import ( + "fmt" + + logger "github.com/multiversx/mx-chain-logger-go" +) + +var log = logger.GetOrCreate("config-checker") + +// SanityCheckNodesConfig checks if the nodes limit setup is set correctly +func SanityCheckNodesConfig( + nodesSetup NodesSetupHandler, + cfg EnableEpochs, +) error { + maxNodesChange := cfg.MaxNodesChangeEnableEpoch + for _, maxNodesConfig := range maxNodesChange { + err := checkMaxNodesConfig(nodesSetup, maxNodesConfig) + if err != nil { + return fmt.Errorf("%w in MaxNodesChangeConfig at EpochEnable = %d", err, maxNodesConfig.EpochEnable) + } + } + + return sanityCheckEnableEpochsStakingV4(cfg, nodesSetup.NumberOfShards()) +} + +func checkMaxNodesConfig( + nodesSetup NodesSetupHandler, + maxNodesConfig MaxNodesChangeConfig, +) error { + maxNumNodes := maxNodesConfig.MaxNumNodes + minNumNodesWithHysteresis := nodesSetup.MinNumberOfNodesWithHysteresis() + if maxNumNodes < minNumNodesWithHysteresis { + return fmt.Errorf("%w, maxNumNodes: %d, minNumNodesWithHysteresis: %d", + errInvalidMaxMinNodes, maxNumNodes, minNumNodesWithHysteresis) + } + + return nil +} + +// sanityCheckEnableEpochsStakingV4 checks if the enable epoch configs for stakingV4 are set correctly +func sanityCheckEnableEpochsStakingV4(enableEpochsCfg EnableEpochs, numOfShards uint32) error { + if 
!areStakingV4StepsInOrder(enableEpochsCfg) { + return errStakingV4StepsNotInOrder + } + + return checkStakingV4MaxNodesChangeCfg(enableEpochsCfg, numOfShards) +} + +func areStakingV4StepsInOrder(enableEpochsCfg EnableEpochs) bool { + return (enableEpochsCfg.StakingV4Step1EnableEpoch == enableEpochsCfg.StakingV4Step2EnableEpoch-1) && + (enableEpochsCfg.StakingV4Step2EnableEpoch == enableEpochsCfg.StakingV4Step3EnableEpoch-1) +} + +func checkStakingV4MaxNodesChangeCfg(enableEpochsCfg EnableEpochs, numOfShards uint32) error { + maxNodesChangeCfg := enableEpochsCfg.MaxNodesChangeEnableEpoch + if len(maxNodesChangeCfg) <= 1 { + return nil + } + + maxNodesConfigAdaptedForStakingV4 := false + + for idx, currMaxNodesChangeCfg := range maxNodesChangeCfg { + if currMaxNodesChangeCfg.EpochEnable == enableEpochsCfg.StakingV4Step3EnableEpoch { + maxNodesConfigAdaptedForStakingV4 = true + + if idx == 0 { + log.Warn(fmt.Errorf("found config change in MaxNodesChangeEnableEpoch for StakingV4Step3EnableEpoch = %d, but %w ", + enableEpochsCfg.StakingV4Step3EnableEpoch, errNoMaxNodesConfigBeforeStakingV4).Error()) + break + } + + prevMaxNodesChange := maxNodesChangeCfg[idx-1] + err := checkMaxNodesChangedCorrectly(prevMaxNodesChange, currMaxNodesChangeCfg, numOfShards) + if err != nil { + return err + } + + break + } + } + + if !maxNodesConfigAdaptedForStakingV4 { + return fmt.Errorf("%w = %d", errNoMaxNodesConfigChangeForStakingV4, enableEpochsCfg.StakingV4Step3EnableEpoch) + } + + return nil +} + +func checkMaxNodesChangedCorrectly(prevMaxNodesChange MaxNodesChangeConfig, currMaxNodesChange MaxNodesChangeConfig, numOfShards uint32) error { + if prevMaxNodesChange.NodesToShufflePerShard != currMaxNodesChange.NodesToShufflePerShard { + return errMismatchNodesToShuffle + } + + totalShuffled := (numOfShards + 1) * prevMaxNodesChange.NodesToShufflePerShard + expectedMaxNumNodes := prevMaxNodesChange.MaxNumNodes - totalShuffled + if expectedMaxNumNodes != currMaxNodesChange.MaxNumNodes { + return fmt.Errorf("expected MaxNodesChangeEnableEpoch.MaxNumNodes for StakingV4Step3EnableEpoch = %d, but got %d", + expectedMaxNumNodes, currMaxNodesChange.MaxNumNodes) + } + + return nil +} diff --git a/config/configChecker_test.go b/config/configChecker_test.go new file mode 100644 index 00000000000..ec993631fbb --- /dev/null +++ b/config/configChecker_test.go @@ -0,0 +1,382 @@ +package config + +import ( + "strings" + "testing" + + "github.com/multiversx/mx-chain-go/testscommon/nodesSetupMock" + "github.com/stretchr/testify/require" +) + +const numOfShards = 3 + +func generateCorrectConfig() EnableEpochs { + return EnableEpochs{ + StakingV4Step1EnableEpoch: 4, + StakingV4Step2EnableEpoch: 5, + StakingV4Step3EnableEpoch: 6, + MaxNodesChangeEnableEpoch: []MaxNodesChangeConfig{ + { + EpochEnable: 0, + MaxNumNodes: 36, + NodesToShufflePerShard: 4, + }, + { + EpochEnable: 1, + MaxNumNodes: 56, + NodesToShufflePerShard: 2, + }, + { + EpochEnable: 6, + MaxNumNodes: 48, + NodesToShufflePerShard: 2, + }, + }, + } +} + +func TestSanityCheckEnableEpochsStakingV4(t *testing.T) { + t.Parallel() + + t.Run("correct config, should work", func(t *testing.T) { + t.Parallel() + + cfg := generateCorrectConfig() + err := sanityCheckEnableEpochsStakingV4(cfg, numOfShards) + require.Nil(t, err) + }) + + t.Run("staking v4 steps not in ascending order, should return error", func(t *testing.T) { + t.Parallel() + + cfg := generateCorrectConfig() + cfg.StakingV4Step1EnableEpoch = 5 + cfg.StakingV4Step2EnableEpoch = 5 + err := 
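Two invariants carry most of the weight in this checker: the three staking v4 steps must be consecutive epochs, and the MaxNodesChangeEnableEpoch entry activating at StakingV4Step3EnableEpoch must shrink the node limit by exactly one shuffle round, i.e. prevMaxNumNodes - (numOfShards + 1) * nodesToShufflePerShard. A short worked check using the numbers from generateCorrectConfig above (3 shards, previous entry with 56 max nodes and 2 shuffled per shard):

package main

import "fmt"

func main() {
	// Steps must be consecutive: 4, 5, 6 is valid; 4, 5, 7 would fail the check.
	step1, step2, step3 := uint32(4), uint32(5), uint32(6)
	stepsInOrder := step1 == step2-1 && step2 == step3-1
	fmt.Println("steps in order:", stepsInOrder) // true

	// Expected MaxNumNodes for the entry at StakingV4Step3EnableEpoch:
	// 3 shards + metachain => 4 * 2 = 8 nodes leave the limit in one shuffle round.
	numOfShards := uint32(3)
	prevMaxNumNodes := uint32(56)
	nodesToShufflePerShard := uint32(2)
	expected := prevMaxNumNodes - (numOfShards+1)*nodesToShufflePerShard
	fmt.Println("expected MaxNumNodes:", expected) // 48, matching the config entry at epoch 6
}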
sanityCheckEnableEpochsStakingV4(cfg, numOfShards) + require.Equal(t, errStakingV4StepsNotInOrder, err) + + cfg = generateCorrectConfig() + cfg.StakingV4Step2EnableEpoch = 5 + cfg.StakingV4Step3EnableEpoch = 4 + err = sanityCheckEnableEpochsStakingV4(cfg, numOfShards) + require.Equal(t, errStakingV4StepsNotInOrder, err) + }) + + t.Run("staking v4 steps not in cardinal order, should return error", func(t *testing.T) { + t.Parallel() + + cfg := generateCorrectConfig() + + cfg.StakingV4Step1EnableEpoch = 1 + cfg.StakingV4Step2EnableEpoch = 3 + cfg.StakingV4Step3EnableEpoch = 6 + err := sanityCheckEnableEpochsStakingV4(cfg, numOfShards) + require.Equal(t, errStakingV4StepsNotInOrder, err) + + cfg.StakingV4Step1EnableEpoch = 1 + cfg.StakingV4Step2EnableEpoch = 2 + cfg.StakingV4Step3EnableEpoch = 6 + err = sanityCheckEnableEpochsStakingV4(cfg, numOfShards) + require.Equal(t, errStakingV4StepsNotInOrder, err) + + cfg.StakingV4Step1EnableEpoch = 1 + cfg.StakingV4Step2EnableEpoch = 5 + cfg.StakingV4Step3EnableEpoch = 6 + err = sanityCheckEnableEpochsStakingV4(cfg, numOfShards) + require.Equal(t, errStakingV4StepsNotInOrder, err) + }) + + t.Run("no previous config for max nodes change with one entry, should not return error", func(t *testing.T) { + t.Parallel() + + cfg := generateCorrectConfig() + cfg.MaxNodesChangeEnableEpoch = []MaxNodesChangeConfig{ + { + EpochEnable: 6, + MaxNumNodes: 48, + NodesToShufflePerShard: 2, + }, + } + + err := sanityCheckEnableEpochsStakingV4(cfg, numOfShards) + require.Nil(t, err) + }) + + t.Run("no max nodes config change for StakingV4Step3EnableEpoch, should return error", func(t *testing.T) { + t.Parallel() + + cfg := generateCorrectConfig() + cfg.MaxNodesChangeEnableEpoch = []MaxNodesChangeConfig{ + { + EpochEnable: 1, + MaxNumNodes: 56, + NodesToShufflePerShard: 2, + }, + { + EpochEnable: 444, + MaxNumNodes: 48, + NodesToShufflePerShard: 2, + }, + } + + err := sanityCheckEnableEpochsStakingV4(cfg, numOfShards) + require.NotNil(t, err) + require.True(t, strings.Contains(err.Error(), errNoMaxNodesConfigChangeForStakingV4.Error())) + require.True(t, strings.Contains(err.Error(), "6")) + }) + + t.Run("max nodes config change for StakingV4Step3EnableEpoch has no previous config change, should not error", func(t *testing.T) { + t.Parallel() + + cfg := generateCorrectConfig() + cfg.MaxNodesChangeEnableEpoch = []MaxNodesChangeConfig{ + { + EpochEnable: cfg.StakingV4Step3EnableEpoch, + MaxNumNodes: 48, + NodesToShufflePerShard: 2, + }, + { + EpochEnable: 444, + MaxNumNodes: 56, + NodesToShufflePerShard: 2, + }, + } + + err := sanityCheckEnableEpochsStakingV4(cfg, numOfShards) + require.Nil(t, err) + }) + + t.Run("stakingV4 config for max nodes changed with different nodes to shuffle, should return error", func(t *testing.T) { + t.Parallel() + + cfg := generateCorrectConfig() + cfg.MaxNodesChangeEnableEpoch[1].NodesToShufflePerShard = 2 + cfg.MaxNodesChangeEnableEpoch[2].NodesToShufflePerShard = 4 + + err := sanityCheckEnableEpochsStakingV4(cfg, numOfShards) + require.ErrorIs(t, err, errMismatchNodesToShuffle) + }) + + t.Run("stakingV4 config for max nodes changed with wrong max num nodes, should return error", func(t *testing.T) { + t.Parallel() + + cfg := generateCorrectConfig() + cfg.MaxNodesChangeEnableEpoch[2].MaxNumNodes = 56 + + err := sanityCheckEnableEpochsStakingV4(cfg, numOfShards) + require.NotNil(t, err) + require.True(t, strings.Contains(err.Error(), "expected")) + require.True(t, strings.Contains(err.Error(), "48")) + require.True(t, 
strings.Contains(err.Error(), "got")) + require.True(t, strings.Contains(err.Error(), "56")) + }) +} + +func TestSanityCheckNodesConfig(t *testing.T) { + t.Parallel() + + numShards := uint32(3) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + cfg := generateCorrectConfig() + nodesSetup := &nodesSetupMock.NodesSetupMock{ + NumberOfShardsField: numShards, + HysteresisField: 0, + MinNumberOfMetaNodesField: 5, + MinNumberOfShardNodesField: 5, + } + err := SanityCheckNodesConfig(nodesSetup, cfg) + require.Nil(t, err) + + cfg.MaxNodesChangeEnableEpoch = []MaxNodesChangeConfig{ + { + EpochEnable: 1, + MaxNumNodes: 3200, + NodesToShufflePerShard: 80, + }, + { + EpochEnable: 2, + MaxNumNodes: 2880, + NodesToShufflePerShard: 80, + }, + { + EpochEnable: 3, + MaxNumNodes: 2240, + NodesToShufflePerShard: 80, + }, + { + EpochEnable: 4, + MaxNumNodes: 2240, + NodesToShufflePerShard: 40, + }, + { + EpochEnable: 6, + MaxNumNodes: 2080, + NodesToShufflePerShard: 40, + }, + } + nodesSetup = &nodesSetupMock.NodesSetupMock{ + NumberOfShardsField: numShards, + HysteresisField: 0.2, + MinNumberOfMetaNodesField: 400, + MinNumberOfShardNodesField: 400, + } + err = SanityCheckNodesConfig(nodesSetup, cfg) + require.Nil(t, err) + + cfg.MaxNodesChangeEnableEpoch = []MaxNodesChangeConfig{ + { + EpochEnable: 0, + MaxNumNodes: 36, + NodesToShufflePerShard: 4, + }, + { + EpochEnable: 1, + MaxNumNodes: 56, + NodesToShufflePerShard: 2, + }, + { + EpochEnable: 6, + MaxNumNodes: 48, + NodesToShufflePerShard: 2, + }, + } + nodesSetup = &nodesSetupMock.NodesSetupMock{ + NumberOfShardsField: numShards, + HysteresisField: 0, + MinNumberOfMetaNodesField: 3, + MinNumberOfShardNodesField: 3, + } + err = SanityCheckNodesConfig(nodesSetup, cfg) + require.Nil(t, err) + + cfg.MaxNodesChangeEnableEpoch = []MaxNodesChangeConfig{ + { + EpochEnable: 0, + MaxNumNodes: 36, + NodesToShufflePerShard: 4, + }, + { + EpochEnable: 1, + MaxNumNodes: 56, + NodesToShufflePerShard: 2, + }, + { + EpochEnable: 6, + MaxNumNodes: 48, + NodesToShufflePerShard: 2, + }, + } + nodesSetup = &nodesSetupMock.NodesSetupMock{ + NumberOfShardsField: numShards, + HysteresisField: 0.2, + MinNumberOfMetaNodesField: 7, + MinNumberOfShardNodesField: 7, + } + err = SanityCheckNodesConfig(nodesSetup, cfg) + require.Nil(t, err) + + cfg.MaxNodesChangeEnableEpoch = []MaxNodesChangeConfig{ + { + EpochEnable: 0, + MaxNumNodes: 48, + NodesToShufflePerShard: 4, + }, + { + EpochEnable: 1, + MaxNumNodes: 56, + NodesToShufflePerShard: 2, + }, + { + EpochEnable: 6, + MaxNumNodes: 48, + NodesToShufflePerShard: 2, + }, + } + nodesSetup = &nodesSetupMock.NodesSetupMock{ + NumberOfShardsField: numShards, + HysteresisField: 0.2, + MinNumberOfMetaNodesField: 10, + MinNumberOfShardNodesField: 10, + } + err = SanityCheckNodesConfig(nodesSetup, cfg) + require.Nil(t, err) + + cfg.MaxNodesChangeEnableEpoch = []MaxNodesChangeConfig{ + { + EpochEnable: 0, + MaxNumNodes: 2169, + NodesToShufflePerShard: 143, + }, + { + EpochEnable: 1, + MaxNumNodes: 3200, + NodesToShufflePerShard: 80, + }, + { + EpochEnable: 6, + MaxNumNodes: 2880, + NodesToShufflePerShard: 80, + }, + } + nodesSetup = &nodesSetupMock.NodesSetupMock{ + NumberOfShardsField: numShards, + HysteresisField: 0.2, + MinNumberOfMetaNodesField: 400, + MinNumberOfShardNodesField: 400, + } + err = SanityCheckNodesConfig(nodesSetup, cfg) + require.Nil(t, err) + }) + + t.Run("zero nodes to shuffle per shard, should not return error", func(t *testing.T) { + t.Parallel() + + cfg := generateCorrectConfig() + 
cfg.MaxNodesChangeEnableEpoch = []MaxNodesChangeConfig{ + { + EpochEnable: 4, + MaxNumNodes: 3200, + NodesToShufflePerShard: 0, + }, + { + EpochEnable: 6, + MaxNumNodes: 3200, + NodesToShufflePerShard: 0, + }, + } + nodesSetup := &nodesSetupMock.NodesSetupMock{ + NumberOfShardsField: numShards, + HysteresisField: 0.2, + MinNumberOfMetaNodesField: 400, + MinNumberOfShardNodesField: 400, + } + err := SanityCheckNodesConfig(nodesSetup, cfg) + require.Nil(t, err) + }) + + t.Run("maxNumNodes < minNumNodesWithHysteresis, should return error ", func(t *testing.T) { + t.Parallel() + + cfg := generateCorrectConfig() + cfg.MaxNodesChangeEnableEpoch = []MaxNodesChangeConfig{ + { + EpochEnable: 4, + MaxNumNodes: 1900, + NodesToShufflePerShard: 80, + }, + } + nodesSetup := &nodesSetupMock.NodesSetupMock{ + NumberOfShardsField: numShards, + HysteresisField: 0.2, + MinNumberOfMetaNodesField: 400, + MinNumberOfShardNodesField: 400, + } + err := SanityCheckNodesConfig(nodesSetup, cfg) + require.NotNil(t, err) + require.True(t, strings.Contains(err.Error(), errInvalidMaxMinNodes.Error())) + require.True(t, strings.Contains(err.Error(), "maxNumNodes: 1900")) + require.True(t, strings.Contains(err.Error(), "minNumNodesWithHysteresis: 1920")) + }) +} diff --git a/config/epochConfig.go b/config/epochConfig.go index 385f2a3f7e2..f03492e1826 100644 --- a/config/epochConfig.go +++ b/config/epochConfig.go @@ -43,13 +43,11 @@ type EnableEpochs struct { SaveJailedAlwaysEnableEpoch uint32 ValidatorToDelegationEnableEpoch uint32 ReDelegateBelowMinCheckEnableEpoch uint32 - WaitingListFixEnableEpoch uint32 IncrementSCRNonceInMultiTransferEnableEpoch uint32 ScheduledMiniBlocksEnableEpoch uint32 ESDTMultiTransferEnableEpoch uint32 GlobalMintBurnDisableEpoch uint32 ESDTTransferRoleEnableEpoch uint32 - BuiltInFunctionOnMetaEnableEpoch uint32 ComputeRewardCheckpointEnableEpoch uint32 SCRSizeInvariantCheckEnableEpoch uint32 BackwardCompSaveKeyValueEnableEpoch uint32 @@ -110,6 +108,11 @@ type EnableEpochs struct { ChangeOwnerAddressCrossShardThroughSCEnableEpoch uint32 FixGasRemainingForSaveKeyValueBuiltinFunctionEnableEpoch uint32 CurrentRandomnessOnSortingEnableEpoch uint32 + StakeLimitsEnableEpoch uint32 + StakingV4Step1EnableEpoch uint32 + StakingV4Step2EnableEpoch uint32 + StakingV4Step3EnableEpoch uint32 + AlwaysMergeContextsInEEIEnableEpoch uint32 DynamicESDTEnableEpoch uint32 BLSMultiSignerEnableEpoch []MultiSignerConfig } diff --git a/config/errors.go b/config/errors.go new file mode 100644 index 00000000000..6161ef4c168 --- /dev/null +++ b/config/errors.go @@ -0,0 +1,13 @@ +package config + +import "errors" + +var errStakingV4StepsNotInOrder = errors.New("staking v4 enable epoch steps should be in cardinal order(e.g.: StakingV4Step1EnableEpoch = 2, StakingV4Step2EnableEpoch = 3, StakingV4Step3EnableEpoch = 4)") + +var errNoMaxNodesConfigBeforeStakingV4 = errors.New("no previous config change entry in MaxNodesChangeEnableEpoch before entry with EpochEnable = StakingV4Step3EnableEpoch") + +var errMismatchNodesToShuffle = errors.New("previous MaxNodesChangeEnableEpoch.NodesToShufflePerShard != MaxNodesChangeEnableEpoch.NodesToShufflePerShard with EnableEpoch = StakingV4Step3EnableEpoch") + +var errNoMaxNodesConfigChangeForStakingV4 = errors.New("no MaxNodesChangeEnableEpoch config found for EpochEnable = StakingV4Step3EnableEpoch") + +var errInvalidMaxMinNodes = errors.New("number of min nodes with hysteresis > number of max nodes") diff --git a/config/interface.go b/config/interface.go new file mode 100644 index 
00000000000..859e845c434 --- /dev/null +++ b/config/interface.go @@ -0,0 +1,7 @@ +package config + +// NodesSetupHandler provides nodes setup information +type NodesSetupHandler interface { + MinNumberOfNodesWithHysteresis() uint32 + NumberOfShards() uint32 +} diff --git a/config/ratingsConfig.go b/config/ratingsConfig.go index 3558a32f446..a4c243cd51b 100644 --- a/config/ratingsConfig.go +++ b/config/ratingsConfig.go @@ -27,7 +27,7 @@ type MetaChain struct { RatingSteps } -//RatingValue will hold different rating options with increase and decrease steps +// RatingValue will hold different rating options with increase and decrease steps type RatingValue struct { Name string Value int32 diff --git a/config/systemSmartContractsConfig.go b/config/systemSmartContractsConfig.go index d48027574eb..0ed6cce28b1 100644 --- a/config/systemSmartContractsConfig.go +++ b/config/systemSmartContractsConfig.go @@ -7,6 +7,7 @@ type SystemSmartContractsConfig struct { StakingSystemSCConfig StakingSystemSCConfig DelegationManagerSystemSCConfig DelegationManagerSystemSCConfig DelegationSystemSCConfig DelegationSystemSCConfig + SoftAuctionConfig SoftAuctionConfig } // StakingSystemSCConfig will hold the staking system smart contract settings @@ -23,6 +24,8 @@ type StakingSystemSCConfig struct { BleedPercentagePerRound float64 MaxNumberOfNodesForStake uint64 ActivateBLSPubKeyMessageVerification bool + StakeLimitPercentage float64 + NodeLimitPercentage float64 } // ESDTSystemSCConfig defines a set of constant to initialize the esdt system smart contract @@ -32,7 +35,7 @@ type ESDTSystemSCConfig struct { } // GovernanceSystemSCConfigV1 holds the initial set of values that were used to initialise the -// governance system smart contract at genesis time +// governance system smart contract at genesis time type GovernanceSystemSCConfigV1 struct { NumNodes int64 ProposalCost string @@ -42,7 +45,7 @@ type GovernanceSystemSCConfigV1 struct { } // GovernanceSystemSCConfigActive defines the set of configuration values used by the governance -// system smart contract once it activates +// system smart contract once it activates type GovernanceSystemSCConfigActive struct { ProposalCost string LostProposalFee string @@ -71,3 +74,11 @@ type DelegationSystemSCConfig struct { MaxServiceFee uint64 AddTokensWhitelistedAddress string } + +// SoftAuctionConfig represents the config options for soft auction selecting used in staking v4 +type SoftAuctionConfig struct { + TopUpStep string + MinTopUp string + MaxTopUp string + MaxNumberOfIterations uint64 +} diff --git a/config/tomlConfig_test.go b/config/tomlConfig_test.go index 178afe20959..0c48df9e40e 100644 --- a/config/tomlConfig_test.go +++ b/config/tomlConfig_test.go @@ -490,10 +490,11 @@ func TestP2pConfig(t *testing.T) { [Node.Transports.TCP] ListenAddress = "/ip4/0.0.0.0/tcp/%d" PreventPortReuse = true - [Node.ResourceLimiter] - Type = "default autoscale" #available options "default autoscale", "infinite", "default with manual scale". - ManualSystemMemoryInMB = 1 # not taken into account if the type is not "default with manual scale" - ManualMaximumFD = 2 # not taken into account if the type is not "default with manual scale" + + [Node.ResourceLimiter] + Type = "default autoscale" #available options "default autoscale", "infinite", "default with manual scale". 
+ ManualSystemMemoryInMB = 1 # not taken into account if the type is not "default with manual scale" + ManualMaximumFD = 2 # not taken into account if the type is not "default with manual scale" [KadDhtPeerDiscovery] Enabled = false @@ -652,9 +653,6 @@ func TestEnableEpochConfig(t *testing.T) { # ValidatorToDelegationEnableEpoch represents the epoch when the validator-to-delegation feature will be enabled ValidatorToDelegationEnableEpoch = 29 - # WaitingListFixEnableEpoch represents the epoch when the 6 epoch waiting list fix is enabled - WaitingListFixEnableEpoch = 30 - # IncrementSCRNonceInMultiTransferEnableEpoch represents the epoch when the fix for preventing the generation of the same SCRs # is enabled. The fix is done by adding an extra increment. IncrementSCRNonceInMultiTransferEnableEpoch = 31 @@ -668,9 +666,6 @@ func TestEnableEpochConfig(t *testing.T) { # ESDTTransferRoleEnableEpoch represents the epoch when esdt transfer role set is enabled ESDTTransferRoleEnableEpoch = 34 - # BuiltInFunctionOnMetaEnableEpoch represents the epoch when built in function processing on metachain is enabled - BuiltInFunctionOnMetaEnableEpoch = 35 - # ComputeRewardCheckpointEnableEpoch represents the epoch when compute rewards checkpoint epoch is enabled ComputeRewardCheckpointEnableEpoch = 36 @@ -852,8 +847,11 @@ func TestEnableEpochConfig(t *testing.T) { # CurrentRandomnessOnSortingEnableEpoch represents the epoch when the current randomness on sorting is enabled CurrentRandomnessOnSortingEnableEpoch = 93 + # AlwaysMergeContextsInEEIEnableEpoch represents the epoch in which the EEI will always merge the contexts + AlwaysMergeContextsInEEIEnableEpoch = 94 + # DynamicESDTEnableEpoch represents the epoch when dynamic NFT feature is enabled - DynamicESDTEnableEpoch = 94 + DynamicESDTEnableEpoch = 95 # MaxNodesChangeEnableEpoch holds configuration for changing the maximum number of nodes and the enabling epoch MaxNodesChangeEnableEpoch = [ @@ -904,12 +902,10 @@ func TestEnableEpochConfig(t *testing.T) { SaveJailedAlwaysEnableEpoch: 27, ReDelegateBelowMinCheckEnableEpoch: 28, ValidatorToDelegationEnableEpoch: 29, - WaitingListFixEnableEpoch: 30, IncrementSCRNonceInMultiTransferEnableEpoch: 31, ESDTMultiTransferEnableEpoch: 32, GlobalMintBurnDisableEpoch: 33, ESDTTransferRoleEnableEpoch: 34, - BuiltInFunctionOnMetaEnableEpoch: 35, ComputeRewardCheckpointEnableEpoch: 36, SCRSizeInvariantCheckEnableEpoch: 37, BackwardCompSaveKeyValueEnableEpoch: 38, @@ -968,7 +964,8 @@ func TestEnableEpochConfig(t *testing.T) { FixGasRemainingForSaveKeyValueBuiltinFunctionEnableEpoch: 91, MigrateDataTrieEnableEpoch: 92, CurrentRandomnessOnSortingEnableEpoch: 93, - DynamicESDTEnableEpoch: 94, + AlwaysMergeContextsInEEIEnableEpoch: 94, + DynamicESDTEnableEpoch: 95, MaxNodesChangeEnableEpoch: []MaxNodesChangeConfig{ { EpochEnable: 44, diff --git a/consensus/mock/peerProcessorStub.go b/consensus/mock/peerProcessorStub.go deleted file mode 100644 index 69e8b8d7d31..00000000000 --- a/consensus/mock/peerProcessorStub.go +++ /dev/null @@ -1,37 +0,0 @@ -package mock - -import ( - "github.com/multiversx/mx-chain-core-go/data" - "github.com/multiversx/mx-chain-go/sharding" -) - -// ValidatorStatisticsProcessorStub - -type ValidatorStatisticsProcessorStub struct { - LoadInitialStateCalled func(in []*sharding.InitialNode) error - UpdatePeerStateCalled func(header, previousHeader data.HeaderHandler) error - IsInterfaceNilCalled func() bool -} - -// LoadInitialState - -func (pm *ValidatorStatisticsProcessorStub) LoadInitialState(in 
[]*sharding.InitialNode) error { - if pm.LoadInitialStateCalled != nil { - return pm.LoadInitialStateCalled(in) - } - return nil -} - -// UpdatePeerState - -func (pm *ValidatorStatisticsProcessorStub) UpdatePeerState(header, previousHeader data.HeaderHandler) error { - if pm.UpdatePeerStateCalled != nil { - return pm.UpdatePeerStateCalled(header, previousHeader) - } - return nil -} - -// IsInterfaceNil - -func (pm *ValidatorStatisticsProcessorStub) IsInterfaceNil() bool { - if pm.IsInterfaceNilCalled != nil { - return pm.IsInterfaceNilCalled() - } - return false -} diff --git a/consensus/spos/bls/blsWorker.go b/consensus/spos/bls/blsWorker.go index 8a5eabe6b5a..456d4e8b1d8 100644 --- a/consensus/spos/bls/blsWorker.go +++ b/consensus/spos/bls/blsWorker.go @@ -7,12 +7,13 @@ import ( // peerMaxMessagesPerSec defines how many messages can be propagated by a pid in a round. The value was chosen by // following the next premises: -// 1. a leader can propagate as maximum as 3 messages per round: proposed header block + proposed body + final info; -// 2. due to the fact that a delayed signature of the proposer (from previous round) can be received in the current round -// adds an extra 1 to the total value, reaching value 4; -// 3. Because the leader might be selected in the next round and might have an empty data pool, it can send the newly -// empty proposed block at the very beginning of the next round. One extra message here, yielding to a total of 5. -// 4. If we consider the forks that can appear on the system wee need to add one more to the value. +// 1. a leader can propagate as maximum as 3 messages per round: proposed header block + proposed body + final info; +// 2. due to the fact that a delayed signature of the proposer (from previous round) can be received in the current round +// adds an extra 1 to the total value, reaching value 4; +// 3. Because the leader might be selected in the next round and might have an empty data pool, it can send the newly +// empty proposed block at the very beginning of the next round. One extra message here, yielding to a total of 5. +// 4. If we consider the forks that can appear on the system wee need to add one more to the value. 
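The reworded comment enumerates the leader's worst case; restating its arithmetic makes the chosen constant easy to verify: 3 messages for the proposed block (header + body + final info), plus 1 for a delayed signature from the previous round, plus 1 for an early empty proposal of the next round, plus 1 allowance for forks, giving the 6 used as peerMaxMessagesPerSec.

package main

import "fmt"

func main() {
	proposedBlockMessages := 3 // proposed header + proposed body + final info
	delayedSignature := 1      // proposer signature from the previous round arriving late
	earlyNextProposal := 1     // empty proposed block sent at the very start of the next round
	forkAllowance := 1         // extra budget for forks

	total := proposedBlockMessages + delayedSignature + earlyNextProposal + forkAllowance
	fmt.Println(total) // 6, the value of peerMaxMessagesPerSec
}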
+// // Validators only send one signature message in a round, treating the edge case of a delayed message, will need at most // 2 messages per round (which is ok as it is below the set value of 5) const peerMaxMessagesPerSec = uint32(6) @@ -36,7 +37,7 @@ func NewConsensusService() (*worker, error) { return &wrk, nil } -//InitReceivedMessages initializes the MessagesType map for all messages for the current ConsensusService +// InitReceivedMessages initializes the MessagesType map for all messages for the current ConsensusService func (wrk *worker) InitReceivedMessages() map[consensus.MessageType][]*consensus.Message { receivedMessages := make(map[consensus.MessageType][]*consensus.Message) receivedMessages[MtBlockBodyAndHeader] = make([]*consensus.Message, 0) @@ -54,47 +55,47 @@ func (wrk *worker) GetMaxMessagesInARoundPerPeer() uint32 { return peerMaxMessagesPerSec } -//GetStringValue gets the name of the messageType +// GetStringValue gets the name of the messageType func (wrk *worker) GetStringValue(messageType consensus.MessageType) string { return getStringValue(messageType) } -//GetSubroundName gets the subround name for the subround id provided +// GetSubroundName gets the subround name for the subround id provided func (wrk *worker) GetSubroundName(subroundId int) string { return getSubroundName(subroundId) } -//IsMessageWithBlockBodyAndHeader returns if the current messageType is about block body and header +// IsMessageWithBlockBodyAndHeader returns if the current messageType is about block body and header func (wrk *worker) IsMessageWithBlockBodyAndHeader(msgType consensus.MessageType) bool { return msgType == MtBlockBodyAndHeader } -//IsMessageWithBlockBody returns if the current messageType is about block body +// IsMessageWithBlockBody returns if the current messageType is about block body func (wrk *worker) IsMessageWithBlockBody(msgType consensus.MessageType) bool { return msgType == MtBlockBody } -//IsMessageWithBlockHeader returns if the current messageType is about block header +// IsMessageWithBlockHeader returns if the current messageType is about block header func (wrk *worker) IsMessageWithBlockHeader(msgType consensus.MessageType) bool { return msgType == MtBlockHeader } -//IsMessageWithSignature returns if the current messageType is about signature +// IsMessageWithSignature returns if the current messageType is about signature func (wrk *worker) IsMessageWithSignature(msgType consensus.MessageType) bool { return msgType == MtSignature } -//IsMessageWithFinalInfo returns if the current messageType is about header final info +// IsMessageWithFinalInfo returns if the current messageType is about header final info func (wrk *worker) IsMessageWithFinalInfo(msgType consensus.MessageType) bool { return msgType == MtBlockHeaderFinalInfo } -//IsMessageWithInvalidSigners returns if the current messageType is about invalid signers +// IsMessageWithInvalidSigners returns if the current messageType is about invalid signers func (wrk *worker) IsMessageWithInvalidSigners(msgType consensus.MessageType) bool { return msgType == MtInvalidSigners } -//IsMessageTypeValid returns if the current messageType is valid +// IsMessageTypeValid returns if the current messageType is valid func (wrk *worker) IsMessageTypeValid(msgType consensus.MessageType) bool { isMessageTypeValid := msgType == MtBlockBodyAndHeader || msgType == MtBlockBody || @@ -106,17 +107,17 @@ func (wrk *worker) IsMessageTypeValid(msgType consensus.MessageType) bool { return isMessageTypeValid } -//IsSubroundSignature 
returns if the current subround is about signature +// IsSubroundSignature returns if the current subround is about signature func (wrk *worker) IsSubroundSignature(subroundId int) bool { return subroundId == SrSignature } -//IsSubroundStartRound returns if the current subround is about start round +// IsSubroundStartRound returns if the current subround is about start round func (wrk *worker) IsSubroundStartRound(subroundId int) bool { return subroundId == SrStartRound } -//GetMessageRange provides the MessageType range used in checks by the consensus +// GetMessageRange provides the MessageType range used in checks by the consensus func (wrk *worker) GetMessageRange() []consensus.MessageType { var v []consensus.MessageType @@ -127,7 +128,7 @@ func (wrk *worker) GetMessageRange() []consensus.MessageType { return v } -//CanProceed returns if the current messageType can proceed further if previous subrounds finished +// CanProceed returns if the current messageType can proceed further if previous subrounds finished func (wrk *worker) CanProceed(consensusState *spos.ConsensusState, msgType consensus.MessageType) bool { switch msgType { case MtBlockBodyAndHeader: diff --git a/consensus/spos/consensusCore.go b/consensus/spos/consensusCore.go index 1edfb09b5fc..2cf7ca369d6 100644 --- a/consensus/spos/consensusCore.go +++ b/consensus/spos/consensusCore.go @@ -14,7 +14,7 @@ import ( ) // ConsensusCore implements ConsensusCoreHandler and provides access to common functionality -// for the rest of the consensus structures +// for the rest of the consensus structures type ConsensusCore struct { blockChain data.ChainHandler blockProcessor process.BlockProcessor @@ -148,7 +148,7 @@ func (cc *ConsensusCore) MultiSignerContainer() cryptoCommon.MultiSignerContaine return cc.multiSignerContainer } -//RoundHandler gets the RoundHandler stored in the ConsensusCore +// RoundHandler gets the RoundHandler stored in the ConsensusCore func (cc *ConsensusCore) RoundHandler() consensus.RoundHandler { return cc.roundHandler } @@ -158,7 +158,7 @@ func (cc *ConsensusCore) ShardCoordinator() sharding.Coordinator { return cc.shardCoordinator } -//SyncTimer gets the SyncTimer stored in the ConsensusCore +// SyncTimer gets the SyncTimer stored in the ConsensusCore func (cc *ConsensusCore) SyncTimer() ntp.SyncTimer { return cc.syncTimer } diff --git a/dataRetriever/chainStorer.go b/dataRetriever/chainStorer.go index 88541d10077..933d4b97a51 100644 --- a/dataRetriever/chainStorer.go +++ b/dataRetriever/chainStorer.go @@ -10,7 +10,7 @@ import ( var _ StorageService = (*ChainStorer)(nil) // ChainStorer is a StorageService implementation that can hold multiple storages -// grouped by storage unit type +// grouped by storage unit type type ChainStorer struct { lock sync.RWMutex chain map[UnitType]storage.Storer diff --git a/dataRetriever/factory/dataPoolFactory.go b/dataRetriever/factory/dataPoolFactory.go index 0033d14f686..6e1415ddfd8 100644 --- a/dataRetriever/factory/dataPoolFactory.go +++ b/dataRetriever/factory/dataPoolFactory.go @@ -2,7 +2,6 @@ package factory import ( "fmt" - "os" "time" "github.com/multiversx/mx-chain-core-go/core" @@ -179,22 +178,12 @@ func createTrieSyncDB(args ArgsDataPool) (storage.Persister, error) { shardId := core.GetShardIDString(args.ShardCoordinator.SelfId()) path := args.PathManager.PathForStatic(shardId, mainConfig.TrieSyncStorage.DB.FilePath) - dbConfigHandler := factory.NewDBConfigHandler(mainConfig.TrieSyncStorage.DB) - persisterFactory, err := 
factory.NewPersisterFactory(dbConfigHandler) + persisterFactory, err := factory.NewPersisterFactory(mainConfig.TrieSyncStorage.DB) if err != nil { return nil, err } - if mainConfig.TrieSyncStorage.DB.UseTmpAsFilePath { - filePath, errTempDir := os.MkdirTemp("", "trieSyncStorage") - if errTempDir != nil { - return nil, errTempDir - } - - path = filePath - } - - db, err := storageunit.NewDB(persisterFactory, path) + db, err := persisterFactory.CreateWithRetries(path) if err != nil { return nil, fmt.Errorf("%w while creating the db for the trie nodes", err) } diff --git a/dataRetriever/factory/resolverscontainer/args.go b/dataRetriever/factory/resolverscontainer/args.go index 1446af01b97..d0001014a4d 100644 --- a/dataRetriever/factory/resolverscontainer/args.go +++ b/dataRetriever/factory/resolverscontainer/args.go @@ -11,21 +11,22 @@ import ( // FactoryArgs will hold the arguments for ResolversContainerFactory for both shard and meta type FactoryArgs struct { - NumConcurrentResolvingJobs int32 - ShardCoordinator sharding.Coordinator - MainMessenger p2p.Messenger - FullArchiveMessenger p2p.Messenger - Store dataRetriever.StorageService - Marshalizer marshal.Marshalizer - DataPools dataRetriever.PoolsHolder - Uint64ByteSliceConverter typeConverters.Uint64ByteSliceConverter - DataPacker dataRetriever.DataPacker - TriesContainer common.TriesHolder - InputAntifloodHandler dataRetriever.P2PAntifloodHandler - OutputAntifloodHandler dataRetriever.P2PAntifloodHandler - MainPreferredPeersHolder p2p.PreferredPeersHolderHandler - FullArchivePreferredPeersHolder p2p.PreferredPeersHolderHandler - SizeCheckDelta uint32 - IsFullHistoryNode bool - PayloadValidator dataRetriever.PeerAuthenticationPayloadValidator + NumConcurrentResolvingJobs int32 + NumConcurrentResolvingTrieNodesJobs int32 + ShardCoordinator sharding.Coordinator + MainMessenger p2p.Messenger + FullArchiveMessenger p2p.Messenger + Store dataRetriever.StorageService + Marshalizer marshal.Marshalizer + DataPools dataRetriever.PoolsHolder + Uint64ByteSliceConverter typeConverters.Uint64ByteSliceConverter + DataPacker dataRetriever.DataPacker + TriesContainer common.TriesHolder + InputAntifloodHandler dataRetriever.P2PAntifloodHandler + OutputAntifloodHandler dataRetriever.P2PAntifloodHandler + MainPreferredPeersHolder p2p.PreferredPeersHolderHandler + FullArchivePreferredPeersHolder p2p.PreferredPeersHolderHandler + SizeCheckDelta uint32 + IsFullHistoryNode bool + PayloadValidator dataRetriever.PeerAuthenticationPayloadValidator } diff --git a/dataRetriever/factory/resolverscontainer/baseResolversContainerFactory.go b/dataRetriever/factory/resolverscontainer/baseResolversContainerFactory.go index c1fc1e3a16b..3d0eff8eaa9 100644 --- a/dataRetriever/factory/resolverscontainer/baseResolversContainerFactory.go +++ b/dataRetriever/factory/resolverscontainer/baseResolversContainerFactory.go @@ -36,6 +36,7 @@ type baseResolversContainerFactory struct { inputAntifloodHandler dataRetriever.P2PAntifloodHandler outputAntifloodHandler dataRetriever.P2PAntifloodHandler throttler dataRetriever.ResolverThrottler + trieNodesThrottler dataRetriever.ResolverThrottler intraShardTopic string isFullHistoryNode bool mainPreferredPeersHolder dataRetriever.PreferredPeersHolderHandler @@ -78,7 +79,10 @@ func (brcf *baseResolversContainerFactory) checkParams() error { return fmt.Errorf("%w for output", dataRetriever.ErrNilAntifloodHandler) } if check.IfNil(brcf.throttler) { - return dataRetriever.ErrNilThrottler + return fmt.Errorf("%w for the main throttler", 
dataRetriever.ErrNilThrottler) + } + if check.IfNil(brcf.trieNodesThrottler) { + return fmt.Errorf("%w for the trie nodes throttler", dataRetriever.ErrNilThrottler) } if check.IfNil(brcf.mainPreferredPeersHolder) { return fmt.Errorf("%w for main network", dataRetriever.ErrNilPreferredPeersHolder) @@ -351,7 +355,7 @@ func (brcf *baseResolversContainerFactory) createTrieNodesResolver( SenderResolver: resolverSender, Marshaller: brcf.marshalizer, AntifloodHandler: brcf.inputAntifloodHandler, - Throttler: brcf.throttler, + Throttler: brcf.trieNodesThrottler, }, TrieDataGetter: trie, } diff --git a/dataRetriever/factory/resolverscontainer/metaResolversContainerFactory.go b/dataRetriever/factory/resolverscontainer/metaResolversContainerFactory.go index 426a978ae20..b72f8c3154a 100644 --- a/dataRetriever/factory/resolverscontainer/metaResolversContainerFactory.go +++ b/dataRetriever/factory/resolverscontainer/metaResolversContainerFactory.go @@ -27,7 +27,12 @@ func NewMetaResolversContainerFactory( args.Marshalizer = marshal.NewSizeCheckUnmarshalizer(args.Marshalizer, args.SizeCheckDelta) } - thr, err := throttler.NewNumGoRoutinesThrottler(args.NumConcurrentResolvingJobs) + mainThrottler, err := throttler.NewNumGoRoutinesThrottler(args.NumConcurrentResolvingJobs) + if err != nil { + return nil, err + } + + trieNodesThrottler, err := throttler.NewNumGoRoutinesThrottler(args.NumConcurrentResolvingTrieNodesJobs) if err != nil { return nil, err } @@ -46,7 +51,8 @@ func NewMetaResolversContainerFactory( triesContainer: args.TriesContainer, inputAntifloodHandler: args.InputAntifloodHandler, outputAntifloodHandler: args.OutputAntifloodHandler, - throttler: thr, + throttler: mainThrottler, + trieNodesThrottler: trieNodesThrottler, isFullHistoryNode: args.IsFullHistoryNode, mainPreferredPeersHolder: args.MainPreferredPeersHolder, fullArchivePreferredPeersHolder: args.FullArchivePreferredPeersHolder, diff --git a/dataRetriever/factory/resolverscontainer/metaResolversContainerFactory_test.go b/dataRetriever/factory/resolverscontainer/metaResolversContainerFactory_test.go index c6659693d79..755672384cd 100644 --- a/dataRetriever/factory/resolverscontainer/metaResolversContainerFactory_test.go +++ b/dataRetriever/factory/resolverscontainer/metaResolversContainerFactory_test.go @@ -94,8 +94,15 @@ func TestNewMetaResolversContainerFactory_NewNumGoRoutinesThrottlerFailsShouldEr args := getArgumentsMeta() args.NumConcurrentResolvingJobs = 0 + rcf, err := resolverscontainer.NewMetaResolversContainerFactory(args) + assert.Nil(t, rcf) + assert.Equal(t, core.ErrNotPositiveValue, err) + + args.NumConcurrentResolvingJobs = 10 + args.NumConcurrentResolvingTrieNodesJobs = 0 + rcf, err = resolverscontainer.NewMetaResolversContainerFactory(args) assert.Nil(t, rcf) assert.Equal(t, core.ErrNotPositiveValue, err) } @@ -357,21 +364,22 @@ func TestMetaResolversContainerFactory_IsInterfaceNil(t *testing.T) { func getArgumentsMeta() resolverscontainer.FactoryArgs { return resolverscontainer.FactoryArgs{ - ShardCoordinator: mock.NewOneShardCoordinatorMock(), - MainMessenger: createStubMessengerForMeta("", ""), - FullArchiveMessenger: createStubMessengerForMeta("", ""), - Store: createStoreForMeta(), - Marshalizer: &mock.MarshalizerMock{}, - DataPools: createDataPoolsForMeta(), - Uint64ByteSliceConverter: &mock.Uint64ByteSliceConverterMock{}, - DataPacker: &mock.DataPackerStub{}, - TriesContainer: createTriesHolderForMeta(), - SizeCheckDelta: 0, - InputAntifloodHandler: &mock.P2PAntifloodHandlerStub{}, - OutputAntifloodHandler: 
&mock.P2PAntifloodHandlerStub{}, - NumConcurrentResolvingJobs: 10, - MainPreferredPeersHolder: &p2pmocks.PeersHolderStub{}, - FullArchivePreferredPeersHolder: &p2pmocks.PeersHolderStub{}, - PayloadValidator: &testscommon.PeerAuthenticationPayloadValidatorStub{}, + ShardCoordinator: mock.NewOneShardCoordinatorMock(), + MainMessenger: createStubMessengerForMeta("", ""), + FullArchiveMessenger: createStubMessengerForMeta("", ""), + Store: createStoreForMeta(), + Marshalizer: &mock.MarshalizerMock{}, + DataPools: createDataPoolsForMeta(), + Uint64ByteSliceConverter: &mock.Uint64ByteSliceConverterMock{}, + DataPacker: &mock.DataPackerStub{}, + TriesContainer: createTriesHolderForMeta(), + SizeCheckDelta: 0, + InputAntifloodHandler: &mock.P2PAntifloodHandlerStub{}, + OutputAntifloodHandler: &mock.P2PAntifloodHandlerStub{}, + NumConcurrentResolvingJobs: 10, + NumConcurrentResolvingTrieNodesJobs: 3, + MainPreferredPeersHolder: &p2pmocks.PeersHolderStub{}, + FullArchivePreferredPeersHolder: &p2pmocks.PeersHolderStub{}, + PayloadValidator: &testscommon.PeerAuthenticationPayloadValidatorStub{}, } } diff --git a/dataRetriever/factory/resolverscontainer/shardResolversContainerFactory.go b/dataRetriever/factory/resolverscontainer/shardResolversContainerFactory.go index 28582f03bc5..f24beaa4331 100644 --- a/dataRetriever/factory/resolverscontainer/shardResolversContainerFactory.go +++ b/dataRetriever/factory/resolverscontainer/shardResolversContainerFactory.go @@ -25,7 +25,12 @@ func NewShardResolversContainerFactory( args.Marshalizer = marshal.NewSizeCheckUnmarshalizer(args.Marshalizer, args.SizeCheckDelta) } - thr, err := throttler.NewNumGoRoutinesThrottler(args.NumConcurrentResolvingJobs) + mainThrottler, err := throttler.NewNumGoRoutinesThrottler(args.NumConcurrentResolvingJobs) + if err != nil { + return nil, err + } + + trieNodesThrottler, err := throttler.NewNumGoRoutinesThrottler(args.NumConcurrentResolvingTrieNodesJobs) if err != nil { return nil, err } @@ -44,7 +49,8 @@ func NewShardResolversContainerFactory( triesContainer: args.TriesContainer, inputAntifloodHandler: args.InputAntifloodHandler, outputAntifloodHandler: args.OutputAntifloodHandler, - throttler: thr, + throttler: mainThrottler, + trieNodesThrottler: trieNodesThrottler, isFullHistoryNode: args.IsFullHistoryNode, mainPreferredPeersHolder: args.MainPreferredPeersHolder, fullArchivePreferredPeersHolder: args.FullArchivePreferredPeersHolder, diff --git a/dataRetriever/factory/resolverscontainer/shardResolversContainerFactory_test.go b/dataRetriever/factory/resolverscontainer/shardResolversContainerFactory_test.go index 4d6ca351195..ca97015f3ae 100644 --- a/dataRetriever/factory/resolverscontainer/shardResolversContainerFactory_test.go +++ b/dataRetriever/factory/resolverscontainer/shardResolversContainerFactory_test.go @@ -100,8 +100,15 @@ func TestNewShardResolversContainerFactory_NewNumGoRoutinesThrottlerFailsShouldE args := getArgumentsShard() args.NumConcurrentResolvingJobs = 0 + rcf, err := resolverscontainer.NewShardResolversContainerFactory(args) + assert.Nil(t, rcf) + assert.Equal(t, core.ErrNotPositiveValue, err) + + args.NumConcurrentResolvingJobs = 10 + args.NumConcurrentResolvingTrieNodesJobs = 0 + rcf, err = resolverscontainer.NewShardResolversContainerFactory(args) assert.Nil(t, rcf) assert.Equal(t, core.ErrNotPositiveValue, err) } @@ -465,21 +472,22 @@ func TestShardResolversContainerFactory_IsInterfaceNil(t *testing.T) { func getArgumentsShard() resolverscontainer.FactoryArgs { return resolverscontainer.FactoryArgs{ - 
ShardCoordinator: mock.NewOneShardCoordinatorMock(), - MainMessenger: createMessengerStubForShard("", ""), - FullArchiveMessenger: createMessengerStubForShard("", ""), - Store: createStoreForShard(), - Marshalizer: &mock.MarshalizerMock{}, - DataPools: createDataPoolsForShard(), - Uint64ByteSliceConverter: &mock.Uint64ByteSliceConverterMock{}, - DataPacker: &mock.DataPackerStub{}, - TriesContainer: createTriesHolderForShard(), - SizeCheckDelta: 0, - InputAntifloodHandler: &mock.P2PAntifloodHandlerStub{}, - OutputAntifloodHandler: &mock.P2PAntifloodHandlerStub{}, - NumConcurrentResolvingJobs: 10, - MainPreferredPeersHolder: &p2pmocks.PeersHolderStub{}, - FullArchivePreferredPeersHolder: &p2pmocks.PeersHolderStub{}, - PayloadValidator: &testscommon.PeerAuthenticationPayloadValidatorStub{}, + ShardCoordinator: mock.NewOneShardCoordinatorMock(), + MainMessenger: createMessengerStubForShard("", ""), + FullArchiveMessenger: createMessengerStubForShard("", ""), + Store: createStoreForShard(), + Marshalizer: &mock.MarshalizerMock{}, + DataPools: createDataPoolsForShard(), + Uint64ByteSliceConverter: &mock.Uint64ByteSliceConverterMock{}, + DataPacker: &mock.DataPackerStub{}, + TriesContainer: createTriesHolderForShard(), + SizeCheckDelta: 0, + InputAntifloodHandler: &mock.P2PAntifloodHandlerStub{}, + OutputAntifloodHandler: &mock.P2PAntifloodHandlerStub{}, + NumConcurrentResolvingJobs: 10, + NumConcurrentResolvingTrieNodesJobs: 3, + MainPreferredPeersHolder: &p2pmocks.PeersHolderStub{}, + FullArchivePreferredPeersHolder: &p2pmocks.PeersHolderStub{}, + PayloadValidator: &testscommon.PeerAuthenticationPayloadValidatorStub{}, } } diff --git a/dataRetriever/factory/storageRequestersContainer/baseRequestersContainerFactory.go b/dataRetriever/factory/storageRequestersContainer/baseRequestersContainerFactory.go index e68b10d5e46..2682231a768 100644 --- a/dataRetriever/factory/storageRequestersContainer/baseRequestersContainerFactory.go +++ b/dataRetriever/factory/storageRequestersContainer/baseRequestersContainerFactory.go @@ -10,7 +10,6 @@ import ( "github.com/multiversx/mx-chain-core-go/hashing" "github.com/multiversx/mx-chain-core-go/marshal" "github.com/multiversx/mx-chain-go/common" - "github.com/multiversx/mx-chain-go/common/disabled" "github.com/multiversx/mx-chain-go/common/statistics" "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/dataRetriever" @@ -20,9 +19,6 @@ import ( "github.com/multiversx/mx-chain-go/p2p" "github.com/multiversx/mx-chain-go/process/factory" "github.com/multiversx/mx-chain-go/sharding" - "github.com/multiversx/mx-chain-go/storage" - storageFactory "github.com/multiversx/mx-chain-go/storage/factory" - trieFactory "github.com/multiversx/mx-chain-go/trie/factory" ) const defaultBeforeGracefulClose = time.Minute @@ -239,46 +235,6 @@ func (brcf *baseRequestersContainerFactory) createMiniBlocksRequester(responseTo return mbRequester, nil } -func (brcf *baseRequestersContainerFactory) newImportDBTrieStorage( - mainStorer storage.Storer, - storageIdentifier dataRetriever.UnitType, - handler common.EnableEpochsHandler, - stateStatsHandler common.StateStatisticsHandler, -) (common.StorageManager, dataRetriever.TrieDataGetter, error) { - pathManager, err := storageFactory.CreatePathManager( - storageFactory.ArgCreatePathManager{ - WorkingDir: brcf.workingDir, - ChainID: brcf.chainID, - }, - ) - if err != nil { - return nil, nil, err - } - - trieFactoryArgs := trieFactory.TrieFactoryArgs{ - Marshalizer: brcf.marshalizer, - Hasher: brcf.hasher, - 
PathManager: pathManager, - TrieStorageManagerConfig: brcf.generalConfig.TrieStorageManagerConfig, - } - trieFactoryInstance, err := trieFactory.NewTrieFactory(trieFactoryArgs) - if err != nil { - return nil, nil, err - } - - args := trieFactory.TrieCreateArgs{ - MainStorer: mainStorer, - PruningEnabled: brcf.generalConfig.StateTriesConfig.AccountsStatePruningEnabled, - MaxTrieLevelInMem: brcf.generalConfig.StateTriesConfig.MaxStateTrieLevelInMemory, - SnapshotsEnabled: brcf.snapshotsEnabled, - IdleProvider: disabled.NewProcessStatusHandler(), - Identifier: storageIdentifier.String(), - EnableEpochsHandler: handler, - StatsCollector: stateStatsHandler, - } - return trieFactoryInstance.Create(args) -} - func (brcf *baseRequestersContainerFactory) generatePeerAuthenticationRequester() error { identifierPeerAuth := common.PeerAuthenticationTopic peerAuthRequester := disabledRequesters.NewDisabledRequester() diff --git a/dataRetriever/resolvers/epochproviders/arithmeticEpochProvider.go b/dataRetriever/resolvers/epochproviders/arithmeticEpochProvider.go index a0d6963ad14..675ebd6f276 100644 --- a/dataRetriever/resolvers/epochproviders/arithmeticEpochProvider.go +++ b/dataRetriever/resolvers/epochproviders/arithmeticEpochProvider.go @@ -9,7 +9,7 @@ import ( ) // deltaEpochActive represents how many epochs behind the current computed epoch are to be considered "active" and -//cause the requests to be sent to all peers regardless of being full observers or not. Usually, a node will have +// cause the requests to be sent to all peers regardless of being full observers or not. Usually, a node will have // [config.toml].[StoragePruning].NumActivePersisters opened persisters but to the fact that a shorter epoch can happen, // that value is lowered at a maximum 1. 
const deltaEpochActive = uint32(1) diff --git a/dataRetriever/resolvers/trieNodeResolver.go b/dataRetriever/resolvers/trieNodeResolver.go index 871ed85fee5..275327d44c6 100644 --- a/dataRetriever/resolvers/trieNodeResolver.go +++ b/dataRetriever/resolvers/trieNodeResolver.go @@ -1,6 +1,8 @@ package resolvers import ( + "sync" + "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-core-go/data/batch" @@ -20,6 +22,7 @@ type ArgTrieNodeResolver struct { // TrieNodeResolver is a wrapper over Resolver that is specialized in resolving trie node requests type TrieNodeResolver struct { + mutCriticalSection sync.Mutex *baseResolver messageProcessor trieDataGetter dataRetriever.TrieDataGetter @@ -104,6 +107,9 @@ func (tnRes *TrieNodeResolver) resolveMultipleHashes(hashesBuff []byte, message } func (tnRes *TrieNodeResolver) resolveOnlyRequestedHashes(hashes [][]byte, nodes map[string]struct{}) (int, bool) { + tnRes.mutCriticalSection.Lock() + defer tnRes.mutCriticalSection.Unlock() + spaceUsed := 0 usedAllSpace := false remainingSpace := core.MaxBufferSizeToSendTrieNodes @@ -129,6 +135,9 @@ func (tnRes *TrieNodeResolver) resolveOnlyRequestedHashes(hashes [][]byte, nodes } func (tnRes *TrieNodeResolver) resolveSubTries(hashes [][]byte, nodes map[string]struct{}, spaceUsedAlready int) { + tnRes.mutCriticalSection.Lock() + defer tnRes.mutCriticalSection.Unlock() + var serializedNodes [][]byte var err error var serializedNode []byte @@ -168,7 +177,10 @@ func convertMapToSlice(m map[string]struct{}) [][]byte { } func (tnRes *TrieNodeResolver) resolveOneHash(hash []byte, chunkIndex uint32, message p2p.MessageP2P, source p2p.MessageHandler) error { + tnRes.mutCriticalSection.Lock() serializedNode, err := tnRes.trieDataGetter.GetSerializedNode(hash) + tnRes.mutCriticalSection.Unlock() + if err != nil { return err } diff --git a/debug/handler/interceptorDebugHandler.go b/debug/handler/interceptorDebugHandler.go index 9c5b2cb361a..a00f7b878b9 100644 --- a/debug/handler/interceptorDebugHandler.go +++ b/debug/handler/interceptorDebugHandler.go @@ -202,7 +202,7 @@ func (idh *interceptorDebugHandler) incrementNumOfPrints() { } } -//TODO replace this with a call to Query(search) when a suitable conditional parser will be used. Also replace config parameters +// TODO replace this with a call to Query(search) when a suitable conditional parser will be used. 
Also replace config parameters // with a query string so it will be more extensible func (idh *interceptorDebugHandler) getStringEvents(maxNumPrints int) []string { acceptEvent := func(ev *event) bool { diff --git a/epochStart/bootstrap/baseStorageHandler.go b/epochStart/bootstrap/baseStorageHandler.go index dcf9193808d..1442af7e3b0 100644 --- a/epochStart/bootstrap/baseStorageHandler.go +++ b/epochStart/bootstrap/baseStorageHandler.go @@ -2,22 +2,65 @@ package bootstrap import ( "encoding/hex" - "encoding/json" "strings" "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-core-go/data/block" "github.com/multiversx/mx-chain-core-go/data/typeConverters" "github.com/multiversx/mx-chain-core-go/hashing" "github.com/multiversx/mx-chain-core-go/marshal" "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/process/block/bootstrapStorage" "github.com/multiversx/mx-chain-go/sharding" "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" + "github.com/multiversx/mx-chain-go/storage" ) +// StorageHandlerArgs is a struct placeholder for all arguments required to create either a shard or a meta storage handler +type StorageHandlerArgs struct { + GeneralConfig config.Config + PreferencesConfig config.PreferencesConfig + ShardCoordinator sharding.Coordinator + PathManagerHandler storage.PathManagerHandler + Marshaller marshal.Marshalizer + Hasher hashing.Hasher + CurrentEpoch uint32 + Uint64Converter typeConverters.Uint64ByteSliceConverter + NodeTypeProvider NodeTypeProviderHandler + NodesCoordinatorRegistryFactory nodesCoordinator.NodesCoordinatorRegistryFactory + SnapshotsEnabled bool + ManagedPeersHolder common.ManagedPeersHolder + NodeProcessingMode common.NodeProcessingMode + RepopulateTokensSupplies bool + StateStatsHandler common.StateStatisticsHandler +} + +func checkNilArgs(args StorageHandlerArgs) error { + if check.IfNil(args.ShardCoordinator) { + return core.ErrNilShardCoordinator + } + if check.IfNil(args.PathManagerHandler) { + return dataRetriever.ErrNilPathManager + } + if check.IfNil(args.Marshaller) { + return core.ErrNilMarshalizer + } + if check.IfNil(args.Hasher) { + return core.ErrNilHasher + } + if check.IfNil(args.Uint64Converter) { + return dataRetriever.ErrNilUint64ByteSliceConverter + } + if check.IfNil(args.NodesCoordinatorRegistryFactory) { + return nodesCoordinator.ErrNilNodesCoordinatorRegistryFactory + } + return nil +} + type miniBlocksInfo struct { miniBlockHashes [][]byte fullyProcessed []bool @@ -33,12 +76,13 @@ type processedIndexes struct { // baseStorageHandler handles the storage functions for saving bootstrap data type baseStorageHandler struct { - storageService dataRetriever.StorageService - shardCoordinator sharding.Coordinator - marshalizer marshal.Marshalizer - hasher hashing.Hasher - currentEpoch uint32 - uint64Converter typeConverters.Uint64ByteSliceConverter + storageService dataRetriever.StorageService + shardCoordinator sharding.Coordinator + marshalizer marshal.Marshalizer + hasher hashing.Hasher + currentEpoch uint32 + uint64Converter typeConverters.Uint64ByteSliceConverter + nodesCoordinatorRegistryFactory nodesCoordinator.NodesCoordinatorRegistryFactory } func (bsh *baseStorageHandler) groupMiniBlocksByShard(miniBlocks map[string]*block.MiniBlock) ([]bootstrapStorage.PendingMiniBlocksInfo, error) { 
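A minimal usage sketch of the args-struct pattern introduced above (not part of the diff): both storage-handler constructors now take a single StorageHandlerArgs value and validate it through checkNilArgs, so a missing dependency is reported up-front as a sentinel error. The createStorageHandlerArgs helper used here is the test fixture added further down in this change; everything else follows the fields defined in StorageHandlerArgs.

    // sketch only, assuming the createStorageHandlerArgs test helper defined later in this PR
    args := createStorageHandlerArgs()
    args.Marshaller = nil

    handler, err := NewMetaStorageHandler(args)
    // handler is nil and err is core.ErrNilMarshalizer, returned by checkNilArgs
    // before any storage service is created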
@@ -61,12 +105,11 @@ func (bsh *baseStorageHandler) groupMiniBlocksByShard(miniBlocks map[string]*blo func (bsh *baseStorageHandler) saveNodesCoordinatorRegistry( metaBlock data.HeaderHandler, - nodesConfig *nodesCoordinator.NodesCoordinatorRegistry, + nodesConfig nodesCoordinator.NodesCoordinatorRegistryHandler, ) ([]byte, error) { key := append([]byte(common.NodesCoordinatorRegistryKeyPrefix), metaBlock.GetPrevRandSeed()...) - // TODO: replace hardcoded json - although it is hardcoded in nodesCoordinator as well. - registryBytes, err := json.Marshal(nodesConfig) + registryBytes, err := bsh.nodesCoordinatorRegistryFactory.GetRegistryData(nodesConfig, metaBlock.GetEpoch()) if err != nil { return nil, err } @@ -81,7 +124,7 @@ func (bsh *baseStorageHandler) saveNodesCoordinatorRegistry( return nil, err } - log.Debug("saving nodes coordinator config", "key", key) + log.Debug("saving nodes coordinator config", "key", key, "epoch", metaBlock.GetEpoch()) return metaBlock.GetPrevRandSeed(), nil } diff --git a/epochStart/bootstrap/common.go b/epochStart/bootstrap/common.go index 19bfa2acc54..da6e99fda1b 100644 --- a/epochStart/bootstrap/common.go +++ b/epochStart/bootstrap/common.go @@ -6,6 +6,7 @@ import ( "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-go/common/statistics" "github.com/multiversx/mx-chain-go/epochStart" + "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" ) const baseErrorMessage = "error with epoch start bootstrapper arguments" @@ -119,6 +120,9 @@ func checkArguments(args ArgsEpochStartBootstrap) error { if check.IfNil(args.StateStatsHandler) { return fmt.Errorf("%s: %w", baseErrorMessage, statistics.ErrNilStateStatsHandler) } + if check.IfNil(args.NodesCoordinatorRegistryFactory) { + return fmt.Errorf("%s: %w", baseErrorMessage, nodesCoordinator.ErrNilNodesCoordinatorRegistryFactory) + } return nil } diff --git a/epochStart/bootstrap/disabled/disabledNodesCoordinator.go b/epochStart/bootstrap/disabled/disabledNodesCoordinator.go index 742fa1e0523..efee420feec 100644 --- a/epochStart/bootstrap/disabled/disabledNodesCoordinator.go +++ b/epochStart/bootstrap/disabled/disabledNodesCoordinator.go @@ -49,6 +49,11 @@ func (n *nodesCoordinator) GetAllWaitingValidatorsPublicKeys(_ uint32) (map[uint return nil, nil } +// GetAllShuffledOutValidatorsPublicKeys - +func (n *nodesCoordinator) GetAllShuffledOutValidatorsPublicKeys(_ uint32) (map[uint32][][]byte, error) { + return nil, nil +} + // GetConsensusValidatorsPublicKeys - func (n *nodesCoordinator) GetConsensusValidatorsPublicKeys(_ []byte, _ uint64, _ uint32, _ uint32) ([]string, error) { return nil, nil diff --git a/epochStart/bootstrap/fromLocalStorage.go b/epochStart/bootstrap/fromLocalStorage.go index b6dea44ee81..868d0359ef5 100644 --- a/epochStart/bootstrap/fromLocalStorage.go +++ b/epochStart/bootstrap/fromLocalStorage.go @@ -2,7 +2,6 @@ package bootstrap import ( "bytes" - "encoding/json" "fmt" "strconv" @@ -196,22 +195,22 @@ func (e *epochStartBootstrap) prepareEpochFromStorage() (Parameters, error) { func (e *epochStartBootstrap) checkIfShuffledOut( pubKey []byte, - nodesConfig *nodesCoordinator.NodesCoordinatorRegistry, + nodesConfig nodesCoordinator.NodesCoordinatorRegistryHandler, ) (uint32, bool) { epochIDasString := fmt.Sprint(e.baseData.lastEpoch) - epochConfig := nodesConfig.EpochsConfig[epochIDasString] + epochConfig := nodesConfig.GetEpochsConfig()[epochIDasString] if epochConfig == nil { return e.baseData.shardId, false } - newShardId, isWaitingForShard := 
checkIfPubkeyIsInMap(pubKey, epochConfig.WaitingValidators) + newShardId, isWaitingForShard := checkIfPubkeyIsInMap(pubKey, epochConfig.GetWaitingValidators()) if isWaitingForShard { isShuffledOut := newShardId != e.baseData.shardId e.nodeType = core.NodeTypeValidator return newShardId, isShuffledOut } - newShardId, isEligibleForShard := checkIfPubkeyIsInMap(pubKey, epochConfig.EligibleValidators) + newShardId, isEligibleForShard := checkIfPubkeyIsInMap(pubKey, epochConfig.GetEligibleValidators()) if isEligibleForShard { isShuffledOut := newShardId != e.baseData.shardId e.nodeType = core.NodeTypeValidator @@ -252,7 +251,7 @@ func checkIfValidatorIsInList( return false } -func (e *epochStartBootstrap) getLastBootstrapData(storer storage.Storer) (*bootstrapStorage.BootstrapData, *nodesCoordinator.NodesCoordinatorRegistry, error) { +func (e *epochStartBootstrap) getLastBootstrapData(storer storage.Storer) (*bootstrapStorage.BootstrapData, nodesCoordinator.NodesCoordinatorRegistryHandler, error) { bootStorer, err := bootstrapStorage.NewBootstrapStorer(e.coreComponentsHolder.InternalMarshalizer(), storer) if err != nil { return nil, nil, err @@ -271,8 +270,7 @@ func (e *epochStartBootstrap) getLastBootstrapData(storer storage.Storer) (*boot return nil, nil, err } - config := &nodesCoordinator.NodesCoordinatorRegistry{} - err = json.Unmarshal(d, config) + config, err := e.nodesCoordinatorRegistryFactory.CreateNodesCoordinatorRegistry(d) if err != nil { return nil, nil, err } diff --git a/epochStart/bootstrap/interface.go b/epochStart/bootstrap/interface.go index e934e450f7c..bfc293032ee 100644 --- a/epochStart/bootstrap/interface.go +++ b/epochStart/bootstrap/interface.go @@ -13,7 +13,7 @@ import ( // StartOfEpochNodesConfigHandler defines the methods to process nodesConfig from epoch start metablocks type StartOfEpochNodesConfigHandler interface { - NodesConfigFromMetaBlock(currMetaBlock data.HeaderHandler, prevMetaBlock data.HeaderHandler) (*nodesCoordinator.NodesCoordinatorRegistry, uint32, []*block.MiniBlock, error) + NodesConfigFromMetaBlock(currMetaBlock data.HeaderHandler, prevMetaBlock data.HeaderHandler) (nodesCoordinator.NodesCoordinatorRegistryHandler, uint32, []*block.MiniBlock, error) IsInterfaceNil() bool } @@ -26,7 +26,7 @@ type EpochStartMetaBlockInterceptorProcessor interface { // StartInEpochNodesCoordinator defines the methods to process and save nodesCoordinator information to storage type StartInEpochNodesCoordinator interface { EpochStartPrepare(metaHdr data.HeaderHandler, body data.BodyHandler) - NodesCoordinatorToRegistry() *nodesCoordinator.NodesCoordinatorRegistry + NodesCoordinatorToRegistry(epoch uint32) nodesCoordinator.NodesCoordinatorRegistryHandler ShardIdForEpoch(epoch uint32) (uint32, error) IsInterfaceNil() bool } diff --git a/epochStart/bootstrap/metaStorageHandler.go b/epochStart/bootstrap/metaStorageHandler.go index 65e7e9c9237..01f65ccabe6 100644 --- a/epochStart/bootstrap/metaStorageHandler.go +++ b/epochStart/bootstrap/metaStorageHandler.go @@ -7,17 +7,11 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-core-go/data/block" - "github.com/multiversx/mx-chain-core-go/data/typeConverters" - "github.com/multiversx/mx-chain-core-go/hashing" - "github.com/multiversx/mx-chain-core-go/marshal" "github.com/multiversx/mx-chain-go/common" - "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/epochStart" 
"github.com/multiversx/mx-chain-go/epochStart/bootstrap/disabled" "github.com/multiversx/mx-chain-go/process/block/bootstrapStorage" - "github.com/multiversx/mx-chain-go/sharding" - "github.com/multiversx/mx-chain-go/storage" "github.com/multiversx/mx-chain-go/storage/factory" ) @@ -26,36 +20,28 @@ type metaStorageHandler struct { } // NewMetaStorageHandler will return a new instance of metaStorageHandler -func NewMetaStorageHandler( - generalConfig config.Config, - prefsConfig config.PreferencesConfig, - shardCoordinator sharding.Coordinator, - pathManagerHandler storage.PathManagerHandler, - marshalizer marshal.Marshalizer, - hasher hashing.Hasher, - currentEpoch uint32, - uint64Converter typeConverters.Uint64ByteSliceConverter, - nodeTypeProvider NodeTypeProviderHandler, - nodeProcessingMode common.NodeProcessingMode, - managedPeersHolder common.ManagedPeersHolder, - stateStatsHandler common.StateStatisticsHandler, -) (*metaStorageHandler, error) { +func NewMetaStorageHandler(args StorageHandlerArgs) (*metaStorageHandler, error) { + err := checkNilArgs(args) + if err != nil { + return nil, err + } + epochStartNotifier := &disabled.EpochStartNotifier{} storageFactory, err := factory.NewStorageServiceFactory( factory.StorageServiceFactoryArgs{ - Config: generalConfig, - PrefsConfig: prefsConfig, - ShardCoordinator: shardCoordinator, - PathManager: pathManagerHandler, + Config: args.GeneralConfig, + PrefsConfig: args.PreferencesConfig, + ShardCoordinator: args.ShardCoordinator, + PathManager: args.PathManagerHandler, EpochStartNotifier: epochStartNotifier, - NodeTypeProvider: nodeTypeProvider, - CurrentEpoch: currentEpoch, + NodeTypeProvider: args.NodeTypeProvider, StorageType: factory.BootstrapStorageService, + ManagedPeersHolder: args.ManagedPeersHolder, + CurrentEpoch: args.CurrentEpoch, CreateTrieEpochRootHashStorer: false, - NodeProcessingMode: nodeProcessingMode, - RepopulateTokensSupplies: false, // tokens supplies cannot be repopulated at this time - ManagedPeersHolder: managedPeersHolder, - StateStatsHandler: stateStatsHandler, + NodeProcessingMode: args.NodeProcessingMode, + RepopulateTokensSupplies: false, + StateStatsHandler: args.StateStatsHandler, }, ) if err != nil { @@ -68,12 +54,13 @@ func NewMetaStorageHandler( } base := &baseStorageHandler{ - storageService: storageService, - shardCoordinator: shardCoordinator, - marshalizer: marshalizer, - hasher: hasher, - currentEpoch: currentEpoch, - uint64Converter: uint64Converter, + storageService: storageService, + shardCoordinator: args.ShardCoordinator, + marshalizer: args.Marshaller, + hasher: args.Hasher, + currentEpoch: args.CurrentEpoch, + uint64Converter: args.Uint64Converter, + nodesCoordinatorRegistryFactory: args.NodesCoordinatorRegistryFactory, } return &metaStorageHandler{baseStorageHandler: base}, nil diff --git a/epochStart/bootstrap/metaStorageHandler_test.go b/epochStart/bootstrap/metaStorageHandler_test.go index 4fee7dee5b5..92603df176a 100644 --- a/epochStart/bootstrap/metaStorageHandler_test.go +++ b/epochStart/bootstrap/metaStorageHandler_test.go @@ -20,36 +20,37 @@ import ( "github.com/multiversx/mx-chain-go/testscommon" "github.com/multiversx/mx-chain-go/testscommon/hashingMocks" "github.com/multiversx/mx-chain-go/testscommon/nodeTypeProviderMock" + "github.com/multiversx/mx-chain-go/testscommon/shardingMocks" storageStubs "github.com/multiversx/mx-chain-go/testscommon/storage" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) +func createStorageHandlerArgs() StorageHandlerArgs { + 
return StorageHandlerArgs{ + GeneralConfig: testscommon.GetGeneralConfig(), + PreferencesConfig: config.PreferencesConfig{}, + ShardCoordinator: &mock.ShardCoordinatorStub{}, + PathManagerHandler: &testscommon.PathManagerStub{}, + Marshaller: &mock.MarshalizerMock{}, + Hasher: &hashingMocks.HasherMock{}, + CurrentEpoch: 0, + Uint64Converter: &mock.Uint64ByteSliceConverterMock{}, + NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, + NodesCoordinatorRegistryFactory: &shardingMocks.NodesCoordinatorRegistryFactoryMock{}, + ManagedPeersHolder: &testscommon.ManagedPeersHolderStub{}, + SnapshotsEnabled: false, + NodeProcessingMode: common.Normal, + StateStatsHandler: disabled.NewStateStatistics(), + RepopulateTokensSupplies: false, + } +} + func TestNewMetaStorageHandler_InvalidConfigErr(t *testing.T) { - gCfg := config.Config{} - prefsConfig := config.PreferencesConfig{} - coordinator := &mock.ShardCoordinatorStub{} - pathManager := &testscommon.PathManagerStub{} - marshalizer := &mock.MarshalizerMock{} - hasher := &hashingMocks.HasherMock{} - uit64Cvt := &mock.Uint64ByteSliceConverterMock{} - nodeTypeProvider := &nodeTypeProviderMock.NodeTypeProviderStub{} - managedPeersHolder := &testscommon.ManagedPeersHolderStub{} - - mtStrHandler, err := NewMetaStorageHandler( - gCfg, - prefsConfig, - coordinator, - pathManager, - marshalizer, - hasher, - 1, - uit64Cvt, - nodeTypeProvider, - common.Normal, - managedPeersHolder, - disabled.NewStateStatistics(), - ) + args := createStorageHandlerArgs() + args.GeneralConfig = config.Config{} + + mtStrHandler, err := NewMetaStorageHandler(args) assert.True(t, check.IfNil(mtStrHandler)) assert.NotNil(t, err) } @@ -59,29 +60,8 @@ func TestNewMetaStorageHandler_CreateForMetaErr(t *testing.T) { _ = os.RemoveAll("./Epoch_0") }() - gCfg := testscommon.GetGeneralConfig() - prefsConfig := config.PreferencesConfig{} - coordinator := &mock.ShardCoordinatorStub{} - pathManager := &testscommon.PathManagerStub{} - marshalizer := &mock.MarshalizerMock{} - hasher := &hashingMocks.HasherMock{} - uit64Cvt := &mock.Uint64ByteSliceConverterMock{} - nodeTypeProvider := &nodeTypeProviderMock.NodeTypeProviderStub{} - managedPeersHolder := &testscommon.ManagedPeersHolderStub{} - mtStrHandler, err := NewMetaStorageHandler( - gCfg, - prefsConfig, - coordinator, - pathManager, - marshalizer, - hasher, - 1, - uit64Cvt, - nodeTypeProvider, - common.Normal, - managedPeersHolder, - disabled.NewStateStatistics(), - ) + args := createStorageHandlerArgs() + mtStrHandler, err := NewMetaStorageHandler(args) assert.False(t, check.IfNil(mtStrHandler)) assert.Nil(t, err) } @@ -91,34 +71,11 @@ func TestMetaStorageHandler_saveLastHeader(t *testing.T) { _ = os.RemoveAll("./Epoch_0") }() - gCfg := testscommon.GetGeneralConfig() - prefsConfig := config.PreferencesConfig{} - coordinator := &mock.ShardCoordinatorStub{} - pathManager := &testscommon.PathManagerStub{} - marshalizer := &mock.MarshalizerMock{} - hasher := &hashingMocks.HasherMock{} - uit64Cvt := &mock.Uint64ByteSliceConverterMock{} - nodeTypeProvider := &nodeTypeProviderMock.NodeTypeProviderStub{} - managedPeersHolder := &testscommon.ManagedPeersHolderStub{} - - mtStrHandler, _ := NewMetaStorageHandler( - gCfg, - prefsConfig, - coordinator, - pathManager, - marshalizer, - hasher, - 1, - uit64Cvt, - nodeTypeProvider, - common.Normal, - managedPeersHolder, - disabled.NewStateStatistics(), - ) - + args := createStorageHandlerArgs() + mtStrHandler, _ := NewMetaStorageHandler(args) header := &block.MetaBlock{Nonce: 0} - headerHash, _ 
:= core.CalculateHash(marshalizer, hasher, header) + headerHash, _ := core.CalculateHash(args.Marshaller, args.Hasher, header) expectedBootInfo := bootstrapStorage.BootstrapHeaderInfo{ ShardId: core.MetachainShardId, Hash: headerHash, } @@ -133,35 +90,13 @@ func TestMetaStorageHandler_saveLastCrossNotarizedHeaders(t *testing.T) { _ = os.RemoveAll("./Epoch_0") }() - gCfg := testscommon.GetGeneralConfig() - prefsConfig := config.PreferencesConfig{} - coordinator := &mock.ShardCoordinatorStub{} - pathManager := &testscommon.PathManagerStub{} - marshalizer := &mock.MarshalizerMock{} - hasher := &hashingMocks.HasherMock{} - uit64Cvt := &mock.Uint64ByteSliceConverterMock{} - nodeTypeProvider := &nodeTypeProviderMock.NodeTypeProviderStub{} - managedPeersHolder := &testscommon.ManagedPeersHolderStub{} - - mtStrHandler, _ := NewMetaStorageHandler( - gCfg, - prefsConfig, - coordinator, - pathManager, - marshalizer, - hasher, - 1, - uit64Cvt, - nodeTypeProvider, - common.Normal, - managedPeersHolder, - disabled.NewStateStatistics(), - ) + args := createStorageHandlerArgs() + mtStrHandler, _ := NewMetaStorageHandler(args) hdr1 := &block.Header{Nonce: 1} hdr2 := &block.Header{Nonce: 2} - hdrHash1, _ := core.CalculateHash(marshalizer, hasher, hdr1) - hdrHash2, _ := core.CalculateHash(marshalizer, hasher, hdr2) + hdrHash1, _ := core.CalculateHash(args.Marshaller, args.Hasher, hdr1) + hdrHash2, _ := core.CalculateHash(args.Marshaller, args.Hasher, hdr2) hdr3 := &block.MetaBlock{ Nonce: 3, @@ -181,30 +116,8 @@ func TestMetaStorageHandler_saveTriggerRegistry(t *testing.T) { _ = os.RemoveAll("./Epoch_0") }() - gCfg := testscommon.GetGeneralConfig() - prefsConfig := config.PreferencesConfig{} - coordinator := &mock.ShardCoordinatorStub{} - pathManager := &testscommon.PathManagerStub{} - marshalizer := &mock.MarshalizerMock{} - hasher := &hashingMocks.HasherMock{} - uit64Cvt := &mock.Uint64ByteSliceConverterMock{} - nodeTypeProvider := &nodeTypeProviderMock.NodeTypeProviderStub{} - managedPeersHolder := &testscommon.ManagedPeersHolderStub{} - - mtStrHandler, _ := NewMetaStorageHandler( - gCfg, - prefsConfig, - coordinator, - pathManager, - marshalizer, - hasher, - 1, - uit64Cvt, - nodeTypeProvider, - common.Normal, - managedPeersHolder, - disabled.NewStateStatistics(), - ) + args := createStorageHandlerArgs() + mtStrHandler, _ := NewMetaStorageHandler(args) components := &ComponentsNeededForBootstrap{ EpochStartMetaBlock: &block.MetaBlock{Nonce: 3}, @@ -220,30 +133,8 @@ func TestMetaStorageHandler_saveDataToStorage(t *testing.T) { _ = os.RemoveAll("./Epoch_0") }() - gCfg := testscommon.GetGeneralConfig() - prefsConfig := config.PreferencesConfig{} - coordinator := &mock.ShardCoordinatorStub{} - pathManager := &testscommon.PathManagerStub{} - marshalizer := &mock.MarshalizerMock{} - hasher := &hashingMocks.HasherMock{} - uit64Cvt := &mock.Uint64ByteSliceConverterMock{} - nodeTypeProvider := &nodeTypeProviderMock.NodeTypeProviderStub{} - managedPeersHolder := &testscommon.ManagedPeersHolderStub{} - - mtStrHandler, _ := NewMetaStorageHandler( - gCfg, - prefsConfig, - coordinator, - pathManager, - marshalizer, - hasher, - 1, - uit64Cvt, - nodeTypeProvider, - common.Normal, - managedPeersHolder, - disabled.NewStateStatistics(), - ) + args := createStorageHandlerArgs() + mtStrHandler, _ := NewMetaStorageHandler(args) components := &ComponentsNeededForBootstrap{ EpochStartMetaBlock: &block.MetaBlock{Nonce: 3}, @@ -276,30 +167,8 @@ func testMetaWithMissingStorer(missingUnit dataRetriever.UnitType, atCallNumber _ = 
os.RemoveAll("./Epoch_0") }() - gCfg := testscommon.GetGeneralConfig() - prefsConfig := config.PreferencesConfig{} - coordinator := &mock.ShardCoordinatorStub{} - pathManager := &testscommon.PathManagerStub{} - marshalizer := &mock.MarshalizerMock{} - hasher := &hashingMocks.HasherMock{} - uit64Cvt := &mock.Uint64ByteSliceConverterMock{} - nodeTypeProvider := &nodeTypeProviderMock.NodeTypeProviderStub{} - managedPeersHolder := &testscommon.ManagedPeersHolderStub{} - - mtStrHandler, _ := NewMetaStorageHandler( - gCfg, - prefsConfig, - coordinator, - pathManager, - marshalizer, - hasher, - 1, - uit64Cvt, - nodeTypeProvider, - common.Normal, - managedPeersHolder, - disabled.NewStateStatistics(), - ) + args := createStorageHandlerArgs() + mtStrHandler, _ := NewMetaStorageHandler(args) counter := 0 mtStrHandler.storageService = &storageStubs.ChainStorerStub{ GetStorerCalled: func(unitType dataRetriever.UnitType) (storage.Storer, error) { diff --git a/epochStart/bootstrap/process.go b/epochStart/bootstrap/process.go index 55a642a6793..dce9135e0a3 100644 --- a/epochStart/bootstrap/process.go +++ b/epochStart/bootstrap/process.go @@ -73,7 +73,7 @@ type Parameters struct { Epoch uint32 SelfShardId uint32 NumOfShards uint32 - NodesConfig *nodesCoordinator.NodesCoordinatorRegistry + NodesConfig nodesCoordinator.NodesCoordinatorRegistryHandler } // ComponentsNeededForBootstrap holds the components which need to be initialized from network @@ -81,7 +81,7 @@ type ComponentsNeededForBootstrap struct { EpochStartMetaBlock data.MetaHeaderHandler PreviousEpochStart data.MetaHeaderHandler ShardHeader data.HeaderHandler - NodesConfig *nodesCoordinator.NodesCoordinatorRegistry + NodesConfig nodesCoordinator.NodesCoordinatorRegistryHandler Headers map[string]data.HeaderHandler ShardCoordinator sharding.Coordinator PendingMiniBlocks map[string]*block.MiniBlock @@ -136,15 +136,17 @@ type epochStartBootstrap struct { storageOpenerHandler storage.UnitOpenerHandler latestStorageDataProvider storage.LatestStorageDataProviderHandler argumentsParser process.ArgumentsParser + dataSyncerFactory types.ScheduledDataSyncerCreator dataSyncerWithScheduled types.ScheduledDataSyncer storageService dataRetriever.StorageService + nodesCoordinatorRegistryFactory nodesCoordinator.NodesCoordinatorRegistryFactory // gathered data epochStartMeta data.MetaHeaderHandler prevEpochStartMeta data.MetaHeaderHandler syncedHeaders map[string]data.HeaderHandler - nodesConfig *nodesCoordinator.NodesCoordinatorRegistry + nodesConfig nodesCoordinator.NodesCoordinatorRegistryHandler baseData baseDataInStorage startRound int64 nodeType core.NodeType @@ -163,30 +165,31 @@ type baseDataInStorage struct { // ArgsEpochStartBootstrap holds the arguments needed for creating an epoch start data provider component type ArgsEpochStartBootstrap struct { - CoreComponentsHolder process.CoreComponentsHolder - CryptoComponentsHolder process.CryptoComponentsHolder - DestinationShardAsObserver uint32 - MainMessenger p2p.Messenger - FullArchiveMessenger p2p.Messenger - GeneralConfig config.Config - PrefsConfig config.PreferencesConfig - FlagsConfig config.ContextFlagsConfig - EconomicsData process.EconomicsDataHandler - GenesisNodesConfig sharding.GenesisNodesSetupHandler - GenesisShardCoordinator sharding.Coordinator - StorageUnitOpener storage.UnitOpenerHandler - LatestStorageDataProvider storage.LatestStorageDataProviderHandler - Rater nodesCoordinator.ChanceComputer - NodeShuffler nodesCoordinator.NodesShuffler - RoundHandler epochStart.RoundHandler - 
ArgumentsParser process.ArgumentsParser - StatusHandler core.AppStatusHandler - HeaderIntegrityVerifier process.HeaderIntegrityVerifier - DataSyncerCreator types.ScheduledDataSyncerCreator - ScheduledSCRsStorer storage.Storer - TrieSyncStatisticsProvider common.SizeSyncStatisticsHandler - NodeProcessingMode common.NodeProcessingMode - StateStatsHandler common.StateStatisticsHandler + CoreComponentsHolder process.CoreComponentsHolder + CryptoComponentsHolder process.CryptoComponentsHolder + DestinationShardAsObserver uint32 + MainMessenger p2p.Messenger + FullArchiveMessenger p2p.Messenger + GeneralConfig config.Config + PrefsConfig config.PreferencesConfig + FlagsConfig config.ContextFlagsConfig + EconomicsData process.EconomicsDataHandler + GenesisNodesConfig sharding.GenesisNodesSetupHandler + GenesisShardCoordinator sharding.Coordinator + StorageUnitOpener storage.UnitOpenerHandler + LatestStorageDataProvider storage.LatestStorageDataProviderHandler + Rater nodesCoordinator.ChanceComputer + NodeShuffler nodesCoordinator.NodesShuffler + RoundHandler epochStart.RoundHandler + ArgumentsParser process.ArgumentsParser + StatusHandler core.AppStatusHandler + HeaderIntegrityVerifier process.HeaderIntegrityVerifier + DataSyncerCreator types.ScheduledDataSyncerCreator + ScheduledSCRsStorer storage.Storer + TrieSyncStatisticsProvider common.SizeSyncStatisticsHandler + NodeProcessingMode common.NodeProcessingMode + StateStatsHandler common.StateStatisticsHandler + NodesCoordinatorRegistryFactory nodesCoordinator.NodesCoordinatorRegistryFactory } type dataToSync struct { @@ -205,38 +208,40 @@ func NewEpochStartBootstrap(args ArgsEpochStartBootstrap) (*epochStartBootstrap, } epochStartProvider := &epochStartBootstrap{ - coreComponentsHolder: args.CoreComponentsHolder, - cryptoComponentsHolder: args.CryptoComponentsHolder, - mainMessenger: args.MainMessenger, - fullArchiveMessenger: args.FullArchiveMessenger, - generalConfig: args.GeneralConfig, - prefsConfig: args.PrefsConfig, - flagsConfig: args.FlagsConfig, - economicsData: args.EconomicsData, - genesisNodesConfig: args.GenesisNodesConfig, - genesisShardCoordinator: args.GenesisShardCoordinator, - rater: args.Rater, - destinationShardAsObserver: args.DestinationShardAsObserver, - nodeShuffler: args.NodeShuffler, - roundHandler: args.RoundHandler, - storageOpenerHandler: args.StorageUnitOpener, - latestStorageDataProvider: args.LatestStorageDataProvider, - shuffledOut: false, - statusHandler: args.StatusHandler, - nodeType: core.NodeTypeObserver, - argumentsParser: args.ArgumentsParser, - headerIntegrityVerifier: args.HeaderIntegrityVerifier, - numConcurrentTrieSyncers: args.GeneralConfig.TrieSync.NumConcurrentTrieSyncers, - maxHardCapForMissingNodes: args.GeneralConfig.TrieSync.MaxHardCapForMissingNodes, - trieSyncerVersion: args.GeneralConfig.TrieSync.TrieSyncerVersion, - checkNodesOnDisk: args.GeneralConfig.TrieSync.CheckNodesOnDisk, - dataSyncerFactory: args.DataSyncerCreator, - storerScheduledSCRs: args.ScheduledSCRsStorer, - shardCoordinator: args.GenesisShardCoordinator, - trieSyncStatisticsProvider: args.TrieSyncStatisticsProvider, - nodeProcessingMode: args.NodeProcessingMode, - nodeOperationMode: common.NormalOperation, - stateStatsHandler: args.StateStatsHandler, + coreComponentsHolder: args.CoreComponentsHolder, + cryptoComponentsHolder: args.CryptoComponentsHolder, + mainMessenger: args.MainMessenger, + fullArchiveMessenger: args.FullArchiveMessenger, + generalConfig: args.GeneralConfig, + prefsConfig: args.PrefsConfig, + flagsConfig: 
args.FlagsConfig, + economicsData: args.EconomicsData, + genesisNodesConfig: args.GenesisNodesConfig, + genesisShardCoordinator: args.GenesisShardCoordinator, + rater: args.Rater, + destinationShardAsObserver: args.DestinationShardAsObserver, + nodeShuffler: args.NodeShuffler, + roundHandler: args.RoundHandler, + storageOpenerHandler: args.StorageUnitOpener, + latestStorageDataProvider: args.LatestStorageDataProvider, + shuffledOut: false, + statusHandler: args.StatusHandler, + nodeType: core.NodeTypeObserver, + argumentsParser: args.ArgumentsParser, + headerIntegrityVerifier: args.HeaderIntegrityVerifier, + numConcurrentTrieSyncers: args.GeneralConfig.TrieSync.NumConcurrentTrieSyncers, + maxHardCapForMissingNodes: args.GeneralConfig.TrieSync.MaxHardCapForMissingNodes, + trieSyncerVersion: args.GeneralConfig.TrieSync.TrieSyncerVersion, + checkNodesOnDisk: args.GeneralConfig.TrieSync.CheckNodesOnDisk, + dataSyncerFactory: args.DataSyncerCreator, + storerScheduledSCRs: args.ScheduledSCRsStorer, + shardCoordinator: args.GenesisShardCoordinator, + trieSyncStatisticsProvider: args.TrieSyncStatisticsProvider, + nodeProcessingMode: args.NodeProcessingMode, + nodeOperationMode: common.NormalOperation, + stateStatsHandler: args.StateStatsHandler, + startEpoch: args.GeneralConfig.EpochStartConfig.GenesisEpoch, + nodesCoordinatorRegistryFactory: args.NodesCoordinatorRegistryFactory, } if epochStartProvider.prefsConfig.FullArchive { @@ -754,19 +759,20 @@ func (e *epochStartBootstrap) processNodesConfig(pubKey []byte) ([]*block.MiniBl shardId = e.genesisShardCoordinator.SelfId() } argsNewValidatorStatusSyncers := ArgsNewSyncValidatorStatus{ - DataPool: e.dataPool, - Marshalizer: e.coreComponentsHolder.InternalMarshalizer(), - RequestHandler: e.requestHandler, - ChanceComputer: e.rater, - GenesisNodesConfig: e.genesisNodesConfig, - NodeShuffler: e.nodeShuffler, - Hasher: e.coreComponentsHolder.Hasher(), - PubKey: pubKey, - ShardIdAsObserver: shardId, - ChanNodeStop: e.coreComponentsHolder.ChanStopNodeProcess(), - NodeTypeProvider: e.coreComponentsHolder.NodeTypeProvider(), - IsFullArchive: e.prefsConfig.FullArchive, - EnableEpochsHandler: e.coreComponentsHolder.EnableEpochsHandler(), + DataPool: e.dataPool, + Marshalizer: e.coreComponentsHolder.InternalMarshalizer(), + RequestHandler: e.requestHandler, + ChanceComputer: e.rater, + GenesisNodesConfig: e.genesisNodesConfig, + NodeShuffler: e.nodeShuffler, + Hasher: e.coreComponentsHolder.Hasher(), + PubKey: pubKey, + ShardIdAsObserver: shardId, + ChanNodeStop: e.coreComponentsHolder.ChanStopNodeProcess(), + NodeTypeProvider: e.coreComponentsHolder.NodeTypeProvider(), + IsFullArchive: e.prefsConfig.FullArchive, + EnableEpochsHandler: e.coreComponentsHolder.EnableEpochsHandler(), + NodesCoordinatorRegistryFactory: e.nodesCoordinatorRegistryFactory, } e.nodesConfigHandler, err = NewSyncValidatorStatus(argsNewValidatorStatusSyncers) @@ -784,20 +790,22 @@ func (e *epochStartBootstrap) processNodesConfig(pubKey []byte) ([]*block.MiniBl func (e *epochStartBootstrap) requestAndProcessForMeta(peerMiniBlocks []*block.MiniBlock) error { var err error - storageHandlerComponent, err := NewMetaStorageHandler( - e.generalConfig, - e.prefsConfig, - e.shardCoordinator, - e.coreComponentsHolder.PathHandler(), - e.coreComponentsHolder.InternalMarshalizer(), - e.coreComponentsHolder.Hasher(), - e.epochStartMeta.GetEpoch(), - e.coreComponentsHolder.Uint64ByteSliceConverter(), - e.coreComponentsHolder.NodeTypeProvider(), - e.nodeProcessingMode, - 
e.cryptoComponentsHolder.ManagedPeersHolder(), - e.stateStatsHandler, - ) + argsStorageHandler := StorageHandlerArgs{ + GeneralConfig: e.generalConfig, + PreferencesConfig: e.prefsConfig, + ShardCoordinator: e.shardCoordinator, + PathManagerHandler: e.coreComponentsHolder.PathHandler(), + Marshaller: e.coreComponentsHolder.InternalMarshalizer(), + Hasher: e.coreComponentsHolder.Hasher(), + CurrentEpoch: e.epochStartMeta.GetEpoch(), + Uint64Converter: e.coreComponentsHolder.Uint64ByteSliceConverter(), + NodeTypeProvider: e.coreComponentsHolder.NodeTypeProvider(), + NodesCoordinatorRegistryFactory: e.nodesCoordinatorRegistryFactory, + ManagedPeersHolder: e.cryptoComponentsHolder.ManagedPeersHolder(), + NodeProcessingMode: e.nodeProcessingMode, + StateStatsHandler: e.stateStatsHandler, + } + storageHandlerComponent, err := NewMetaStorageHandler(argsStorageHandler) if err != nil { return err } @@ -954,20 +962,22 @@ func (e *epochStartBootstrap) requestAndProcessForShard(peerMiniBlocks []*block. e.syncedHeaders[hash] = hdr } - storageHandlerComponent, err := NewShardStorageHandler( - e.generalConfig, - e.prefsConfig, - e.shardCoordinator, - e.coreComponentsHolder.PathHandler(), - e.coreComponentsHolder.InternalMarshalizer(), - e.coreComponentsHolder.Hasher(), - e.baseData.lastEpoch, - e.coreComponentsHolder.Uint64ByteSliceConverter(), - e.coreComponentsHolder.NodeTypeProvider(), - e.nodeProcessingMode, - e.cryptoComponentsHolder.ManagedPeersHolder(), - e.stateStatsHandler, - ) + argsStorageHandler := StorageHandlerArgs{ + GeneralConfig: e.generalConfig, + PreferencesConfig: e.prefsConfig, + ShardCoordinator: e.shardCoordinator, + PathManagerHandler: e.coreComponentsHolder.PathHandler(), + Marshaller: e.coreComponentsHolder.InternalMarshalizer(), + Hasher: e.coreComponentsHolder.Hasher(), + CurrentEpoch: e.baseData.lastEpoch, + Uint64Converter: e.coreComponentsHolder.Uint64ByteSliceConverter(), + NodeTypeProvider: e.coreComponentsHolder.NodeTypeProvider(), + NodesCoordinatorRegistryFactory: e.nodesCoordinatorRegistryFactory, + ManagedPeersHolder: e.cryptoComponentsHolder.ManagedPeersHolder(), + NodeProcessingMode: e.nodeProcessingMode, + StateStatsHandler: e.stateStatsHandler, + } + storageHandlerComponent, err := NewShardStorageHandler(argsStorageHandler) if err != nil { return err } @@ -1220,22 +1230,23 @@ func (e *epochStartBootstrap) createResolversContainer() error { // this one should only be used before determining the correct shard where the node should reside log.Debug("epochStartBootstrap.createRequestHandler", "shard", e.shardCoordinator.SelfId()) resolversContainerArgs := resolverscontainer.FactoryArgs{ - ShardCoordinator: e.shardCoordinator, - MainMessenger: e.mainMessenger, - FullArchiveMessenger: e.fullArchiveMessenger, - Store: storageService, - Marshalizer: e.coreComponentsHolder.InternalMarshalizer(), - DataPools: e.dataPool, - Uint64ByteSliceConverter: uint64ByteSlice.NewBigEndianConverter(), - NumConcurrentResolvingJobs: 10, - DataPacker: dataPacker, - TriesContainer: e.trieContainer, - SizeCheckDelta: 0, - InputAntifloodHandler: disabled.NewAntiFloodHandler(), - OutputAntifloodHandler: disabled.NewAntiFloodHandler(), - MainPreferredPeersHolder: disabled.NewPreferredPeersHolder(), - FullArchivePreferredPeersHolder: disabled.NewPreferredPeersHolder(), - PayloadValidator: payloadValidator, + ShardCoordinator: e.shardCoordinator, + MainMessenger: e.mainMessenger, + FullArchiveMessenger: e.fullArchiveMessenger, + Store: storageService, + Marshalizer: 
e.coreComponentsHolder.InternalMarshalizer(), + DataPools: e.dataPool, + Uint64ByteSliceConverter: uint64ByteSlice.NewBigEndianConverter(), + NumConcurrentResolvingJobs: 10, + NumConcurrentResolvingTrieNodesJobs: 3, + DataPacker: dataPacker, + TriesContainer: e.trieContainer, + SizeCheckDelta: 0, + InputAntifloodHandler: disabled.NewAntiFloodHandler(), + OutputAntifloodHandler: disabled.NewAntiFloodHandler(), + MainPreferredPeersHolder: disabled.NewPreferredPeersHolder(), + FullArchivePreferredPeersHolder: disabled.NewPreferredPeersHolder(), + PayloadValidator: payloadValidator, } resolverFactory, err := resolverscontainer.NewMetaResolversContainerFactory(resolversContainerArgs) if err != nil { diff --git a/epochStart/bootstrap/process_test.go b/epochStart/bootstrap/process_test.go index d95d97282d5..11a42a22301 100644 --- a/epochStart/bootstrap/process_test.go +++ b/epochStart/bootstrap/process_test.go @@ -41,6 +41,7 @@ import ( "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" "github.com/multiversx/mx-chain-go/testscommon/epochNotifier" "github.com/multiversx/mx-chain-go/testscommon/genericMocks" + "github.com/multiversx/mx-chain-go/testscommon/genesisMocks" "github.com/multiversx/mx-chain-go/testscommon/hashingMocks" "github.com/multiversx/mx-chain-go/testscommon/marshallerMock" "github.com/multiversx/mx-chain-go/testscommon/nodeTypeProviderMock" @@ -85,7 +86,14 @@ func createComponentsForEpochStart() (*mock.CoreComponentsMock, *mock.CryptoComp NodeTypeProviderField: &nodeTypeProviderMock.NodeTypeProviderStub{}, ProcessStatusHandlerInstance: &testscommon.ProcessStatusHandlerStub{}, HardforkTriggerPubKeyField: []byte("provided hardfork pub key"), - EnableEpochsHandlerField: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + EnableEpochsHandlerField: &enableEpochsHandlerMock.EnableEpochsHandlerStub{ + GetActivationEpochCalled: func(flag core.EnableEpochFlag) uint32 { + if flag == common.StakingV4Step2Flag { + return 99999 + } + return 0 + }, + }, }, &mock.CryptoComponentsMock{ PubKey: &cryptoMocks.PublicKeyStub{}, @@ -111,9 +119,9 @@ func createMockEpochStartBootstrapArgs( MainMessenger: &p2pmocks.MessengerStub{ ConnectedPeersCalled: func() []core.PeerID { return []core.PeerID{"peer0", "peer1", "peer2", "peer3", "peer4", "peer5"} - }, - }, - FullArchiveMessenger: &p2pmocks.MessengerStub{}, + }}, + NodesCoordinatorRegistryFactory: &shardingMocks.NodesCoordinatorRegistryFactoryMock{}, + FullArchiveMessenger: &p2pmocks.MessengerStub{}, GeneralConfig: config.Config{ MiniBlocksStorage: generalCfg.MiniBlocksStorage, PeerBlockBodyStorage: generalCfg.PeerBlockBodyStorage, @@ -205,7 +213,7 @@ func createMockEpochStartBootstrapArgs( return 1 }, }, - GenesisNodesConfig: &mock.NodesSetupStub{}, + GenesisNodesConfig: &genesisMocks.NodesSetupStub{}, GenesisShardCoordinator: mock.NewMultipleShardsCoordinatorMock(), Rater: &mock.RaterStub{}, DestinationShardAsObserver: 0, @@ -794,7 +802,7 @@ func TestIsStartInEpochZero(t *testing.T) { coreComp, cryptoComp := createComponentsForEpochStart() args := createMockEpochStartBootstrapArgs(coreComp, cryptoComp) - args.GenesisNodesConfig = &mock.NodesSetupStub{ + args.GenesisNodesConfig = &genesisMocks.NodesSetupStub{ GetStartTimeCalled: func() int64 { return 1000 }, @@ -828,7 +836,7 @@ func TestEpochStartBootstrap_BootstrapShouldStartBootstrapProcess(t *testing.T) roundDuration := uint64(60000) coreComp, cryptoComp := createComponentsForEpochStart() args := createMockEpochStartBootstrapArgs(coreComp, cryptoComp) - args.GenesisNodesConfig 
= &mock.NodesSetupStub{ + args.GenesisNodesConfig = &genesisMocks.NodesSetupStub{ GetRoundDurationCalled: func() uint64 { return roundDuration }, @@ -887,7 +895,7 @@ func TestPrepareForEpochZero_NodeInGenesisShouldNotAlterShardID(t *testing.T) { } args.DestinationShardAsObserver = uint32(7) - args.GenesisNodesConfig = &mock.NodesSetupStub{ + args.GenesisNodesConfig = &genesisMocks.NodesSetupStub{ InitialNodesInfoCalled: func() (map[uint32][]nodesCoordinator.GenesisNodeInfoHandler, map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) { eligibleMap := map[uint32][]nodesCoordinator.GenesisNodeInfoHandler{ 1: {mock.NewNodeInfo([]byte("addr"), []byte("pubKey11"), 1, initRating)}, @@ -922,7 +930,7 @@ func TestPrepareForEpochZero_NodeNotInGenesisShouldAlterShardID(t *testing.T) { }, } args.DestinationShardAsObserver = desiredShardAsObserver - args.GenesisNodesConfig = &mock.NodesSetupStub{ + args.GenesisNodesConfig = &genesisMocks.NodesSetupStub{ InitialNodesInfoCalled: func() (map[uint32][]nodesCoordinator.GenesisNodeInfoHandler, map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) { eligibleMap := map[uint32][]nodesCoordinator.GenesisNodeInfoHandler{ 1: {mock.NewNodeInfo([]byte("addr"), []byte("pubKey11"), 1, initRating)}, @@ -1487,7 +1495,7 @@ func getNodesConfigMock(numOfShards uint32) sharding.GenesisNodesSetupHandler { roundDurationMillis := 4000 epochDurationMillis := 50 * int64(roundDurationMillis) - nodesConfig := &mock.NodesSetupStub{ + nodesConfig := &genesisMocks.NodesSetupStub{ InitialNodesInfoCalled: func() (m map[uint32][]nodesCoordinator.GenesisNodeInfoHandler, m2 map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) { oneMap := make(map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) for i := uint32(0); i < numOfShards; i++ { diff --git a/epochStart/bootstrap/shardStorageHandler.go b/epochStart/bootstrap/shardStorageHandler.go index 881aedf74c2..49535a7228c 100644 --- a/epochStart/bootstrap/shardStorageHandler.go +++ b/epochStart/bootstrap/shardStorageHandler.go @@ -10,17 +10,11 @@ import ( "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-core-go/data/block" - "github.com/multiversx/mx-chain-core-go/data/typeConverters" - "github.com/multiversx/mx-chain-core-go/hashing" - "github.com/multiversx/mx-chain-core-go/marshal" "github.com/multiversx/mx-chain-go/common" - "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/epochStart" "github.com/multiversx/mx-chain-go/epochStart/bootstrap/disabled" "github.com/multiversx/mx-chain-go/process/block/bootstrapStorage" - "github.com/multiversx/mx-chain-go/sharding" - "github.com/multiversx/mx-chain-go/storage" "github.com/multiversx/mx-chain-go/storage/factory" logger "github.com/multiversx/mx-chain-logger-go" ) @@ -30,36 +24,28 @@ type shardStorageHandler struct { } // NewShardStorageHandler will return a new instance of shardStorageHandler -func NewShardStorageHandler( - generalConfig config.Config, - prefsConfig config.PreferencesConfig, - shardCoordinator sharding.Coordinator, - pathManagerHandler storage.PathManagerHandler, - marshalizer marshal.Marshalizer, - hasher hashing.Hasher, - currentEpoch uint32, - uint64Converter typeConverters.Uint64ByteSliceConverter, - nodeTypeProvider core.NodeTypeProviderHandler, - nodeProcessingMode common.NodeProcessingMode, - managedPeersHolder common.ManagedPeersHolder, - stateStatsHandler common.StateStatisticsHandler, -) 
(*shardStorageHandler, error) { +func NewShardStorageHandler(args StorageHandlerArgs) (*shardStorageHandler, error) { + err := checkNilArgs(args) + if err != nil { + return nil, err + } + epochStartNotifier := &disabled.EpochStartNotifier{} storageFactory, err := factory.NewStorageServiceFactory( factory.StorageServiceFactoryArgs{ - Config: generalConfig, - PrefsConfig: prefsConfig, - ShardCoordinator: shardCoordinator, - PathManager: pathManagerHandler, + Config: args.GeneralConfig, + PrefsConfig: args.PreferencesConfig, + ShardCoordinator: args.ShardCoordinator, + PathManager: args.PathManagerHandler, EpochStartNotifier: epochStartNotifier, - NodeTypeProvider: nodeTypeProvider, - CurrentEpoch: currentEpoch, + NodeTypeProvider: args.NodeTypeProvider, StorageType: factory.BootstrapStorageService, + ManagedPeersHolder: args.ManagedPeersHolder, + CurrentEpoch: args.CurrentEpoch, CreateTrieEpochRootHashStorer: false, - NodeProcessingMode: nodeProcessingMode, + NodeProcessingMode: args.NodeProcessingMode, RepopulateTokensSupplies: false, // tokens supplies cannot be repopulated at this time - ManagedPeersHolder: managedPeersHolder, - StateStatsHandler: stateStatsHandler, + StateStatsHandler: args.StateStatsHandler, }, ) if err != nil { @@ -72,12 +58,13 @@ func NewShardStorageHandler( } base := &baseStorageHandler{ - storageService: storageService, - shardCoordinator: shardCoordinator, - marshalizer: marshalizer, - hasher: hasher, - currentEpoch: currentEpoch, - uint64Converter: uint64Converter, + storageService: storageService, + shardCoordinator: args.ShardCoordinator, + marshalizer: args.Marshaller, + hasher: args.Hasher, + currentEpoch: args.CurrentEpoch, + uint64Converter: args.Uint64Converter, + nodesCoordinatorRegistryFactory: args.NodesCoordinatorRegistryFactory, } return &shardStorageHandler{baseStorageHandler: base}, nil @@ -123,7 +110,7 @@ func (ssh *shardStorageHandler) SaveDataToStorage(components *ComponentsNeededFo return err } - components.NodesConfig.CurrentEpoch = components.ShardHeader.GetEpoch() + components.NodesConfig.SetCurrentEpoch(components.ShardHeader.GetEpoch()) nodesCoordinatorConfigKey, err := ssh.saveNodesCoordinatorRegistry(components.EpochStartMetaBlock, components.NodesConfig) if err != nil { return err diff --git a/epochStart/bootstrap/shardStorageHandler_test.go b/epochStart/bootstrap/shardStorageHandler_test.go index b27f13df28b..018bc4b99b8 100644 --- a/epochStart/bootstrap/shardStorageHandler_test.go +++ b/epochStart/bootstrap/shardStorageHandler_test.go @@ -13,24 +13,13 @@ import ( "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-core-go/data/block" - "github.com/multiversx/mx-chain-core-go/data/typeConverters" - "github.com/multiversx/mx-chain-core-go/hashing" - "github.com/multiversx/mx-chain-core-go/marshal" - "github.com/multiversx/mx-chain-go/common" - "github.com/multiversx/mx-chain-go/common/statistics/disabled" - "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/epochStart" - "github.com/multiversx/mx-chain-go/epochStart/mock" "github.com/multiversx/mx-chain-go/process/block/bootstrapStorage" - "github.com/multiversx/mx-chain-go/sharding" "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" "github.com/multiversx/mx-chain-go/storage" - "github.com/multiversx/mx-chain-go/testscommon" epochStartMocks "github.com/multiversx/mx-chain-go/testscommon/bootstrapMocks/epochStart" - 
"github.com/multiversx/mx-chain-go/testscommon/hashingMocks" "github.com/multiversx/mx-chain-go/testscommon/marshallerMock" - "github.com/multiversx/mx-chain-go/testscommon/nodeTypeProviderMock" storageStubs "github.com/multiversx/mx-chain-go/testscommon/storage" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -41,21 +30,8 @@ func TestNewShardStorageHandler_ShouldWork(t *testing.T) { _ = os.RemoveAll("./Epoch_0") }() - args := createDefaultShardStorageArgs() - shardStorage, err := NewShardStorageHandler( - args.generalConfig, - args.prefsConfig, - args.shardCoordinator, - args.pathManagerHandler, - args.marshalizer, - args.hasher, - 1, - args.uint64Converter, - args.nodeTypeProvider, - args.nodeProcessingMode, - args.managedPeersHolder, - disabled.NewStateStatistics(), - ) + args := createStorageHandlerArgs() + shardStorage, err := NewShardStorageHandler(args) assert.False(t, check.IfNil(shardStorage)) assert.Nil(t, err) @@ -66,21 +42,8 @@ func TestShardStorageHandler_SaveDataToStorageShardDataNotFound(t *testing.T) { _ = os.RemoveAll("./Epoch_0") }() - args := createDefaultShardStorageArgs() - shardStorage, _ := NewShardStorageHandler( - args.generalConfig, - args.prefsConfig, - args.shardCoordinator, - args.pathManagerHandler, - args.marshalizer, - args.hasher, - 1, - args.uint64Converter, - args.nodeTypeProvider, - args.nodeProcessingMode, - args.managedPeersHolder, - disabled.NewStateStatistics(), - ) + args := createStorageHandlerArgs() + shardStorage, _ := NewShardStorageHandler(args) components := &ComponentsNeededForBootstrap{ EpochStartMetaBlock: &block.MetaBlock{Epoch: 1}, @@ -97,21 +60,8 @@ func TestShardStorageHandler_SaveDataToStorageMissingHeader(t *testing.T) { _ = os.RemoveAll("./Epoch_0") }() - args := createDefaultShardStorageArgs() - shardStorage, _ := NewShardStorageHandler( - args.generalConfig, - args.prefsConfig, - args.shardCoordinator, - args.pathManagerHandler, - args.marshalizer, - args.hasher, - 1, - args.uint64Converter, - args.nodeTypeProvider, - args.nodeProcessingMode, - args.managedPeersHolder, - disabled.NewStateStatistics(), - ) + args := createStorageHandlerArgs() + shardStorage, _ := NewShardStorageHandler(args) components := &ComponentsNeededForBootstrap{ EpochStartMetaBlock: &block.MetaBlock{ @@ -151,21 +101,8 @@ func testShardWithMissingStorer(missingUnit dataRetriever.UnitType, atCallNumber }() counter := 0 - args := createDefaultShardStorageArgs() - shardStorage, _ := NewShardStorageHandler( - args.generalConfig, - args.prefsConfig, - args.shardCoordinator, - args.pathManagerHandler, - args.marshalizer, - args.hasher, - 1, - args.uint64Converter, - args.nodeTypeProvider, - args.nodeProcessingMode, - args.managedPeersHolder, - disabled.NewStateStatistics(), - ) + args := createStorageHandlerArgs() + shardStorage, _ := NewShardStorageHandler(args) shardStorage.storageService = &storageStubs.ChainStorerStub{ GetStorerCalled: func(unitType dataRetriever.UnitType) (storage.Storer, error) { counter++ @@ -206,21 +143,8 @@ func TestShardStorageHandler_SaveDataToStorage(t *testing.T) { _ = os.RemoveAll("./Epoch_0") }() - args := createDefaultShardStorageArgs() - shardStorage, _ := NewShardStorageHandler( - args.generalConfig, - args.prefsConfig, - args.shardCoordinator, - args.pathManagerHandler, - args.marshalizer, - args.hasher, - 1, - args.uint64Converter, - args.nodeTypeProvider, - args.nodeProcessingMode, - args.managedPeersHolder, - disabled.NewStateStatistics(), - ) + args := createStorageHandlerArgs() + shardStorage, _ 
:= NewShardStorageHandler(args) hash1 := []byte("hash1") hdr1 := block.MetaBlock{ @@ -318,21 +242,8 @@ func TestShardStorageHandler_getCrossProcessedMiniBlockHeadersDestMe(t *testing. mbs := append(intraMbs, crossMbs...) - args := createDefaultShardStorageArgs() - shardStorage, _ := NewShardStorageHandler( - args.generalConfig, - args.prefsConfig, - args.shardCoordinator, - args.pathManagerHandler, - args.marshalizer, - args.hasher, - 1, - args.uint64Converter, - args.nodeTypeProvider, - args.nodeProcessingMode, - args.managedPeersHolder, - disabled.NewStateStatistics(), - ) + args := createStorageHandlerArgs() + shardStorage, _ := NewShardStorageHandler(args) shardHeader := &block.Header{ Nonce: 100, MiniBlockHeaders: mbs, @@ -351,21 +262,8 @@ func TestShardStorageHandler_getCrossProcessedMiniBlockHeadersDestMe(t *testing. func TestShardStorageHandler_getProcessedAndPendingMiniBlocksWithScheduledErrorGettingProcessedAndPendingMbs(t *testing.T) { t.Parallel() - args := createDefaultShardStorageArgs() - shardStorage, _ := NewShardStorageHandler( - args.generalConfig, - args.prefsConfig, - args.shardCoordinator, - args.pathManagerHandler, - args.marshalizer, - args.hasher, - 1, - args.uint64Converter, - args.nodeTypeProvider, - args.nodeProcessingMode, - args.managedPeersHolder, - disabled.NewStateStatistics(), - ) + args := createStorageHandlerArgs() + shardStorage, _ := NewShardStorageHandler(args) meta := &block.MetaBlock{ Nonce: 100, EpochStart: block.EpochStart{}, @@ -382,21 +280,8 @@ func TestShardStorageHandler_getProcessedAndPendingMiniBlocksWithScheduledErrorG func TestShardStorageHandler_getProcessedAndPendingMiniBlocksWithScheduledNoScheduled(t *testing.T) { t.Parallel() - args := createDefaultShardStorageArgs() - shardStorage, _ := NewShardStorageHandler( - args.generalConfig, - args.prefsConfig, - args.shardCoordinator, - args.pathManagerHandler, - args.marshalizer, - args.hasher, - 1, - args.uint64Converter, - args.nodeTypeProvider, - args.nodeProcessingMode, - args.managedPeersHolder, - disabled.NewStateStatistics(), - ) + args := createStorageHandlerArgs() + shardStorage, _ := NewShardStorageHandler(args) scenario := createPendingAndProcessedMiniBlocksScenario() processedMiniBlocks, pendingMiniBlocks, err := shardStorage.getProcessedAndPendingMiniBlocksWithScheduled(scenario.metaBlock, scenario.headers, scenario.shardHeader, false) @@ -410,21 +295,8 @@ func TestShardStorageHandler_getProcessedAndPendingMiniBlocksWithScheduledNoSche func TestShardStorageHandler_getProcessedAndPendingMiniBlocksWithScheduledWrongHeaderType(t *testing.T) { t.Parallel() - args := createDefaultShardStorageArgs() - shardStorage, _ := NewShardStorageHandler( - args.generalConfig, - args.prefsConfig, - args.shardCoordinator, - args.pathManagerHandler, - args.marshalizer, - args.hasher, - 1, - args.uint64Converter, - args.nodeTypeProvider, - args.nodeProcessingMode, - args.managedPeersHolder, - disabled.NewStateStatistics(), - ) + args := createStorageHandlerArgs() + shardStorage, _ := NewShardStorageHandler(args) scenario := createPendingAndProcessedMiniBlocksScenario() wrongShardHeader := &block.MetaBlock{} @@ -445,21 +317,8 @@ func TestShardStorageHandler_getProcessedAndPendingMiniBlocksWithScheduledWrongH func TestShardStorageHandler_getProcessedAndPendingMiniBlocksWithScheduled(t *testing.T) { t.Parallel() - args := createDefaultShardStorageArgs() - shardStorage, _ := NewShardStorageHandler( - args.generalConfig, - args.prefsConfig, - args.shardCoordinator, - args.pathManagerHandler, - 
args.marshalizer, - args.hasher, - 1, - args.uint64Converter, - args.nodeTypeProvider, - args.nodeProcessingMode, - args.managedPeersHolder, - disabled.NewStateStatistics(), - ) + args := createStorageHandlerArgs() + shardStorage, _ := NewShardStorageHandler(args) scenario := createPendingAndProcessedMiniBlocksScenario() processedMiniBlocks, pendingMiniBlocks, err := shardStorage.getProcessedAndPendingMiniBlocksWithScheduled(scenario.metaBlock, scenario.headers, scenario.shardHeader, true) @@ -626,21 +485,8 @@ func TestShardStorageHandler_getProcessedAndPendingMiniBlocksErrorGettingEpochSt _ = os.RemoveAll("./Epoch_0") }() - args := createDefaultShardStorageArgs() - shardStorage, _ := NewShardStorageHandler( - args.generalConfig, - args.prefsConfig, - args.shardCoordinator, - args.pathManagerHandler, - args.marshalizer, - args.hasher, - 1, - args.uint64Converter, - args.nodeTypeProvider, - args.nodeProcessingMode, - args.managedPeersHolder, - disabled.NewStateStatistics(), - ) + args := createStorageHandlerArgs() + shardStorage, _ := NewShardStorageHandler(args) meta := &block.MetaBlock{ Nonce: 100, EpochStart: block.EpochStart{}, @@ -662,21 +508,8 @@ func TestShardStorageHandler_getProcessedAndPendingMiniBlocksMissingHeader(t *te }() lastFinishedMetaBlock := "last finished meta block" - args := createDefaultShardStorageArgs() - shardStorage, _ := NewShardStorageHandler( - args.generalConfig, - args.prefsConfig, - args.shardCoordinator, - args.pathManagerHandler, - args.marshalizer, - args.hasher, - 1, - args.uint64Converter, - args.nodeTypeProvider, - args.nodeProcessingMode, - args.managedPeersHolder, - disabled.NewStateStatistics(), - ) + args := createStorageHandlerArgs() + shardStorage, _ := NewShardStorageHandler(args) meta := &block.MetaBlock{ Nonce: 100, EpochStart: block.EpochStart{ @@ -701,21 +534,8 @@ func TestShardStorageHandler_getProcessedAndPendingMiniBlocksWrongHeader(t *test lastFinishedMetaBlockHash := "last finished meta block" firstPendingMeta := "first pending meta" - args := createDefaultShardStorageArgs() - shardStorage, _ := NewShardStorageHandler( - args.generalConfig, - args.prefsConfig, - args.shardCoordinator, - args.pathManagerHandler, - args.marshalizer, - args.hasher, - 1, - args.uint64Converter, - args.nodeTypeProvider, - args.nodeProcessingMode, - args.managedPeersHolder, - disabled.NewStateStatistics(), - ) + args := createStorageHandlerArgs() + shardStorage, _ := NewShardStorageHandler(args) lastFinishedHeaders := createDefaultEpochStartShardData([]byte(lastFinishedMetaBlockHash), []byte("headerHash")) lastFinishedHeaders[0].FirstPendingMetaBlock = []byte(firstPendingMeta) meta := &block.MetaBlock{ @@ -745,21 +565,8 @@ func TestShardStorageHandler_getProcessedAndPendingMiniBlocksNilMetaBlock(t *tes lastFinishedMetaBlockHash := "last finished meta block" firstPendingMeta := "first pending meta" - args := createDefaultShardStorageArgs() - shardStorage, _ := NewShardStorageHandler( - args.generalConfig, - args.prefsConfig, - args.shardCoordinator, - args.pathManagerHandler, - args.marshalizer, - args.hasher, - 1, - args.uint64Converter, - args.nodeTypeProvider, - args.nodeProcessingMode, - args.managedPeersHolder, - disabled.NewStateStatistics(), - ) + args := createStorageHandlerArgs() + shardStorage, _ := NewShardStorageHandler(args) lastFinishedHeaders := createDefaultEpochStartShardData([]byte(lastFinishedMetaBlockHash), []byte("headerHash")) lastFinishedHeaders[0].FirstPendingMetaBlock = []byte(firstPendingMeta) meta := &block.MetaBlock{ @@ -791,21 
+598,8 @@ func TestShardStorageHandler_getProcessedAndPendingMiniBlocksNoProcessedNoPendin lastFinishedMetaBlockHash := "last finished meta block" firstPendingMeta := "first pending meta" - args := createDefaultShardStorageArgs() - shardStorage, _ := NewShardStorageHandler( - args.generalConfig, - args.prefsConfig, - args.shardCoordinator, - args.pathManagerHandler, - args.marshalizer, - args.hasher, - 1, - args.uint64Converter, - args.nodeTypeProvider, - args.nodeProcessingMode, - args.managedPeersHolder, - disabled.NewStateStatistics(), - ) + args := createStorageHandlerArgs() + shardStorage, _ := NewShardStorageHandler(args) lastFinishedHeaders := createDefaultEpochStartShardData([]byte(lastFinishedMetaBlockHash), []byte("headerHash")) lastFinishedHeaders[0].FirstPendingMetaBlock = []byte(firstPendingMeta) lastFinishedHeaders[0].PendingMiniBlockHeaders = nil @@ -833,21 +627,8 @@ func TestShardStorageHandler_getProcessedAndPendingMiniBlocksNoProcessedNoPendin func TestShardStorageHandler_getProcessedAndPendingMiniBlocksWithProcessedAndPendingMbs(t *testing.T) { t.Parallel() - args := createDefaultShardStorageArgs() - shardStorage, _ := NewShardStorageHandler( - args.generalConfig, - args.prefsConfig, - args.shardCoordinator, - args.pathManagerHandler, - args.marshalizer, - args.hasher, - 1, - args.uint64Converter, - args.nodeTypeProvider, - args.nodeProcessingMode, - args.managedPeersHolder, - disabled.NewStateStatistics(), - ) + args := createStorageHandlerArgs() + shardStorage, _ := NewShardStorageHandler(args) scenario := createPendingAndProcessedMiniBlocksScenario() processedMiniBlocks, pendingMiniBlocks, firstPendingMetaBlockHash, err := shardStorage.getProcessedAndPendingMiniBlocks(scenario.metaBlock, scenario.headers) @@ -864,21 +645,8 @@ func TestShardStorageHandler_saveLastCrossNotarizedHeadersWithoutScheduledGetSha _ = os.RemoveAll("./Epoch_0") }() - args := createDefaultShardStorageArgs() - shardStorage, _ := NewShardStorageHandler( - args.generalConfig, - args.prefsConfig, - args.shardCoordinator, - args.pathManagerHandler, - args.marshalizer, - args.hasher, - 1, - args.uint64Converter, - args.nodeTypeProvider, - args.nodeProcessingMode, - args.managedPeersHolder, - disabled.NewStateStatistics(), - ) + args := createStorageHandlerArgs() + shardStorage, _ := NewShardStorageHandler(args) headers := map[string]data.HeaderHandler{} meta := &block.MetaBlock{ @@ -898,21 +666,8 @@ func TestShardStorageHandler_saveLastCrossNotarizedHeadersWithoutScheduledMissin _ = os.RemoveAll("./Epoch_0") }() - args := createDefaultShardStorageArgs() - shardStorage, _ := NewShardStorageHandler( - args.generalConfig, - args.prefsConfig, - args.shardCoordinator, - args.pathManagerHandler, - args.marshalizer, - args.hasher, - 1, - args.uint64Converter, - args.nodeTypeProvider, - args.nodeProcessingMode, - args.managedPeersHolder, - disabled.NewStateStatistics(), - ) + args := createStorageHandlerArgs() + shardStorage, _ := NewShardStorageHandler(args) shard0HeaderHash := "shard0 header hash" lastFinishedMetaBlock := "last finished meta block" @@ -940,21 +695,8 @@ func TestShardStorageHandler_saveLastCrossNotarizedHeadersWithoutScheduledWrongT _ = os.RemoveAll("./Epoch_0") }() - args := createDefaultShardStorageArgs() - shardStorage, _ := NewShardStorageHandler( - args.generalConfig, - args.prefsConfig, - args.shardCoordinator, - args.pathManagerHandler, - args.marshalizer, - args.hasher, - 1, - args.uint64Converter, - args.nodeTypeProvider, - args.nodeProcessingMode, - args.managedPeersHolder, - 
disabled.NewStateStatistics(), - ) + args := createStorageHandlerArgs() + shardStorage, _ := NewShardStorageHandler(args) shard0HeaderHash := "shard0 header hash" lastFinishedMetaBlock := "last finished meta block" @@ -984,26 +726,12 @@ func TestShardStorageHandler_saveLastCrossNotarizedHeadersWithoutScheduledErrorW _ = os.RemoveAll("./Epoch_0") }() - args := createDefaultShardStorageArgs() + args := createStorageHandlerArgs() expectedErr := fmt.Errorf("expected error") - // Simulate an error when writing to storage with a mock marshaller - args.marshalizer = &marshallerMock.MarshalizerStub{MarshalCalled: func(obj interface{}) ([]byte, error) { + args.Marshaller = &marshallerMock.MarshalizerStub{MarshalCalled: func(obj interface{}) ([]byte, error) { return nil, expectedErr }} - shardStorage, _ := NewShardStorageHandler( - args.generalConfig, - args.prefsConfig, - args.shardCoordinator, - args.pathManagerHandler, - args.marshalizer, - args.hasher, - 1, - args.uint64Converter, - args.nodeTypeProvider, - args.nodeProcessingMode, - args.managedPeersHolder, - disabled.NewStateStatistics(), - ) + shardStorage, _ := NewShardStorageHandler(args) shard0HeaderHash := "shard0 header hash" lastFinishedMetaBlock := "last finished meta block" @@ -1033,21 +761,8 @@ func TestShardStorageHandler_saveLastCrossNotarizedHeadersWithoutScheduled(t *te _ = os.RemoveAll("./Epoch_0") }() - args := createDefaultShardStorageArgs() - shardStorage, _ := NewShardStorageHandler( - args.generalConfig, - args.prefsConfig, - args.shardCoordinator, - args.pathManagerHandler, - args.marshalizer, - args.hasher, - 1, - args.uint64Converter, - args.nodeTypeProvider, - args.nodeProcessingMode, - args.managedPeersHolder, - disabled.NewStateStatistics(), - ) + args := createStorageHandlerArgs() + shardStorage, _ := NewShardStorageHandler(args) shard0HeaderHash := "shard0 header hash" lastFinishedMetaBlock := "last finished meta block" @@ -1082,21 +797,8 @@ func TestShardStorageHandler_saveLastCrossNotarizedHeadersWithScheduledErrorUpda _ = os.RemoveAll("./Epoch_0") }() - args := createDefaultShardStorageArgs() - shardStorage, _ := NewShardStorageHandler( - args.generalConfig, - args.prefsConfig, - args.shardCoordinator, - args.pathManagerHandler, - args.marshalizer, - args.hasher, - 1, - args.uint64Converter, - args.nodeTypeProvider, - args.nodeProcessingMode, - args.managedPeersHolder, - disabled.NewStateStatistics(), - ) + args := createStorageHandlerArgs() + shardStorage, _ := NewShardStorageHandler(args) shard0HeaderHash := "shard0 header hash" lastFinishedMetaBlock := "last finished meta block" @@ -1125,21 +827,8 @@ func TestShardStorageHandler_saveLastCrossNotarizedHeadersWithScheduled(t *testi _ = os.RemoveAll("./Epoch_0") }() - args := createDefaultShardStorageArgs() - shardStorage, _ := NewShardStorageHandler( - args.generalConfig, - args.prefsConfig, - args.shardCoordinator, - args.pathManagerHandler, - args.marshalizer, - args.hasher, - 1, - args.uint64Converter, - args.nodeTypeProvider, - args.nodeProcessingMode, - args.managedPeersHolder, - disabled.NewStateStatistics(), - ) + args := createStorageHandlerArgs() + shardStorage, _ := NewShardStorageHandler(args) shard0HeaderHash := "shard0 header hash" lastFinishedMetaBlock := "last finished meta block" prevMetaHash := "prev metaHlock hash" @@ -1351,36 +1040,6 @@ func Test_getShardHeaderAndMetaHashes(t *testing.T) { require.Equal(t, metaHashes, headers[shardHdrKey].(data.ShardHeaderHandler).GetMetaBlockHashes()) } -type shardStorageArgs struct { - generalConfig 
config.Config - prefsConfig config.PreferencesConfig - shardCoordinator sharding.Coordinator - pathManagerHandler storage.PathManagerHandler - marshalizer marshal.Marshalizer - hasher hashing.Hasher - currentEpoch uint32 - uint64Converter typeConverters.Uint64ByteSliceConverter - nodeTypeProvider core.NodeTypeProviderHandler - nodeProcessingMode common.NodeProcessingMode - managedPeersHolder common.ManagedPeersHolder -} - -func createDefaultShardStorageArgs() shardStorageArgs { - return shardStorageArgs{ - generalConfig: testscommon.GetGeneralConfig(), - prefsConfig: config.PreferencesConfig{}, - shardCoordinator: &mock.ShardCoordinatorStub{}, - pathManagerHandler: &testscommon.PathManagerStub{}, - marshalizer: &mock.MarshalizerMock{}, - hasher: &hashingMocks.HasherMock{}, - currentEpoch: 0, - uint64Converter: &mock.Uint64ByteSliceConverterMock{}, - nodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, - nodeProcessingMode: common.Normal, - managedPeersHolder: &testscommon.ManagedPeersHolderStub{}, - } -} - func createDefaultEpochStartShardData(lastFinishedMetaBlockHash []byte, shardHeaderHash []byte) []block.EpochStartShardData { return []block.EpochStartShardData{ { @@ -1451,7 +1110,6 @@ func createPendingAndProcessedMiniBlocksScenario() scenarioData { expectedPendingMbsWithScheduled := []bootstrapStorage.PendingMiniBlocksInfo{ {ShardID: 0, MiniBlocksHashes: [][]byte{crossMbHeaders[1].Hash, crossMbHeaders[2].Hash, crossMbHeaders[3].Hash, crossMbHeaders[4].Hash, crossMbHeaders[0].Hash}}, } - expectedProcessedMbsWithScheduled := make([]bootstrapStorage.MiniBlocksInMeta, 0) headers := map[string]data.HeaderHandler{ lastFinishedMetaBlockHash: &block.MetaBlock{ @@ -1492,7 +1150,7 @@ func createPendingAndProcessedMiniBlocksScenario() scenarioData { expectedPendingMbs: expectedPendingMiniBlocks, expectedProcessedMbs: expectedProcessedMiniBlocks, expectedPendingMbsWithScheduled: expectedPendingMbsWithScheduled, - expectedProcessedMbsWithScheduled: expectedProcessedMbsWithScheduled, + expectedProcessedMbsWithScheduled: []bootstrapStorage.MiniBlocksInMeta{}, } } diff --git a/epochStart/bootstrap/storageProcess.go b/epochStart/bootstrap/storageProcess.go index 0f87b3626e7..809b0dfbb8b 100644 --- a/epochStart/bootstrap/storageProcess.go +++ b/epochStart/bootstrap/storageProcess.go @@ -404,19 +404,20 @@ func (sesb *storageEpochStartBootstrap) processNodesConfig(pubKey []byte) error shardId = sesb.genesisShardCoordinator.SelfId() } argsNewValidatorStatusSyncers := ArgsNewSyncValidatorStatus{ - DataPool: sesb.dataPool, - Marshalizer: sesb.coreComponentsHolder.InternalMarshalizer(), - RequestHandler: sesb.requestHandler, - ChanceComputer: sesb.rater, - GenesisNodesConfig: sesb.genesisNodesConfig, - NodeShuffler: sesb.nodeShuffler, - Hasher: sesb.coreComponentsHolder.Hasher(), - PubKey: pubKey, - ShardIdAsObserver: shardId, - ChanNodeStop: sesb.coreComponentsHolder.ChanStopNodeProcess(), - NodeTypeProvider: sesb.coreComponentsHolder.NodeTypeProvider(), - IsFullArchive: sesb.prefsConfig.FullArchive, - EnableEpochsHandler: sesb.coreComponentsHolder.EnableEpochsHandler(), + DataPool: sesb.dataPool, + Marshalizer: sesb.coreComponentsHolder.InternalMarshalizer(), + RequestHandler: sesb.requestHandler, + ChanceComputer: sesb.rater, + GenesisNodesConfig: sesb.genesisNodesConfig, + NodeShuffler: sesb.nodeShuffler, + Hasher: sesb.coreComponentsHolder.Hasher(), + PubKey: pubKey, + ShardIdAsObserver: shardId, + ChanNodeStop: sesb.coreComponentsHolder.ChanStopNodeProcess(), + NodeTypeProvider: 
sesb.coreComponentsHolder.NodeTypeProvider(), + IsFullArchive: sesb.prefsConfig.FullArchive, + EnableEpochsHandler: sesb.coreComponentsHolder.EnableEpochsHandler(), + NodesCoordinatorRegistryFactory: sesb.nodesCoordinatorRegistryFactory, } sesb.nodesConfigHandler, err = NewSyncValidatorStatus(argsNewValidatorStatusSyncers) if err != nil { diff --git a/epochStart/bootstrap/storageProcess_test.go b/epochStart/bootstrap/storageProcess_test.go index 78288156144..a59b0d125f2 100644 --- a/epochStart/bootstrap/storageProcess_test.go +++ b/epochStart/bootstrap/storageProcess_test.go @@ -22,6 +22,7 @@ import ( epochStartMocks "github.com/multiversx/mx-chain-go/testscommon/bootstrapMocks/epochStart" dataRetrieverMock "github.com/multiversx/mx-chain-go/testscommon/dataRetriever" "github.com/multiversx/mx-chain-go/testscommon/economicsmocks" + "github.com/multiversx/mx-chain-go/testscommon/genesisMocks" "github.com/stretchr/testify/assert" ) @@ -92,7 +93,7 @@ func TestStorageEpochStartBootstrap_BootstrapFromGenesis(t *testing.T) { return 1 }, } - args.GenesisNodesConfig = &mock.NodesSetupStub{ + args.GenesisNodesConfig = &genesisMocks.NodesSetupStub{ GetRoundDurationCalled: func() uint64 { return roundDuration }, @@ -116,7 +117,7 @@ func TestStorageEpochStartBootstrap_BootstrapMetablockNotFound(t *testing.T) { return 1 }, } - args.GenesisNodesConfig = &mock.NodesSetupStub{ + args.GenesisNodesConfig = &genesisMocks.NodesSetupStub{ GetRoundDurationCalled: func() uint64 { return roundDuration }, diff --git a/epochStart/bootstrap/syncValidatorStatus.go b/epochStart/bootstrap/syncValidatorStatus.go index 0a74d4151fb..0bcb9308311 100644 --- a/epochStart/bootstrap/syncValidatorStatus.go +++ b/epochStart/bootstrap/syncValidatorStatus.go @@ -38,19 +38,20 @@ type syncValidatorStatus struct { // ArgsNewSyncValidatorStatus holds the arguments needed for creating a new validator status process component type ArgsNewSyncValidatorStatus struct { - DataPool dataRetriever.PoolsHolder - Marshalizer marshal.Marshalizer - Hasher hashing.Hasher - RequestHandler process.RequestHandler - ChanceComputer nodesCoordinator.ChanceComputer - GenesisNodesConfig sharding.GenesisNodesSetupHandler - NodeShuffler nodesCoordinator.NodesShuffler - PubKey []byte - ShardIdAsObserver uint32 - ChanNodeStop chan endProcess.ArgEndProcess - NodeTypeProvider NodeTypeProviderHandler - IsFullArchive bool - EnableEpochsHandler common.EnableEpochsHandler + DataPool dataRetriever.PoolsHolder + Marshalizer marshal.Marshalizer + Hasher hashing.Hasher + RequestHandler process.RequestHandler + ChanceComputer nodesCoordinator.ChanceComputer + GenesisNodesConfig sharding.GenesisNodesSetupHandler + NodeShuffler nodesCoordinator.NodesShuffler + PubKey []byte + ShardIdAsObserver uint32 + ChanNodeStop chan endProcess.ArgEndProcess + NodeTypeProvider NodeTypeProviderHandler + IsFullArchive bool + EnableEpochsHandler common.EnableEpochsHandler + NodesCoordinatorRegistryFactory nodesCoordinator.NodesCoordinatorRegistryFactory } // NewSyncValidatorStatus creates a new validator status process component @@ -111,26 +112,27 @@ func NewSyncValidatorStatus(args ArgsNewSyncValidatorStatus) (*syncValidatorStat s.memDB = disabled.CreateMemUnit() argsNodesCoordinator := nodesCoordinator.ArgNodesCoordinator{ - ShardConsensusGroupSize: int(args.GenesisNodesConfig.GetShardConsensusGroupSize()), - MetaConsensusGroupSize: int(args.GenesisNodesConfig.GetMetaConsensusGroupSize()), - Marshalizer: args.Marshalizer, - Hasher: args.Hasher, - Shuffler: args.NodeShuffler, - 
EpochStartNotifier: &disabled.EpochStartNotifier{}, - BootStorer: s.memDB, - ShardIDAsObserver: args.ShardIdAsObserver, - NbShards: args.GenesisNodesConfig.NumberOfShards(), - EligibleNodes: eligibleValidators, - WaitingNodes: waitingValidators, - SelfPublicKey: args.PubKey, - ConsensusGroupCache: consensusGroupCache, - ShuffledOutHandler: disabled.NewShuffledOutHandler(), - ChanStopNode: args.ChanNodeStop, - NodeTypeProvider: args.NodeTypeProvider, - IsFullArchive: args.IsFullArchive, - EnableEpochsHandler: args.EnableEpochsHandler, - ValidatorInfoCacher: s.dataPool.CurrentEpochValidatorInfo(), - GenesisNodesSetupHandler: s.genesisNodesConfig, + ShardConsensusGroupSize: int(args.GenesisNodesConfig.GetShardConsensusGroupSize()), + MetaConsensusGroupSize: int(args.GenesisNodesConfig.GetMetaConsensusGroupSize()), + Marshalizer: args.Marshalizer, + Hasher: args.Hasher, + Shuffler: args.NodeShuffler, + EpochStartNotifier: &disabled.EpochStartNotifier{}, + BootStorer: s.memDB, + ShardIDAsObserver: args.ShardIdAsObserver, + NbShards: args.GenesisNodesConfig.NumberOfShards(), + EligibleNodes: eligibleValidators, + WaitingNodes: waitingValidators, + SelfPublicKey: args.PubKey, + ConsensusGroupCache: consensusGroupCache, + ShuffledOutHandler: disabled.NewShuffledOutHandler(), + ChanStopNode: args.ChanNodeStop, + NodeTypeProvider: args.NodeTypeProvider, + IsFullArchive: args.IsFullArchive, + EnableEpochsHandler: args.EnableEpochsHandler, + ValidatorInfoCacher: s.dataPool.CurrentEpochValidatorInfo(), + GenesisNodesSetupHandler: s.genesisNodesConfig, + NodesCoordinatorRegistryFactory: args.NodesCoordinatorRegistryFactory, } baseNodesCoordinator, err := nodesCoordinator.NewIndexHashedNodesCoordinator(argsNodesCoordinator) if err != nil { @@ -151,7 +153,7 @@ func NewSyncValidatorStatus(args ArgsNewSyncValidatorStatus) (*syncValidatorStat func (s *syncValidatorStatus) NodesConfigFromMetaBlock( currMetaBlock data.HeaderHandler, prevMetaBlock data.HeaderHandler, -) (*nodesCoordinator.NodesCoordinatorRegistry, uint32, []*block.MiniBlock, error) { +) (nodesCoordinator.NodesCoordinatorRegistryHandler, uint32, []*block.MiniBlock, error) { if currMetaBlock.GetNonce() > 1 && !currMetaBlock.IsStartOfEpochBlock() { return nil, 0, nil, epochStart.ErrNotEpochStartBlock } @@ -177,8 +179,8 @@ func (s *syncValidatorStatus) NodesConfigFromMetaBlock( return nil, 0, nil, err } - nodesConfig := s.nodeCoordinator.NodesCoordinatorToRegistry() - nodesConfig.CurrentEpoch = currMetaBlock.GetEpoch() + nodesConfig := s.nodeCoordinator.NodesCoordinatorToRegistry(currMetaBlock.GetEpoch()) + nodesConfig.SetCurrentEpoch(currMetaBlock.GetEpoch()) return nodesConfig, selfShardId, allMiniblocks, nil } diff --git a/epochStart/bootstrap/syncValidatorStatus_test.go b/epochStart/bootstrap/syncValidatorStatus_test.go index f7e409af875..7cfe6061c77 100644 --- a/epochStart/bootstrap/syncValidatorStatus_test.go +++ b/epochStart/bootstrap/syncValidatorStatus_test.go @@ -17,6 +17,7 @@ import ( epochStartMocks "github.com/multiversx/mx-chain-go/testscommon/bootstrapMocks/epochStart" dataRetrieverMock "github.com/multiversx/mx-chain-go/testscommon/dataRetriever" "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" + "github.com/multiversx/mx-chain-go/testscommon/genesisMocks" "github.com/multiversx/mx-chain-go/testscommon/hashingMocks" "github.com/multiversx/mx-chain-go/testscommon/nodeTypeProviderMock" "github.com/multiversx/mx-chain-go/testscommon/shardingMocks" @@ -246,6 +247,11 @@ func 
TestSyncValidatorStatus_getPeerBlockBodyForMeta(t *testing.T) { } func getSyncValidatorStatusArgs() ArgsNewSyncValidatorStatus { + nodesCoordinatorRegistryFactory, _ := nodesCoordinator.NewNodesCoordinatorRegistryFactory( + &mock.MarshalizerMock{}, + 444, + ) + return ArgsNewSyncValidatorStatus{ DataPool: &dataRetrieverMock.PoolsHolderStub{ MiniBlocksCalled: func() storage.Cacher { @@ -259,7 +265,7 @@ func getSyncValidatorStatusArgs() ArgsNewSyncValidatorStatus { Hasher: &hashingMocks.HasherMock{}, RequestHandler: &testscommon.RequestHandlerStub{}, ChanceComputer: &shardingMocks.NodesCoordinatorStub{}, - GenesisNodesConfig: &mock.NodesSetupStub{ + GenesisNodesConfig: &genesisMocks.NodesSetupStub{ NumberOfShardsCalled: func() uint32 { return 1 }, @@ -301,12 +307,13 @@ func getSyncValidatorStatusArgs() ArgsNewSyncValidatorStatus { return 2 }, }, - NodeShuffler: &shardingMocks.NodeShufflerMock{}, - PubKey: []byte("public key"), - ShardIdAsObserver: 0, - ChanNodeStop: endProcess.GetDummyEndProcessChannel(), - NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, - IsFullArchive: false, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + NodeShuffler: &shardingMocks.NodeShufflerMock{}, + PubKey: []byte("public key"), + ShardIdAsObserver: 0, + ChanNodeStop: endProcess.GetDummyEndProcessChannel(), + NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, + IsFullArchive: false, + EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + NodesCoordinatorRegistryFactory: nodesCoordinatorRegistryFactory, } } diff --git a/epochStart/dtos.go b/epochStart/dtos.go new file mode 100644 index 00000000000..ea5aa95f626 --- /dev/null +++ b/epochStart/dtos.go @@ -0,0 +1,17 @@ +package epochStart + +import ( + "math/big" + + "github.com/multiversx/mx-chain-go/state" +) + +// OwnerData is a struct containing relevant information about owner's nodes data +type OwnerData struct { + NumStakedNodes int64 + NumActiveNodes int64 + TotalTopUp *big.Int + TopUpPerNode *big.Int + AuctionList []state.ValidatorInfoHandler + Qualified bool +} diff --git a/epochStart/errors.go b/epochStart/errors.go index 3f705f585fd..ca115e939f4 100644 --- a/epochStart/errors.go +++ b/epochStart/errors.go @@ -281,6 +281,9 @@ var ErrSystemValidatorSCCall = errors.New("system validator sc call failed") // ErrOwnerDoesntHaveEligibleNodesInEpoch signals that the owner doesn't have any eligible nodes in epoch var ErrOwnerDoesntHaveEligibleNodesInEpoch = errors.New("owner has no eligible nodes in epoch") +// ErrOwnerDoesntHaveNodesInEpoch signals that the owner has no nodes in epoch +var ErrOwnerDoesntHaveNodesInEpoch = errors.New("owner has no nodes in epoch") + // ErrInvalidMaxHardCapForMissingNodes signals that the maximum hardcap value for missing nodes is invalid var ErrInvalidMaxHardCapForMissingNodes = errors.New("invalid max hardcap for missing nodes") @@ -331,3 +334,21 @@ var ErrNilManagedPeersHolder = errors.New("nil managed peers holder") // ErrNilExecutionOrderHandler signals that a nil execution order handler has been provided var ErrNilExecutionOrderHandler = errors.New("nil execution order handler") + +// ErrReceivedNewListNodeInStakingV4 signals that a new node has been assigned in common.NewList instead of common.AuctionList after staking v4 +var ErrReceivedNewListNodeInStakingV4 = errors.New("new node has been assigned in common.NewList instead of common.AuctionList after staking v4") + +// ErrNilMaxNodesChangeConfigProvider signals that a nil nodes config provider has 
been provided +var ErrNilMaxNodesChangeConfigProvider = errors.New("nil nodes config provider has been provided") + +// ErrNilAuctionListSelector signals that a nil auction list selector has been provided +var ErrNilAuctionListSelector = errors.New("nil auction list selector has been provided") + +// ErrOwnerHasNoStakedNode signals that the owner has no staked node +var ErrOwnerHasNoStakedNode = errors.New("owner has no staked node") + +// ErrUint32SubtractionOverflow signals uint32 subtraction overflowed +var ErrUint32SubtractionOverflow = errors.New("uint32 subtraction overflowed") + +// ErrReceivedAuctionValidatorsBeforeStakingV4 signals that an auction node has been provided before enabling staking v4 +var ErrReceivedAuctionValidatorsBeforeStakingV4 = errors.New("auction node has been provided before enabling staking v4") diff --git a/epochStart/interface.go b/epochStart/interface.go index fc4364afc43..06f04c11117 100644 --- a/epochStart/interface.go +++ b/epochStart/interface.go @@ -7,6 +7,7 @@ import ( "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-core-go/data/block" + "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/state" vmcommon "github.com/multiversx/mx-chain-vm-common-go" ) @@ -85,14 +86,6 @@ type Notifier interface { IsInterfaceNil() bool } -// ValidatorStatisticsProcessorHandler defines the actions for processing validator statistics -// needed in the epoch events -type ValidatorStatisticsProcessorHandler interface { - Process(info data.ShardValidatorInfoHandler) error - Commit() ([]byte, error) - IsInterfaceNil() bool -} - // ValidatorInfoCreator defines the methods to create a validator info type ValidatorInfoCreator interface { PeerAccountToValidatorInfo(peerAccount state.PeerAccountHandler) *state.ValidatorInfo @@ -161,9 +154,12 @@ type StakingDataProvider interface { GetTotalStakeEligibleNodes() *big.Int GetTotalTopUpStakeEligibleNodes() *big.Int GetNodeStakedTopUp(blsKey []byte) (*big.Int, error) - PrepareStakingDataForRewards(keys map[uint32][][]byte) error - FillValidatorInfo(blsKey []byte) error - ComputeUnQualifiedNodes(validatorInfos map[uint32][]*state.ValidatorInfo) ([][]byte, map[string][][]byte, error) + PrepareStakingData(validatorsMap state.ShardValidatorsInfoMapHandler) error + FillValidatorInfo(validator state.ValidatorInfoHandler) error + ComputeUnQualifiedNodes(validatorInfos state.ShardValidatorsInfoMapHandler) ([][]byte, map[string][][]byte, error) + GetBlsKeyOwner(blsKey []byte) (string, error) + GetNumOfValidatorsInCurrentEpoch() uint32 + GetOwnersData() map[string]*OwnerData Clean() IsInterfaceNil() bool } @@ -186,10 +182,10 @@ type EpochEconomicsDataProvider interface { // RewardsCreator defines the functionality for the metachain to create rewards at end of epoch type RewardsCreator interface { CreateRewardsMiniBlocks( - metaBlock data.MetaHeaderHandler, validatorsInfo map[uint32][]*state.ValidatorInfo, computedEconomics *block.Economics, + metaBlock data.MetaHeaderHandler, validatorsInfo state.ShardValidatorsInfoMapHandler, computedEconomics *block.Economics, ) (block.MiniBlockSlice, error) VerifyRewardsMiniBlocks( - metaBlock data.MetaHeaderHandler, validatorsInfo map[uint32][]*state.ValidatorInfo, computedEconomics *block.Economics, + metaBlock data.MetaHeaderHandler, validatorsInfo state.ShardValidatorsInfoMapHandler, computedEconomics *block.Economics, ) error GetProtocolSustainabilityRewards() *big.Int GetLocalTxCache() TransactionCacher @@ -214,3 +210,21 @@ type EpochStartNotifier 
interface { RegisterHandler(handler ActionHandler) IsInterfaceNil() bool } + +// MaxNodesChangeConfigProvider provides all config.MaxNodesChangeConfig, as well as +// the current config.MaxNodesChangeConfig based on the current epoch +type MaxNodesChangeConfigProvider interface { + GetAllNodesConfig() []config.MaxNodesChangeConfig + GetCurrentNodesConfig() config.MaxNodesChangeConfig + EpochConfirmed(epoch uint32, round uint64) + IsInterfaceNil() bool +} + +// AuctionListSelector handles selection of nodes from auction list to be sent to waiting list, based on their top up +type AuctionListSelector interface { + SelectNodesFromAuctionList( + validatorsInfoMap state.ShardValidatorsInfoMapHandler, + randomness []byte, + ) error + IsInterfaceNil() bool +} diff --git a/epochStart/metachain/auctionListDisplayer.go b/epochStart/metachain/auctionListDisplayer.go new file mode 100644 index 00000000000..d64a156a51c --- /dev/null +++ b/epochStart/metachain/auctionListDisplayer.go @@ -0,0 +1,232 @@ +package metachain + +import ( + "math/big" + "strconv" + "strings" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-core-go/display" + "github.com/multiversx/mx-chain-go/config" + errorsCommon "github.com/multiversx/mx-chain-go/errors" + "github.com/multiversx/mx-chain-go/state" + logger "github.com/multiversx/mx-chain-logger-go" +) + +const maxPubKeyDisplayableLen = 20 +const maxNumOfDecimalsToDisplay = 5 + +type auctionListDisplayer struct { + softAuctionConfig *auctionConfig + tableDisplayer TableDisplayHandler + validatorPubKeyConverter core.PubkeyConverter + addressPubKeyConverter core.PubkeyConverter +} + +// ArgsAuctionListDisplayer is a struct placeholder for arguments needed to create an auction list displayer +type ArgsAuctionListDisplayer struct { + TableDisplayHandler TableDisplayHandler + ValidatorPubKeyConverter core.PubkeyConverter + AddressPubKeyConverter core.PubkeyConverter + AuctionConfig config.SoftAuctionConfig + Denomination int +} + +// NewAuctionListDisplayer creates an auction list data displayer, useful for debugging purposes during selection process +func NewAuctionListDisplayer(args ArgsAuctionListDisplayer) (*auctionListDisplayer, error) { + softAuctionConfig, err := getAuctionConfig(args.AuctionConfig, args.Denomination) + if err != nil { + return nil, err + } + + err = checkDisplayerNilArgs(args) + if err != nil { + return nil, err + } + + return &auctionListDisplayer{ + softAuctionConfig: softAuctionConfig, + tableDisplayer: args.TableDisplayHandler, + validatorPubKeyConverter: args.ValidatorPubKeyConverter, + addressPubKeyConverter: args.AddressPubKeyConverter, + }, nil +} + +func checkDisplayerNilArgs(args ArgsAuctionListDisplayer) error { + if check.IfNil(args.TableDisplayHandler) { + return errNilTableDisplayHandler + } + if check.IfNil(args.ValidatorPubKeyConverter) { + return errorsCommon.ErrNilValidatorPublicKeyConverter + } + if check.IfNil(args.AddressPubKeyConverter) { + return errorsCommon.ErrNilAddressPublicKeyConverter + } + + return nil +} + +// DisplayOwnersData will display initial owners data for auction selection +func (ald *auctionListDisplayer) DisplayOwnersData(ownersData map[string]*OwnerAuctionData) { + if log.GetLevel() > logger.LogDebug { + return + } + + tableHeader := []string{ + "Owner", + "Num staked nodes", + "Num active nodes", + "Num auction nodes", + "Total top up", + "Top up per node", + "Auction list nodes", + } + + lines := make([]*display.LineData, 
0, len(ownersData)) + for ownerPubKey, owner := range ownersData { + line := []string{ + ald.addressPubKeyConverter.SilentEncode([]byte(ownerPubKey), log), + strconv.Itoa(int(owner.numStakedNodes)), + strconv.Itoa(int(owner.numActiveNodes)), + strconv.Itoa(int(owner.numAuctionNodes)), + getPrettyValue(owner.totalTopUp, ald.softAuctionConfig.denominator), + getPrettyValue(owner.topUpPerNode, ald.softAuctionConfig.denominator), + ald.getShortDisplayableBlsKeys(owner.auctionList), + } + lines = append(lines, display.NewLineData(false, line)) + } + + ald.tableDisplayer.DisplayTable(tableHeader, lines, "Initial nodes config in auction list") +} + +func getPrettyValue(val *big.Int, denominator *big.Int) string { + first := big.NewInt(0).Div(val, denominator).String() + decimals := big.NewInt(0).Mod(val, denominator).String() + + zeroesCt := (len(denominator.String()) - len(decimals)) - 1 + zeroesCt = core.MaxInt(zeroesCt, 0) + zeroes := strings.Repeat("0", zeroesCt) + + second := zeroes + decimals + if len(second) > maxNumOfDecimalsToDisplay { + second = second[:maxNumOfDecimalsToDisplay] + } + + return first + "." + second +} + +func (ald *auctionListDisplayer) getShortDisplayableBlsKeys(list []state.ValidatorInfoHandler) string { + pubKeys := "" + + for idx, validator := range list { + pubKeys += ald.getShortKey(validator.GetPublicKey()) + addDelimiter := idx != len(list)-1 + if addDelimiter { + pubKeys += ", " + } + } + + return pubKeys +} + +func (ald *auctionListDisplayer) getShortKey(pubKey []byte) string { + pubKeyHex := ald.validatorPubKeyConverter.SilentEncode(pubKey, log) + displayablePubKey := pubKeyHex + + pubKeyLen := len(displayablePubKey) + if pubKeyLen > maxPubKeyDisplayableLen { + displayablePubKey = pubKeyHex[:maxPubKeyDisplayableLen/2] + "..." 
+ pubKeyHex[pubKeyLen-maxPubKeyDisplayableLen/2:] + } + + return displayablePubKey +} + +// DisplayOwnersSelectedNodes will display owners' selected nodes +func (ald *auctionListDisplayer) DisplayOwnersSelectedNodes(ownersData map[string]*OwnerAuctionData) { + if log.GetLevel() > logger.LogDebug { + return + } + + tableHeader := []string{ + "Owner", + "Num staked nodes", + "TopUp per node", + "Total top up", + "Num auction nodes", + "Num qualified auction nodes", + "Num active nodes", + "Qualified top up per node", + "Selected auction list nodes", + } + + lines := make([]*display.LineData, 0, len(ownersData)) + for ownerPubKey, owner := range ownersData { + line := []string{ + ald.addressPubKeyConverter.SilentEncode([]byte(ownerPubKey), log), + strconv.Itoa(int(owner.numStakedNodes)), + getPrettyValue(owner.topUpPerNode, ald.softAuctionConfig.denominator), + getPrettyValue(owner.totalTopUp, ald.softAuctionConfig.denominator), + strconv.Itoa(int(owner.numAuctionNodes)), + strconv.Itoa(int(owner.numQualifiedAuctionNodes)), + strconv.Itoa(int(owner.numActiveNodes)), + getPrettyValue(owner.qualifiedTopUpPerNode, ald.softAuctionConfig.denominator), + ald.getShortDisplayableBlsKeys(owner.auctionList[:owner.numQualifiedAuctionNodes]), + } + lines = append(lines, display.NewLineData(false, line)) + } + + ald.tableDisplayer.DisplayTable(tableHeader, lines, "Selected nodes config from auction list") +} + +// DisplayAuctionList will display the final selected auction nodes +func (ald *auctionListDisplayer) DisplayAuctionList( + auctionList []state.ValidatorInfoHandler, + ownersData map[string]*OwnerAuctionData, + numOfSelectedNodes uint32, +) { + if log.GetLevel() > logger.LogDebug { + return + } + + tableHeader := []string{"Owner", "Registered key", "Qualified TopUp per node"} + lines := make([]*display.LineData, 0, len(auctionList)) + blsKeysOwnerMap := getBlsKeyOwnerMap(ownersData) + for idx, validator := range auctionList { + pubKey := validator.GetPublicKey() + pubKeyEncoded := ald.validatorPubKeyConverter.SilentEncode(pubKey, log) + owner, found := blsKeysOwnerMap[string(pubKey)] + if !found { + log.Error("auctionListSelector.displayAuctionList could not find owner for", + "bls key", pubKeyEncoded) + continue + } + + qualifiedTopUp := ownersData[owner].qualifiedTopUpPerNode + horizontalLine := uint32(idx) == numOfSelectedNodes-1 + line := display.NewLineData(horizontalLine, []string{ + ald.addressPubKeyConverter.SilentEncode([]byte(owner), log), + pubKeyEncoded, + getPrettyValue(qualifiedTopUp, ald.softAuctionConfig.denominator), + }) + lines = append(lines, line) + } + + ald.tableDisplayer.DisplayTable(tableHeader, lines, "Final selected nodes from auction list") +} + +func getBlsKeyOwnerMap(ownersData map[string]*OwnerAuctionData) map[string]string { + ret := make(map[string]string) + for ownerPubKey, owner := range ownersData { + for _, blsKey := range owner.auctionList { + ret[string(blsKey.GetPublicKey())] = ownerPubKey + } + } + + return ret +} + +// IsInterfaceNil checks if the underlying pointer is nil +func (ald *auctionListDisplayer) IsInterfaceNil() bool { + return ald == nil +} diff --git a/epochStart/metachain/auctionListDisplayer_test.go b/epochStart/metachain/auctionListDisplayer_test.go new file mode 100644 index 00000000000..68d74e08e41 --- /dev/null +++ b/epochStart/metachain/auctionListDisplayer_test.go @@ -0,0 +1,288 @@ +package metachain + +import ( + "math" + "math/big" + "testing" + + "github.com/multiversx/mx-chain-core-go/core" + 
"github.com/multiversx/mx-chain-core-go/display" + "github.com/multiversx/mx-chain-go/state" + "github.com/multiversx/mx-chain-go/testscommon" + logger "github.com/multiversx/mx-chain-logger-go" + "github.com/stretchr/testify/require" +) + +func createDisplayerArgs() ArgsAuctionListDisplayer { + return ArgsAuctionListDisplayer{ + TableDisplayHandler: NewTableDisplayer(), + ValidatorPubKeyConverter: &testscommon.PubkeyConverterMock{}, + AddressPubKeyConverter: &testscommon.PubkeyConverterMock{}, + AuctionConfig: createSoftAuctionConfig(), + Denomination: 0, + } +} + +func TestNewAuctionListDisplayer(t *testing.T) { + t.Parallel() + + t.Run("invalid auction config", func(t *testing.T) { + args := createDisplayerArgs() + args.AuctionConfig.MaxNumberOfIterations = 0 + ald, err := NewAuctionListDisplayer(args) + require.Nil(t, ald) + requireInvalidValueError(t, err, "for max number of iterations") + }) + + t.Run("should work", func(t *testing.T) { + args := createDisplayerArgs() + ald, err := NewAuctionListDisplayer(args) + require.Nil(t, err) + require.False(t, ald.IsInterfaceNil()) + }) +} + +func TestAuctionListDisplayer_DisplayOwnersData(t *testing.T) { + _ = logger.SetLogLevel("*:DEBUG") + defer func() { + _ = logger.SetLogLevel("*:INFO") + }() + + owner := []byte("owner") + validator := &state.ValidatorInfo{PublicKey: []byte("pubKey")} + wasDisplayCalled := false + + args := createDisplayerArgs() + args.AddressPubKeyConverter = &testscommon.PubkeyConverterStub{ + SilentEncodeCalled: func(pkBytes []byte, log core.Logger) string { + require.Equal(t, owner, pkBytes) + return "ownerEncoded" + }, + } + args.ValidatorPubKeyConverter = &testscommon.PubkeyConverterStub{ + SilentEncodeCalled: func(pkBytes []byte, log core.Logger) string { + require.Equal(t, validator.PublicKey, pkBytes) + return "pubKeyEncoded" + }, + } + args.TableDisplayHandler = &testscommon.TableDisplayerMock{ + DisplayTableCalled: func(tableHeader []string, lines []*display.LineData, message string) { + require.Equal(t, []string{ + "Owner", + "Num staked nodes", + "Num active nodes", + "Num auction nodes", + "Total top up", + "Top up per node", + "Auction list nodes", + }, tableHeader) + require.Equal(t, "Initial nodes config in auction list", message) + require.Equal(t, []*display.LineData{ + { + Values: []string{"ownerEncoded", "4", "4", "1", "100.0", "25.0", "pubKeyEncoded"}, + HorizontalRuleAfter: false, + }, + }, lines) + + wasDisplayCalled = true + }, + } + ald, _ := NewAuctionListDisplayer(args) + + ownersData := map[string]*OwnerAuctionData{ + "owner": { + numStakedNodes: 4, + numActiveNodes: 4, + numAuctionNodes: 1, + numQualifiedAuctionNodes: 4, + totalTopUp: big.NewInt(100), + topUpPerNode: big.NewInt(25), + qualifiedTopUpPerNode: big.NewInt(15), + auctionList: []state.ValidatorInfoHandler{&state.ValidatorInfo{PublicKey: []byte("pubKey")}}, + }, + } + + ald.DisplayOwnersData(ownersData) + require.True(t, wasDisplayCalled) +} + +func TestAuctionListDisplayer_DisplayOwnersSelectedNodes(t *testing.T) { + _ = logger.SetLogLevel("*:DEBUG") + defer func() { + _ = logger.SetLogLevel("*:INFO") + }() + + owner := []byte("owner") + validator := &state.ValidatorInfo{PublicKey: []byte("pubKey")} + wasDisplayCalled := false + + args := createDisplayerArgs() + args.AddressPubKeyConverter = &testscommon.PubkeyConverterStub{ + SilentEncodeCalled: func(pkBytes []byte, log core.Logger) string { + require.Equal(t, owner, pkBytes) + return "ownerEncoded" + }, + } + args.ValidatorPubKeyConverter = &testscommon.PubkeyConverterStub{ + 
SilentEncodeCalled: func(pkBytes []byte, log core.Logger) string { + require.Equal(t, validator.PublicKey, pkBytes) + return "pubKeyEncoded" + }, + } + args.TableDisplayHandler = &testscommon.TableDisplayerMock{ + DisplayTableCalled: func(tableHeader []string, lines []*display.LineData, message string) { + require.Equal(t, []string{ + "Owner", + "Num staked nodes", + "TopUp per node", + "Total top up", + "Num auction nodes", + "Num qualified auction nodes", + "Num active nodes", + "Qualified top up per node", + "Selected auction list nodes", + }, tableHeader) + require.Equal(t, "Selected nodes config from auction list", message) + require.Equal(t, []*display.LineData{ + { + Values: []string{"ownerEncoded", "4", "25.0", "100.0", "1", "1", "4", "15.0", "pubKeyEncoded"}, + HorizontalRuleAfter: false, + }, + }, lines) + + wasDisplayCalled = true + }, + } + ald, _ := NewAuctionListDisplayer(args) + + ownersData := map[string]*OwnerAuctionData{ + "owner": { + numStakedNodes: 4, + numActiveNodes: 4, + numAuctionNodes: 1, + numQualifiedAuctionNodes: 1, + totalTopUp: big.NewInt(100), + topUpPerNode: big.NewInt(25), + qualifiedTopUpPerNode: big.NewInt(15), + auctionList: []state.ValidatorInfoHandler{&state.ValidatorInfo{PublicKey: []byte("pubKey")}}, + }, + } + + ald.DisplayOwnersSelectedNodes(ownersData) + require.True(t, wasDisplayCalled) +} + +func TestAuctionListDisplayer_DisplayAuctionList(t *testing.T) { + _ = logger.SetLogLevel("*:DEBUG") + defer func() { + _ = logger.SetLogLevel("*:INFO") + }() + + owner := []byte("owner") + validator := &state.ValidatorInfo{PublicKey: []byte("pubKey")} + wasDisplayCalled := false + + args := createDisplayerArgs() + args.AddressPubKeyConverter = &testscommon.PubkeyConverterStub{ + SilentEncodeCalled: func(pkBytes []byte, log core.Logger) string { + require.Equal(t, owner, pkBytes) + return "ownerEncoded" + }, + } + args.ValidatorPubKeyConverter = &testscommon.PubkeyConverterStub{ + SilentEncodeCalled: func(pkBytes []byte, log core.Logger) string { + require.Equal(t, validator.PublicKey, pkBytes) + return "pubKeyEncoded" + }, + } + args.TableDisplayHandler = &testscommon.TableDisplayerMock{ + DisplayTableCalled: func(tableHeader []string, lines []*display.LineData, message string) { + require.Equal(t, []string{ + "Owner", + "Registered key", + "Qualified TopUp per node", + }, tableHeader) + require.Equal(t, "Final selected nodes from auction list", message) + require.Equal(t, []*display.LineData{ + { + Values: []string{"ownerEncoded", "pubKeyEncoded", "15.0"}, + HorizontalRuleAfter: true, + }, + }, lines) + + wasDisplayCalled = true + }, + } + ald, _ := NewAuctionListDisplayer(args) + + auctionList := []state.ValidatorInfoHandler{&state.ValidatorInfo{PublicKey: []byte("pubKey")}} + ownersData := map[string]*OwnerAuctionData{ + "owner": { + numStakedNodes: 4, + numActiveNodes: 4, + numAuctionNodes: 1, + numQualifiedAuctionNodes: 1, + totalTopUp: big.NewInt(100), + topUpPerNode: big.NewInt(25), + qualifiedTopUpPerNode: big.NewInt(15), + auctionList: auctionList, + }, + } + + ald.DisplayAuctionList(auctionList, ownersData, 1) + require.True(t, wasDisplayCalled) +} + +func TestGetPrettyValue(t *testing.T) { + t.Parallel() + + require.Equal(t, "1234.0", getPrettyValue(big.NewInt(1234), big.NewInt(1))) + require.Equal(t, "123.4", getPrettyValue(big.NewInt(1234), big.NewInt(10))) + require.Equal(t, "12.34", getPrettyValue(big.NewInt(1234), big.NewInt(100))) + require.Equal(t, "1.234", getPrettyValue(big.NewInt(1234), big.NewInt(1000))) + require.Equal(t, "0.1234", 
getPrettyValue(big.NewInt(1234), big.NewInt(10000))) + require.Equal(t, "0.01234", getPrettyValue(big.NewInt(1234), big.NewInt(100000))) + require.Equal(t, "0.00123", getPrettyValue(big.NewInt(1234), big.NewInt(1000000))) + require.Equal(t, "0.00012", getPrettyValue(big.NewInt(1234), big.NewInt(10000000))) + require.Equal(t, "0.00001", getPrettyValue(big.NewInt(1234), big.NewInt(100000000))) + require.Equal(t, "0.00000", getPrettyValue(big.NewInt(1234), big.NewInt(1000000000))) + require.Equal(t, "0.00000", getPrettyValue(big.NewInt(1234), big.NewInt(10000000000))) + + require.Equal(t, "1.0", getPrettyValue(big.NewInt(1), big.NewInt(1))) + require.Equal(t, "0.1", getPrettyValue(big.NewInt(1), big.NewInt(10))) + require.Equal(t, "0.01", getPrettyValue(big.NewInt(1), big.NewInt(100))) + require.Equal(t, "0.001", getPrettyValue(big.NewInt(1), big.NewInt(1000))) + require.Equal(t, "0.0001", getPrettyValue(big.NewInt(1), big.NewInt(10000))) + require.Equal(t, "0.00001", getPrettyValue(big.NewInt(1), big.NewInt(100000))) + require.Equal(t, "0.00000", getPrettyValue(big.NewInt(1), big.NewInt(1000000))) + require.Equal(t, "0.00000", getPrettyValue(big.NewInt(1), big.NewInt(10000000))) + + oneEGLD := big.NewInt(1000000000000000000) + denominationEGLD := big.NewInt(int64(math.Pow10(18))) + + require.Equal(t, "0.00000", getPrettyValue(big.NewInt(0), denominationEGLD)) + require.Equal(t, "1.00000", getPrettyValue(oneEGLD, denominationEGLD)) + require.Equal(t, "1.10000", getPrettyValue(big.NewInt(1100000000000000000), denominationEGLD)) + require.Equal(t, "1.10000", getPrettyValue(big.NewInt(1100000000000000001), denominationEGLD)) + require.Equal(t, "1.11000", getPrettyValue(big.NewInt(1110000000000000001), denominationEGLD)) + require.Equal(t, "0.11100", getPrettyValue(big.NewInt(111000000000000001), denominationEGLD)) + require.Equal(t, "0.01110", getPrettyValue(big.NewInt(11100000000000001), denominationEGLD)) + require.Equal(t, "0.00111", getPrettyValue(big.NewInt(1110000000000001), denominationEGLD)) + require.Equal(t, "0.00011", getPrettyValue(big.NewInt(111000000000001), denominationEGLD)) + require.Equal(t, "0.00001", getPrettyValue(big.NewInt(11100000000001), denominationEGLD)) + require.Equal(t, "0.00000", getPrettyValue(big.NewInt(1110000000001), denominationEGLD)) + require.Equal(t, "0.00000", getPrettyValue(big.NewInt(111000000001), denominationEGLD)) + + require.Equal(t, "2.00000", getPrettyValue(big.NewInt(0).Mul(oneEGLD, big.NewInt(2)), denominationEGLD)) + require.Equal(t, "20.00000", getPrettyValue(big.NewInt(0).Mul(oneEGLD, big.NewInt(20)), denominationEGLD)) + require.Equal(t, "2000000.00000", getPrettyValue(big.NewInt(0).Mul(oneEGLD, big.NewInt(2000000)), denominationEGLD)) + + require.Equal(t, "3.22220", getPrettyValue(big.NewInt(0).Add(oneEGLD, big.NewInt(2222200000000000000)), denominationEGLD)) + require.Equal(t, "1.22222", getPrettyValue(big.NewInt(0).Add(oneEGLD, big.NewInt(222220000000000000)), denominationEGLD)) + require.Equal(t, "1.02222", getPrettyValue(big.NewInt(0).Add(oneEGLD, big.NewInt(22222000000000000)), denominationEGLD)) + require.Equal(t, "1.00222", getPrettyValue(big.NewInt(0).Add(oneEGLD, big.NewInt(2222200000000000)), denominationEGLD)) + require.Equal(t, "1.00022", getPrettyValue(big.NewInt(0).Add(oneEGLD, big.NewInt(222220000000000)), denominationEGLD)) + require.Equal(t, "1.00002", getPrettyValue(big.NewInt(0).Add(oneEGLD, big.NewInt(22222000000000)), denominationEGLD)) + require.Equal(t, "1.00000", getPrettyValue(big.NewInt(0).Add(oneEGLD, 
big.NewInt(2222200000000)), denominationEGLD)) + require.Equal(t, "1.00000", getPrettyValue(big.NewInt(0).Add(oneEGLD, big.NewInt(222220000000)), denominationEGLD)) +} diff --git a/epochStart/metachain/auctionListSelector.go b/epochStart/metachain/auctionListSelector.go new file mode 100644 index 00000000000..4b7c353a180 --- /dev/null +++ b/epochStart/metachain/auctionListSelector.go @@ -0,0 +1,422 @@ +package metachain + +import ( + "fmt" + "math/big" + "strings" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/epochStart" + "github.com/multiversx/mx-chain-go/process" + "github.com/multiversx/mx-chain-go/sharding" + "github.com/multiversx/mx-chain-go/state" +) + +// OwnerAuctionData holds necessary auction data for an owner +type OwnerAuctionData struct { + numStakedNodes int64 + numActiveNodes int64 + numAuctionNodes int64 + numQualifiedAuctionNodes int64 + totalTopUp *big.Int + topUpPerNode *big.Int + qualifiedTopUpPerNode *big.Int + auctionList []state.ValidatorInfoHandler +} + +type auctionConfig struct { + step *big.Int + minTopUp *big.Int + maxTopUp *big.Int + denominator *big.Int + maxNumberOfIterations uint64 +} + +type auctionListSelector struct { + shardCoordinator sharding.Coordinator + stakingDataProvider epochStart.StakingDataProvider + nodesConfigProvider epochStart.MaxNodesChangeConfigProvider + auctionListDisplayer AuctionListDisplayHandler + softAuctionConfig *auctionConfig +} + +// AuctionListSelectorArgs is a struct placeholder for all arguments required to create an auctionListSelector +type AuctionListSelectorArgs struct { + ShardCoordinator sharding.Coordinator + StakingDataProvider epochStart.StakingDataProvider + MaxNodesChangeConfigProvider epochStart.MaxNodesChangeConfigProvider + AuctionListDisplayHandler AuctionListDisplayHandler + SoftAuctionConfig config.SoftAuctionConfig + Denomination int +} + +// NewAuctionListSelector will create a new auctionListSelector, which handles selection of nodes from auction list based +// on their top up +func NewAuctionListSelector(args AuctionListSelectorArgs) (*auctionListSelector, error) { + softAuctionConfig, err := getAuctionConfig(args.SoftAuctionConfig, args.Denomination) + if err != nil { + return nil, err + } + err = checkNilArgs(args) + if err != nil { + return nil, err + } + + log.Debug("NewAuctionListSelector with config", + "top up step", softAuctionConfig.step.String(), + "min top up", softAuctionConfig.minTopUp.String(), + "max top up", softAuctionConfig.maxTopUp.String(), + "denomination", args.Denomination, + "denominator for pretty values", softAuctionConfig.denominator.String(), + ) + + return &auctionListSelector{ + shardCoordinator: args.ShardCoordinator, + stakingDataProvider: args.StakingDataProvider, + nodesConfigProvider: args.MaxNodesChangeConfigProvider, + auctionListDisplayer: args.AuctionListDisplayHandler, + softAuctionConfig: softAuctionConfig, + }, nil +} + +func getAuctionConfig(softAuctionConfig config.SoftAuctionConfig, denomination int) (*auctionConfig, error) { + step, ok := big.NewInt(0).SetString(softAuctionConfig.TopUpStep, 10) + if !ok || step.Cmp(zero) <= 0 { + return nil, fmt.Errorf("%w for step in soft auction config;expected number > 0, got %s", + process.ErrInvalidValue, + softAuctionConfig.TopUpStep, + ) + } + + minTopUp, ok := big.NewInt(0).SetString(softAuctionConfig.MinTopUp, 10) + if !ok || 
minTopUp.Cmp(zero) <= 0 { + return nil, fmt.Errorf("%w for min top up in soft auction config;expected number > 0, got %s", + process.ErrInvalidValue, + softAuctionConfig.MinTopUp, + ) + } + + maxTopUp, ok := big.NewInt(0).SetString(softAuctionConfig.MaxTopUp, 10) + if !ok || maxTopUp.Cmp(zero) <= 0 { + return nil, fmt.Errorf("%w for max top up in soft auction config;expected number > 0, got %s", + process.ErrInvalidValue, + softAuctionConfig.MaxTopUp, + ) + } + + if minTopUp.Cmp(maxTopUp) > 0 { + return nil, fmt.Errorf("%w for min/max top up in soft auction config; min value: %s > max value: %s", + process.ErrInvalidValue, + softAuctionConfig.MinTopUp, + softAuctionConfig.MaxTopUp, + ) + } + + if denomination < 0 { + return nil, fmt.Errorf("%w for denomination in soft auction config;expected number >= 0, got %d", + process.ErrInvalidValue, + denomination, + ) + } + + if softAuctionConfig.MaxNumberOfIterations == 0 { + return nil, fmt.Errorf("%w for max number of iterations in soft auction config;expected value > 0", + process.ErrInvalidValue, + ) + } + + denominationStr := "1" + strings.Repeat("0", denomination) + denominator, ok := big.NewInt(0).SetString(denominationStr, 10) + if !ok { + return nil, fmt.Errorf("%w for denomination: %d", + errCannotComputeDenominator, + denomination, + ) + } + + if minTopUp.Cmp(denominator) < 0 { + return nil, fmt.Errorf("%w for min top up in auction config; expected value to be >= %s, got %s", + process.ErrInvalidValue, + denominator.String(), + minTopUp.String(), + ) + } + + if step.Cmp(denominator) < 0 { + return nil, fmt.Errorf("%w for step in auction config; expected value to be >= %s, got %s", + process.ErrInvalidValue, + denominator.String(), + step.String(), + ) + } + + return &auctionConfig{ + step: step, + minTopUp: minTopUp, + maxTopUp: maxTopUp, + denominator: denominator, + maxNumberOfIterations: softAuctionConfig.MaxNumberOfIterations, + }, nil +} + +func checkNilArgs(args AuctionListSelectorArgs) error { + if check.IfNil(args.ShardCoordinator) { + return epochStart.ErrNilShardCoordinator + } + if check.IfNil(args.StakingDataProvider) { + return epochStart.ErrNilStakingDataProvider + } + if check.IfNil(args.MaxNodesChangeConfigProvider) { + return epochStart.ErrNilMaxNodesChangeConfigProvider + } + if check.IfNil(args.AuctionListDisplayHandler) { + return errNilAuctionListDisplayHandler + } + + return nil +} + +// SelectNodesFromAuctionList will select nodes from validatorsInfoMap based on their top up. If two or more validators +// have the same top-up, then sorting will be done based on blsKey XOR randomness. 
Selected nodes will have their list set +// to common.SelectNodesFromAuctionList +func (als *auctionListSelector) SelectNodesFromAuctionList( + validatorsInfoMap state.ShardValidatorsInfoMapHandler, + randomness []byte, +) error { + if len(randomness) == 0 { + return process.ErrNilRandSeed + } + + ownersData, auctionListSize := als.getAuctionData() + if auctionListSize == 0 { + log.Info("auctionListSelector.SelectNodesFromAuctionList: empty auction list; skip selection") + return nil + } + + currNodesConfig := als.nodesConfigProvider.GetCurrentNodesConfig() + currNumOfValidators := als.stakingDataProvider.GetNumOfValidatorsInCurrentEpoch() + numOfShuffledNodes := currNodesConfig.NodesToShufflePerShard * (als.shardCoordinator.NumberOfShards() + 1) + numOfValidatorsAfterShuffling, err := safeSub(currNumOfValidators, numOfShuffledNodes) + if err != nil { + log.Warn(fmt.Sprintf("auctionListSelector.SelectNodesFromAuctionList: %v when trying to compute numOfValidatorsAfterShuffling = %v - %v (currNumOfValidators - numOfShuffledNodes)", + err, + currNumOfValidators, + numOfShuffledNodes, + )) + numOfValidatorsAfterShuffling = 0 + } + + maxNumNodes := currNodesConfig.MaxNumNodes + availableSlots, err := safeSub(maxNumNodes, numOfValidatorsAfterShuffling) + if availableSlots == 0 || err != nil { + log.Info(fmt.Sprintf("auctionListSelector.SelectNodesFromAuctionList: %v or zero value when trying to compute availableSlots = %v - %v (maxNodes - numOfValidatorsAfterShuffling); skip selecting nodes from auction list", + err, + maxNumNodes, + numOfValidatorsAfterShuffling, + )) + return nil + } + + log.Info("auctionListSelector.SelectNodesFromAuctionList", + "max nodes", maxNumNodes, + "current number of validators", currNumOfValidators, + "num of nodes which will be shuffled out", numOfShuffledNodes, + "num of validators after shuffling", numOfValidatorsAfterShuffling, + "auction list size", auctionListSize, + fmt.Sprintf("available slots (%v - %v)", maxNumNodes, numOfValidatorsAfterShuffling), availableSlots, + ) + + als.auctionListDisplayer.DisplayOwnersData(ownersData) + numOfAvailableNodeSlots := core.MinUint32(auctionListSize, availableSlots) + + sw := core.NewStopWatch() + sw.Start("auctionListSelector.sortAuctionList") + defer func() { + sw.Stop("auctionListSelector.sortAuctionList") + log.Debug("time measurements", sw.GetMeasurements()...) 
+ }() + + return als.sortAuctionList(ownersData, numOfAvailableNodeSlots, validatorsInfoMap, randomness) +} + +func (als *auctionListSelector) getAuctionData() (map[string]*OwnerAuctionData, uint32) { + ownersData := make(map[string]*OwnerAuctionData) + numOfNodesInAuction := uint32(0) + + for owner, ownerData := range als.stakingDataProvider.GetOwnersData() { + if ownerData.Qualified && len(ownerData.AuctionList) > 0 { + numAuctionNodes := len(ownerData.AuctionList) + + ownersData[owner] = &OwnerAuctionData{ + numActiveNodes: ownerData.NumActiveNodes, + numAuctionNodes: int64(numAuctionNodes), + numQualifiedAuctionNodes: int64(numAuctionNodes), + numStakedNodes: ownerData.NumStakedNodes, + totalTopUp: ownerData.TotalTopUp, + topUpPerNode: ownerData.TopUpPerNode, + qualifiedTopUpPerNode: ownerData.TopUpPerNode, + auctionList: make([]state.ValidatorInfoHandler, numAuctionNodes), + } + copy(ownersData[owner].auctionList, ownerData.AuctionList) + numOfNodesInAuction += uint32(numAuctionNodes) + } + } + + return ownersData, numOfNodesInAuction +} + +func isInAuction(validator state.ValidatorInfoHandler) bool { + return validator.GetList() == string(common.AuctionList) +} + +// TODO: Move this in elrond-go-core +func safeSub(a, b uint32) (uint32, error) { + if a < b { + return 0, epochStart.ErrUint32SubtractionOverflow + } + return a - b, nil +} + +func (als *auctionListSelector) sortAuctionList( + ownersData map[string]*OwnerAuctionData, + numOfAvailableNodeSlots uint32, + validatorsInfoMap state.ShardValidatorsInfoMapHandler, + randomness []byte, +) error { + softAuctionNodesConfig := als.calcSoftAuctionNodesConfig(ownersData, numOfAvailableNodeSlots) + selectedNodes := als.selectNodes(softAuctionNodesConfig, numOfAvailableNodeSlots, randomness) + return markAuctionNodesAsSelected(selectedNodes, validatorsInfoMap) +} + +func (als *auctionListSelector) calcSoftAuctionNodesConfig( + data map[string]*OwnerAuctionData, + numAvailableSlots uint32, +) map[string]*OwnerAuctionData { + ownersData := copyOwnersData(data) + minTopUp, maxTopUp := als.getMinMaxPossibleTopUp(ownersData) + log.Debug("auctionListSelector: calc min and max possible top up", + "min top up per node", getPrettyValue(minTopUp, als.softAuctionConfig.denominator), + "max top up per node", getPrettyValue(maxTopUp, als.softAuctionConfig.denominator), + ) + + topUp := big.NewInt(0).SetBytes(minTopUp.Bytes()) + previousConfig := copyOwnersData(ownersData) + iterationNumber := uint64(0) + maxNumberOfIterationsReached := false + + for ; topUp.Cmp(maxTopUp) < 0 && !maxNumberOfIterationsReached; topUp.Add(topUp, als.softAuctionConfig.step) { + previousConfig = copyOwnersData(ownersData) + numNodesQualifyingForTopUp := calcNodesConfig(ownersData, topUp) + + if numNodesQualifyingForTopUp < int64(numAvailableSlots) { + break + } + + iterationNumber++ + maxNumberOfIterationsReached = iterationNumber >= als.softAuctionConfig.maxNumberOfIterations + } + + log.Debug("auctionListSelector: found min required", + "topUp", getPrettyValue(topUp, als.softAuctionConfig.denominator), + "after num of iterations", iterationNumber, + ) + return previousConfig +} + +func (als *auctionListSelector) getMinMaxPossibleTopUp(ownersData map[string]*OwnerAuctionData) (*big.Int, *big.Int) { + min := big.NewInt(0).SetBytes(als.softAuctionConfig.maxTopUp.Bytes()) + max := big.NewInt(0).SetBytes(als.softAuctionConfig.minTopUp.Bytes()) + + for _, owner := range ownersData { + if owner.topUpPerNode.Cmp(min) < 0 { + min = 
big.NewInt(0).SetBytes(owner.topUpPerNode.Bytes()) + } + + ownerNumNodesWithOnlyOneAuctionNode := big.NewInt(owner.numActiveNodes + 1) + maxPossibleTopUpForOwner := big.NewInt(0).Div(owner.totalTopUp, ownerNumNodesWithOnlyOneAuctionNode) + if maxPossibleTopUpForOwner.Cmp(max) > 0 { + max = big.NewInt(0).SetBytes(maxPossibleTopUpForOwner.Bytes()) + } + } + + if min.Cmp(als.softAuctionConfig.minTopUp) < 0 { + min = als.softAuctionConfig.minTopUp + } + + return min, max +} + +func copyOwnersData(ownersData map[string]*OwnerAuctionData) map[string]*OwnerAuctionData { + ret := make(map[string]*OwnerAuctionData) + for owner, data := range ownersData { + ret[owner] = &OwnerAuctionData{ + numActiveNodes: data.numActiveNodes, + numAuctionNodes: data.numAuctionNodes, + numQualifiedAuctionNodes: data.numQualifiedAuctionNodes, + numStakedNodes: data.numStakedNodes, + totalTopUp: data.totalTopUp, + topUpPerNode: data.topUpPerNode, + qualifiedTopUpPerNode: data.qualifiedTopUpPerNode, + auctionList: make([]state.ValidatorInfoHandler, len(data.auctionList)), + } + copy(ret[owner].auctionList, data.auctionList) + } + + return ret +} + +func calcNodesConfig(ownersData map[string]*OwnerAuctionData, topUp *big.Int) int64 { + numNodesQualifyingForTopUp := int64(0) + + for ownerPubKey, owner := range ownersData { + activeNodes := big.NewInt(owner.numActiveNodes) + topUpActiveNodes := big.NewInt(0).Mul(topUp, activeNodes) + validatorTopUpForAuction := big.NewInt(0).Sub(owner.totalTopUp, topUpActiveNodes) + if validatorTopUpForAuction.Cmp(topUp) < 0 { + delete(ownersData, ownerPubKey) + continue + } + + qualifiedNodesBigInt := big.NewInt(0).Div(validatorTopUpForAuction, topUp) + qualifiedNodes := qualifiedNodesBigInt.Int64() + isNumQualifiedNodesOverflow := !qualifiedNodesBigInt.IsUint64() + + if qualifiedNodes > owner.numAuctionNodes || isNumQualifiedNodesOverflow { + numNodesQualifyingForTopUp += owner.numAuctionNodes + } else { + numNodesQualifyingForTopUp += qualifiedNodes + owner.numQualifiedAuctionNodes = qualifiedNodes + + ownerRemainingNodes := big.NewInt(owner.numActiveNodes + owner.numQualifiedAuctionNodes) + owner.qualifiedTopUpPerNode = big.NewInt(0).Div(owner.totalTopUp, ownerRemainingNodes) + } + } + + return numNodesQualifyingForTopUp +} + +func markAuctionNodesAsSelected( + selectedNodes []state.ValidatorInfoHandler, + validatorsInfoMap state.ShardValidatorsInfoMapHandler, +) error { + for _, node := range selectedNodes { + newNode := node.ShallowClone() + newNode.SetPreviousList(node.GetList()) + newNode.SetList(string(common.SelectedFromAuctionList)) + + err := validatorsInfoMap.Replace(node, newNode) + if err != nil { + return err + } + } + + return nil +} + +// IsInterfaceNil checks if the underlying pointer is nil +func (als *auctionListSelector) IsInterfaceNil() bool { + return als == nil +} diff --git a/epochStart/metachain/auctionListSelector_test.go b/epochStart/metachain/auctionListSelector_test.go new file mode 100644 index 00000000000..25cced015fc --- /dev/null +++ b/epochStart/metachain/auctionListSelector_test.go @@ -0,0 +1,895 @@ +package metachain + +import ( + "math/big" + "strings" + "testing" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/common/forking" + "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/epochStart" + "github.com/multiversx/mx-chain-go/epochStart/notifier" + "github.com/multiversx/mx-chain-go/process" + "github.com/multiversx/mx-chain-go/sharding" 
+ "github.com/multiversx/mx-chain-go/state" + "github.com/multiversx/mx-chain-go/testscommon" + "github.com/multiversx/mx-chain-go/testscommon/stakingcommon" + "github.com/stretchr/testify/require" +) + +func createSoftAuctionConfig() config.SoftAuctionConfig { + return config.SoftAuctionConfig{ + TopUpStep: "10", + MinTopUp: "1", + MaxTopUp: "32000000", + MaxNumberOfIterations: 100000, + } +} + +func createAuctionListSelectorArgs(maxNodesChangeConfig []config.MaxNodesChangeConfig) AuctionListSelectorArgs { + epochNotifier := forking.NewGenericEpochNotifier() + nodesConfigProvider, _ := notifier.NewNodesConfigProvider(epochNotifier, maxNodesChangeConfig) + + argsStakingDataProvider := createStakingDataProviderArgs() + stakingSCProvider, _ := NewStakingDataProvider(argsStakingDataProvider) + shardCoordinator, _ := sharding.NewMultiShardCoordinator(3, core.MetachainShardId) + + softAuctionCfg := createSoftAuctionConfig() + auctionDisplayer, _ := NewAuctionListDisplayer(ArgsAuctionListDisplayer{ + TableDisplayHandler: NewTableDisplayer(), + ValidatorPubKeyConverter: &testscommon.PubkeyConverterMock{}, + AddressPubKeyConverter: &testscommon.PubkeyConverterMock{}, + AuctionConfig: softAuctionCfg, + }) + return AuctionListSelectorArgs{ + ShardCoordinator: shardCoordinator, + StakingDataProvider: stakingSCProvider, + MaxNodesChangeConfigProvider: nodesConfigProvider, + AuctionListDisplayHandler: auctionDisplayer, + SoftAuctionConfig: softAuctionCfg, + } +} + +func createFullAuctionListSelectorArgs(maxNodesChangeConfig []config.MaxNodesChangeConfig) (AuctionListSelectorArgs, ArgsNewEpochStartSystemSCProcessing) { + epochNotifier := forking.NewGenericEpochNotifier() + nodesConfigProvider, _ := notifier.NewNodesConfigProvider(epochNotifier, maxNodesChangeConfig) + + argsSystemSC, _ := createFullArgumentsForSystemSCProcessing(config.EnableEpochs{}, testscommon.CreateMemUnit()) + argsSystemSC.EpochNotifier.CheckEpoch(&testscommon.HeaderHandlerStub{ + EpochField: stakingV4Step2EnableEpoch, + }) + argsSystemSC.MaxNodesChangeConfigProvider = nodesConfigProvider + + softAuctionCfg := createSoftAuctionConfig() + auctionDisplayer, _ := NewAuctionListDisplayer(ArgsAuctionListDisplayer{ + TableDisplayHandler: NewTableDisplayer(), + ValidatorPubKeyConverter: &testscommon.PubkeyConverterMock{}, + AddressPubKeyConverter: &testscommon.PubkeyConverterMock{}, + AuctionConfig: softAuctionCfg, + }) + return AuctionListSelectorArgs{ + ShardCoordinator: argsSystemSC.ShardCoordinator, + StakingDataProvider: argsSystemSC.StakingDataProvider, + MaxNodesChangeConfigProvider: nodesConfigProvider, + AuctionListDisplayHandler: auctionDisplayer, + SoftAuctionConfig: softAuctionCfg, + }, argsSystemSC +} + +func fillValidatorsInfo(t *testing.T, validatorsMap state.ShardValidatorsInfoMapHandler, sdp epochStart.StakingDataProvider) { + for _, validator := range validatorsMap.GetAllValidatorsInfo() { + err := sdp.FillValidatorInfo(validator) + require.Nil(t, err) + } +} + +func TestNewAuctionListSelector(t *testing.T) { + t.Parallel() + + t.Run("nil shard coordinator", func(t *testing.T) { + t.Parallel() + args := createAuctionListSelectorArgs(nil) + args.ShardCoordinator = nil + als, err := NewAuctionListSelector(args) + require.Nil(t, als) + require.Equal(t, epochStart.ErrNilShardCoordinator, err) + }) + + t.Run("nil staking data provider", func(t *testing.T) { + t.Parallel() + args := createAuctionListSelectorArgs(nil) + args.StakingDataProvider = nil + als, err := NewAuctionListSelector(args) + require.Nil(t, als) + 
require.Equal(t, epochStart.ErrNilStakingDataProvider, err) + }) + + t.Run("nil max nodes change config provider", func(t *testing.T) { + t.Parallel() + args := createAuctionListSelectorArgs(nil) + args.MaxNodesChangeConfigProvider = nil + als, err := NewAuctionListSelector(args) + require.Nil(t, als) + require.Equal(t, epochStart.ErrNilMaxNodesChangeConfigProvider, err) + }) + + t.Run("nil auction list displayer", func(t *testing.T) { + t.Parallel() + args := createAuctionListSelectorArgs(nil) + args.AuctionListDisplayHandler = nil + als, err := NewAuctionListSelector(args) + require.Nil(t, als) + require.Equal(t, errNilAuctionListDisplayHandler, err) + }) + + t.Run("invalid soft auction config", func(t *testing.T) { + t.Parallel() + args := createAuctionListSelectorArgs(nil) + args.SoftAuctionConfig.TopUpStep = "0" + als, err := NewAuctionListSelector(args) + require.Nil(t, als) + requireInvalidValueError(t, err, "step") + }) + + t.Run("should work", func(t *testing.T) { + t.Parallel() + args := createAuctionListSelectorArgs(nil) + als, err := NewAuctionListSelector(args) + require.NotNil(t, als) + require.Nil(t, err) + require.False(t, als.IsInterfaceNil()) + }) +} + +func requireInvalidValueError(t *testing.T, err error, msgToContain string) { + require.Error(t, err) + require.True(t, strings.Contains(err.Error(), process.ErrInvalidValue.Error())) + require.True(t, strings.Contains(err.Error(), msgToContain)) +} + +func TestGetAuctionConfig(t *testing.T) { + t.Parallel() + + t.Run("invalid step", func(t *testing.T) { + t.Parallel() + + cfg := createSoftAuctionConfig() + + cfg.TopUpStep = "dsa" + res, err := getAuctionConfig(cfg, 1) + require.Nil(t, res) + requireInvalidValueError(t, err, "step") + + cfg.TopUpStep = "-1" + res, err = getAuctionConfig(cfg, 1) + require.Nil(t, res) + requireInvalidValueError(t, err, "step") + + cfg.TopUpStep = "0" + res, err = getAuctionConfig(cfg, 1) + require.Nil(t, res) + requireInvalidValueError(t, err, "step") + }) + + t.Run("invalid min top up", func(t *testing.T) { + t.Parallel() + + cfg := createSoftAuctionConfig() + + cfg.MinTopUp = "dsa" + res, err := getAuctionConfig(cfg, 1) + require.Nil(t, res) + requireInvalidValueError(t, err, "min top up") + + cfg.MinTopUp = "-1" + res, err = getAuctionConfig(cfg, 1) + require.Nil(t, res) + requireInvalidValueError(t, err, "min top up") + + cfg.MinTopUp = "0" + res, err = getAuctionConfig(cfg, 1) + require.Nil(t, res) + requireInvalidValueError(t, err, "min top up") + }) + + t.Run("invalid max top up", func(t *testing.T) { + t.Parallel() + + cfg := createSoftAuctionConfig() + + cfg.MaxTopUp = "dsa" + res, err := getAuctionConfig(cfg, 1) + require.Nil(t, res) + requireInvalidValueError(t, err, "max top up") + + cfg.MaxTopUp = "-1" + res, err = getAuctionConfig(cfg, 1) + require.Nil(t, res) + requireInvalidValueError(t, err, "max top up") + + cfg.MaxTopUp = "0" + res, err = getAuctionConfig(cfg, 1) + require.Nil(t, res) + requireInvalidValueError(t, err, "max top up") + }) + + t.Run("invalid denomination", func(t *testing.T) { + t.Parallel() + + cfg := createSoftAuctionConfig() + + res, err := getAuctionConfig(cfg, -1) + require.Nil(t, res) + requireInvalidValueError(t, err, "denomination") + }) + + t.Run("zero max number of iterations", func(t *testing.T) { + t.Parallel() + + cfg := createSoftAuctionConfig() + cfg.MaxNumberOfIterations = 0 + + res, err := getAuctionConfig(cfg, 10) + require.Nil(t, res) + requireInvalidValueError(t, err, "for max number of iterations in soft auction config") + }) + + 
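+	// The sub-tests below exercise cross-field validation of the soft auction config:
+	// min top up must not exceed max top up, and both min top up and step must be at
+	// least the denominator (1 followed by `denomination` zeros) used for pretty values.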
t.Run("min top up > max top up", func(t *testing.T) { + t.Parallel() + + cfg := config.SoftAuctionConfig{ + TopUpStep: "10", + MinTopUp: "32", + MaxTopUp: "16", + MaxNumberOfIterations: 1, + } + + res, err := getAuctionConfig(cfg, 1) + require.Nil(t, res) + requireInvalidValueError(t, err, "min value: 32 > max value: 16") + }) + + t.Run("min top up < denominator", func(t *testing.T) { + t.Parallel() + + cfg := config.SoftAuctionConfig{ + TopUpStep: "100", + MinTopUp: "10", + MaxTopUp: "5000", + MaxNumberOfIterations: 1, + } + + res, err := getAuctionConfig(cfg, 2) + require.Nil(t, res) + requireInvalidValueError(t, err, "for min top up in auction config; expected value to be >= 100, got 10") + }) + + t.Run("step < denominator", func(t *testing.T) { + t.Parallel() + + cfg := config.SoftAuctionConfig{ + TopUpStep: "10", + MinTopUp: "100", + MaxTopUp: "5000", + MaxNumberOfIterations: 1, + } + + res, err := getAuctionConfig(cfg, 2) + require.Nil(t, res) + requireInvalidValueError(t, err, "for step in auction config; expected value to be >= 100, got 10") + }) + + t.Run("should work", func(t *testing.T) { + t.Parallel() + + cfg := config.SoftAuctionConfig{ + TopUpStep: "10", + MinTopUp: "1", + MaxTopUp: "444", + MaxNumberOfIterations: 100000, + } + + res, err := getAuctionConfig(cfg, 0) + require.Nil(t, err) + require.Equal(t, &auctionConfig{ + step: big.NewInt(10), + minTopUp: big.NewInt(1), + maxTopUp: big.NewInt(444), + denominator: big.NewInt(1), + maxNumberOfIterations: 100000, + }, res) + + minTopUp, _ := big.NewInt(0).SetString("1000000000000000000", 10) + maxTopUp, _ := big.NewInt(0).SetString("32000000000000000000000000", 10) + step, _ := big.NewInt(0).SetString("10000000000000000000", 10) + cfg = config.SoftAuctionConfig{ + TopUpStep: step.String(), + MinTopUp: minTopUp.String(), + MaxTopUp: maxTopUp.String(), + MaxNumberOfIterations: 100000, + } + + res, err = getAuctionConfig(cfg, 18) + require.Nil(t, err) + require.Equal(t, &auctionConfig{ + step: step, + minTopUp: minTopUp, + maxTopUp: maxTopUp, + denominator: minTopUp, + maxNumberOfIterations: 100000, + }, res) + }) +} + +func TestAuctionListSelector_SelectNodesFromAuction(t *testing.T) { + t.Parallel() + + t.Run("nil randomness, expect error", func(t *testing.T) { + t.Parallel() + + args := createAuctionListSelectorArgs(nil) + als, _ := NewAuctionListSelector(args) + err := als.SelectNodesFromAuctionList(state.NewShardValidatorsInfoMap(), nil) + require.Equal(t, process.ErrNilRandSeed, err) + }) + + t.Run("empty auction list", func(t *testing.T) { + t.Parallel() + + owner1 := []byte("owner1") + owner1StakedKeys := [][]byte{[]byte("pubKey0")} + + validatorsInfo := state.NewShardValidatorsInfoMap() + _ = validatorsInfo.Add(createValidatorInfo(owner1StakedKeys[0], common.EligibleList, "", 0, owner1)) + + args, argsSystemSC := createFullAuctionListSelectorArgs([]config.MaxNodesChangeConfig{{MaxNumNodes: 2}}) + stakingcommon.RegisterValidatorKeys(argsSystemSC.UserAccountsDB, owner1, owner1, owner1StakedKeys, big.NewInt(1000), argsSystemSC.Marshalizer) + fillValidatorsInfo(t, validatorsInfo, argsSystemSC.StakingDataProvider) + + als, _ := NewAuctionListSelector(args) + err := als.SelectNodesFromAuctionList(state.NewShardValidatorsInfoMap(), []byte("rnd")) + require.Nil(t, err) + expectedValidatorsInfo := map[uint32][]state.ValidatorInfoHandler{ + 0: { + createValidatorInfo(owner1StakedKeys[0], common.EligibleList, "", 0, owner1), + }, + } + require.Equal(t, expectedValidatorsInfo, validatorsInfo.GetShardValidatorsInfoMap()) + }) + + 
t.Run("not enough available slots to select auction nodes", func(t *testing.T) { + t.Parallel() + + owner1 := []byte("owner1") + owner2 := []byte("owner2") + owner1StakedKeys := [][]byte{[]byte("pubKey0")} + owner2StakedKeys := [][]byte{[]byte("pubKey1")} + + validatorsInfo := state.NewShardValidatorsInfoMap() + _ = validatorsInfo.Add(createValidatorInfo(owner1StakedKeys[0], common.EligibleList, "", 0, owner1)) + _ = validatorsInfo.Add(createValidatorInfo(owner2StakedKeys[0], common.AuctionList, "", 0, owner2)) + + args, argsSystemSC := createFullAuctionListSelectorArgs([]config.MaxNodesChangeConfig{{MaxNumNodes: 1}}) + stakingcommon.RegisterValidatorKeys(argsSystemSC.UserAccountsDB, owner1, owner1, owner1StakedKeys, big.NewInt(1000), argsSystemSC.Marshalizer) + stakingcommon.RegisterValidatorKeys(argsSystemSC.UserAccountsDB, owner2, owner2, owner2StakedKeys, big.NewInt(1000), argsSystemSC.Marshalizer) + fillValidatorsInfo(t, validatorsInfo, argsSystemSC.StakingDataProvider) + + als, _ := NewAuctionListSelector(args) + err := als.SelectNodesFromAuctionList(validatorsInfo, []byte("rnd")) + require.Nil(t, err) + expectedValidatorsInfo := map[uint32][]state.ValidatorInfoHandler{ + 0: { + createValidatorInfo(owner1StakedKeys[0], common.EligibleList, "", 0, owner1), + createValidatorInfo(owner2StakedKeys[0], common.AuctionList, "", 0, owner2), + }, + } + require.Equal(t, expectedValidatorsInfo, validatorsInfo.GetShardValidatorsInfoMap()) + }) + + t.Run("one eligible + one auction, max num nodes = 1, number of nodes after shuffling = 0, expect node in auction is selected", func(t *testing.T) { + t.Parallel() + + owner1 := []byte("owner1") + owner2 := []byte("owner2") + owner1StakedKeys := [][]byte{[]byte("pubKey0")} + owner2StakedKeys := [][]byte{[]byte("pubKey1")} + + validatorsInfo := state.NewShardValidatorsInfoMap() + _ = validatorsInfo.Add(createValidatorInfo(owner1StakedKeys[0], common.EligibleList, "", 0, owner1)) + _ = validatorsInfo.Add(createValidatorInfo(owner2StakedKeys[0], common.AuctionList, "", 0, owner2)) + + args, argsSystemSC := createFullAuctionListSelectorArgs([]config.MaxNodesChangeConfig{{MaxNumNodes: 1, NodesToShufflePerShard: 1}}) + stakingcommon.RegisterValidatorKeys(argsSystemSC.UserAccountsDB, owner1, owner1, owner1StakedKeys, big.NewInt(1000), argsSystemSC.Marshalizer) + stakingcommon.RegisterValidatorKeys(argsSystemSC.UserAccountsDB, owner2, owner2, owner2StakedKeys, big.NewInt(1000), argsSystemSC.Marshalizer) + fillValidatorsInfo(t, validatorsInfo, argsSystemSC.StakingDataProvider) + + als, _ := NewAuctionListSelector(args) + err := als.SelectNodesFromAuctionList(validatorsInfo, []byte("rnd")) + require.Nil(t, err) + expectedValidatorsInfo := map[uint32][]state.ValidatorInfoHandler{ + 0: { + createValidatorInfo(owner1StakedKeys[0], common.EligibleList, "", 0, owner1), + createValidatorInfo(owner2StakedKeys[0], common.SelectedFromAuctionList, common.AuctionList, 0, owner2), + }, + } + require.Equal(t, expectedValidatorsInfo, validatorsInfo.GetShardValidatorsInfoMap()) + }) + + t.Run("two available slots for auction nodes, but only one node in auction", func(t *testing.T) { + t.Parallel() + + owner1 := []byte("owner1") + owner1StakedKeys := [][]byte{[]byte("pubKey0")} + validatorsInfo := state.NewShardValidatorsInfoMap() + _ = validatorsInfo.Add(createValidatorInfo(owner1StakedKeys[0], common.AuctionList, "", 0, owner1)) + + args, argsSystemSC := createFullAuctionListSelectorArgs([]config.MaxNodesChangeConfig{{MaxNumNodes: 2}}) + 
stakingcommon.RegisterValidatorKeys(argsSystemSC.UserAccountsDB, owner1, owner1, owner1StakedKeys, big.NewInt(1000), argsSystemSC.Marshalizer) + fillValidatorsInfo(t, validatorsInfo, argsSystemSC.StakingDataProvider) + + als, _ := NewAuctionListSelector(args) + err := als.SelectNodesFromAuctionList(validatorsInfo, []byte("rnd")) + require.Nil(t, err) + expectedValidatorsInfo := map[uint32][]state.ValidatorInfoHandler{ + 0: { + createValidatorInfo(owner1StakedKeys[0], common.SelectedFromAuctionList, common.AuctionList, 0, owner1), + }, + } + require.Equal(t, expectedValidatorsInfo, validatorsInfo.GetShardValidatorsInfoMap()) + }) +} + +func TestAuctionListSelector_calcSoftAuctionNodesConfigEdgeCases(t *testing.T) { + t.Parallel() + + randomness := []byte("pk0") + args := createAuctionListSelectorArgs(nil) + als, _ := NewAuctionListSelector(args) + + t.Run("two validators, both have zero top up", func(t *testing.T) { + t.Parallel() + + v1 := &state.ValidatorInfo{PublicKey: []byte("pk1")} + v2 := &state.ValidatorInfo{PublicKey: []byte("pk2")} + + owner1 := "owner1" + owner2 := "owner2" + ownersData := map[string]*OwnerAuctionData{ + owner1: { + numActiveNodes: 0, + numAuctionNodes: 1, + numQualifiedAuctionNodes: 1, + numStakedNodes: 1, + totalTopUp: big.NewInt(0), + topUpPerNode: big.NewInt(0), + qualifiedTopUpPerNode: big.NewInt(0), + auctionList: []state.ValidatorInfoHandler{v1}, + }, + owner2: { + numActiveNodes: 0, + numAuctionNodes: 1, + numQualifiedAuctionNodes: 1, + numStakedNodes: 1, + totalTopUp: big.NewInt(0), + topUpPerNode: big.NewInt(0), + qualifiedTopUpPerNode: big.NewInt(0), + auctionList: []state.ValidatorInfoHandler{v2}, + }, + } + + minTopUp, maxTopUp := als.getMinMaxPossibleTopUp(ownersData) + require.Equal(t, als.softAuctionConfig.minTopUp, minTopUp) + require.Equal(t, als.softAuctionConfig.minTopUp, maxTopUp) + + softAuctionConfig := als.calcSoftAuctionNodesConfig(ownersData, 2) + require.Equal(t, ownersData, softAuctionConfig) + selectedNodes := als.selectNodes(softAuctionConfig, 2, randomness) + require.Equal(t, []state.ValidatorInfoHandler{v2, v1}, selectedNodes) + + softAuctionConfig = als.calcSoftAuctionNodesConfig(ownersData, 1) + require.Equal(t, ownersData, softAuctionConfig) + selectedNodes = als.selectNodes(softAuctionConfig, 1, randomness) + require.Equal(t, []state.ValidatorInfoHandler{v2}, selectedNodes) + }) + + t.Run("one validator with zero top up, one with min top up, one with top up", func(t *testing.T) { + t.Parallel() + + v1 := &state.ValidatorInfo{PublicKey: []byte("pk1")} + v2 := &state.ValidatorInfo{PublicKey: []byte("pk2")} + v3 := &state.ValidatorInfo{PublicKey: []byte("pk3")} + + owner1 := "owner1" + owner2 := "owner2" + owner3 := "owner3" + ownersData := map[string]*OwnerAuctionData{ + owner1: { + numActiveNodes: 0, + numAuctionNodes: 1, + numQualifiedAuctionNodes: 1, + numStakedNodes: 1, + totalTopUp: big.NewInt(0), + topUpPerNode: big.NewInt(0), + qualifiedTopUpPerNode: big.NewInt(0), + auctionList: []state.ValidatorInfoHandler{v1}, + }, + owner2: { + numActiveNodes: 0, + numAuctionNodes: 1, + numQualifiedAuctionNodes: 1, + numStakedNodes: 1, + totalTopUp: big.NewInt(1), + topUpPerNode: big.NewInt(1), + qualifiedTopUpPerNode: big.NewInt(1), + auctionList: []state.ValidatorInfoHandler{v2}, + }, + owner3: { + numActiveNodes: 0, + numAuctionNodes: 1, + numQualifiedAuctionNodes: 1, + numStakedNodes: 1, + totalTopUp: big.NewInt(1000), + topUpPerNode: big.NewInt(1000), + qualifiedTopUpPerNode: big.NewInt(1000), + auctionList: 
[]state.ValidatorInfoHandler{v3}, + }, + } + + minTopUp, maxTopUp := als.getMinMaxPossibleTopUp(ownersData) + require.Equal(t, big.NewInt(1), minTopUp) + require.Equal(t, big.NewInt(1000), maxTopUp) + + softAuctionConfig := als.calcSoftAuctionNodesConfig(ownersData, 3) + require.Equal(t, ownersData, softAuctionConfig) + selectedNodes := als.selectNodes(softAuctionConfig, 3, randomness) + require.Equal(t, []state.ValidatorInfoHandler{v3, v2, v1}, selectedNodes) + + softAuctionConfig = als.calcSoftAuctionNodesConfig(ownersData, 2) + expectedSoftAuctionConfig := copyOwnersData(softAuctionConfig) + delete(expectedSoftAuctionConfig, owner1) + require.Equal(t, expectedSoftAuctionConfig, softAuctionConfig) + selectedNodes = als.selectNodes(softAuctionConfig, 2, randomness) + require.Equal(t, []state.ValidatorInfoHandler{v3, v2}, selectedNodes) + + softAuctionConfig = als.calcSoftAuctionNodesConfig(ownersData, 1) + delete(expectedSoftAuctionConfig, owner2) + require.Equal(t, expectedSoftAuctionConfig, softAuctionConfig) + selectedNodes = als.selectNodes(softAuctionConfig, 1, randomness) + require.Equal(t, []state.ValidatorInfoHandler{v3}, selectedNodes) + }) + + t.Run("two validators, both have same top up", func(t *testing.T) { + v1 := &state.ValidatorInfo{PublicKey: []byte("pk1")} + v2 := &state.ValidatorInfo{PublicKey: []byte("pk2")} + + owner1 := "owner1" + owner2 := "owner2" + ownersData := map[string]*OwnerAuctionData{ + owner1: { + numActiveNodes: 0, + numAuctionNodes: 1, + numQualifiedAuctionNodes: 1, + numStakedNodes: 1, + totalTopUp: big.NewInt(1000), + topUpPerNode: big.NewInt(1000), + qualifiedTopUpPerNode: big.NewInt(1000), + auctionList: []state.ValidatorInfoHandler{v1}, + }, + owner2: { + numActiveNodes: 0, + numAuctionNodes: 1, + numQualifiedAuctionNodes: 1, + numStakedNodes: 1, + totalTopUp: big.NewInt(1000), + topUpPerNode: big.NewInt(1000), + qualifiedTopUpPerNode: big.NewInt(1000), + auctionList: []state.ValidatorInfoHandler{v2}, + }, + } + + minTopUp, maxTopUp := als.getMinMaxPossibleTopUp(ownersData) + require.Equal(t, big.NewInt(1000), minTopUp) + require.Equal(t, big.NewInt(1000), maxTopUp) + + softAuctionConfig := als.calcSoftAuctionNodesConfig(ownersData, 2) + require.Equal(t, ownersData, softAuctionConfig) + selectedNodes := als.selectNodes(softAuctionConfig, 2, randomness) + require.Equal(t, []state.ValidatorInfoHandler{v2, v1}, selectedNodes) + + softAuctionConfig = als.calcSoftAuctionNodesConfig(ownersData, 1) + require.Equal(t, ownersData, softAuctionConfig) + selectedNodes = als.selectNodes(softAuctionConfig, 1, randomness) + require.Equal(t, []state.ValidatorInfoHandler{v2}, selectedNodes) + }) + + t.Run("two validators, top up difference less than step", func(t *testing.T) { + v1 := &state.ValidatorInfo{PublicKey: []byte("pk1")} + v2 := &state.ValidatorInfo{PublicKey: []byte("pk2")} + + owner1 := "owner1" + owner2 := "owner2" + ownersData := map[string]*OwnerAuctionData{ + owner1: { + numActiveNodes: 0, + numAuctionNodes: 1, + numQualifiedAuctionNodes: 1, + numStakedNodes: 1, + totalTopUp: big.NewInt(1000), + topUpPerNode: big.NewInt(1000), + qualifiedTopUpPerNode: big.NewInt(1000), + auctionList: []state.ValidatorInfoHandler{v1}, + }, + owner2: { + numActiveNodes: 0, + numAuctionNodes: 1, + numQualifiedAuctionNodes: 1, + numStakedNodes: 1, + totalTopUp: big.NewInt(995), + topUpPerNode: big.NewInt(995), + qualifiedTopUpPerNode: big.NewInt(995), + auctionList: []state.ValidatorInfoHandler{v2}, + }, + } + + minTopUp, maxTopUp := 
als.getMinMaxPossibleTopUp(ownersData) + require.Equal(t, big.NewInt(995), minTopUp) + require.Equal(t, big.NewInt(1000), maxTopUp) + + softAuctionConfig := als.calcSoftAuctionNodesConfig(ownersData, 2) + require.Equal(t, ownersData, softAuctionConfig) + selectedNodes := als.selectNodes(softAuctionConfig, 2, randomness) + require.Equal(t, []state.ValidatorInfoHandler{v1, v2}, selectedNodes) + + softAuctionConfig = als.calcSoftAuctionNodesConfig(ownersData, 1) + require.Equal(t, ownersData, softAuctionConfig) + selectedNodes = als.selectNodes(softAuctionConfig, 1, randomness) + require.Equal(t, []state.ValidatorInfoHandler{v1}, selectedNodes) + }) + + t.Run("three validators, top up difference equal to step", func(t *testing.T) { + v1 := &state.ValidatorInfo{PublicKey: []byte("pk1")} + v2 := &state.ValidatorInfo{PublicKey: []byte("pk2")} + v0 := &state.ValidatorInfo{PublicKey: []byte("pk0")} + + owner1 := "owner1" + owner2 := "owner2" + ownersData := map[string]*OwnerAuctionData{ + owner1: { + numActiveNodes: 0, + numAuctionNodes: 1, + numQualifiedAuctionNodes: 1, + numStakedNodes: 1, + totalTopUp: big.NewInt(1000), + topUpPerNode: big.NewInt(1000), + qualifiedTopUpPerNode: big.NewInt(1000), + auctionList: []state.ValidatorInfoHandler{v1}, + }, + owner2: { + numActiveNodes: 0, + numAuctionNodes: 2, + numQualifiedAuctionNodes: 2, + numStakedNodes: 2, + totalTopUp: big.NewInt(1980), + topUpPerNode: big.NewInt(990), + qualifiedTopUpPerNode: big.NewInt(990), + auctionList: []state.ValidatorInfoHandler{v2, v0}, + }, + } + + minTopUp, maxTopUp := als.getMinMaxPossibleTopUp(ownersData) + require.Equal(t, big.NewInt(990), minTopUp) + require.Equal(t, big.NewInt(1980), maxTopUp) + + softAuctionConfig := als.calcSoftAuctionNodesConfig(ownersData, 3) + require.Equal(t, ownersData, softAuctionConfig) + selectedNodes := als.selectNodes(softAuctionConfig, 3, randomness) + require.Equal(t, []state.ValidatorInfoHandler{v1, v2, v0}, selectedNodes) + + softAuctionConfig = als.calcSoftAuctionNodesConfig(ownersData, 2) + expectedSoftAuction := copyOwnersData(ownersData) + expectedSoftAuction[owner2].numQualifiedAuctionNodes = 1 + expectedSoftAuction[owner2].qualifiedTopUpPerNode = big.NewInt(1980) + require.Equal(t, expectedSoftAuction, softAuctionConfig) + selectedNodes = als.selectNodes(softAuctionConfig, 2, randomness) + require.Equal(t, []state.ValidatorInfoHandler{v2, v1}, selectedNodes) + + softAuctionConfig = als.calcSoftAuctionNodesConfig(ownersData, 1) + delete(expectedSoftAuction, owner1) + require.Equal(t, expectedSoftAuction, softAuctionConfig) + selectedNodes = als.selectNodes(softAuctionConfig, 1, randomness) + require.Equal(t, []state.ValidatorInfoHandler{v2}, selectedNodes) + }) + + t.Run("large top up difference, would qualify more nodes than an owner has, expect correct computation", func(t *testing.T) { + argsLargeTopUp := createAuctionListSelectorArgs(nil) + argsLargeTopUp.SoftAuctionConfig = config.SoftAuctionConfig{ + TopUpStep: "10000000000000000000", // 10 eGLD + MinTopUp: "1000000000000000000", // 1 eGLD + MaxTopUp: "32000000000000000000000000", // 32 mil eGLD + MaxNumberOfIterations: 10, + } + argsLargeTopUp.Denomination = 18 + selector, _ := NewAuctionListSelector(argsLargeTopUp) + + v0 := &state.ValidatorInfo{PublicKey: []byte("pk0")} + v1 := &state.ValidatorInfo{PublicKey: []byte("pk1")} + v2 := &state.ValidatorInfo{PublicKey: []byte("pk2")} + + oneEGLD, _ := big.NewInt(0).SetString("1000000000000000000", 10) + owner1TopUp, _ := big.NewInt(0).SetString("32000000000000000000000000", 
10) // 31 mil eGLD + owner1 := "owner1" + owner2 := "owner2" + ownersData := map[string]*OwnerAuctionData{ + owner1: { + numActiveNodes: 0, + numAuctionNodes: 1, + numQualifiedAuctionNodes: 1, + numStakedNodes: 1, + totalTopUp: owner1TopUp, + topUpPerNode: owner1TopUp, + qualifiedTopUpPerNode: owner1TopUp, + auctionList: []state.ValidatorInfoHandler{v0}, + }, + owner2: { + numActiveNodes: 0, + numAuctionNodes: 2, + numQualifiedAuctionNodes: 2, + numStakedNodes: 2, + totalTopUp: big.NewInt(0), + topUpPerNode: big.NewInt(0), + qualifiedTopUpPerNode: big.NewInt(0), + auctionList: []state.ValidatorInfoHandler{v1, v2}, + }, + } + + minTopUp, maxTopUp := selector.getMinMaxPossibleTopUp(ownersData) + require.Equal(t, oneEGLD, minTopUp) + require.Equal(t, owner1TopUp, maxTopUp) + + softAuctionConfig := selector.calcSoftAuctionNodesConfig(ownersData, 3) + require.Equal(t, ownersData, softAuctionConfig) + selectedNodes := selector.selectNodes(softAuctionConfig, 3, randomness) + require.Equal(t, []state.ValidatorInfoHandler{v0, v2, v1}, selectedNodes) + + softAuctionConfig = selector.calcSoftAuctionNodesConfig(ownersData, 2) + expectedSoftAuction := copyOwnersData(ownersData) + expectedSoftAuction[owner1].numQualifiedAuctionNodes = 1 + expectedSoftAuction[owner1].qualifiedTopUpPerNode = owner1TopUp + require.Equal(t, expectedSoftAuction, softAuctionConfig) + selectedNodes = selector.selectNodes(softAuctionConfig, 2, randomness) + require.Equal(t, []state.ValidatorInfoHandler{v0, v2}, selectedNodes) + + softAuctionConfig = selector.calcSoftAuctionNodesConfig(ownersData, 1) + delete(expectedSoftAuction, owner2) + require.Equal(t, expectedSoftAuction, softAuctionConfig) + selectedNodes = selector.selectNodes(softAuctionConfig, 1, randomness) + require.Equal(t, []state.ValidatorInfoHandler{v0}, selectedNodes) + }) +} + +func TestAuctionListSelector_calcSoftAuctionNodesConfig(t *testing.T) { + t.Parallel() + + randomness := []byte("pk0") + v1 := &state.ValidatorInfo{PublicKey: []byte("pk1")} + v2 := &state.ValidatorInfo{PublicKey: []byte("pk2")} + v3 := &state.ValidatorInfo{PublicKey: []byte("pk3")} + v4 := &state.ValidatorInfo{PublicKey: []byte("pk4")} + v5 := &state.ValidatorInfo{PublicKey: []byte("pk5")} + v6 := &state.ValidatorInfo{PublicKey: []byte("pk6")} + v7 := &state.ValidatorInfo{PublicKey: []byte("pk7")} + v8 := &state.ValidatorInfo{PublicKey: []byte("pk8")} + + owner1 := "owner1" + owner2 := "owner2" + owner3 := "owner3" + owner4 := "owner4" + ownersData := map[string]*OwnerAuctionData{ + owner1: { + numActiveNodes: 2, + numAuctionNodes: 2, + numQualifiedAuctionNodes: 2, + numStakedNodes: 4, + totalTopUp: big.NewInt(1500), + topUpPerNode: big.NewInt(375), + qualifiedTopUpPerNode: big.NewInt(375), + auctionList: []state.ValidatorInfoHandler{v1, v2}, + }, + owner2: { + numActiveNodes: 0, + numAuctionNodes: 3, + numQualifiedAuctionNodes: 3, + numStakedNodes: 3, + totalTopUp: big.NewInt(3000), + topUpPerNode: big.NewInt(1000), + qualifiedTopUpPerNode: big.NewInt(1000), + auctionList: []state.ValidatorInfoHandler{v3, v4, v5}, + }, + owner3: { + numActiveNodes: 1, + numAuctionNodes: 2, + numQualifiedAuctionNodes: 2, + numStakedNodes: 3, + totalTopUp: big.NewInt(1000), + topUpPerNode: big.NewInt(333), + qualifiedTopUpPerNode: big.NewInt(333), + auctionList: []state.ValidatorInfoHandler{v6, v7}, + }, + owner4: { + numActiveNodes: 1, + numAuctionNodes: 1, + numQualifiedAuctionNodes: 1, + numStakedNodes: 2, + totalTopUp: big.NewInt(0), + topUpPerNode: big.NewInt(0), + qualifiedTopUpPerNode: 
big.NewInt(0), + auctionList: []state.ValidatorInfoHandler{v8}, + }, + } + + args := createAuctionListSelectorArgs(nil) + als, _ := NewAuctionListSelector(args) + + minTopUp, maxTopUp := als.getMinMaxPossibleTopUp(ownersData) + require.Equal(t, big.NewInt(1), minTopUp) // owner4 having all nodes in auction + require.Equal(t, big.NewInt(3000), maxTopUp) // owner2 having only only one node in auction + + softAuctionConfig := als.calcSoftAuctionNodesConfig(ownersData, 9) + require.Equal(t, ownersData, softAuctionConfig) + selectedNodes := als.selectNodes(softAuctionConfig, 8, randomness) + require.Equal(t, []state.ValidatorInfoHandler{v5, v4, v3, v2, v1, v7, v6, v8}, selectedNodes) + + softAuctionConfig = als.calcSoftAuctionNodesConfig(ownersData, 8) + require.Equal(t, ownersData, softAuctionConfig) + selectedNodes = als.selectNodes(softAuctionConfig, 8, randomness) + require.Equal(t, []state.ValidatorInfoHandler{v5, v4, v3, v2, v1, v7, v6, v8}, selectedNodes) + + softAuctionConfig = als.calcSoftAuctionNodesConfig(ownersData, 7) + expectedConfig := copyOwnersData(ownersData) + delete(expectedConfig, owner4) + require.Equal(t, expectedConfig, softAuctionConfig) + selectedNodes = als.selectNodes(softAuctionConfig, 7, randomness) + require.Equal(t, []state.ValidatorInfoHandler{v5, v4, v3, v2, v1, v7, v6}, selectedNodes) + + softAuctionConfig = als.calcSoftAuctionNodesConfig(ownersData, 6) + expectedConfig[owner3].numQualifiedAuctionNodes = 1 + expectedConfig[owner3].qualifiedTopUpPerNode = big.NewInt(500) + require.Equal(t, expectedConfig, softAuctionConfig) + selectedNodes = als.selectNodes(softAuctionConfig, 6, randomness) + require.Equal(t, []state.ValidatorInfoHandler{v5, v4, v3, v7, v2, v1}, selectedNodes) + + softAuctionConfig = als.calcSoftAuctionNodesConfig(ownersData, 5) + expectedConfig[owner1].numQualifiedAuctionNodes = 1 + expectedConfig[owner1].qualifiedTopUpPerNode = big.NewInt(500) + require.Equal(t, expectedConfig, softAuctionConfig) + selectedNodes = als.selectNodes(softAuctionConfig, 5, randomness) + require.Equal(t, []state.ValidatorInfoHandler{v5, v4, v3, v7, v2}, selectedNodes) + + softAuctionConfig = als.calcSoftAuctionNodesConfig(ownersData, 4) + require.Equal(t, expectedConfig, softAuctionConfig) + selectedNodes = als.selectNodes(softAuctionConfig, 4, randomness) + require.Equal(t, []state.ValidatorInfoHandler{v5, v4, v3, v7}, selectedNodes) + + softAuctionConfig = als.calcSoftAuctionNodesConfig(ownersData, 3) + delete(expectedConfig, owner3) + delete(expectedConfig, owner1) + require.Equal(t, expectedConfig, softAuctionConfig) + selectedNodes = als.selectNodes(softAuctionConfig, 3, randomness) + require.Equal(t, []state.ValidatorInfoHandler{v5, v4, v3}, selectedNodes) + + softAuctionConfig = als.calcSoftAuctionNodesConfig(ownersData, 2) + expectedConfig[owner2].numQualifiedAuctionNodes = 2 + expectedConfig[owner2].qualifiedTopUpPerNode = big.NewInt(1500) + require.Equal(t, expectedConfig, softAuctionConfig) + selectedNodes = als.selectNodes(softAuctionConfig, 2, randomness) + require.Equal(t, []state.ValidatorInfoHandler{v5, v4}, selectedNodes) + + softAuctionConfig = als.calcSoftAuctionNodesConfig(ownersData, 1) + expectedConfig[owner2].numQualifiedAuctionNodes = 1 + expectedConfig[owner2].qualifiedTopUpPerNode = big.NewInt(3000) + require.Equal(t, expectedConfig, softAuctionConfig) + selectedNodes = als.selectNodes(softAuctionConfig, 1, randomness) + require.Equal(t, []state.ValidatorInfoHandler{v5}, selectedNodes) +} diff --git 
a/epochStart/metachain/auctionListSorting.go b/epochStart/metachain/auctionListSorting.go new file mode 100644 index 00000000000..4759ec65bcb --- /dev/null +++ b/epochStart/metachain/auctionListSorting.go @@ -0,0 +1,104 @@ +package metachain + +import ( + "bytes" + "math/big" + "sort" + + "github.com/multiversx/mx-chain-go/state" +) + +func (als *auctionListSelector) selectNodes( + ownersData map[string]*OwnerAuctionData, + numAvailableSlots uint32, + randomness []byte, +) []state.ValidatorInfoHandler { + selectedFromAuction := make([]state.ValidatorInfoHandler, 0) + validatorTopUpMap := make(map[string]*big.Int) + + pubKeyLen := getPubKeyLen(ownersData) + normRand := calcNormalizedRandomness(randomness, pubKeyLen) + + for _, owner := range ownersData { + sortListByPubKey(owner.auctionList) + addQualifiedValidatorsTopUpInMap(owner, validatorTopUpMap) + selectedFromAuction = append(selectedFromAuction, owner.auctionList[:owner.numQualifiedAuctionNodes]...) + } + + als.auctionListDisplayer.DisplayOwnersSelectedNodes(ownersData) + sortValidators(selectedFromAuction, validatorTopUpMap, normRand) + als.auctionListDisplayer.DisplayAuctionList(selectedFromAuction, ownersData, numAvailableSlots) + + return selectedFromAuction[:numAvailableSlots] +} + +func getPubKeyLen(ownersData map[string]*OwnerAuctionData) int { + for _, owner := range ownersData { + return len(owner.auctionList[0].GetPublicKey()) + } + + return 0 +} + +func calcNormalizedRandomness(randomness []byte, expectedLen int) []byte { + rand := randomness + randLen := len(rand) + + if expectedLen > randLen { + repeatedCt := expectedLen/randLen + 1 + rand = bytes.Repeat(randomness, repeatedCt) + } + + rand = rand[:expectedLen] + return rand +} + +func sortListByPubKey(list []state.ValidatorInfoHandler) { + sort.SliceStable(list, func(i, j int) bool { + pubKey1 := list[i].GetPublicKey() + pubKey2 := list[j].GetPublicKey() + + return bytes.Compare(pubKey1, pubKey2) > 0 + }) +} + +func addQualifiedValidatorsTopUpInMap(owner *OwnerAuctionData, validatorTopUpMap map[string]*big.Int) { + for i := int64(0); i < owner.numQualifiedAuctionNodes; i++ { + validatorPubKey := string(owner.auctionList[i].GetPublicKey()) + validatorTopUpMap[validatorPubKey] = big.NewInt(0).SetBytes(owner.qualifiedTopUpPerNode.Bytes()) + } +} + +func sortValidators( + list []state.ValidatorInfoHandler, + validatorTopUpMap map[string]*big.Int, + randomness []byte, +) { + sort.SliceStable(list, func(i, j int) bool { + pubKey1 := list[i].GetPublicKey() + pubKey2 := list[j].GetPublicKey() + + nodeTopUpPubKey1 := validatorTopUpMap[string(pubKey1)] + nodeTopUpPubKey2 := validatorTopUpMap[string(pubKey2)] + + if nodeTopUpPubKey1.Cmp(nodeTopUpPubKey2) == 0 { + return compareByXORWithRandomness(pubKey1, pubKey2, randomness) + } + + return nodeTopUpPubKey1.Cmp(nodeTopUpPubKey2) > 0 + }) +} + +func compareByXORWithRandomness(pubKey1, pubKey2, randomness []byte) bool { + xorLen := len(randomness) + + key1Xor := make([]byte, xorLen) + key2Xor := make([]byte, xorLen) + + for idx := 0; idx < xorLen; idx++ { + key1Xor[idx] = pubKey1[idx] ^ randomness[idx] + key2Xor[idx] = pubKey2[idx] ^ randomness[idx] + } + + return bytes.Compare(key1Xor, key2Xor) == 1 +} diff --git a/epochStart/metachain/auctionListSorting_test.go b/epochStart/metachain/auctionListSorting_test.go new file mode 100644 index 00000000000..637869ea1d6 --- /dev/null +++ b/epochStart/metachain/auctionListSorting_test.go @@ -0,0 +1,39 @@ +package metachain + +import ( + "testing" + + "github.com/stretchr/testify/require" +) 
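+// calcNormalizedRandomness trims or cyclically repeats the randomness so that its length
+// matches the BLS public key length; compareByXORWithRandomness then breaks top-up ties
+// by comparing pubKey XOR randomness byte-wise. For example, randomness "rand" normalized
+// to length 6 yields "randra", as asserted below.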
+ +func TestCalcNormalizedRandomness(t *testing.T) { + t.Parallel() + + t.Run("randomness longer than expected len", func(t *testing.T) { + t.Parallel() + + result := calcNormalizedRandomness([]byte("rand"), 2) + require.Equal(t, []byte("ra"), result) + }) + + t.Run("randomness length equal to expected len", func(t *testing.T) { + t.Parallel() + + result := calcNormalizedRandomness([]byte("rand"), 4) + require.Equal(t, []byte("rand"), result) + }) + + t.Run("randomness length less than expected len", func(t *testing.T) { + t.Parallel() + + result := calcNormalizedRandomness([]byte("rand"), 6) + require.Equal(t, []byte("randra"), result) + }) + + t.Run("expected len is zero", func(t *testing.T) { + t.Parallel() + + result := calcNormalizedRandomness([]byte("rand"), 0) + require.Empty(t, result) + }) +} diff --git a/epochStart/metachain/common.go b/epochStart/metachain/common.go new file mode 100644 index 00000000000..9eb614772ab --- /dev/null +++ b/epochStart/metachain/common.go @@ -0,0 +1,16 @@ +package metachain + +import "github.com/multiversx/mx-chain-go/state" + +// GetAllNodeKeys returns all from the provided map +func GetAllNodeKeys(validatorsInfo state.ShardValidatorsInfoMapHandler) map[uint32][][]byte { + nodeKeys := make(map[uint32][][]byte) + for shardID, validatorsInfoSlice := range validatorsInfo.GetShardValidatorsInfoMap() { + nodeKeys[shardID] = make([][]byte, 0) + for _, validatorInfo := range validatorsInfoSlice { + nodeKeys[shardID] = append(nodeKeys[shardID], validatorInfo.GetPublicKey()) + } + } + + return nodeKeys +} diff --git a/epochStart/metachain/economicsDataProvider.go b/epochStart/metachain/economicsDataProvider.go index c39eb917521..ec165ffe80a 100644 --- a/epochStart/metachain/economicsDataProvider.go +++ b/epochStart/metachain/economicsDataProvider.go @@ -53,7 +53,7 @@ func (es *epochEconomicsStatistics) SetLeadersFees(fees *big.Int) { } // SetRewardsToBeDistributed sets the rewards to be distributed at the end of the epoch (includes the rewards per block, -//the block producers fees, protocol sustainability rewards and developer fees) +// the block producers fees, protocol sustainability rewards and developer fees) func (es *epochEconomicsStatistics) SetRewardsToBeDistributed(rewards *big.Int) { es.mutEconomicsStatistics.Lock() defer es.mutEconomicsStatistics.Unlock() @@ -99,7 +99,7 @@ func (es *epochEconomicsStatistics) LeaderFees() *big.Int { } // RewardsToBeDistributed returns the rewards to be distributed at the end of epoch (includes rewards for produced -//blocks, protocol sustainability rewards, block producer fees and developer fees) +// blocks, protocol sustainability rewards, block producer fees and developer fees) func (es *epochEconomicsStatistics) RewardsToBeDistributed() *big.Int { es.mutEconomicsStatistics.RLock() defer es.mutEconomicsStatistics.RUnlock() diff --git a/epochStart/metachain/epochStartData.go b/epochStart/metachain/epochStartData.go index 1c6bd30516e..1a67b3a3692 100644 --- a/epochStart/metachain/epochStartData.go +++ b/epochStart/metachain/epochStartData.go @@ -289,7 +289,7 @@ func (e *epochStartData) getShardDataFromEpochStartData( } epochStartIdentifier := core.EpochStartIdentifier(prevEpoch) - if prevEpoch == 0 { + if prevEpoch == e.genesisEpoch { return lastMetaHash, []byte(epochStartIdentifier), nil } diff --git a/epochStart/metachain/errors.go b/epochStart/metachain/errors.go new file mode 100644 index 00000000000..319bf83dafd --- /dev/null +++ b/epochStart/metachain/errors.go @@ -0,0 +1,11 @@ +package metachain + +import 
"errors" + +var errNilValidatorsInfoMap = errors.New("received nil shard validators info map") + +var errCannotComputeDenominator = errors.New("cannot compute denominator value") + +var errNilAuctionListDisplayHandler = errors.New("nil auction list display handler provided") + +var errNilTableDisplayHandler = errors.New("nil table display handler provided") diff --git a/epochStart/metachain/interface.go b/epochStart/metachain/interface.go new file mode 100644 index 00000000000..1e141fc079f --- /dev/null +++ b/epochStart/metachain/interface.go @@ -0,0 +1,24 @@ +package metachain + +import ( + "github.com/multiversx/mx-chain-core-go/display" + "github.com/multiversx/mx-chain-go/state" +) + +// AuctionListDisplayHandler should be able to display auction list data during selection process +type AuctionListDisplayHandler interface { + DisplayOwnersData(ownersData map[string]*OwnerAuctionData) + DisplayOwnersSelectedNodes(ownersData map[string]*OwnerAuctionData) + DisplayAuctionList( + auctionList []state.ValidatorInfoHandler, + ownersData map[string]*OwnerAuctionData, + numOfSelectedNodes uint32, + ) + IsInterfaceNil() bool +} + +// TableDisplayHandler should be able to display tables in log +type TableDisplayHandler interface { + DisplayTable(tableHeader []string, lines []*display.LineData, message string) + IsInterfaceNil() bool +} diff --git a/epochStart/metachain/legacySystemSCs.go b/epochStart/metachain/legacySystemSCs.go new file mode 100644 index 00000000000..327a5ab88e5 --- /dev/null +++ b/epochStart/metachain/legacySystemSCs.go @@ -0,0 +1,1340 @@ +package metachain + +import ( + "bytes" + "context" + "fmt" + "math" + "math/big" + "sort" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/core/atomic" + "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-core-go/data/block" + "github.com/multiversx/mx-chain-core-go/marshal" + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/common/errChan" + vInfo "github.com/multiversx/mx-chain-go/common/validatorInfo" + "github.com/multiversx/mx-chain-go/epochStart" + "github.com/multiversx/mx-chain-go/process" + "github.com/multiversx/mx-chain-go/sharding" + "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" + "github.com/multiversx/mx-chain-go/state" + "github.com/multiversx/mx-chain-go/vm" + "github.com/multiversx/mx-chain-go/vm/systemSmartContracts" + vmcommon "github.com/multiversx/mx-chain-vm-common-go" +) + +type legacySystemSCProcessor struct { + systemVM vmcommon.VMExecutionHandler + userAccountsDB state.AccountsAdapter + marshalizer marshal.Marshalizer + peerAccountsDB state.AccountsAdapter + chanceComputer nodesCoordinator.ChanceComputer + shardCoordinator sharding.Coordinator + startRating uint32 + validatorInfoCreator epochStart.ValidatorInfoCreator + genesisNodesConfig sharding.GenesisNodesSetupHandler + nodesConfigProvider epochStart.NodesConfigProvider + stakingDataProvider epochStart.StakingDataProvider + maxNodesChangeConfigProvider epochStart.MaxNodesChangeConfigProvider + endOfEpochCallerAddress []byte + stakingSCAddress []byte + esdtOwnerAddressBytes []byte + mapNumSwitchedPerShard map[uint32]uint32 + mapNumSwitchablePerShard map[uint32]uint32 + maxNodes uint32 + + flagChangeMaxNodesEnabled atomic.Flag + enableEpochsHandler common.EnableEpochsHandler +} + +func newLegacySystemSCProcessor(args ArgsNewEpochStartSystemSCProcessing) (*legacySystemSCProcessor, error) { + err 
:= checkLegacyArgs(args) + if err != nil { + return nil, err + } + + legacy := &legacySystemSCProcessor{ + systemVM: args.SystemVM, + userAccountsDB: args.UserAccountsDB, + peerAccountsDB: args.PeerAccountsDB, + marshalizer: args.Marshalizer, + startRating: args.StartRating, + validatorInfoCreator: args.ValidatorInfoCreator, + genesisNodesConfig: args.GenesisNodesConfig, + endOfEpochCallerAddress: args.EndOfEpochCallerAddress, + stakingSCAddress: args.StakingSCAddress, + chanceComputer: args.ChanceComputer, + mapNumSwitchedPerShard: make(map[uint32]uint32), + mapNumSwitchablePerShard: make(map[uint32]uint32), + stakingDataProvider: args.StakingDataProvider, + nodesConfigProvider: args.NodesConfigProvider, + shardCoordinator: args.ShardCoordinator, + esdtOwnerAddressBytes: args.ESDTOwnerAddressBytes, + maxNodesChangeConfigProvider: args.MaxNodesChangeConfigProvider, + enableEpochsHandler: args.EnableEpochsHandler, + } + + return legacy, nil +} + +func checkLegacyArgs(args ArgsNewEpochStartSystemSCProcessing) error { + if check.IfNilReflect(args.SystemVM) { + return epochStart.ErrNilSystemVM + } + if check.IfNil(args.UserAccountsDB) { + return epochStart.ErrNilAccountsDB + } + if check.IfNil(args.PeerAccountsDB) { + return epochStart.ErrNilAccountsDB + } + if check.IfNil(args.Marshalizer) { + return epochStart.ErrNilMarshalizer + } + if check.IfNil(args.ValidatorInfoCreator) { + return epochStart.ErrNilValidatorInfoProcessor + } + if len(args.EndOfEpochCallerAddress) == 0 { + return epochStart.ErrNilEndOfEpochCallerAddress + } + if len(args.StakingSCAddress) == 0 { + return epochStart.ErrNilStakingSCAddress + } + if check.IfNil(args.ChanceComputer) { + return epochStart.ErrNilChanceComputer + } + if check.IfNil(args.GenesisNodesConfig) { + return epochStart.ErrNilGenesisNodesConfig + } + if check.IfNil(args.NodesConfigProvider) { + return epochStart.ErrNilNodesConfigProvider + } + if check.IfNil(args.StakingDataProvider) { + return epochStart.ErrNilStakingDataProvider + } + if check.IfNil(args.ShardCoordinator) { + return epochStart.ErrNilShardCoordinator + } + if check.IfNil(args.MaxNodesChangeConfigProvider) { + return epochStart.ErrNilMaxNodesChangeConfigProvider + } + if check.IfNil(args.EnableEpochsHandler) { + return process.ErrNilEnableEpochsHandler + } + if len(args.ESDTOwnerAddressBytes) == 0 { + return epochStart.ErrEmptyESDTOwnerAddress + } + + return nil +} + +func (s *legacySystemSCProcessor) processLegacy( + validatorsInfoMap state.ShardValidatorsInfoMapHandler, + nonce uint64, + epoch uint32, +) error { + if s.enableEpochsHandler.IsFlagEnabled(common.SwitchHysteresisForMinNodesFlagInSpecificEpochOnly) { + err := s.updateSystemSCConfigMinNodes() + if err != nil { + return err + } + } + + if s.enableEpochsHandler.IsFlagEnabled(common.StakingV2OwnerFlagInSpecificEpochOnly) { + err := s.updateOwnersForBlsKeys() + if err != nil { + return err + } + } + + if s.flagChangeMaxNodesEnabled.IsSet() { + err := s.updateMaxNodes(validatorsInfoMap, nonce) + if err != nil { + return err + } + } + + if s.enableEpochsHandler.IsFlagEnabled(common.CorrectLastUnJailedFlagInSpecificEpochOnly) { + err := s.resetLastUnJailed() + if err != nil { + return err + } + } + + if s.enableEpochsHandler.IsFlagEnabled(common.DelegationSmartContractFlagInSpecificEpochOnly) { + err := s.initDelegationSystemSC() + if err != nil { + return err + } + } + + if s.enableEpochsHandler.IsFlagEnabled(common.CorrectLastUnJailedFlag) && !s.enableEpochsHandler.IsFlagEnabled(common.StakingV4Step2Flag) { + err := 
s.cleanAdditionalQueue() + if err != nil { + return err + } + } + + if s.enableEpochsHandler.IsFlagEnabled(common.SwitchJailWaitingFlag) && !s.enableEpochsHandler.IsFlagEnabled(common.StakingV4Step2Flag) { + err := s.computeNumWaitingPerShard(validatorsInfoMap) + if err != nil { + return err + } + + err = s.swapJailedWithWaiting(validatorsInfoMap) + if err != nil { + return err + } + } + + if s.enableEpochsHandler.IsFlagEnabled(common.StakingV2Flag) && !s.enableEpochsHandler.IsFlagEnabled(common.StakingV4Step2Flag) { + err := s.prepareStakingDataForEligibleNodes(validatorsInfoMap) + if err != nil { + return err + } + + err = s.fillStakingDataForNonEligible(validatorsInfoMap) + if err != nil { + return err + } + + numUnStaked, err := s.unStakeNodesWithNotEnoughFunds(validatorsInfoMap, epoch) + if err != nil { + return err + } + + if s.enableEpochsHandler.IsFlagEnabled(common.StakingQueueFlag) { + err = s.stakeNodesFromQueue(validatorsInfoMap, numUnStaked, nonce, common.NewList) + if err != nil { + return err + } + } + } + + if s.enableEpochsHandler.IsFlagEnabled(common.ESDTFlagInSpecificEpochOnly) { + err := s.initESDT() + if err != nil { + // not a critical error + log.Error("error while initializing ESDT", "err", err) + } + } + + return nil +} + +// ToggleUnStakeUnBond will pause/unPause the unStake/unBond functions on the validator system sc +func (s *legacySystemSCProcessor) ToggleUnStakeUnBond(value bool) error { + if !s.enableEpochsHandler.IsFlagEnabled(common.StakingV2Flag) { + return nil + } + + vmInput := &vmcommon.ContractCallInput{ + VMInput: vmcommon.VMInput{ + CallerAddr: s.endOfEpochCallerAddress, + Arguments: nil, + CallValue: big.NewInt(0), + }, + RecipientAddr: vm.ValidatorSCAddress, + Function: "unPauseUnStakeUnBond", + } + + if value { + vmInput.Function = "pauseUnStakeUnBond" + } + + vmOutput, err := s.systemVM.RunSmartContractCall(vmInput) + if err != nil { + return err + } + + if vmOutput.ReturnCode != vmcommon.Ok { + return epochStart.ErrSystemValidatorSCCall + } + + err = s.processSCOutputAccounts(vmOutput) + if err != nil { + return err + } + + return nil +} + +func (s *legacySystemSCProcessor) unStakeNodesWithNotEnoughFunds( + validatorsInfoMap state.ShardValidatorsInfoMapHandler, + epoch uint32, +) (uint32, error) { + nodesToUnStake, mapOwnersKeys, err := s.stakingDataProvider.ComputeUnQualifiedNodes(validatorsInfoMap) + if err != nil { + return 0, err + } + + nodesUnStakedFromAdditionalQueue := uint32(0) + + log.Debug("unStake nodes with not enough funds", "num", len(nodesToUnStake)) + for _, blsKey := range nodesToUnStake { + log.Debug("unStake at end of epoch for node", "blsKey", blsKey) + err = s.unStakeOneNode(blsKey, epoch) + if err != nil { + return 0, err + } + + validatorInfo := validatorsInfoMap.GetValidator(blsKey) + if validatorInfo == nil { + nodesUnStakedFromAdditionalQueue++ + log.Debug("unStaked node which was in additional queue", "blsKey", blsKey) + continue + } + + validatorLeaving := validatorInfo.ShallowClone() + validatorLeaving.SetListAndIndex(string(common.LeavingList), validatorLeaving.GetIndex(), s.enableEpochsHandler.IsFlagEnabled(common.StakingV4StartedFlag)) + err = validatorsInfoMap.Replace(validatorInfo, validatorLeaving) + if err != nil { + return 0, err + } + } + + err = s.updateDelegationContracts(mapOwnersKeys) + if err != nil { + return 0, err + } + + nodesToStakeFromQueue := uint32(len(nodesToUnStake)) + nodesToStakeFromQueue -= nodesUnStakedFromAdditionalQueue + + log.Debug("stake nodes from waiting list", "num", 
nodesToStakeFromQueue) + return nodesToStakeFromQueue, nil +} + +func (s *legacySystemSCProcessor) unStakeOneNode(blsKey []byte, epoch uint32) error { + vmInput := &vmcommon.ContractCallInput{ + VMInput: vmcommon.VMInput{ + CallerAddr: s.endOfEpochCallerAddress, + Arguments: [][]byte{blsKey}, + CallValue: big.NewInt(0), + }, + RecipientAddr: s.stakingSCAddress, + Function: "unStakeAtEndOfEpoch", + } + + vmOutput, err := s.systemVM.RunSmartContractCall(vmInput) + if err != nil { + return err + } + if vmOutput.ReturnCode != vmcommon.Ok { + log.Debug("unStakeOneNode", "returnMessage", vmOutput.ReturnMessage, "returnCode", vmOutput.ReturnCode.String()) + return epochStart.ErrUnStakeExecuteError + } + + err = s.processSCOutputAccounts(vmOutput) + if err != nil { + return err + } + + account, errExists := s.peerAccountsDB.GetExistingAccount(blsKey) + if errExists != nil { + return nil + } + + peerAccount, ok := account.(state.PeerAccountHandler) + if !ok { + return epochStart.ErrWrongTypeAssertion + } + + peerAccount.SetListAndIndex(peerAccount.GetShardId(), string(common.LeavingList), peerAccount.GetIndexInList(), s.enableEpochsHandler.IsFlagEnabled(common.StakingV4StartedFlag)) + peerAccount.SetUnStakedEpoch(epoch) + err = s.peerAccountsDB.SaveAccount(peerAccount) + if err != nil { + return err + } + + return nil +} + +func (s *legacySystemSCProcessor) updateDelegationContracts(mapOwnerKeys map[string][][]byte) error { + sortedDelegationsSCs := make([]string, 0, len(mapOwnerKeys)) + for address := range mapOwnerKeys { + shardId := s.shardCoordinator.ComputeId([]byte(address)) + if shardId != core.MetachainShardId { + continue + } + sortedDelegationsSCs = append(sortedDelegationsSCs, address) + } + + sort.Slice(sortedDelegationsSCs, func(i, j int) bool { + return sortedDelegationsSCs[i] < sortedDelegationsSCs[j] + }) + + for _, address := range sortedDelegationsSCs { + vmInput := &vmcommon.ContractCallInput{ + VMInput: vmcommon.VMInput{ + CallerAddr: s.endOfEpochCallerAddress, + Arguments: mapOwnerKeys[address], + CallValue: big.NewInt(0), + }, + RecipientAddr: []byte(address), + Function: "unStakeAtEndOfEpoch", + } + + vmOutput, err := s.systemVM.RunSmartContractCall(vmInput) + if err != nil { + return err + } + if vmOutput.ReturnCode != vmcommon.Ok { + log.Debug("unStakeAtEndOfEpoch", "returnMessage", vmOutput.ReturnMessage, "returnCode", vmOutput.ReturnCode.String()) + return epochStart.ErrUnStakeExecuteError + } + + err = s.processSCOutputAccounts(vmOutput) + if err != nil { + return err + } + } + + return nil +} + +func (s *legacySystemSCProcessor) fillStakingDataForNonEligible(validatorsInfoMap state.ShardValidatorsInfoMapHandler) error { + for shId, validatorsInfoSlice := range validatorsInfoMap.GetShardValidatorsInfoMap() { + newList := make([]state.ValidatorInfoHandler, 0, len(validatorsInfoSlice)) + deleteCalled := false + + for _, validatorInfo := range validatorsInfoSlice { + if vInfo.WasEligibleInCurrentEpoch(validatorInfo) { + newList = append(newList, validatorInfo) + continue + } + + err := s.stakingDataProvider.FillValidatorInfo(validatorInfo) + if err != nil { + deleteCalled = true + + log.Error("fillStakingDataForNonEligible", "error", err) + if len(validatorInfo.GetList()) > 0 { + return err + } + + err = s.peerAccountsDB.RemoveAccount(validatorInfo.GetPublicKey()) + if err != nil { + log.Error("fillStakingDataForNonEligible removeAccount", "error", err) + } + + continue + } + + newList = append(newList, validatorInfo) + } + + if deleteCalled { + err := 
validatorsInfoMap.SetValidatorsInShard(shId, newList) + if err != nil { + return err + } + } + } + + return nil +} + +func (s *legacySystemSCProcessor) prepareStakingDataForEligibleNodes(validatorsInfoMap state.ShardValidatorsInfoMapHandler) error { + eligibleNodes, err := getEligibleNodeKeys(validatorsInfoMap) + if err != nil { + return err + } + + return s.prepareStakingData(eligibleNodes) +} + +func (s *legacySystemSCProcessor) prepareStakingData(validatorsInfoMap state.ShardValidatorsInfoMapHandler) error { + sw := core.NewStopWatch() + sw.Start("prepareStakingDataForRewards") + defer func() { + sw.Stop("prepareStakingDataForRewards") + log.Debug("systemSCProcessor.prepareStakingDataForRewards time measurements", sw.GetMeasurements()...) + }() + + return s.stakingDataProvider.PrepareStakingData(validatorsInfoMap) +} + +func getEligibleNodeKeys( + validatorsInfoMap state.ShardValidatorsInfoMapHandler, +) (state.ShardValidatorsInfoMapHandler, error) { + eligibleNodesKeys := state.NewShardValidatorsInfoMap() + for _, validatorInfo := range validatorsInfoMap.GetAllValidatorsInfo() { + if vInfo.WasEligibleInCurrentEpoch(validatorInfo) { + err := eligibleNodesKeys.Add(validatorInfo.ShallowClone()) + if err != nil { + log.Error("getEligibleNodeKeys: could not add validator info in map", "error", err) + return nil, err + } + } + } + + return eligibleNodesKeys, nil +} + +// ProcessDelegationRewards will process the rewards which are directed towards the delegation system smart contracts +func (s *legacySystemSCProcessor) ProcessDelegationRewards( + miniBlocks block.MiniBlockSlice, + txCache epochStart.TransactionCacher, +) error { + if txCache == nil { + return epochStart.ErrNilLocalTxCache + } + + rwdMb := getRewardsMiniBlockForMeta(miniBlocks) + if rwdMb == nil { + return nil + } + + for _, txHash := range rwdMb.TxHashes { + rwdTx, err := txCache.GetTx(txHash) + if err != nil { + return err + } + + err = s.executeRewardTx(rwdTx) + if err != nil { + return err + } + } + + return nil +} + +func (s *legacySystemSCProcessor) executeRewardTx(rwdTx data.TransactionHandler) error { + vmInput := &vmcommon.ContractCallInput{ + VMInput: vmcommon.VMInput{ + CallerAddr: s.endOfEpochCallerAddress, + Arguments: nil, + CallValue: rwdTx.GetValue(), + }, + RecipientAddr: rwdTx.GetRcvAddr(), + Function: "updateRewards", + } + + vmOutput, err := s.systemVM.RunSmartContractCall(vmInput) + if err != nil { + return err + } + + if vmOutput.ReturnCode != vmcommon.Ok { + return epochStart.ErrSystemDelegationCall + } + + err = s.processSCOutputAccounts(vmOutput) + if err != nil { + return err + } + + return nil +} + +// updates the configuration of the system SC if the flags permit +func (s *legacySystemSCProcessor) updateSystemSCConfigMinNodes() error { + minNumberOfNodesWithHysteresis := s.genesisNodesConfig.MinNumberOfNodesWithHysteresis() + err := s.setMinNumberOfNodes(minNumberOfNodesWithHysteresis) + + return err +} + +func (s *legacySystemSCProcessor) resetLastUnJailed() error { + vmInput := &vmcommon.ContractCallInput{ + VMInput: vmcommon.VMInput{ + CallerAddr: s.endOfEpochCallerAddress, + Arguments: [][]byte{}, + CallValue: big.NewInt(0), + }, + RecipientAddr: s.stakingSCAddress, + Function: "resetLastUnJailedFromQueue", + } + + vmOutput, err := s.systemVM.RunSmartContractCall(vmInput) + if err != nil { + return err + } + + if vmOutput.ReturnCode != vmcommon.Ok { + return epochStart.ErrResetLastUnJailedFromQueue + } + + err = s.processSCOutputAccounts(vmOutput) + if err != nil { + return err + } + + return 
nil +} + +// updates the configuration of the system SC if the flags permit +func (s *legacySystemSCProcessor) updateMaxNodes(validatorsInfoMap state.ShardValidatorsInfoMapHandler, nonce uint64) error { + sw := core.NewStopWatch() + sw.Start("total") + defer func() { + sw.Stop("total") + log.Debug("systemSCProcessor.updateMaxNodes", sw.GetMeasurements()...) + }() + + maxNumberOfNodes := s.maxNodes + sw.Start("setMaxNumberOfNodes") + prevMaxNumberOfNodes, err := s.setMaxNumberOfNodes(maxNumberOfNodes) + sw.Stop("setMaxNumberOfNodes") + if err != nil { + return err + } + + if s.enableEpochsHandler.IsFlagEnabled(common.StakingQueueFlag) { + sw.Start("stakeNodesFromQueue") + err = s.stakeNodesFromQueue(validatorsInfoMap, maxNumberOfNodes-prevMaxNumberOfNodes, nonce, common.NewList) + sw.Stop("stakeNodesFromQueue") + if err != nil { + return err + } + } + return nil +} + +func (s *legacySystemSCProcessor) computeNumWaitingPerShard(validatorsInfoMap state.ShardValidatorsInfoMapHandler) error { + for shardID, validatorInfoList := range validatorsInfoMap.GetShardValidatorsInfoMap() { + totalInWaiting := uint32(0) + for _, validatorInfo := range validatorInfoList { + switch validatorInfo.GetList() { + case string(common.WaitingList): + totalInWaiting++ + } + } + s.mapNumSwitchablePerShard[shardID] = totalInWaiting + s.mapNumSwitchedPerShard[shardID] = 0 + } + return nil +} + +func (s *legacySystemSCProcessor) swapJailedWithWaiting(validatorsInfoMap state.ShardValidatorsInfoMapHandler) error { + jailedValidators := s.getSortedJailedNodes(validatorsInfoMap) + + log.Debug("number of jailed validators", "num", len(jailedValidators)) + + newValidators := make(map[string]struct{}) + for _, jailedValidator := range jailedValidators { + if _, ok := newValidators[string(jailedValidator.GetPublicKey())]; ok { + continue + } + if isValidator(jailedValidator) && s.mapNumSwitchablePerShard[jailedValidator.GetShardId()] <= s.mapNumSwitchedPerShard[jailedValidator.GetShardId()] { + log.Debug("cannot switch in this epoch anymore for this shard as switched num waiting", + "shardID", jailedValidator.GetShardId(), + "numSwitched", s.mapNumSwitchedPerShard[jailedValidator.GetShardId()]) + continue + } + + vmInput := &vmcommon.ContractCallInput{ + VMInput: vmcommon.VMInput{ + CallerAddr: s.endOfEpochCallerAddress, + Arguments: [][]byte{jailedValidator.GetPublicKey()}, + CallValue: big.NewInt(0), + }, + RecipientAddr: s.stakingSCAddress, + Function: "switchJailedWithWaiting", + } + + vmOutput, err := s.systemVM.RunSmartContractCall(vmInput) + if err != nil { + return err + } + + log.Debug("switchJailedWithWaiting called for", + "key", jailedValidator.GetPublicKey(), + "returnMessage", vmOutput.ReturnMessage) + if vmOutput.ReturnCode != vmcommon.Ok { + continue + } + + newValidator, err := s.stakingToValidatorStatistics(validatorsInfoMap, jailedValidator, vmOutput) + if err != nil { + return err + } + + if len(newValidator) != 0 { + newValidators[string(newValidator)] = struct{}{} + } + } + + return nil +} + +func (s *legacySystemSCProcessor) stakingToValidatorStatistics( + validatorsInfoMap state.ShardValidatorsInfoMapHandler, + jailedValidator state.ValidatorInfoHandler, + vmOutput *vmcommon.VMOutput, +) ([]byte, error) { + stakingSCOutput, ok := vmOutput.OutputAccounts[string(s.stakingSCAddress)] + if !ok { + return nil, epochStart.ErrStakingSCOutputAccountNotFound + } + + var activeStorageUpdate *vmcommon.StorageUpdate + for _, storageUpdate := range stakingSCOutput.StorageUpdates { + isNewValidatorKey := 
len(storageUpdate.Offset) == len(jailedValidator.GetPublicKey()) && + !bytes.Equal(storageUpdate.Offset, jailedValidator.GetPublicKey()) + if isNewValidatorKey { + activeStorageUpdate = storageUpdate + break + } + } + if activeStorageUpdate == nil { + log.Debug("no one in waiting suitable for switch") + if s.enableEpochsHandler.IsFlagEnabled(common.SaveJailedAlwaysFlag) { + err := s.processSCOutputAccounts(vmOutput) + if err != nil { + return nil, err + } + } + + return nil, nil + } + + err := s.processSCOutputAccounts(vmOutput) + if err != nil { + return nil, err + } + + var stakingData systemSmartContracts.StakedDataV2_0 + err = s.marshalizer.Unmarshal(&stakingData, activeStorageUpdate.Data) + if err != nil { + return nil, err + } + + blsPubKey := activeStorageUpdate.Offset + log.Debug("staking validator key who switches with the jailed one", "blsKey", blsPubKey) + account, isNew, err := state.GetPeerAccountAndReturnIfNew(s.peerAccountsDB, blsPubKey) + if err != nil { + return nil, err + } + + if !bytes.Equal(account.GetRewardAddress(), stakingData.RewardAddress) { + err = account.SetRewardAddress(stakingData.RewardAddress) + if err != nil { + return nil, err + } + } + + if !isNew { + err = validatorsInfoMap.Delete(jailedValidator) + if err != nil { + return nil, err + } + } + + account.SetListAndIndex(jailedValidator.GetShardId(), string(common.NewList), uint32(stakingData.StakedNonce), s.enableEpochsHandler.IsFlagEnabled(common.StakingV4StartedFlag)) + account.SetTempRating(s.startRating) + account.SetUnStakedEpoch(common.DefaultUnstakedEpoch) + + err = s.peerAccountsDB.SaveAccount(account) + if err != nil { + return nil, err + } + + jailedAccount, err := s.getPeerAccount(jailedValidator.GetPublicKey()) + if err != nil { + return nil, err + } + + jailedAccount.SetListAndIndex(jailedValidator.GetShardId(), string(common.JailedList), jailedValidator.GetIndex(), s.enableEpochsHandler.IsFlagEnabled(common.StakingV4StartedFlag)) + jailedAccount.ResetAtNewEpoch() + err = s.peerAccountsDB.SaveAccount(jailedAccount) + if err != nil { + return nil, err + } + + if isValidator(jailedValidator) { + s.mapNumSwitchedPerShard[jailedValidator.GetShardId()]++ + } + + newValidatorInfo := s.validatorInfoCreator.PeerAccountToValidatorInfo(account) + err = validatorsInfoMap.Replace(jailedValidator, newValidatorInfo) + if err != nil { + return nil, err + } + + return blsPubKey, nil +} + +func isValidator(validator state.ValidatorInfoHandler) bool { + return validator.GetList() == string(common.WaitingList) || validator.GetList() == string(common.EligibleList) +} + +func (s *legacySystemSCProcessor) getUserAccount(address []byte) (state.UserAccountHandler, error) { + acnt, err := s.userAccountsDB.LoadAccount(address) + if err != nil { + return nil, err + } + + stAcc, ok := acnt.(state.UserAccountHandler) + if !ok { + return nil, process.ErrWrongTypeAssertion + } + + return stAcc, nil +} + +// save account changes in state from vmOutput - protected by VM - every output can be treated as is. 
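The function defined next applies every output account produced by a system smart contract call back onto user state (sorted storage updates, balance deltas, account save). Because the same run-call / check-return-code / apply-output sequence recurs throughout this file, a wrapper for it could look like the sketch below. This is an illustration only, not part of the change; the helper name runSystemSCCallAndSaveOutput is assumed for the example.

func (s *legacySystemSCProcessor) runSystemSCCallAndSaveOutput(vmInput *vmcommon.ContractCallInput) error {
	// run the call through the system VM, as done for every system SC interaction in this file
	vmOutput, err := s.systemVM.RunSmartContractCall(vmInput)
	if err != nil {
		return err
	}
	if vmOutput.ReturnCode != vmcommon.Ok {
		return fmt.Errorf("system SC call %s failed with return code %s", vmInput.Function, vmOutput.ReturnCode)
	}
	// persist the storage updates and balance deltas produced by the call
	return s.processSCOutputAccounts(vmOutput)
}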
+func (s *legacySystemSCProcessor) processSCOutputAccounts( + vmOutput *vmcommon.VMOutput, +) error { + + outputAccounts := process.SortVMOutputInsideData(vmOutput) + for _, outAcc := range outputAccounts { + acc, err := s.getUserAccount(outAcc.Address) + if err != nil { + return err + } + + storageUpdates := process.GetSortedStorageUpdates(outAcc) + for _, storeUpdate := range storageUpdates { + err = acc.SaveKeyValue(storeUpdate.Offset, storeUpdate.Data) + if err != nil { + return err + } + } + + if outAcc.BalanceDelta != nil && outAcc.BalanceDelta.Cmp(zero) != 0 { + err = acc.AddToBalance(outAcc.BalanceDelta) + if err != nil { + return err + } + } + + err = s.userAccountsDB.SaveAccount(acc) + if err != nil { + return err + } + } + + return nil +} + +func (s *legacySystemSCProcessor) getSortedJailedNodes(validatorsInfoMap state.ShardValidatorsInfoMapHandler) []state.ValidatorInfoHandler { + newJailedValidators := make([]state.ValidatorInfoHandler, 0) + oldJailedValidators := make([]state.ValidatorInfoHandler, 0) + + minChance := s.chanceComputer.GetChance(0) + for _, validatorInfo := range validatorsInfoMap.GetAllValidatorsInfo() { + if validatorInfo.GetList() == string(common.JailedList) { + oldJailedValidators = append(oldJailedValidators, validatorInfo) + } else if s.chanceComputer.GetChance(validatorInfo.GetTempRating()) < minChance { + newJailedValidators = append(newJailedValidators, validatorInfo) + } + + } + + sort.Sort(validatorList(oldJailedValidators)) + sort.Sort(validatorList(newJailedValidators)) + + return append(oldJailedValidators, newJailedValidators...) +} + +func (s *legacySystemSCProcessor) getPeerAccount(key []byte) (state.PeerAccountHandler, error) { + account, err := s.peerAccountsDB.LoadAccount(key) + if err != nil { + return nil, err + } + + peerAcc, ok := account.(state.PeerAccountHandler) + if !ok { + return nil, epochStart.ErrWrongTypeAssertion + } + + return peerAcc, nil +} + +func (s *legacySystemSCProcessor) setMinNumberOfNodes(minNumNodes uint32) error { + vmInput := &vmcommon.ContractCallInput{ + VMInput: vmcommon.VMInput{ + CallerAddr: s.endOfEpochCallerAddress, + Arguments: [][]byte{big.NewInt(int64(minNumNodes)).Bytes()}, + CallValue: big.NewInt(0), + }, + RecipientAddr: s.stakingSCAddress, + Function: "updateConfigMinNodes", + } + + vmOutput, err := s.systemVM.RunSmartContractCall(vmInput) + if err != nil { + return err + } + + log.Debug("setMinNumberOfNodes called with", + "minNumNodes", minNumNodes, + "returnMessage", vmOutput.ReturnMessage) + + if vmOutput.ReturnCode != vmcommon.Ok { + return epochStart.ErrInvalidMinNumberOfNodes + } + + err = s.processSCOutputAccounts(vmOutput) + if err != nil { + return err + } + + return nil +} + +func (s *legacySystemSCProcessor) setMaxNumberOfNodes(maxNumNodes uint32) (uint32, error) { + vmInput := &vmcommon.ContractCallInput{ + VMInput: vmcommon.VMInput{ + CallerAddr: s.endOfEpochCallerAddress, + Arguments: [][]byte{big.NewInt(int64(maxNumNodes)).Bytes()}, + CallValue: big.NewInt(0), + }, + RecipientAddr: s.stakingSCAddress, + Function: "updateConfigMaxNodes", + } + + vmOutput, err := s.systemVM.RunSmartContractCall(vmInput) + if err != nil { + return 0, err + } + + log.Debug("setMaxNumberOfNodes called with", + "maxNumNodes", maxNumNodes, + "current maxNumNodes in legacySystemSCProcessor", s.maxNodes, + "returnMessage", vmOutput.ReturnMessage) + + if vmOutput.ReturnCode != vmcommon.Ok { + return 0, epochStart.ErrInvalidMaxNumberOfNodes + } + if len(vmOutput.ReturnData) != 1 { + return 0, 
epochStart.ErrInvalidSystemSCReturn + } + + err = s.processSCOutputAccounts(vmOutput) + if err != nil { + return 0, err + } + + prevMaxNumNodes := big.NewInt(0).SetBytes(vmOutput.ReturnData[0]).Uint64() + return uint32(prevMaxNumNodes), nil +} + +func (s *legacySystemSCProcessor) updateOwnersForBlsKeys() error { + sw := core.NewStopWatch() + sw.Start("systemSCProcessor") + defer func() { + sw.Stop("systemSCProcessor") + log.Debug("systemSCProcessor.updateOwnersForBlsKeys time measurements", sw.GetMeasurements()...) + }() + + sw.Start("getValidatorSystemAccount") + userValidatorAccount, err := s.getValidatorSystemAccount() + sw.Stop("getValidatorSystemAccount") + if err != nil { + return err + } + + sw.Start("getArgumentsForSetOwnerFunctionality") + arguments, err := s.getArgumentsForSetOwnerFunctionality(userValidatorAccount) + sw.Stop("getArgumentsForSetOwnerFunctionality") + if err != nil { + return err + } + + sw.Start("callSetOwnersOnAddresses") + err = s.callSetOwnersOnAddresses(arguments) + sw.Stop("callSetOwnersOnAddresses") + if err != nil { + return err + } + + return nil +} + +func (s *legacySystemSCProcessor) getValidatorSystemAccount() (state.UserAccountHandler, error) { + validatorAccount, err := s.userAccountsDB.LoadAccount(vm.ValidatorSCAddress) + if err != nil { + return nil, fmt.Errorf("%w when loading validator account", err) + } + + userValidatorAccount, ok := validatorAccount.(state.UserAccountHandler) + if !ok { + return nil, fmt.Errorf("%w when loading validator account", epochStart.ErrWrongTypeAssertion) + } + + if check.IfNil(userValidatorAccount.DataTrie()) { + return nil, epochStart.ErrNilDataTrie + } + + return userValidatorAccount, nil +} + +func (s *legacySystemSCProcessor) getArgumentsForSetOwnerFunctionality(userValidatorAccount state.UserAccountHandler) ([][]byte, error) { + arguments := make([][]byte, 0) + + leavesChannels := &common.TrieIteratorChannels{ + LeavesChan: make(chan core.KeyValueHolder, common.TrieLeavesChannelDefaultCapacity), + ErrChan: errChan.NewErrChanWrapper(), + } + err := userValidatorAccount.GetAllLeaves(leavesChannels, context.Background()) + if err != nil { + return nil, err + } + for leaf := range leavesChannels.LeavesChan { + validatorData := &systemSmartContracts.ValidatorDataV2{} + + err = s.marshalizer.Unmarshal(validatorData, leaf.Value()) + if err != nil { + continue + } + for _, blsKey := range validatorData.BlsPubKeys { + arguments = append(arguments, blsKey) + arguments = append(arguments, leaf.Key()) + } + } + + err = leavesChannels.ErrChan.ReadFromChanNonBlocking() + if err != nil { + return nil, err + } + + return arguments, nil +} + +func (s *legacySystemSCProcessor) callSetOwnersOnAddresses(arguments [][]byte) error { + vmInput := &vmcommon.ContractCallInput{ + VMInput: vmcommon.VMInput{ + CallerAddr: vm.EndOfEpochAddress, + CallValue: big.NewInt(0), + Arguments: arguments, + }, + RecipientAddr: vm.StakingSCAddress, + Function: "setOwnersOnAddresses", + } + + vmOutput, errRun := s.systemVM.RunSmartContractCall(vmInput) + if errRun != nil { + return fmt.Errorf("%w when calling setOwnersOnAddresses function", errRun) + } + if vmOutput.ReturnCode != vmcommon.Ok { + return fmt.Errorf("got return code %s when calling setOwnersOnAddresses", vmOutput.ReturnCode) + } + + return s.processSCOutputAccounts(vmOutput) +} + +func (s *legacySystemSCProcessor) initDelegationSystemSC() error { + codeMetaData := &vmcommon.CodeMetadata{ + Upgradeable: false, + Payable: false, + Readable: true, + } + + vmInput := 
&vmcommon.ContractCreateInput{ + VMInput: vmcommon.VMInput{ + CallerAddr: vm.DelegationManagerSCAddress, + Arguments: [][]byte{}, + CallValue: big.NewInt(0), + }, + ContractCode: vm.DelegationManagerSCAddress, + ContractCodeMetadata: codeMetaData.ToBytes(), + } + + vmOutput, err := s.systemVM.RunSmartContractCreate(vmInput) + if err != nil { + return err + } + if vmOutput.ReturnCode != vmcommon.Ok { + return epochStart.ErrCouldNotInitDelegationSystemSC + } + + err = s.processSCOutputAccounts(vmOutput) + if err != nil { + return err + } + + err = s.updateSystemSCContractsCode(vmInput.ContractCodeMetadata) + if err != nil { + return err + } + + return nil +} + +func (s *legacySystemSCProcessor) updateSystemSCContractsCode(contractMetadata []byte) error { + contractsToUpdate := make([][]byte, 0) + contractsToUpdate = append(contractsToUpdate, vm.StakingSCAddress) + contractsToUpdate = append(contractsToUpdate, vm.ValidatorSCAddress) + contractsToUpdate = append(contractsToUpdate, vm.GovernanceSCAddress) + contractsToUpdate = append(contractsToUpdate, vm.ESDTSCAddress) + contractsToUpdate = append(contractsToUpdate, vm.DelegationManagerSCAddress) + contractsToUpdate = append(contractsToUpdate, vm.FirstDelegationSCAddress) + + for _, address := range contractsToUpdate { + userAcc, err := s.getUserAccount(address) + if err != nil { + return err + } + + userAcc.SetOwnerAddress(address) + userAcc.SetCodeMetadata(contractMetadata) + userAcc.SetCode(address) + + err = s.userAccountsDB.SaveAccount(userAcc) + if err != nil { + return err + } + } + + return nil +} + +func (s *legacySystemSCProcessor) cleanAdditionalQueue() error { + sw := core.NewStopWatch() + sw.Start("systemSCProcessor") + defer func() { + sw.Stop("systemSCProcessor") + log.Debug("systemSCProcessor.cleanAdditionalQueue time measurements", sw.GetMeasurements()...) 
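// Note on the return data consumed further below in cleanAdditionalQueue (illustrative, based on
// the existing "returnData format" comment): entries whose length equals the end-of-epoch caller
// address length are treated as owner addresses, and every following entry is a BLS key that was
// unstaked for that owner. With hypothetical values, a return data slice of
// [ownerA, blsKey1, blsKey2, ownerB, blsKey3] is grouped into
// mapOwnersKeys = {ownerA: [blsKey1, blsKey2], ownerB: [blsKey3]}
// before being passed to updateDelegationContracts.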
+ }() + + vmInput := &vmcommon.ContractCallInput{ + VMInput: vmcommon.VMInput{ + CallerAddr: vm.EndOfEpochAddress, + CallValue: big.NewInt(0), + Arguments: [][]byte{}, + }, + RecipientAddr: vm.StakingSCAddress, + Function: "cleanAdditionalQueue", + } + vmOutput, errRun := s.systemVM.RunSmartContractCall(vmInput) + if errRun != nil { + return fmt.Errorf("%w when cleaning additional queue", errRun) + } + if vmOutput.ReturnCode != vmcommon.Ok { + return fmt.Errorf("got return code %s, return message %s when cleaning additional queue", vmOutput.ReturnCode, vmOutput.ReturnMessage) + } + + err := s.processSCOutputAccounts(vmOutput) + if err != nil { + return err + } + + // returnData format is list(address - all blsKeys which were unstaked for that) + addressLength := len(s.endOfEpochCallerAddress) + mapOwnersKeys := make(map[string][][]byte) + currentOwner := "" + for _, returnData := range vmOutput.ReturnData { + if len(returnData) == addressLength { + currentOwner = string(returnData) + continue + } + + if len(currentOwner) != addressLength { + continue + } + + mapOwnersKeys[currentOwner] = append(mapOwnersKeys[currentOwner], returnData) + } + + err = s.updateDelegationContracts(mapOwnersKeys) + if err != nil { + log.Error("update delegation contracts failed after cleaning additional queue", "error", err.Error()) + return err + } + + return nil +} + +func (s *legacySystemSCProcessor) stakeNodesFromQueue( + validatorsInfoMap state.ShardValidatorsInfoMapHandler, + nodesToStake uint32, + nonce uint64, + list common.PeerType, +) error { + if nodesToStake == 0 { + return nil + } + + nodesToStakeAsBigInt := big.NewInt(0).SetUint64(uint64(nodesToStake)) + vmInput := &vmcommon.ContractCallInput{ + VMInput: vmcommon.VMInput{ + CallerAddr: vm.EndOfEpochAddress, + CallValue: big.NewInt(0), + Arguments: [][]byte{nodesToStakeAsBigInt.Bytes()}, + }, + RecipientAddr: vm.StakingSCAddress, + Function: "stakeNodesFromQueue", + } + vmOutput, errRun := s.systemVM.RunSmartContractCall(vmInput) + if errRun != nil { + return fmt.Errorf("%w when staking nodes from waiting list", errRun) + } + if vmOutput.ReturnCode != vmcommon.Ok { + return fmt.Errorf("got return code %s when staking nodes from waiting list", vmOutput.ReturnCode) + } + if len(vmOutput.ReturnData)%2 != 0 { + return fmt.Errorf("%w return data must be divisible by 2 when staking nodes from waiting list", epochStart.ErrInvalidSystemSCReturn) + } + + err := s.processSCOutputAccounts(vmOutput) + if err != nil { + return err + } + + err = s.addNewlyStakedNodesToValidatorTrie(validatorsInfoMap, vmOutput.ReturnData, nonce, list) + if err != nil { + return err + } + + return nil +} + +func (s *legacySystemSCProcessor) addNewlyStakedNodesToValidatorTrie( + validatorsInfoMap state.ShardValidatorsInfoMapHandler, + returnData [][]byte, + nonce uint64, + list common.PeerType, +) error { + for i := 0; i < len(returnData); i += 2 { + blsKey := returnData[i] + rewardAddress := returnData[i+1] + + peerAcc, err := s.getPeerAccount(blsKey) + if err != nil { + return err + } + + err = peerAcc.SetRewardAddress(rewardAddress) + if err != nil { + return err + } + + err = peerAcc.SetBLSPublicKey(blsKey) + if err != nil { + return err + } + + peerAcc.SetListAndIndex(peerAcc.GetShardId(), string(list), uint32(nonce), s.enableEpochsHandler.IsFlagEnabled(common.StakingV4StartedFlag)) + peerAcc.SetTempRating(s.startRating) + peerAcc.SetUnStakedEpoch(common.DefaultUnstakedEpoch) + + err = s.peerAccountsDB.SaveAccount(peerAcc) + if err != nil { + return err + } + + validatorInfo := 
&state.ValidatorInfo{ + PublicKey: blsKey, + ShardId: peerAcc.GetShardId(), + List: string(list), + Index: uint32(nonce), + TempRating: s.startRating, + Rating: s.startRating, + RewardAddress: rewardAddress, + AccumulatedFees: big.NewInt(0), + } + + existingValidator := validatorsInfoMap.GetValidator(validatorInfo.GetPublicKey()) + // This fix is not be backwards incompatible + if !check.IfNil(existingValidator) && s.enableEpochsHandler.IsFlagEnabled(common.StakingV4StartedFlag) { + err = validatorsInfoMap.Delete(existingValidator) + if err != nil { + return err + } + } + + err = validatorsInfoMap.Add(validatorInfo) + if err != nil { + return err + } + } + + return nil +} + +func (s *legacySystemSCProcessor) initESDT() error { + currentConfigValues, err := s.extractConfigFromESDTContract() + if err != nil { + return err + } + + return s.changeESDTOwner(currentConfigValues) +} + +func (s *legacySystemSCProcessor) extractConfigFromESDTContract() ([][]byte, error) { + vmInput := &vmcommon.ContractCallInput{ + VMInput: vmcommon.VMInput{ + CallerAddr: s.endOfEpochCallerAddress, + Arguments: [][]byte{}, + CallValue: big.NewInt(0), + GasProvided: math.MaxUint64, + }, + Function: "getContractConfig", + RecipientAddr: vm.ESDTSCAddress, + } + + output, err := s.systemVM.RunSmartContractCall(vmInput) + if err != nil { + return nil, err + } + if len(output.ReturnData) != 4 { + return nil, fmt.Errorf("%w getContractConfig should have returned 4 values", epochStart.ErrInvalidSystemSCReturn) + } + + return output.ReturnData, nil +} + +func (s *legacySystemSCProcessor) changeESDTOwner(currentConfigValues [][]byte) error { + baseIssuingCost := currentConfigValues[1] + minTokenNameLength := currentConfigValues[2] + maxTokenNameLength := currentConfigValues[3] + + vmInput := &vmcommon.ContractCallInput{ + VMInput: vmcommon.VMInput{ + CallerAddr: s.endOfEpochCallerAddress, + Arguments: [][]byte{s.esdtOwnerAddressBytes, baseIssuingCost, minTokenNameLength, maxTokenNameLength}, + CallValue: big.NewInt(0), + GasProvided: math.MaxUint64, + }, + Function: "configChange", + RecipientAddr: vm.ESDTSCAddress, + } + + output, err := s.systemVM.RunSmartContractCall(vmInput) + if err != nil { + return err + } + if output.ReturnCode != vmcommon.Ok { + return fmt.Errorf("%w changeESDTOwner should have returned Ok", epochStart.ErrInvalidSystemSCReturn) + } + + return s.processSCOutputAccounts(output) +} + +func getRewardsMiniBlockForMeta(miniBlocks block.MiniBlockSlice) *block.MiniBlock { + for _, miniBlock := range miniBlocks { + if miniBlock.Type != block.RewardsBlock { + continue + } + if miniBlock.ReceiverShardID != core.MetachainShardId { + continue + } + return miniBlock + } + return nil +} + +func (s *legacySystemSCProcessor) legacyEpochConfirmed(epoch uint32) { + s.flagChangeMaxNodesEnabled.SetValue(false) + for _, maxNodesConfig := range s.maxNodesChangeConfigProvider.GetAllNodesConfig() { + if epoch == maxNodesConfig.EpochEnable { + s.flagChangeMaxNodesEnabled.SetValue(true) + break + } + } + s.maxNodes = s.maxNodesChangeConfigProvider.GetCurrentNodesConfig().MaxNumNodes + + log.Debug("legacySystemSC: change of maximum number of nodes and/or shuffling percentage", + "enabled", s.flagChangeMaxNodesEnabled.IsSet(), + "epoch", epoch, + "maxNodes", s.maxNodes, + ) +} diff --git a/epochStart/metachain/rewards.go b/epochStart/metachain/rewards.go index 3620070a6e0..0b279d56c32 100644 --- a/epochStart/metachain/rewards.go +++ b/epochStart/metachain/rewards.go @@ -50,7 +50,7 @@ func NewRewardsCreator(args 
ArgsNewRewardsCreator) (*rewardsCreator, error) { // CreateRewardsMiniBlocks creates the rewards miniblocks according to economics data and validator info func (rc *rewardsCreator) CreateRewardsMiniBlocks( metaBlock data.MetaHeaderHandler, - validatorsInfo map[uint32][]*state.ValidatorInfo, + validatorsInfo state.ShardValidatorsInfoMapHandler, computedEconomics *block.Economics, ) (block.MiniBlockSlice, error) { if check.IfNil(metaBlock) { @@ -116,7 +116,7 @@ func (rc *rewardsCreator) adjustProtocolSustainabilityRewards(protocolSustainabi } func (rc *rewardsCreator) addValidatorRewardsToMiniBlocks( - validatorsInfo map[uint32][]*state.ValidatorInfo, + validatorsInfo state.ShardValidatorsInfoMapHandler, metaBlock data.HeaderHandler, miniBlocks block.MiniBlockSlice, protocolSustainabilityRwdTx *rewardTx.RewardTx, @@ -162,41 +162,40 @@ func (rc *rewardsCreator) addValidatorRewardsToMiniBlocks( } func (rc *rewardsCreator) computeValidatorInfoPerRewardAddress( - validatorsInfo map[uint32][]*state.ValidatorInfo, + validatorsInfo state.ShardValidatorsInfoMapHandler, protocolSustainabilityRwd *rewardTx.RewardTx, epoch uint32, ) map[string]*rewardInfoData { rwdAddrValidatorInfo := make(map[string]*rewardInfoData) - for _, shardValidatorsInfo := range validatorsInfo { - for _, validatorInfo := range shardValidatorsInfo { - rewardsPerBlockPerNodeForShard := rc.mapBaseRewardsPerBlockPerValidator[validatorInfo.ShardId] - protocolRewardValue := big.NewInt(0).Mul(rewardsPerBlockPerNodeForShard, big.NewInt(0).SetUint64(uint64(validatorInfo.NumSelectedInSuccessBlocks))) + for _, validatorInfo := range validatorsInfo.GetAllValidatorsInfo() { + rewardsPerBlockPerNodeForShard := rc.mapBaseRewardsPerBlockPerValidator[validatorInfo.GetShardId()] + protocolRewardValue := big.NewInt(0).Mul(rewardsPerBlockPerNodeForShard, big.NewInt(0).SetUint64(uint64(validatorInfo.GetNumSelectedInSuccessBlocks()))) - isFix1Enabled := rc.isRewardsFix1Enabled(epoch) - if isFix1Enabled && validatorInfo.LeaderSuccess == 0 && validatorInfo.ValidatorSuccess == 0 { - protocolSustainabilityRwd.Value.Add(protocolSustainabilityRwd.Value, protocolRewardValue) - continue - } - if !isFix1Enabled && validatorInfo.LeaderSuccess == 0 && validatorInfo.ValidatorFailure == 0 { - protocolSustainabilityRwd.Value.Add(protocolSustainabilityRwd.Value, protocolRewardValue) - continue - } + isFix1Enabled := rc.isRewardsFix1Enabled(epoch) + if isFix1Enabled && validatorInfo.GetLeaderSuccess() == 0 && validatorInfo.GetValidatorSuccess() == 0 { + protocolSustainabilityRwd.Value.Add(protocolSustainabilityRwd.Value, protocolRewardValue) + continue + } + if !isFix1Enabled && validatorInfo.GetLeaderSuccess() == 0 && validatorInfo.GetValidatorFailure() == 0 { + protocolSustainabilityRwd.Value.Add(protocolSustainabilityRwd.Value, protocolRewardValue) + continue + } - rwdInfo, ok := rwdAddrValidatorInfo[string(validatorInfo.RewardAddress)] - if !ok { - rwdInfo = &rewardInfoData{ - accumulatedFees: big.NewInt(0), - rewardsFromProtocol: big.NewInt(0), - address: string(validatorInfo.RewardAddress), - } - rwdAddrValidatorInfo[string(validatorInfo.RewardAddress)] = rwdInfo + rwdInfo, ok := rwdAddrValidatorInfo[string(validatorInfo.GetRewardAddress())] + if !ok { + rwdInfo = &rewardInfoData{ + accumulatedFees: big.NewInt(0), + rewardsFromProtocol: big.NewInt(0), + address: string(validatorInfo.GetRewardAddress()), } - - rwdInfo.accumulatedFees.Add(rwdInfo.accumulatedFees, validatorInfo.AccumulatedFees) - rwdInfo.rewardsFromProtocol.Add(rwdInfo.rewardsFromProtocol, 
protocolRewardValue) + rwdAddrValidatorInfo[string(validatorInfo.GetRewardAddress())] = rwdInfo } + + rwdInfo.accumulatedFees.Add(rwdInfo.accumulatedFees, validatorInfo.GetAccumulatedFees()) + rwdInfo.rewardsFromProtocol.Add(rwdInfo.rewardsFromProtocol, protocolRewardValue) + } return rwdAddrValidatorInfo @@ -205,7 +204,7 @@ func (rc *rewardsCreator) computeValidatorInfoPerRewardAddress( // VerifyRewardsMiniBlocks verifies if received rewards miniblocks are correct func (rc *rewardsCreator) VerifyRewardsMiniBlocks( metaBlock data.MetaHeaderHandler, - validatorsInfo map[uint32][]*state.ValidatorInfo, + validatorsInfo state.ShardValidatorsInfoMapHandler, computedEconomics *block.Economics, ) error { if check.IfNil(metaBlock) { diff --git a/epochStart/metachain/rewardsCreatorProxy.go b/epochStart/metachain/rewardsCreatorProxy.go index 6c183f43f7b..0e770c69629 100644 --- a/epochStart/metachain/rewardsCreatorProxy.go +++ b/epochStart/metachain/rewardsCreatorProxy.go @@ -64,7 +64,7 @@ func NewRewardsCreatorProxy(args RewardsCreatorProxyArgs) (*rewardsCreatorProxy, // CreateRewardsMiniBlocks proxies the CreateRewardsMiniBlocks method of the configured rewardsCreator instance func (rcp *rewardsCreatorProxy) CreateRewardsMiniBlocks( metaBlock data.MetaHeaderHandler, - validatorsInfo map[uint32][]*state.ValidatorInfo, + validatorsInfo state.ShardValidatorsInfoMapHandler, computedEconomics *block.Economics, ) (block.MiniBlockSlice, error) { err := rcp.changeRewardCreatorIfNeeded(metaBlock.GetEpoch()) @@ -77,7 +77,7 @@ func (rcp *rewardsCreatorProxy) CreateRewardsMiniBlocks( // VerifyRewardsMiniBlocks proxies the same method of the configured rewardsCreator instance func (rcp *rewardsCreatorProxy) VerifyRewardsMiniBlocks( metaBlock data.MetaHeaderHandler, - validatorsInfo map[uint32][]*state.ValidatorInfo, + validatorsInfo state.ShardValidatorsInfoMapHandler, computedEconomics *block.Economics, ) error { err := rcp.changeRewardCreatorIfNeeded(metaBlock.GetEpoch()) diff --git a/epochStart/metachain/rewardsCreatorProxy_test.go b/epochStart/metachain/rewardsCreatorProxy_test.go index 6de5ac93a49..e41730d34f1 100644 --- a/epochStart/metachain/rewardsCreatorProxy_test.go +++ b/epochStart/metachain/rewardsCreatorProxy_test.go @@ -15,9 +15,11 @@ import ( "github.com/multiversx/mx-chain-go/epochStart" "github.com/multiversx/mx-chain-go/epochStart/mock" "github.com/multiversx/mx-chain-go/state" + "github.com/multiversx/mx-chain-go/testscommon" "github.com/multiversx/mx-chain-go/testscommon/economicsmocks" "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" "github.com/multiversx/mx-chain-go/testscommon/hashingMocks" + "github.com/multiversx/mx-chain-go/testscommon/stakingcommon" "github.com/stretchr/testify/require" ) @@ -55,9 +57,9 @@ func TestRewardsCreatorProxy_CreateRewardsMiniBlocksWithError(t *testing.T) { t.Parallel() expectedErr := fmt.Errorf("expectedError") - rewardCreatorV1 := &mock.RewardsCreatorStub{ + rewardCreatorV1 := &testscommon.RewardsCreatorStub{ CreateRewardsMiniBlocksCalled: func( - metaBlock data.MetaHeaderHandler, validatorsInfo map[uint32][]*state.ValidatorInfo, computedEconomics *block.Economics, + metaBlock data.MetaHeaderHandler, validatorsInfo state.ShardValidatorsInfoMapHandler, computedEconomics *block.Economics, ) (block.MiniBlockSlice, error) { return nil, expectedErr }, @@ -74,9 +76,9 @@ func TestRewardsCreatorProxy_CreateRewardsMiniBlocksWithError(t *testing.T) { func TestRewardsCreatorProxy_CreateRewardsMiniBlocksOK(t *testing.T) { t.Parallel() - 
rewardCreatorV1 := &mock.RewardsCreatorStub{ + rewardCreatorV1 := &testscommon.RewardsCreatorStub{ CreateRewardsMiniBlocksCalled: func( - metaBlock data.MetaHeaderHandler, validatorsInfo map[uint32][]*state.ValidatorInfo, computedEconomics *block.Economics, + metaBlock data.MetaHeaderHandler, validatorsInfo state.ShardValidatorsInfoMapHandler, computedEconomics *block.Economics, ) (block.MiniBlockSlice, error) { return make(block.MiniBlockSlice, 2), nil }, @@ -93,9 +95,9 @@ func TestRewardsCreatorProxy_CreateRewardsMiniBlocksOK(t *testing.T) { func TestRewardsCreatorProxy_CreateRewardsMiniBlocksWithSwitchToRewardsCreatorV2(t *testing.T) { t.Parallel() - rewardCreatorV1 := &mock.RewardsCreatorStub{ + rewardCreatorV1 := &testscommon.RewardsCreatorStub{ CreateRewardsMiniBlocksCalled: func( - metaBlock data.MetaHeaderHandler, validatorsInfo map[uint32][]*state.ValidatorInfo, computedEconomics *block.Economics, + metaBlock data.MetaHeaderHandler, validatorsInfo state.ShardValidatorsInfoMapHandler, computedEconomics *block.Economics, ) (block.MiniBlockSlice, error) { return make(block.MiniBlockSlice, 2), nil }, @@ -125,9 +127,9 @@ func TestRewardsCreatorProxy_CreateRewardsMiniBlocksWithSwitchToRewardsCreatorV2 func TestRewardsCreatorProxy_CreateRewardsMiniBlocksWithSwitchToRewardsCreatorV1(t *testing.T) { t.Parallel() - rewardCreatorV2 := &mock.RewardsCreatorStub{ + rewardCreatorV2 := &testscommon.RewardsCreatorStub{ CreateRewardsMiniBlocksCalled: func( - metaBlock data.MetaHeaderHandler, validatorsInfo map[uint32][]*state.ValidatorInfo, computedEconomics *block.Economics, + metaBlock data.MetaHeaderHandler, validatorsInfo state.ShardValidatorsInfoMapHandler, computedEconomics *block.Economics, ) (block.MiniBlockSlice, error) { return make(block.MiniBlockSlice, 2), nil }, @@ -159,9 +161,9 @@ func TestRewardsCreatorProxy_VerifyRewardsMiniBlocksWithError(t *testing.T) { t.Parallel() expectedErr := fmt.Errorf("expectedError") - rewardCreatorV1 := &mock.RewardsCreatorStub{ + rewardCreatorV1 := &testscommon.RewardsCreatorStub{ VerifyRewardsMiniBlocksCalled: func( - metaBlock data.MetaHeaderHandler, validatorsInfo map[uint32][]*state.ValidatorInfo, computedEconomics *block.Economics) error { + metaBlock data.MetaHeaderHandler, validatorsInfo state.ShardValidatorsInfoMapHandler, computedEconomics *block.Economics) error { return expectedErr }, } @@ -176,9 +178,9 @@ func TestRewardsCreatorProxy_VerifyRewardsMiniBlocksWithError(t *testing.T) { func TestRewardsCreatorProxy_VerifyRewardsMiniBlocksOK(t *testing.T) { t.Parallel() - rewardCreatorV1 := &mock.RewardsCreatorStub{ + rewardCreatorV1 := &testscommon.RewardsCreatorStub{ VerifyRewardsMiniBlocksCalled: func( - metaBlock data.MetaHeaderHandler, validatorsInfo map[uint32][]*state.ValidatorInfo, computedEconomics *block.Economics) error { + metaBlock data.MetaHeaderHandler, validatorsInfo state.ShardValidatorsInfoMapHandler, computedEconomics *block.Economics) error { return nil }, } @@ -194,7 +196,7 @@ func TestRewardsCreatorProxy_GetProtocolSustainabilityRewards(t *testing.T) { t.Parallel() expectedValue := big.NewInt(12345) - rewardCreatorV1 := &mock.RewardsCreatorStub{ + rewardCreatorV1 := &testscommon.RewardsCreatorStub{ GetProtocolSustainabilityRewardsCalled: func() *big.Int { return expectedValue }, @@ -210,7 +212,7 @@ func TestRewardsCreatorProxy_GetLocalTxCache(t *testing.T) { t.Parallel() expectedValue := &mock.TxForCurrentBlockStub{} - rewardCreatorV1 := &mock.RewardsCreatorStub{ + rewardCreatorV1 := &testscommon.RewardsCreatorStub{ 
GetLocalTxCacheCalled: func() epochStart.TransactionCacher { return expectedValue }, @@ -228,7 +230,7 @@ func TestRewardsCreatorProxy_CreateMarshalizedData(t *testing.T) { expectedValue := make(map[string][][]byte) blockBody := createDefaultBlockBody() - rewardCreatorV1 := &mock.RewardsCreatorStub{ + rewardCreatorV1 := &testscommon.RewardsCreatorStub{ CreateMarshalledDataCalled: func(body *block.Body) map[string][][]byte { if blockBody == body { return expectedValue @@ -252,7 +254,7 @@ func TestRewardsCreatorProxy_GetRewardsTxs(t *testing.T) { } blockBody := createDefaultBlockBody() - rewardCreatorV1 := &mock.RewardsCreatorStub{ + rewardCreatorV1 := &testscommon.RewardsCreatorStub{ GetRewardsTxsCalled: func(body *block.Body) map[string]data.TransactionHandler { if blockBody == body { return expectedValue @@ -273,7 +275,7 @@ func TestRewardsCreatorProxy_SaveTxBlockToStorage(t *testing.T) { blockBody := createDefaultBlockBody() functionCalled := false - rewardCreatorV1 := &mock.RewardsCreatorStub{ + rewardCreatorV1 := &testscommon.RewardsCreatorStub{ SaveBlockDataToStorageCalled: func(metaBlock data.MetaHeaderHandler, body *block.Body) { functionCalled = true }, @@ -291,7 +293,7 @@ func TestRewardsCreatorProxy_DeleteTxsFromStorage(t *testing.T) { blockBody := createDefaultBlockBody() functionCalled := false - rewardCreatorV1 := &mock.RewardsCreatorStub{ + rewardCreatorV1 := &testscommon.RewardsCreatorStub{ DeleteBlockDataFromStorageCalled: func(metaBlock data.MetaHeaderHandler, body *block.Body) { functionCalled = true }, @@ -309,7 +311,7 @@ func TestRewardsCreatorProxy_RemoveBlockDataFromPools(t *testing.T) { blockBody := createDefaultBlockBody() functionCalled := false - rewardCreatorV1 := &mock.RewardsCreatorStub{ + rewardCreatorV1 := &testscommon.RewardsCreatorStub{ RemoveBlockDataFromPoolsCalled: func(metaBlock data.MetaHeaderHandler, body *block.Body) { functionCalled = true }, @@ -327,13 +329,13 @@ func TestRewardsCreatorProxy_IsInterfaceNil(t *testing.T) { var rewardsCreatorProxy epochStart.RewardsCreator require.True(t, check.IfNil(rewardsCreatorProxy)) - rewardCreatorV1 := &mock.RewardsCreatorStub{} + rewardCreatorV1 := &testscommon.RewardsCreatorStub{} rewardsCreatorProxy, _, _ = createTestData(rewardCreatorV1, rCreatorV1) require.False(t, check.IfNil(rewardsCreatorProxy)) } -func createTestData(rewardCreator *mock.RewardsCreatorStub, rcType configuredRewardsCreator) (*rewardsCreatorProxy, map[uint32][]*state.ValidatorInfo, *block.MetaBlock) { +func createTestData(rewardCreator epochStart.RewardsCreator, rcType configuredRewardsCreator) (*rewardsCreatorProxy, state.ShardValidatorsInfoMapHandler, *block.MetaBlock) { args := createDefaultRewardsCreatorProxyArgs() rewardsCreatorProxy := &rewardsCreatorProxy{ rc: rewardCreator, @@ -380,7 +382,7 @@ func createDefaultRewardsCreatorProxyArgs() RewardsCreatorProxyArgs { return RewardsCreatorProxyArgs{ BaseRewardsCreatorArgs: getBaseRewardsArguments(), - StakingDataProvider: &mock.StakingDataProviderStub{}, + StakingDataProvider: &stakingcommon.StakingDataProviderStub{}, EconomicsDataProvider: NewEpochEconomicsStatistics(), RewardsHandler: rewardsHandler, } diff --git a/epochStart/metachain/rewardsV2.go b/epochStart/metachain/rewardsV2.go index 371f577b875..ddfc05abcfe 100644 --- a/epochStart/metachain/rewardsV2.go +++ b/epochStart/metachain/rewardsV2.go @@ -25,7 +25,7 @@ type nodeRewardsData struct { fullRewards *big.Int topUpStake *big.Int powerInShard *big.Int - valInfo *state.ValidatorInfo + valInfo state.ValidatorInfoHandler } // 
RewardsCreatorArgsV2 holds the data required to create end of epoch rewards @@ -75,7 +75,7 @@ func NewRewardsCreatorV2(args RewardsCreatorArgsV2) (*rewardsCreatorV2, error) { // stake top-up values per node func (rc *rewardsCreatorV2) CreateRewardsMiniBlocks( metaBlock data.MetaHeaderHandler, - validatorsInfo map[uint32][]*state.ValidatorInfo, + validatorsInfo state.ShardValidatorsInfoMapHandler, computedEconomics *block.Economics, ) (block.MiniBlockSlice, error) { if check.IfNil(metaBlock) { @@ -151,7 +151,7 @@ func (rc *rewardsCreatorV2) adjustProtocolSustainabilityRewards(protocolSustaina // VerifyRewardsMiniBlocks verifies if received rewards miniblocks are correct func (rc *rewardsCreatorV2) VerifyRewardsMiniBlocks( metaBlock data.MetaHeaderHandler, - validatorsInfo map[uint32][]*state.ValidatorInfo, + validatorsInfo state.ShardValidatorsInfoMapHandler, computedEconomics *block.Economics, ) error { if check.IfNil(metaBlock) { @@ -222,23 +222,23 @@ func (rc *rewardsCreatorV2) computeValidatorInfoPerRewardAddress( for _, nodeInfoList := range nodesRewardInfo { for _, nodeInfo := range nodeInfoList { - if nodeInfo.valInfo.LeaderSuccess == 0 && nodeInfo.valInfo.ValidatorSuccess == 0 { + if nodeInfo.valInfo.GetLeaderSuccess() == 0 && nodeInfo.valInfo.GetValidatorSuccess() == 0 { accumulatedUnassigned.Add(accumulatedUnassigned, nodeInfo.fullRewards) continue } - rwdInfo, ok := rwdAddrValidatorInfo[string(nodeInfo.valInfo.RewardAddress)] + rwdInfo, ok := rwdAddrValidatorInfo[string(nodeInfo.valInfo.GetRewardAddress())] if !ok { rwdInfo = &rewardInfoData{ accumulatedFees: big.NewInt(0), rewardsFromProtocol: big.NewInt(0), - address: string(nodeInfo.valInfo.RewardAddress), + address: string(nodeInfo.valInfo.GetRewardAddress()), } - rwdAddrValidatorInfo[string(nodeInfo.valInfo.RewardAddress)] = rwdInfo + rwdAddrValidatorInfo[string(nodeInfo.valInfo.GetRewardAddress())] = rwdInfo } - distributedLeaderFees.Add(distributedLeaderFees, nodeInfo.valInfo.AccumulatedFees) - rwdInfo.accumulatedFees.Add(rwdInfo.accumulatedFees, nodeInfo.valInfo.AccumulatedFees) + distributedLeaderFees.Add(distributedLeaderFees, nodeInfo.valInfo.GetAccumulatedFees()) + rwdInfo.accumulatedFees.Add(rwdInfo.accumulatedFees, nodeInfo.valInfo.GetAccumulatedFees()) rwdInfo.rewardsFromProtocol.Add(rwdInfo.rewardsFromProtocol, nodeInfo.fullRewards) } } @@ -263,7 +263,7 @@ func (rc *rewardsCreatorV2) IsInterfaceNil() bool { } func (rc *rewardsCreatorV2) computeRewardsPerNode( - validatorsInfo map[uint32][]*state.ValidatorInfo, + validatorsInfo state.ShardValidatorsInfoMapHandler, ) (map[uint32][]*nodeRewardsData, *big.Int) { var baseRewardsPerBlock *big.Int @@ -302,11 +302,11 @@ func (rc *rewardsCreatorV2) computeRewardsPerNode( } func (rc *rewardsCreatorV2) initNodesRewardsInfo( - validatorsInfo map[uint32][]*state.ValidatorInfo, + validatorsInfo state.ShardValidatorsInfoMapHandler, ) map[uint32][]*nodeRewardsData { nodesRewardsInfo := make(map[uint32][]*nodeRewardsData) - for shardID, valInfoList := range validatorsInfo { + for shardID, valInfoList := range validatorsInfo.GetShardValidatorsInfoMap() { nodesRewardsInfo[shardID] = make([]*nodeRewardsData, 0, len(valInfoList)) for _, valInfo := range valInfoList { if validatorInfo.WasEligibleInCurrentEpoch(valInfo) { @@ -336,7 +336,7 @@ func (rc *rewardsCreatorV2) computeBaseRewardsPerNode( for _, nodeRewardsInfo := range nodeRewardsInfoList { nodeRewardsInfo.baseReward = big.NewInt(0).Mul( rc.mapBaseRewardsPerBlockPerValidator[shardID], - 
big.NewInt(int64(nodeRewardsInfo.valInfo.NumSelectedInSuccessBlocks))) + big.NewInt(int64(nodeRewardsInfo.valInfo.GetNumSelectedInSuccessBlocks()))) accumulatedRewards.Add(accumulatedRewards, nodeRewardsInfo.baseReward) } } @@ -507,13 +507,13 @@ func computeNodesPowerInShard( // power in epoch is computed as nbBlocks*nodeTopUp, where nbBlocks represents the number of blocks the node // participated at creation/validation -func computeNodePowerInShard(nodeInfo *state.ValidatorInfo, nodeTopUp *big.Int) *big.Int { +func computeNodePowerInShard(nodeInfo state.ValidatorInfoHandler, nodeTopUp *big.Int) *big.Int { // if node was offline, it had no power, so the rewards should go to the others - if nodeInfo.LeaderSuccess == 0 && nodeInfo.ValidatorSuccess == 0 { + if nodeInfo.GetLeaderSuccess() == 0 && nodeInfo.GetValidatorSuccess() == 0 { return big.NewInt(0) } - nbBlocks := big.NewInt(0).SetUint64(uint64(nodeInfo.NumSelectedInSuccessBlocks)) + nbBlocks := big.NewInt(0).SetUint64(uint64(nodeInfo.GetNumSelectedInSuccessBlocks())) return big.NewInt(0).Mul(nbBlocks, nodeTopUp) } diff --git a/epochStart/metachain/rewardsV2_test.go b/epochStart/metachain/rewardsV2_test.go index 48d9564b7aa..7abea51dea3 100644 --- a/epochStart/metachain/rewardsV2_test.go +++ b/epochStart/metachain/rewardsV2_test.go @@ -19,6 +19,7 @@ import ( "github.com/multiversx/mx-chain-go/sharding" "github.com/multiversx/mx-chain-go/state" "github.com/multiversx/mx-chain-go/testscommon/economicsmocks" + "github.com/multiversx/mx-chain-go/testscommon/stakingcommon" "github.com/stretchr/testify/require" ) @@ -106,12 +107,12 @@ func TestNewRewardsCreatorV2_initNodesRewardsInfo(t *testing.T) { valInfoEligibleWithExtra := addNonEligibleValidatorInfo(100, valInfoEligible, string(common.WaitingList)) nodesRewardInfo := rwd.initNodesRewardsInfo(valInfoEligibleWithExtra) - require.Equal(t, len(valInfoEligible), len(nodesRewardInfo)) + require.Equal(t, len(valInfoEligible.GetShardValidatorsInfoMap()), len(nodesRewardInfo)) for shardID, nodeInfoList := range nodesRewardInfo { - require.Equal(t, len(nodeInfoList), len(valInfoEligible[shardID])) + require.Equal(t, len(nodeInfoList), len(valInfoEligible.GetShardValidatorsInfoMap()[shardID])) for i, nodeInfo := range nodeInfoList { - require.True(t, valInfoEligible[shardID][i] == nodeInfo.valInfo) + require.True(t, valInfoEligible.GetShardValidatorsInfoMap()[shardID][i] == nodeInfo.valInfo) require.Equal(t, zero, nodeInfo.topUpStake) require.Equal(t, zero, nodeInfo.powerInShard) require.Equal(t, zero, nodeInfo.baseReward) @@ -126,7 +127,7 @@ func TestNewRewardsCreatorV2_getTopUpForAllEligibleNodes(t *testing.T) { args := getRewardsCreatorV2Arguments() topUpVal, _ := big.NewInt(0).SetString("100000000000000000000", 10) - args.StakingDataProvider = &mock.StakingDataProviderStub{ + args.StakingDataProvider = &stakingcommon.StakingDataProviderStub{ GetNodeStakedTopUpCalled: func(blsKey []byte) (*big.Int, error) { topUp := big.NewInt(0).Set(topUpVal) return topUp, nil @@ -155,7 +156,7 @@ func TestNewRewardsCreatorV2_getTopUpForAllEligibleSomeBLSKeysNotFoundZeroed(t * args := getRewardsCreatorV2Arguments() topUpVal, _ := big.NewInt(0).SetString("100000000000000000000", 10) notFoundKey := []byte("notFound") - args.StakingDataProvider = &mock.StakingDataProviderStub{ + args.StakingDataProvider = &stakingcommon.StakingDataProviderStub{ GetNodeStakedTopUpCalled: func(blsKey []byte) (*big.Int, error) { if bytes.Equal(blsKey, notFoundKey) { return nil, fmt.Errorf("not found") @@ -170,9 +171,9 @@ func 
TestNewRewardsCreatorV2_getTopUpForAllEligibleSomeBLSKeysNotFoundZeroed(t * nodesPerShard := uint32(10) valInfo := createDefaultValidatorInfo(nodesPerShard, args.ShardCoordinator, args.NodesConfigProvider, 100, defaultBlocksPerShard) - for _, valList := range valInfo { - valList[0].PublicKey = notFoundKey - valList[1].PublicKey = notFoundKey + for _, valList := range valInfo.GetShardValidatorsInfoMap() { + valList[0].SetPublicKey(notFoundKey) + valList[1].SetPublicKey(notFoundKey) } nodesRewardInfo := rwd.initNodesRewardsInfo(valInfo) @@ -387,7 +388,7 @@ func TestNewRewardsCreatorV2_computeNodesPowerInShard(t *testing.T) { for _, nodeInfoList := range nodesRewardInfo { for _, nodeInfo := range nodeInfoList { - blocks := nodeInfo.valInfo.NumSelectedInSuccessBlocks + blocks := nodeInfo.valInfo.GetNumSelectedInSuccessBlocks() topUp := nodeInfo.topUpStake require.Equal(t, big.NewInt(0).Mul(big.NewInt(int64(blocks)), topUp), nodeInfo.powerInShard) } @@ -607,11 +608,11 @@ func TestNewRewardsCreatorV2_computeTopUpRewardsPerNode(t *testing.T) { nodesRewardInfo := dummyRwd.initNodesRewardsInfo(vInfo) _, _ = setDummyValuesInNodesRewardInfo(nodesRewardInfo, nbEligiblePerShard, tuStake, 0) - args.StakingDataProvider = &mock.StakingDataProviderStub{ + args.StakingDataProvider = &stakingcommon.StakingDataProviderStub{ GetNodeStakedTopUpCalled: func(blsKey []byte) (*big.Int, error) { - for shardID, vList := range vInfo { + for shardID, vList := range vInfo.GetShardValidatorsInfoMap() { for i, v := range vList { - if bytes.Equal(v.PublicKey, blsKey) { + if bytes.Equal(v.GetPublicKey(), blsKey) { return nodesRewardInfo[shardID][i].topUpStake, nil } } @@ -653,7 +654,7 @@ func TestNewRewardsCreatorV2_computeTopUpRewardsPerNodeNotFoundBLSKeys(t *testin args := getRewardsCreatorV2Arguments() nbEligiblePerShard := uint32(400) vInfo := createDefaultValidatorInfo(nbEligiblePerShard, args.ShardCoordinator, args.NodesConfigProvider, 100, defaultBlocksPerShard) - args.StakingDataProvider = &mock.StakingDataProviderStub{ + args.StakingDataProvider = &stakingcommon.StakingDataProviderStub{ GetNodeStakedTopUpCalled: func(blsKey []byte) (*big.Int, error) { return nil, fmt.Errorf("not found") }, @@ -737,15 +738,15 @@ func TestNewRewardsCreatorV2_computeRewardsPerNode(t *testing.T) { nodesRewardInfo := dummyRwd.initNodesRewardsInfo(vInfo) _, totalTopUpStake := setDummyValuesInNodesRewardInfo(nodesRewardInfo, nbEligiblePerShard, tuStake, 0) - args.StakingDataProvider = &mock.StakingDataProviderStub{ + args.StakingDataProvider = &stakingcommon.StakingDataProviderStub{ GetTotalTopUpStakeEligibleNodesCalled: func() *big.Int { topUpStake := big.NewInt(0).Set(totalTopUpStake) return topUpStake }, GetNodeStakedTopUpCalled: func(blsKey []byte) (*big.Int, error) { - for shardID, vList := range vInfo { + for shardID, vList := range vInfo.GetShardValidatorsInfoMap() { for i, v := range vList { - if bytes.Equal(v.PublicKey, blsKey) { + if bytes.Equal(v.GetPublicKey(), blsKey) { return nodesRewardInfo[shardID][i].topUpStake, nil } } @@ -1042,7 +1043,7 @@ func TestNewRewardsCreatorV35_computeRewardsPer3200NodesWithDifferentTopups(t *t nodesRewardInfo, _ := setupNodeRewardInfo(setupResult, vInfo, topupStakePerNode, tt.validatorTopupStake) setupResult.EconomicsDataProvider.SetRewardsToBeDistributedForBlocks(setupResult.rewardsForBlocks) - setupResult.RewardsCreatorArgsV2.StakingDataProvider = &mock.StakingDataProviderStub{ + setupResult.RewardsCreatorArgsV2.StakingDataProvider = &stakingcommon.StakingDataProviderStub{ 
GetTotalTopUpStakeEligibleNodesCalled: func() *big.Int { return topupEligibleStake }, @@ -1050,9 +1051,9 @@ func TestNewRewardsCreatorV35_computeRewardsPer3200NodesWithDifferentTopups(t *t return baseEligibleStake }, GetNodeStakedTopUpCalled: func(blsKey []byte) (*big.Int, error) { - for shardID, vList := range vInfo { + for shardID, vList := range vInfo.GetShardValidatorsInfoMap() { for i, v := range vList { - if bytes.Equal(v.PublicKey, blsKey) { + if bytes.Equal(v.GetPublicKey(), blsKey) { return nodesRewardInfo[shardID][i].topUpStake, nil } } @@ -1149,7 +1150,7 @@ func TestNewRewardsCreatorV2_computeRewardsPer3200NodesWithDifferentTopups(t *te nodesRewardInfo, _ := setupNodeRewardInfo(setupResult, vInfo, topupStakePerNode, tt.validatorTopupStake) setupResult.EconomicsDataProvider.SetRewardsToBeDistributedForBlocks(setupResult.rewardsForBlocks) - setupResult.RewardsCreatorArgsV2.StakingDataProvider = &mock.StakingDataProviderStub{ + setupResult.RewardsCreatorArgsV2.StakingDataProvider = &stakingcommon.StakingDataProviderStub{ GetTotalTopUpStakeEligibleNodesCalled: func() *big.Int { return topupEligibleStake }, @@ -1157,9 +1158,9 @@ func TestNewRewardsCreatorV2_computeRewardsPer3200NodesWithDifferentTopups(t *te return baseEligibleStake }, GetNodeStakedTopUpCalled: func(blsKey []byte) (*big.Int, error) { - for shardID, vList := range vInfo { + for shardID, vList := range vInfo.GetShardValidatorsInfoMap() { for i, v := range vList { - if bytes.Equal(v.PublicKey, blsKey) { + if bytes.Equal(v.GetPublicKey(), blsKey) { return nodesRewardInfo[shardID][i].topUpStake, nil } } @@ -1200,7 +1201,7 @@ func TestNewRewardsCreatorV2_computeRewardsPer3200NodesWithDifferentTopups(t *te func setupNodeRewardInfo( setupResult SetupRewardsResult, - vInfo map[uint32][]*state.ValidatorInfo, + vInfo state.ShardValidatorsInfoMapHandler, topupStakePerNode *big.Int, validatorTopupStake *big.Int, ) (map[uint32][]*nodeRewardsData, error) { @@ -1267,7 +1268,7 @@ func computeRewardsAndDust(nbEligiblePerShard uint32, args SetupRewardsResult, t totalEligibleStake, _ := big.NewInt(0).SetString("4000000"+"000000000000000000", 10) - args.StakingDataProvider = &mock.StakingDataProviderStub{ + args.StakingDataProvider = &stakingcommon.StakingDataProviderStub{ GetTotalTopUpStakeEligibleNodesCalled: func() *big.Int { return totalTopUpStake }, @@ -1275,9 +1276,9 @@ func computeRewardsAndDust(nbEligiblePerShard uint32, args SetupRewardsResult, t return totalEligibleStake }, GetNodeStakedTopUpCalled: func(blsKey []byte) (*big.Int, error) { - for shardID, vList := range vInfo { + for shardID, vList := range vInfo.GetShardValidatorsInfoMap() { for i, v := range vList { - if bytes.Equal(v.PublicKey, blsKey) { + if bytes.Equal(v.GetPublicKey(), blsKey) { return nodesRewardInfo[shardID][i].topUpStake, nil } } @@ -1360,11 +1361,11 @@ func TestNewRewardsCreatorV2_computeValidatorInfoPerRewardAddressWithOfflineVali nbShards := int64(args.ShardCoordinator.NumberOfShards()) + 1 args.EconomicsDataProvider.SetLeadersFees(big.NewInt(0).Mul(big.NewInt(int64(proposerFee)), big.NewInt(int64(nbEligiblePerShard-nbOfflinePerShard)*nbShards))) valInfo := createDefaultValidatorInfo(nbEligiblePerShard, args.ShardCoordinator, args.NodesConfigProvider, proposerFee, defaultBlocksPerShard) - for _, valList := range valInfo { + for _, valList := range valInfo.GetShardValidatorsInfoMap() { for i := 0; i < int(nbOfflinePerShard); i++ { - valList[i].LeaderSuccess = 0 - valList[i].ValidatorSuccess = 0 - valList[i].AccumulatedFees = big.NewInt(0) + 
valList[i].SetLeaderSuccess(0) + valList[i].SetValidatorSuccess(0) + valList[i].SetAccumulatedFees(big.NewInt(0)) } } @@ -1412,9 +1413,9 @@ func TestNewRewardsCreatorV2_computeValidatorInfoPerRewardAddressWithLeavingVali nbShards := int64(args.ShardCoordinator.NumberOfShards()) + 1 args.EconomicsDataProvider.SetLeadersFees(big.NewInt(0).Mul(big.NewInt(int64(proposerFee)), big.NewInt(int64(nbEligiblePerShard)*nbShards))) valInfo := createDefaultValidatorInfo(nbEligiblePerShard, args.ShardCoordinator, args.NodesConfigProvider, proposerFee, defaultBlocksPerShard) - for _, valList := range valInfo { + for _, valList := range valInfo.GetShardValidatorsInfoMap() { for i := 0; i < int(nbLeavingPerShard); i++ { - valList[i].List = string(common.LeavingList) + valList[i].SetList(string(common.LeavingList)) } } @@ -1500,10 +1501,8 @@ func TestNewRewardsCreatorV2_addValidatorRewardsToMiniBlocks(t *testing.T) { DevFeesInEpoch: big.NewInt(0), } sumFees := big.NewInt(0) - for _, vInfoList := range valInfo { - for _, vInfo := range vInfoList { - sumFees.Add(sumFees, vInfo.AccumulatedFees) - } + for _, vInfo := range valInfo.GetAllValidatorsInfo() { + sumFees.Add(sumFees, vInfo.GetAccumulatedFees()) } accumulatedDust, err := rwd.addValidatorRewardsToMiniBlocks(metaBlock, miniBlocks, nodesRewardInfo) @@ -1548,12 +1547,12 @@ func TestNewRewardsCreatorV2_addValidatorRewardsToMiniBlocksAddressInMetaChainDe nbAddrInMetachainPerShard := 2 sumFees := big.NewInt(0) - for _, vInfoList := range valInfo { + for _, vInfoList := range valInfo.GetShardValidatorsInfoMap() { for i, vInfo := range vInfoList { if i < nbAddrInMetachainPerShard { - vInfo.RewardAddress = addrInMeta + vInfo.SetRewardAddress(addrInMeta) } - sumFees.Add(sumFees, vInfo.AccumulatedFees) + sumFees.Add(sumFees, vInfo.GetAccumulatedFees()) } } @@ -1585,15 +1584,15 @@ func TestNewRewardsCreatorV2_CreateRewardsMiniBlocks(t *testing.T) { nodesRewardInfo := dummyRwd.initNodesRewardsInfo(vInfo) _, _ = setDummyValuesInNodesRewardInfo(nodesRewardInfo, nbEligiblePerShard, tuStake, 0) - args.StakingDataProvider = &mock.StakingDataProviderStub{ + args.StakingDataProvider = &stakingcommon.StakingDataProviderStub{ GetTotalTopUpStakeEligibleNodesCalled: func() *big.Int { totalTopUpStake, _ := big.NewInt(0).SetString("3000000000000000000000000", 10) return totalTopUpStake }, GetNodeStakedTopUpCalled: func(blsKey []byte) (*big.Int, error) { - for shardID, vList := range vInfo { + for shardID, vList := range vInfo.GetShardValidatorsInfoMap() { for i, v := range vList { - if bytes.Equal(v.PublicKey, blsKey) { + if bytes.Equal(v.GetPublicKey(), blsKey) { return nodesRewardInfo[shardID][i].topUpStake, nil } } @@ -1637,10 +1636,8 @@ func TestNewRewardsCreatorV2_CreateRewardsMiniBlocks(t *testing.T) { } sumFees := big.NewInt(0) - for _, vInfoList := range vInfo { - for _, v := range vInfoList { - sumFees.Add(sumFees, v.AccumulatedFees) - } + for _, v := range vInfo.GetAllValidatorsInfo() { + sumFees.Add(sumFees, v.GetAccumulatedFees()) } totalRws := rwd.economicsDataProvider.RewardsToBeDistributedForBlocks() @@ -1683,14 +1680,14 @@ func TestNewRewardsCreatorV2_CreateRewardsMiniBlocks2169Nodes(t *testing.T) { topupValue.Mul(topupValue, multiplier) _, totalTopupStake := setValuesInNodesRewardInfo(nodesRewardInfo, topupValue, tuStake) - args.StakingDataProvider = &mock.StakingDataProviderStub{ + args.StakingDataProvider = &stakingcommon.StakingDataProviderStub{ GetTotalTopUpStakeEligibleNodesCalled: func() *big.Int { return totalTopupStake }, GetNodeStakedTopUpCalled: 
func(blsKey []byte) (*big.Int, error) { - for shardID, vList := range vInfo { + for shardID, vList := range vInfo.GetShardValidatorsInfoMap() { for i, v := range vList { - if bytes.Equal(v.PublicKey, blsKey) { + if bytes.Equal(v.GetPublicKey(), blsKey) { return nodesRewardInfo[shardID][i].topUpStake, nil } } @@ -1734,10 +1731,8 @@ func TestNewRewardsCreatorV2_CreateRewardsMiniBlocks2169Nodes(t *testing.T) { } sumFees := big.NewInt(0) - for _, vInfoList := range vInfo { - for _, v := range vInfoList { - sumFees.Add(sumFees, v.AccumulatedFees) - } + for _, v := range vInfo.GetAllValidatorsInfo() { + sumFees.Add(sumFees, v.GetAccumulatedFees()) } totalRws := rwd.economicsDataProvider.RewardsToBeDistributedForBlocks() @@ -1781,7 +1776,7 @@ func getRewardsCreatorV2Arguments() RewardsCreatorArgsV2 { } return RewardsCreatorArgsV2{ BaseRewardsCreatorArgs: getBaseRewardsArguments(), - StakingDataProvider: &mock.StakingDataProviderStub{}, + StakingDataProvider: &stakingcommon.StakingDataProviderStub{}, EconomicsDataProvider: NewEpochEconomicsStatistics(), RewardsHandler: rewardsHandler, } @@ -1801,7 +1796,7 @@ func getRewardsCreatorV35Arguments() RewardsCreatorArgsV2 { } return RewardsCreatorArgsV2{ BaseRewardsCreatorArgs: getBaseRewardsArguments(), - StakingDataProvider: &mock.StakingDataProviderStub{}, + StakingDataProvider: &stakingcommon.StakingDataProviderStub{}, EconomicsDataProvider: NewEpochEconomicsStatistics(), RewardsHandler: rewardsHandler, } @@ -1877,7 +1872,7 @@ func createDefaultValidatorInfo( nodesConfigProvider epochStart.NodesConfigProvider, proposerFeesPerNode uint32, nbBlocksPerShard uint32, -) map[uint32][]*state.ValidatorInfo { +) state.ShardValidatorsInfoMapHandler { cGrShard := uint32(nodesConfigProvider.ConsensusGroupSize(0)) cGrMeta := uint32(nodesConfigProvider.ConsensusGroupSize(core.MetachainShardId)) nbBlocksSelectedNodeInShard := nbBlocksPerShard * cGrShard / eligibleNodesPerShard @@ -1886,9 +1881,8 @@ func createDefaultValidatorInfo( shardsMap := createShardsMap(shardCoordinator) var nbBlocksSelected uint32 - validators := make(map[uint32][]*state.ValidatorInfo) + validators := state.NewShardValidatorsInfoMap() for shardID := range shardsMap { - validators[shardID] = make([]*state.ValidatorInfo, eligibleNodesPerShard) nbBlocksSelected = nbBlocksSelectedNodeInShard if shardID == core.MetachainShardId { nbBlocksSelected = nbBlocksSelectedNodeInMeta @@ -1900,7 +1894,7 @@ func createDefaultValidatorInfo( _ = hex.Encode(addrHex, []byte(str)) leaderSuccess := uint32(20) - validators[shardID][i] = &state.ValidatorInfo{ + _ = validators.Add(&state.ValidatorInfo{ PublicKey: []byte(fmt.Sprintf("pubKeyBLS%d%d", shardID, i)), ShardId: shardID, RewardAddress: addrHex, @@ -1909,7 +1903,7 @@ func createDefaultValidatorInfo( NumSelectedInSuccessBlocks: nbBlocksSelected, AccumulatedFees: big.NewInt(int64(proposerFeesPerNode)), List: string(common.EligibleList), - } + }) } } @@ -1918,13 +1912,14 @@ func createDefaultValidatorInfo( func addNonEligibleValidatorInfo( nonEligiblePerShard uint32, - validatorsInfo map[uint32][]*state.ValidatorInfo, + validatorsInfo state.ShardValidatorsInfoMapHandler, list string, -) map[uint32][]*state.ValidatorInfo { - resultedValidatorsInfo := make(map[uint32][]*state.ValidatorInfo) - for shardID, valInfoList := range validatorsInfo { +) state.ShardValidatorsInfoMapHandler { + resultedValidatorsInfo := state.NewShardValidatorsInfoMap() + for shardID, valInfoList := range validatorsInfo.GetShardValidatorsInfoMap() { + _ = 
resultedValidatorsInfo.SetValidatorsInShard(shardID, valInfoList) for i := uint32(0); i < nonEligiblePerShard; i++ { - vInfo := &state.ValidatorInfo{ + _ = resultedValidatorsInfo.Add(&state.ValidatorInfo{ PublicKey: []byte(fmt.Sprintf("pubKeyBLSExtra%d", i)), ShardId: shardID, RewardAddress: []byte(fmt.Sprintf("addrRewardsExtra%d", i)), @@ -1933,8 +1928,7 @@ func addNonEligibleValidatorInfo( NumSelectedInSuccessBlocks: 1, AccumulatedFees: big.NewInt(int64(10)), List: list, - } - resultedValidatorsInfo[shardID] = append(valInfoList, vInfo) + }) } } diff --git a/epochStart/metachain/rewards_test.go b/epochStart/metachain/rewards_test.go index a41355bef67..b40fe8882e9 100644 --- a/epochStart/metachain/rewards_test.go +++ b/epochStart/metachain/rewards_test.go @@ -136,14 +136,12 @@ func TestRewardsCreator_CreateRewardsMiniBlocks(t *testing.T) { EpochStart: getDefaultEpochStart(), DevFeesInEpoch: big.NewInt(0), } - valInfo := make(map[uint32][]*state.ValidatorInfo) - valInfo[0] = []*state.ValidatorInfo{ - { - PublicKey: []byte("pubkey"), - ShardId: 0, - AccumulatedFees: big.NewInt(100), - }, - } + valInfo := state.NewShardValidatorsInfoMap() + _ = valInfo.Add(&state.ValidatorInfo{ + PublicKey: []byte("pubkey"), + ShardId: 0, + AccumulatedFees: big.NewInt(100), + }) bdy, err := rwd.CreateRewardsMiniBlocks(mb, valInfo, &mb.EpochStart.Economics) assert.Nil(t, err) assert.NotNil(t, bdy) @@ -178,14 +176,12 @@ func TestRewardsCreator_VerifyRewardsMiniBlocksHashDoesNotMatch(t *testing.T) { }, DevFeesInEpoch: big.NewInt(0), } - valInfo := make(map[uint32][]*state.ValidatorInfo) - valInfo[0] = []*state.ValidatorInfo{ - { - PublicKey: []byte("pubkey"), - ShardId: 0, - AccumulatedFees: big.NewInt(100), - }, - } + valInfo := state.NewShardValidatorsInfoMap() + _ = valInfo.Add(&state.ValidatorInfo{ + PublicKey: []byte("pubkey"), + ShardId: 0, + AccumulatedFees: big.NewInt(100), + }) err := rwd.VerifyRewardsMiniBlocks(mb, valInfo, &mb.EpochStart.Economics) assert.Equal(t, epochStart.ErrRewardMiniBlockHashDoesNotMatch, err) @@ -236,15 +232,13 @@ func TestRewardsCreator_VerifyRewardsMiniBlocksRewardsMbNumDoesNotMatch(t *testi mbh.Hash = mbHash mb.MiniBlockHeaders = []block.MiniBlockHeader{mbh, mbh} - valInfo := make(map[uint32][]*state.ValidatorInfo) - valInfo[0] = []*state.ValidatorInfo{ - { - PublicKey: []byte("pubkey"), - ShardId: 0, - AccumulatedFees: big.NewInt(100), - LeaderSuccess: 1, - }, - } + valInfo := state.NewShardValidatorsInfoMap() + _ = valInfo.Add(&state.ValidatorInfo{ + PublicKey: []byte("pubkey"), + ShardId: 0, + AccumulatedFees: big.NewInt(100), + LeaderSuccess: 1, + }) err := rwd.VerifyRewardsMiniBlocks(mb, valInfo, &mb.EpochStart.Economics) assert.Equal(t, epochStart.ErrRewardMiniBlocksNumDoesNotMatch, err) @@ -393,15 +387,13 @@ func TestRewardsCreator_VerifyRewardsMiniBlocksShouldWork(t *testing.T) { mb.EpochStart.Economics.RewardsForProtocolSustainability.Set(protocolSustainabilityRewardTx.Value) mb.EpochStart.Economics.TotalToDistribute.Set(big.NewInt(0).Add(rwdTx.Value, protocolSustainabilityRewardTx.Value)) - valInfo := make(map[uint32][]*state.ValidatorInfo) - valInfo[0] = []*state.ValidatorInfo{ - { - PublicKey: []byte("pubkey"), - ShardId: 0, - AccumulatedFees: big.NewInt(100), - LeaderSuccess: 1, - }, - } + valInfo := state.NewShardValidatorsInfoMap() + _ = valInfo.Add(&state.ValidatorInfo{ + PublicKey: []byte("pubkey"), + ShardId: 0, + AccumulatedFees: big.NewInt(100), + LeaderSuccess: 1, + }) err := rwd.VerifyRewardsMiniBlocks(mb, valInfo, &mb.EpochStart.Economics) 
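For reference, addNonEligibleValidatorInfo now seeds each shard with SetValidatorsInShard and appends the extra nodes with Add, instead of appending to a plain slice. A reduced sketch of that shape (hypothetical function name, only the fields needed to show the pattern):

func addOneWaitingPerShard(validatorsInfo state.ShardValidatorsInfoMapHandler) state.ShardValidatorsInfoMapHandler {
	resulted := state.NewShardValidatorsInfoMap()
	for shardID, list := range validatorsInfo.GetShardValidatorsInfoMap() {
		_ = resulted.SetValidatorsInShard(shardID, list) // seed the shard with the existing validators
		_ = resulted.Add(&state.ValidatorInfo{           // then append one extra, non-eligible node
			PublicKey: []byte("pubKeyBLSExtra0"),
			ShardId:   shardID,
			List:      string(common.WaitingList),
		})
	}
	return resulted
}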
assert.Nil(t, err) @@ -463,15 +455,13 @@ func TestRewardsCreator_VerifyRewardsMiniBlocksShouldWorkEvenIfNotAllShardsHaveR mb.EpochStart.Economics.RewardsForProtocolSustainability.Set(protocolSustainabilityRewardTx.Value) mb.EpochStart.Economics.TotalToDistribute.Set(big.NewInt(0).Add(rwdTx.Value, protocolSustainabilityRewardTx.Value)) - valInfo := make(map[uint32][]*state.ValidatorInfo) - valInfo[0] = []*state.ValidatorInfo{ - { - PublicKey: []byte("pubkey"), - ShardId: receivedShardID, - AccumulatedFees: big.NewInt(100), - LeaderSuccess: 1, - }, - } + valInfo := state.NewShardValidatorsInfoMap() + _ = valInfo.Add(&state.ValidatorInfo{ + PublicKey: []byte("pubkey"), + ShardId: receivedShardID, + AccumulatedFees: big.NewInt(100), + LeaderSuccess: 1, + }) err := rwd.VerifyRewardsMiniBlocks(mb, valInfo, &mb.EpochStart.Economics) assert.Nil(t, err) @@ -487,14 +477,12 @@ func TestRewardsCreator_CreateMarshalizedData(t *testing.T) { EpochStart: getDefaultEpochStart(), DevFeesInEpoch: big.NewInt(0), } - valInfo := make(map[uint32][]*state.ValidatorInfo) - valInfo[0] = []*state.ValidatorInfo{ - { - PublicKey: []byte("pubkey"), - ShardId: 0, - AccumulatedFees: big.NewInt(100), - }, - } + valInfo := state.NewShardValidatorsInfoMap() + _ = valInfo.Add(&state.ValidatorInfo{ + PublicKey: []byte("pubkey"), + ShardId: 0, + AccumulatedFees: big.NewInt(100), + }) _, _ = rwd.CreateRewardsMiniBlocks(mb, valInfo, &mb.EpochStart.Economics) rwdTx := rewardTx.RewardTx{ @@ -544,15 +532,13 @@ func TestRewardsCreator_SaveTxBlockToStorage(t *testing.T) { EpochStart: getDefaultEpochStart(), DevFeesInEpoch: big.NewInt(0), } - valInfo := make(map[uint32][]*state.ValidatorInfo) - valInfo[0] = []*state.ValidatorInfo{ - { - PublicKey: []byte("pubkey"), - ShardId: 0, - AccumulatedFees: big.NewInt(100), - LeaderSuccess: 1, - }, - } + valInfo := state.NewShardValidatorsInfoMap() + _ = valInfo.Add(&state.ValidatorInfo{ + PublicKey: []byte("pubkey"), + ShardId: 0, + AccumulatedFees: big.NewInt(100), + LeaderSuccess: 1, + }) _, _ = rwd.CreateRewardsMiniBlocks(mb, valInfo, &mb.EpochStart.Economics) mb2 := block.MetaBlock{ @@ -613,15 +599,13 @@ func TestRewardsCreator_addValidatorRewardsToMiniBlocks(t *testing.T) { expectedRwdTxHash, _ := core.CalculateHash(&marshal.JsonMarshalizer{}, &hashingMocks.HasherMock{}, expectedRwdTx) cloneMb.TxHashes = append(cloneMb.TxHashes, expectedRwdTxHash) - valInfo := make(map[uint32][]*state.ValidatorInfo) - valInfo[0] = []*state.ValidatorInfo{ - { - PublicKey: []byte("pubkey"), - ShardId: 0, - AccumulatedFees: big.NewInt(100), - LeaderSuccess: 1, - }, - } + valInfo := state.NewShardValidatorsInfoMap() + _ = valInfo.Add(&state.ValidatorInfo{ + PublicKey: []byte("pubkey"), + ShardId: 0, + AccumulatedFees: big.NewInt(100), + LeaderSuccess: 1, + }) rwdc.fillBaseRewardsPerBlockPerNode(mb.EpochStart.Economics.RewardsPerBlock) err := rwdc.addValidatorRewardsToMiniBlocks(valInfo, mb, miniBlocks, &rewardTx.RewardTx{}) @@ -648,25 +632,21 @@ func TestRewardsCreator_ProtocolRewardsForValidatorFromMultipleShards(t *testing } pubkey := "pubkey" - valInfo := make(map[uint32][]*state.ValidatorInfo) - valInfo[0] = []*state.ValidatorInfo{ - { - RewardAddress: []byte(pubkey), - ShardId: 0, - AccumulatedFees: big.NewInt(100), - NumSelectedInSuccessBlocks: 100, - LeaderSuccess: 1, - }, - } - valInfo[core.MetachainShardId] = []*state.ValidatorInfo{ - { - RewardAddress: []byte(pubkey), - ShardId: core.MetachainShardId, - AccumulatedFees: big.NewInt(100), - NumSelectedInSuccessBlocks: 200, - LeaderSuccess: 1, - }, - } 
+ valInfo := state.NewShardValidatorsInfoMap() + _ = valInfo.Add(&state.ValidatorInfo{ + RewardAddress: []byte(pubkey), + ShardId: 0, + AccumulatedFees: big.NewInt(100), + NumSelectedInSuccessBlocks: 100, + LeaderSuccess: 1, + }) + _ = valInfo.Add(&state.ValidatorInfo{ + RewardAddress: []byte(pubkey), + ShardId: core.MetachainShardId, + AccumulatedFees: big.NewInt(100), + NumSelectedInSuccessBlocks: 200, + LeaderSuccess: 1, + }) rwdc.fillBaseRewardsPerBlockPerNode(mb.EpochStart.Economics.RewardsPerBlock) rwdInfoData := rwdc.computeValidatorInfoPerRewardAddress(valInfo, &rewardTx.RewardTx{}, 0) @@ -675,8 +655,8 @@ func TestRewardsCreator_ProtocolRewardsForValidatorFromMultipleShards(t *testing assert.Equal(t, rwdInfo.address, pubkey) assert.Equal(t, rwdInfo.accumulatedFees.Cmp(big.NewInt(200)), 0) - protocolRewards := uint64(valInfo[0][0].NumSelectedInSuccessBlocks) * (mb.EpochStart.Economics.RewardsPerBlock.Uint64() / uint64(args.NodesConfigProvider.ConsensusGroupSize(0))) - protocolRewards += uint64(valInfo[core.MetachainShardId][0].NumSelectedInSuccessBlocks) * (mb.EpochStart.Economics.RewardsPerBlock.Uint64() / uint64(args.NodesConfigProvider.ConsensusGroupSize(core.MetachainShardId))) + protocolRewards := uint64(valInfo.GetShardValidatorsInfoMap()[0][0].GetNumSelectedInSuccessBlocks()) * (mb.EpochStart.Economics.RewardsPerBlock.Uint64() / uint64(args.NodesConfigProvider.ConsensusGroupSize(0))) + protocolRewards += uint64(valInfo.GetShardValidatorsInfoMap()[core.MetachainShardId][0].GetNumSelectedInSuccessBlocks()) * (mb.EpochStart.Economics.RewardsPerBlock.Uint64() / uint64(args.NodesConfigProvider.ConsensusGroupSize(core.MetachainShardId))) assert.Equal(t, rwdInfo.rewardsFromProtocol.Uint64(), protocolRewards) } @@ -730,7 +710,7 @@ func TestRewardsCreator_AddProtocolSustainabilityRewardToMiniBlocks(t *testing.T metaBlk.EpochStart.Economics.RewardsForProtocolSustainability.Set(expectedRewardTx.Value) metaBlk.EpochStart.Economics.TotalToDistribute.Set(expectedRewardTx.Value) - miniBlocks, err := rwdc.CreateRewardsMiniBlocks(metaBlk, make(map[uint32][]*state.ValidatorInfo), &metaBlk.EpochStart.Economics) + miniBlocks, err := rwdc.CreateRewardsMiniBlocks(metaBlk, state.NewShardValidatorsInfoMap(), &metaBlk.EpochStart.Economics) assert.Nil(t, err) assert.Equal(t, cloneMb, miniBlocks[0]) } @@ -747,23 +727,21 @@ func TestRewardsCreator_ValidatorInfoWithMetaAddressAddedToProtocolSustainabilit DevFeesInEpoch: big.NewInt(0), } metaBlk.EpochStart.Economics.TotalToDistribute = big.NewInt(20250) - valInfo := make(map[uint32][]*state.ValidatorInfo) - valInfo[0] = []*state.ValidatorInfo{ - { - RewardAddress: vm.StakingSCAddress, - ShardId: 0, - AccumulatedFees: big.NewInt(100), - NumSelectedInSuccessBlocks: 1, - LeaderSuccess: 1, - }, - { - RewardAddress: vm.FirstDelegationSCAddress, - ShardId: 0, - AccumulatedFees: big.NewInt(100), - NumSelectedInSuccessBlocks: 1, - LeaderSuccess: 1, - }, - } + valInfo := state.NewShardValidatorsInfoMap() + _ = valInfo.Add(&state.ValidatorInfo{ + RewardAddress: vm.StakingSCAddress, + ShardId: 0, + AccumulatedFees: big.NewInt(100), + NumSelectedInSuccessBlocks: 1, + LeaderSuccess: 1, + }) + _ = valInfo.Add(&state.ValidatorInfo{ + RewardAddress: vm.FirstDelegationSCAddress, + ShardId: 0, + AccumulatedFees: big.NewInt(100), + NumSelectedInSuccessBlocks: 1, + LeaderSuccess: 1, + }) acc, _ := args.UserAccountsDB.LoadAccount(vm.FirstDelegationSCAddress) userAcc, _ := acc.(state.UserAccountHandler) diff --git a/epochStart/metachain/stakingDataProvider.go 
b/epochStart/metachain/stakingDataProvider.go index bf3faf572b3..722a838193f 100644 --- a/epochStart/metachain/stakingDataProvider.go +++ b/epochStart/metachain/stakingDataProvider.go @@ -16,46 +16,67 @@ import ( ) type ownerStats struct { - numEligible int - numStakedNodes int64 - topUpValue *big.Int - totalStaked *big.Int - eligibleBaseStake *big.Int - eligibleTopUpStake *big.Int - topUpPerNode *big.Int - blsKeys [][]byte + numEligible int + numStakedNodes int64 + numActiveNodes int64 + totalTopUp *big.Int + topUpPerNode *big.Int + totalStaked *big.Int + eligibleBaseStake *big.Int + eligibleTopUpStake *big.Int + eligibleTopUpPerNode *big.Int + blsKeys [][]byte + auctionList []state.ValidatorInfoHandler + qualified bool +} + +type ownerInfoSC struct { + topUpValue *big.Int + totalStakedValue *big.Int + numStakedWaiting *big.Int + blsKeys [][]byte } type stakingDataProvider struct { - mutStakingData sync.RWMutex - cache map[string]*ownerStats - systemVM vmcommon.VMExecutionHandler - totalEligibleStake *big.Int - totalEligibleTopUpStake *big.Int - minNodePrice *big.Int + mutStakingData sync.RWMutex + cache map[string]*ownerStats + systemVM vmcommon.VMExecutionHandler + totalEligibleStake *big.Int + totalEligibleTopUpStake *big.Int + minNodePrice *big.Int + numOfValidatorsInCurrEpoch uint32 + enableEpochsHandler common.EnableEpochsHandler +} + +// StakingDataProviderArgs is a struct placeholder for all arguments required to create a NewStakingDataProvider +type StakingDataProviderArgs struct { + EnableEpochsHandler common.EnableEpochsHandler + SystemVM vmcommon.VMExecutionHandler + MinNodePrice string } // NewStakingDataProvider will create a new instance of a staking data provider able to aid in the final rewards // computation as this will retrieve the staking data from the system VM -func NewStakingDataProvider( - systemVM vmcommon.VMExecutionHandler, - minNodePrice string, -) (*stakingDataProvider, error) { - if check.IfNil(systemVM) { +func NewStakingDataProvider(args StakingDataProviderArgs) (*stakingDataProvider, error) { + if check.IfNil(args.SystemVM) { return nil, epochStart.ErrNilSystemVmInstance } + if check.IfNil(args.EnableEpochsHandler) { + return nil, epochStart.ErrNilEnableEpochsHandler + } - nodePrice, ok := big.NewInt(0).SetString(minNodePrice, 10) + nodePrice, ok := big.NewInt(0).SetString(args.MinNodePrice, 10) if !ok || nodePrice.Cmp(big.NewInt(0)) <= 0 { return nil, epochStart.ErrInvalidMinNodePrice } sdp := &stakingDataProvider{ - systemVM: systemVM, + systemVM: args.SystemVM, cache: make(map[string]*ownerStats), minNodePrice: nodePrice, totalEligibleStake: big.NewInt(0), totalEligibleTopUpStake: big.NewInt(0), + enableEpochsHandler: args.EnableEpochsHandler, } return sdp, nil @@ -67,6 +88,7 @@ func (sdp *stakingDataProvider) Clean() { sdp.cache = make(map[string]*ownerStats) sdp.totalEligibleStake.SetInt64(0) sdp.totalEligibleTopUpStake.SetInt64(0) + sdp.numOfValidatorsInCurrEpoch = 0 sdp.mutStakingData.Unlock() } @@ -91,7 +113,7 @@ func (sdp *stakingDataProvider) GetTotalTopUpStakeEligibleNodes() *big.Int { // GetNodeStakedTopUp returns the owner of provided bls key staking stats for the current epoch func (sdp *stakingDataProvider) GetNodeStakedTopUp(blsKey []byte) (*big.Int, error) { - owner, err := sdp.getBlsKeyOwner(blsKey) + owner, err := sdp.GetBlsKeyOwner(blsKey) if err != nil { log.Debug("GetOwnerStakingStats", "key", hex.EncodeToString(blsKey), "error", err) return nil, err @@ -102,19 +124,17 @@ func (sdp *stakingDataProvider) GetNodeStakedTopUp(blsKey 
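NewStakingDataProvider now takes a single StakingDataProviderArgs struct instead of positional arguments. A hedged construction sketch (wrapper name and the price value are placeholders; the handler and VM come from whatever the caller already wires up):

func newProvider(systemVM vmcommon.VMExecutionHandler, eeh common.EnableEpochsHandler) (*stakingDataProvider, error) {
	return NewStakingDataProvider(StakingDataProviderArgs{
		EnableEpochsHandler: eeh,
		SystemVM:            systemVM,
		// base-10 string; a value that fails to parse or is <= 0 yields ErrInvalidMinNodePrice
		MinNodePrice: "2500000000000000000000",
	})
}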
[]byte) (*big.Int, err return nil, epochStart.ErrOwnerDoesntHaveEligibleNodesInEpoch } - return ownerInfo.topUpPerNode, nil + return ownerInfo.eligibleTopUpPerNode, nil } -// PrepareStakingDataForRewards prepares the staking data for the given map of node keys per shard -func (sdp *stakingDataProvider) PrepareStakingDataForRewards(keys map[uint32][][]byte) error { +// PrepareStakingData prepares the staking data for the given map of node keys per shard +func (sdp *stakingDataProvider) PrepareStakingData(validatorsMap state.ShardValidatorsInfoMapHandler) error { sdp.Clean() - for _, keysList := range keys { - for _, blsKey := range keysList { - err := sdp.loadDataForBlsKey(blsKey) - if err != nil { - return err - } + for _, validator := range validatorsMap.GetAllValidatorsInfo() { + err := sdp.loadDataForBlsKey(validator) + if err != nil { + return err } } @@ -146,7 +166,7 @@ func (sdp *stakingDataProvider) processStakingData() { totalEligibleStake.Add(totalEligibleStake, ownerEligibleStake) totalEligibleTopUpStake.Add(totalEligibleTopUpStake, owner.eligibleTopUpStake) - owner.topUpPerNode = big.NewInt(0).Div(owner.eligibleTopUpStake, ownerEligibleNodes) + owner.eligibleTopUpPerNode = big.NewInt(0).Div(owner.eligibleTopUpStake, ownerEligibleNodes) } sdp.totalEligibleTopUpStake = totalEligibleTopUpStake @@ -154,40 +174,48 @@ func (sdp *stakingDataProvider) processStakingData() { } // FillValidatorInfo will fill the validator info for the bls key if it was not already filled -func (sdp *stakingDataProvider) FillValidatorInfo(blsKey []byte) error { +func (sdp *stakingDataProvider) FillValidatorInfo(validator state.ValidatorInfoHandler) error { sdp.mutStakingData.Lock() defer sdp.mutStakingData.Unlock() - _, err := sdp.getAndFillOwnerStatsFromSC(blsKey) + _, err := sdp.getAndFillOwnerStats(validator) return err } -func (sdp *stakingDataProvider) getAndFillOwnerStatsFromSC(blsKey []byte) (*ownerStats, error) { - owner, err := sdp.getBlsKeyOwner(blsKey) +func (sdp *stakingDataProvider) getAndFillOwnerStats(validator state.ValidatorInfoHandler) (*ownerStats, error) { + blsKey := validator.GetPublicKey() + owner, err := sdp.GetBlsKeyOwner(blsKey) if err != nil { log.Debug("error fill owner stats", "step", "get owner from bls", "key", hex.EncodeToString(blsKey), "error", err) return nil, err } - ownerData, err := sdp.getValidatorData(owner) + ownerData, err := sdp.fillOwnerData(owner, validator) if err != nil { log.Debug("error fill owner stats", "step", "get owner data", "key", hex.EncodeToString(blsKey), "owner", hex.EncodeToString([]byte(owner)), "error", err) return nil, err } + if isValidator(validator) { + sdp.numOfValidatorsInCurrEpoch++ + } + return ownerData, nil } // loadDataForBlsKey will be called for each BLS key that took part in the consensus (no matter the shard ID) so the // staking data can be recovered from the staking system smart contracts. // The function will error if something went wrong. It does change the inner state of the called instance. 
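PrepareStakingDataForRewards(keys map[uint32][][]byte) becomes PrepareStakingData(validatorsMap state.ShardValidatorsInfoMapHandler), and GetNodeStakedTopUp now answers with the owner's eligible top-up per node. A sketch of the typical call order, derived only from the changed signatures (the wrapper name is hypothetical):

func prepareAndQuery(sdp *stakingDataProvider, validatorsInfo state.ShardValidatorsInfoMapHandler, blsKey []byte) (*big.Int, error) {
	// PrepareStakingData walks every validator of every shard and fills the owner cache.
	if err := sdp.PrepareStakingData(validatorsInfo); err != nil {
		return nil, err
	}
	// Returns the owner's eligibleTopUpPerNode; errors with
	// ErrOwnerDoesntHaveEligibleNodesInEpoch if the owner has no eligible node this epoch.
	return sdp.GetNodeStakedTopUp(blsKey)
}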
-func (sdp *stakingDataProvider) loadDataForBlsKey(blsKey []byte) error { +func (sdp *stakingDataProvider) loadDataForBlsKey(validator state.ValidatorInfoHandler) error { sdp.mutStakingData.Lock() defer sdp.mutStakingData.Unlock() - ownerData, err := sdp.getAndFillOwnerStatsFromSC(blsKey) + ownerData, err := sdp.getAndFillOwnerStats(validator) if err != nil { - log.Debug("error computing rewards for bls key", "step", "get owner data", "key", hex.EncodeToString(blsKey), "error", err) + log.Debug("error computing rewards for bls key", + "step", "get owner data", + "key", hex.EncodeToString(validator.GetPublicKey()), + "error", err) return err } ownerData.numEligible++ @@ -195,7 +223,29 @@ func (sdp *stakingDataProvider) loadDataForBlsKey(blsKey []byte) error { return nil } -func (sdp *stakingDataProvider) getBlsKeyOwner(blsKey []byte) (string, error) { +// GetOwnersData returns all owner stats +func (sdp *stakingDataProvider) GetOwnersData() map[string]*epochStart.OwnerData { + sdp.mutStakingData.RLock() + defer sdp.mutStakingData.RUnlock() + + ret := make(map[string]*epochStart.OwnerData) + for owner, ownerData := range sdp.cache { + ret[owner] = &epochStart.OwnerData{ + NumActiveNodes: ownerData.numActiveNodes, + NumStakedNodes: ownerData.numStakedNodes, + TotalTopUp: big.NewInt(0).SetBytes(ownerData.totalTopUp.Bytes()), + TopUpPerNode: big.NewInt(0).SetBytes(ownerData.topUpPerNode.Bytes()), + AuctionList: make([]state.ValidatorInfoHandler, len(ownerData.auctionList)), + Qualified: ownerData.qualified, + } + copy(ret[owner].AuctionList, ownerData.auctionList) + } + + return ret +} + +// GetBlsKeyOwner returns the owner's public key of the provided bls key +func (sdp *stakingDataProvider) GetBlsKeyOwner(blsKey []byte) (string, error) { vmInput := &vmcommon.ContractCallInput{ VMInput: vmcommon.VMInput{ CallerAddr: vm.ValidatorSCAddress, @@ -221,48 +271,109 @@ func (sdp *stakingDataProvider) getBlsKeyOwner(blsKey []byte) (string, error) { return string(data[0]), nil } -func (sdp *stakingDataProvider) getValidatorData(validatorAddress string) (*ownerStats, error) { - ownerData, exists := sdp.cache[validatorAddress] +func (sdp *stakingDataProvider) fillOwnerData(owner string, validator state.ValidatorInfoHandler) (*ownerStats, error) { + var err error + ownerData, exists := sdp.cache[owner] if exists { - return ownerData, nil + updateOwnerData(ownerData, validator) + } else { + ownerData, err = sdp.getAndFillOwnerDataFromSC(owner, validator) + if err != nil { + return nil, err + } + sdp.cache[owner] = ownerData } - return sdp.getValidatorDataFromStakingSC(validatorAddress) + return ownerData, nil +} + +func updateOwnerData(ownerData *ownerStats, validator state.ValidatorInfoHandler) { + if isInAuction(validator) { + ownerData.numActiveNodes-- + ownerData.auctionList = append(ownerData.auctionList, validator.ShallowClone()) + } } -func (sdp *stakingDataProvider) getValidatorDataFromStakingSC(validatorAddress string) (*ownerStats, error) { - topUpValue, totalStakedValue, numStakedWaiting, blsKeys, err := sdp.getValidatorInfoFromSC(validatorAddress) +func (sdp *stakingDataProvider) getAndFillOwnerDataFromSC(owner string, validator state.ValidatorInfoHandler) (*ownerStats, error) { + ownerInfo, err := sdp.getOwnerInfoFromSC(owner) if err != nil { return nil, err } - ownerData := &ownerStats{ - numEligible: 0, - numStakedNodes: numStakedWaiting.Int64(), - topUpValue: topUpValue, - totalStaked: totalStakedValue, - eligibleBaseStake: big.NewInt(0).Set(sdp.minNodePrice), - eligibleTopUpStake: 
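GetOwnersData hands back per-owner copies (fresh big.Ints rebuilt from bytes and a copied auction slice), so callers can inspect the snapshot without mutating the provider's cache. An illustrative read-only consumer (hypothetical function name; field names are the ones defined above):

func logOwnersSnapshot(sdp *stakingDataProvider) {
	for owner, data := range sdp.GetOwnersData() {
		log.Debug("owner snapshot",
			"owner", hex.EncodeToString([]byte(owner)),
			"numActiveNodes", data.NumActiveNodes,
			"numStakedNodes", data.NumStakedNodes,
			"totalTopUp", data.TotalTopUp.String(),
			"nodesInAuction", len(data.AuctionList),
			"qualified", data.Qualified)
	}
}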
big.NewInt(0), - topUpPerNode: big.NewInt(0), + topUpPerNode := big.NewInt(0) + numStakedNodes := ownerInfo.numStakedWaiting.Int64() + if numStakedNodes == 0 { + log.Debug("stakingDataProvider.fillOwnerData", + "message", epochStart.ErrOwnerHasNoStakedNode, + "owner", hex.EncodeToString([]byte(owner)), + "validator", hex.EncodeToString(validator.GetPublicKey()), + ) + } else { + topUpPerNode = big.NewInt(0).Div(ownerInfo.topUpValue, ownerInfo.numStakedWaiting) } - ownerData.blsKeys = make([][]byte, len(blsKeys)) - copy(ownerData.blsKeys, blsKeys) + ownerData := &ownerStats{ + numEligible: 0, + numStakedNodes: numStakedNodes, + numActiveNodes: numStakedNodes, + totalTopUp: ownerInfo.topUpValue, + topUpPerNode: topUpPerNode, + totalStaked: ownerInfo.totalStakedValue, + eligibleBaseStake: big.NewInt(0).Set(sdp.minNodePrice), + eligibleTopUpStake: big.NewInt(0), + eligibleTopUpPerNode: big.NewInt(0), + qualified: true, + } + err = sdp.checkAndFillOwnerValidatorAuctionData([]byte(owner), ownerData, validator) + if err != nil { + return nil, err + } - sdp.cache[validatorAddress] = ownerData + ownerData.blsKeys = make([][]byte, len(ownerInfo.blsKeys)) + copy(ownerData.blsKeys, ownerInfo.blsKeys) return ownerData, nil } -func (sdp *stakingDataProvider) getValidatorInfoFromSC(validatorAddress string) (*big.Int, *big.Int, *big.Int, [][]byte, error) { - validatorAddressBytes := []byte(validatorAddress) +func (sdp *stakingDataProvider) checkAndFillOwnerValidatorAuctionData( + ownerPubKey []byte, + ownerData *ownerStats, + validator state.ValidatorInfoHandler, +) error { + validatorInAuction := isInAuction(validator) + if !validatorInAuction { + return nil + } + if ownerData.numStakedNodes == 0 { + return fmt.Errorf("stakingDataProvider.checkAndFillOwnerValidatorAuctionData for validator in auction error: %w, owner: %s, node: %s", + epochStart.ErrOwnerHasNoStakedNode, + hex.EncodeToString(ownerPubKey), + hex.EncodeToString(validator.GetPublicKey()), + ) + } + if !sdp.enableEpochsHandler.IsFlagEnabled(common.StakingV4StartedFlag) { + return fmt.Errorf("stakingDataProvider.checkAndFillOwnerValidatorAuctionData for validator in auction error: %w, owner: %s, node: %s", + epochStart.ErrReceivedAuctionValidatorsBeforeStakingV4, + hex.EncodeToString(ownerPubKey), + hex.EncodeToString(validator.GetPublicKey()), + ) + } + + ownerData.numActiveNodes -= 1 + ownerData.auctionList = []state.ValidatorInfoHandler{validator} + + return nil +} + +func (sdp *stakingDataProvider) getOwnerInfoFromSC(owner string) (*ownerInfoSC, error) { + ownerAddressBytes := []byte(owner) vmInput := &vmcommon.ContractCallInput{ VMInput: vmcommon.VMInput{ CallerAddr: vm.EndOfEpochAddress, CallValue: big.NewInt(0), GasProvided: math.MaxInt64, - Arguments: [][]byte{validatorAddressBytes}, + Arguments: [][]byte{ownerAddressBytes}, }, RecipientAddr: vm.ValidatorSCAddress, Function: "getTotalStakedTopUpStakedBlsKeys", @@ -270,41 +381,50 @@ func (sdp *stakingDataProvider) getValidatorInfoFromSC(validatorAddress string) vmOutput, err := sdp.systemVM.RunSmartContractCall(vmInput) if err != nil { - return nil, nil, nil, nil, err + return nil, err } if vmOutput.ReturnCode != vmcommon.Ok { - return nil, nil, nil, nil, fmt.Errorf("%w, error: %v message: %s", epochStart.ErrExecutingSystemScCode, vmOutput.ReturnCode, vmOutput.ReturnMessage) + return nil, fmt.Errorf("%w, error: %v message: %s", epochStart.ErrExecutingSystemScCode, vmOutput.ReturnCode, vmOutput.ReturnMessage) } if len(vmOutput.ReturnData) < 3 { - return nil, nil, nil, nil, 
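Two things in this hunk are easy to misread in diff form. First, getAndFillOwnerDataFromSC only divides topUpValue by numStakedWaiting when the owner has at least one staked node; otherwise topUpPerNode stays 0 and a debug log is emitted. Second, checkAndFillOwnerValidatorAuctionData reduces to four outcomes, paraphrased below (no behaviour beyond the code above):

// 1. validator not in the auction list             -> no-op, nil error
// 2. in auction, but owner has numStakedNodes == 0 -> ErrOwnerHasNoStakedNode (wrapped with owner and key)
// 3. in auction before StakingV4StartedFlag        -> ErrReceivedAuctionValidatorsBeforeStakingV4 (wrapped)
// 4. otherwise                                     -> numActiveNodes-- and the validator is recorded in auctionList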
fmt.Errorf("%w, getTotalStakedTopUpStakedBlsKeys function should have at least three values", epochStart.ErrExecutingSystemScCode) + return nil, fmt.Errorf("%w, getTotalStakedTopUpStakedBlsKeys function should have at least three values", epochStart.ErrExecutingSystemScCode) } topUpValue := big.NewInt(0).SetBytes(vmOutput.ReturnData[0]) totalStakedValue := big.NewInt(0).SetBytes(vmOutput.ReturnData[1]) numStakedWaiting := big.NewInt(0).SetBytes(vmOutput.ReturnData[2]) - return topUpValue, totalStakedValue, numStakedWaiting, vmOutput.ReturnData[3:], nil + return &ownerInfoSC{ + topUpValue: topUpValue, + totalStakedValue: totalStakedValue, + numStakedWaiting: numStakedWaiting, + blsKeys: vmOutput.ReturnData[3:], + }, nil } // ComputeUnQualifiedNodes will compute which nodes are not qualified - do not have enough tokens to be validators -func (sdp *stakingDataProvider) ComputeUnQualifiedNodes(validatorInfos map[uint32][]*state.ValidatorInfo) ([][]byte, map[string][][]byte, error) { +func (sdp *stakingDataProvider) ComputeUnQualifiedNodes(validatorsInfo state.ShardValidatorsInfoMapHandler) ([][]byte, map[string][][]byte, error) { sdp.mutStakingData.Lock() defer sdp.mutStakingData.Unlock() mapOwnersKeys := make(map[string][][]byte) keysToUnStake := make([][]byte, 0) - mapBLSKeyStatus := createMapBLSKeyStatus(validatorInfos) + mapBLSKeyStatus, err := sdp.createMapBLSKeyStatus(validatorsInfo) + if err != nil { + return nil, nil, err + } + for ownerAddress, stakingInfo := range sdp.cache { maxQualified := big.NewInt(0).Div(stakingInfo.totalStaked, sdp.minNodePrice) if maxQualified.Int64() >= stakingInfo.numStakedNodes { continue } - sortedKeys := arrangeBlsKeysByStatus(mapBLSKeyStatus, stakingInfo.blsKeys) + sortedKeys := sdp.arrangeBlsKeysByStatus(mapBLSKeyStatus, stakingInfo.blsKeys) numKeysToUnStake := stakingInfo.numStakedNodes - maxQualified.Int64() - selectedKeys := selectKeysToUnStake(sortedKeys, numKeysToUnStake) + selectedKeys, numRemovedValidators := sdp.selectKeysToUnStake(sortedKeys, numKeysToUnStake) if len(selectedKeys) == 0 { continue } @@ -313,31 +433,44 @@ func (sdp *stakingDataProvider) ComputeUnQualifiedNodes(validatorInfos map[uint3 mapOwnersKeys[ownerAddress] = make([][]byte, len(selectedKeys)) copy(mapOwnersKeys[ownerAddress], selectedKeys) + + stakingInfo.qualified = false + sdp.numOfValidatorsInCurrEpoch -= uint32(numRemovedValidators) } return keysToUnStake, mapOwnersKeys, nil } -func createMapBLSKeyStatus(validatorInfos map[uint32][]*state.ValidatorInfo) map[string]string { +func (sdp *stakingDataProvider) createMapBLSKeyStatus(validatorsInfo state.ShardValidatorsInfoMapHandler) (map[string]string, error) { mapBLSKeyStatus := make(map[string]string) - for _, validatorsInfoSlice := range validatorInfos { - for _, validatorInfo := range validatorsInfoSlice { - mapBLSKeyStatus[string(validatorInfo.PublicKey)] = validatorInfo.List + for _, validator := range validatorsInfo.GetAllValidatorsInfo() { + list := validator.GetList() + pubKey := validator.GetPublicKey() + + if sdp.enableEpochsHandler.IsFlagEnabled(common.StakingV4Step2Flag) && list == string(common.NewList) { + return nil, fmt.Errorf("%w, bls key = %s", + epochStart.ErrReceivedNewListNodeInStakingV4, + hex.EncodeToString(pubKey), + ) } + + mapBLSKeyStatus[string(pubKey)] = list } - return mapBLSKeyStatus + return mapBLSKeyStatus, nil } -func selectKeysToUnStake(sortedKeys map[string][][]byte, numToSelect int64) [][]byte { +func (sdp *stakingDataProvider) selectKeysToUnStake(sortedKeys map[string][][]byte, 
numToSelect int64) ([][]byte, int) { selectedKeys := make([][]byte, 0) - newKeys := sortedKeys[string(common.NewList)] + newNodesList := sdp.getNewNodesList() + + newKeys := sortedKeys[newNodesList] if len(newKeys) > 0 { selectedKeys = append(selectedKeys, newKeys...) } if int64(len(selectedKeys)) >= numToSelect { - return selectedKeys[:numToSelect] + return selectedKeys[:numToSelect], 0 } waitingKeys := sortedKeys[string(common.WaitingList)] @@ -346,7 +479,9 @@ func selectKeysToUnStake(sortedKeys map[string][][]byte, numToSelect int64) [][] } if int64(len(selectedKeys)) >= numToSelect { - return selectedKeys[:numToSelect] + overFlowKeys := len(selectedKeys) - int(numToSelect) + removedWaiting := len(waitingKeys) - overFlowKeys + return selectedKeys[:numToSelect], removedWaiting } eligibleKeys := sortedKeys[string(common.EligibleList)] @@ -355,18 +490,22 @@ func selectKeysToUnStake(sortedKeys map[string][][]byte, numToSelect int64) [][] } if int64(len(selectedKeys)) >= numToSelect { - return selectedKeys[:numToSelect] + overFlowKeys := len(selectedKeys) - int(numToSelect) + removedEligible := len(eligibleKeys) - overFlowKeys + return selectedKeys[:numToSelect], removedEligible + len(waitingKeys) } - return selectedKeys + return selectedKeys, len(eligibleKeys) + len(waitingKeys) } -func arrangeBlsKeysByStatus(mapBlsKeyStatus map[string]string, blsKeys [][]byte) map[string][][]byte { +func (sdp *stakingDataProvider) arrangeBlsKeysByStatus(mapBlsKeyStatus map[string]string, blsKeys [][]byte) map[string][][]byte { sortedKeys := make(map[string][][]byte) + newNodesList := sdp.getNewNodesList() + for _, blsKey := range blsKeys { - blsKeyStatus, ok := mapBlsKeyStatus[string(blsKey)] - if !ok { - sortedKeys[string(common.NewList)] = append(sortedKeys[string(common.NewList)], blsKey) + blsKeyStatus, found := mapBlsKeyStatus[string(blsKey)] + if !found { + sortedKeys[newNodesList] = append(sortedKeys[newNodesList], blsKey) continue } @@ -376,6 +515,23 @@ func arrangeBlsKeysByStatus(mapBlsKeyStatus map[string]string, blsKeys [][]byte) return sortedKeys } +func (sdp *stakingDataProvider) getNewNodesList() string { + newNodesList := string(common.NewList) + if sdp.enableEpochsHandler.IsFlagEnabled(common.StakingV4Step2Flag) { + newNodesList = string(common.AuctionList) + } + + return newNodesList +} + +// GetNumOfValidatorsInCurrentEpoch returns the number of validators(eligible + waiting) in current epoch +func (sdp *stakingDataProvider) GetNumOfValidatorsInCurrentEpoch() uint32 { + sdp.mutStakingData.RLock() + defer sdp.mutStakingData.RUnlock() + + return sdp.numOfValidatorsInCurrEpoch +} + // IsInterfaceNil return true if underlying object is nil func (sdp *stakingDataProvider) IsInterfaceNil() bool { return sdp == nil diff --git a/epochStart/metachain/stakingDataProvider_test.go b/epochStart/metachain/stakingDataProvider_test.go index 257485e8f0c..e3bfc1e6259 100644 --- a/epochStart/metachain/stakingDataProvider_test.go +++ b/epochStart/metachain/stakingDataProvider_test.go @@ -17,28 +17,49 @@ import ( "github.com/multiversx/mx-chain-go/epochStart/mock" "github.com/multiversx/mx-chain-go/state" "github.com/multiversx/mx-chain-go/testscommon" + "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" "github.com/multiversx/mx-chain-go/vm" vmcommon "github.com/multiversx/mx-chain-vm-common-go" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) -func TestNewStakingDataProvider_NilSystemVMShouldErr(t *testing.T) { - t.Parallel() - - sdp, err := 
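selectKeysToUnStake drains keys in priority order (the new/auction bucket first, then waiting, then eligible), and its new second return value counts how many of the chosen keys were current validators, which is exactly what ComputeUnQualifiedNodes subtracts from numOfValidatorsInCurrEpoch. Walking through the "overflow from eligible" test added further below:

// sortedKeys (from the test):
//   auction:  [pk0]          not a current validator
//   waiting:  [pk4, pk5]
//   eligible: [pk1, pk2]
// numToSelect = 4:
//   take pk0 (1/4), then pk4, pk5 (3/4), then pk1, pk2 and trim the overflow (pk2)
//   -> unStakedKeys      = [pk0, pk4, pk5, pk1]
//   -> removedValidators = 2 waiting + 1 eligible = 3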
NewStakingDataProvider(nil, "100000") +const stakingV4Step1EnableEpoch = 444 +const stakingV4Step2EnableEpoch = 445 - assert.True(t, check.IfNil(sdp)) - assert.Equal(t, epochStart.ErrNilSystemVmInstance, err) +func createStakingDataProviderArgs() StakingDataProviderArgs { + return StakingDataProviderArgs{ + EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + SystemVM: &mock.VMExecutionHandlerStub{}, + MinNodePrice: "2500", + } } -func TestNewStakingDataProvider_ShouldWork(t *testing.T) { +func TestNewStakingDataProvider_NilInputPointersShouldErr(t *testing.T) { t.Parallel() - sdp, err := NewStakingDataProvider(&mock.VMExecutionHandlerStub{}, "100000") + t.Run("nil system vm", func(t *testing.T) { + args := createStakingDataProviderArgs() + args.SystemVM = nil + sdp, err := NewStakingDataProvider(args) + assert.True(t, check.IfNil(sdp)) + assert.Equal(t, epochStart.ErrNilSystemVmInstance, err) + }) - assert.False(t, check.IfNil(sdp)) - assert.Nil(t, err) + t.Run("nil epoch notifier", func(t *testing.T) { + args := createStakingDataProviderArgs() + args.EnableEpochsHandler = nil + sdp, err := NewStakingDataProvider(args) + assert.True(t, check.IfNil(sdp)) + assert.Equal(t, vm.ErrNilEnableEpochsHandler, err) + }) + + t.Run("should work", func(t *testing.T) { + args := createStakingDataProviderArgs() + sdp, err := NewStakingDataProvider(args) + assert.False(t, check.IfNil(sdp)) + assert.Nil(t, err) + }) } func TestStakingDataProvider_PrepareDataForBlsKeyGetBlsKeyOwnerErrorsShouldErr(t *testing.T) { @@ -46,7 +67,8 @@ func TestStakingDataProvider_PrepareDataForBlsKeyGetBlsKeyOwnerErrorsShouldErr(t numCall := 0 expectedErr := errors.New("expected error") - sdp, _ := NewStakingDataProvider(&mock.VMExecutionHandlerStub{ + args := createStakingDataProviderArgs() + args.SystemVM = &mock.VMExecutionHandlerStub{ RunSmartContractCallCalled: func(input *vmcommon.ContractCallInput) (*vmcommon.VMOutput, error) { numCall++ if numCall == 1 { @@ -65,17 +87,18 @@ func TestStakingDataProvider_PrepareDataForBlsKeyGetBlsKeyOwnerErrorsShouldErr(t return nil, nil }, - }, "100000") + } + sdp, _ := NewStakingDataProvider(args) - err := sdp.loadDataForBlsKey([]byte("bls key")) + err := sdp.loadDataForBlsKey(&state.ValidatorInfo{PublicKey: []byte("bls key")}) assert.Equal(t, expectedErr, err) - err = sdp.loadDataForBlsKey([]byte("bls key")) + err = sdp.loadDataForBlsKey(&state.ValidatorInfo{PublicKey: []byte("bls key")}) assert.NotNil(t, err) assert.True(t, strings.Contains(err.Error(), epochStart.ErrExecutingSystemScCode.Error())) assert.True(t, strings.Contains(err.Error(), vmcommon.UserError.String())) - err = sdp.loadDataForBlsKey([]byte("bls key")) + err = sdp.loadDataForBlsKey(&state.ValidatorInfo{PublicKey: []byte("bls key")}) assert.NotNil(t, err) assert.True(t, strings.Contains(err.Error(), epochStart.ErrExecutingSystemScCode.Error())) assert.True(t, strings.Contains(err.Error(), "returned exactly one value: the owner address")) @@ -87,7 +110,8 @@ func TestStakingDataProvider_PrepareDataForBlsKeyLoadOwnerDataErrorsShouldErr(t numCall := 0 owner := []byte("owner") expectedErr := errors.New("expected error") - sdp, _ := NewStakingDataProvider(&mock.VMExecutionHandlerStub{ + args := createStakingDataProviderArgs() + args.SystemVM = &mock.VMExecutionHandlerStub{ RunSmartContractCallCalled: func(input *vmcommon.ContractCallInput) (*vmcommon.VMOutput, error) { if input.Function == "getOwner" { return &vmcommon.VMOutput{ @@ -111,17 +135,18 @@ func 
TestStakingDataProvider_PrepareDataForBlsKeyLoadOwnerDataErrorsShouldErr(t } return nil, nil }, - }, "100000") + } + sdp, _ := NewStakingDataProvider(args) - err := sdp.loadDataForBlsKey([]byte("bls key")) + err := sdp.loadDataForBlsKey(&state.ValidatorInfo{PublicKey: []byte("bls key")}) assert.Equal(t, expectedErr, err) - err = sdp.loadDataForBlsKey([]byte("bls key")) + err = sdp.loadDataForBlsKey(&state.ValidatorInfo{PublicKey: []byte("bls key")}) assert.NotNil(t, err) assert.True(t, strings.Contains(err.Error(), epochStart.ErrExecutingSystemScCode.Error())) assert.True(t, strings.Contains(err.Error(), vmcommon.UserError.String())) - err = sdp.loadDataForBlsKey([]byte("bls key")) + err = sdp.loadDataForBlsKey(&state.ValidatorInfo{PublicKey: []byte("bls key")}) assert.NotNil(t, err) assert.True(t, strings.Contains(err.Error(), epochStart.ErrExecutingSystemScCode.Error())) assert.True(t, strings.Contains(err.Error(), "getTotalStakedTopUpStakedBlsKeys function should have at least three values")) @@ -138,12 +163,12 @@ func TestStakingDataProvider_PrepareDataForBlsKeyFromSCShouldWork(t *testing.T) sdp := createStakingDataProviderWithMockArgs(t, owner, topUpVal, stakeVal, &numRunContractCalls) - err := sdp.loadDataForBlsKey([]byte("bls key")) + err := sdp.loadDataForBlsKey(&state.ValidatorInfo{PublicKey: []byte("bls key")}) assert.Nil(t, err) assert.Equal(t, 2, numRunContractCalls) ownerData := sdp.GetFromCache(owner) require.NotNil(t, ownerData) - assert.Equal(t, topUpVal, ownerData.topUpValue) + assert.Equal(t, topUpVal, ownerData.totalTopUp) assert.Equal(t, 1, ownerData.numEligible) } @@ -158,16 +183,16 @@ func TestStakingDataProvider_PrepareDataForBlsKeyCachedResponseShouldWork(t *tes sdp := createStakingDataProviderWithMockArgs(t, owner, topUpVal, stakeVal, &numRunContractCalls) - err := sdp.loadDataForBlsKey([]byte("bls key")) + err := sdp.loadDataForBlsKey(&state.ValidatorInfo{PublicKey: []byte("bls key")}) assert.Nil(t, err) - err = sdp.loadDataForBlsKey([]byte("bls key2")) + err = sdp.loadDataForBlsKey(&state.ValidatorInfo{PublicKey: []byte("bls key2")}) assert.Nil(t, err) assert.Equal(t, 3, numRunContractCalls) ownerData := sdp.GetFromCache(owner) require.NotNil(t, ownerData) - assert.Equal(t, topUpVal, ownerData.topUpValue) + assert.Equal(t, topUpVal, ownerData.totalTopUp) assert.Equal(t, 2, ownerData.numEligible) } @@ -179,11 +204,11 @@ func TestStakingDataProvider_PrepareDataForBlsKeyWithRealSystemVmShouldWork(t *t blsKey := []byte("bls key") sdp := createStakingDataProviderWithRealArgs(t, owner, blsKey, topUpVal) - err := sdp.loadDataForBlsKey(blsKey) + err := sdp.loadDataForBlsKey(&state.ValidatorInfo{PublicKey: blsKey}) assert.Nil(t, err) ownerData := sdp.GetFromCache(owner) require.NotNil(t, ownerData) - assert.Equal(t, topUpVal, ownerData.topUpValue) + assert.Equal(t, topUpVal, ownerData.totalTopUp) assert.Equal(t, 1, ownerData.numEligible) } @@ -224,6 +249,39 @@ func TestStakingDataProvider_ComputeUnQualifiedNodes(t *testing.T) { require.Zero(t, len(ownersWithNotEnoughFunds)) } +func TestStakingDataProvider_ComputeUnQualifiedNodesWithStakingV4ReceivedNewListNode(t *testing.T) { + v0 := &state.ValidatorInfo{ + PublicKey: []byte("blsKey0"), + List: string(common.EligibleList), + RewardAddress: []byte("address0"), + } + v1 := &state.ValidatorInfo{ + PublicKey: []byte("blsKey1"), + List: string(common.NewList), + RewardAddress: []byte("address0"), + } + v2 := &state.ValidatorInfo{ + PublicKey: []byte("blsKey2"), + List: string(common.AuctionList), + RewardAddress: 
[]byte("address1"), + } + + valInfo := state.NewShardValidatorsInfoMap() + _ = valInfo.Add(v0) + _ = valInfo.Add(v1) + _ = valInfo.Add(v2) + + sdp := createStakingDataProviderAndUpdateCache(t, valInfo, big.NewInt(0)) + sdp.enableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.StakingV4Step2Flag) + + keysToUnStake, ownersWithNotEnoughFunds, err := sdp.ComputeUnQualifiedNodes(valInfo) + require.Error(t, err) + require.True(t, strings.Contains(err.Error(), epochStart.ErrReceivedNewListNodeInStakingV4.Error())) + require.True(t, strings.Contains(err.Error(), hex.EncodeToString(v1.PublicKey))) + require.Empty(t, keysToUnStake) + require.Empty(t, ownersWithNotEnoughFunds) +} + func TestStakingDataProvider_ComputeUnQualifiedNodesWithOwnerNotEnoughFunds(t *testing.T) { nbShards := uint32(3) nbEligible := make(map[uint32]uint32) @@ -259,6 +317,39 @@ func TestStakingDataProvider_ComputeUnQualifiedNodesWithOwnerNotEnoughFunds(t *t require.Equal(t, 1, len(ownersWithNotEnoughFunds)) } +func TestStakingDataProvider_ComputeUnQualifiedNodesWithOwnerNotEnoughFundsWithStakingV4(t *testing.T) { + owner := "address0" + v0 := &state.ValidatorInfo{ + PublicKey: []byte("blsKey0"), + List: string(common.EligibleList), + RewardAddress: []byte(owner), + } + v1 := &state.ValidatorInfo{ + PublicKey: []byte("blsKey1"), + List: string(common.AuctionList), + RewardAddress: []byte(owner), + } + + valInfo := state.NewShardValidatorsInfoMap() + _ = valInfo.Add(v0) + _ = valInfo.Add(v1) + + sdp := createStakingDataProviderAndUpdateCache(t, valInfo, big.NewInt(0)) + sdp.enableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.StakingV4Step2Flag) + + sdp.cache[owner].blsKeys = append(sdp.cache[owner].blsKeys, []byte("newKey")) + sdp.cache[owner].totalStaked = big.NewInt(2500) + sdp.cache[owner].numStakedNodes++ + + keysToUnStake, ownersWithNotEnoughFunds, err := sdp.ComputeUnQualifiedNodes(valInfo) + require.Nil(t, err) + + expectedUnStakedKeys := [][]byte{[]byte("blsKey1"), []byte("newKey")} + expectedOwnerWithNotEnoughFunds := map[string][][]byte{owner: expectedUnStakedKeys} + require.Equal(t, expectedUnStakedKeys, keysToUnStake) + require.Equal(t, expectedOwnerWithNotEnoughFunds, ownersWithNotEnoughFunds) +} + func TestStakingDataProvider_GetTotalStakeEligibleNodes(t *testing.T) { t.Parallel() @@ -345,13 +436,13 @@ func TestStakingDataProvider_GetNodeStakedTopUpShouldWork(t *testing.T) { sdp := createStakingDataProviderWithMockArgs(t, owner, topUpVal, stakeVal, &numRunContractCalls) expectedOwnerStats := &ownerStats{ - topUpPerNode: big.NewInt(37), + eligibleTopUpPerNode: big.NewInt(37), } sdp.SetInCache(owner, expectedOwnerStats) res, err := sdp.GetNodeStakedTopUp(owner) require.NoError(t, err) - require.Equal(t, expectedOwnerStats.topUpPerNode, res) + require.Equal(t, expectedOwnerStats.eligibleTopUpPerNode, res) } func TestStakingDataProvider_PrepareStakingDataForRewards(t *testing.T) { @@ -365,9 +456,9 @@ func TestStakingDataProvider_PrepareStakingDataForRewards(t *testing.T) { sdp := createStakingDataProviderWithMockArgs(t, owner, topUpVal, stakeVal, &numRunContractCalls) - keys := make(map[uint32][][]byte) - keys[0] = append(keys[0], []byte("owner")) - err := sdp.PrepareStakingDataForRewards(keys) + validatorsMap := state.NewShardValidatorsInfoMap() + _ = validatorsMap.Add(&state.ValidatorInfo{PublicKey: owner, ShardId: 0}) + err := sdp.PrepareStakingData(validatorsMap) require.NoError(t, err) } @@ -382,10 +473,144 @@ func TestStakingDataProvider_FillValidatorInfo(t 
*testing.T) { sdp := createStakingDataProviderWithMockArgs(t, owner, topUpVal, stakeVal, &numRunContractCalls) - err := sdp.FillValidatorInfo([]byte("owner")) + err := sdp.FillValidatorInfo(&state.ValidatorInfo{PublicKey: []byte("bls key")}) require.NoError(t, err) } +func TestCheckAndFillOwnerValidatorAuctionData(t *testing.T) { + t.Parallel() + + t.Run("validator not in auction, expect no error, no owner data update", func(t *testing.T) { + t.Parallel() + args := createStakingDataProviderArgs() + sdp, _ := NewStakingDataProvider(args) + + ownerData := &ownerStats{} + err := sdp.checkAndFillOwnerValidatorAuctionData([]byte("owner"), ownerData, &state.ValidatorInfo{List: string(common.NewList)}) + require.Nil(t, err) + require.Equal(t, &ownerStats{}, ownerData) + }) + + t.Run("validator in auction, but no staked node, expect error", func(t *testing.T) { + t.Parallel() + args := createStakingDataProviderArgs() + sdp, _ := NewStakingDataProvider(args) + + owner := []byte("owner") + ownerData := &ownerStats{numStakedNodes: 0} + validator := &state.ValidatorInfo{PublicKey: []byte("validatorPubKey"), List: string(common.AuctionList)} + + err := sdp.checkAndFillOwnerValidatorAuctionData(owner, ownerData, validator) + require.Error(t, err) + require.True(t, strings.Contains(err.Error(), epochStart.ErrOwnerHasNoStakedNode.Error())) + require.True(t, strings.Contains(err.Error(), hex.EncodeToString(owner))) + require.True(t, strings.Contains(err.Error(), hex.EncodeToString(validator.PublicKey))) + require.Equal(t, &ownerStats{numStakedNodes: 0}, ownerData) + }) + + t.Run("validator in auction, staking v4 not enabled yet, expect error", func(t *testing.T) { + t.Parallel() + args := createStakingDataProviderArgs() + sdp, _ := NewStakingDataProvider(args) + + owner := []byte("owner") + ownerData := &ownerStats{numStakedNodes: 1} + validator := &state.ValidatorInfo{PublicKey: []byte("validatorPubKey"), List: string(common.AuctionList)} + + err := sdp.checkAndFillOwnerValidatorAuctionData(owner, ownerData, validator) + require.Error(t, err) + require.True(t, strings.Contains(err.Error(), epochStart.ErrReceivedAuctionValidatorsBeforeStakingV4.Error())) + require.True(t, strings.Contains(err.Error(), hex.EncodeToString(owner))) + require.True(t, strings.Contains(err.Error(), hex.EncodeToString(validator.PublicKey))) + require.Equal(t, &ownerStats{numStakedNodes: 1}, ownerData) + }) + + t.Run("should update owner's data", func(t *testing.T) { + t.Parallel() + args := createStakingDataProviderArgs() + sdp, _ := NewStakingDataProvider(args) + sdp.enableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.StakingV4StartedFlag) + + owner := []byte("owner") + ownerData := &ownerStats{numStakedNodes: 3, numActiveNodes: 3} + validator := &state.ValidatorInfo{PublicKey: []byte("validatorPubKey"), List: string(common.AuctionList)} + + err := sdp.checkAndFillOwnerValidatorAuctionData(owner, ownerData, validator) + require.Nil(t, err) + require.Equal(t, &ownerStats{ + numStakedNodes: 3, + numActiveNodes: 2, + auctionList: []state.ValidatorInfoHandler{validator}, + }, ownerData) + }) +} + +func TestSelectKeysToUnStake(t *testing.T) { + t.Parallel() + + t.Run("no validator removed", func(t *testing.T) { + t.Parallel() + args := createStakingDataProviderArgs() + sdp, _ := NewStakingDataProvider(args) + sdp.enableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.StakingV4Step2Flag) + + sortedKeys := map[string][][]byte{ + string(common.AuctionList): {[]byte("pk0")}, + } + 
unStakedKeys, removedValidators := sdp.selectKeysToUnStake(sortedKeys, 2) + require.Equal(t, [][]byte{[]byte("pk0")}, unStakedKeys) + require.Equal(t, 0, removedValidators) + }) + + t.Run("overflow from waiting", func(t *testing.T) { + t.Parallel() + args := createStakingDataProviderArgs() + sdp, _ := NewStakingDataProvider(args) + sdp.enableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.StakingV4Step2Flag) + + sortedKeys := map[string][][]byte{ + string(common.AuctionList): {[]byte("pk0")}, + string(common.EligibleList): {[]byte("pk2")}, + string(common.WaitingList): {[]byte("pk3"), []byte("pk4"), []byte("pk5")}, + } + unStakedKeys, removedValidators := sdp.selectKeysToUnStake(sortedKeys, 2) + require.Equal(t, [][]byte{[]byte("pk0"), []byte("pk3")}, unStakedKeys) + require.Equal(t, 1, removedValidators) + }) + + t.Run("overflow from eligible", func(t *testing.T) { + t.Parallel() + args := createStakingDataProviderArgs() + sdp, _ := NewStakingDataProvider(args) + sdp.enableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.StakingV4Step2Flag) + + sortedKeys := map[string][][]byte{ + string(common.AuctionList): {[]byte("pk0")}, + string(common.EligibleList): {[]byte("pk1"), []byte("pk2")}, + string(common.WaitingList): {[]byte("pk4"), []byte("pk5")}, + } + unStakedKeys, removedValidators := sdp.selectKeysToUnStake(sortedKeys, 4) + require.Equal(t, [][]byte{[]byte("pk0"), []byte("pk4"), []byte("pk5"), []byte("pk1")}, unStakedKeys) + require.Equal(t, 3, removedValidators) + }) + + t.Run("no overflow", func(t *testing.T) { + t.Parallel() + args := createStakingDataProviderArgs() + sdp, _ := NewStakingDataProvider(args) + sdp.enableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.StakingV4Step2Flag) + + sortedKeys := map[string][][]byte{ + string(common.AuctionList): {[]byte("pk0")}, + string(common.EligibleList): {[]byte("pk1")}, + string(common.WaitingList): {[]byte("pk2")}, + } + unStakedKeys, removedValidators := sdp.selectKeysToUnStake(sortedKeys, 3) + require.Equal(t, [][]byte{[]byte("pk0"), []byte("pk2"), []byte("pk1")}, unStakedKeys) + require.Equal(t, 2, removedValidators) + }) +} + func createStakingDataProviderWithMockArgs( t *testing.T, owner []byte, @@ -393,7 +618,8 @@ func createStakingDataProviderWithMockArgs( stakingVal *big.Int, numRunContractCalls *int, ) *stakingDataProvider { - sdp, err := NewStakingDataProvider(&mock.VMExecutionHandlerStub{ + args := createStakingDataProviderArgs() + args.SystemVM = &mock.VMExecutionHandlerStub{ RunSmartContractCallCalled: func(input *vmcommon.ContractCallInput) (*vmcommon.VMOutput, error) { *numRunContractCalls++ switch input.Function { @@ -417,7 +643,8 @@ func createStakingDataProviderWithMockArgs( return nil, errors.New("unexpected call") }, - }, "100000") + } + sdp, err := NewStakingDataProvider(args) require.Nil(t, err) return sdp @@ -435,7 +662,9 @@ func createStakingDataProviderWithRealArgs(t *testing.T, owner []byte, blsKey [] doStake(t, s.systemVM, s.userAccountsDB, owner, big.NewInt(0).Add(big.NewInt(1000), topUpVal), blsKey) - sdp, _ := NewStakingDataProvider(s.systemVM, "100000") + argsStakingDataProvider := createStakingDataProviderArgs() + argsStakingDataProvider.SystemVM = s.systemVM + sdp, _ := NewStakingDataProvider(argsStakingDataProvider) return sdp } @@ -464,27 +693,28 @@ func saveOutputAccounts(t *testing.T, accountsDB state.AccountsAdapter, vmOutput require.Nil(t, err) } -func createStakingDataProviderAndUpdateCache(t *testing.T, validatorsInfo 
map[uint32][]*state.ValidatorInfo, topUpValue *big.Int) *stakingDataProvider { - +func createStakingDataProviderAndUpdateCache(t *testing.T, validatorsInfo state.ShardValidatorsInfoMapHandler, topUpValue *big.Int) *stakingDataProvider { args, _ := createFullArgumentsForSystemSCProcessing(config.EnableEpochs{ StakingV2EnableEpoch: 1, }, testscommon.CreateMemUnit()) args.EpochNotifier.CheckEpoch(&testscommon.HeaderHandlerStub{ EpochField: 1, }) - sdp, _ := NewStakingDataProvider(args.SystemVM, "2500") + + argsStakingDataProvider := createStakingDataProviderArgs() + argsStakingDataProvider.SystemVM = args.SystemVM + sdp, _ := NewStakingDataProvider(argsStakingDataProvider) args.StakingDataProvider = sdp s, _ := NewSystemSCProcessor(args) require.NotNil(t, s) - for _, valsList := range validatorsInfo { - for _, valInfo := range valsList { - stake := big.NewInt(0).Add(big.NewInt(2500), topUpValue) - if valInfo.List != string(common.LeavingList) && valInfo.List != string(common.InactiveList) { - doStake(t, s.systemVM, s.userAccountsDB, valInfo.RewardAddress, stake, valInfo.PublicKey) - } - updateCache(sdp, valInfo.RewardAddress, valInfo.PublicKey, valInfo.List, stake) + for _, valInfo := range validatorsInfo.GetAllValidatorsInfo() { + stake := big.NewInt(0).Add(big.NewInt(2500), topUpValue) + if valInfo.GetList() != string(common.LeavingList) && valInfo.GetList() != string(common.InactiveList) { + doStake(t, s.systemVM, s.userAccountsDB, valInfo.GetRewardAddress(), stake, valInfo.GetPublicKey()) } + updateCache(sdp, valInfo.GetRewardAddress(), valInfo.GetPublicKey(), valInfo.GetList(), stake) + } return sdp @@ -495,14 +725,14 @@ func updateCache(sdp *stakingDataProvider, ownerAddress []byte, blsKey []byte, l if owner == nil { owner = &ownerStats{ - numEligible: 0, - numStakedNodes: 0, - topUpValue: big.NewInt(0), - totalStaked: big.NewInt(0), - eligibleBaseStake: big.NewInt(0), - eligibleTopUpStake: big.NewInt(0), - topUpPerNode: big.NewInt(0), - blsKeys: nil, + numEligible: 0, + numStakedNodes: 0, + totalTopUp: big.NewInt(0), + totalStaked: big.NewInt(0), + eligibleBaseStake: big.NewInt(0), + eligibleTopUpStake: big.NewInt(0), + eligibleTopUpPerNode: big.NewInt(0), + blsKeys: nil, } } @@ -518,12 +748,12 @@ func updateCache(sdp *stakingDataProvider, ownerAddress []byte, blsKey []byte, l sdp.cache[string(ownerAddress)] = owner } -func createValidatorsInfo(nbShards uint32, nbEligible, nbWaiting, nbLeaving, nbInactive map[uint32]uint32) map[uint32][]*state.ValidatorInfo { - validatorsInfo := make(map[uint32][]*state.ValidatorInfo) +func createValidatorsInfo(nbShards uint32, nbEligible, nbWaiting, nbLeaving, nbInactive map[uint32]uint32) state.ShardValidatorsInfoMapHandler { + validatorsInfo := state.NewShardValidatorsInfoMap() shardMap := shardsMap(nbShards) for shardID := range shardMap { - valInfoList := make([]*state.ValidatorInfo, 0) + valInfoList := make([]state.ValidatorInfoHandler, 0) for eligible := uint32(0); eligible < nbEligible[shardID]; eligible++ { vInfo := &state.ValidatorInfo{ PublicKey: []byte(fmt.Sprintf("blsKey%s%d%d", common.EligibleList, shardID, eligible)), @@ -561,7 +791,7 @@ func createValidatorsInfo(nbShards uint32, nbEligible, nbWaiting, nbLeaving, nbI } valInfoList = append(valInfoList, vInfo) } - validatorsInfo[shardID] = valInfoList + _ = validatorsInfo.SetValidatorsInShard(shardID, valInfoList) } return validatorsInfo } diff --git a/epochStart/metachain/systemSCs.go b/epochStart/metachain/systemSCs.go index 39bfa4c2e41..229a41d5710 100644 --- 
a/epochStart/metachain/systemSCs.go +++ b/epochStart/metachain/systemSCs.go @@ -1,30 +1,20 @@ package metachain import ( - "bytes" - "context" "fmt" - "math" "math/big" - "sort" "github.com/multiversx/mx-chain-core-go/core" - "github.com/multiversx/mx-chain-core-go/core/atomic" "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-core-go/data" - "github.com/multiversx/mx-chain-core-go/data/block" "github.com/multiversx/mx-chain-core-go/marshal" "github.com/multiversx/mx-chain-go/common" - "github.com/multiversx/mx-chain-go/common/errChan" - vInfo "github.com/multiversx/mx-chain-go/common/validatorInfo" - "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/epochStart" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/sharding" "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" "github.com/multiversx/mx-chain-go/state" "github.com/multiversx/mx-chain-go/vm" - "github.com/multiversx/mx-chain-go/vm/systemSmartContracts" vmcommon "github.com/multiversx/mx-chain-vm-common-go" ) @@ -41,107 +31,41 @@ type ArgsNewEpochStartSystemSCProcessing struct { EndOfEpochCallerAddress []byte StakingSCAddress []byte - MaxNodesEnableConfig []config.MaxNodesChangeConfig ESDTOwnerAddressBytes []byte - GenesisNodesConfig sharding.GenesisNodesSetupHandler - EpochNotifier process.EpochNotifier - NodesConfigProvider epochStart.NodesConfigProvider - StakingDataProvider epochStart.StakingDataProvider - EnableEpochsHandler common.EnableEpochsHandler + GenesisNodesConfig sharding.GenesisNodesSetupHandler + EpochNotifier process.EpochNotifier + NodesConfigProvider epochStart.NodesConfigProvider + StakingDataProvider epochStart.StakingDataProvider + AuctionListSelector epochStart.AuctionListSelector + MaxNodesChangeConfigProvider epochStart.MaxNodesChangeConfigProvider + EnableEpochsHandler common.EnableEpochsHandler } type systemSCProcessor struct { - systemVM vmcommon.VMExecutionHandler - userAccountsDB state.AccountsAdapter - marshalizer marshal.Marshalizer - peerAccountsDB state.AccountsAdapter - chanceComputer nodesCoordinator.ChanceComputer - shardCoordinator sharding.Coordinator - startRating uint32 - validatorInfoCreator epochStart.ValidatorInfoCreator - genesisNodesConfig sharding.GenesisNodesSetupHandler - nodesConfigProvider epochStart.NodesConfigProvider - stakingDataProvider epochStart.StakingDataProvider - endOfEpochCallerAddress []byte - stakingSCAddress []byte - maxNodesEnableConfig []config.MaxNodesChangeConfig - maxNodes uint32 - flagChangeMaxNodesEnabled atomic.Flag - esdtOwnerAddressBytes []byte - mapNumSwitchedPerShard map[uint32]uint32 - mapNumSwitchablePerShard map[uint32]uint32 - enableEpochsHandler common.EnableEpochsHandler -} - -type validatorList []*state.ValidatorInfo - -// Len will return the length of the validatorList -func (v validatorList) Len() int { return len(v) } - -// Swap will interchange the objects on input indexes -func (v validatorList) Swap(i, j int) { v[i], v[j] = v[j], v[i] } - -// Less will return true if object on index i should appear before object in index j -// Sorting of validators should be by index and public key -func (v validatorList) Less(i, j int) bool { - if v[i].TempRating == v[j].TempRating { - if v[i].Index == v[j].Index { - return bytes.Compare(v[i].PublicKey, v[j].PublicKey) < 0 - } - return v[i].Index < v[j].Index - } - return v[i].TempRating < v[j].TempRating + *legacySystemSCProcessor + auctionListSelector epochStart.AuctionListSelector + enableEpochsHandler 
common.EnableEpochsHandler } // NewSystemSCProcessor creates the end of epoch system smart contract processor func NewSystemSCProcessor(args ArgsNewEpochStartSystemSCProcessing) (*systemSCProcessor, error) { - if check.IfNilReflect(args.SystemVM) { - return nil, epochStart.ErrNilSystemVM - } - if check.IfNil(args.UserAccountsDB) { - return nil, epochStart.ErrNilAccountsDB - } - if check.IfNil(args.PeerAccountsDB) { - return nil, epochStart.ErrNilAccountsDB - } - if check.IfNil(args.Marshalizer) { - return nil, epochStart.ErrNilMarshalizer - } - if check.IfNil(args.ValidatorInfoCreator) { - return nil, epochStart.ErrNilValidatorInfoProcessor - } - if len(args.EndOfEpochCallerAddress) == 0 { - return nil, epochStart.ErrNilEndOfEpochCallerAddress - } - if len(args.StakingSCAddress) == 0 { - return nil, epochStart.ErrNilStakingSCAddress - } - if check.IfNil(args.ChanceComputer) { - return nil, epochStart.ErrNilChanceComputer - } if check.IfNil(args.EpochNotifier) { return nil, epochStart.ErrNilEpochStartNotifier } - if check.IfNil(args.GenesisNodesConfig) { - return nil, epochStart.ErrNilGenesisNodesConfig - } - if check.IfNil(args.NodesConfigProvider) { - return nil, epochStart.ErrNilNodesConfigProvider + if check.IfNil(args.AuctionListSelector) { + return nil, epochStart.ErrNilAuctionListSelector } - if check.IfNil(args.StakingDataProvider) { - return nil, epochStart.ErrNilStakingDataProvider - } - if check.IfNil(args.ShardCoordinator) { - return nil, epochStart.ErrNilShardCoordinator - } - if len(args.ESDTOwnerAddressBytes) == 0 { - return nil, epochStart.ErrEmptyESDTOwnerAddress + + legacy, err := newLegacySystemSCProcessor(args) + if err != nil { + return nil, err } if check.IfNil(args.EnableEpochsHandler) { return nil, epochStart.ErrNilEnableEpochsHandler } - err := core.CheckHandlerCompatibility(args.EnableEpochsHandler, []core.EnableEpochFlag{ + + err = core.CheckHandlerCompatibility(args.EnableEpochsHandler, []core.EnableEpochFlag{ common.SwitchHysteresisForMinNodesFlagInSpecificEpochOnly, common.StakingV2OwnerFlagInSpecificEpochOnly, common.CorrectLastUnJailedFlagInSpecificEpochOnly, @@ -152,133 +76,91 @@ func NewSystemSCProcessor(args ArgsNewEpochStartSystemSCProcessing) (*systemSCPr common.ESDTFlagInSpecificEpochOnly, common.GovernanceFlag, common.SaveJailedAlwaysFlag, + common.StakingV4Step1Flag, + common.StakingV4Step2Flag, + common.StakingQueueFlag, + common.StakingV4StartedFlag, + common.DelegationSmartContractFlagInSpecificEpochOnly, + common.GovernanceFlagInSpecificEpochOnly, }) if err != nil { return nil, err } s := &systemSCProcessor{ - systemVM: args.SystemVM, - userAccountsDB: args.UserAccountsDB, - peerAccountsDB: args.PeerAccountsDB, - marshalizer: args.Marshalizer, - startRating: args.StartRating, - validatorInfoCreator: args.ValidatorInfoCreator, - genesisNodesConfig: args.GenesisNodesConfig, - endOfEpochCallerAddress: args.EndOfEpochCallerAddress, - stakingSCAddress: args.StakingSCAddress, - chanceComputer: args.ChanceComputer, - mapNumSwitchedPerShard: make(map[uint32]uint32), - mapNumSwitchablePerShard: make(map[uint32]uint32), - stakingDataProvider: args.StakingDataProvider, - nodesConfigProvider: args.NodesConfigProvider, - shardCoordinator: args.ShardCoordinator, - esdtOwnerAddressBytes: args.ESDTOwnerAddressBytes, - enableEpochsHandler: args.EnableEpochsHandler, + legacySystemSCProcessor: legacy, + auctionListSelector: args.AuctionListSelector, + enableEpochsHandler: args.EnableEpochsHandler, } - s.maxNodesEnableConfig = make([]config.MaxNodesChangeConfig, 
len(args.MaxNodesEnableConfig)) - copy(s.maxNodesEnableConfig, args.MaxNodesEnableConfig) - sort.Slice(s.maxNodesEnableConfig, func(i, j int) bool { - return s.maxNodesEnableConfig[i].EpochEnable < s.maxNodesEnableConfig[j].EpochEnable - }) - args.EpochNotifier.RegisterNotifyHandler(s) return s, nil } // ProcessSystemSmartContract does all the processing at end of epoch in case of system smart contract func (s *systemSCProcessor) ProcessSystemSmartContract( - validatorInfos map[uint32][]*state.ValidatorInfo, - nonce uint64, - epoch uint32, + validatorsInfoMap state.ShardValidatorsInfoMapHandler, + header data.HeaderHandler, ) error { - if s.enableEpochsHandler.IsFlagEnabled(common.SwitchHysteresisForMinNodesFlagInSpecificEpochOnly) { - err := s.updateSystemSCConfigMinNodes() - if err != nil { - return err - } + err := checkNilInputValues(validatorsInfoMap, header) + if err != nil { + return err } - if s.enableEpochsHandler.IsFlagEnabled(common.StakingV2OwnerFlagInSpecificEpochOnly) { - err := s.updateOwnersForBlsKeys() - if err != nil { - return err - } + err = s.processLegacy(validatorsInfoMap, header.GetNonce(), header.GetEpoch()) + if err != nil { + return err } + return s.processWithNewFlags(validatorsInfoMap, header) +} - if s.flagChangeMaxNodesEnabled.IsSet() { - err := s.updateMaxNodes(validatorInfos, nonce) - if err != nil { - return err - } +func checkNilInputValues(validatorsInfoMap state.ShardValidatorsInfoMapHandler, header data.HeaderHandler) error { + if check.IfNil(header) { + return process.ErrNilHeaderHandler } - - if s.enableEpochsHandler.IsFlagEnabled(common.CorrectLastUnJailedFlagInSpecificEpochOnly) { - err := s.resetLastUnJailed() - if err != nil { - return err - } + if validatorsInfoMap == nil { + return fmt.Errorf("systemSCProcessor.ProcessSystemSmartContract : %w, header nonce: %d ", + errNilValidatorsInfoMap, header.GetNonce()) } - if s.enableEpochsHandler.IsFlagEnabled(common.DelegationSmartContractFlag) { - err := s.initDelegationSystemSC() - if err != nil { - return err - } - } + return nil +} - if s.enableEpochsHandler.IsFlagEnabled(common.CorrectLastUnJailedFlag) { - err := s.cleanAdditionalQueue() +func (s *systemSCProcessor) processWithNewFlags( + validatorsInfoMap state.ShardValidatorsInfoMapHandler, + header data.HeaderHandler, +) error { + if s.enableEpochsHandler.IsFlagEnabled(common.GovernanceFlagInSpecificEpochOnly) { + err := s.updateToGovernanceV2() if err != nil { return err } } - if s.enableEpochsHandler.IsFlagEnabled(common.SwitchJailWaitingFlag) { - err := s.computeNumWaitingPerShard(validatorInfos) - if err != nil { - return err - } - - err = s.swapJailedWithWaiting(validatorInfos) + if s.enableEpochsHandler.IsFlagEnabled(common.StakingV4Step1Flag) { + err := s.unStakeAllNodesFromQueue() if err != nil { return err } } - if s.enableEpochsHandler.IsFlagEnabled(common.StakingV2Flag) { - err := s.prepareRewardsData(validatorInfos) - if err != nil { - return err - } - - err = s.fillStakingDataForNonEligible(validatorInfos) + if s.enableEpochsHandler.IsFlagEnabled(common.StakingV4Step2Flag) { + err := s.prepareStakingDataForEligibleNodes(validatorsInfoMap) if err != nil { return err } - numUnStaked, err := s.unStakeNodesWithNotEnoughFunds(validatorInfos, epoch) + err = s.fillStakingDataForNonEligible(validatorsInfoMap) if err != nil { return err } - err = s.stakeNodesFromQueue(validatorInfos, numUnStaked, nonce) + err = s.unStakeNodesWithNotEnoughFundsWithStakingV4(validatorsInfoMap, header.GetEpoch()) if err != nil { return err } - } - - if 
s.enableEpochsHandler.IsFlagEnabled(common.ESDTFlagInSpecificEpochOnly) { - err := s.initESDT() - if err != nil { - //not a critical error - log.Error("error while initializing ESDT", "err", err) - } - } - if s.enableEpochsHandler.IsFlagEnabled(common.GovernanceFlag) { - err := s.updateToGovernanceV2() + err = s.auctionListSelector.SelectNodesFromAuctionList(validatorsInfoMap, header.GetPrevRandSeed()) if err != nil { return err } @@ -287,1162 +169,94 @@ func (s *systemSCProcessor) ProcessSystemSmartContract( return nil } -// ToggleUnStakeUnBond will pause/unPause the unStake/unBond functions on the validator system sc -func (s *systemSCProcessor) ToggleUnStakeUnBond(value bool) error { - if !s.enableEpochsHandler.IsFlagEnabled(common.StakingV2Flag) { - return nil - } - +func (s *systemSCProcessor) unStakeAllNodesFromQueue() error { vmInput := &vmcommon.ContractCallInput{ VMInput: vmcommon.VMInput{ - CallerAddr: s.endOfEpochCallerAddress, - Arguments: nil, + CallerAddr: vm.EndOfEpochAddress, CallValue: big.NewInt(0), + Arguments: [][]byte{}, }, - RecipientAddr: vm.ValidatorSCAddress, - Function: "unPauseUnStakeUnBond", - } - - if value { - vmInput.Function = "pauseUnStakeUnBond" + RecipientAddr: vm.StakingSCAddress, + Function: "unStakeAllNodesFromQueue", } - - vmOutput, err := s.systemVM.RunSmartContractCall(vmInput) - if err != nil { - return err + vmOutput, errRun := s.systemVM.RunSmartContractCall(vmInput) + if errRun != nil { + return fmt.Errorf("%w when unStaking all nodes from staking queue", errRun) } - if vmOutput.ReturnCode != vmcommon.Ok { - return epochStart.ErrSystemValidatorSCCall - } - - err = s.processSCOutputAccounts(vmOutput) - if err != nil { - return err + return fmt.Errorf("got return code %s when unStaking all nodes from staking queue", vmOutput.ReturnCode) } - return nil + return s.processSCOutputAccounts(vmOutput) } -func (s *systemSCProcessor) unStakeNodesWithNotEnoughFunds( - validatorInfos map[uint32][]*state.ValidatorInfo, +func (s *systemSCProcessor) unStakeNodesWithNotEnoughFundsWithStakingV4( + validatorsInfoMap state.ShardValidatorsInfoMapHandler, epoch uint32, -) (uint32, error) { - nodesToUnStake, mapOwnersKeys, err := s.stakingDataProvider.ComputeUnQualifiedNodes(validatorInfos) +) error { + nodesToUnStake, mapOwnersKeys, err := s.stakingDataProvider.ComputeUnQualifiedNodes(validatorsInfoMap) if err != nil { - return 0, err + return err } - nodesUnStakedFromAdditionalQueue := uint32(0) - log.Debug("unStake nodes with not enough funds", "num", len(nodesToUnStake)) for _, blsKey := range nodesToUnStake { log.Debug("unStake at end of epoch for node", "blsKey", blsKey) err = s.unStakeOneNode(blsKey, epoch) - if err != nil { - return 0, err - } - - validatorInfo := getValidatorInfoWithBLSKey(validatorInfos, blsKey) - if validatorInfo == nil { - nodesUnStakedFromAdditionalQueue++ - log.Debug("unStaked node which was in additional queue", "blsKey", blsKey) - continue - } - - validatorInfo.List = string(common.LeavingList) - } - - err = s.updateDelegationContracts(mapOwnersKeys) - if err != nil { - return 0, err - } - - nodesToStakeFromQueue := uint32(len(nodesToUnStake)) - if s.enableEpochsHandler.IsFlagEnabled(common.CorrectLastUnJailedFlag) { - nodesToStakeFromQueue -= nodesUnStakedFromAdditionalQueue - } - - log.Debug("stake nodes from waiting list", "num", nodesToStakeFromQueue) - return nodesToStakeFromQueue, nil -} - -func (s *systemSCProcessor) unStakeOneNode(blsKey []byte, epoch uint32) error { - vmInput := &vmcommon.ContractCallInput{ - VMInput: 
vmcommon.VMInput{ - CallerAddr: s.endOfEpochCallerAddress, - Arguments: [][]byte{blsKey}, - CallValue: big.NewInt(0), - }, - RecipientAddr: s.stakingSCAddress, - Function: "unStakeAtEndOfEpoch", - } - - vmOutput, err := s.systemVM.RunSmartContractCall(vmInput) - if err != nil { - return err - } - if vmOutput.ReturnCode != vmcommon.Ok { - log.Debug("unStakeOneNode", "returnMessage", vmOutput.ReturnMessage, "returnCode", vmOutput.ReturnCode.String()) - return epochStart.ErrUnStakeExecuteError - } - - err = s.processSCOutputAccounts(vmOutput) - if err != nil { - return err - } - - account, errExists := s.peerAccountsDB.GetExistingAccount(blsKey) - if errExists != nil { - return nil - } - - peerAccount, ok := account.(state.PeerAccountHandler) - if !ok { - return epochStart.ErrWrongTypeAssertion - } - - peerAccount.SetListAndIndex(peerAccount.GetShardId(), string(common.LeavingList), peerAccount.GetIndexInList()) - peerAccount.SetUnStakedEpoch(epoch) - err = s.peerAccountsDB.SaveAccount(peerAccount) - if err != nil { - return err - } - - return nil -} - -func (s *systemSCProcessor) updateDelegationContracts(mapOwnerKeys map[string][][]byte) error { - sortedDelegationsSCs := make([]string, 0, len(mapOwnerKeys)) - for address := range mapOwnerKeys { - shardId := s.shardCoordinator.ComputeId([]byte(address)) - if shardId != core.MetachainShardId { - continue - } - sortedDelegationsSCs = append(sortedDelegationsSCs, address) - } - - sort.Slice(sortedDelegationsSCs, func(i, j int) bool { - return sortedDelegationsSCs[i] < sortedDelegationsSCs[j] - }) - - for _, address := range sortedDelegationsSCs { - vmInput := &vmcommon.ContractCallInput{ - VMInput: vmcommon.VMInput{ - CallerAddr: s.endOfEpochCallerAddress, - Arguments: mapOwnerKeys[address], - CallValue: big.NewInt(0), - }, - RecipientAddr: []byte(address), - Function: "unStakeAtEndOfEpoch", - } - - vmOutput, err := s.systemVM.RunSmartContractCall(vmInput) if err != nil { return err } - if vmOutput.ReturnCode != vmcommon.Ok { - log.Debug("unStakeAtEndOfEpoch", "returnMessage", vmOutput.ReturnMessage, "returnCode", vmOutput.ReturnCode.String()) - return epochStart.ErrUnStakeExecuteError - } - - err = s.processSCOutputAccounts(vmOutput) - if err != nil { - return err - } - } - - return nil -} - -func getValidatorInfoWithBLSKey(validatorInfos map[uint32][]*state.ValidatorInfo, blsKey []byte) *state.ValidatorInfo { - for _, validatorsInfoSlice := range validatorInfos { - for _, validatorInfo := range validatorsInfoSlice { - if bytes.Equal(validatorInfo.PublicKey, blsKey) { - return validatorInfo - } - } - } - return nil -} - -func (s *systemSCProcessor) fillStakingDataForNonEligible(validatorInfos map[uint32][]*state.ValidatorInfo) error { - for shId, validatorsInfoSlice := range validatorInfos { - newList := make([]*state.ValidatorInfo, 0, len(validatorsInfoSlice)) - deleteCalled := false - - for _, validatorInfo := range validatorsInfoSlice { - if vInfo.WasEligibleInCurrentEpoch(validatorInfo) { - newList = append(newList, validatorInfo) - continue - } - - err := s.stakingDataProvider.FillValidatorInfo(validatorInfo.PublicKey) - if err != nil { - deleteCalled = true - - log.Error("fillStakingDataForNonEligible", "error", err) - if len(validatorInfo.List) > 0 { - return err - } - - err = s.peerAccountsDB.RemoveAccount(validatorInfo.PublicKey) - if err != nil { - log.Error("fillStakingDataForNonEligible removeAccount", "error", err) - } - - continue - } - - newList = append(newList, validatorInfo) - } - - if deleteCalled { - validatorInfos[shId] = 
newList - } - } - - return nil -} - -func (s *systemSCProcessor) prepareRewardsData( - validatorsInfo map[uint32][]*state.ValidatorInfo, -) error { - eligibleNodesKeys := s.getEligibleNodesKeyMapOfType(validatorsInfo) - err := s.prepareStakingDataForRewards(eligibleNodesKeys) - if err != nil { - return err - } - - return nil -} - -func (s *systemSCProcessor) prepareStakingDataForRewards(eligibleNodesKeys map[uint32][][]byte) error { - sw := core.NewStopWatch() - sw.Start("prepareStakingDataForRewards") - defer func() { - sw.Stop("prepareStakingDataForRewards") - log.Debug("systemSCProcessor.prepareStakingDataForRewards time measurements", sw.GetMeasurements()...) - }() - - return s.stakingDataProvider.PrepareStakingDataForRewards(eligibleNodesKeys) -} - -func (s *systemSCProcessor) getEligibleNodesKeyMapOfType( - validatorsInfo map[uint32][]*state.ValidatorInfo, -) map[uint32][][]byte { - eligibleNodesKeys := make(map[uint32][][]byte) - for shardID, validatorsInfoSlice := range validatorsInfo { - eligibleNodesKeys[shardID] = make([][]byte, 0, s.nodesConfigProvider.ConsensusGroupSize(shardID)) - for _, validatorInfo := range validatorsInfoSlice { - if vInfo.WasEligibleInCurrentEpoch(validatorInfo) { - eligibleNodesKeys[shardID] = append(eligibleNodesKeys[shardID], validatorInfo.PublicKey) - } - } - } - - return eligibleNodesKeys -} - -func getRewardsMiniBlockForMeta(miniBlocks block.MiniBlockSlice) *block.MiniBlock { - for _, miniBlock := range miniBlocks { - if miniBlock.Type != block.RewardsBlock { - continue - } - if miniBlock.ReceiverShardID != core.MetachainShardId { - continue - } - return miniBlock - } - return nil -} - -// ProcessDelegationRewards will process the rewards which are directed towards the delegation system smart contracts -func (s *systemSCProcessor) ProcessDelegationRewards( - miniBlocks block.MiniBlockSlice, - txCache epochStart.TransactionCacher, -) error { - if txCache == nil { - return epochStart.ErrNilLocalTxCache - } - - rwdMb := getRewardsMiniBlockForMeta(miniBlocks) - if rwdMb == nil { - return nil - } - for _, txHash := range rwdMb.TxHashes { - rwdTx, err := txCache.GetTx(txHash) - if err != nil { - return err + validatorInfo := validatorsInfoMap.GetValidator(blsKey) + if validatorInfo == nil { + return fmt.Errorf( + "%w in systemSCProcessor.unStakeNodesWithNotEnoughFundsWithStakingV4 because validator might be in additional queue after staking v4", + epochStart.ErrNilValidatorInfo) } - err = s.executeRewardTx(rwdTx) + validatorLeaving := validatorInfo.ShallowClone() + validatorLeaving.SetListAndIndex(string(common.LeavingList), validatorLeaving.GetIndex(), true) + err = validatorsInfoMap.Replace(validatorInfo, validatorLeaving) if err != nil { return err } } - return nil -} - -func (s *systemSCProcessor) executeRewardTx(rwdTx data.TransactionHandler) error { - vmInput := &vmcommon.ContractCallInput{ - VMInput: vmcommon.VMInput{ - CallerAddr: s.endOfEpochCallerAddress, - Arguments: nil, - CallValue: rwdTx.GetValue(), - }, - RecipientAddr: rwdTx.GetRcvAddr(), - Function: "updateRewards", - } - - vmOutput, err := s.systemVM.RunSmartContractCall(vmInput) - if err != nil { - return err - } - - if vmOutput.ReturnCode != vmcommon.Ok { - return epochStart.ErrSystemDelegationCall - } - - err = s.processSCOutputAccounts(vmOutput) - if err != nil { - return err - } - - return nil -} - -// updates the configuration of the system SC if the flags permit -func (s *systemSCProcessor) updateSystemSCConfigMinNodes() error { - minNumberOfNodesWithHysteresis := 
s.genesisNodesConfig.MinNumberOfNodesWithHysteresis() - err := s.setMinNumberOfNodes(minNumberOfNodesWithHysteresis) - - return err + return s.updateDelegationContracts(mapOwnersKeys) } -func (s *systemSCProcessor) resetLastUnJailed() error { +func (s *systemSCProcessor) updateToGovernanceV2() error { vmInput := &vmcommon.ContractCallInput{ VMInput: vmcommon.VMInput{ - CallerAddr: s.endOfEpochCallerAddress, - Arguments: [][]byte{}, + CallerAddr: vm.GovernanceSCAddress, CallValue: big.NewInt(0), + Arguments: [][]byte{}, }, - RecipientAddr: s.stakingSCAddress, - Function: "resetLastUnJailedFromQueue", + RecipientAddr: vm.GovernanceSCAddress, + Function: "initV2", } - - vmOutput, err := s.systemVM.RunSmartContractCall(vmInput) - if err != nil { - return err + vmOutput, errRun := s.systemVM.RunSmartContractCall(vmInput) + if errRun != nil { + return fmt.Errorf("%w when updating to governanceV2", errRun) } - if vmOutput.ReturnCode != vmcommon.Ok { - return epochStart.ErrResetLastUnJailedFromQueue - } - - err = s.processSCOutputAccounts(vmOutput) - if err != nil { - return err + return fmt.Errorf("got return code %s when updating to governanceV2", vmOutput.ReturnCode) } - return nil -} - -// updates the configuration of the system SC if the flags permit -func (s *systemSCProcessor) updateMaxNodes(validatorInfos map[uint32][]*state.ValidatorInfo, nonce uint64) error { - sw := core.NewStopWatch() - sw.Start("total") - defer func() { - sw.Stop("total") - log.Debug("systemSCProcessor.updateMaxNodes", sw.GetMeasurements()...) - }() - - maxNumberOfNodes := s.maxNodes - sw.Start("setMaxNumberOfNodes") - prevMaxNumberOfNodes, err := s.setMaxNumberOfNodes(maxNumberOfNodes) - sw.Stop("setMaxNumberOfNodes") + err := s.processSCOutputAccounts(vmOutput) if err != nil { return err } - if maxNumberOfNodes < prevMaxNumberOfNodes { - return epochStart.ErrInvalidMaxNumberOfNodes - } - - sw.Start("stakeNodesFromQueue") - err = s.stakeNodesFromQueue(validatorInfos, maxNumberOfNodes-prevMaxNumberOfNodes, nonce) - sw.Stop("stakeNodesFromQueue") - if err != nil { - return err - } return nil } -func (s *systemSCProcessor) computeNumWaitingPerShard(validatorInfos map[uint32][]*state.ValidatorInfo) error { - for shardID, validatorInfoList := range validatorInfos { - totalInWaiting := uint32(0) - for _, validatorInfo := range validatorInfoList { - switch validatorInfo.List { - case string(common.WaitingList): - totalInWaiting++ - } - } - s.mapNumSwitchablePerShard[shardID] = totalInWaiting - s.mapNumSwitchedPerShard[shardID] = 0 - } - return nil -} - -func (s *systemSCProcessor) swapJailedWithWaiting(validatorInfos map[uint32][]*state.ValidatorInfo) error { - jailedValidators := s.getSortedJailedNodes(validatorInfos) - - log.Debug("number of jailed validators", "num", len(jailedValidators)) - - newValidators := make(map[string]struct{}) - for _, jailedValidator := range jailedValidators { - if _, ok := newValidators[string(jailedValidator.PublicKey)]; ok { - continue - } - if isValidator(jailedValidator) && s.mapNumSwitchablePerShard[jailedValidator.ShardId] <= s.mapNumSwitchedPerShard[jailedValidator.ShardId] { - log.Debug("cannot switch in this epoch anymore for this shard as switched num waiting", - "shardID", jailedValidator.ShardId, - "numSwitched", s.mapNumSwitchedPerShard[jailedValidator.ShardId]) - continue - } - - vmInput := &vmcommon.ContractCallInput{ - VMInput: vmcommon.VMInput{ - CallerAddr: s.endOfEpochCallerAddress, - Arguments: [][]byte{jailedValidator.PublicKey}, - CallValue: big.NewInt(0), - }, - 
RecipientAddr: s.stakingSCAddress, - Function: "switchJailedWithWaiting", - } - - vmOutput, err := s.systemVM.RunSmartContractCall(vmInput) - if err != nil { - return err - } - - log.Debug("switchJailedWithWaiting called for", - "key", jailedValidator.PublicKey, - "returnMessage", vmOutput.ReturnMessage) - if vmOutput.ReturnCode != vmcommon.Ok { - continue - } - - newValidator, err := s.stakingToValidatorStatistics(validatorInfos, jailedValidator, vmOutput) - if err != nil { - return err - } - - if len(newValidator) != 0 { - newValidators[string(newValidator)] = struct{}{} - } - } - - return nil -} - -func (s *systemSCProcessor) stakingToValidatorStatistics( - validatorInfos map[uint32][]*state.ValidatorInfo, - jailedValidator *state.ValidatorInfo, - vmOutput *vmcommon.VMOutput, -) ([]byte, error) { - stakingSCOutput, ok := vmOutput.OutputAccounts[string(s.stakingSCAddress)] - if !ok { - return nil, epochStart.ErrStakingSCOutputAccountNotFound - } - - var activeStorageUpdate *vmcommon.StorageUpdate - for _, storageUpdate := range stakingSCOutput.StorageUpdates { - isNewValidatorKey := len(storageUpdate.Offset) == len(jailedValidator.PublicKey) && - !bytes.Equal(storageUpdate.Offset, jailedValidator.PublicKey) - if isNewValidatorKey { - activeStorageUpdate = storageUpdate - break - } - } - if activeStorageUpdate == nil { - log.Debug("no one in waiting suitable for switch") - if s.enableEpochsHandler.IsFlagEnabled(common.SaveJailedAlwaysFlag) { - err := s.processSCOutputAccounts(vmOutput) - if err != nil { - return nil, err - } - } - - return nil, nil - } - - err := s.processSCOutputAccounts(vmOutput) - if err != nil { - return nil, err - } - - var stakingData systemSmartContracts.StakedDataV2_0 - err = s.marshalizer.Unmarshal(&stakingData, activeStorageUpdate.Data) - if err != nil { - return nil, err - } - - blsPubKey := activeStorageUpdate.Offset - log.Debug("staking validator key who switches with the jailed one", "blsKey", blsPubKey) - - account, isNew, err := state.GetPeerAccountAndReturnIfNew(s.peerAccountsDB, blsPubKey) - if err != nil { - return nil, err - } - - if !bytes.Equal(account.GetRewardAddress(), stakingData.RewardAddress) { - err = account.SetRewardAddress(stakingData.RewardAddress) - if err != nil { - return nil, err - } - } - - if !isNew { - // old jailed validator getting switched back after unJail with stake - must remove first from exported map - deleteNewValidatorIfExistsFromMap(validatorInfos, blsPubKey, account.GetShardId()) - } - - account.SetListAndIndex(jailedValidator.ShardId, string(common.NewList), uint32(stakingData.StakedNonce)) - account.SetTempRating(s.startRating) - account.SetUnStakedEpoch(common.DefaultUnstakedEpoch) - - err = s.peerAccountsDB.SaveAccount(account) - if err != nil { - return nil, err - } - - jailedAccount, err := s.getPeerAccount(jailedValidator.PublicKey) - if err != nil { - return nil, err - } - - jailedAccount.SetListAndIndex(jailedValidator.ShardId, string(common.JailedList), jailedValidator.Index) - jailedAccount.ResetAtNewEpoch() - err = s.peerAccountsDB.SaveAccount(jailedAccount) - if err != nil { - return nil, err - } - - if isValidator(jailedValidator) { - s.mapNumSwitchedPerShard[jailedValidator.ShardId]++ - } - - newValidatorInfo := s.validatorInfoCreator.PeerAccountToValidatorInfo(account) - switchJailedWithNewValidatorInMap(validatorInfos, jailedValidator, newValidatorInfo) - - return blsPubKey, nil -} - -func isValidator(validator *state.ValidatorInfo) bool { - return validator.List == string(common.WaitingList) || 
validator.List == string(common.EligibleList) -} - -func deleteNewValidatorIfExistsFromMap( - validatorInfos map[uint32][]*state.ValidatorInfo, - blsPubKey []byte, - shardID uint32, -) { - for index, validatorInfo := range validatorInfos[shardID] { - if bytes.Equal(validatorInfo.PublicKey, blsPubKey) { - length := len(validatorInfos[shardID]) - validatorInfos[shardID][index] = validatorInfos[shardID][length-1] - validatorInfos[shardID][length-1] = nil - validatorInfos[shardID] = validatorInfos[shardID][:length-1] - break - } - } -} - -func switchJailedWithNewValidatorInMap( - validatorInfos map[uint32][]*state.ValidatorInfo, - jailedValidator *state.ValidatorInfo, - newValidator *state.ValidatorInfo, -) { - for index, validatorInfo := range validatorInfos[jailedValidator.ShardId] { - if bytes.Equal(validatorInfo.PublicKey, jailedValidator.PublicKey) { - validatorInfos[jailedValidator.ShardId][index] = newValidator - break - } - } -} - -func (s *systemSCProcessor) getUserAccount(address []byte) (state.UserAccountHandler, error) { - acnt, err := s.userAccountsDB.LoadAccount(address) - if err != nil { - return nil, err - } - - stAcc, ok := acnt.(state.UserAccountHandler) - if !ok { - return nil, process.ErrWrongTypeAssertion - } - - return stAcc, nil -} - -// save account changes in state from vmOutput - protected by VM - every output can be treated as is. -func (s *systemSCProcessor) processSCOutputAccounts( - vmOutput *vmcommon.VMOutput, -) error { - - outputAccounts := process.SortVMOutputInsideData(vmOutput) - for _, outAcc := range outputAccounts { - acc, err := s.getUserAccount(outAcc.Address) - if err != nil { - return err - } - - storageUpdates := process.GetSortedStorageUpdates(outAcc) - for _, storeUpdate := range storageUpdates { - err = acc.SaveKeyValue(storeUpdate.Offset, storeUpdate.Data) - if err != nil { - return err - } - } - - if outAcc.BalanceDelta != nil && outAcc.BalanceDelta.Cmp(zero) != 0 { - err = acc.AddToBalance(outAcc.BalanceDelta) - if err != nil { - return err - } - } - - err = s.userAccountsDB.SaveAccount(acc) - if err != nil { - return err - } - } - - return nil -} - -func (s *systemSCProcessor) getSortedJailedNodes(validatorInfos map[uint32][]*state.ValidatorInfo) []*state.ValidatorInfo { - newJailedValidators := make([]*state.ValidatorInfo, 0) - oldJailedValidators := make([]*state.ValidatorInfo, 0) - - minChance := s.chanceComputer.GetChance(0) - for _, listValidators := range validatorInfos { - for _, validatorInfo := range listValidators { - if validatorInfo.List == string(common.JailedList) { - oldJailedValidators = append(oldJailedValidators, validatorInfo) - } else if s.chanceComputer.GetChance(validatorInfo.TempRating) < minChance { - newJailedValidators = append(newJailedValidators, validatorInfo) - } - } - } - - sort.Sort(validatorList(oldJailedValidators)) - sort.Sort(validatorList(newJailedValidators)) - - return append(oldJailedValidators, newJailedValidators...) 
-} - -func (s *systemSCProcessor) getPeerAccount(key []byte) (state.PeerAccountHandler, error) { - account, err := s.peerAccountsDB.LoadAccount(key) - if err != nil { - return nil, err - } - - peerAcc, ok := account.(state.PeerAccountHandler) - if !ok { - return nil, epochStart.ErrWrongTypeAssertion - } - - return peerAcc, nil -} - -func (s *systemSCProcessor) setMinNumberOfNodes(minNumNodes uint32) error { - vmInput := &vmcommon.ContractCallInput{ - VMInput: vmcommon.VMInput{ - CallerAddr: s.endOfEpochCallerAddress, - Arguments: [][]byte{big.NewInt(int64(minNumNodes)).Bytes()}, - CallValue: big.NewInt(0), - }, - RecipientAddr: s.stakingSCAddress, - Function: "updateConfigMinNodes", - } - - vmOutput, err := s.systemVM.RunSmartContractCall(vmInput) - if err != nil { - return err - } - - log.Debug("setMinNumberOfNodes called with", - "minNumNodes", minNumNodes, - "returnMessage", vmOutput.ReturnMessage) - - if vmOutput.ReturnCode != vmcommon.Ok { - return epochStart.ErrInvalidMinNumberOfNodes - } - - err = s.processSCOutputAccounts(vmOutput) - if err != nil { - return err - } - - return nil -} - -func (s *systemSCProcessor) setMaxNumberOfNodes(maxNumNodes uint32) (uint32, error) { - vmInput := &vmcommon.ContractCallInput{ - VMInput: vmcommon.VMInput{ - CallerAddr: s.endOfEpochCallerAddress, - Arguments: [][]byte{big.NewInt(int64(maxNumNodes)).Bytes()}, - CallValue: big.NewInt(0), - }, - RecipientAddr: s.stakingSCAddress, - Function: "updateConfigMaxNodes", - } - - vmOutput, err := s.systemVM.RunSmartContractCall(vmInput) - if err != nil { - return 0, err - } - - log.Debug("setMaxNumberOfNodes called with", - "maxNumNodes", maxNumNodes, - "returnMessage", vmOutput.ReturnMessage) - - if vmOutput.ReturnCode != vmcommon.Ok { - return 0, epochStart.ErrInvalidMaxNumberOfNodes - } - if len(vmOutput.ReturnData) != 1 { - return 0, epochStart.ErrInvalidSystemSCReturn - } - - err = s.processSCOutputAccounts(vmOutput) - if err != nil { - return 0, err - } - - prevMaxNumNodes := big.NewInt(0).SetBytes(vmOutput.ReturnData[0]).Uint64() - return uint32(prevMaxNumNodes), nil -} - -func (s *systemSCProcessor) updateOwnersForBlsKeys() error { - sw := core.NewStopWatch() - sw.Start("systemSCProcessor") - defer func() { - sw.Stop("systemSCProcessor") - log.Debug("systemSCProcessor.updateOwnersForBlsKeys time measurements", sw.GetMeasurements()...) 
- }() - - sw.Start("getValidatorSystemAccount") - userValidatorAccount, err := s.getValidatorSystemAccount() - sw.Stop("getValidatorSystemAccount") - if err != nil { - return err - } - - sw.Start("getArgumentsForSetOwnerFunctionality") - arguments, err := s.getArgumentsForSetOwnerFunctionality(userValidatorAccount) - sw.Stop("getArgumentsForSetOwnerFunctionality") - if err != nil { - return err - } - - sw.Start("callSetOwnersOnAddresses") - err = s.callSetOwnersOnAddresses(arguments) - sw.Stop("callSetOwnersOnAddresses") - if err != nil { - return err - } - - return nil -} - -func (s *systemSCProcessor) updateToGovernanceV2() error { - vmInput := &vmcommon.ContractCallInput{ - VMInput: vmcommon.VMInput{ - CallerAddr: vm.GovernanceSCAddress, - CallValue: big.NewInt(0), - Arguments: [][]byte{}, - }, - RecipientAddr: vm.GovernanceSCAddress, - Function: "initV2", - } - vmOutput, errRun := s.systemVM.RunSmartContractCall(vmInput) - if errRun != nil { - return fmt.Errorf("%w when updating to governanceV2", errRun) - } - if vmOutput.ReturnCode != vmcommon.Ok { - return fmt.Errorf("got return code %s when updating to governanceV2", vmOutput.ReturnCode) - } - - err := s.processSCOutputAccounts(vmOutput) - if err != nil { - return err - } - - return nil -} - -func (s *systemSCProcessor) getValidatorSystemAccount() (state.UserAccountHandler, error) { - validatorAccount, err := s.userAccountsDB.LoadAccount(vm.ValidatorSCAddress) - if err != nil { - return nil, fmt.Errorf("%w when loading validator account", err) - } - - userValidatorAccount, ok := validatorAccount.(state.UserAccountHandler) - if !ok { - return nil, fmt.Errorf("%w when loading validator account", epochStart.ErrWrongTypeAssertion) - } - - if check.IfNil(userValidatorAccount.DataTrie()) { - return nil, epochStart.ErrNilDataTrie - } - - return userValidatorAccount, nil -} - -func (s *systemSCProcessor) getArgumentsForSetOwnerFunctionality(userValidatorAccount state.UserAccountHandler) ([][]byte, error) { - arguments := make([][]byte, 0) - - leavesChannels := &common.TrieIteratorChannels{ - LeavesChan: make(chan core.KeyValueHolder, common.TrieLeavesChannelDefaultCapacity), - ErrChan: errChan.NewErrChanWrapper(), - } - err := userValidatorAccount.GetAllLeaves(leavesChannels, context.Background()) - if err != nil { - return nil, err - } - for leaf := range leavesChannels.LeavesChan { - validatorData := &systemSmartContracts.ValidatorDataV2{} - - err = s.marshalizer.Unmarshal(validatorData, leaf.Value()) - if err != nil { - continue - } - for _, blsKey := range validatorData.BlsPubKeys { - arguments = append(arguments, blsKey) - arguments = append(arguments, leaf.Key()) - } - } - - err = leavesChannels.ErrChan.ReadFromChanNonBlocking() - if err != nil { - return nil, err - } - - return arguments, nil -} - -func (s *systemSCProcessor) callSetOwnersOnAddresses(arguments [][]byte) error { - vmInput := &vmcommon.ContractCallInput{ - VMInput: vmcommon.VMInput{ - CallerAddr: vm.EndOfEpochAddress, - CallValue: big.NewInt(0), - Arguments: arguments, - }, - RecipientAddr: vm.StakingSCAddress, - Function: "setOwnersOnAddresses", - } - - vmOutput, errRun := s.systemVM.RunSmartContractCall(vmInput) - if errRun != nil { - return fmt.Errorf("%w when calling setOwnersOnAddresses function", errRun) - } - if vmOutput.ReturnCode != vmcommon.Ok { - return fmt.Errorf("got return code %s when calling setOwnersOnAddresses", vmOutput.ReturnCode) - } - - return s.processSCOutputAccounts(vmOutput) -} - -func (s *systemSCProcessor) initDelegationSystemSC() error { - 
codeMetaData := &vmcommon.CodeMetadata{ - Upgradeable: false, - Payable: false, - Readable: true, - } - - vmInput := &vmcommon.ContractCreateInput{ - VMInput: vmcommon.VMInput{ - CallerAddr: vm.DelegationManagerSCAddress, - Arguments: [][]byte{}, - CallValue: big.NewInt(0), - }, - ContractCode: vm.DelegationManagerSCAddress, - ContractCodeMetadata: codeMetaData.ToBytes(), - } - - vmOutput, err := s.systemVM.RunSmartContractCreate(vmInput) - if err != nil { - return err - } - if vmOutput.ReturnCode != vmcommon.Ok { - return epochStart.ErrCouldNotInitDelegationSystemSC - } - - err = s.processSCOutputAccounts(vmOutput) - if err != nil { - return err - } - - err = s.updateSystemSCContractsCode(vmInput.ContractCodeMetadata) - if err != nil { - return err - } - - return nil -} - -func (s *systemSCProcessor) updateSystemSCContractsCode(contractMetadata []byte) error { - contractsToUpdate := make([][]byte, 0) - contractsToUpdate = append(contractsToUpdate, vm.StakingSCAddress) - contractsToUpdate = append(contractsToUpdate, vm.ValidatorSCAddress) - contractsToUpdate = append(contractsToUpdate, vm.GovernanceSCAddress) - contractsToUpdate = append(contractsToUpdate, vm.ESDTSCAddress) - contractsToUpdate = append(contractsToUpdate, vm.DelegationManagerSCAddress) - contractsToUpdate = append(contractsToUpdate, vm.FirstDelegationSCAddress) - - for _, address := range contractsToUpdate { - userAcc, err := s.getUserAccount(address) - if err != nil { - return err - } - - userAcc.SetOwnerAddress(address) - userAcc.SetCodeMetadata(contractMetadata) - userAcc.SetCode(address) - - err = s.userAccountsDB.SaveAccount(userAcc) - if err != nil { - return err - } - } - - return nil -} - -func (s *systemSCProcessor) cleanAdditionalQueue() error { - sw := core.NewStopWatch() - sw.Start("systemSCProcessor") - defer func() { - sw.Stop("systemSCProcessor") - log.Debug("systemSCProcessor.cleanAdditionalQueue time measurements", sw.GetMeasurements()...) 
- }() - - vmInput := &vmcommon.ContractCallInput{ - VMInput: vmcommon.VMInput{ - CallerAddr: vm.EndOfEpochAddress, - CallValue: big.NewInt(0), - Arguments: [][]byte{}, - }, - RecipientAddr: vm.StakingSCAddress, - Function: "cleanAdditionalQueue", - } - vmOutput, errRun := s.systemVM.RunSmartContractCall(vmInput) - if errRun != nil { - return fmt.Errorf("%w when cleaning additional queue", errRun) - } - if vmOutput.ReturnCode != vmcommon.Ok { - return fmt.Errorf("got return code %s, return message %s when cleaning additional queue", vmOutput.ReturnCode, vmOutput.ReturnMessage) - } - - err := s.processSCOutputAccounts(vmOutput) - if err != nil { - return err - } - - // returnData format is list(address - all blsKeys which were unstaked for that) - addressLength := len(s.endOfEpochCallerAddress) - mapOwnersKeys := make(map[string][][]byte) - currentOwner := "" - for _, returnData := range vmOutput.ReturnData { - if len(returnData) == addressLength { - currentOwner = string(returnData) - continue - } - - if len(currentOwner) != addressLength { - continue - } - - mapOwnersKeys[currentOwner] = append(mapOwnersKeys[currentOwner], returnData) - } - - err = s.updateDelegationContracts(mapOwnersKeys) - if err != nil { - log.Error("update delegation contracts failed after cleaning additional queue", "error", err.Error()) - return err - } - - return nil -} - -func (s *systemSCProcessor) stakeNodesFromQueue( - validatorInfos map[uint32][]*state.ValidatorInfo, - nodesToStake uint32, - nonce uint64, -) error { - if nodesToStake == 0 { - return nil - } - - nodesToStakeAsBigInt := big.NewInt(0).SetUint64(uint64(nodesToStake)) - vmInput := &vmcommon.ContractCallInput{ - VMInput: vmcommon.VMInput{ - CallerAddr: vm.EndOfEpochAddress, - CallValue: big.NewInt(0), - Arguments: [][]byte{nodesToStakeAsBigInt.Bytes()}, - }, - RecipientAddr: vm.StakingSCAddress, - Function: "stakeNodesFromQueue", - } - vmOutput, errRun := s.systemVM.RunSmartContractCall(vmInput) - if errRun != nil { - return fmt.Errorf("%w when staking nodes from waiting list", errRun) - } - if vmOutput.ReturnCode != vmcommon.Ok { - return fmt.Errorf("got return code %s when staking nodes from waiting list", vmOutput.ReturnCode) - } - if len(vmOutput.ReturnData)%2 != 0 { - return fmt.Errorf("%w return data must be divisible by 2 when staking nodes from waiting list", epochStart.ErrInvalidSystemSCReturn) - } - - err := s.processSCOutputAccounts(vmOutput) - if err != nil { - return err - } - - err = s.addNewlyStakedNodesToValidatorTrie(validatorInfos, vmOutput.ReturnData, nonce) - if err != nil { - return err - } - - return nil -} - -func (s *systemSCProcessor) addNewlyStakedNodesToValidatorTrie( - validatorInfos map[uint32][]*state.ValidatorInfo, - returnData [][]byte, - nonce uint64, -) error { - for i := 0; i < len(returnData); i += 2 { - blsKey := returnData[i] - rewardAddress := returnData[i+1] - - peerAcc, err := s.getPeerAccount(blsKey) - if err != nil { - return err - } - - err = peerAcc.SetRewardAddress(rewardAddress) - if err != nil { - return err - } - - peerAcc.SetListAndIndex(peerAcc.GetShardId(), string(common.NewList), uint32(nonce)) - peerAcc.SetTempRating(s.startRating) - peerAcc.SetUnStakedEpoch(common.DefaultUnstakedEpoch) - - err = s.peerAccountsDB.SaveAccount(peerAcc) - if err != nil { - return err - } - - validatorInfo := &state.ValidatorInfo{ - PublicKey: blsKey, - ShardId: peerAcc.GetShardId(), - List: string(common.NewList), - Index: uint32(nonce), - TempRating: s.startRating, - Rating: s.startRating, - RewardAddress: 
rewardAddress, - AccumulatedFees: big.NewInt(0), - } - validatorInfos[peerAcc.GetShardId()] = append(validatorInfos[peerAcc.GetShardId()], validatorInfo) - } - - return nil -} - -func (s *systemSCProcessor) initESDT() error { - currentConfigValues, err := s.extractConfigFromESDTContract() - if err != nil { - return err - } - - return s.changeESDTOwner(currentConfigValues) -} - -func (s *systemSCProcessor) extractConfigFromESDTContract() ([][]byte, error) { - vmInput := &vmcommon.ContractCallInput{ - VMInput: vmcommon.VMInput{ - CallerAddr: s.endOfEpochCallerAddress, - Arguments: [][]byte{}, - CallValue: big.NewInt(0), - GasProvided: math.MaxInt64, - }, - Function: "getContractConfig", - RecipientAddr: vm.ESDTSCAddress, - } - - output, err := s.systemVM.RunSmartContractCall(vmInput) - if err != nil { - return nil, err - } - if len(output.ReturnData) != 4 { - return nil, fmt.Errorf("%w getContractConfig should have returned 4 values", epochStart.ErrInvalidSystemSCReturn) - } - - return output.ReturnData, nil -} - -func (s *systemSCProcessor) changeESDTOwner(currentConfigValues [][]byte) error { - baseIssuingCost := currentConfigValues[1] - minTokenNameLength := currentConfigValues[2] - maxTokenNameLength := currentConfigValues[3] - - vmInput := &vmcommon.ContractCallInput{ - VMInput: vmcommon.VMInput{ - CallerAddr: s.endOfEpochCallerAddress, - Arguments: [][]byte{s.esdtOwnerAddressBytes, baseIssuingCost, minTokenNameLength, maxTokenNameLength}, - CallValue: big.NewInt(0), - GasProvided: math.MaxInt64, - }, - Function: "configChange", - RecipientAddr: vm.ESDTSCAddress, - } - - output, err := s.systemVM.RunSmartContractCall(vmInput) - if err != nil { - return err - } - if output.ReturnCode != vmcommon.Ok { - return fmt.Errorf("%w changeESDTOwner should have returned Ok", epochStart.ErrInvalidSystemSCReturn) - } - - return s.processSCOutputAccounts(output) -} - -// IsInterfaceNil returns true if underlying object is nil -func (s *systemSCProcessor) IsInterfaceNil() bool { - return s == nil +// IsInterfaceNil returns true if underlying object is nil +func (s *systemSCProcessor) IsInterfaceNil() bool { + return s == nil } // EpochConfirmed is called whenever a new epoch is confirmed func (s *systemSCProcessor) EpochConfirmed(epoch uint32, _ uint64) { - s.flagChangeMaxNodesEnabled.SetValue(false) - for _, maxNodesConfig := range s.maxNodesEnableConfig { - if epoch == maxNodesConfig.EpochEnable { - s.flagChangeMaxNodesEnabled.SetValue(true) - s.maxNodes = maxNodesConfig.MaxNumNodes - break - } - } - - log.Debug("systemSCProcessor: change of maximum number of nodes and/or shuffling percentage", - "enabled", s.flagChangeMaxNodesEnabled.IsSet(), - "epoch", epoch, - "maxNodes", s.maxNodes, - ) + s.legacyEpochConfirmed(epoch) } diff --git a/epochStart/metachain/systemSCs_test.go b/epochStart/metachain/systemSCs_test.go index a519e77e7f7..54dfa2a85e6 100644 --- a/epochStart/metachain/systemSCs_test.go +++ b/epochStart/metachain/systemSCs_test.go @@ -8,7 +8,7 @@ import ( "math" "math/big" "os" - "strconv" + "strings" "testing" "github.com/multiversx/mx-chain-core-go/core" @@ -27,9 +27,9 @@ import ( "github.com/multiversx/mx-chain-go/dataRetriever/dataPool" "github.com/multiversx/mx-chain-go/epochStart" "github.com/multiversx/mx-chain-go/epochStart/mock" + "github.com/multiversx/mx-chain-go/epochStart/notifier" "github.com/multiversx/mx-chain-go/genesis/process/disabled" "github.com/multiversx/mx-chain-go/process" - economicsHandler "github.com/multiversx/mx-chain-go/process/economics" vmFactory 
"github.com/multiversx/mx-chain-go/process/factory" metaProcess "github.com/multiversx/mx-chain-go/process/factory/metachain" "github.com/multiversx/mx-chain-go/process/peer" @@ -48,7 +48,11 @@ import ( dataRetrieverMock "github.com/multiversx/mx-chain-go/testscommon/dataRetriever" "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" "github.com/multiversx/mx-chain-go/testscommon/epochNotifier" + "github.com/multiversx/mx-chain-go/testscommon/genesisMocks" + "github.com/multiversx/mx-chain-go/testscommon/marshallerMock" "github.com/multiversx/mx-chain-go/testscommon/shardingMocks" + "github.com/multiversx/mx-chain-go/testscommon/stakingcommon" + stateMock "github.com/multiversx/mx-chain-go/testscommon/state" statusHandlerMock "github.com/multiversx/mx-chain-go/testscommon/statusHandler" storageMock "github.com/multiversx/mx-chain-go/testscommon/storage" "github.com/multiversx/mx-chain-go/trie" @@ -87,17 +91,39 @@ func createPhysicalUnit(t *testing.T) (storage.Storer, string) { MaxOpenFiles: 10, } - dbConfigHandler := storageFactory.NewDBConfigHandler(dbConfig) - persisterFactory, err := storageFactory.NewPersisterFactory(dbConfigHandler) + persisterFactory, err := storageFactory.NewPersisterFactory(dbConfig) assert.Nil(t, err) cache, _ := storageunit.NewCache(cacheConfig) - persist, _ := storageunit.NewDB(persisterFactory, dir) + persist, _ := persisterFactory.CreateWithRetries(dir) unit, _ := storageunit.NewStorageUnit(cache, persist) return unit, dir } +func createMockArgsForSystemSCProcessor() ArgsNewEpochStartSystemSCProcessing { + return ArgsNewEpochStartSystemSCProcessing{ + SystemVM: &mock.VMExecutionHandlerStub{}, + UserAccountsDB: &stateMock.AccountsStub{}, + PeerAccountsDB: &stateMock.AccountsStub{}, + Marshalizer: &marshallerMock.MarshalizerStub{}, + StartRating: 0, + ValidatorInfoCreator: &testscommon.ValidatorStatisticsProcessorStub{}, + ChanceComputer: &mock.ChanceComputerStub{}, + ShardCoordinator: &testscommon.ShardsCoordinatorMock{}, + EndOfEpochCallerAddress: vm.EndOfEpochAddress, + StakingSCAddress: vm.StakingSCAddress, + ESDTOwnerAddressBytes: vm.ESDTSCAddress, + GenesisNodesConfig: &genesisMocks.NodesSetupStub{}, + EpochNotifier: &epochNotifier.EpochNotifierStub{}, + NodesConfigProvider: &shardingMocks.NodesCoordinatorStub{}, + StakingDataProvider: &stakingcommon.StakingDataProviderStub{}, + AuctionListSelector: &stakingcommon.AuctionListSelectorStub{}, + MaxNodesChangeConfigProvider: &testscommon.MaxNodesChangeConfigProviderStub{}, + EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + } +} + func TestNewSystemSCProcessor(t *testing.T) { t.Parallel() @@ -198,7 +224,7 @@ func TestSystemSCProcessor_ProcessSystemSmartContract(t *testing.T) { jailedAcc, _ := args.PeerAccountsDB.LoadAccount([]byte("jailedPubKey0")) _ = args.PeerAccountsDB.SaveAccount(jailedAcc) - validatorInfos := make(map[uint32][]*state.ValidatorInfo) + validatorsInfo := state.NewShardValidatorsInfoMap() vInfo := &state.ValidatorInfo{ PublicKey: []byte("jailedPubKey0"), ShardId: 0, @@ -207,13 +233,13 @@ func TestSystemSCProcessor_ProcessSystemSmartContract(t *testing.T) { RewardAddress: []byte("address"), AccumulatedFees: big.NewInt(0), } - validatorInfos[0] = append(validatorInfos[0], vInfo) - err := s.ProcessSystemSmartContract(validatorInfos, 0, 0) - assert.Nil(t, err) + _ = validatorsInfo.Add(vInfo) + err := s.ProcessSystemSmartContract(validatorsInfo, &block.Header{}) + require.Nil(t, err) - assert.Equal(t, len(validatorInfos[0]), 1) - newValidatorInfo := 
validatorInfos[0][0] - assert.Equal(t, newValidatorInfo.List, string(common.NewList)) + require.Len(t, validatorsInfo.GetShardValidatorsInfoMap()[0], 1) + newValidatorInfo := validatorsInfo.GetShardValidatorsInfoMap()[0][0] + require.Equal(t, newValidatorInfo.GetList(), string(common.NewList)) } func TestSystemSCProcessor_JailedNodesShouldNotBeSwappedAllAtOnce(t *testing.T) { @@ -243,7 +269,7 @@ func testSystemSCProcessorJailedNodesShouldNotBeSwappedAllAtOnce(t *testing.T, s numEligible := 9 numWaiting := 5 numJailed := 8 - stakingScAcc := loadSCAccount(args.UserAccountsDB, vm.StakingSCAddress) + stakingScAcc := stakingcommon.LoadUserAccount(args.UserAccountsDB, vm.StakingSCAddress) createEligibleNodes(numEligible, stakingScAcc, args.Marshalizer) _ = createWaitingNodes(numWaiting, stakingScAcc, args.UserAccountsDB, args.Marshalizer) jailed := createJailedNodes(numJailed, stakingScAcc, args.UserAccountsDB, args.PeerAccountsDB, args.Marshalizer) @@ -251,25 +277,25 @@ func testSystemSCProcessorJailedNodesShouldNotBeSwappedAllAtOnce(t *testing.T, s _ = s.userAccountsDB.SaveAccount(stakingScAcc) _, _ = s.userAccountsDB.Commit() - addValidatorData(args.UserAccountsDB, []byte("ownerForAll"), [][]byte{[]byte("stakedPubKey1"), []byte("stakedPubKey2"), []byte("stakedPubKey3")}, big.NewInt(900000), args.Marshalizer) + stakingcommon.AddValidatorData(args.UserAccountsDB, []byte("ownerForAll"), [][]byte{[]byte("stakedPubKey1"), []byte("stakedPubKey2"), []byte("stakedPubKey3")}, big.NewInt(900000), args.Marshalizer) - validatorsInfo := make(map[uint32][]*state.ValidatorInfo) - validatorsInfo[0] = append(validatorsInfo[0], jailed...) + validatorsInfo := state.NewShardValidatorsInfoMap() + _ = validatorsInfo.SetValidatorsInShard(0, jailed) - err := s.ProcessSystemSmartContract(validatorsInfo, 0, 0) - assert.Nil(t, err) + err := s.ProcessSystemSmartContract(validatorsInfo, &block.Header{}) + require.Nil(t, err) for i := 0; i < numWaiting; i++ { - assert.Equal(t, string(common.NewList), validatorsInfo[0][i].List) + require.Equal(t, string(common.NewList), validatorsInfo.GetShardValidatorsInfoMap()[0][i].GetList()) } for i := numWaiting; i < numJailed; i++ { - assert.Equal(t, string(common.JailedList), validatorsInfo[0][i].List) + require.Equal(t, string(common.JailedList), validatorsInfo.GetShardValidatorsInfoMap()[0][i].GetList()) } newJailedNodes := jailed[numWaiting:numJailed] checkNodesStatusInSystemSCDataTrie(t, newJailedNodes, args.UserAccountsDB, args.Marshalizer, saveJailedAlwaysEnableEpoch == 0) } -func checkNodesStatusInSystemSCDataTrie(t *testing.T, nodes []*state.ValidatorInfo, accounts state.AccountsAdapter, marshalizer marshal.Marshalizer, jailed bool) { +func checkNodesStatusInSystemSCDataTrie(t *testing.T, nodes []state.ValidatorInfoHandler, accounts state.AccountsAdapter, marshalizer marshal.Marshalizer, jailed bool) { account, err := accounts.LoadAccount(vm.StakingSCAddress) require.Nil(t, err) @@ -277,7 +303,7 @@ func checkNodesStatusInSystemSCDataTrie(t *testing.T, nodes []*state.ValidatorIn systemScAccount, ok := account.(state.UserAccountHandler) require.True(t, ok) for _, nodeInfo := range nodes { - buff, _, err = systemScAccount.RetrieveValue(nodeInfo.PublicKey) + buff, _, err = systemScAccount.RetrieveValue(nodeInfo.GetPublicKey()) require.Nil(t, err) require.True(t, len(buff) > 0) @@ -315,7 +341,7 @@ func TestSystemSCProcessor_NobodyToSwapWithStakingV2(t *testing.T) { _ = s.initDelegationSystemSC() doStake(t, s.systemVM, s.userAccountsDB, owner1, big.NewInt(1000), blsKeys...) 
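// Illustrative sketch, relying only on names that already appear in this diff: the tests replace the
// raw map[uint32][]*state.ValidatorInfo with the ShardValidatorsInfoMap API (NewShardValidatorsInfoMap,
// Add, SetValidatorsInShard, GetShardValidatorsInfoMap), and entries are read back through the
// ValidatorInfoHandler getters instead of direct struct field access. The key and address below are
// placeholder values, not taken from the change itself:
//
//	validatorsInfo := state.NewShardValidatorsInfoMap()
//	_ = validatorsInfo.Add(&state.ValidatorInfo{
//		PublicKey:       []byte("somePubKey"),
//		ShardId:         0,
//		List:            string(common.EligibleList),
//		RewardAddress:   []byte("someRewardAddress"),
//		AccumulatedFees: big.NewInt(0),
//	})
//
//	shard0Validators := validatorsInfo.GetShardValidatorsInfoMap()[0]
//	_ = shard0Validators[0].GetPublicKey() // getters replace vInfo.PublicKey
//	_ = shard0Validators[0].GetList()      // getters replace vInfo.List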
doUnStake(t, s.systemVM, s.userAccountsDB, owner1, blsKeys[:3]...) - validatorsInfo := make(map[uint32][]*state.ValidatorInfo) + validatorsInfo := state.NewShardValidatorsInfoMap() jailed := &state.ValidatorInfo{ PublicKey: blsKeys[0], ShardId: 0, @@ -324,13 +350,13 @@ func TestSystemSCProcessor_NobodyToSwapWithStakingV2(t *testing.T) { RewardAddress: []byte("owner1"), AccumulatedFees: big.NewInt(0), } - validatorsInfo[0] = append(validatorsInfo[0], jailed) + _ = validatorsInfo.Add(jailed) - err := s.ProcessSystemSmartContract(validatorsInfo, 0, 0) + err := s.ProcessSystemSmartContract(validatorsInfo, &block.Header{}) assert.Nil(t, err) - for _, vInfo := range validatorsInfo[0] { - assert.Equal(t, string(common.JailedList), vInfo.List) + for _, vInfo := range validatorsInfo.GetShardValidatorsInfoMap()[0] { + assert.Equal(t, string(common.JailedList), vInfo.GetList()) } nodesToUnStake, mapOwnersKeys, err := s.stakingDataProvider.ComputeUnQualifiedNodes(validatorsInfo) @@ -541,13 +567,6 @@ func doUnStake(t *testing.T, systemVm vmcommon.VMExecutionHandler, accountsDB st saveOutputAccounts(t, accountsDB, vmOutput) } -func loadSCAccount(accountsDB state.AccountsAdapter, address []byte) state.UserAccountHandler { - acc, _ := accountsDB.LoadAccount(address) - stakingSCAcc := acc.(state.UserAccountHandler) - - return stakingSCAcc -} - func createEligibleNodes(numNodes int, stakingSCAcc state.UserAccountHandler, marshalizer marshal.Marshalizer) { for i := 0; i < numNodes; i++ { stakedData := &systemSmartContracts.StakedDataV2_0{ @@ -563,8 +582,8 @@ func createEligibleNodes(numNodes int, stakingSCAcc state.UserAccountHandler, ma } } -func createJailedNodes(numNodes int, stakingSCAcc state.UserAccountHandler, userAccounts state.AccountsAdapter, peerAccounts state.AccountsAdapter, marshalizer marshal.Marshalizer) []*state.ValidatorInfo { - validatorInfos := make([]*state.ValidatorInfo, 0) +func createJailedNodes(numNodes int, stakingSCAcc state.UserAccountHandler, userAccounts state.AccountsAdapter, peerAccounts state.AccountsAdapter, marshalizer marshal.Marshalizer) []state.ValidatorInfoHandler { + validatorInfos := make([]state.ValidatorInfoHandler, 0) for i := 0; i < numNodes; i++ { stakedData := &systemSmartContracts.StakedDataV2_0{ @@ -603,8 +622,8 @@ func addValidatorDataWithUnStakedKey( nodePrice *big.Int, marshalizer marshal.Marshalizer, ) { - stakingAccount := loadSCAccount(accountsDB, vm.StakingSCAddress) - validatorAccount := loadSCAccount(accountsDB, vm.ValidatorSCAddress) + stakingAccount := stakingcommon.LoadUserAccount(accountsDB, vm.StakingSCAddress) + validatorAccount := stakingcommon.LoadUserAccount(accountsDB, vm.ValidatorSCAddress) validatorData := &systemSmartContracts.ValidatorDataV2{ RegisterNonce: 0, @@ -705,50 +724,6 @@ func createWaitingNodes(numNodes int, stakingSCAcc state.UserAccountHandler, use return validatorInfos } -func addValidatorData( - accountsDB state.AccountsAdapter, - ownerKey []byte, - registeredKeys [][]byte, - totalStake *big.Int, - marshalizer marshal.Marshalizer, -) { - validatorSC := loadSCAccount(accountsDB, vm.ValidatorSCAddress) - validatorData := &systemSmartContracts.ValidatorDataV2{ - RegisterNonce: 0, - Epoch: 0, - RewardAddress: ownerKey, - TotalStakeValue: totalStake, - LockedStake: big.NewInt(0), - TotalUnstaked: big.NewInt(0), - BlsPubKeys: registeredKeys, - NumRegistered: uint32(len(registeredKeys)), - } - - marshaledData, _ := marshalizer.Marshal(validatorData) - _ = validatorSC.SaveKeyValue(ownerKey, marshaledData) - - _ = 
accountsDB.SaveAccount(validatorSC) -} - -func addStakedData( - accountsDB state.AccountsAdapter, - stakedKey []byte, - ownerKey []byte, - marshalizer marshal.Marshalizer, -) { - stakingSCAcc := loadSCAccount(accountsDB, vm.StakingSCAddress) - stakedData := &systemSmartContracts.StakedDataV2_0{ - Staked: true, - RewardAddress: ownerKey, - OwnerAddress: ownerKey, - StakeValue: big.NewInt(0), - } - marshaledData, _ := marshalizer.Marshal(stakedData) - _ = stakingSCAcc.SaveKeyValue(stakedKey, marshaledData) - - _ = accountsDB.SaveAccount(stakingSCAcc) -} - func prepareStakingContractWithData( accountsDB state.AccountsAdapter, stakedKey []byte, @@ -757,139 +732,14 @@ func prepareStakingContractWithData( rewardAddress []byte, ownerAddress []byte, ) { - stakingSCAcc := loadSCAccount(accountsDB, vm.StakingSCAddress) - - stakedData := &systemSmartContracts.StakedDataV2_0{ - Staked: true, - RewardAddress: rewardAddress, - OwnerAddress: ownerAddress, - StakeValue: big.NewInt(100), - } - marshaledData, _ := marshalizer.Marshal(stakedData) - _ = stakingSCAcc.SaveKeyValue(stakedKey, marshaledData) - _ = accountsDB.SaveAccount(stakingSCAcc) + stakingcommon.AddStakingData(accountsDB, ownerAddress, rewardAddress, [][]byte{stakedKey}, marshalizer) + stakingcommon.AddKeysToWaitingList(accountsDB, [][]byte{waitingKey}, marshalizer, rewardAddress, ownerAddress) + stakingcommon.AddValidatorData(accountsDB, rewardAddress, [][]byte{stakedKey, waitingKey}, big.NewInt(10000000000), marshalizer) - saveOneKeyToWaitingList(accountsDB, waitingKey, marshalizer, rewardAddress, ownerAddress) - - validatorSC := loadSCAccount(accountsDB, vm.ValidatorSCAddress) - validatorData := &systemSmartContracts.ValidatorDataV2{ - RegisterNonce: 0, - Epoch: 0, - RewardAddress: rewardAddress, - TotalStakeValue: big.NewInt(10000000000), - LockedStake: big.NewInt(10000000000), - TotalUnstaked: big.NewInt(0), - NumRegistered: 2, - BlsPubKeys: [][]byte{stakedKey, waitingKey}, - } - - marshaledData, _ = marshalizer.Marshal(validatorData) - _ = validatorSC.SaveKeyValue(rewardAddress, marshaledData) - - _ = accountsDB.SaveAccount(validatorSC) _, err := accountsDB.Commit() log.LogIfError(err) } -func saveOneKeyToWaitingList( - accountsDB state.AccountsAdapter, - waitingKey []byte, - marshalizer marshal.Marshalizer, - rewardAddress []byte, - ownerAddress []byte, -) { - stakingSCAcc := loadSCAccount(accountsDB, vm.StakingSCAddress) - stakedData := &systemSmartContracts.StakedDataV2_0{ - Waiting: true, - RewardAddress: rewardAddress, - OwnerAddress: ownerAddress, - StakeValue: big.NewInt(100), - } - marshaledData, _ := marshalizer.Marshal(stakedData) - _ = stakingSCAcc.SaveKeyValue(waitingKey, marshaledData) - - waitingKeyInList := []byte("w_" + string(waitingKey)) - waitingListHead := &systemSmartContracts.WaitingList{ - FirstKey: waitingKeyInList, - LastKey: waitingKeyInList, - Length: 1, - } - marshaledData, _ = marshalizer.Marshal(waitingListHead) - _ = stakingSCAcc.SaveKeyValue([]byte("waitingList"), marshaledData) - - waitingListElement := &systemSmartContracts.ElementInList{ - BLSPublicKey: waitingKey, - PreviousKey: waitingKeyInList, - NextKey: make([]byte, 0), - } - marshaledData, _ = marshalizer.Marshal(waitingListElement) - _ = stakingSCAcc.SaveKeyValue(waitingKeyInList, marshaledData) - - _ = accountsDB.SaveAccount(stakingSCAcc) -} - -func addKeysToWaitingList( - accountsDB state.AccountsAdapter, - waitingKeys [][]byte, - marshalizer marshal.Marshalizer, - rewardAddress []byte, - ownerAddress []byte, -) { - stakingSCAcc := 
loadSCAccount(accountsDB, vm.StakingSCAddress) - - for _, waitingKey := range waitingKeys { - stakedData := &systemSmartContracts.StakedDataV2_0{ - Waiting: true, - RewardAddress: rewardAddress, - OwnerAddress: ownerAddress, - StakeValue: big.NewInt(100), - } - marshaledData, _ := marshalizer.Marshal(stakedData) - _ = stakingSCAcc.SaveKeyValue(waitingKey, marshaledData) - } - - marshaledData, _, _ := stakingSCAcc.RetrieveValue([]byte("waitingList")) - waitingListHead := &systemSmartContracts.WaitingList{} - _ = marshalizer.Unmarshal(waitingListHead, marshaledData) - waitingListHead.Length += uint32(len(waitingKeys)) - lastKeyInList := []byte("w_" + string(waitingKeys[len(waitingKeys)-1])) - waitingListHead.LastKey = lastKeyInList - - marshaledData, _ = marshalizer.Marshal(waitingListHead) - _ = stakingSCAcc.SaveKeyValue([]byte("waitingList"), marshaledData) - - numWaitingKeys := len(waitingKeys) - previousKey := waitingListHead.FirstKey - for i, waitingKey := range waitingKeys { - - waitingKeyInList := []byte("w_" + string(waitingKey)) - waitingListElement := &systemSmartContracts.ElementInList{ - BLSPublicKey: waitingKey, - PreviousKey: previousKey, - NextKey: make([]byte, 0), - } - - if i < numWaitingKeys-1 { - nextKey := []byte("w_" + string(waitingKeys[i+1])) - waitingListElement.NextKey = nextKey - } - - marshaledData, _ = marshalizer.Marshal(waitingListElement) - _ = stakingSCAcc.SaveKeyValue(waitingKeyInList, marshaledData) - - previousKey = waitingKeyInList - } - - marshaledData, _, _ = stakingSCAcc.RetrieveValue(waitingListHead.FirstKey) - waitingListElement := &systemSmartContracts.ElementInList{} - _ = marshalizer.Unmarshal(waitingListElement, marshaledData) - waitingListElement.NextKey = []byte("w_" + string(waitingKeys[0])) - marshaledData, _ = marshalizer.Marshal(waitingListElement) - _ = stakingSCAcc.SaveKeyValue(waitingListHead.FirstKey, marshaledData) - - _ = accountsDB.SaveAccount(stakingSCAcc) -} - func createAccountsDB( hasher hashing.Hasher, marshaller marshal.Marshalizer, @@ -935,6 +785,9 @@ func createFullArgumentsForSystemSCProcessing(enableEpochsConfig config.EnableEp accCreator, _ := factory.NewAccountCreator(argsAccCreator) peerAccCreator := factory.NewPeerAccountCreator() en := forking.NewGenericEpochNotifier() + enableEpochsConfig.StakeLimitsEnableEpoch = 10 + enableEpochsConfig.StakingV4Step1EnableEpoch = 444 + enableEpochsConfig.StakingV4Step2EnableEpoch = 445 epochsConfig := &config.EpochConfig{ EnableEpochs: enableEpochsConfig, } @@ -952,7 +805,7 @@ func createFullArgumentsForSystemSCProcessing(enableEpochsConfig config.EnableEp PeerAdapter: peerAccountsDB, Rater: &mock.RaterStub{}, RewardsHandler: &mock.RewardsHandlerStub{}, - NodesSetup: &mock.NodesSetupStub{}, + NodesSetup: &genesisMocks.NodesSetupStub{}, MaxComputableRounds: 1, MaxConsecutiveRoundsOfRatingDecrease: 2000, EnableEpochsHandler: enableEpochsHandler, @@ -960,15 +813,14 @@ func createFullArgumentsForSystemSCProcessing(enableEpochsConfig config.EnableEp vCreator, _ := peer.NewValidatorStatisticsProcessor(argsValidatorsProcessor) blockChain, _ := blockchain.NewMetaChain(&statusHandlerMock.AppStatusHandlerStub{}) + gasSchedule := wasmConfig.MakeGasMapForTests() + gasScheduleNotifier := testscommon.NewGasScheduleNotifierMock(gasSchedule) testDataPool := dataRetrieverMock.NewPoolsHolderMock() - gasSchedule := wasmConfig.MakeGasMapForTests() defaults.FillGasMapInternal(gasSchedule, 1) signVerifer, _ := disabled.NewMessageSignVerifier(&cryptoMocks.KeyGenStub{}) - gasScheduleNotifier := 
testscommon.NewGasScheduleNotifierMock(gasSchedule) - - nodesSetup := &mock.NodesSetupStub{} + nodesSetup := &genesisMocks.NodesSetupStub{} argsHook := hooks.ArgBlockChainHook{ Accounts: userAccountsDB, @@ -978,10 +830,10 @@ func createFullArgumentsForSystemSCProcessing(enableEpochsConfig config.EnableEp ShardCoordinator: &mock.ShardCoordinatorStub{}, Marshalizer: marshalizer, Uint64Converter: &mock.Uint64ByteSliceConverterMock{}, - BuiltInFunctions: vmcommonBuiltInFunctions.NewBuiltInFunctionContainer(), NFTStorageHandler: &testscommon.SimpleNFTStorageHandlerStub{}, - GlobalSettingsHandler: &testscommon.ESDTGlobalSettingsHandlerStub{}, + BuiltInFunctions: vmcommonBuiltInFunctions.NewBuiltInFunctionContainer(), DataPool: testDataPool, + GlobalSettingsHandler: &testscommon.ESDTGlobalSettingsHandlerStub{}, CompiledSCPool: testDataPool.SmartContracts(), EpochNotifier: en, EnableEpochsHandler: enableEpochsHandler, @@ -991,11 +843,13 @@ func createFullArgumentsForSystemSCProcessing(enableEpochsConfig config.EnableEp MissingTrieNodesNotifier: &testscommon.MissingTrieNodesNotifierStub{}, } + defaults.FillGasMapInternal(gasSchedule, 1) + blockChainHookImpl, _ := hooks.NewBlockChainHookImpl(argsHook) argsNewVMContainerFactory := metaProcess.ArgsNewVMContainerFactory{ BlockChainHook: blockChainHookImpl, PubkeyConv: argsHook.PubkeyConv, - Economics: createEconomicsData(), + Economics: stakingcommon.CreateEconomicsData(), MessageSignVerifier: signVerifer, GasSchedule: gasScheduleNotifier, NodesConfigProvider: nodesSetup, @@ -1031,6 +885,8 @@ func createFullArgumentsForSystemSCProcessing(enableEpochsConfig config.EnableEp MaxNumberOfNodesForStake: 5, ActivateBLSPubKeyMessageVerification: false, MinUnstakeTokensValue: "1", + StakeLimitPercentage: 100.0, + NodeLimitPercentage: 100.0, }, DelegationManagerSystemSCConfig: config.DelegationManagerSystemSCConfig{ MinCreationDeposit: "100", @@ -1041,21 +897,57 @@ func createFullArgumentsForSystemSCProcessing(enableEpochsConfig config.EnableEp MinServiceFee: 0, MaxServiceFee: 100, }, + SoftAuctionConfig: config.SoftAuctionConfig{ + TopUpStep: "10", + MinTopUp: "1", + MaxTopUp: "32000000", + MaxNumberOfIterations: 100000, + }, }, ValidatorAccountsDB: peerAccountsDB, UserAccountsDB: userAccountsDB, ChanceComputer: &mock.ChanceComputerStub{}, ShardCoordinator: &mock.ShardCoordinatorStub{}, EnableEpochsHandler: enableEpochsHandler, + NodesCoordinator: &shardingMocks.NodesCoordinatorStub{}, + ArgBlockChainHook: argsHook, } metaVmFactory, _ := metaProcess.NewVMContainerFactory(argsNewVMContainerFactory) vmContainer, _ := metaVmFactory.Create() systemVM, _ := vmContainer.Get(vmFactory.SystemVirtualMachine) - stakingSCprovider, _ := NewStakingDataProvider(systemVM, "1000") + argsStakingDataProvider := StakingDataProviderArgs{ + EnableEpochsHandler: enableEpochsHandler, + SystemVM: systemVM, + MinNodePrice: "1000", + } + stakingSCProvider, _ := NewStakingDataProvider(argsStakingDataProvider) shardCoordinator, _ := sharding.NewMultiShardCoordinator(3, core.MetachainShardId) + nodesConfigProvider, _ := notifier.NewNodesConfigProvider(en, nil) + auctionCfg := config.SoftAuctionConfig{ + TopUpStep: "10", + MinTopUp: "1", + MaxTopUp: "32000000", + MaxNumberOfIterations: 100000, + } + ald, _ := NewAuctionListDisplayer(ArgsAuctionListDisplayer{ + TableDisplayHandler: NewTableDisplayer(), + ValidatorPubKeyConverter: &testscommon.PubkeyConverterMock{}, + AddressPubKeyConverter: &testscommon.PubkeyConverterMock{}, + AuctionConfig: auctionCfg, + Denomination: 0, + }) + 
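// Illustrative sketch of how the pieces introduced in this helper fit together, using only the
// constructors and field names visible in this diff (the flow is summarized, not copied verbatim):
// the soft auction config and the displayer feed an AuctionListSelector, which is then handed to the
// epoch-start system SC processor through the new AuctionListSelector argument:
//
//	als, _ := NewAuctionListSelector(AuctionListSelectorArgs{
//		ShardCoordinator:             shardCoordinator,
//		StakingDataProvider:          stakingSCProvider,
//		MaxNodesChangeConfigProvider: nodesConfigProvider,
//		AuctionListDisplayHandler:    ald,
//		SoftAuctionConfig:            auctionCfg,
//	})
//	args := ArgsNewEpochStartSystemSCProcessing{
//		StakingDataProvider:          stakingSCProvider,
//		AuctionListSelector:          als,
//		MaxNodesChangeConfigProvider: nodesConfigProvider,
//		// remaining fields as in the test helper below
//	}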
argsAuctionListSelector := AuctionListSelectorArgs{ + ShardCoordinator: shardCoordinator, + StakingDataProvider: stakingSCProvider, + MaxNodesChangeConfigProvider: nodesConfigProvider, + AuctionListDisplayHandler: ald, + SoftAuctionConfig: auctionCfg, + } + als, _ := NewAuctionListSelector(argsAuctionListSelector) + args := ArgsNewEpochStartSystemSCProcessing{ SystemVM: systemVM, UserAccountsDB: userAccountsDB, @@ -1068,7 +960,8 @@ func createFullArgumentsForSystemSCProcessing(enableEpochsConfig config.EnableEp ChanceComputer: &mock.ChanceComputerStub{}, EpochNotifier: en, GenesisNodesConfig: nodesSetup, - StakingDataProvider: stakingSCprovider, + StakingDataProvider: stakingSCProvider, + AuctionListSelector: als, NodesConfigProvider: &shardingMocks.NodesCoordinatorStub{ ConsensusGroupSizeCalled: func(shardID uint32) int { if shardID == core.MetachainShardId { @@ -1077,87 +970,189 @@ func createFullArgumentsForSystemSCProcessing(enableEpochsConfig config.EnableEp return 63 }, }, - ShardCoordinator: shardCoordinator, - ESDTOwnerAddressBytes: bytes.Repeat([]byte{1}, 32), - EnableEpochsHandler: enableEpochsHandler, + ShardCoordinator: shardCoordinator, + ESDTOwnerAddressBytes: bytes.Repeat([]byte{1}, 32), + MaxNodesChangeConfigProvider: nodesConfigProvider, + EnableEpochsHandler: enableEpochsHandler, } return args, metaVmFactory.SystemSmartContractContainer() } -func createEconomicsData() process.EconomicsDataHandler { - maxGasLimitPerBlock := strconv.FormatUint(1500000000, 10) - minGasPrice := strconv.FormatUint(10, 10) - minGasLimit := strconv.FormatUint(10, 10) - - argsNewEconomicsData := economicsHandler.ArgsNewEconomicsData{ - Economics: &config.EconomicsConfig{ - GlobalSettings: config.GlobalSettings{ - GenesisTotalSupply: "2000000000000000000000", - MinimumInflation: 0, - YearSettings: []*config.YearSetting{ - { - Year: 0, - MaximumInflation: 0.01, - }, - }, +func TestSystemSCProcessor_ProcessSystemSmartContractInitDelegationMgr(t *testing.T) { + t.Parallel() + + expectedErr := errors.New("expected error") + t.Run("flag not active", func(t *testing.T) { + args := createMockArgsForSystemSCProcessor() + args.EnableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ + IsFlagEnabledCalled: func(flag core.EnableEpochFlag) bool { + if flag == common.GovernanceFlagInSpecificEpochOnly || + flag == common.StakingV4Step1Flag || + flag == common.StakingV4Step2Flag || + flag == common.SwitchHysteresisForMinNodesFlagInSpecificEpochOnly || + flag == common.StakingV2OwnerFlagInSpecificEpochOnly || + flag == common.CorrectLastUnJailedFlagInSpecificEpochOnly || + flag == common.DelegationSmartContractFlagInSpecificEpochOnly || + flag == common.CorrectLastUnJailedFlag || + flag == common.SwitchJailWaitingFlag || + flag == common.StakingV2Flag || + flag == common.ESDTFlagInSpecificEpochOnly { + + return false + } + + return true }, - RewardsSettings: config.RewardsSettings{ - RewardsConfigByEpoch: []config.EpochRewardSettings{ - { - LeaderPercentage: 0.1, - DeveloperPercentage: 0.1, - ProtocolSustainabilityPercentage: 0.1, - ProtocolSustainabilityAddress: "protocol", - TopUpGradientPoint: "300000000000000000000", - TopUpFactor: 0.25, - }, - }, + } + args.SystemVM = &mock.VMExecutionHandlerStub{ + RunSmartContractCreateCalled: func(input *vmcommon.ContractCreateInput) (*vmcommon.VMOutput, error) { + assert.Fail(t, "should have not called") + + return nil, fmt.Errorf("should have not called") }, - FeeSettings: config.FeeSettings{ - GasLimitSettings: []config.GasLimitSetting{ - { - 
MaxGasLimitPerBlock: maxGasLimitPerBlock, - MaxGasLimitPerMiniBlock: maxGasLimitPerBlock, - MaxGasLimitPerMetaBlock: maxGasLimitPerBlock, - MaxGasLimitPerMetaMiniBlock: maxGasLimitPerBlock, - MaxGasLimitPerTx: maxGasLimitPerBlock, - MinGasLimit: minGasLimit, - ExtraGasLimitGuardedTx: "50000", - }, - }, - MinGasPrice: minGasPrice, - GasPerDataByte: "1", - GasPriceModifier: 1.0, - MaxGasPriceSetGuardian: "100000", + } + processor, _ := NewSystemSCProcessor(args) + require.NotNil(t, processor) + + validatorsInfo := state.NewShardValidatorsInfoMap() + err := processor.ProcessSystemSmartContract(validatorsInfo, &block.Header{}) + require.Nil(t, err) + }) + t.Run("flag active", func(t *testing.T) { + args := createMockArgsForSystemSCProcessor() + args.EnableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ + IsFlagEnabledCalled: func(flag core.EnableEpochFlag) bool { + return flag == common.DelegationSmartContractFlagInSpecificEpochOnly }, - }, - EpochNotifier: &epochNotifier.EpochNotifierStub{}, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, - BuiltInFunctionsCostHandler: &mock.BuiltInCostHandlerStub{}, - TxVersionChecker: &testscommon.TxVersionCheckerStub{}, - } - economicsData, _ := economicsHandler.NewEconomicsData(argsNewEconomicsData) - return economicsData + } + runSmartContractCreateCalled := false + args.SystemVM = &mock.VMExecutionHandlerStub{ + RunSmartContractCreateCalled: func(input *vmcommon.ContractCreateInput) (*vmcommon.VMOutput, error) { + runSmartContractCreateCalled = true + + return &vmcommon.VMOutput{}, nil + }, + } + processor, _ := NewSystemSCProcessor(args) + require.NotNil(t, processor) + + validatorsInfo := state.NewShardValidatorsInfoMap() + err := processor.ProcessSystemSmartContract(validatorsInfo, &block.Header{}) + require.Nil(t, err) + require.True(t, runSmartContractCreateCalled) + }) + t.Run("flag active but contract create call errors, should error", func(t *testing.T) { + args := createMockArgsForSystemSCProcessor() + args.EnableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ + IsFlagEnabledCalled: func(flag core.EnableEpochFlag) bool { + return flag == common.DelegationSmartContractFlagInSpecificEpochOnly + }, + } + runSmartContractCreateCalled := false + args.SystemVM = &mock.VMExecutionHandlerStub{ + RunSmartContractCreateCalled: func(input *vmcommon.ContractCreateInput) (*vmcommon.VMOutput, error) { + runSmartContractCreateCalled = true + + return nil, expectedErr + }, + } + processor, _ := NewSystemSCProcessor(args) + require.NotNil(t, processor) + + validatorsInfo := state.NewShardValidatorsInfoMap() + err := processor.ProcessSystemSmartContract(validatorsInfo, &block.Header{}) + require.ErrorIs(t, err, expectedErr) + require.True(t, runSmartContractCreateCalled) + }) } -func TestSystemSCProcessor_ProcessSystemSmartContractInitDelegationMgr(t *testing.T) { +func TestSystemSCProcessor_ProcessSystemSmartContractInitGovernance(t *testing.T) { t.Parallel() - args, _ := createFullArgumentsForSystemSCProcessing(config.EnableEpochs{ - StakingV2EnableEpoch: 1000, - }, testscommon.CreateMemUnit()) - s, _ := NewSystemSCProcessor(args) + expectedErr := errors.New("expected error") + t.Run("flag not active", func(t *testing.T) { + args := createMockArgsForSystemSCProcessor() + args.EnableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ + IsFlagEnabledCalled: func(flag core.EnableEpochFlag) bool { + if flag == common.GovernanceFlagInSpecificEpochOnly || + flag == common.StakingV4Step1Flag 
|| + flag == common.StakingV4Step2Flag || + flag == common.SwitchHysteresisForMinNodesFlagInSpecificEpochOnly || + flag == common.StakingV2OwnerFlagInSpecificEpochOnly || + flag == common.CorrectLastUnJailedFlagInSpecificEpochOnly || + flag == common.DelegationSmartContractFlagInSpecificEpochOnly || + flag == common.CorrectLastUnJailedFlag || + flag == common.SwitchJailWaitingFlag || + flag == common.StakingV2Flag || + flag == common.ESDTFlagInSpecificEpochOnly { + + return false + } - validatorInfos := make(map[uint32][]*state.ValidatorInfo) - err := s.ProcessSystemSmartContract(validatorInfos, 0, 0) - assert.Nil(t, err) + return true + }, + } + args.SystemVM = &mock.VMExecutionHandlerStub{ + RunSmartContractCallCalled: func(input *vmcommon.ContractCallInput) (*vmcommon.VMOutput, error) { + assert.Fail(t, "should have not called") - acc, err := s.userAccountsDB.GetExistingAccount(vm.DelegationManagerSCAddress) - assert.Nil(t, err) + return nil, fmt.Errorf("should have not called") + }, + } + processor, _ := NewSystemSCProcessor(args) + require.NotNil(t, processor) - userAcc, _ := acc.(state.UserAccountHandler) - assert.Equal(t, userAcc.GetOwnerAddress(), vm.DelegationManagerSCAddress) - assert.NotNil(t, userAcc.GetCodeMetadata()) + validatorsInfo := state.NewShardValidatorsInfoMap() + err := processor.ProcessSystemSmartContract(validatorsInfo, &block.Header{}) + require.Nil(t, err) + }) + t.Run("flag active", func(t *testing.T) { + args := createMockArgsForSystemSCProcessor() + args.EnableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ + IsFlagEnabledCalled: func(flag core.EnableEpochFlag) bool { + return flag == common.GovernanceFlagInSpecificEpochOnly + }, + } + runSmartContractCreateCalled := false + args.SystemVM = &mock.VMExecutionHandlerStub{ + RunSmartContractCallCalled: func(input *vmcommon.ContractCallInput) (*vmcommon.VMOutput, error) { + runSmartContractCreateCalled = true + + return &vmcommon.VMOutput{}, nil + }, + } + processor, _ := NewSystemSCProcessor(args) + require.NotNil(t, processor) + + validatorsInfo := state.NewShardValidatorsInfoMap() + err := processor.ProcessSystemSmartContract(validatorsInfo, &block.Header{}) + require.Nil(t, err) + require.True(t, runSmartContractCreateCalled) + }) + t.Run("flag active but contract call errors, should error", func(t *testing.T) { + args := createMockArgsForSystemSCProcessor() + args.EnableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ + IsFlagEnabledCalled: func(flag core.EnableEpochFlag) bool { + return flag == common.GovernanceFlagInSpecificEpochOnly + }, + } + runSmartContractCreateCalled := false + args.SystemVM = &mock.VMExecutionHandlerStub{ + RunSmartContractCallCalled: func(input *vmcommon.ContractCallInput) (*vmcommon.VMOutput, error) { + runSmartContractCreateCalled = true + + return nil, expectedErr + }, + } + processor, _ := NewSystemSCProcessor(args) + require.NotNil(t, processor) + + validatorsInfo := state.NewShardValidatorsInfoMap() + err := processor.ProcessSystemSmartContract(validatorsInfo, &block.Header{}) + require.ErrorIs(t, err, expectedErr) + require.Contains(t, err.Error(), "governanceV2") + require.True(t, runSmartContractCreateCalled) + }) } func TestSystemSCProcessor_ProcessDelegationRewardsNothingToExecute(t *testing.T) { @@ -1293,7 +1288,8 @@ func TestSystemSCProcessor_ProcessSystemSmartContractMaxNodesStakedFromQueue(t * t.Parallel() args, _ := createFullArgumentsForSystemSCProcessing(config.EnableEpochs{}, testscommon.CreateMemUnit()) - 
args.MaxNodesEnableConfig = []config.MaxNodesChangeConfig{{EpochEnable: 0, MaxNumNodes: 10}} + nodesConfigProvider, _ := notifier.NewNodesConfigProvider(args.EpochNotifier, []config.MaxNodesChangeConfig{{EpochEnable: 0, MaxNumNodes: 10}}) + args.MaxNodesChangeConfigProvider = nodesConfigProvider s, _ := NewSystemSCProcessor(args) prepareStakingContractWithData( @@ -1305,8 +1301,8 @@ func TestSystemSCProcessor_ProcessSystemSmartContractMaxNodesStakedFromQueue(t * []byte("rewardAddress"), ) - validatorInfos := make(map[uint32][]*state.ValidatorInfo) - err := s.ProcessSystemSmartContract(validatorInfos, 0, 0) + validatorsInfo := state.NewShardValidatorsInfoMap() + err := s.ProcessSystemSmartContract(validatorsInfo, &block.Header{}) assert.Nil(t, err) peerAcc, err := s.getPeerAccount([]byte("waitingPubKey")) @@ -1340,10 +1336,12 @@ func getTotalNumberOfRegisteredNodes(t *testing.T, s *systemSCProcessor) int { func TestSystemSCProcessor_ProcessSystemSmartContractMaxNodesStakedFromQueueOwnerNotSet(t *testing.T) { t.Parallel() + maxNodesChangeConfig := []config.MaxNodesChangeConfig{{EpochEnable: 10, MaxNumNodes: 10}} args, _ := createFullArgumentsForSystemSCProcessing(config.EnableEpochs{ - StakingV2EnableEpoch: 10, + MaxNodesChangeEnableEpoch: maxNodesChangeConfig, + StakingV2EnableEpoch: 10, }, testscommon.CreateMemUnit()) - args.MaxNodesEnableConfig = []config.MaxNodesChangeConfig{{EpochEnable: 10, MaxNumNodes: 10}} + args.MaxNodesChangeConfigProvider, _ = notifier.NewNodesConfigProvider(args.EpochNotifier, maxNodesChangeConfig) s, _ := NewSystemSCProcessor(args) prepareStakingContractWithData( @@ -1358,8 +1356,8 @@ func TestSystemSCProcessor_ProcessSystemSmartContractMaxNodesStakedFromQueueOwne args.EpochNotifier.CheckEpoch(&testscommon.HeaderHandlerStub{ EpochField: 10, }) - validatorInfos := make(map[uint32][]*state.ValidatorInfo) - err := s.ProcessSystemSmartContract(validatorInfos, 0, 10) + validatorsInfo := state.NewShardValidatorsInfoMap() + err := s.ProcessSystemSmartContract(validatorsInfo, &block.Header{Epoch: 10}) assert.Nil(t, err) peerAcc, err := s.getPeerAccount([]byte("waitingPubKey")) @@ -1386,7 +1384,7 @@ func TestSystemSCProcessor_ESDTInitShouldWork(t *testing.T) { require.Equal(t, 4, len(initialContractConfig)) require.Equal(t, []byte("aaaaaa"), initialContractConfig[0]) - err = s.ProcessSystemSmartContract(nil, 1, 1) + err = s.ProcessSystemSmartContract(state.NewShardValidatorsInfoMap(), &block.Header{Nonce: 1, Epoch: 1}) require.Nil(t, err) @@ -1414,47 +1412,48 @@ func TestSystemSCProcessor_ProcessSystemSmartContractUnStakeOneNodeStakeOthers(t []byte("rewardAddress"), []byte("rewardAddress"), ) + stakingcommon.RegisterValidatorKeys(args.UserAccountsDB, + []byte("ownerKey"), + []byte("ownerKey"), + [][]byte{[]byte("stakedPubKey1"), []byte("stakedPubKey2"), []byte("stakedPubKey3")}, + big.NewInt(2000), + args.Marshalizer, + ) - addStakedData(args.UserAccountsDB, []byte("stakedPubKey1"), []byte("ownerKey"), args.Marshalizer) - addStakedData(args.UserAccountsDB, []byte("stakedPubKey2"), []byte("ownerKey"), args.Marshalizer) - addStakedData(args.UserAccountsDB, []byte("stakedPubKey3"), []byte("ownerKey"), args.Marshalizer) - addValidatorData(args.UserAccountsDB, []byte("ownerKey"), [][]byte{[]byte("stakedPubKey1"), []byte("stakedPubKey2"), []byte("stakedPubKey3")}, big.NewInt(2000), args.Marshalizer) - _, _ = args.UserAccountsDB.Commit() - - validatorInfos := make(map[uint32][]*state.ValidatorInfo) - validatorInfos[0] = append(validatorInfos[0], &state.ValidatorInfo{ + 
validatorsInfo := state.NewShardValidatorsInfoMap() + _ = validatorsInfo.Add(&state.ValidatorInfo{ PublicKey: []byte("stakedPubKey0"), List: string(common.EligibleList), RewardAddress: []byte("rewardAddress"), AccumulatedFees: big.NewInt(0), }) - validatorInfos[0] = append(validatorInfos[0], &state.ValidatorInfo{ + _ = validatorsInfo.Add(&state.ValidatorInfo{ PublicKey: []byte("stakedPubKey1"), List: string(common.EligibleList), RewardAddress: []byte("rewardAddress"), AccumulatedFees: big.NewInt(0), }) - validatorInfos[0] = append(validatorInfos[0], &state.ValidatorInfo{ + _ = validatorsInfo.Add(&state.ValidatorInfo{ PublicKey: []byte("stakedPubKey2"), List: string(common.EligibleList), RewardAddress: []byte("rewardAddress"), AccumulatedFees: big.NewInt(0), }) - validatorInfos[0] = append(validatorInfos[0], &state.ValidatorInfo{ + _ = validatorsInfo.Add(&state.ValidatorInfo{ PublicKey: []byte("stakedPubKey3"), List: string(common.EligibleList), RewardAddress: []byte("rewardAddress"), AccumulatedFees: big.NewInt(0), }) - for _, vInfo := range validatorInfos[0] { - jailedAcc, _ := args.PeerAccountsDB.LoadAccount(vInfo.PublicKey) + for _, vInfo := range validatorsInfo.GetShardValidatorsInfoMap()[0] { + jailedAcc, _ := args.PeerAccountsDB.LoadAccount(vInfo.GetPublicKey()) _ = args.PeerAccountsDB.SaveAccount(jailedAcc) } args.EpochNotifier.CheckEpoch(&testscommon.HeaderHandlerStub{ EpochField: 1, // disable stakingV2OwnerFlag }) - err := s.ProcessSystemSmartContract(validatorInfos, 0, 0) + err := s.ProcessSystemSmartContract(validatorsInfo, &block.Header{}) assert.Nil(t, err) peerAcc, err := s.getPeerAccount([]byte("waitingPubKey")) @@ -1465,10 +1464,10 @@ func TestSystemSCProcessor_ProcessSystemSmartContractUnStakeOneNodeStakeOthers(t peerAcc, _ = s.getPeerAccount([]byte("stakedPubKey1")) assert.Equal(t, peerAcc.GetList(), string(common.LeavingList)) - assert.Equal(t, string(common.LeavingList), validatorInfos[0][1].List) + assert.Equal(t, string(common.LeavingList), validatorsInfo.GetShardValidatorsInfoMap()[0][1].GetList()) - assert.Equal(t, 5, len(validatorInfos[0])) - assert.Equal(t, string(common.NewList), validatorInfos[0][4].List) + assert.Len(t, validatorsInfo.GetShardValidatorsInfoMap()[0], 5) + assert.Equal(t, string(common.NewList), validatorsInfo.GetShardValidatorsInfoMap()[0][4].GetList()) } func TestSystemSCProcessor_ProcessSystemSmartContractUnStakeTheOnlyNodeShouldWork(t *testing.T) { @@ -1486,18 +1485,18 @@ func TestSystemSCProcessor_ProcessSystemSmartContractUnStakeTheOnlyNodeShouldWor []byte("rewardAddress"), ) - addStakedData(args.UserAccountsDB, []byte("stakedPubKey1"), []byte("ownerKey"), args.Marshalizer) + stakingcommon.AddStakingData(args.UserAccountsDB, []byte("ownerKey"), []byte("ownerKey"), [][]byte{[]byte("stakedPubKey1")}, args.Marshalizer) addValidatorDataWithUnStakedKey(args.UserAccountsDB, []byte("ownerKey"), [][]byte{[]byte("stakedPubKey1")}, big.NewInt(1000), args.Marshalizer) _, _ = args.UserAccountsDB.Commit() - validatorInfos := make(map[uint32][]*state.ValidatorInfo) - validatorInfos[0] = append(validatorInfos[0], &state.ValidatorInfo{ + validatorsInfo := state.NewShardValidatorsInfoMap() + _ = validatorsInfo.Add(&state.ValidatorInfo{ PublicKey: []byte("stakedPubKey0"), List: string(common.EligibleList), RewardAddress: []byte("rewardAddress"), AccumulatedFees: big.NewInt(0), }) - validatorInfos[0] = append(validatorInfos[0], &state.ValidatorInfo{ + _ = validatorsInfo.Add(&state.ValidatorInfo{ PublicKey: []byte("stakedPubKey1"), List: 
string(common.EligibleList), RewardAddress: []byte("rewardAddress"), @@ -1507,7 +1506,7 @@ func TestSystemSCProcessor_ProcessSystemSmartContractUnStakeTheOnlyNodeShouldWor args.EpochNotifier.CheckEpoch(&testscommon.HeaderHandlerStub{ EpochField: 1, // disable stakingV2OwnerFlag }) - err := s.ProcessSystemSmartContract(validatorInfos, 0, 0) + err := s.ProcessSystemSmartContract(validatorsInfo, &block.Header{}) assert.Nil(t, err) } @@ -1517,7 +1516,7 @@ func addDelegationData( stakedKeys [][]byte, marshalizer marshal.Marshalizer, ) { - delegatorSC := loadSCAccount(accountsDB, delegation) + delegatorSC := stakingcommon.LoadUserAccount(accountsDB, delegation) dStatus := &systemSmartContracts.DelegationContractStatus{ StakedKeys: make([]*systemSmartContracts.NodesData, 0), NotStakedKeys: make([]*systemSmartContracts.NodesData, 0), @@ -1545,68 +1544,71 @@ func TestSystemSCProcessor_ProcessSystemSmartContractUnStakeFromDelegationContra contract, _ := scContainer.Get(vm.FirstDelegationSCAddress) _ = scContainer.Add(delegationAddr, contract) - prepareStakingContractWithData( + stakingcommon.AddStakingData( args.UserAccountsDB, - []byte("stakedPubKey0"), - []byte("waitingPubKey"), + delegationAddr, + delegationAddr, + [][]byte{[]byte("stakedPubKey1"), []byte("stakedPubKey2"), []byte("stakedPubKey3")}, args.Marshalizer, + ) + allKeys := [][]byte{[]byte("stakedPubKey0"), []byte("waitingPubKey"), []byte("stakedPubKey1"), []byte("stakedPubKey2"), []byte("stakedPubKey3")} + stakingcommon.RegisterValidatorKeys( + args.UserAccountsDB, delegationAddr, delegationAddr, + allKeys, + big.NewInt(3000), + args.Marshalizer, ) - addStakedData(args.UserAccountsDB, []byte("stakedPubKey1"), delegationAddr, args.Marshalizer) - addStakedData(args.UserAccountsDB, []byte("stakedPubKey2"), delegationAddr, args.Marshalizer) - addStakedData(args.UserAccountsDB, []byte("stakedPubKey3"), delegationAddr, args.Marshalizer) - allKeys := [][]byte{[]byte("stakedPubKey0"), []byte("waitingPubKey"), []byte("stakedPubKey1"), []byte("stakedPubKey2"), []byte("stakedPubKey3")} - addValidatorData(args.UserAccountsDB, delegationAddr, allKeys, big.NewInt(3000), args.Marshalizer) addDelegationData(args.UserAccountsDB, delegationAddr, allKeys, args.Marshalizer) _, _ = args.UserAccountsDB.Commit() - validatorInfos := make(map[uint32][]*state.ValidatorInfo) - validatorInfos[0] = append(validatorInfos[0], &state.ValidatorInfo{ + validatorsInfo := state.NewShardValidatorsInfoMap() + _ = validatorsInfo.Add(&state.ValidatorInfo{ PublicKey: []byte("stakedPubKey0"), List: string(common.EligibleList), RewardAddress: delegationAddr, AccumulatedFees: big.NewInt(0), }) - validatorInfos[0] = append(validatorInfos[0], &state.ValidatorInfo{ + _ = validatorsInfo.Add(&state.ValidatorInfo{ PublicKey: []byte("stakedPubKey1"), List: string(common.EligibleList), RewardAddress: delegationAddr, AccumulatedFees: big.NewInt(0), }) - validatorInfos[0] = append(validatorInfos[0], &state.ValidatorInfo{ + _ = validatorsInfo.Add(&state.ValidatorInfo{ PublicKey: []byte("stakedPubKey2"), List: string(common.WaitingList), RewardAddress: delegationAddr, AccumulatedFees: big.NewInt(0), }) - validatorInfos[0] = append(validatorInfos[0], &state.ValidatorInfo{ + _ = validatorsInfo.Add(&state.ValidatorInfo{ PublicKey: []byte("stakedPubKey3"), List: string(common.WaitingList), RewardAddress: delegationAddr, AccumulatedFees: big.NewInt(0), }) - for _, vInfo := range validatorInfos[0] { - jailedAcc, _ := args.PeerAccountsDB.LoadAccount(vInfo.PublicKey) + for _, vInfo := range 
validatorsInfo.GetShardValidatorsInfoMap()[0] { + jailedAcc, _ := args.PeerAccountsDB.LoadAccount(vInfo.GetPublicKey()) _ = args.PeerAccountsDB.SaveAccount(jailedAcc) } args.EpochNotifier.CheckEpoch(&testscommon.HeaderHandlerStub{ EpochField: 1, // disable stakingV2OwnerFlag }) - err := s.ProcessSystemSmartContract(validatorInfos, 0, 0) + err := s.ProcessSystemSmartContract(validatorsInfo, &block.Header{}) assert.Nil(t, err) - for _, vInfo := range validatorInfos[0] { - assert.NotEqual(t, string(common.NewList), vInfo.List) + for _, vInfo := range validatorsInfo.GetShardValidatorsInfoMap()[0] { + assert.NotEqual(t, string(common.NewList), vInfo.GetList()) } peerAcc, _ := s.getPeerAccount([]byte("stakedPubKey2")) assert.Equal(t, peerAcc.GetList(), string(common.LeavingList)) - assert.Equal(t, 4, len(validatorInfos[0])) + assert.Len(t, validatorsInfo.GetShardValidatorsInfoMap()[0], 4) - delegationSC := loadSCAccount(args.UserAccountsDB, delegationAddr) + delegationSC := stakingcommon.LoadUserAccount(args.UserAccountsDB, delegationAddr) marshalledData, _, err := delegationSC.RetrieveValue([]byte("delegationStatus")) assert.Nil(t, err) dStatus := &systemSmartContracts.DelegationContractStatus{ @@ -1633,67 +1635,55 @@ func TestSystemSCProcessor_ProcessSystemSmartContractShouldUnStakeFromAdditional contract, _ := scContainer.Get(vm.FirstDelegationSCAddress) _ = scContainer.Add(delegationAddr, contract) - prepareStakingContractWithData( - args.UserAccountsDB, - []byte("stakedPubKey0"), - []byte("waitingPubKey"), - args.Marshalizer, - delegationAddr, - delegationAddr, - ) - - addStakedData(args.UserAccountsDB, []byte("stakedPubKey1"), delegationAddr, args.Marshalizer) - addStakedData(args.UserAccountsDB, []byte("stakedPubKey2"), delegationAddr, args.Marshalizer) - addStakedData(args.UserAccountsDB, []byte("stakedPubKey3"), delegationAddr, args.Marshalizer) + listOfKeysInWaiting := [][]byte{[]byte("waitingPubKey"), []byte("waitingPubKe1"), []byte("waitingPubKe2"), []byte("waitingPubKe3"), []byte("waitingPubKe4")} + allStakedKeys := append(listOfKeysInWaiting, []byte("stakedPubKey0"), []byte("stakedPubKey1"), []byte("stakedPubKey2"), []byte("stakedPubKey3")) - listOfKeysInWaiting := [][]byte{[]byte("waitingPubKe1"), []byte("waitingPubKe2"), []byte("waitingPubKe3"), []byte("waitingPubKe4")} - allStakedKeys := append(listOfKeysInWaiting, []byte("waitingPubKey"), []byte("stakedPubKey0"), []byte("stakedPubKey1"), []byte("stakedPubKey2"), []byte("stakedPubKey3")) - addKeysToWaitingList(args.UserAccountsDB, listOfKeysInWaiting, args.Marshalizer, delegationAddr, delegationAddr) - addValidatorData(args.UserAccountsDB, delegationAddr, allStakedKeys, big.NewInt(4000), args.Marshalizer) + stakingcommon.RegisterValidatorKeys(args.UserAccountsDB, delegationAddr, delegationAddr, allStakedKeys, big.NewInt(4000), args.Marshalizer) + stakingcommon.AddKeysToWaitingList(args.UserAccountsDB, listOfKeysInWaiting, args.Marshalizer, delegationAddr, delegationAddr) addDelegationData(args.UserAccountsDB, delegationAddr, allStakedKeys, args.Marshalizer) _, _ = args.UserAccountsDB.Commit() - validatorInfos := make(map[uint32][]*state.ValidatorInfo) - validatorInfos[0] = append(validatorInfos[0], &state.ValidatorInfo{ + validatorsInfo := state.NewShardValidatorsInfoMap() + _ = validatorsInfo.Add(&state.ValidatorInfo{ PublicKey: []byte("stakedPubKey0"), List: string(common.EligibleList), RewardAddress: delegationAddr, AccumulatedFees: big.NewInt(0), }) - validatorInfos[0] = append(validatorInfos[0], &state.ValidatorInfo{ + _ = 
validatorsInfo.Add(&state.ValidatorInfo{ PublicKey: []byte("stakedPubKey1"), List: string(common.EligibleList), RewardAddress: delegationAddr, AccumulatedFees: big.NewInt(0), }) - validatorInfos[0] = append(validatorInfos[0], &state.ValidatorInfo{ + _ = validatorsInfo.Add(&state.ValidatorInfo{ PublicKey: []byte("stakedPubKey2"), List: string(common.EligibleList), RewardAddress: delegationAddr, AccumulatedFees: big.NewInt(0), }) - validatorInfos[0] = append(validatorInfos[0], &state.ValidatorInfo{ + _ = validatorsInfo.Add(&state.ValidatorInfo{ PublicKey: []byte("stakedPubKey3"), List: string(common.EligibleList), RewardAddress: delegationAddr, AccumulatedFees: big.NewInt(0), }) - for _, vInfo := range validatorInfos[0] { - jailedAcc, _ := args.PeerAccountsDB.LoadAccount(vInfo.PublicKey) + for _, vInfo := range validatorsInfo.GetShardValidatorsInfoMap()[0] { + jailedAcc, _ := args.PeerAccountsDB.LoadAccount(vInfo.GetPublicKey()) _ = args.PeerAccountsDB.SaveAccount(jailedAcc) } args.EpochNotifier.CheckEpoch(&testscommon.HeaderHandlerStub{ EpochField: 1, // disable stakingV2OwnerFlag }) - err := s.ProcessSystemSmartContract(validatorInfos, 0, 0) + err := s.ProcessSystemSmartContract(validatorsInfo, &block.Header{}) assert.Nil(t, err) - for _, vInfo := range validatorInfos[0] { - assert.Equal(t, string(common.EligibleList), vInfo.List) + for _, vInfo := range validatorsInfo.GetShardValidatorsInfoMap()[0] { + assert.Equal(t, string(common.EligibleList), vInfo.GetList()) } - delegationSC := loadSCAccount(args.UserAccountsDB, delegationAddr) + delegationSC := stakingcommon.LoadUserAccount(args.UserAccountsDB, delegationAddr) marshalledData, _, err := delegationSC.RetrieveValue([]byte("delegationStatus")) assert.Nil(t, err) dStatus := &systemSmartContracts.DelegationContractStatus{ @@ -1728,10 +1718,14 @@ func TestSystemSCProcessor_ProcessSystemSmartContractUnStakeFromAdditionalQueue( delegationAddr, ) - addStakedData(args.UserAccountsDB, []byte("stakedPubKey1"), delegationAddr, args.Marshalizer) - addStakedData(args.UserAccountsDB, []byte("stakedPubKey2"), delegationAddr, args.Marshalizer) - addStakedData(args.UserAccountsDB, []byte("stakedPubKey3"), delegationAddr, args.Marshalizer) - addValidatorData(args.UserAccountsDB, delegationAddr, [][]byte{[]byte("stakedPubKey1"), []byte("stakedPubKey2"), []byte("stakedPubKey3"), []byte("waitingPubKey")}, big.NewInt(10000), args.Marshalizer) + stakingcommon.AddStakingData(args.UserAccountsDB, + delegationAddr, + delegationAddr, + [][]byte{[]byte("stakedPubKey1"), []byte("stakedPubKey2"), []byte("stakedPubKey3")}, + args.Marshalizer, + ) + + stakingcommon.AddValidatorData(args.UserAccountsDB, delegationAddr, [][]byte{[]byte("stakedPubKey1"), []byte("stakedPubKey2"), []byte("stakedPubKey3"), []byte("waitingPubKey")}, big.NewInt(10000), args.Marshalizer) addDelegationData(args.UserAccountsDB, delegationAddr, [][]byte{[]byte("stakedPubKey1"), []byte("stakedPubKey2"), []byte("stakedPubKey3"), []byte("waitingPubKey")}, args.Marshalizer) _, _ = args.UserAccountsDB.Commit() @@ -1740,47 +1734,47 @@ func TestSystemSCProcessor_ProcessSystemSmartContractUnStakeFromAdditionalQueue( _ = scContainer.Add(delegationAddr2, contract) listOfKeysInWaiting := [][]byte{[]byte("waitingPubKe1"), []byte("waitingPubKe2"), []byte("waitingPubKe3"), []byte("waitingPubKe4")} - addKeysToWaitingList(args.UserAccountsDB, listOfKeysInWaiting, args.Marshalizer, delegationAddr2, delegationAddr2) - addValidatorData(args.UserAccountsDB, delegationAddr2, listOfKeysInWaiting, big.NewInt(2000), 
args.Marshalizer) + stakingcommon.AddKeysToWaitingList(args.UserAccountsDB, listOfKeysInWaiting, args.Marshalizer, delegationAddr2, delegationAddr2) + stakingcommon.AddValidatorData(args.UserAccountsDB, delegationAddr2, listOfKeysInWaiting, big.NewInt(2000), args.Marshalizer) addDelegationData(args.UserAccountsDB, delegationAddr2, listOfKeysInWaiting, args.Marshalizer) _, _ = args.UserAccountsDB.Commit() - validatorInfos := make(map[uint32][]*state.ValidatorInfo) - validatorInfos[0] = append(validatorInfos[0], &state.ValidatorInfo{ + validatorsInfo := state.NewShardValidatorsInfoMap() + _ = validatorsInfo.Add(&state.ValidatorInfo{ PublicKey: []byte("stakedPubKey0"), List: string(common.EligibleList), RewardAddress: delegationAddr, AccumulatedFees: big.NewInt(0), }) - validatorInfos[0] = append(validatorInfos[0], &state.ValidatorInfo{ + _ = validatorsInfo.Add(&state.ValidatorInfo{ PublicKey: []byte("stakedPubKey1"), List: string(common.EligibleList), RewardAddress: delegationAddr, AccumulatedFees: big.NewInt(0), }) - validatorInfos[0] = append(validatorInfos[0], &state.ValidatorInfo{ + _ = validatorsInfo.Add(&state.ValidatorInfo{ PublicKey: []byte("stakedPubKey2"), List: string(common.EligibleList), RewardAddress: delegationAddr, AccumulatedFees: big.NewInt(0), }) - validatorInfos[0] = append(validatorInfos[0], &state.ValidatorInfo{ + _ = validatorsInfo.Add(&state.ValidatorInfo{ PublicKey: []byte("stakedPubKey3"), List: string(common.EligibleList), RewardAddress: delegationAddr, AccumulatedFees: big.NewInt(0), }) - for _, vInfo := range validatorInfos[0] { - peerAcc, _ := args.PeerAccountsDB.LoadAccount(vInfo.PublicKey) + for _, vInfo := range validatorsInfo.GetShardValidatorsInfoMap()[0] { + peerAcc, _ := args.PeerAccountsDB.LoadAccount(vInfo.GetPublicKey()) _ = args.PeerAccountsDB.SaveAccount(peerAcc) } args.EpochNotifier.CheckEpoch(&testscommon.HeaderHandlerStub{ EpochField: 1, // disable stakingV2OwnerFlag }) - err := s.ProcessSystemSmartContract(validatorInfos, 0, 0) + err := s.ProcessSystemSmartContract(validatorsInfo, &block.Header{}) assert.Nil(t, err) - delegationSC := loadSCAccount(args.UserAccountsDB, delegationAddr2) + delegationSC := stakingcommon.LoadUserAccount(args.UserAccountsDB, delegationAddr2) marshalledData, _, err := delegationSC.RetrieveValue([]byte("delegationStatus")) assert.Nil(t, err) dStatus := &systemSmartContracts.DelegationContractStatus{ @@ -1796,7 +1790,7 @@ func TestSystemSCProcessor_ProcessSystemSmartContractUnStakeFromAdditionalQueue( assert.Equal(t, []byte("waitingPubKe4"), dStatus.UnStakedKeys[0].BLSKey) assert.Equal(t, []byte("waitingPubKe3"), dStatus.UnStakedKeys[1].BLSKey) - stakingSCAcc := loadSCAccount(args.UserAccountsDB, vm.StakingSCAddress) + stakingSCAcc := stakingcommon.LoadUserAccount(args.UserAccountsDB, vm.StakingSCAddress) marshaledData, _, _ := stakingSCAcc.RetrieveValue([]byte("waitingList")) waitingListHead := &systemSmartContracts.WaitingList{} _ = args.Marshalizer.Unmarshal(waitingListHead, marshaledData) @@ -1818,42 +1812,42 @@ func TestSystemSCProcessor_ProcessSystemSmartContractWrongValidatorInfoShouldBeC []byte("oneAddress1"), ) - validatorInfos := make(map[uint32][]*state.ValidatorInfo) - validatorInfos[0] = append(validatorInfos[0], &state.ValidatorInfo{ + validatorsInfo := state.NewShardValidatorsInfoMap() + _ = validatorsInfo.Add(&state.ValidatorInfo{ PublicKey: []byte("stakedPubKey0"), List: "", RewardAddress: []byte("stakedPubKey0"), AccumulatedFees: big.NewInt(0), }) - validatorInfos[0] = append(validatorInfos[0], 
&state.ValidatorInfo{ + _ = validatorsInfo.Add(&state.ValidatorInfo{ PublicKey: []byte("stakedPubKey1"), List: "", RewardAddress: []byte("stakedPubKey0"), AccumulatedFees: big.NewInt(0), }) - validatorInfos[0] = append(validatorInfos[0], &state.ValidatorInfo{ + _ = validatorsInfo.Add(&state.ValidatorInfo{ PublicKey: []byte("stakedPubKey2"), List: "", RewardAddress: []byte("stakedPubKey0"), AccumulatedFees: big.NewInt(0), }) - validatorInfos[0] = append(validatorInfos[0], &state.ValidatorInfo{ + _ = validatorsInfo.Add(&state.ValidatorInfo{ PublicKey: []byte("stakedPubKey3"), List: "", RewardAddress: []byte("stakedPubKey0"), AccumulatedFees: big.NewInt(0), }) - validatorInfos[0] = append(validatorInfos[0], &state.ValidatorInfo{ + _ = validatorsInfo.Add(&state.ValidatorInfo{ PublicKey: []byte("oneAddress1"), List: string(common.EligibleList), RewardAddress: []byte("oneAddress1"), AccumulatedFees: big.NewInt(0), }) - err := s.ProcessSystemSmartContract(validatorInfos, 0, 0) + err := s.ProcessSystemSmartContract(validatorsInfo, &block.Header{}) assert.Nil(t, err) - assert.Equal(t, len(validatorInfos[0]), 1) + assert.Len(t, validatorsInfo.GetShardValidatorsInfoMap()[0], 1) } func TestSystemSCProcessor_TogglePauseUnPause(t *testing.T) { @@ -1865,14 +1859,14 @@ func TestSystemSCProcessor_TogglePauseUnPause(t *testing.T) { err := s.ToggleUnStakeUnBond(true) assert.Nil(t, err) - validatorSC := loadSCAccount(s.userAccountsDB, vm.ValidatorSCAddress) + validatorSC := stakingcommon.LoadUserAccount(s.userAccountsDB, vm.ValidatorSCAddress) value, _, _ := validatorSC.RetrieveValue([]byte("unStakeUnBondPause")) assert.True(t, value[0] == 1) err = s.ToggleUnStakeUnBond(false) assert.Nil(t, err) - validatorSC = loadSCAccount(s.userAccountsDB, vm.ValidatorSCAddress) + validatorSC = stakingcommon.LoadUserAccount(s.userAccountsDB, vm.ValidatorSCAddress) value, _, _ = validatorSC.RetrieveValue([]byte("unStakeUnBondPause")) assert.True(t, value[0] == 0) } @@ -1904,58 +1898,60 @@ func TestSystemSCProcessor_ProcessSystemSmartContractJailAndUnStake(t *testing.T args, _ := createFullArgumentsForSystemSCProcessing(config.EnableEpochs{}, testscommon.CreateMemUnit()) s, _ := NewSystemSCProcessor(args) - addStakedData(args.UserAccountsDB, []byte("stakedPubKey0"), []byte("ownerKey"), args.Marshalizer) - addStakedData(args.UserAccountsDB, []byte("stakedPubKey1"), []byte("ownerKey"), args.Marshalizer) - addStakedData(args.UserAccountsDB, []byte("stakedPubKey2"), []byte("ownerKey"), args.Marshalizer) - addStakedData(args.UserAccountsDB, []byte("stakedPubKey3"), []byte("ownerKey"), args.Marshalizer) - saveOneKeyToWaitingList(args.UserAccountsDB, []byte("waitingPubKey"), args.Marshalizer, []byte("ownerKey"), []byte("ownerKey")) - addValidatorData(args.UserAccountsDB, []byte("ownerKey"), [][]byte{[]byte("stakedPubKey0"), []byte("stakedPubKey1"), []byte("stakedPubKey2"), []byte("stakedPubKey3"), []byte("waitingPubKey")}, big.NewInt(0), args.Marshalizer) + stakingcommon.AddStakingData(args.UserAccountsDB, + []byte("ownerKey"), + []byte("ownerKey"), + [][]byte{[]byte("stakedPubKey0"), []byte("stakedPubKey1"), []byte("stakedPubKey2"), []byte("stakedPubKey3")}, + args.Marshalizer, + ) + stakingcommon.AddKeysToWaitingList(args.UserAccountsDB, [][]byte{[]byte("waitingPubKey")}, args.Marshalizer, []byte("ownerKey"), []byte("ownerKey")) + stakingcommon.AddValidatorData(args.UserAccountsDB, []byte("ownerKey"), [][]byte{[]byte("stakedPubKey0"), []byte("stakedPubKey1"), []byte("stakedPubKey2"), []byte("stakedPubKey3"), 
[]byte("waitingPubKey")}, big.NewInt(0), args.Marshalizer) _, _ = args.UserAccountsDB.Commit() - validatorInfos := make(map[uint32][]*state.ValidatorInfo) - validatorInfos[0] = append(validatorInfos[0], &state.ValidatorInfo{ + validatorsInfo := state.NewShardValidatorsInfoMap() + _ = validatorsInfo.Add(&state.ValidatorInfo{ PublicKey: []byte("stakedPubKey0"), List: string(common.EligibleList), RewardAddress: []byte("ownerKey"), AccumulatedFees: big.NewInt(0), }) - validatorInfos[0] = append(validatorInfos[0], &state.ValidatorInfo{ + _ = validatorsInfo.Add(&state.ValidatorInfo{ PublicKey: []byte("stakedPubKey1"), List: string(common.EligibleList), RewardAddress: []byte("ownerKey"), AccumulatedFees: big.NewInt(0), }) - validatorInfos[0] = append(validatorInfos[0], &state.ValidatorInfo{ + _ = validatorsInfo.Add(&state.ValidatorInfo{ PublicKey: []byte("stakedPubKey2"), List: string(common.EligibleList), RewardAddress: []byte("ownerKey"), AccumulatedFees: big.NewInt(0), }) - validatorInfos[0] = append(validatorInfos[0], &state.ValidatorInfo{ + _ = validatorsInfo.Add(&state.ValidatorInfo{ PublicKey: []byte("stakedPubKey3"), List: string(common.EligibleList), RewardAddress: []byte("ownerKey"), AccumulatedFees: big.NewInt(0), }) - for _, vInfo := range validatorInfos[0] { - jailedAcc, _ := args.PeerAccountsDB.LoadAccount(vInfo.PublicKey) + for _, vInfo := range validatorsInfo.GetShardValidatorsInfoMap()[0] { + jailedAcc, _ := args.PeerAccountsDB.LoadAccount(vInfo.GetPublicKey()) _ = args.PeerAccountsDB.SaveAccount(jailedAcc) } args.EpochNotifier.CheckEpoch(&testscommon.HeaderHandlerStub{ EpochField: 1, // disable stakingV2OwnerFlag }) - err := s.ProcessSystemSmartContract(validatorInfos, 0, 0) + err := s.ProcessSystemSmartContract(validatorsInfo, &block.Header{}) assert.Nil(t, err) _, err = s.peerAccountsDB.GetExistingAccount([]byte("waitingPubKey")) assert.NotNil(t, err) - assert.Equal(t, 4, len(validatorInfos[0])) - for _, vInfo := range validatorInfos[0] { - assert.Equal(t, vInfo.List, string(common.LeavingList)) - peerAcc, _ := s.getPeerAccount(vInfo.PublicKey) + assert.Len(t, validatorsInfo.GetShardValidatorsInfoMap()[0], 4) + for _, vInfo := range validatorsInfo.GetShardValidatorsInfoMap()[0] { + assert.Equal(t, vInfo.GetList(), string(common.LeavingList)) + peerAcc, _ := s.getPeerAccount(vInfo.GetPublicKey()) assert.Equal(t, peerAcc.GetList(), string(common.LeavingList)) } } @@ -1985,28 +1981,480 @@ func TestSystemSCProcessor_ProcessSystemSmartContractSwapJailedWithWaiting(t *te jailedAcc, _ := args.PeerAccountsDB.LoadAccount([]byte("jailedPubKey0")) _ = args.PeerAccountsDB.SaveAccount(jailedAcc) - validatorInfos := make(map[uint32][]*state.ValidatorInfo) - vInfo := &state.ValidatorInfo{ + validatorsInfo := state.NewShardValidatorsInfoMap() + _ = validatorsInfo.Add(&state.ValidatorInfo{ PublicKey: []byte("jailedPubKey0"), ShardId: 0, List: string(common.JailedList), TempRating: 1, RewardAddress: []byte("address"), AccumulatedFees: big.NewInt(0), - } - validatorInfos[0] = append(validatorInfos[0], vInfo) - - vInfo1 := &state.ValidatorInfo{ + }) + _ = validatorsInfo.Add(&state.ValidatorInfo{ PublicKey: []byte("waitingPubKey"), ShardId: 0, List: string(common.WaitingList), - } - validatorInfos[0] = append(validatorInfos[0], vInfo1) + }) - err := s.ProcessSystemSmartContract(validatorInfos, 0, 0) + err := s.ProcessSystemSmartContract(validatorsInfo, &block.Header{}) assert.Nil(t, err) - assert.Equal(t, 2, len(validatorInfos[0])) - newValidatorInfo := validatorInfos[0][0] - assert.Equal(t, 
newValidatorInfo.List, string(common.NewList)) + require.Len(t, validatorsInfo.GetShardValidatorsInfoMap()[0], 2) + newValidatorInfo := validatorsInfo.GetShardValidatorsInfoMap()[0][0] + require.Equal(t, newValidatorInfo.GetList(), string(common.NewList)) +} + +func TestSystemSCProcessor_ProcessSystemSmartContractStakingV4Init(t *testing.T) { + t.Parallel() + + args, _ := createFullArgumentsForSystemSCProcessing(config.EnableEpochs{}, testscommon.CreateMemUnit()) + s, _ := NewSystemSCProcessor(args) + + owner1 := []byte("owner1") + owner2 := []byte("owner2") + owner3 := []byte("owner3") + + owner1ListPubKeysWaiting := [][]byte{[]byte("waitingPubKe0"), []byte("waitingPubKe1"), []byte("waitingPubKe2")} + owner1ListPubKeysStaked := [][]byte{[]byte("stakedPubKey0"), []byte("stakedPubKey1")} + owner1AllPubKeys := append(owner1ListPubKeysWaiting, owner1ListPubKeysStaked...) + + owner2ListPubKeysWaiting := [][]byte{[]byte("waitingPubKe3"), []byte("waitingPubKe4")} + owner2ListPubKeysStaked := [][]byte{[]byte("stakedPubKey2")} + owner2AllPubKeys := append(owner2ListPubKeysWaiting, owner2ListPubKeysStaked...) + + owner3ListPubKeysWaiting := [][]byte{[]byte("waitingPubKe5"), []byte("waitingPubKe6")} + + // Owner1 has 2 staked nodes (one eligible, one waiting) in shard0 + 3 nodes in staking queue. + // It has enough stake so that all his staking queue nodes will be selected in the auction list + stakingcommon.AddKeysToWaitingList(args.UserAccountsDB, owner1ListPubKeysWaiting, args.Marshalizer, owner1, owner1) + stakingcommon.RegisterValidatorKeys(args.UserAccountsDB, owner1, owner1, owner1AllPubKeys, big.NewInt(5000), args.Marshalizer) + + // Owner2 has 1 staked node (eligible) in shard1 + 2 nodes in staking queue. + // It has enough stake for only ONE node from staking queue to be selected in the auction list + stakingcommon.AddKeysToWaitingList(args.UserAccountsDB, owner2ListPubKeysWaiting, args.Marshalizer, owner2, owner2) + stakingcommon.RegisterValidatorKeys(args.UserAccountsDB, owner2, owner2, owner2AllPubKeys, big.NewInt(2500), args.Marshalizer) + + // Owner3 has 0 staked node + 2 nodes in staking queue. 
+ // It has enough stake so that all his staking queue nodes will be selected in the auction list + stakingcommon.AddKeysToWaitingList(args.UserAccountsDB, owner3ListPubKeysWaiting, args.Marshalizer, owner3, owner3) + stakingcommon.RegisterValidatorKeys(args.UserAccountsDB, owner3, owner3, owner3ListPubKeysWaiting, big.NewInt(2000), args.Marshalizer) + + validatorsInfo := state.NewShardValidatorsInfoMap() + _ = validatorsInfo.Add(createValidatorInfo(owner1ListPubKeysStaked[0], common.EligibleList, "", 0, owner1)) + _ = validatorsInfo.Add(createValidatorInfo(owner1ListPubKeysStaked[1], common.WaitingList, "", 0, owner1)) + _ = validatorsInfo.Add(createValidatorInfo(owner2ListPubKeysStaked[0], common.EligibleList, "", 1, owner2)) + + args.EpochNotifier.CheckEpoch(&block.Header{Epoch: stakingV4Step1EnableEpoch}) + err := s.ProcessSystemSmartContract(validatorsInfo, &block.Header{}) + require.Nil(t, err) + + expectedValidatorsInfo := map[uint32][]state.ValidatorInfoHandler{ + 0: { + createValidatorInfo(owner1ListPubKeysStaked[0], common.EligibleList, "", 0, owner1), + createValidatorInfo(owner1ListPubKeysStaked[1], common.WaitingList, "", 0, owner1), + }, + 1: { + createValidatorInfo(owner2ListPubKeysStaked[0], common.EligibleList, "", 1, owner2), + }, + } + + require.Equal(t, expectedValidatorsInfo, validatorsInfo.GetShardValidatorsInfoMap()) +} + +func TestSystemSCProcessor_ProcessSystemSmartContractStakingV4EnabledCannotPrepareStakingData(t *testing.T) { + t.Parallel() + + args, _ := createFullArgumentsForSystemSCProcessing(config.EnableEpochs{}, testscommon.CreateMemUnit()) + + errProcessStakingData := errors.New("error processing staking data") + args.StakingDataProvider = &stakingcommon.StakingDataProviderStub{ + PrepareStakingDataCalled: func(validatorsMap state.ShardValidatorsInfoMapHandler) error { + return errProcessStakingData + }, + } + + owner := []byte("owner") + ownerStakedKeys := [][]byte{[]byte("pubKey0"), []byte("pubKey1")} + stakingcommon.RegisterValidatorKeys(args.UserAccountsDB, owner, owner, ownerStakedKeys, big.NewInt(2000), args.Marshalizer) + + validatorsInfo := state.NewShardValidatorsInfoMap() + _ = validatorsInfo.Add(createValidatorInfo(ownerStakedKeys[0], common.AuctionList, "", 0, owner)) + _ = validatorsInfo.Add(createValidatorInfo(ownerStakedKeys[1], common.AuctionList, "", 0, owner)) + + s, _ := NewSystemSCProcessor(args) + s.EpochConfirmed(stakingV4Step2EnableEpoch, 0) + + err := s.ProcessSystemSmartContract(validatorsInfo, &block.Header{}) + require.Equal(t, errProcessStakingData, err) +} + +func TestSystemSCProcessor_ProcessSystemSmartContractStakingV4Enabled(t *testing.T) { + t.Parallel() + + args, _ := createFullArgumentsForSystemSCProcessing(config.EnableEpochs{}, testscommon.CreateMemUnit()) + nodesConfigProvider, _ := notifier.NewNodesConfigProvider(args.EpochNotifier, []config.MaxNodesChangeConfig{{MaxNumNodes: 8}}) + + auctionCfg := config.SoftAuctionConfig{ + TopUpStep: "10", + MinTopUp: "1", + MaxTopUp: "32000000", + MaxNumberOfIterations: 100000, + } + ald, _ := NewAuctionListDisplayer(ArgsAuctionListDisplayer{ + TableDisplayHandler: NewTableDisplayer(), + ValidatorPubKeyConverter: &testscommon.PubkeyConverterMock{}, + AddressPubKeyConverter: &testscommon.PubkeyConverterMock{}, + AuctionConfig: auctionCfg, + Denomination: 0, + }) + + argsAuctionListSelector := AuctionListSelectorArgs{ + ShardCoordinator: args.ShardCoordinator, + StakingDataProvider: args.StakingDataProvider, + MaxNodesChangeConfigProvider: nodesConfigProvider, + SoftAuctionConfig: 
auctionCfg, + AuctionListDisplayHandler: ald, + } + als, _ := NewAuctionListSelector(argsAuctionListSelector) + args.AuctionListSelector = als + + owner1 := []byte("owner1") + owner2 := []byte("owner2") + owner3 := []byte("owner3") + owner4 := []byte("owner4") + owner5 := []byte("owner5") + owner6 := []byte("owner6") + owner7 := []byte("owner7") + + owner1StakedKeys := [][]byte{[]byte("pubKey0"), []byte("pubKey1"), []byte("pubKey2")} + owner2StakedKeys := [][]byte{[]byte("pubKey3"), []byte("pubKey4"), []byte("pubKey5")} + owner3StakedKeys := [][]byte{[]byte("pubKey6"), []byte("pubKey7")} + owner4StakedKeys := [][]byte{[]byte("pubKey8"), []byte("pubKey9"), []byte("pubKe10"), []byte("pubKe11")} + owner5StakedKeys := [][]byte{[]byte("pubKe12"), []byte("pubKe13")} + owner6StakedKeys := [][]byte{[]byte("pubKe14"), []byte("pubKe15")} + owner7StakedKeys := [][]byte{[]byte("pubKe16"), []byte("pubKe17")} + + stakingcommon.RegisterValidatorKeys(args.UserAccountsDB, owner1, owner1, owner1StakedKeys, big.NewInt(6666), args.Marshalizer) + stakingcommon.RegisterValidatorKeys(args.UserAccountsDB, owner2, owner2, owner2StakedKeys, big.NewInt(5555), args.Marshalizer) + stakingcommon.RegisterValidatorKeys(args.UserAccountsDB, owner3, owner3, owner3StakedKeys, big.NewInt(4444), args.Marshalizer) + stakingcommon.RegisterValidatorKeys(args.UserAccountsDB, owner4, owner4, owner4StakedKeys, big.NewInt(6666), args.Marshalizer) + stakingcommon.RegisterValidatorKeys(args.UserAccountsDB, owner5, owner5, owner5StakedKeys, big.NewInt(1500), args.Marshalizer) + stakingcommon.RegisterValidatorKeys(args.UserAccountsDB, owner6, owner6, owner6StakedKeys, big.NewInt(1500), args.Marshalizer) + stakingcommon.RegisterValidatorKeys(args.UserAccountsDB, owner7, owner7, owner7StakedKeys, big.NewInt(1500), args.Marshalizer) + + validatorsInfo := state.NewShardValidatorsInfoMap() + _ = validatorsInfo.Add(createValidatorInfo(owner1StakedKeys[0], common.EligibleList, "", 0, owner1)) + _ = validatorsInfo.Add(createValidatorInfo(owner1StakedKeys[1], common.WaitingList, "", 0, owner1)) + _ = validatorsInfo.Add(createValidatorInfo(owner1StakedKeys[2], common.AuctionList, "", 0, owner1)) + + _ = validatorsInfo.Add(createValidatorInfo(owner2StakedKeys[0], common.EligibleList, "", 1, owner2)) + _ = validatorsInfo.Add(createValidatorInfo(owner2StakedKeys[1], common.AuctionList, "", 1, owner2)) + _ = validatorsInfo.Add(createValidatorInfo(owner2StakedKeys[2], common.AuctionList, "", 1, owner2)) + + _ = validatorsInfo.Add(createValidatorInfo(owner3StakedKeys[0], common.LeavingList, "", 1, owner3)) + _ = validatorsInfo.Add(createValidatorInfo(owner3StakedKeys[1], common.AuctionList, "", 1, owner3)) + + _ = validatorsInfo.Add(createValidatorInfo(owner4StakedKeys[0], common.JailedList, "", 1, owner4)) + _ = validatorsInfo.Add(createValidatorInfo(owner4StakedKeys[1], common.AuctionList, "", 1, owner4)) + _ = validatorsInfo.Add(createValidatorInfo(owner4StakedKeys[2], common.AuctionList, "", 1, owner4)) + _ = validatorsInfo.Add(createValidatorInfo(owner4StakedKeys[3], common.AuctionList, "", 1, owner4)) + + _ = validatorsInfo.Add(createValidatorInfo(owner5StakedKeys[0], common.EligibleList, "", 1, owner5)) + _ = validatorsInfo.Add(createValidatorInfo(owner5StakedKeys[1], common.AuctionList, "", 1, owner5)) + + _ = validatorsInfo.Add(createValidatorInfo(owner6StakedKeys[0], common.AuctionList, "", 1, owner6)) + _ = validatorsInfo.Add(createValidatorInfo(owner6StakedKeys[1], common.AuctionList, "", 1, owner6)) + + _ = 
validatorsInfo.Add(createValidatorInfo(owner7StakedKeys[0], common.EligibleList, "", 2, owner7)) + _ = validatorsInfo.Add(createValidatorInfo(owner7StakedKeys[1], common.EligibleList, "", 2, owner7)) + + s, _ := NewSystemSCProcessor(args) + args.EpochNotifier.CheckEpoch(&block.Header{Epoch: stakingV4Step2EnableEpoch}) + err := s.ProcessSystemSmartContract(validatorsInfo, &block.Header{PrevRandSeed: []byte("pubKey7")}) + require.Nil(t, err) + + /* + - owner5 does not have enough stake for 2 nodes=> his auction node (pubKe13) will be unStaked at the end of the epoch => + will not participate in auction selection + - owner6 does not have enough stake for 2 nodes => one of his auction nodes(pubKey14) will be unStaked at the end of the epoch => + his other auction node(pubKey15) will not participate in auction selection + - MaxNumNodes = 8 + - EligibleBlsKeys = 5 (pubKey0, pubKey1, pubKey3, pubKe13, pubKey17) + - QualifiedAuctionBlsKeys = 7 (pubKey2, pubKey4, pubKey5, pubKey7, pubKey9, pubKey10, pubKey11) + We can only select (MaxNumNodes - EligibleBlsKeys = 3) bls keys from AuctionList to be added to NewList + + -> Initial nodes config in auction list is: + +--------+------------------+------------------+-------------------+--------------+-----------------+---------------------------+ + | Owner | Num staked nodes | Num active nodes | Num auction nodes | Total top up | Top up per node | Auction list nodes | + +--------+------------------+------------------+-------------------+--------------+-----------------+---------------------------+ + | owner3 | 2 | 1 | 1 | 2444 | 1222 | pubKey7 | + | owner4 | 4 | 1 | 3 | 2666 | 666 | pubKey9, pubKe10, pubKe11 | + | owner1 | 3 | 2 | 1 | 3666 | 1222 | pubKey2 | + | owner2 | 3 | 1 | 2 | 2555 | 851 | pubKey4, pubKey5 | + +--------+------------------+------------------+-------------------+--------------+-----------------+---------------------------+ + -> Min possible topUp = 666; max possible topUp = 1333, min required topUp = 1216 + -> Selected nodes config in auction list. 
For each owner's auction nodes, qualified ones are selected by sorting the bls keys + +--------+------------------+----------------+--------------+-------------------+-----------------------------+------------------+---------------------------+-----------------------------+ + | Owner | Num staked nodes | TopUp per node | Total top up | Num auction nodes | Num qualified auction nodes | Num active nodes | Qualified top up per node | Selected auction list nodes | + +--------+------------------+----------------+--------------+-------------------+-----------------------------+------------------+---------------------------+-----------------------------+ + | owner1 | 3 | 1222 | 3666 | 1 | 1 | 2 | 1222 | pubKey2 | + | owner2 | 3 | 851 | 2555 | 2 | 1 | 1 | 1277 | pubKey5 | + | owner3 | 2 | 1222 | 2444 | 1 | 1 | 1 | 1222 | pubKey7 | + | owner4 | 4 | 666 | 2666 | 3 | 1 | 1 | 1333 | pubKey9 | + +--------+------------------+----------------+--------------+-------------------+-----------------------------+------------------+---------------------------+-----------------------------+ + -> Final selected nodes from auction list + +--------+----------------+--------------------------+ + | Owner | Registered key | Qualified TopUp per node | + +--------+----------------+--------------------------+ + | owner4 | pubKey9 | 1333 | + | owner2 | pubKey5 | 1277 | + | owner1 | pubKey2 | 1222 | + +--------+----------------+--------------------------+ + | owner3 | pubKey7 | 1222 | + +--------+----------------+--------------------------+ + + The following have 1222 top up per node: + - owner1 with 1 bls key = pubKey2 + - owner3 with 1 bls key = pubKey7 + + Since randomness = []byte("pubKey7"), nodes will be sorted based on blsKey XOR randomness, therefore: + - XOR1 = []byte("pubKey2") XOR []byte("pubKey7") = [0 0 0 0 0 0 5] + - XOR3 = []byte("pubKey7") XOR []byte("pubKey7") = [0 0 0 0 0 0 0] + */ + requireTopUpPerNodes(t, s.stakingDataProvider, owner1StakedKeys, big.NewInt(1222)) + requireTopUpPerNodes(t, s.stakingDataProvider, owner2StakedKeys, big.NewInt(851)) + requireTopUpPerNodes(t, s.stakingDataProvider, owner3StakedKeys, big.NewInt(1222)) + requireTopUpPerNodes(t, s.stakingDataProvider, owner4StakedKeys, big.NewInt(666)) + requireTopUpPerNodes(t, s.stakingDataProvider, owner5StakedKeys, big.NewInt(0)) + requireTopUpPerNodes(t, s.stakingDataProvider, owner6StakedKeys, big.NewInt(0)) + requireTopUpPerNodes(t, s.stakingDataProvider, owner7StakedKeys, big.NewInt(0)) + + expectedValidatorsInfo := map[uint32][]state.ValidatorInfoHandler{ + 0: { + createValidatorInfo(owner1StakedKeys[0], common.EligibleList, "", 0, owner1), + createValidatorInfo(owner1StakedKeys[1], common.WaitingList, "", 0, owner1), + createValidatorInfo(owner1StakedKeys[2], common.SelectedFromAuctionList, common.AuctionList, 0, owner1), + }, + 1: { + createValidatorInfo(owner2StakedKeys[0], common.EligibleList, "", 1, owner2), + createValidatorInfo(owner2StakedKeys[1], common.AuctionList, "", 1, owner2), + createValidatorInfo(owner2StakedKeys[2], common.SelectedFromAuctionList, common.AuctionList, 1, owner2), + + createValidatorInfo(owner3StakedKeys[0], common.LeavingList, "", 1, owner3), + createValidatorInfo(owner3StakedKeys[1], common.AuctionList, "", 1, owner3), + + createValidatorInfo(owner4StakedKeys[0], common.JailedList, "", 1, owner4), + createValidatorInfo(owner4StakedKeys[1], common.SelectedFromAuctionList, common.AuctionList, 1, owner4), + createValidatorInfo(owner4StakedKeys[2], common.AuctionList, "", 1, owner4), + 
createValidatorInfo(owner4StakedKeys[3], common.AuctionList, "", 1, owner4), + + createValidatorInfo(owner5StakedKeys[0], common.EligibleList, "", 1, owner5), + createValidatorInfo(owner5StakedKeys[1], common.LeavingList, common.AuctionList, 1, owner5), + + createValidatorInfo(owner6StakedKeys[0], common.LeavingList, common.AuctionList, 1, owner6), + createValidatorInfo(owner6StakedKeys[1], common.AuctionList, "", 1, owner6), + }, + 2: { + createValidatorInfo(owner7StakedKeys[0], common.LeavingList, common.EligibleList, 2, owner7), + createValidatorInfo(owner7StakedKeys[1], common.EligibleList, "", 2, owner7), + }, + } + + require.Equal(t, expectedValidatorsInfo, validatorsInfo.GetShardValidatorsInfoMap()) +} + +func TestSystemSCProcessor_LegacyEpochConfirmedCorrectMaxNumNodesAfterNodeRestart(t *testing.T) { + t.Parallel() + + args, _ := createFullArgumentsForSystemSCProcessing(config.EnableEpochs{}, testscommon.CreateMemUnit()) + nodesConfigEpoch0 := config.MaxNodesChangeConfig{ + EpochEnable: 0, + MaxNumNodes: 36, + NodesToShufflePerShard: 4, + } + nodesConfigEpoch1 := config.MaxNodesChangeConfig{ + EpochEnable: 1, + MaxNumNodes: 56, + NodesToShufflePerShard: 2, + } + nodesConfigEpoch6 := config.MaxNodesChangeConfig{ + EpochEnable: 6, + MaxNumNodes: 48, + NodesToShufflePerShard: 1, + } + nodesConfigProvider, _ := notifier.NewNodesConfigProvider( + args.EpochNotifier, + []config.MaxNodesChangeConfig{ + nodesConfigEpoch0, + nodesConfigEpoch1, + nodesConfigEpoch6, + }) + args.MaxNodesChangeConfigProvider = nodesConfigProvider + args.EnableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.StakingV2Flag) + validatorsInfoMap := state.NewShardValidatorsInfoMap() + s, _ := NewSystemSCProcessor(args) + + args.EpochNotifier.CheckEpoch(&block.Header{Epoch: 0, Nonce: 0}) + require.True(t, s.flagChangeMaxNodesEnabled.IsSet()) + err := s.processLegacy(validatorsInfoMap, 0, 0) + require.Nil(t, err) + require.Equal(t, nodesConfigEpoch0.MaxNumNodes, s.maxNodes) + + args.EpochNotifier.CheckEpoch(&block.Header{Epoch: 1, Nonce: 1}) + require.True(t, s.flagChangeMaxNodesEnabled.IsSet()) + err = s.processLegacy(validatorsInfoMap, 1, 1) + require.Nil(t, err) + require.Equal(t, nodesConfigEpoch1.MaxNumNodes, s.maxNodes) + + for epoch := uint32(2); epoch <= 5; epoch++ { + args.EpochNotifier.CheckEpoch(&block.Header{Epoch: epoch, Nonce: uint64(epoch)}) + require.False(t, s.flagChangeMaxNodesEnabled.IsSet()) + err = s.processLegacy(validatorsInfoMap, uint64(epoch), epoch) + require.Nil(t, err) + require.Equal(t, nodesConfigEpoch1.MaxNumNodes, s.maxNodes) + } + + // simulate restart + args.EpochNotifier.CheckEpoch(&block.Header{Epoch: 0, Nonce: 0}) + args.EpochNotifier.CheckEpoch(&block.Header{Epoch: 5, Nonce: 5}) + require.False(t, s.flagChangeMaxNodesEnabled.IsSet()) + err = s.processLegacy(validatorsInfoMap, 5, 5) + require.Nil(t, err) + require.Equal(t, nodesConfigEpoch1.MaxNumNodes, s.maxNodes) + + args.EpochNotifier.CheckEpoch(&block.Header{Epoch: 6, Nonce: 6}) + require.True(t, s.flagChangeMaxNodesEnabled.IsSet()) + err = s.processLegacy(validatorsInfoMap, 6, 6) + require.Nil(t, err) + require.Equal(t, nodesConfigEpoch6.MaxNumNodes, s.maxNodes) + + // simulate restart + args.EpochNotifier.CheckEpoch(&block.Header{Epoch: 0, Nonce: 0}) + args.EpochNotifier.CheckEpoch(&block.Header{Epoch: 6, Nonce: 6}) + require.True(t, s.flagChangeMaxNodesEnabled.IsSet()) + err = s.processLegacy(validatorsInfoMap, 6, 6) + require.Nil(t, err) + require.Equal(t, nodesConfigEpoch6.MaxNumNodes, s.maxNodes) 
+ + for epoch := uint32(7); epoch <= 20; epoch++ { + args.EpochNotifier.CheckEpoch(&block.Header{Epoch: epoch, Nonce: uint64(epoch)}) + require.False(t, s.flagChangeMaxNodesEnabled.IsSet()) + err = s.processLegacy(validatorsInfoMap, uint64(epoch), epoch) + require.Nil(t, err) + require.Equal(t, nodesConfigEpoch6.MaxNumNodes, s.maxNodes) + } + + // simulate restart + args.EpochNotifier.CheckEpoch(&block.Header{Epoch: 1, Nonce: 1}) + args.EpochNotifier.CheckEpoch(&block.Header{Epoch: 21, Nonce: 21}) + require.False(t, s.flagChangeMaxNodesEnabled.IsSet()) + err = s.processLegacy(validatorsInfoMap, 21, 21) + require.Nil(t, err) + require.Equal(t, nodesConfigEpoch6.MaxNumNodes, s.maxNodes) +} + +func TestSystemSCProcessor_ProcessSystemSmartContractNilInputValues(t *testing.T) { + t.Parallel() + + args, _ := createFullArgumentsForSystemSCProcessing(config.EnableEpochs{}, testscommon.CreateMemUnit()) + s, _ := NewSystemSCProcessor(args) + + t.Run("nil validators info map, expect error", func(t *testing.T) { + t.Parallel() + + blockHeader := &block.Header{Nonce: 4} + err := s.ProcessSystemSmartContract(nil, blockHeader) + require.True(t, strings.Contains(err.Error(), errNilValidatorsInfoMap.Error())) + require.True(t, strings.Contains(err.Error(), fmt.Sprintf("%d", blockHeader.GetNonce()))) + }) + + t.Run("nil header, expect error", func(t *testing.T) { + t.Parallel() + + validatorsInfoMap := state.NewShardValidatorsInfoMap() + err := s.ProcessSystemSmartContract(validatorsInfoMap, nil) + require.Equal(t, process.ErrNilHeaderHandler, err) + }) +} + +func TestLegacySystemSCProcessor_addNewlyStakedNodesToValidatorTrie(t *testing.T) { + t.Parallel() + + args, _ := createFullArgumentsForSystemSCProcessing(config.EnableEpochs{}, testscommon.CreateMemUnit()) + sysProc, _ := NewSystemSCProcessor(args) + + pubKey := []byte("pubKey") + existingValidator := &state.ValidatorInfo{ + PublicKey: pubKey, + List: "inactive", + } + + nonce := uint64(4) + newList := common.AuctionList + newlyAddedValidator := &state.ValidatorInfo{ + PublicKey: pubKey, + List: string(newList), + Index: uint32(nonce), + TempRating: sysProc.startRating, + Rating: sysProc.startRating, + RewardAddress: pubKey, + AccumulatedFees: big.NewInt(0), + } + + // Check before stakingV4, we should have both validators + validatorsInfo := state.NewShardValidatorsInfoMap() + _ = validatorsInfo.Add(existingValidator) + args.EpochNotifier.CheckEpoch(&block.Header{Epoch: stakingV4Step1EnableEpoch - 1, Nonce: 1}) + err := sysProc.addNewlyStakedNodesToValidatorTrie( + validatorsInfo, + [][]byte{pubKey, pubKey}, + nonce, + newList, + ) + require.Nil(t, err) + require.Equal(t, map[uint32][]state.ValidatorInfoHandler{ + 0: {existingValidator, newlyAddedValidator}, + }, validatorsInfo.GetShardValidatorsInfoMap()) + + // Check after stakingV4, we should only have the new one + validatorsInfo = state.NewShardValidatorsInfoMap() + _ = validatorsInfo.Add(existingValidator) + args.EpochNotifier.CheckEpoch(&block.Header{Epoch: stakingV4Step1EnableEpoch, Nonce: 1}) + err = sysProc.addNewlyStakedNodesToValidatorTrie( + validatorsInfo, + [][]byte{pubKey, pubKey}, + nonce, + newList, + ) + require.Nil(t, err) + require.Equal(t, map[uint32][]state.ValidatorInfoHandler{ + 0: {newlyAddedValidator}, + }, validatorsInfo.GetShardValidatorsInfoMap()) +} + +func requireTopUpPerNodes(t *testing.T, s epochStart.StakingDataProvider, stakedPubKeys [][]byte, topUp *big.Int) { + for _, pubKey := range stakedPubKeys { + owner, err := s.GetBlsKeyOwner(pubKey) + require.Nil(t, err) + 
+ totalTopUp := s.GetOwnersData()[owner].TotalTopUp + topUpPerNode := big.NewInt(0).Div(totalTopUp, big.NewInt(int64(len(stakedPubKeys)))) + require.Equal(t, topUp, topUpPerNode) + } +} + +// This func sets rating and temp rating with the start rating value used in createFullArgumentsForSystemSCProcessing +func createValidatorInfo(pubKey []byte, list common.PeerType, previousList common.PeerType, shardID uint32, owner []byte) *state.ValidatorInfo { + rating := uint32(5) + + return &state.ValidatorInfo{ + PublicKey: pubKey, + List: string(list), + PreviousList: string(previousList), + ShardId: shardID, + RewardAddress: owner, + AccumulatedFees: zero, + Rating: rating, + TempRating: rating, + } } diff --git a/epochStart/metachain/tableDisplayer.go b/epochStart/metachain/tableDisplayer.go new file mode 100644 index 00000000000..275805489dc --- /dev/null +++ b/epochStart/metachain/tableDisplayer.go @@ -0,0 +1,32 @@ +package metachain + +import ( + "fmt" + + "github.com/multiversx/mx-chain-core-go/display" +) + +type tableDisplayer struct { +} + +// NewTableDisplayer will create a component able to display tables in logger +func NewTableDisplayer() *tableDisplayer { + return &tableDisplayer{} +} + +// DisplayTable will display a table in the log +func (tb *tableDisplayer) DisplayTable(tableHeader []string, lines []*display.LineData, message string) { + table, err := display.CreateTableString(tableHeader, lines) + if err != nil { + log.Error("could not create table", "tableHeader", tableHeader, "error", err) + return + } + + msg := fmt.Sprintf("%s\n%s", message, table) + log.Debug(msg) +} + +// IsInterfaceNil checks if the underlying pointer is nil +func (tb *tableDisplayer) IsInterfaceNil() bool { + return tb == nil +} diff --git a/epochStart/metachain/validatorList.go b/epochStart/metachain/validatorList.go new file mode 100644 index 00000000000..75c38a1b3c2 --- /dev/null +++ b/epochStart/metachain/validatorList.go @@ -0,0 +1,27 @@ +package metachain + +import ( + "bytes" + + "github.com/multiversx/mx-chain-go/state" +) + +type validatorList []state.ValidatorInfoHandler + +// Len will return the length of the validatorList +func (v validatorList) Len() int { return len(v) } + +// Swap will interchange the objects on input indexes +func (v validatorList) Swap(i, j int) { v[i], v[j] = v[j], v[i] } + +// Less will return true if object on index i should appear before object in index j +// Sorting of validators should be by index and public key +func (v validatorList) Less(i, j int) bool { + if v[i].GetTempRating() == v[j].GetTempRating() { + if v[i].GetIndex() == v[j].GetIndex() { + return bytes.Compare(v[i].GetPublicKey(), v[j].GetPublicKey()) < 0 + } + return v[i].GetIndex() < v[j].GetIndex() + } + return v[i].GetTempRating() < v[j].GetTempRating() +} diff --git a/epochStart/metachain/validators.go b/epochStart/metachain/validators.go index 081944230db..e8eff547a09 100644 --- a/epochStart/metachain/validators.go +++ b/epochStart/metachain/validators.go @@ -93,7 +93,7 @@ func NewValidatorInfoCreator(args ArgsNewValidatorInfoCreator) (*validatorInfoCr } // CreateValidatorInfoMiniBlocks creates the validatorInfo mini blocks according to the provided validatorInfo map -func (vic *validatorInfoCreator) CreateValidatorInfoMiniBlocks(validatorsInfo map[uint32][]*state.ValidatorInfo) (block.MiniBlockSlice, error) { +func (vic *validatorInfoCreator) CreateValidatorInfoMiniBlocks(validatorsInfo state.ShardValidatorsInfoMapHandler) (block.MiniBlockSlice, error) { if validatorsInfo == nil { return nil, 
epochStart.ErrNilValidatorInfo } @@ -102,8 +102,9 @@ func (vic *validatorInfoCreator) CreateValidatorInfoMiniBlocks(validatorsInfo ma miniBlocks := make([]*block.MiniBlock, 0) + validatorsMap := validatorsInfo.GetShardValidatorsInfoMap() for shardId := uint32(0); shardId < vic.shardCoordinator.NumberOfShards(); shardId++ { - validators := validatorsInfo[shardId] + validators := validatorsMap[shardId] if len(validators) == 0 { continue } @@ -116,7 +117,7 @@ func (vic *validatorInfoCreator) CreateValidatorInfoMiniBlocks(validatorsInfo ma miniBlocks = append(miniBlocks, miniBlock) } - validators := validatorsInfo[core.MetachainShardId] + validators := validatorsMap[core.MetachainShardId] if len(validators) == 0 { return miniBlocks, nil } @@ -131,19 +132,19 @@ func (vic *validatorInfoCreator) CreateValidatorInfoMiniBlocks(validatorsInfo ma return miniBlocks, nil } -func (vic *validatorInfoCreator) createMiniBlock(validatorsInfo []*state.ValidatorInfo) (*block.MiniBlock, error) { +func (vic *validatorInfoCreator) createMiniBlock(validatorsInfo []state.ValidatorInfoHandler) (*block.MiniBlock, error) { miniBlock := &block.MiniBlock{} miniBlock.SenderShardID = vic.shardCoordinator.SelfId() miniBlock.ReceiverShardID = core.AllShardId miniBlock.TxHashes = make([][]byte, len(validatorsInfo)) miniBlock.Type = block.PeerBlock - validatorsCopy := make([]*state.ValidatorInfo, len(validatorsInfo)) - copy(validatorsCopy, validatorsInfo) + validatorCopy := make([]state.ValidatorInfoHandler, len(validatorsInfo)) + copy(validatorCopy, validatorsInfo) - vic.sortValidators(validatorsCopy) + vic.sortValidators(validatorCopy) - for index, validator := range validatorsCopy { + for index, validator := range validatorCopy { shardValidatorInfo := createShardValidatorInfo(validator) shardValidatorInfoData, err := vic.getShardValidatorInfoData(shardValidatorInfo) @@ -157,7 +158,7 @@ func (vic *validatorInfoCreator) createMiniBlock(validatorsInfo []*state.Validat return miniBlock, nil } -func (vic *validatorInfoCreator) sortValidators(validators []*state.ValidatorInfo) { +func (vic *validatorInfoCreator) sortValidators(validators []state.ValidatorInfoHandler) { if vic.enableEpochsHandler.IsFlagEnabled(common.DeterministicSortOnValidatorsInfoFixFlag) { vic.deterministicSortValidators(validators) return @@ -166,9 +167,9 @@ func (vic *validatorInfoCreator) sortValidators(validators []*state.ValidatorInf vic.legacySortValidators(validators) } -func (vic *validatorInfoCreator) deterministicSortValidators(validators []*state.ValidatorInfo) { +func (vic *validatorInfoCreator) deterministicSortValidators(validators []state.ValidatorInfoHandler) { sort.SliceStable(validators, func(a, b int) bool { - result := bytes.Compare(validators[a].PublicKey, validators[b].PublicKey) + result := bytes.Compare(validators[a].GetPublicKey(), validators[b].GetPublicKey()) if result != 0 { return result < 0 } @@ -177,7 +178,8 @@ func (vic *validatorInfoCreator) deterministicSortValidators(validators []*state bValidatorString := validators[b].GoString() // possible issues as we have 2 entries with the same public key. 
Print & assure deterministic sorting log.Warn("found 2 entries in validatorInfoCreator.deterministicSortValidators with the same public key", - "validator a", aValidatorString, "validator b", bValidatorString) + "validator a", aValidatorString, "validator b", bValidatorString, + "validator a pub key", validators[a].GetPublicKey(), "validator b pub key", validators[b].GetPublicKey()) // since the GoString will include all fields, we do not need to marshal the struct again. Strings comparison will // suffice in this case. @@ -185,12 +187,12 @@ func (vic *validatorInfoCreator) deterministicSortValidators(validators []*state }) } -func (vic *validatorInfoCreator) legacySortValidators(validators []*state.ValidatorInfo) { +func (vic *validatorInfoCreator) legacySortValidators(validators []state.ValidatorInfoHandler) { swap := func(a, b int) { validators[a], validators[b] = validators[b], validators[a] } less := func(a, b int) bool { - return bytes.Compare(validators[a].PublicKey, validators[b].PublicKey) < 0 + return bytes.Compare(validators[a].GetPublicKey(), validators[b].GetPublicKey()) < 0 } compatibility.SortSlice(swap, less, len(validators)) } @@ -219,18 +221,23 @@ func (vic *validatorInfoCreator) getShardValidatorInfoHash(shardValidatorInfo *s return shardValidatorInfoHash, nil } -func createShardValidatorInfo(validator *state.ValidatorInfo) *state.ShardValidatorInfo { +func createShardValidatorInfo(validator state.ValidatorInfoHandler) *state.ShardValidatorInfo { return &state.ShardValidatorInfo{ - PublicKey: validator.PublicKey, - ShardId: validator.ShardId, - List: validator.List, - Index: validator.Index, - TempRating: validator.TempRating, + PublicKey: validator.GetPublicKey(), + ShardId: validator.GetShardId(), + List: validator.GetList(), + PreviousList: validator.GetPreviousList(), + Index: validator.GetIndex(), + PreviousIndex: validator.GetPreviousIndex(), + TempRating: validator.GetTempRating(), } } // VerifyValidatorInfoMiniBlocks verifies if received validator info mini blocks are correct -func (vic *validatorInfoCreator) VerifyValidatorInfoMiniBlocks(miniBlocks []*block.MiniBlock, validatorsInfo map[uint32][]*state.ValidatorInfo) error { +func (vic *validatorInfoCreator) VerifyValidatorInfoMiniBlocks( + miniBlocks []*block.MiniBlock, + validatorsInfo state.ShardValidatorsInfoMapHandler, +) error { if len(miniBlocks) == 0 { return epochStart.ErrNilMiniblocks } diff --git a/epochStart/metachain/validators_test.go b/epochStart/metachain/validators_test.go index 9589943162f..662b0192044 100644 --- a/epochStart/metachain/validators_test.go +++ b/epochStart/metachain/validators_test.go @@ -30,90 +30,90 @@ import ( "github.com/stretchr/testify/require" ) -func createMockValidatorInfo() map[uint32][]*state.ValidatorInfo { - validatorInfo := map[uint32][]*state.ValidatorInfo{ - 0: { - &state.ValidatorInfo{ - PublicKey: []byte("a1"), - ShardId: 0, - List: "eligible", - Index: 1, - TempRating: 100, - Rating: 1000, - RewardAddress: []byte("rewardA1"), - LeaderSuccess: 1, - LeaderFailure: 2, - ValidatorSuccess: 3, - ValidatorFailure: 4, - TotalLeaderSuccess: 10, - TotalLeaderFailure: 20, - TotalValidatorSuccess: 30, - TotalValidatorFailure: 40, - NumSelectedInSuccessBlocks: 5, - AccumulatedFees: big.NewInt(100), - }, - &state.ValidatorInfo{ - PublicKey: []byte("a2"), - ShardId: 0, - List: "waiting", - Index: 2, - TempRating: 101, - Rating: 1001, - RewardAddress: []byte("rewardA2"), - LeaderSuccess: 6, - LeaderFailure: 7, - ValidatorSuccess: 8, - ValidatorFailure: 9, - TotalLeaderSuccess: 
60, - TotalLeaderFailure: 70, - TotalValidatorSuccess: 80, - TotalValidatorFailure: 90, - NumSelectedInSuccessBlocks: 10, - AccumulatedFees: big.NewInt(101), - }, - }, - core.MetachainShardId: { - &state.ValidatorInfo{ - PublicKey: []byte("m1"), - ShardId: core.MetachainShardId, - List: "eligible", - Index: 1, - TempRating: 100, - Rating: 1000, - RewardAddress: []byte("rewardM1"), - LeaderSuccess: 1, - LeaderFailure: 2, - ValidatorSuccess: 3, - ValidatorFailure: 4, - TotalLeaderSuccess: 10, - TotalLeaderFailure: 20, - TotalValidatorSuccess: 30, - TotalValidatorFailure: 40, - NumSelectedInSuccessBlocks: 5, - AccumulatedFees: big.NewInt(100), - }, - &state.ValidatorInfo{ - PublicKey: []byte("m0"), - ShardId: core.MetachainShardId, - List: "waiting", - Index: 2, - TempRating: 101, - Rating: 1001, - RewardAddress: []byte("rewardM2"), - LeaderSuccess: 6, - LeaderFailure: 7, - ValidatorSuccess: 8, - ValidatorFailure: 9, - TotalLeaderSuccess: 60, - TotalLeaderFailure: 70, - TotalValidatorSuccess: 80, - TotalValidatorFailure: 90, - NumSelectedInSuccessBlocks: 10, - AccumulatedFees: big.NewInt(101), - }, - }, - } - return validatorInfo +func createMockValidatorInfo() state.ShardValidatorsInfoMapHandler { + validatorsInfo := state.NewShardValidatorsInfoMap() + + _ = validatorsInfo.Add(&state.ValidatorInfo{ + PublicKey: []byte("a1"), + ShardId: 0, + List: "eligible", + Index: 1, + TempRating: 100, + Rating: 1000, + RewardAddress: []byte("rewardA1"), + LeaderSuccess: 1, + LeaderFailure: 2, + ValidatorSuccess: 3, + ValidatorFailure: 4, + TotalLeaderSuccess: 10, + TotalLeaderFailure: 20, + TotalValidatorSuccess: 30, + TotalValidatorFailure: 40, + NumSelectedInSuccessBlocks: 5, + AccumulatedFees: big.NewInt(100), + }) + + _ = validatorsInfo.Add(&state.ValidatorInfo{ + PublicKey: []byte("a2"), + ShardId: 0, + List: "waiting", + Index: 2, + TempRating: 101, + Rating: 1001, + RewardAddress: []byte("rewardA2"), + LeaderSuccess: 6, + LeaderFailure: 7, + ValidatorSuccess: 8, + ValidatorFailure: 9, + TotalLeaderSuccess: 60, + TotalLeaderFailure: 70, + TotalValidatorSuccess: 80, + TotalValidatorFailure: 90, + NumSelectedInSuccessBlocks: 10, + AccumulatedFees: big.NewInt(101), + }) + + _ = validatorsInfo.Add(&state.ValidatorInfo{ + PublicKey: []byte("m1"), + ShardId: core.MetachainShardId, + List: "eligible", + Index: 1, + TempRating: 100, + Rating: 1000, + RewardAddress: []byte("rewardM1"), + LeaderSuccess: 1, + LeaderFailure: 2, + ValidatorSuccess: 3, + ValidatorFailure: 4, + TotalLeaderSuccess: 10, + TotalLeaderFailure: 20, + TotalValidatorSuccess: 30, + TotalValidatorFailure: 40, + NumSelectedInSuccessBlocks: 5, + AccumulatedFees: big.NewInt(100), + }) + + _ = validatorsInfo.Add(&state.ValidatorInfo{ + PublicKey: []byte("m0"), + ShardId: core.MetachainShardId, + List: "waiting", + Index: 2, + TempRating: 101, + Rating: 1001, + RewardAddress: []byte("rewardM2"), + LeaderSuccess: 6, + LeaderFailure: 7, + ValidatorSuccess: 8, + ValidatorFailure: 9, + TotalLeaderSuccess: 60, + TotalLeaderFailure: 70, + TotalValidatorSuccess: 80, + TotalValidatorFailure: 90, + NumSelectedInSuccessBlocks: 10, + AccumulatedFees: big.NewInt(101), + }) + + return validatorsInfo } func createMockEpochValidatorInfoCreatorsArguments() ArgsNewValidatorInfoCreator { @@ -145,7 +145,7 @@ func createMockEpochValidatorInfoCreatorsArguments() ArgsNewValidatorInfoCreator return argsNewEpochEconomics } -func verifyMiniBlocks(bl *block.MiniBlock, infos []*state.ValidatorInfo, marshalledShardValidatorsInfo [][]byte, marshalizer marshal.Marshalizer) 
bool { +func verifyMiniBlocks(bl *block.MiniBlock, infos []state.ValidatorInfoHandler, marshalledShardValidatorsInfo [][]byte, marshalizer marshal.Marshalizer) bool { if bl.SenderShardID != core.MetachainShardId || bl.ReceiverShardID != core.AllShardId || len(bl.TxHashes) == 0 || @@ -153,10 +153,10 @@ func verifyMiniBlocks(bl *block.MiniBlock, infos []*state.ValidatorInfo, marshal return false } - validatorCopy := make([]*state.ValidatorInfo, len(infos)) + validatorCopy := make([]state.ValidatorInfoHandler, len(infos)) copy(validatorCopy, infos) sort.Slice(validatorCopy, func(a, b int) bool { - return bytes.Compare(validatorCopy[a].PublicKey, validatorCopy[b].PublicKey) < 0 + return bytes.Compare(validatorCopy[a].GetPublicKey(), validatorCopy[b].GetPublicKey()) < 0 }) for i, marshalledShardValidatorInfo := range marshalledShardValidatorsInfo { @@ -304,22 +304,22 @@ func TestEpochValidatorInfoCreator_CreateValidatorInfoMiniBlocksShouldBeCorrect( vic, _ := NewValidatorInfoCreator(arguments) mbs, _ := vic.CreateValidatorInfoMiniBlocks(validatorInfo) - shardValidatorInfo := make([]*state.ShardValidatorInfo, len(validatorInfo[0])) - marshalledShardValidatorInfo := make([][]byte, len(validatorInfo[0])) - for i := 0; i < len(validatorInfo[0]); i++ { - shardValidatorInfo[i] = createShardValidatorInfo(validatorInfo[0][i]) + shardValidatorInfo := make([]*state.ShardValidatorInfo, len(validatorInfo.GetShardValidatorsInfoMap()[0])) + marshalledShardValidatorInfo := make([][]byte, len(validatorInfo.GetShardValidatorsInfoMap()[0])) + for i := 0; i < len(validatorInfo.GetShardValidatorsInfoMap()[0]); i++ { + shardValidatorInfo[i] = createShardValidatorInfo(validatorInfo.GetShardValidatorsInfoMap()[0][i]) marshalledShardValidatorInfo[i], _ = arguments.Marshalizer.Marshal(shardValidatorInfo[i]) } - correctMB0 := verifyMiniBlocks(mbs[0], validatorInfo[0], marshalledShardValidatorInfo, arguments.Marshalizer) + correctMB0 := verifyMiniBlocks(mbs[0], validatorInfo.GetShardValidatorsInfoMap()[0], marshalledShardValidatorInfo, arguments.Marshalizer) require.True(t, correctMB0) - shardValidatorInfo = make([]*state.ShardValidatorInfo, len(validatorInfo[core.MetachainShardId])) - marshalledShardValidatorInfo = make([][]byte, len(validatorInfo[core.MetachainShardId])) - for i := 0; i < len(validatorInfo[core.MetachainShardId]); i++ { - shardValidatorInfo[i] = createShardValidatorInfo(validatorInfo[core.MetachainShardId][i]) + shardValidatorInfo = make([]*state.ShardValidatorInfo, len(validatorInfo.GetShardValidatorsInfoMap()[core.MetachainShardId])) + marshalledShardValidatorInfo = make([][]byte, len(validatorInfo.GetShardValidatorsInfoMap()[core.MetachainShardId])) + for i := 0; i < len(validatorInfo.GetShardValidatorsInfoMap()[core.MetachainShardId]); i++ { + shardValidatorInfo[i] = createShardValidatorInfo(validatorInfo.GetShardValidatorsInfoMap()[core.MetachainShardId][i]) marshalledShardValidatorInfo[i], _ = arguments.Marshalizer.Marshal(shardValidatorInfo[i]) } - correctMbMeta := verifyMiniBlocks(mbs[1], validatorInfo[core.MetachainShardId], marshalledShardValidatorInfo, arguments.Marshalizer) + correctMbMeta := verifyMiniBlocks(mbs[1], validatorInfo.GetShardValidatorsInfoMap()[core.MetachainShardId], marshalledShardValidatorInfo, arguments.Marshalizer) require.True(t, correctMbMeta) } @@ -398,11 +398,11 @@ func TestEpochValidatorInfoCreator_VerifyValidatorInfoMiniBlocksNilOneMiniblock( } func createValidatorInfoMiniBlocks( - validatorInfo map[uint32][]*state.ValidatorInfo, + validatorInfo 
state.ShardValidatorsInfoMapHandler, arguments ArgsNewValidatorInfoCreator, ) []*block.MiniBlock { miniblocks := make([]*block.MiniBlock, 0) - for _, validators := range validatorInfo { + for _, validators := range validatorInfo.GetShardValidatorsInfoMap() { if len(validators) == 0 { continue } @@ -413,10 +413,10 @@ func createValidatorInfoMiniBlocks( miniBlock.TxHashes = make([][]byte, len(validators)) miniBlock.Type = block.PeerBlock - validatorCopy := make([]*state.ValidatorInfo, len(validators)) + validatorCopy := make([]state.ValidatorInfoHandler, len(validators)) copy(validatorCopy, validators) sort.Slice(validatorCopy, func(a, b int) bool { - return bytes.Compare(validatorCopy[a].PublicKey, validatorCopy[b].PublicKey) < 0 + return bytes.Compare(validatorCopy[a].GetPublicKey(), validatorCopy[b].GetPublicKey()) < 0 }) for index, validator := range validatorCopy { @@ -1129,7 +1129,7 @@ func testCreateMiniblockBackwardsCompatibility(t *testing.T, deterministFixEnabl require.Equal(t, len(input), len(expected)) - validators := make([]*state.ValidatorInfo, 0, len(input)) + validators := state.NewShardValidatorsInfoMap() marshaller := &marshal.GogoProtoMarshalizer{} for _, marshalledData := range input { vinfo := &state.ValidatorInfo{} @@ -1139,7 +1139,8 @@ func testCreateMiniblockBackwardsCompatibility(t *testing.T, deterministFixEnabl err = marshaller.Unmarshal(vinfo, buffMarshalledData) require.Nil(t, err) - validators = append(validators, vinfo) + err = validators.Add(vinfo) + require.Nil(t, err) } arguments := createMockEpochValidatorInfoCreatorsArguments() @@ -1157,7 +1158,7 @@ func testCreateMiniblockBackwardsCompatibility(t *testing.T, deterministFixEnabl arguments.ValidatorInfoStorage = storer vic, _ := NewValidatorInfoCreator(arguments) - mb, err := vic.createMiniBlock(validators) + mb, err := vic.createMiniBlock(validators.GetAllValidatorsInfo()) require.Nil(t, err) // test all generated miniblock's "txhashes" are the same with the expected ones @@ -1274,7 +1275,7 @@ func TestValidatorInfoCreator_sortValidators(t *testing.T) { } vic, _ := NewValidatorInfoCreator(arguments) - list := []*state.ValidatorInfo{thirdValidator, secondValidator, firstValidator} + list := []state.ValidatorInfoHandler{thirdValidator, secondValidator, firstValidator} vic.sortValidators(list) assert.Equal(t, list[0], secondValidator) // order not changed for the ones with same public key @@ -1292,7 +1293,7 @@ func TestValidatorInfoCreator_sortValidators(t *testing.T) { } vic, _ := NewValidatorInfoCreator(arguments) - list := []*state.ValidatorInfo{thirdValidator, secondValidator, firstValidator} + list := []state.ValidatorInfoHandler{thirdValidator, secondValidator, firstValidator} vic.sortValidators(list) assert.Equal(t, list[0], firstValidator) // proper sorting diff --git a/epochStart/mock/builtInCostHandlerStub.go b/epochStart/mock/builtInCostHandlerStub.go deleted file mode 100644 index 4ee3b23b062..00000000000 --- a/epochStart/mock/builtInCostHandlerStub.go +++ /dev/null @@ -1,24 +0,0 @@ -package mock - -import ( - "github.com/multiversx/mx-chain-core-go/data" -) - -// BuiltInCostHandlerStub - -type BuiltInCostHandlerStub struct { -} - -// ComputeBuiltInCost - -func (b *BuiltInCostHandlerStub) ComputeBuiltInCost(_ data.TransactionWithFeeHandler) uint64 { - return 1 -} - -// IsBuiltInFuncCall - -func (b *BuiltInCostHandlerStub) IsBuiltInFuncCall(_ data.TransactionWithFeeHandler) bool { - return false -} - -// IsInterfaceNil - -func (b *BuiltInCostHandlerStub) IsInterfaceNil() bool { - return b == nil -} 
diff --git a/epochStart/mock/nodesSetupStub.go b/epochStart/mock/nodesSetupStub.go deleted file mode 100644 index 8a79c4330cb..00000000000 --- a/epochStart/mock/nodesSetupStub.go +++ /dev/null @@ -1,191 +0,0 @@ -package mock - -import ( - "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" -) - -// NodesSetupStub - -type NodesSetupStub struct { - InitialNodesInfoForShardCalled func(shardId uint32) ([]nodesCoordinator.GenesisNodeInfoHandler, []nodesCoordinator.GenesisNodeInfoHandler, error) - InitialNodesInfoCalled func() (map[uint32][]nodesCoordinator.GenesisNodeInfoHandler, map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) - GetStartTimeCalled func() int64 - GetRoundDurationCalled func() uint64 - GetShardConsensusGroupSizeCalled func() uint32 - GetMetaConsensusGroupSizeCalled func() uint32 - NumberOfShardsCalled func() uint32 - MinNumberOfNodesCalled func() uint32 - AllInitialNodesCalled func() []nodesCoordinator.GenesisNodeInfoHandler - GetAdaptivityCalled func() bool - GetHysteresisCalled func() float32 - GetShardIDForPubKeyCalled func(pubkey []byte) (uint32, error) - InitialEligibleNodesPubKeysForShardCalled func(shardId uint32) ([]string, error) - InitialNodesPubKeysCalled func() map[uint32][]string - MinNumberOfMetaNodesCalled func() uint32 - MinNumberOfShardNodesCalled func() uint32 - MinNumberOfNodesWithHysteresisCalled func() uint32 - MinShardHysteresisNodesCalled func() uint32 - MinMetaHysteresisNodesCalled func() uint32 -} - -// MinNumberOfNodes - -func (n *NodesSetupStub) MinNumberOfNodes() uint32 { - if n.MinNumberOfNodesCalled != nil { - return n.MinNumberOfNodesCalled() - } - return 1 -} - -// GetStartTime - -func (n *NodesSetupStub) GetStartTime() int64 { - if n.GetStartTimeCalled != nil { - return n.GetStartTimeCalled() - } - return 0 -} - -// GetRoundDuration - -func (n *NodesSetupStub) GetRoundDuration() uint64 { - if n.GetRoundDurationCalled != nil { - return n.GetRoundDurationCalled() - } - return 0 -} - -// GetShardConsensusGroupSize - -func (n *NodesSetupStub) GetShardConsensusGroupSize() uint32 { - if n.GetShardConsensusGroupSizeCalled != nil { - return n.GetShardConsensusGroupSizeCalled() - } - return 0 -} - -// GetMetaConsensusGroupSize - -func (n *NodesSetupStub) GetMetaConsensusGroupSize() uint32 { - if n.GetMetaConsensusGroupSizeCalled != nil { - return n.GetMetaConsensusGroupSizeCalled() - } - return 0 -} - -// NumberOfShards - -func (n *NodesSetupStub) NumberOfShards() uint32 { - if n.NumberOfShardsCalled != nil { - return n.NumberOfShardsCalled() - } - return 0 -} - -// InitialNodesInfoForShard - -func (n *NodesSetupStub) InitialNodesInfoForShard(shardId uint32) ([]nodesCoordinator.GenesisNodeInfoHandler, []nodesCoordinator.GenesisNodeInfoHandler, error) { - if n.InitialNodesInfoForShardCalled != nil { - return n.InitialNodesInfoForShardCalled(shardId) - } - return nil, nil, nil -} - -// InitialNodesInfo - -func (n *NodesSetupStub) InitialNodesInfo() (map[uint32][]nodesCoordinator.GenesisNodeInfoHandler, map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) { - if n.InitialNodesInfoCalled != nil { - return n.InitialNodesInfoCalled() - } - return nil, nil -} - -// AllInitialNodes - -func (n *NodesSetupStub) AllInitialNodes() []nodesCoordinator.GenesisNodeInfoHandler { - if n.AllInitialNodesCalled != nil { - return n.AllInitialNodesCalled() - } - return nil -} - -// GetAdaptivity - -func (n *NodesSetupStub) GetAdaptivity() bool { - if n.GetAdaptivityCalled != nil { - return n.GetAdaptivityCalled() - } - - return false -} - -// GetHysteresis - 
-func (n *NodesSetupStub) GetHysteresis() float32 { - if n.GetHysteresisCalled != nil { - return n.GetHysteresisCalled() - } - - return 0 -} - -// GetShardIDForPubKey - -func (n *NodesSetupStub) GetShardIDForPubKey(pubkey []byte) (uint32, error) { - if n.GetShardIDForPubKeyCalled != nil { - return n.GetShardIDForPubKeyCalled(pubkey) - } - return 0, nil -} - -// InitialEligibleNodesPubKeysForShard - -func (n *NodesSetupStub) InitialEligibleNodesPubKeysForShard(shardId uint32) ([]string, error) { - if n.InitialEligibleNodesPubKeysForShardCalled != nil { - return n.InitialEligibleNodesPubKeysForShardCalled(shardId) - } - - return []string{"val1", "val2"}, nil -} - -// InitialNodesPubKeys - -func (n *NodesSetupStub) InitialNodesPubKeys() map[uint32][]string { - if n.InitialNodesPubKeysCalled != nil { - return n.InitialNodesPubKeysCalled() - } - - return map[uint32][]string{0: {"val1", "val2"}} -} - -// MinNumberOfMetaNodes - -func (n *NodesSetupStub) MinNumberOfMetaNodes() uint32 { - if n.MinNumberOfMetaNodesCalled != nil { - return n.MinNumberOfMetaNodesCalled() - } - - return 1 -} - -// MinNumberOfShardNodes - -func (n *NodesSetupStub) MinNumberOfShardNodes() uint32 { - if n.MinNumberOfShardNodesCalled != nil { - return n.MinNumberOfShardNodesCalled() - } - - return 1 -} - -// MinNumberOfNodesWithHysteresis - -func (n *NodesSetupStub) MinNumberOfNodesWithHysteresis() uint32 { - if n.MinNumberOfNodesWithHysteresisCalled != nil { - return n.MinNumberOfNodesWithHysteresisCalled() - } - return n.MinNumberOfNodes() -} - -// MinShardHysteresisNodes - -func (n *NodesSetupStub) MinShardHysteresisNodes() uint32 { - if n.MinShardHysteresisNodesCalled != nil { - return n.MinShardHysteresisNodesCalled() - } - return 1 -} - -// MinMetaHysteresisNodes - -func (n *NodesSetupStub) MinMetaHysteresisNodes() uint32 { - if n.MinMetaHysteresisNodesCalled != nil { - return n.MinMetaHysteresisNodesCalled() - } - return 1 -} - -// IsInterfaceNil - -func (n *NodesSetupStub) IsInterfaceNil() bool { - return n == nil -} diff --git a/epochStart/mock/validatorStatisticsProcessorStub.go b/epochStart/mock/validatorStatisticsProcessorStub.go deleted file mode 100644 index 4bb574a5ba5..00000000000 --- a/epochStart/mock/validatorStatisticsProcessorStub.go +++ /dev/null @@ -1,38 +0,0 @@ -package mock - -import ( - "github.com/multiversx/mx-chain-core-go/data" -) - -// ValidatorStatisticsProcessorStub - -type ValidatorStatisticsProcessorStub struct { - ProcessCalled func(validatorInfo data.ShardValidatorInfoHandler) error - CommitCalled func() ([]byte, error) - IsInterfaceNilCalled func() bool -} - -// Process - -func (pm *ValidatorStatisticsProcessorStub) Process(validatorInfo data.ShardValidatorInfoHandler) error { - if pm.ProcessCalled != nil { - return pm.ProcessCalled(validatorInfo) - } - - return nil -} - -// Commit - -func (pm *ValidatorStatisticsProcessorStub) Commit() ([]byte, error) { - if pm.CommitCalled != nil { - return pm.CommitCalled() - } - - return nil, nil -} - -// IsInterfaceNil - -func (pm *ValidatorStatisticsProcessorStub) IsInterfaceNil() bool { - if pm.IsInterfaceNilCalled != nil { - return pm.IsInterfaceNilCalled() - } - return false -} diff --git a/epochStart/notifier/errors.go b/epochStart/notifier/errors.go new file mode 100644 index 00000000000..eba24016fa1 --- /dev/null +++ b/epochStart/notifier/errors.go @@ -0,0 +1,5 @@ +package notifier + +import "errors" + +var errNoMaxNodesConfigChangeForStakingV4 = errors.New("no MaxNodesChangeEnableEpoch config found for EpochEnable = 
StakingV4Step3EnableEpoch") diff --git a/epochStart/notifier/nodesConfigProvider.go b/epochStart/notifier/nodesConfigProvider.go new file mode 100644 index 00000000000..273f750ae44 --- /dev/null +++ b/epochStart/notifier/nodesConfigProvider.go @@ -0,0 +1,82 @@ +package notifier + +import ( + "sort" + "sync" + + "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/epochStart" + "github.com/multiversx/mx-chain-go/process" +) + +type nodesConfigProvider struct { + mutex sync.RWMutex + currentEpoch uint32 + currentNodesConfig config.MaxNodesChangeConfig + allNodesConfigs []config.MaxNodesChangeConfig +} + +// NewNodesConfigProvider returns a new instance of nodesConfigProvider, which provides the current +// config.MaxNodesChangeConfig based on the current epoch +func NewNodesConfigProvider( + epochNotifier process.EpochNotifier, + maxNodesEnableConfig []config.MaxNodesChangeConfig, +) (*nodesConfigProvider, error) { + if check.IfNil(epochNotifier) { + return nil, epochStart.ErrNilEpochNotifier + } + + ncp := &nodesConfigProvider{ + allNodesConfigs: make([]config.MaxNodesChangeConfig, len(maxNodesEnableConfig)), + } + copy(ncp.allNodesConfigs, maxNodesEnableConfig) + ncp.sortConfigs() + epochNotifier.RegisterNotifyHandler(ncp) + + return ncp, nil +} + +func (ncp *nodesConfigProvider) sortConfigs() { + ncp.mutex.Lock() + defer ncp.mutex.Unlock() + + sort.Slice(ncp.allNodesConfigs, func(i, j int) bool { + return ncp.allNodesConfigs[i].EpochEnable < ncp.allNodesConfigs[j].EpochEnable + }) +} + +// GetAllNodesConfig returns all config.MaxNodesChangeConfig +func (ncp *nodesConfigProvider) GetAllNodesConfig() []config.MaxNodesChangeConfig { + ncp.mutex.RLock() + defer ncp.mutex.RUnlock() + + return ncp.allNodesConfigs +} + +// GetCurrentNodesConfig returns the current config.MaxNodesChangeConfig, based on epoch +func (ncp *nodesConfigProvider) GetCurrentNodesConfig() config.MaxNodesChangeConfig { + ncp.mutex.RLock() + defer ncp.mutex.RUnlock() + + return ncp.currentNodesConfig +} + +// EpochConfirmed is called whenever a new epoch is confirmed +func (ncp *nodesConfigProvider) EpochConfirmed(epoch uint32, _ uint64) { + ncp.mutex.Lock() + defer ncp.mutex.Unlock() + + for _, maxNodesConfig := range ncp.allNodesConfigs { + if epoch >= maxNodesConfig.EpochEnable { + ncp.currentNodesConfig = maxNodesConfig + } + } + + ncp.currentEpoch = epoch +} + +// IsInterfaceNil checks if the underlying pointer is nil +func (ncp *nodesConfigProvider) IsInterfaceNil() bool { + return ncp == nil +} diff --git a/epochStart/notifier/nodesConfigProviderAPI.go b/epochStart/notifier/nodesConfigProviderAPI.go new file mode 100644 index 00000000000..3db0d028ece --- /dev/null +++ b/epochStart/notifier/nodesConfigProviderAPI.go @@ -0,0 +1,71 @@ +package notifier + +import ( + "fmt" + + "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/process" +) + +type nodesConfigProviderAPI struct { + *nodesConfigProvider + stakingV4Step2Epoch uint32 + stakingV4Step3MaxNodesConfig config.MaxNodesChangeConfig +} + +// NewNodesConfigProviderAPI returns a new instance of nodes config provider for API calls only, which provides the current +// max nodes change config based on the current epoch +func NewNodesConfigProviderAPI( + epochNotifier process.EpochNotifier, + cfg config.EnableEpochs, +) (*nodesConfigProviderAPI, error) { + nodesCfgProvider, err := NewNodesConfigProvider(epochNotifier, cfg.MaxNodesChangeEnableEpoch) + if err != nil 
{ + return nil, err + } + + stakingV4Step3MaxNodesConfig, err := getStakingV4Step3MaxNodesConfig(nodesCfgProvider.allNodesConfigs, cfg.StakingV4Step3EnableEpoch) + if err != nil { + return nil, err + } + + return &nodesConfigProviderAPI{ + nodesConfigProvider: nodesCfgProvider, + stakingV4Step2Epoch: cfg.StakingV4Step2EnableEpoch, + stakingV4Step3MaxNodesConfig: stakingV4Step3MaxNodesConfig, + }, nil +} + +func getStakingV4Step3MaxNodesConfig( + allNodesConfigs []config.MaxNodesChangeConfig, + stakingV4Step3EnableEpoch uint32, +) (config.MaxNodesChangeConfig, error) { + for _, cfg := range allNodesConfigs { + if cfg.EpochEnable == stakingV4Step3EnableEpoch { + return cfg, nil + } + } + + return config.MaxNodesChangeConfig{}, fmt.Errorf("%w when creating api nodes config provider", errNoMaxNodesConfigChangeForStakingV4) +} + +// GetCurrentNodesConfig retrieves the current configuration of nodes. However, when invoked during epoch stakingV4 step 2 +// through API calls, it will provide the nodes configuration as it will appear in epoch stakingV4 step 3. This adjustment +// is made because, with the transition to step 3 at the epoch change, the maximum number of nodes will be reduced. +// Therefore, calling this API during step 2 aims to offer a preview of the upcoming epoch, accurately reflecting the +// adjusted number of nodes that will qualify from the auction. +func (ncp *nodesConfigProviderAPI) GetCurrentNodesConfig() config.MaxNodesChangeConfig { + ncp.mutex.RLock() + defer ncp.mutex.RUnlock() + + if ncp.currentEpoch == ncp.stakingV4Step2Epoch { + return ncp.stakingV4Step3MaxNodesConfig + } + + return ncp.currentNodesConfig +} + +// IsInterfaceNil checks if the underlying pointer is nil +func (ncp *nodesConfigProviderAPI) IsInterfaceNil() bool { + return ncp == nil +} diff --git a/epochStart/notifier/nodesConfigProviderAPI_test.go b/epochStart/notifier/nodesConfigProviderAPI_test.go new file mode 100644 index 00000000000..5438d533741 --- /dev/null +++ b/epochStart/notifier/nodesConfigProviderAPI_test.go @@ -0,0 +1,95 @@ +package notifier + +import ( + "testing" + + "github.com/multiversx/mx-chain-core-go/data/block" + "github.com/multiversx/mx-chain-go/common/forking" + "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/process" + epochNotifierMock "github.com/multiversx/mx-chain-go/testscommon/epochNotifier" + "github.com/stretchr/testify/require" +) + +func getEnableEpochCfg() config.EnableEpochs { + return config.EnableEpochs{ + StakingV4Step1EnableEpoch: 2, + StakingV4Step2EnableEpoch: 3, + StakingV4Step3EnableEpoch: 4, + MaxNodesChangeEnableEpoch: []config.MaxNodesChangeConfig{ + { + EpochEnable: 0, + MaxNumNodes: 36, + NodesToShufflePerShard: 4, + }, + { + EpochEnable: 1, + MaxNumNodes: 64, + NodesToShufflePerShard: 2, + }, + { + EpochEnable: 4, + MaxNumNodes: 56, + NodesToShufflePerShard: 2, + }, + }, + } +} + +func TestNewNodesConfigProviderAPI(t *testing.T) { + t.Parallel() + + t.Run("nil epoch notifier, should return error", func(t *testing.T) { + ncp, err := NewNodesConfigProviderAPI(nil, config.EnableEpochs{}) + require.Equal(t, process.ErrNilEpochNotifier, err) + require.Nil(t, ncp) + }) + + t.Run("no nodes config for staking v4 step 3, should return error", func(t *testing.T) { + ncp, err := NewNodesConfigProviderAPI(&epochNotifierMock.EpochNotifierStub{}, config.EnableEpochs{}) + require.ErrorIs(t, err, errNoMaxNodesConfigChangeForStakingV4) + require.Nil(t, ncp) + }) + + t.Run("should work", func(t *testing.T) { + ncp, err := 
NewNodesConfigProviderAPI(&epochNotifierMock.EpochNotifierStub{}, getEnableEpochCfg()) + require.Nil(t, err) + require.False(t, ncp.IsInterfaceNil()) + }) +} + +func TestNodesConfigProviderAPI_GetCurrentNodesConfig(t *testing.T) { + t.Parallel() + + epochNotifier := forking.NewGenericEpochNotifier() + enableEpochCfg := getEnableEpochCfg() + ncp, _ := NewNodesConfigProviderAPI(epochNotifier, enableEpochCfg) + + maxNodesConfig1 := enableEpochCfg.MaxNodesChangeEnableEpoch[0] + maxNodesConfig2 := enableEpochCfg.MaxNodesChangeEnableEpoch[1] + maxNodesConfigStakingV4Step3 := enableEpochCfg.MaxNodesChangeEnableEpoch[2] + + require.Equal(t, maxNodesConfig1, ncp.GetCurrentNodesConfig()) + + epochNotifier.CheckEpoch(&block.Header{Epoch: enableEpochCfg.StakingV4Step1EnableEpoch}) + require.Equal(t, maxNodesConfig2, ncp.GetCurrentNodesConfig()) + + epochNotifier.CheckEpoch(&block.Header{Epoch: enableEpochCfg.StakingV4Step2EnableEpoch}) + require.Equal(t, maxNodesConfigStakingV4Step3, ncp.GetCurrentNodesConfig()) + + epochNotifier.CheckEpoch(&block.Header{Epoch: enableEpochCfg.StakingV4Step3EnableEpoch}) + require.Equal(t, maxNodesConfigStakingV4Step3, ncp.GetCurrentNodesConfig()) + + epochNotifier.CheckEpoch(&block.Header{Epoch: enableEpochCfg.StakingV4Step3EnableEpoch + 1}) + require.Equal(t, maxNodesConfigStakingV4Step3, ncp.GetCurrentNodesConfig()) + + // simulate restart + epochNotifier.CheckEpoch(&block.Header{Epoch: 0}) + epochNotifier.CheckEpoch(&block.Header{Epoch: enableEpochCfg.StakingV4Step2EnableEpoch}) + require.Equal(t, maxNodesConfigStakingV4Step3, ncp.GetCurrentNodesConfig()) + + // simulate restart + epochNotifier.CheckEpoch(&block.Header{Epoch: 0}) + epochNotifier.CheckEpoch(&block.Header{Epoch: enableEpochCfg.StakingV4Step3EnableEpoch}) + require.Equal(t, maxNodesConfigStakingV4Step3, ncp.GetCurrentNodesConfig()) +} diff --git a/epochStart/notifier/nodesConfigProvider_test.go b/epochStart/notifier/nodesConfigProvider_test.go new file mode 100644 index 00000000000..a813ff4b48d --- /dev/null +++ b/epochStart/notifier/nodesConfigProvider_test.go @@ -0,0 +1,121 @@ +package notifier + +import ( + "testing" + + "github.com/multiversx/mx-chain-core-go/data/block" + "github.com/multiversx/mx-chain-go/common/forking" + "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/process" + "github.com/stretchr/testify/require" +) + +func TestNewNodesConfigProvider(t *testing.T) { + t.Parallel() + + ncp, err := NewNodesConfigProvider(nil, nil) + require.Equal(t, process.ErrNilEpochNotifier, err) + require.True(t, ncp.IsInterfaceNil()) + + epochNotifier := forking.NewGenericEpochNotifier() + ncp, err = NewNodesConfigProvider(epochNotifier, nil) + require.Nil(t, err) + require.False(t, ncp.IsInterfaceNil()) +} + +func TestNodesConfigProvider_GetAllNodesConfigSorted(t *testing.T) { + t.Parallel() + + nodesConfigEpoch0 := config.MaxNodesChangeConfig{ + EpochEnable: 0, + MaxNumNodes: 36, + NodesToShufflePerShard: 4, + } + nodesConfigEpoch1 := config.MaxNodesChangeConfig{ + EpochEnable: 1, + MaxNumNodes: 56, + NodesToShufflePerShard: 2, + } + nodesConfigEpoch6 := config.MaxNodesChangeConfig{ + EpochEnable: 6, + MaxNumNodes: 48, + NodesToShufflePerShard: 1, + } + + unsortedNodesConfig := []config.MaxNodesChangeConfig{ + nodesConfigEpoch6, + nodesConfigEpoch0, + nodesConfigEpoch1, + } + sortedNodesConfig := []config.MaxNodesChangeConfig{ + nodesConfigEpoch0, + nodesConfigEpoch1, + nodesConfigEpoch6, + } + + epochNotifier := forking.NewGenericEpochNotifier() + ncp, _ := 
NewNodesConfigProvider(epochNotifier, unsortedNodesConfig) + require.Equal(t, sortedNodesConfig, ncp.GetAllNodesConfig()) +} + +func TestNodesConfigProvider_EpochConfirmedCorrectMaxNumNodesAfterNodeRestart(t *testing.T) { + t.Parallel() + + nodesConfigEpoch0 := config.MaxNodesChangeConfig{ + EpochEnable: 0, + MaxNumNodes: 36, + NodesToShufflePerShard: 4, + } + nodesConfigEpoch1 := config.MaxNodesChangeConfig{ + EpochEnable: 1, + MaxNumNodes: 56, + NodesToShufflePerShard: 2, + } + nodesConfigEpoch6 := config.MaxNodesChangeConfig{ + EpochEnable: 6, + MaxNumNodes: 48, + NodesToShufflePerShard: 1, + } + + allNodesConfig := []config.MaxNodesChangeConfig{ + nodesConfigEpoch0, + nodesConfigEpoch1, + nodesConfigEpoch6, + } + epochNotifier := forking.NewGenericEpochNotifier() + ncp, _ := NewNodesConfigProvider(epochNotifier, allNodesConfig) + + epochNotifier.CheckEpoch(&block.Header{Epoch: 0}) + require.Equal(t, nodesConfigEpoch0, ncp.GetCurrentNodesConfig()) + + epochNotifier.CheckEpoch(&block.Header{Epoch: 1}) + require.Equal(t, nodesConfigEpoch1, ncp.GetCurrentNodesConfig()) + + for epoch := uint32(2); epoch <= 5; epoch++ { + epochNotifier.CheckEpoch(&block.Header{Epoch: epoch}) + require.Equal(t, nodesConfigEpoch1, ncp.GetCurrentNodesConfig()) + } + + // simulate restart + epochNotifier.CheckEpoch(&block.Header{Epoch: 0}) + epochNotifier.CheckEpoch(&block.Header{Epoch: 5}) + require.Equal(t, nodesConfigEpoch1, ncp.GetCurrentNodesConfig()) + + epochNotifier.CheckEpoch(&block.Header{Epoch: 6}) + require.Equal(t, nodesConfigEpoch6, ncp.GetCurrentNodesConfig()) + + // simulate restart + epochNotifier.CheckEpoch(&block.Header{Epoch: 0}) + epochNotifier.CheckEpoch(&block.Header{Epoch: 6}) + require.Equal(t, nodesConfigEpoch6, ncp.GetCurrentNodesConfig()) + + for epoch := uint32(7); epoch <= 20; epoch++ { + epochNotifier.CheckEpoch(&block.Header{Epoch: epoch}) + require.Equal(t, nodesConfigEpoch6, ncp.GetCurrentNodesConfig()) + } + + // simulate restart + epochNotifier.CheckEpoch(&block.Header{Epoch: 1}) + epochNotifier.CheckEpoch(&block.Header{Epoch: 21}) + require.Equal(t, nodesConfigEpoch6, ncp.GetCurrentNodesConfig()) +} diff --git a/facade/initial/initialNodeFacade.go b/facade/initial/initialNodeFacade.go index 1d45caace60..7411a2078e9 100644 --- a/facade/initial/initialNodeFacade.go +++ b/facade/initial/initialNodeFacade.go @@ -156,6 +156,11 @@ func (inf *initialNodeFacade) ValidatorStatisticsApi() (map[string]*validator.Va return nil, errNodeStarting } +// AuctionListApi returns nil and error +func (inf *initialNodeFacade) AuctionListApi() ([]*common.AuctionListValidatorAPIResponse, error) { + return nil, errNodeStarting +} + // SendBulkTransactions returns 0 and error func (inf *initialNodeFacade) SendBulkTransactions(_ []*transaction.Transaction) (uint64, error) { return uint64(0), errNodeStarting @@ -426,6 +431,11 @@ func (inf *initialNodeFacade) GetManagedKeys() []string { return nil } +// GetLoadedKeys returns nil +func (inf *initialNodeFacade) GetLoadedKeys() []string { + return nil +} + // GetEligibleManagedKeys returns nil and error func (inf *initialNodeFacade) GetEligibleManagedKeys() ([]string, error) { return nil, errNodeStarting diff --git a/facade/initial/initialNodeFacade_test.go b/facade/initial/initialNodeFacade_test.go index 2633349d69f..294f0accfca 100644 --- a/facade/initial/initialNodeFacade_test.go +++ b/facade/initial/initialNodeFacade_test.go @@ -95,6 +95,10 @@ func TestInitialNodeFacade_AllMethodsShouldNotPanic(t *testing.T) { assert.Nil(t, v1) assert.Equal(t, 
errNodeStarting, err) + v2, err := inf.AuctionListApi() + assert.Nil(t, v2) + assert.Equal(t, errNodeStarting, err) + u1, err := inf.SendBulkTransactions(nil) assert.Equal(t, uint64(0), u1) assert.Equal(t, errNodeStarting, err) @@ -329,12 +333,15 @@ func TestInitialNodeFacade_AllMethodsShouldNotPanic(t *testing.T) { assert.Nil(t, txPoolGaps) assert.Equal(t, errNodeStarting, err) - cnt := inf.GetManagedKeysCount() - assert.Zero(t, cnt) + count := inf.GetManagedKeysCount() + assert.Zero(t, count) keys := inf.GetManagedKeys() assert.Nil(t, keys) + keys = inf.GetLoadedKeys() + assert.Nil(t, keys) + keys, err = inf.GetEligibleManagedKeys() assert.Nil(t, keys) assert.Equal(t, errNodeStarting, err) diff --git a/facade/interface.go b/facade/interface.go index 4c782e6a574..07488622a96 100644 --- a/facade/interface.go +++ b/facade/interface.go @@ -5,6 +5,7 @@ import ( "math/big" "github.com/multiversx/mx-chain-core-go/core" + coreData "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-core-go/data/alteredAccount" "github.com/multiversx/mx-chain-core-go/data/api" "github.com/multiversx/mx-chain-core-go/data/esdt" @@ -86,6 +87,8 @@ type NodeHandler interface { // ValidatorStatisticsApi return the statistics for all the validators ValidatorStatisticsApi() (map[string]*validator.ValidatorStatistics, error) + + AuctionListApi() ([]*common.AuctionListValidatorAPIResponse, error) DirectTrigger(epoch uint32, withEarlyEndOfEpoch bool) error IsSelfTrigger() bool @@ -106,7 +109,7 @@ type NodeHandler interface { // TransactionSimulatorProcessor defines the actions which a transaction simulator processor has to implement type TransactionSimulatorProcessor interface { - ProcessTx(tx *transaction.Transaction) (*txSimData.SimulationResultsWithVMOutput, error) + ProcessTx(tx *transaction.Transaction, currentHeader coreData.HeaderHandler) (*txSimData.SimulationResultsWithVMOutput, error) IsInterfaceNil() bool } @@ -142,6 +145,7 @@ type ApiResolver interface { GetGasConfigs() map[string]map[string]uint64 GetManagedKeysCount() int GetManagedKeys() []string + GetLoadedKeys() []string GetEligibleManagedKeys() ([]string, error) GetWaitingManagedKeys() ([]string, error) GetWaitingEpochsLeftForPublicKey(publicKey string) (uint32, error) diff --git a/facade/mock/apiResolverStub.go b/facade/mock/apiResolverStub.go index e2ab9aa3707..33bae8518aa 100644 --- a/facade/mock/apiResolverStub.go +++ b/facade/mock/apiResolverStub.go @@ -46,6 +46,7 @@ type ApiResolverStub struct { GetGasConfigsCalled func() map[string]map[string]uint64 GetManagedKeysCountCalled func() int GetManagedKeysCalled func() []string + GetLoadedKeysCalled func() []string GetEligibleManagedKeysCalled func() ([]string, error) GetWaitingManagedKeysCalled func() ([]string, error) GetWaitingEpochsLeftForPublicKeyCalled func(publicKey string) (uint32, error) @@ -309,6 +310,14 @@ func (ars *ApiResolverStub) GetManagedKeys() []string { return make([]string, 0) } +// GetLoadedKeys - +func (ars *ApiResolverStub) GetLoadedKeys() []string { + if ars.GetLoadedKeysCalled != nil { + return ars.GetLoadedKeysCalled() + } + return make([]string, 0) +} + // GetEligibleManagedKeys - func (ars *ApiResolverStub) GetEligibleManagedKeys() ([]string, error) { if ars.GetEligibleManagedKeysCalled != nil { diff --git a/facade/mock/nodeStub.go b/facade/mock/nodeStub.go index 254f92218ba..74c9cbea536 100644 --- a/facade/mock/nodeStub.go +++ b/facade/mock/nodeStub.go @@ -54,6 +54,7 @@ type NodeStub struct { VerifyProofCalled func(rootHash string, address string, 
proof [][]byte) (bool, error) GetTokenSupplyCalled func(token string) (*api.ESDTSupply, error) IsDataTrieMigratedCalled func(address string, options api.AccountQueryOptions) (bool, error) + AuctionListApiCalled func() ([]*common.AuctionListValidatorAPIResponse, error) } // GetProof - @@ -139,7 +140,11 @@ func (ns *NodeStub) DecodeAddressPubkey(pk string) ([]byte, error) { // GetBalance - func (ns *NodeStub) GetBalance(address string, options api.AccountQueryOptions) (*big.Int, api.BlockInfo, error) { - return ns.GetBalanceCalled(address, options) + if ns.GetBalanceCalled != nil { + return ns.GetBalanceCalled(address, options) + } + + return nil, api.BlockInfo{}, nil } // CreateTransaction - @@ -150,22 +155,38 @@ func (ns *NodeStub) CreateTransaction(txArgs *external.ArgsCreateTransaction) (* // ValidateTransaction - func (ns *NodeStub) ValidateTransaction(tx *transaction.Transaction) error { - return ns.ValidateTransactionHandler(tx) + if ns.ValidateTransactionHandler != nil { + return ns.ValidateTransactionHandler(tx) + } + + return nil } // ValidateTransactionForSimulation - func (ns *NodeStub) ValidateTransactionForSimulation(tx *transaction.Transaction, bypassSignature bool) error { - return ns.ValidateTransactionForSimulationCalled(tx, bypassSignature) + if ns.ValidateTransactionForSimulationCalled != nil { + return ns.ValidateTransactionForSimulationCalled(tx, bypassSignature) + } + + return nil } // SendBulkTransactions - func (ns *NodeStub) SendBulkTransactions(txs []*transaction.Transaction) (uint64, error) { - return ns.SendBulkTransactionsHandler(txs) + if ns.SendBulkTransactionsHandler != nil { + return ns.SendBulkTransactionsHandler(txs) + } + + return 0, nil } // GetAccount - func (ns *NodeStub) GetAccount(address string, options api.AccountQueryOptions) (api.AccountResponse, api.BlockInfo, error) { - return ns.GetAccountCalled(address, options) + if ns.GetAccountCalled != nil { + return ns.GetAccountCalled(address, options) + } + + return api.AccountResponse{}, api.BlockInfo{}, nil } // GetCode - @@ -179,22 +200,47 @@ func (ns *NodeStub) GetCode(codeHash []byte, options api.AccountQueryOptions) ([ // GetHeartbeats - func (ns *NodeStub) GetHeartbeats() []data.PubKeyHeartbeat { - return ns.GetHeartbeatsHandler() + if ns.GetHeartbeatsHandler != nil { + return ns.GetHeartbeatsHandler() + } + + return nil } // ValidatorStatisticsApi - func (ns *NodeStub) ValidatorStatisticsApi() (map[string]*validator.ValidatorStatistics, error) { - return ns.ValidatorStatisticsApiCalled() + if ns.ValidatorStatisticsApiCalled != nil { + return ns.ValidatorStatisticsApiCalled() + } + + return nil, nil +} + +// AuctionListApi - +func (ns *NodeStub) AuctionListApi() ([]*common.AuctionListValidatorAPIResponse, error) { + if ns.AuctionListApiCalled != nil { + return ns.AuctionListApiCalled() + } + + return nil, nil } // DirectTrigger - func (ns *NodeStub) DirectTrigger(epoch uint32, withEarlyEndOfEpoch bool) error { - return ns.DirectTriggerCalled(epoch, withEarlyEndOfEpoch) + if ns.DirectTriggerCalled != nil { + return ns.DirectTriggerCalled(epoch, withEarlyEndOfEpoch) + } + + return nil } // IsSelfTrigger - func (ns *NodeStub) IsSelfTrigger() bool { - return ns.IsSelfTriggerCalled() + if ns.IsSelfTriggerCalled != nil { + return ns.IsSelfTriggerCalled() + } + + return false } // GetQueryHandler - diff --git a/facade/nodeFacade.go b/facade/nodeFacade.go index f91a405a96c..03dd77c76b7 100644 --- a/facade/nodeFacade.go +++ b/facade/nodeFacade.go @@ -163,7 +163,7 @@ func (nf *nodeFacade) 
RestAPIServerDebugMode() bool { // RestApiInterface returns the interface on which the rest API should start on, based on the config file provided. // The API will start on the DefaultRestInterface value unless a correct value is passed or -// the value is explicitly set to off, in which case it will not start at all +// the value is explicitly set to off, in which case it will not start at all func (nf *nodeFacade) RestApiInterface() string { if nf.config.RestApiInterface == "" { return DefaultRestInterface @@ -284,6 +284,11 @@ func (nf *nodeFacade) ValidatorStatisticsApi() (map[string]*validator.ValidatorS return nf.node.ValidatorStatisticsApi() } +// AuctionListApi will return the data about the validators in the auction list +func (nf *nodeFacade) AuctionListApi() ([]*common.AuctionListValidatorAPIResponse, error) { + return nf.node.AuctionListApi() +} + // SendBulkTransactions will send a bulk of transactions on the topic channel func (nf *nodeFacade) SendBulkTransactions(txs []*transaction.Transaction) (uint64, error) { return nf.node.SendBulkTransactions(txs) @@ -590,11 +595,16 @@ func (nf *nodeFacade) GetManagedKeysCount() int { return nf.apiResolver.GetManagedKeysCount() } -// GetManagedKeys returns all keys managed by the current node when running in multikey mode +// GetManagedKeys returns all keys that should act as a validator (main, or a backup that took over) and will be managed by this node func (nf *nodeFacade) GetManagedKeys() []string { return nf.apiResolver.GetManagedKeys() } +// GetLoadedKeys returns all keys that were loaded by this node +func (nf *nodeFacade) GetLoadedKeys() []string { + return nf.apiResolver.GetLoadedKeys() +} + // GetEligibleManagedKeys returns the eligible managed keys when node is running in multikey mode func (nf *nodeFacade) GetEligibleManagedKeys() ([]string, error) { return nf.apiResolver.GetEligibleManagedKeys() diff --git a/facade/nodeFacade_test.go b/facade/nodeFacade_test.go index 299fa1cfb50..21823b60b6e 100644 --- a/facade/nodeFacade_test.go +++ b/facade/nodeFacade_test.go @@ -1317,6 +1317,22 @@ func TestNodeFacade_GetEligibleManagedKeys(t *testing.T) { assert.Equal(t, expectedResult, result) } +func TestNodeFacade_GetLoadedKeys(t *testing.T) { + t.Parallel() + + providedLoadedKeys := []string{"pk1", "pk2"} + arg := createMockArguments() + arg.ApiResolver = &mock.ApiResolverStub{ + GetLoadedKeysCalled: func() []string { + return providedLoadedKeys + }, + } + nf, _ := NewNodeFacade(arg) + + keys := nf.GetLoadedKeys() + require.Equal(t, providedLoadedKeys, keys) +} + func TestNodeFacade_GetWaitingEpochsLeftForPublicKey(t *testing.T) { t.Parallel() diff --git a/factory/api/apiResolverFactory.go b/factory/api/apiResolverFactory.go index ed3610ca42d..889be426869 100644 --- a/factory/api/apiResolverFactory.go +++ b/factory/api/apiResolverFactory.go @@ -11,6 +11,7 @@ import ( "github.com/multiversx/mx-chain-core-go/marshal" "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/common/disabled" + "github.com/multiversx/mx-chain-go/common/operationmodes" "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/dataRetriever/blockchain" @@ -71,40 +72,42 @@ type ApiResolverArgs struct { } type scQueryServiceArgs struct { - generalConfig *config.Config - epochConfig *config.EpochConfig - coreComponents factory.CoreComponentsHolder - stateComponents factory.StateComponentsHolder - dataComponents factory.DataComponentsHolder - processComponents 
factory.ProcessComponentsHolder - statusCoreComponents factory.StatusCoreComponentsHolder - gasScheduleNotifier core.GasScheduleNotifier - messageSigVerifier vm.MessageSignVerifier - systemSCConfig *config.SystemSmartContractsConfig - bootstrapper process.Bootstrapper - guardedAccountHandler process.GuardedAccountHandler - allowVMQueriesChan chan struct{} - workingDir string - processingMode common.NodeProcessingMode + generalConfig *config.Config + epochConfig *config.EpochConfig + coreComponents factory.CoreComponentsHolder + stateComponents factory.StateComponentsHolder + dataComponents factory.DataComponentsHolder + processComponents factory.ProcessComponentsHolder + statusCoreComponents factory.StatusCoreComponentsHolder + gasScheduleNotifier core.GasScheduleNotifier + messageSigVerifier vm.MessageSignVerifier + systemSCConfig *config.SystemSmartContractsConfig + bootstrapper process.Bootstrapper + guardedAccountHandler process.GuardedAccountHandler + allowVMQueriesChan chan struct{} + workingDir string + processingMode common.NodeProcessingMode + isInHistoricalBalancesMode bool } type scQueryElementArgs struct { - generalConfig *config.Config - epochConfig *config.EpochConfig - coreComponents factory.CoreComponentsHolder - stateComponents factory.StateComponentsHolder - dataComponents factory.DataComponentsHolder - processComponents factory.ProcessComponentsHolder - statusCoreComponents factory.StatusCoreComponentsHolder - gasScheduleNotifier core.GasScheduleNotifier - messageSigVerifier vm.MessageSignVerifier - systemSCConfig *config.SystemSmartContractsConfig - bootstrapper process.Bootstrapper - guardedAccountHandler process.GuardedAccountHandler - allowVMQueriesChan chan struct{} - workingDir string - index int - processingMode common.NodeProcessingMode + generalConfig *config.Config + epochConfig *config.EpochConfig + coreComponents factory.CoreComponentsHolder + stateComponents factory.StateComponentsHolder + dataComponents factory.DataComponentsHolder + processComponents factory.ProcessComponentsHolder + statusCoreComponents factory.StatusCoreComponentsHolder + gasScheduleNotifier core.GasScheduleNotifier + messageSigVerifier vm.MessageSignVerifier + systemSCConfig *config.SystemSmartContractsConfig + bootstrapper process.Bootstrapper + guardedAccountHandler process.GuardedAccountHandler + allowVMQueriesChan chan struct{} + workingDir string + index int + processingMode common.NodeProcessingMode + isInHistoricalBalancesMode bool } // CreateApiResolver is able to create an ApiResolver instance that will solve the REST API requests through the node facade @@ -112,24 +115,25 @@ type scQueryElementArgs struct { func CreateApiResolver(args *ApiResolverArgs) (facade.ApiResolver, error) { apiWorkingDir := filepath.Join(args.Configs.FlagsConfig.WorkingDir, common.TemporaryPath) argsSCQuery := &scQueryServiceArgs{ - generalConfig: args.Configs.GeneralConfig, - epochConfig: args.Configs.EpochConfig, - coreComponents: args.CoreComponents, - dataComponents: args.DataComponents, - stateComponents: args.StateComponents, - processComponents: args.ProcessComponents, - statusCoreComponents: args.StatusCoreComponents, - gasScheduleNotifier: args.GasScheduleNotifier, - messageSigVerifier: args.CryptoComponents.MessageSignVerifier(), - systemSCConfig: args.Configs.SystemSCConfig, - bootstrapper: args.Bootstrapper, - guardedAccountHandler: args.BootstrapComponents.GuardedAccountHandler(), - allowVMQueriesChan: args.AllowVMQueriesChan, - workingDir: apiWorkingDir, - processingMode: 
args.ProcessingMode, - } - - scQueryService, err := createScQueryService(argsSCQuery) + generalConfig: args.Configs.GeneralConfig, + epochConfig: args.Configs.EpochConfig, + coreComponents: args.CoreComponents, + dataComponents: args.DataComponents, + stateComponents: args.StateComponents, + processComponents: args.ProcessComponents, + statusCoreComponents: args.StatusCoreComponents, + gasScheduleNotifier: args.GasScheduleNotifier, + messageSigVerifier: args.CryptoComponents.MessageSignVerifier(), + systemSCConfig: args.Configs.SystemSCConfig, + bootstrapper: args.Bootstrapper, + guardedAccountHandler: args.BootstrapComponents.GuardedAccountHandler(), + allowVMQueriesChan: args.AllowVMQueriesChan, + workingDir: apiWorkingDir, + processingMode: args.ProcessingMode, + isInHistoricalBalancesMode: operationmodes.IsInHistoricalBalancesMode(args.Configs), + } + + scQueryService, storageManagers, err := createScQueryService(argsSCQuery) if err != nil { return nil, err } @@ -271,7 +275,9 @@ func CreateApiResolver(args *ApiResolverArgs) (facade.ApiResolver, error) { AccountsParser: args.ProcessComponents.AccountsParser(), GasScheduleNotifier: args.GasScheduleNotifier, ManagedPeersMonitor: args.StatusComponents.ManagedPeersMonitor(), + PublicKey: args.CryptoComponents.PublicKeyString(), NodesCoordinator: args.ProcessComponents.NodesCoordinator(), + StorageManagers: storageManagers, } return external.NewNodeApiResolver(argsApiResolver) @@ -279,75 +285,91 @@ func CreateApiResolver(args *ApiResolverArgs) (facade.ApiResolver, error) { func createScQueryService( args *scQueryServiceArgs, -) (process.SCQueryService, error) { +) (process.SCQueryService, []common.StorageManager, error) { numConcurrentVms := args.generalConfig.VirtualMachine.Querying.NumConcurrentVMs if numConcurrentVms < 1 { - return nil, fmt.Errorf("VirtualMachine.Querying.NumConcurrentVms should be a positive number more than 1") + return nil, nil, fmt.Errorf("VirtualMachine.Querying.NumConcurrentVms should be a positive number more than 1") } argsQueryElem := &scQueryElementArgs{ - generalConfig: args.generalConfig, - epochConfig: args.epochConfig, - coreComponents: args.coreComponents, - stateComponents: args.stateComponents, - dataComponents: args.dataComponents, - processComponents: args.processComponents, - statusCoreComponents: args.statusCoreComponents, - gasScheduleNotifier: args.gasScheduleNotifier, - messageSigVerifier: args.messageSigVerifier, - systemSCConfig: args.systemSCConfig, - bootstrapper: args.bootstrapper, - guardedAccountHandler: args.guardedAccountHandler, - allowVMQueriesChan: args.allowVMQueriesChan, - workingDir: args.workingDir, - index: 0, - processingMode: args.processingMode, + generalConfig: args.generalConfig, + epochConfig: args.epochConfig, + coreComponents: args.coreComponents, + stateComponents: args.stateComponents, + dataComponents: args.dataComponents, + processComponents: args.processComponents, + statusCoreComponents: args.statusCoreComponents, + gasScheduleNotifier: args.gasScheduleNotifier, + messageSigVerifier: args.messageSigVerifier, + systemSCConfig: args.systemSCConfig, + bootstrapper: args.bootstrapper, + guardedAccountHandler: args.guardedAccountHandler, + allowVMQueriesChan: args.allowVMQueriesChan, + workingDir: args.workingDir, + index: 0, + processingMode: args.processingMode, + isInHistoricalBalancesMode: args.isInHistoricalBalancesMode, } var err error var scQueryService process.SCQueryService + var storageManager common.StorageManager + storageManagers := 
make([]common.StorageManager, 0, numConcurrentVms) list := make([]process.SCQueryService, 0, numConcurrentVms) for i := 0; i < numConcurrentVms; i++ { argsQueryElem.index = i - scQueryService, err = createScQueryElement(argsQueryElem) + scQueryService, storageManager, err = createScQueryElement(*argsQueryElem) if err != nil { - return nil, err + return nil, nil, err } list = append(list, scQueryService) + storageManagers = append(storageManagers, storageManager) } sqQueryDispatcher, err := smartContract.NewScQueryServiceDispatcher(list) if err != nil { - return nil, err + return nil, nil, err } - return sqQueryDispatcher, nil + return sqQueryDispatcher, storageManagers, nil } func createScQueryElement( - args *scQueryElementArgs, -) (process.SCQueryService, error) { + args scQueryElementArgs, +) (process.SCQueryService, common.StorageManager, error) { var err error + selfShardID := args.processComponents.ShardCoordinator().SelfId() + pkConverter := args.coreComponents.AddressPubKeyConverter() automaticCrawlerAddressesStrings := args.generalConfig.BuiltInFunctions.AutomaticCrawlerAddresses convertedAddresses, errDecode := factory.DecodeAddresses(pkConverter, automaticCrawlerAddressesStrings) if errDecode != nil { - return nil, errDecode + return nil, nil, errDecode } dnsV2AddressesStrings := args.generalConfig.BuiltInFunctions.DNSV2Addresses convertedDNSV2Addresses, errDecode := factory.DecodeAddresses(pkConverter, dnsV2AddressesStrings) if errDecode != nil { - return nil, errDecode + return nil, nil, errDecode + } + + apiBlockchain, err := createBlockchainForScQuery(selfShardID) + if err != nil { + return nil, nil, err + } + + accountsAdapterApi, storageManager, err := createNewAccountsAdapterApi(args, apiBlockchain) + if err != nil { + return nil, nil, err } builtInFuncFactory, err := createBuiltinFuncs( args.gasScheduleNotifier, args.coreComponents.InternalMarshalizer(), - args.stateComponents.AccountsAdapterAPI(), + accountsAdapterApi, args.processComponents.ShardCoordinator(), args.coreComponents.EpochNotifier(), args.coreComponents.EnableEpochsHandler(), @@ -357,13 +379,13 @@ func createScQueryElement( convertedDNSV2Addresses, ) if err != nil { - return nil, err + return nil, nil, err } cacherCfg := storageFactory.GetCacherFromConfig(args.generalConfig.SmartContractDataPool) smartContractsCache, err := storageunit.NewCache(cacherCfg) if err != nil { - return nil, err + return nil, nil, err } scStorage := args.generalConfig.SmartContractsStorageForSCQuery @@ -387,76 +409,76 @@ func createScQueryElement( GasSchedule: args.gasScheduleNotifier, Counter: counters.NewDisabledCounter(), MissingTrieNodesNotifier: syncer.NewMissingTrieNodesNotifier(), + Accounts: accountsAdapterApi, + BlockChain: apiBlockchain, } - var apiBlockchain data.ChainHandler var vmFactory process.VirtualMachinesContainerFactory maxGasForVmQueries := args.generalConfig.VirtualMachine.GasConfig.ShardMaxGasPerVmQuery - if args.processComponents.ShardCoordinator().SelfId() == core.MetachainShardId { + if selfShardID == core.MetachainShardId { maxGasForVmQueries = args.generalConfig.VirtualMachine.GasConfig.MetaMaxGasPerVmQuery - apiBlockchain, vmFactory, err = createMetaVmContainerFactory(args, argsHook) + vmFactory, err = createMetaVmContainerFactory(args, argsHook) } else { - apiBlockchain, vmFactory, err = createShardVmContainerFactory(args, argsHook) + vmFactory, err = createShardVmContainerFactory(args, argsHook) } if err != nil { - return nil, err + return nil, nil, err } log.Debug("maximum gas per VM Query", 
"value", maxGasForVmQueries) vmContainer, err := vmFactory.Create() if err != nil { - return nil, err + return nil, nil, err } err = vmFactory.BlockChainHookImpl().SetVMContainer(vmContainer) if err != nil { - return nil, err + return nil, nil, err } err = builtInFuncFactory.SetPayableHandler(vmFactory.BlockChainHookImpl()) if err != nil { - return nil, err + return nil, nil, err } argsNewSCQueryService := smartContract.ArgsNewSCQueryService{ - VmContainer: vmContainer, - EconomicsFee: args.coreComponents.EconomicsData(), - BlockChainHook: vmFactory.BlockChainHookImpl(), - MainBlockChain: args.dataComponents.Blockchain(), - APIBlockChain: apiBlockchain, - WasmVMChangeLocker: args.coreComponents.WasmVMChangeLocker(), - Bootstrapper: args.bootstrapper, - AllowExternalQueriesChan: args.allowVMQueriesChan, - MaxGasLimitPerQuery: maxGasForVmQueries, - HistoryRepository: args.processComponents.HistoryRepository(), - ShardCoordinator: args.processComponents.ShardCoordinator(), - StorageService: args.dataComponents.StorageService(), - Marshaller: args.coreComponents.InternalMarshalizer(), - Hasher: args.coreComponents.Hasher(), - Uint64ByteSliceConverter: args.coreComponents.Uint64ByteSliceConverter(), - } - - return smartContract.NewSCQueryService(argsNewSCQueryService) + VmContainer: vmContainer, + EconomicsFee: args.coreComponents.EconomicsData(), + BlockChainHook: vmFactory.BlockChainHookImpl(), + MainBlockChain: args.dataComponents.Blockchain(), + APIBlockChain: apiBlockchain, + WasmVMChangeLocker: args.coreComponents.WasmVMChangeLocker(), + Bootstrapper: args.bootstrapper, + AllowExternalQueriesChan: args.allowVMQueriesChan, + MaxGasLimitPerQuery: maxGasForVmQueries, + HistoryRepository: args.processComponents.HistoryRepository(), + ShardCoordinator: args.processComponents.ShardCoordinator(), + StorageService: args.dataComponents.StorageService(), + Marshaller: args.coreComponents.InternalMarshalizer(), + Hasher: args.coreComponents.Hasher(), + Uint64ByteSliceConverter: args.coreComponents.Uint64ByteSliceConverter(), + IsInHistoricalBalancesMode: args.isInHistoricalBalancesMode, + } + + scQueryService, err := smartContract.NewSCQueryService(argsNewSCQueryService) + + return scQueryService, storageManager, err } -func createMetaVmContainerFactory(args *scQueryElementArgs, argsHook hooks.ArgBlockChainHook) (data.ChainHandler, process.VirtualMachinesContainerFactory, error) { - apiBlockchain, err := blockchain.NewMetaChain(disabled.NewAppStatusHandler()) - if err != nil { - return nil, nil, err +func createBlockchainForScQuery(selfShardID uint32) (data.ChainHandler, error) { + isMetachain := selfShardID == core.MetachainShardId + if isMetachain { + return blockchain.NewMetaChain(disabled.NewAppStatusHandler()) } - accountsAdapterApi, err := createNewAccountsAdapterApi(args, apiBlockchain) - if err != nil { - return nil, nil, err - } - - argsHook.BlockChain = apiBlockchain - argsHook.Accounts = accountsAdapterApi + return blockchain.NewBlockChain(disabled.NewAppStatusHandler()) +} +func createMetaVmContainerFactory(args scQueryElementArgs, argsHook hooks.ArgBlockChainHook) (process.VirtualMachinesContainerFactory, error) { blockChainHookImpl, errBlockChainHook := hooks.NewBlockChainHookImpl(argsHook) if errBlockChainHook != nil { - return nil, nil, errBlockChainHook + return nil, errBlockChainHook } argsNewVmFactory := metachain.ArgsNewVMContainerFactory{ @@ -474,38 +496,26 @@ func createMetaVmContainerFactory(args *scQueryElementArgs, argsHook hooks.ArgBl ChanceComputer: 
args.coreComponents.Rater(), ShardCoordinator: args.processComponents.ShardCoordinator(), EnableEpochsHandler: args.coreComponents.EnableEpochsHandler(), + NodesCoordinator: args.processComponents.NodesCoordinator(), } vmFactory, err := metachain.NewVMContainerFactory(argsNewVmFactory) if err != nil { - return nil, nil, err + return nil, err } - return apiBlockchain, vmFactory, nil + return vmFactory, nil } -func createShardVmContainerFactory(args *scQueryElementArgs, argsHook hooks.ArgBlockChainHook) (data.ChainHandler, process.VirtualMachinesContainerFactory, error) { - apiBlockchain, err := blockchain.NewBlockChain(disabled.NewAppStatusHandler()) - if err != nil { - return nil, nil, err - } - - accountsAdapterApi, err := createNewAccountsAdapterApi(args, apiBlockchain) - if err != nil { - return nil, nil, err - } - - argsHook.BlockChain = apiBlockchain - argsHook.Accounts = accountsAdapterApi - +func createShardVmContainerFactory(args scQueryElementArgs, argsHook hooks.ArgBlockChainHook) (process.VirtualMachinesContainerFactory, error) { queryVirtualMachineConfig := args.generalConfig.VirtualMachine.Querying.VirtualMachineConfig esdtTransferParser, errParser := parsers.NewESDTTransferParser(args.coreComponents.InternalMarshalizer()) if errParser != nil { - return nil, nil, errParser + return nil, errParser } blockChainHookImpl, errBlockChainHook := hooks.NewBlockChainHookImpl(argsHook) if errBlockChainHook != nil { - return nil, nil, errBlockChainHook + return nil, errBlockChainHook } argsNewVMFactory := shard.ArgVMContainerFactory{ @@ -527,13 +537,13 @@ func createShardVmContainerFactory(args *scQueryElementArgs, argsHook hooks.ArgB vmFactory, err := shard.NewVMContainerFactory(argsNewVMFactory) if err != nil { - return nil, nil, err + return nil, err } - return apiBlockchain, vmFactory, nil + return vmFactory, nil } -func createNewAccountsAdapterApi(args *scQueryElementArgs, chainHandler data.ChainHandler) (state.AccountsAdapterAPI, error) { +func createNewAccountsAdapterApi(args scQueryElementArgs, chainHandler data.ChainHandler) (state.AccountsAdapterAPI, common.StorageManager, error) { argsAccCreator := factoryState.ArgsAccountCreator{ Hasher: args.coreComponents.Hasher(), Marshaller: args.coreComponents.InternalMarshalizer(), @@ -541,17 +551,17 @@ func createNewAccountsAdapterApi(args *scQueryElementArgs, chainHandler data.Cha } accountFactory, err := factoryState.NewAccountCreator(argsAccCreator) if err != nil { - return nil, err + return nil, nil, err } storagePruning, err := newStoragePruningManager(args) if err != nil { - return nil, err + return nil, nil, err } storageService := args.dataComponents.StorageService() trieStorer, err := storageService.GetStorer(dataRetriever.UserAccountsUnit) if err != nil { - return nil, err + return nil, nil, err } trieFactoryArgs := trieFactory.TrieFactoryArgs{ @@ -562,7 +572,7 @@ func createNewAccountsAdapterApi(args *scQueryElementArgs, chainHandler data.Cha } trFactory, err := trieFactory.NewTrieFactory(trieFactoryArgs) if err != nil { - return nil, err + return nil, nil, err } trieCreatorArgs := trieFactory.TrieCreateArgs{ @@ -575,9 +585,9 @@ func createNewAccountsAdapterApi(args *scQueryElementArgs, chainHandler data.Cha EnableEpochsHandler: args.coreComponents.EnableEpochsHandler(), StatsCollector: args.statusCoreComponents.StateStatsHandler(), } - _, merkleTrie, err := trFactory.Create(trieCreatorArgs) + trieStorageManager, merkleTrie, err := trFactory.Create(trieCreatorArgs) if err != nil { - return nil, err + return nil, nil, err } 
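// Editorial note, not part of the original patch: the trie storage manager captured by the new
// "trieStorageManager, merkleTrie, err := trFactory.Create(trieCreatorArgs)" assignment above is
// now returned alongside the accounts adapter, mirroring the earlier call
// "accountsAdapterApi, storageManager, err := createNewAccountsAdapterApi(args, apiBlockchain)".
// createScQueryService then collects one such storage manager per concurrent VM query element and
// hands the slice to external.NewNodeApiResolver through the new StorageManagers field, presumably
// so the resolver can manage their lifecycle (for example, closing them on shutdown). A minimal
// caller-side sketch, reusing the names that appear in this patch:
//
//	accountsAdapterApi, storageManager, err := createNewAccountsAdapterApi(args, apiBlockchain)
//	if err != nil {
//		return nil, nil, err
//	}
//	storageManagers = append(storageManagers, storageManager) // inside createScQueryService's per-VM loop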
argsAPIAccountsDB := state.ArgsAccountsDB{ @@ -592,18 +602,20 @@ func createNewAccountsAdapterApi(args *scQueryElementArgs, chainHandler data.Cha provider, err := blockInfoProviders.NewCurrentBlockInfo(chainHandler) if err != nil { - return nil, err + return nil, nil, err } accounts, err := state.NewAccountsDB(argsAPIAccountsDB) if err != nil { - return nil, err + return nil, nil, err } - return state.NewAccountsDBApi(accounts, provider) + accountsDB, err := state.NewAccountsDBApi(accounts, provider) + + return accountsDB, trieStorageManager, err } -func newStoragePruningManager(args *scQueryElementArgs) (state.StoragePruningManager, error) { +func newStoragePruningManager(args scQueryElementArgs) (state.StoragePruningManager, error) { argsMemEviction := evictionWaitingList.MemoryEvictionWaitingListArgs{ RootHashesSize: args.generalConfig.EvictionWaitingList.RootHashesSize, HashesSize: args.generalConfig.EvictionWaitingList.HashesSize, diff --git a/factory/api/apiResolverFactory_test.go b/factory/api/apiResolverFactory_test.go index 591ea31b79f..e929d66e701 100644 --- a/factory/api/apiResolverFactory_test.go +++ b/factory/api/apiResolverFactory_test.go @@ -1,6 +1,7 @@ package api_test import ( + "fmt" "strings" "sync" "testing" @@ -26,6 +27,7 @@ import ( epochNotifierMock "github.com/multiversx/mx-chain-go/testscommon/epochNotifier" "github.com/multiversx/mx-chain-go/testscommon/factory" "github.com/multiversx/mx-chain-go/testscommon/genericMocks" + "github.com/multiversx/mx-chain-go/testscommon/genesisMocks" "github.com/multiversx/mx-chain-go/testscommon/guardianMocks" "github.com/multiversx/mx-chain-go/testscommon/mainFactoryMocks" "github.com/multiversx/mx-chain-go/testscommon/marshallerMock" @@ -70,7 +72,7 @@ func createMockArgs(t *testing.T) *api.ApiResolverArgs { cryptoComponents := componentsMock.GetCryptoComponents(coreComponents) networkComponents := componentsMock.GetNetworkComponents(cryptoComponents) dataComponents := componentsMock.GetDataComponents(coreComponents, shardCoordinator) - stateComponents := componentsMock.GetStateComponents(coreComponents) + stateComponents := componentsMock.GetStateComponents(coreComponents, componentsMock.GetStatusCoreComponents()) processComponents := componentsMock.GetProcessComponents(shardCoordinator, coreComponents, networkComponents, dataComponents, cryptoComponents, stateComponents) argsB := componentsMock.GetBootStrapFactoryArgs() @@ -327,7 +329,7 @@ func createMockSCQueryElementArgs() api.SCQueryElementArgs { EnableEpochsHandlerField: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, UInt64ByteSliceConv: &testsMocks.Uint64ByteSliceConverterMock{}, EconomicsHandler: &economicsmocks.EconomicsHandlerStub{}, - NodesConfig: &testscommon.NodesSetupStub{}, + NodesConfig: &genesisMocks.NodesSetupStub{}, Hash: &testscommon.HasherStub{}, RatingHandler: &testscommon.RaterMock{}, WasmVMChangeLockerInternal: &sync.RWMutex{}, @@ -346,6 +348,7 @@ func createMockSCQueryElementArgs() api.SCQueryElementArgs { AppStatusHandlerCalled: func() core.AppStatusHandler { return &statusHandler.AppStatusHandlerStub{} }, + StateStatsHandlerField: &testscommon.StateStatisticsHandlerStub{}, }, DataComponents: &mock.DataComponentsMock{ Storage: genericMocks.NewChainStorerMock(0), @@ -379,9 +382,10 @@ func TestCreateApiResolver_createScQueryElement(t *testing.T) { args := createMockSCQueryElementArgs() args.GuardedAccountHandler = nil - scQueryService, err := api.CreateScQueryElement(args) + scQueryService, storageManager, err := api.CreateScQueryElement(args) 
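// Editorial note, not part of the original patch: api.CreateScQueryElement now returns the
// per-element common.StorageManager as a second value, so each failure subtest below is expected
// to leave both scQueryService and storageManager nil, which is what the added
// "require.Nil(t, storageManager)" assertions check.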
require.Equal(t, process.ErrNilGuardedAccountHandler, err) require.Nil(t, scQueryService) + require.Nil(t, storageManager) }) t.Run("DecodeAddresses fails", func(t *testing.T) { t.Parallel() @@ -390,10 +394,11 @@ func TestCreateApiResolver_createScQueryElement(t *testing.T) { args.CoreComponents = &mock.CoreComponentsMock{ AddrPubKeyConv: nil, } - scQueryService, err := api.CreateScQueryElement(args) + scQueryService, storageManager, err := api.CreateScQueryElement(args) require.NotNil(t, err) require.True(t, strings.Contains(strings.ToLower(err.Error()), "public key converter")) require.Nil(t, scQueryService) + require.Nil(t, storageManager) }) t.Run("createBuiltinFuncs fails", func(t *testing.T) { t.Parallel() @@ -401,10 +406,11 @@ func TestCreateApiResolver_createScQueryElement(t *testing.T) { args := createMockSCQueryElementArgs() coreCompMock := args.CoreComponents.(*mock.CoreComponentsMock) coreCompMock.IntMarsh = nil - scQueryService, err := api.CreateScQueryElement(args) + scQueryService, storageManager, err := api.CreateScQueryElement(args) require.NotNil(t, err) require.True(t, strings.Contains(strings.ToLower(err.Error()), "marshalizer")) require.Nil(t, scQueryService) + require.Nil(t, storageManager) }) t.Run("NewCache fails", func(t *testing.T) { t.Parallel() @@ -414,10 +420,11 @@ func TestCreateApiResolver_createScQueryElement(t *testing.T) { Type: "LRU", SizeInBytes: 1, } - scQueryService, err := api.CreateScQueryElement(args) + scQueryService, storageManager, err := api.CreateScQueryElement(args) require.NotNil(t, err) require.True(t, strings.Contains(strings.ToLower(err.Error()), "lru")) require.Nil(t, scQueryService) + require.Nil(t, storageManager) }) t.Run("metachain - NewVMContainerFactory fails", func(t *testing.T) { t.Parallel() @@ -432,10 +439,11 @@ func TestCreateApiResolver_createScQueryElement(t *testing.T) { } coreCompMock := args.CoreComponents.(*mock.CoreComponentsMock) coreCompMock.Hash = nil - scQueryService, err := api.CreateScQueryElement(args) + scQueryService, storageManager, err := api.CreateScQueryElement(args) require.NotNil(t, err) require.True(t, strings.Contains(strings.ToLower(err.Error()), "hasher")) require.Nil(t, scQueryService) + require.Nil(t, storageManager) }) t.Run("shard - NewVMContainerFactory fails", func(t *testing.T) { t.Parallel() @@ -443,10 +451,30 @@ func TestCreateApiResolver_createScQueryElement(t *testing.T) { args := createMockSCQueryElementArgs() coreCompMock := args.CoreComponents.(*mock.CoreComponentsMock) coreCompMock.Hash = nil - scQueryService, err := api.CreateScQueryElement(args) + scQueryService, storageManager, err := api.CreateScQueryElement(args) require.NotNil(t, err) require.True(t, strings.Contains(strings.ToLower(err.Error()), "hasher")) require.Nil(t, scQueryService) + require.Nil(t, storageManager) }) +} + +func TestCreateApiResolver_createBlockchainForScQuery(t *testing.T) { + t.Parallel() + + t.Run("for metachain", func(t *testing.T) { + t.Parallel() + apiBlockchain, err := api.CreateBlockchainForScQuery(core.MetachainShardId) + require.NoError(t, err) + require.Equal(t, "*blockchain.metaChain", fmt.Sprintf("%T", apiBlockchain)) + }) + + t.Run("for shard", func(t *testing.T) { + t.Parallel() + + apiBlockchain, err := api.CreateBlockchainForScQuery(0) + require.NoError(t, err) + require.Equal(t, "*blockchain.blockChain", fmt.Sprintf("%T", apiBlockchain)) + }) } diff --git a/factory/api/export_test.go b/factory/api/export_test.go index 0164c0c2b10..a17ddfad30c 100644 --- a/factory/api/export_test.go +++ 
b/factory/api/export_test.go @@ -2,6 +2,8 @@ package api import ( "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/factory" "github.com/multiversx/mx-chain-go/process" @@ -28,8 +30,8 @@ type SCQueryElementArgs struct { } // CreateScQueryElement - -func CreateScQueryElement(args SCQueryElementArgs) (process.SCQueryService, error) { - return createScQueryElement(&scQueryElementArgs{ +func CreateScQueryElement(args SCQueryElementArgs) (process.SCQueryService, common.StorageManager, error) { + return createScQueryElement(scQueryElementArgs{ generalConfig: args.GeneralConfig, epochConfig: args.EpochConfig, coreComponents: args.CoreComponents, @@ -47,3 +49,8 @@ func CreateScQueryElement(args SCQueryElementArgs) (process.SCQueryService, erro guardedAccountHandler: args.GuardedAccountHandler, }) } + +// CreateBlockchainForScQuery - +func CreateBlockchainForScQuery(selfShardID uint32) (data.ChainHandler, error) { + return createBlockchainForScQuery(selfShardID) +} diff --git a/factory/bootstrap/bootstrapComponents.go b/factory/bootstrap/bootstrapComponents.go index 988b72764e0..a9ef7851ccb 100644 --- a/factory/bootstrap/bootstrapComponents.go +++ b/factory/bootstrap/bootstrapComponents.go @@ -18,6 +18,7 @@ import ( "github.com/multiversx/mx-chain-go/process/headerCheck" "github.com/multiversx/mx-chain-go/process/smartContract" "github.com/multiversx/mx-chain-go/sharding" + "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" "github.com/multiversx/mx-chain-go/storage" "github.com/multiversx/mx-chain-go/storage/directoryhandler" storageFactory "github.com/multiversx/mx-chain-go/storage/factory" @@ -55,14 +56,15 @@ type bootstrapComponentsFactory struct { } type bootstrapComponents struct { - epochStartBootstrapper factory.EpochStartBootstrapper - bootstrapParamsHolder factory.BootstrapParamsHolder - nodeType core.NodeType - shardCoordinator sharding.Coordinator - headerVersionHandler nodeFactory.HeaderVersionHandler - versionedHeaderFactory nodeFactory.VersionedHeaderFactory - headerIntegrityVerifier nodeFactory.HeaderIntegrityVerifierHandler - guardedAccountHandler process.GuardedAccountHandler + epochStartBootstrapper factory.EpochStartBootstrapper + bootstrapParamsHolder factory.BootstrapParamsHolder + nodeType core.NodeType + shardCoordinator sharding.Coordinator + headerVersionHandler nodeFactory.HeaderVersionHandler + versionedHeaderFactory nodeFactory.VersionedHeaderFactory + headerIntegrityVerifier nodeFactory.HeaderIntegrityVerifierHandler + guardedAccountHandler process.GuardedAccountHandler + nodesCoordinatorRegistryFactory nodesCoordinator.NodesCoordinatorRegistryFactory } // NewBootstrapComponentsFactory creates an instance of bootstrapComponentsFactory @@ -70,6 +72,9 @@ func NewBootstrapComponentsFactory(args BootstrapComponentsFactoryArgs) (*bootst if check.IfNil(args.CoreComponents) { return nil, errors.ErrNilCoreComponentsHolder } + if check.IfNil(args.CoreComponents.EnableEpochsHandler()) { + return nil, errors.ErrNilEnableEpochsHandler + } if check.IfNil(args.CryptoComponents) { return nil, errors.ErrNilCryptoComponentsHolder } @@ -185,31 +190,40 @@ func (bcf *bootstrapComponentsFactory) Create() (*bootstrapComponents, error) { return nil, err } + nodesCoordinatorRegistryFactory, err := nodesCoordinator.NewNodesCoordinatorRegistryFactory( + bcf.coreComponents.InternalMarshalizer(), + 
bcf.coreComponents.EnableEpochsHandler().GetActivationEpoch(common.StakingV4Step2Flag), + ) + if err != nil { + return nil, err + } + epochStartBootstrapArgs := bootstrap.ArgsEpochStartBootstrap{ - CoreComponentsHolder: bcf.coreComponents, - CryptoComponentsHolder: bcf.cryptoComponents, - MainMessenger: bcf.networkComponents.NetworkMessenger(), - FullArchiveMessenger: bcf.networkComponents.FullArchiveNetworkMessenger(), - GeneralConfig: bcf.config, - PrefsConfig: bcf.prefConfig.Preferences, - FlagsConfig: bcf.flagsConfig, - EconomicsData: bcf.coreComponents.EconomicsData(), - GenesisNodesConfig: bcf.coreComponents.GenesisNodesSetup(), - GenesisShardCoordinator: genesisShardCoordinator, - StorageUnitOpener: unitOpener, - Rater: bcf.coreComponents.Rater(), - DestinationShardAsObserver: destShardIdAsObserver, - NodeShuffler: bcf.coreComponents.NodesShuffler(), - RoundHandler: bcf.coreComponents.RoundHandler(), - LatestStorageDataProvider: latestStorageDataProvider, - ArgumentsParser: smartContract.NewArgumentParser(), - StatusHandler: bcf.statusCoreComponents.AppStatusHandler(), - HeaderIntegrityVerifier: headerIntegrityVerifier, - DataSyncerCreator: dataSyncerFactory, - ScheduledSCRsStorer: nil, // will be updated after sync from network - TrieSyncStatisticsProvider: tss, - NodeProcessingMode: common.GetNodeProcessingMode(&bcf.importDbConfig), - StateStatsHandler: bcf.statusCoreComponents.StateStatsHandler(), + CoreComponentsHolder: bcf.coreComponents, + CryptoComponentsHolder: bcf.cryptoComponents, + MainMessenger: bcf.networkComponents.NetworkMessenger(), + FullArchiveMessenger: bcf.networkComponents.FullArchiveNetworkMessenger(), + GeneralConfig: bcf.config, + PrefsConfig: bcf.prefConfig.Preferences, + FlagsConfig: bcf.flagsConfig, + EconomicsData: bcf.coreComponents.EconomicsData(), + GenesisNodesConfig: bcf.coreComponents.GenesisNodesSetup(), + GenesisShardCoordinator: genesisShardCoordinator, + StorageUnitOpener: unitOpener, + Rater: bcf.coreComponents.Rater(), + DestinationShardAsObserver: destShardIdAsObserver, + NodeShuffler: bcf.coreComponents.NodesShuffler(), + RoundHandler: bcf.coreComponents.RoundHandler(), + LatestStorageDataProvider: latestStorageDataProvider, + ArgumentsParser: smartContract.NewArgumentParser(), + StatusHandler: bcf.statusCoreComponents.AppStatusHandler(), + HeaderIntegrityVerifier: headerIntegrityVerifier, + DataSyncerCreator: dataSyncerFactory, + ScheduledSCRsStorer: nil, // will be updated after sync from network + TrieSyncStatisticsProvider: tss, + NodeProcessingMode: common.GetNodeProcessingMode(&bcf.importDbConfig), + StateStatsHandler: bcf.statusCoreComponents.StateStatsHandler(), + NodesCoordinatorRegistryFactory: nodesCoordinatorRegistryFactory, } var epochStartBootstrapper factory.EpochStartBootstrapper @@ -260,12 +274,13 @@ func (bcf *bootstrapComponentsFactory) Create() (*bootstrapComponents, error) { bootstrapParamsHolder: &bootstrapParams{ bootstrapParams: bootstrapParameters, }, - nodeType: nodeType, - shardCoordinator: shardCoordinator, - headerVersionHandler: headerVersionHandler, - headerIntegrityVerifier: headerIntegrityVerifier, - versionedHeaderFactory: versionedHeaderFactory, - guardedAccountHandler: guardedAccountHandler, + nodeType: nodeType, + shardCoordinator: shardCoordinator, + headerVersionHandler: headerVersionHandler, + headerIntegrityVerifier: headerIntegrityVerifier, + versionedHeaderFactory: versionedHeaderFactory, + guardedAccountHandler: guardedAccountHandler, + nodesCoordinatorRegistryFactory: 
nodesCoordinatorRegistryFactory, }, nil } diff --git a/factory/bootstrap/bootstrapComponentsHandler.go b/factory/bootstrap/bootstrapComponentsHandler.go index bda412e2759..7401f4834f4 100644 --- a/factory/bootstrap/bootstrapComponentsHandler.go +++ b/factory/bootstrap/bootstrapComponentsHandler.go @@ -8,6 +8,7 @@ import ( "github.com/multiversx/mx-chain-go/errors" "github.com/multiversx/mx-chain-go/factory" "github.com/multiversx/mx-chain-go/process" + "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" ) var _ factory.ComponentHandler = (*managedBootstrapComponents)(nil) @@ -118,6 +119,18 @@ func (mbf *managedBootstrapComponents) EpochBootstrapParams() factory.BootstrapP return mbf.bootstrapComponents.bootstrapParamsHolder } +// NodesCoordinatorRegistryFactory returns the NodesCoordinatorRegistryFactory +func (mbf *managedBootstrapComponents) NodesCoordinatorRegistryFactory() nodesCoordinator.NodesCoordinatorRegistryFactory { + mbf.mutBootstrapComponents.RLock() + defer mbf.mutBootstrapComponents.RUnlock() + + if mbf.bootstrapComponents == nil { + return nil + } + + return mbf.bootstrapComponents.nodesCoordinatorRegistryFactory +} + // IsInterfaceNil returns true if the underlying object is nil func (mbf *managedBootstrapComponents) IsInterfaceNil() bool { return mbf == nil diff --git a/factory/bootstrap/bootstrapComponents_test.go b/factory/bootstrap/bootstrapComponents_test.go index 85c22017b28..180315b1f36 100644 --- a/factory/bootstrap/bootstrapComponents_test.go +++ b/factory/bootstrap/bootstrapComponents_test.go @@ -8,6 +8,7 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/config" errorsMx "github.com/multiversx/mx-chain-go/errors" "github.com/multiversx/mx-chain-go/factory/bootstrap" @@ -38,6 +39,19 @@ func TestNewBootstrapComponentsFactory(t *testing.T) { require.Nil(t, bcf) require.Equal(t, errorsMx.ErrNilCoreComponentsHolder, err) }) + t.Run("nil enable epochs handler should error", func(t *testing.T) { + t.Parallel() + + argsCopy := args + argsCopy.CoreComponents = &factory.CoreComponentsHolderStub{ + EnableEpochsHandlerCalled: func() common.EnableEpochsHandler { + return nil + }, + } + bcf, err := bootstrap.NewBootstrapComponentsFactory(argsCopy) + require.Nil(t, bcf) + require.Equal(t, errorsMx.ErrNilEnableEpochsHandler, err) + }) t.Run("nil crypto components should error", func(t *testing.T) { t.Parallel() @@ -218,7 +232,8 @@ func TestBootstrapComponentsFactory_Create(t *testing.T) { coreComponents := componentsMock.GetDefaultCoreComponents() args.CoreComponents = coreComponents coreComponents.RatingHandler = nil - bcf, _ := bootstrap.NewBootstrapComponentsFactory(args) + bcf, err := bootstrap.NewBootstrapComponentsFactory(args) + require.Nil(t, err) require.NotNil(t, bcf) bc, err := bcf.Create() diff --git a/factory/bootstrap/bootstrapParameters.go b/factory/bootstrap/bootstrapParameters.go index 5002f597e55..0002beb1f62 100644 --- a/factory/bootstrap/bootstrapParameters.go +++ b/factory/bootstrap/bootstrapParameters.go @@ -25,7 +25,7 @@ func (bph *bootstrapParams) NumOfShards() uint32 { } // NodesConfig returns the nodes coordinator config after bootstrap -func (bph *bootstrapParams) NodesConfig() *nodesCoordinator.NodesCoordinatorRegistry { +func (bph *bootstrapParams) NodesConfig() nodesCoordinator.NodesCoordinatorRegistryHandler { return bph.bootstrapParams.NodesConfig } diff --git a/factory/bootstrap/shardingFactory.go 
b/factory/bootstrap/shardingFactory.go index 40472acae1e..6662129299b 100644 --- a/factory/bootstrap/shardingFactory.go +++ b/factory/bootstrap/shardingFactory.go @@ -113,6 +113,7 @@ func CreateNodesCoordinator( nodeTypeProvider core.NodeTypeProviderHandler, enableEpochsHandler common.EnableEpochsHandler, validatorInfoCacher epochStart.ValidatorInfoCacher, + nodesCoordinatorRegistryFactory nodesCoordinator.NodesCoordinatorRegistryFactory, ) (nodesCoordinator.NodesCoordinator, error) { if check.IfNil(nodeShufflerOut) { return nil, errErd.ErrNilShuffleOutCloser @@ -165,15 +166,15 @@ func CreateNodesCoordinator( if bootstrapParameters.NodesConfig() != nil { nodeRegistry := bootstrapParameters.NodesConfig() currentEpoch = bootstrapParameters.Epoch() - epochsConfig, ok := nodeRegistry.EpochsConfig[fmt.Sprintf("%d", currentEpoch)] + epochsConfig, ok := nodeRegistry.GetEpochsConfig()[fmt.Sprintf("%d", currentEpoch)] if ok { - eligibles := epochsConfig.EligibleValidators + eligibles := epochsConfig.GetEligibleValidators() eligibleValidators, err = nodesCoordinator.SerializableValidatorsToValidators(eligibles) if err != nil { return nil, err } - waitings := epochsConfig.WaitingValidators + waitings := epochsConfig.GetWaitingValidators() waitingValidators, err = nodesCoordinator.SerializableValidatorsToValidators(waitings) if err != nil { return nil, err @@ -197,28 +198,29 @@ func CreateNodesCoordinator( } argumentsNodesCoordinator := nodesCoordinator.ArgNodesCoordinator{ - ShardConsensusGroupSize: shardConsensusGroupSize, - MetaConsensusGroupSize: metaConsensusGroupSize, - Marshalizer: marshalizer, - Hasher: hasher, - Shuffler: nodeShuffler, - EpochStartNotifier: epochStartNotifier, - BootStorer: bootStorer, - ShardIDAsObserver: shardIDAsObserver, - NbShards: nbShards, - EligibleNodes: eligibleValidators, - WaitingNodes: waitingValidators, - SelfPublicKey: pubKeyBytes, - ConsensusGroupCache: consensusGroupCache, - ShuffledOutHandler: shuffledOutHandler, - Epoch: currentEpoch, - StartEpoch: startEpoch, - ChanStopNode: chanNodeStop, - NodeTypeProvider: nodeTypeProvider, - IsFullArchive: prefsConfig.FullArchive, - EnableEpochsHandler: enableEpochsHandler, - ValidatorInfoCacher: validatorInfoCacher, - GenesisNodesSetupHandler: nodesConfig, + ShardConsensusGroupSize: shardConsensusGroupSize, + MetaConsensusGroupSize: metaConsensusGroupSize, + Marshalizer: marshalizer, + Hasher: hasher, + Shuffler: nodeShuffler, + EpochStartNotifier: epochStartNotifier, + BootStorer: bootStorer, + ShardIDAsObserver: shardIDAsObserver, + NbShards: nbShards, + EligibleNodes: eligibleValidators, + WaitingNodes: waitingValidators, + SelfPublicKey: pubKeyBytes, + ConsensusGroupCache: consensusGroupCache, + ShuffledOutHandler: shuffledOutHandler, + Epoch: currentEpoch, + StartEpoch: startEpoch, + ChanStopNode: chanNodeStop, + NodeTypeProvider: nodeTypeProvider, + IsFullArchive: prefsConfig.FullArchive, + EnableEpochsHandler: enableEpochsHandler, + ValidatorInfoCacher: validatorInfoCacher, + GenesisNodesSetupHandler: nodesConfig, + NodesCoordinatorRegistryFactory: nodesCoordinatorRegistryFactory, } baseNodesCoordinator, err := nodesCoordinator.NewIndexHashedNodesCoordinator(argumentsNodesCoordinator) diff --git a/factory/bootstrap/shardingFactory_test.go b/factory/bootstrap/shardingFactory_test.go index 0df777933b0..c7a54e077f4 100644 --- a/factory/bootstrap/shardingFactory_test.go +++ b/factory/bootstrap/shardingFactory_test.go @@ -17,6 +17,7 @@ import ( "github.com/multiversx/mx-chain-go/testscommon/bootstrapMocks" 
"github.com/multiversx/mx-chain-go/testscommon/cryptoMocks" "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" + "github.com/multiversx/mx-chain-go/testscommon/genesisMocks" "github.com/multiversx/mx-chain-go/testscommon/marshallerMock" "github.com/multiversx/mx-chain-go/testscommon/nodeTypeProviderMock" "github.com/multiversx/mx-chain-go/testscommon/shardingMocks" @@ -41,7 +42,7 @@ func TestCreateShardCoordinator(t *testing.T) { t.Run("nil pub key should error", func(t *testing.T) { t.Parallel() - shardC, nodeType, err := CreateShardCoordinator(&testscommon.NodesSetupStub{}, nil, config.PreferencesConfig{}, nil) + shardC, nodeType, err := CreateShardCoordinator(&genesisMocks.NodesSetupStub{}, nil, config.PreferencesConfig{}, nil) require.Equal(t, errErd.ErrNilPublicKey, err) require.Empty(t, nodeType) require.True(t, check.IfNil(shardC)) @@ -49,7 +50,7 @@ func TestCreateShardCoordinator(t *testing.T) { t.Run("nil logger should error", func(t *testing.T) { t.Parallel() - shardC, nodeType, err := CreateShardCoordinator(&testscommon.NodesSetupStub{}, &cryptoMocks.PublicKeyStub{}, config.PreferencesConfig{}, nil) + shardC, nodeType, err := CreateShardCoordinator(&genesisMocks.NodesSetupStub{}, &cryptoMocks.PublicKeyStub{}, config.PreferencesConfig{}, nil) require.Equal(t, errErd.ErrNilLogger, err) require.Empty(t, nodeType) require.True(t, check.IfNil(shardC)) @@ -58,7 +59,7 @@ func TestCreateShardCoordinator(t *testing.T) { t.Parallel() shardC, nodeType, err := CreateShardCoordinator( - &testscommon.NodesSetupStub{}, + &genesisMocks.NodesSetupStub{}, &cryptoMocks.PublicKeyStub{ ToByteArrayStub: func() ([]byte, error) { return nil, expectedErr @@ -75,7 +76,7 @@ func TestCreateShardCoordinator(t *testing.T) { t.Parallel() shardC, nodeType, err := CreateShardCoordinator( - &testscommon.NodesSetupStub{}, + &genesisMocks.NodesSetupStub{}, &cryptoMocks.PublicKeyStub{ ToByteArrayStub: func() ([]byte, error) { return nil, sharding.ErrPublicKeyNotFoundInGenesis // force this error here @@ -95,7 +96,7 @@ func TestCreateShardCoordinator(t *testing.T) { counter := 0 shardC, nodeType, err := CreateShardCoordinator( - &testscommon.NodesSetupStub{ + &genesisMocks.NodesSetupStub{ GetShardIDForPubKeyCalled: func(pubKey []byte) (uint32, error) { return 0, sharding.ErrPublicKeyNotFoundInGenesis // force this error }, @@ -123,7 +124,7 @@ func TestCreateShardCoordinator(t *testing.T) { t.Parallel() shardC, nodeType, err := CreateShardCoordinator( - &testscommon.NodesSetupStub{ + &genesisMocks.NodesSetupStub{ GetShardIDForPubKeyCalled: func(pubKey []byte) (uint32, error) { return 0, sharding.ErrPublicKeyNotFoundInGenesis // force this error }, @@ -149,7 +150,7 @@ func TestCreateShardCoordinator(t *testing.T) { t.Parallel() shardC, nodeType, err := CreateShardCoordinator( - &testscommon.NodesSetupStub{ + &genesisMocks.NodesSetupStub{ GetShardIDForPubKeyCalled: func(pubKey []byte) (uint32, error) { return core.MetachainShardId, nil }, @@ -169,7 +170,7 @@ func TestCreateShardCoordinator(t *testing.T) { t.Parallel() shardC, nodeType, err := CreateShardCoordinator( - &testscommon.NodesSetupStub{ + &genesisMocks.NodesSetupStub{ GetShardIDForPubKeyCalled: func(pubKey []byte) (uint32, error) { return core.MetachainShardId, nil }, @@ -192,7 +193,7 @@ func TestCreateNodesCoordinator(t *testing.T) { nodesC, err := CreateNodesCoordinator( nil, - &testscommon.NodesSetupStub{}, + &genesisMocks.NodesSetupStub{}, config.PreferencesConfig{}, &mock.EpochStartNotifierStub{}, &cryptoMocks.PublicKeyStub{}, @@ 
-208,6 +209,7 @@ func TestCreateNodesCoordinator(t *testing.T) { &nodeTypeProviderMock.NodeTypeProviderStub{}, &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, &validatorInfoCacherMocks.ValidatorInfoCacherStub{}, + &shardingMocks.NodesCoordinatorRegistryFactoryMock{}, ) require.Equal(t, errErd.ErrNilShuffleOutCloser, err) require.True(t, check.IfNil(nodesC)) @@ -233,6 +235,7 @@ func TestCreateNodesCoordinator(t *testing.T) { &nodeTypeProviderMock.NodeTypeProviderStub{}, &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, &validatorInfoCacherMocks.ValidatorInfoCacherStub{}, + &shardingMocks.NodesCoordinatorRegistryFactoryMock{}, ) require.Equal(t, errErd.ErrNilGenesisNodesSetupHandler, err) require.True(t, check.IfNil(nodesC)) @@ -242,7 +245,7 @@ func TestCreateNodesCoordinator(t *testing.T) { nodesC, err := CreateNodesCoordinator( &testscommon.ShuffleOutCloserStub{}, - &testscommon.NodesSetupStub{}, + &genesisMocks.NodesSetupStub{}, config.PreferencesConfig{}, nil, &cryptoMocks.PublicKeyStub{}, @@ -258,6 +261,7 @@ func TestCreateNodesCoordinator(t *testing.T) { &nodeTypeProviderMock.NodeTypeProviderStub{}, &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, &validatorInfoCacherMocks.ValidatorInfoCacherStub{}, + &shardingMocks.NodesCoordinatorRegistryFactoryMock{}, ) require.Equal(t, errErd.ErrNilEpochStartNotifier, err) require.True(t, check.IfNil(nodesC)) @@ -267,7 +271,7 @@ func TestCreateNodesCoordinator(t *testing.T) { nodesC, err := CreateNodesCoordinator( &testscommon.ShuffleOutCloserStub{}, - &testscommon.NodesSetupStub{}, + &genesisMocks.NodesSetupStub{}, config.PreferencesConfig{}, &mock.EpochStartNotifierStub{}, nil, @@ -283,6 +287,7 @@ func TestCreateNodesCoordinator(t *testing.T) { &nodeTypeProviderMock.NodeTypeProviderStub{}, &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, &validatorInfoCacherMocks.ValidatorInfoCacherStub{}, + &shardingMocks.NodesCoordinatorRegistryFactoryMock{}, ) require.Equal(t, errErd.ErrNilPublicKey, err) require.True(t, check.IfNil(nodesC)) @@ -292,7 +297,7 @@ func TestCreateNodesCoordinator(t *testing.T) { nodesC, err := CreateNodesCoordinator( &testscommon.ShuffleOutCloserStub{}, - &testscommon.NodesSetupStub{}, + &genesisMocks.NodesSetupStub{}, config.PreferencesConfig{}, &mock.EpochStartNotifierStub{}, &cryptoMocks.PublicKeyStub{}, @@ -308,6 +313,7 @@ func TestCreateNodesCoordinator(t *testing.T) { &nodeTypeProviderMock.NodeTypeProviderStub{}, &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, &validatorInfoCacherMocks.ValidatorInfoCacherStub{}, + &shardingMocks.NodesCoordinatorRegistryFactoryMock{}, ) require.Equal(t, errErd.ErrNilBootstrapParamsHandler, err) require.True(t, check.IfNil(nodesC)) @@ -317,7 +323,7 @@ func TestCreateNodesCoordinator(t *testing.T) { nodesC, err := CreateNodesCoordinator( &testscommon.ShuffleOutCloserStub{}, - &testscommon.NodesSetupStub{}, + &genesisMocks.NodesSetupStub{}, config.PreferencesConfig{}, &mock.EpochStartNotifierStub{}, &cryptoMocks.PublicKeyStub{}, @@ -333,6 +339,7 @@ func TestCreateNodesCoordinator(t *testing.T) { &nodeTypeProviderMock.NodeTypeProviderStub{}, &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, &validatorInfoCacherMocks.ValidatorInfoCacherStub{}, + &shardingMocks.NodesCoordinatorRegistryFactoryMock{}, ) require.Equal(t, nodesCoordinator.ErrNilNodeStopChannel, err) require.True(t, check.IfNil(nodesC)) @@ -342,7 +349,7 @@ func TestCreateNodesCoordinator(t *testing.T) { nodesC, err := CreateNodesCoordinator( &testscommon.ShuffleOutCloserStub{}, - &testscommon.NodesSetupStub{}, + 
&genesisMocks.NodesSetupStub{}, config.PreferencesConfig{ DestinationShardAsObserver: "", }, @@ -360,6 +367,7 @@ func TestCreateNodesCoordinator(t *testing.T) { &nodeTypeProviderMock.NodeTypeProviderStub{}, &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, &validatorInfoCacherMocks.ValidatorInfoCacherStub{}, + &shardingMocks.NodesCoordinatorRegistryFactoryMock{}, ) require.NotNil(t, err) require.True(t, check.IfNil(nodesC)) @@ -369,7 +377,7 @@ func TestCreateNodesCoordinator(t *testing.T) { nodesC, err := CreateNodesCoordinator( &testscommon.ShuffleOutCloserStub{}, - &testscommon.NodesSetupStub{}, + &genesisMocks.NodesSetupStub{}, config.PreferencesConfig{ DestinationShardAsObserver: "disabled", }, @@ -391,6 +399,7 @@ func TestCreateNodesCoordinator(t *testing.T) { &nodeTypeProviderMock.NodeTypeProviderStub{}, &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, &validatorInfoCacherMocks.ValidatorInfoCacherStub{}, + &shardingMocks.NodesCoordinatorRegistryFactoryMock{}, ) require.True(t, errors.Is(err, expectedErr)) require.True(t, check.IfNil(nodesC)) @@ -400,7 +409,7 @@ func TestCreateNodesCoordinator(t *testing.T) { nodesC, err := CreateNodesCoordinator( &testscommon.ShuffleOutCloserStub{}, - &testscommon.NodesSetupStub{}, + &genesisMocks.NodesSetupStub{}, config.PreferencesConfig{ DestinationShardAsObserver: "0", }, @@ -422,6 +431,7 @@ func TestCreateNodesCoordinator(t *testing.T) { &nodeTypeProviderMock.NodeTypeProviderStub{}, &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, &validatorInfoCacherMocks.ValidatorInfoCacherStub{}, + &shardingMocks.NodesCoordinatorRegistryFactoryMock{}, ) require.True(t, errors.Is(err, expectedErr)) require.True(t, check.IfNil(nodesC)) @@ -431,7 +441,7 @@ func TestCreateNodesCoordinator(t *testing.T) { nodesC, err := CreateNodesCoordinator( &testscommon.ShuffleOutCloserStub{}, - &testscommon.NodesSetupStub{}, + &genesisMocks.NodesSetupStub{}, config.PreferencesConfig{ DestinationShardAsObserver: "0", }, @@ -453,6 +463,7 @@ func TestCreateNodesCoordinator(t *testing.T) { &nodeTypeProviderMock.NodeTypeProviderStub{}, &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, &validatorInfoCacherMocks.ValidatorInfoCacherStub{}, + &shardingMocks.NodesCoordinatorRegistryFactoryMock{}, ) require.NotNil(t, err) require.True(t, check.IfNil(nodesC)) @@ -462,7 +473,7 @@ func TestCreateNodesCoordinator(t *testing.T) { nodesC, err := CreateNodesCoordinator( &testscommon.ShuffleOutCloserStub{}, - &testscommon.NodesSetupStub{}, + &genesisMocks.NodesSetupStub{}, config.PreferencesConfig{ DestinationShardAsObserver: "0", }, @@ -484,6 +495,7 @@ func TestCreateNodesCoordinator(t *testing.T) { &nodeTypeProviderMock.NodeTypeProviderStub{}, &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, &validatorInfoCacherMocks.ValidatorInfoCacherStub{}, + &shardingMocks.NodesCoordinatorRegistryFactoryMock{}, ) require.NotNil(t, err) require.True(t, check.IfNil(nodesC)) @@ -493,7 +505,7 @@ func TestCreateNodesCoordinator(t *testing.T) { nodesC, err := CreateNodesCoordinator( &testscommon.ShuffleOutCloserStub{}, - &testscommon.NodesSetupStub{}, + &genesisMocks.NodesSetupStub{}, config.PreferencesConfig{ DestinationShardAsObserver: "0", }, @@ -510,7 +522,7 @@ func TestCreateNodesCoordinator(t *testing.T) { &shardingMocks.NodeShufflerMock{}, 0, &bootstrapMocks.BootstrapParamsHandlerMock{ - NodesConfigCalled: func() *nodesCoordinator.NodesCoordinatorRegistry { + NodesConfigCalled: func() nodesCoordinator.NodesCoordinatorRegistryHandler { return &nodesCoordinator.NodesCoordinatorRegistry{ 
EpochsConfig: map[string]*nodesCoordinator.EpochValidators{ "0": { @@ -536,6 +548,7 @@ func TestCreateNodesCoordinator(t *testing.T) { &nodeTypeProviderMock.NodeTypeProviderStub{}, &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, &validatorInfoCacherMocks.ValidatorInfoCacherStub{}, + &shardingMocks.NodesCoordinatorRegistryFactoryMock{}, ) require.NotNil(t, err) require.True(t, check.IfNil(nodesC)) @@ -545,7 +558,7 @@ func TestCreateNodesCoordinator(t *testing.T) { nodesC, err := CreateNodesCoordinator( &testscommon.ShuffleOutCloserStub{}, - &testscommon.NodesSetupStub{}, + &genesisMocks.NodesSetupStub{}, config.PreferencesConfig{ DestinationShardAsObserver: "disabled", }, @@ -562,7 +575,7 @@ func TestCreateNodesCoordinator(t *testing.T) { &shardingMocks.NodeShufflerMock{}, 0, &bootstrapMocks.BootstrapParamsHandlerMock{ - NodesConfigCalled: func() *nodesCoordinator.NodesCoordinatorRegistry { + NodesConfigCalled: func() nodesCoordinator.NodesCoordinatorRegistryHandler { return &nodesCoordinator.NodesCoordinatorRegistry{ EpochsConfig: map[string]*nodesCoordinator.EpochValidators{ "0": { @@ -588,6 +601,7 @@ func TestCreateNodesCoordinator(t *testing.T) { &nodeTypeProviderMock.NodeTypeProviderStub{}, &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, &validatorInfoCacherMocks.ValidatorInfoCacherStub{}, + &shardingMocks.NodesCoordinatorRegistryFactoryMock{}, ) require.Nil(t, err) require.False(t, check.IfNil(nodesC)) @@ -608,7 +622,7 @@ func TestCreateNodesShuffleOut(t *testing.T) { t.Parallel() shuffler, err := CreateNodesShuffleOut( - &testscommon.NodesSetupStub{}, + &genesisMocks.NodesSetupStub{}, config.EpochStartConfig{ MaxShuffledOutRestartThreshold: 5.0, }, @@ -621,7 +635,7 @@ func TestCreateNodesShuffleOut(t *testing.T) { t.Parallel() shuffler, err := CreateNodesShuffleOut( - &testscommon.NodesSetupStub{}, + &genesisMocks.NodesSetupStub{}, config.EpochStartConfig{ MinShuffledOutRestartThreshold: 5.0, }, @@ -634,7 +648,7 @@ func TestCreateNodesShuffleOut(t *testing.T) { t.Parallel() shuffler, err := CreateNodesShuffleOut( - &testscommon.NodesSetupStub{}, + &genesisMocks.NodesSetupStub{}, config.EpochStartConfig{}, nil, // force NewShuffleOutCloser to fail ) @@ -645,7 +659,7 @@ func TestCreateNodesShuffleOut(t *testing.T) { t.Parallel() shuffler, err := CreateNodesShuffleOut( - &testscommon.NodesSetupStub{ + &genesisMocks.NodesSetupStub{ GetRoundDurationCalled: func() uint64 { return 4000 }, diff --git a/factory/consensus/consensusComponents_test.go b/factory/consensus/consensusComponents_test.go index f3ffa602ba1..a7b00e6a347 100644 --- a/factory/consensus/consensusComponents_test.go +++ b/factory/consensus/consensusComponents_test.go @@ -29,6 +29,7 @@ import ( "github.com/multiversx/mx-chain-go/testscommon/epochNotifier" factoryMocks "github.com/multiversx/mx-chain-go/testscommon/factory" "github.com/multiversx/mx-chain-go/testscommon/genericMocks" + "github.com/multiversx/mx-chain-go/testscommon/genesisMocks" "github.com/multiversx/mx-chain-go/testscommon/marshallerMock" outportMocks "github.com/multiversx/mx-chain-go/testscommon/outport" "github.com/multiversx/mx-chain-go/testscommon/p2pmocks" @@ -57,7 +58,7 @@ func createMockConsensusComponentsFactoryArgs() consensusComp.ConsensusComponent AlarmSch: &testscommon.AlarmSchedulerStub{}, NtpSyncTimer: &testscommon.SyncTimerStub{}, GenesisBlockTime: time.Time{}, - NodesConfig: &testscommon.NodesSetupStub{ + NodesConfig: &genesisMocks.NodesSetupStub{ GetShardConsensusGroupSizeCalled: func() uint32 { return 2 }, diff --git 
a/factory/core/coreComponents.go b/factory/core/coreComponents.go index f04afe47d61..247ee7e05f8 100644 --- a/factory/core/coreComponents.go +++ b/factory/core/coreComponents.go @@ -33,7 +33,6 @@ import ( "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/process/economics" "github.com/multiversx/mx-chain-go/process/rating" - "github.com/multiversx/mx-chain-go/process/smartContract" "github.com/multiversx/mx-chain-go/sharding" "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" "github.com/multiversx/mx-chain-go/statusHandler" @@ -244,35 +243,15 @@ func (ccf *coreComponentsFactory) Create() (*coreComponents, error) { } wasmVMChangeLocker := &sync.RWMutex{} - gasScheduleConfigurationFolderName := ccf.configPathsHolder.GasScheduleDirectoryName - argsGasScheduleNotifier := forking.ArgsNewGasScheduleNotifier{ - GasScheduleConfig: ccf.epochConfig.GasSchedule, - ConfigDir: gasScheduleConfigurationFolderName, - EpochNotifier: epochNotifier, - WasmVMChangeLocker: wasmVMChangeLocker, - } - gasScheduleNotifier, err := forking.NewGasScheduleNotifier(argsGasScheduleNotifier) - if err != nil { - return nil, err - } - - builtInCostHandler, err := economics.NewBuiltInFunctionsCost(&economics.ArgsBuiltInFunctionCost{ - ArgsParser: smartContract.NewArgumentParser(), - GasSchedule: gasScheduleNotifier, - }) - if err != nil { - return nil, err - } txVersionChecker := versioning.NewTxVersionChecker(ccf.config.GeneralSettings.MinTransactionVersion) log.Trace("creating economics data components") argsNewEconomicsData := economics.ArgsNewEconomicsData{ - Economics: &ccf.economicsConfig, - EpochNotifier: epochNotifier, - EnableEpochsHandler: enableEpochsHandler, - BuiltInFunctionsCostHandler: builtInCostHandler, - TxVersionChecker: txVersionChecker, + Economics: &ccf.economicsConfig, + EpochNotifier: epochNotifier, + EnableEpochsHandler: enableEpochsHandler, + TxVersionChecker: txVersionChecker, } economicsData, err := economics.NewEconomicsData(argsNewEconomicsData) if err != nil { @@ -311,6 +290,7 @@ func (ccf *coreComponentsFactory) Create() (*coreComponents, error) { ShuffleBetweenShards: true, MaxNodesEnableConfig: ccf.epochConfig.EnableEpochs.MaxNodesChangeEnableEpoch, EnableEpochsHandler: enableEpochsHandler, + EnableEpochs: ccf.epochConfig.EnableEpochs, } nodesShuffler, err := nodesCoordinator.NewHashValidatorsShuffler(argsNodesShuffler) diff --git a/factory/core/coreComponents_test.go b/factory/core/coreComponents_test.go index 79aba4a2532..d88a8a2284e 100644 --- a/factory/core/coreComponents_test.go +++ b/factory/core/coreComponents_test.go @@ -248,18 +248,6 @@ func TestCoreComponentsFactory_CreateCoreComponentsInvalidRoundConfigShouldErr(t require.NotNil(t, err) } -func TestCoreComponentsFactory_CreateCoreComponentsInvalidEpochConfigShouldErr(t *testing.T) { - t.Parallel() - - args := componentsMock.GetCoreArgs() - args.EpochConfig = config.EpochConfig{} - ccf, _ := coreComp.NewCoreComponentsFactory(args) - - cc, err := ccf.Create() - require.Nil(t, cc) - require.NotNil(t, err) -} - func TestCoreComponentsFactory_CreateCoreComponentsInvalidGenesisMaxNumberOfShardsShouldErr(t *testing.T) { t.Parallel() diff --git a/factory/disabled/auctionListDisplayer.go b/factory/disabled/auctionListDisplayer.go new file mode 100644 index 00000000000..ec2d2f0774b --- /dev/null +++ b/factory/disabled/auctionListDisplayer.go @@ -0,0 +1,35 @@ +package disabled + +import ( + "github.com/multiversx/mx-chain-go/epochStart/metachain" + "github.com/multiversx/mx-chain-go/state" +) + +type 
auctionListDisplayer struct { +} + +// NewDisabledAuctionListDisplayer creates a disabled auction list displayer +func NewDisabledAuctionListDisplayer() *auctionListDisplayer { + return &auctionListDisplayer{} +} + +// DisplayOwnersData does nothing +func (ald *auctionListDisplayer) DisplayOwnersData(_ map[string]*metachain.OwnerAuctionData) { +} + +// DisplayOwnersSelectedNodes does nothing +func (ald *auctionListDisplayer) DisplayOwnersSelectedNodes(_ map[string]*metachain.OwnerAuctionData) { +} + +// DisplayAuctionList does nothing +func (ald *auctionListDisplayer) DisplayAuctionList( + _ []state.ValidatorInfoHandler, + _ map[string]*metachain.OwnerAuctionData, + _ uint32, +) { +} + +// IsInterfaceNil checks if the underlying pointer is nil +func (ald *auctionListDisplayer) IsInterfaceNil() bool { + return ald == nil +} diff --git a/factory/disabled/auctionListSelector.go b/factory/disabled/auctionListSelector.go new file mode 100644 index 00000000000..281102a4a7f --- /dev/null +++ b/factory/disabled/auctionListSelector.go @@ -0,0 +1,21 @@ +package disabled + +import "github.com/multiversx/mx-chain-go/state" + +type auctionListSelector struct { +} + +// NewDisabledAuctionListSelector returns a new instance of a disabled auction list selector +func NewDisabledAuctionListSelector() *auctionListSelector { + return &auctionListSelector{} +} + +// SelectNodesFromAuctionList returns nil +func (als *auctionListSelector) SelectNodesFromAuctionList(state.ShardValidatorsInfoMapHandler, []byte) error { + return nil +} + +// IsInterfaceNil returns true if the underlying pointer is nil +func (als *auctionListSelector) IsInterfaceNil() bool { + return als == nil +} diff --git a/factory/disabled/stakingDataProvider.go b/factory/disabled/stakingDataProvider.go new file mode 100644 index 00000000000..f24b7b735b2 --- /dev/null +++ b/factory/disabled/stakingDataProvider.go @@ -0,0 +1,38 @@ +package disabled + +import ( + "github.com/multiversx/mx-chain-go/epochStart" + "github.com/multiversx/mx-chain-go/state" +) + +type stakingDataProvider struct { +} + +// NewDisabledStakingDataProvider returns a new instance of stakingDataProvider +func NewDisabledStakingDataProvider() *stakingDataProvider { + return &stakingDataProvider{} +} + +// FillValidatorInfo returns a nil error +func (s *stakingDataProvider) FillValidatorInfo(state.ValidatorInfoHandler) error { + return nil +} + +// ComputeUnQualifiedNodes returns nil values +func (s *stakingDataProvider) ComputeUnQualifiedNodes(_ state.ShardValidatorsInfoMapHandler) ([][]byte, map[string][][]byte, error) { + return nil, nil, nil +} + +// GetOwnersData returns nil +func (s *stakingDataProvider) GetOwnersData() map[string]*epochStart.OwnerData { + return nil +} + +// Clean does nothing +func (s *stakingDataProvider) Clean() { +} + +// IsInterfaceNil returns true if there is no value under the interface +func (s *stakingDataProvider) IsInterfaceNil() bool { + return s == nil +} diff --git a/factory/heartbeat/heartbeatV2Components.go b/factory/heartbeat/heartbeatV2Components.go index a551f22e869..97164a7240e 100644 --- a/factory/heartbeat/heartbeatV2Components.go +++ b/factory/heartbeat/heartbeatV2Components.go @@ -272,32 +272,6 @@ func (hcf *heartbeatV2ComponentsFactory) Create() (*heartbeatV2Components, error return nil, err } - argsMainCrossShardPeerTopicNotifier := monitor.ArgsCrossShardPeerTopicNotifier{ - ShardCoordinator: hcf.processComponents.ShardCoordinator(), - PeerShardMapper: hcf.processComponents.PeerShardMapper(), - } - 
mainCrossShardPeerTopicNotifier, err := monitor.NewCrossShardPeerTopicNotifier(argsMainCrossShardPeerTopicNotifier) - if err != nil { - return nil, err - } - err = hcf.networkComponents.NetworkMessenger().AddPeerTopicNotifier(mainCrossShardPeerTopicNotifier) - if err != nil { - return nil, err - } - - argsFullArchiveCrossShardPeerTopicNotifier := monitor.ArgsCrossShardPeerTopicNotifier{ - ShardCoordinator: hcf.processComponents.ShardCoordinator(), - PeerShardMapper: hcf.processComponents.FullArchivePeerShardMapper(), - } - fullArchiveCrossShardPeerTopicNotifier, err := monitor.NewCrossShardPeerTopicNotifier(argsFullArchiveCrossShardPeerTopicNotifier) - if err != nil { - return nil, err - } - err = hcf.networkComponents.FullArchiveNetworkMessenger().AddPeerTopicNotifier(fullArchiveCrossShardPeerTopicNotifier) - if err != nil { - return nil, err - } - return &heartbeatV2Components{ sender: heartbeatV2Sender, peerAuthRequestsProcessor: paRequestsProcessor, diff --git a/factory/heartbeat/heartbeatV2Components_test.go b/factory/heartbeat/heartbeatV2Components_test.go index f013294a7d1..6b5088cab5b 100644 --- a/factory/heartbeat/heartbeatV2Components_test.go +++ b/factory/heartbeat/heartbeatV2Components_test.go @@ -11,7 +11,6 @@ import ( errorsMx "github.com/multiversx/mx-chain-go/errors" heartbeatComp "github.com/multiversx/mx-chain-go/factory/heartbeat" testsMocks "github.com/multiversx/mx-chain-go/integrationTests/mock" - "github.com/multiversx/mx-chain-go/p2p" "github.com/multiversx/mx-chain-go/sharding" "github.com/multiversx/mx-chain-go/storage" "github.com/multiversx/mx-chain-go/testscommon" @@ -504,26 +503,6 @@ func TestHeartbeatV2Components_Create(t *testing.T) { assert.Nil(t, hc) assert.Error(t, err) }) - t.Run("AddPeerTopicNotifier fails should error", func(t *testing.T) { - t.Parallel() - - args := createMockHeartbeatV2ComponentsFactoryArgs() - args.NetworkComponents = &testsMocks.NetworkComponentsStub{ - Messenger: &p2pmocks.MessengerStub{ - AddPeerTopicNotifierCalled: func(notifier p2p.PeerTopicNotifier) error { - return expectedErr - }, - }, - FullArchiveNetworkMessengerField: &p2pmocks.MessengerStub{}, - } - hcf, err := heartbeatComp.NewHeartbeatV2ComponentsFactory(args) - assert.NotNil(t, hcf) - assert.NoError(t, err) - - hc, err := hcf.Create() - assert.Nil(t, hc) - assert.Equal(t, expectedErr, err) - }) t.Run("should work", func(t *testing.T) { t.Parallel() diff --git a/factory/interface.go b/factory/interface.go index ea021d17752..ede9f39089b 100644 --- a/factory/interface.go +++ b/factory/interface.go @@ -436,7 +436,7 @@ type BootstrapParamsHolder interface { Epoch() uint32 SelfShardID() uint32 NumOfShards() uint32 - NodesConfig() *nodesCoordinator.NodesCoordinatorRegistry + NodesConfig() nodesCoordinator.NodesCoordinatorRegistryHandler IsInterfaceNil() bool } @@ -457,6 +457,7 @@ type BootstrapComponentsHolder interface { HeaderVersionHandler() factory.HeaderVersionHandler HeaderIntegrityVerifier() factory.HeaderIntegrityVerifierHandler GuardedAccountHandler() process.GuardedAccountHandler + NodesCoordinatorRegistryFactory() nodesCoordinator.NodesCoordinatorRegistryFactory IsInterfaceNil() bool } diff --git a/factory/mock/nodesSetupStub.go b/factory/mock/nodesSetupStub.go deleted file mode 100644 index 835ad9fc0d8..00000000000 --- a/factory/mock/nodesSetupStub.go +++ /dev/null @@ -1,142 +0,0 @@ -package mock - -import "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" - -// NodesSetupStub - -type NodesSetupStub struct { - InitialNodesInfoForShardCalled 
func(shardId uint32) ([]nodesCoordinator.GenesisNodeInfoHandler, []nodesCoordinator.GenesisNodeInfoHandler, error) - InitialNodesInfoCalled func() (map[uint32][]nodesCoordinator.GenesisNodeInfoHandler, map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) - GetStartTimeCalled func() int64 - GetRoundDurationCalled func() uint64 - GetChainIdCalled func() string - GetMinTransactionVersionCalled func() uint32 - GetShardConsensusGroupSizeCalled func() uint32 - GetMetaConsensusGroupSizeCalled func() uint32 - NumberOfShardsCalled func() uint32 - MinNumberOfNodesCalled func() uint32 - MinNumberOfShardNodesCalled func() uint32 - MinNumberOfMetaNodesCalled func() uint32 - GetHysteresisCalled func() float32 - GetAdaptivityCalled func() bool -} - -// MinNumberOfShardNodes - -func (n *NodesSetupStub) MinNumberOfShardNodes() uint32 { - if n.MinNumberOfShardNodesCalled != nil { - return n.MinNumberOfShardNodesCalled() - } - - return 1 -} - -// MinNumberOfMetaNodes - -func (n *NodesSetupStub) MinNumberOfMetaNodes() uint32 { - if n.MinNumberOfMetaNodesCalled != nil { - return n.MinNumberOfMetaNodesCalled() - } - - return 1 -} - -// GetHysteresis - -func (n *NodesSetupStub) GetHysteresis() float32 { - if n.GetHysteresisCalled != nil { - return n.GetHysteresisCalled() - } - - return 0 -} - -// GetAdaptivity - -func (n *NodesSetupStub) GetAdaptivity() bool { - if n.GetAdaptivityCalled != nil { - return n.GetAdaptivityCalled() - } - - return false -} - -// MinNumberOfNodes - -func (n *NodesSetupStub) MinNumberOfNodes() uint32 { - if n.MinNumberOfNodesCalled != nil { - return n.MinNumberOfNodesCalled() - } - return 2 -} - -// GetStartTime - -func (n *NodesSetupStub) GetStartTime() int64 { - if n.GetStartTimeCalled != nil { - return n.GetStartTimeCalled() - } - return 0 -} - -// GetRoundDuration - -func (n *NodesSetupStub) GetRoundDuration() uint64 { - if n.GetRoundDurationCalled != nil { - return n.GetRoundDurationCalled() - } - return 0 -} - -// GetChainId - -func (n *NodesSetupStub) GetChainId() string { - if n.GetChainIdCalled != nil { - return n.GetChainIdCalled() - } - return "chainID" -} - -// GetMinTransactionVersion - -func (n *NodesSetupStub) GetMinTransactionVersion() uint32 { - if n.GetMinTransactionVersionCalled != nil { - return n.GetMinTransactionVersionCalled() - } - return 1 -} - -// GetShardConsensusGroupSize - -func (n *NodesSetupStub) GetShardConsensusGroupSize() uint32 { - if n.GetShardConsensusGroupSizeCalled != nil { - return n.GetShardConsensusGroupSizeCalled() - } - return 0 -} - -// GetMetaConsensusGroupSize - -func (n *NodesSetupStub) GetMetaConsensusGroupSize() uint32 { - if n.GetMetaConsensusGroupSizeCalled != nil { - return n.GetMetaConsensusGroupSizeCalled() - } - return 0 -} - -// NumberOfShards - -func (n *NodesSetupStub) NumberOfShards() uint32 { - if n.NumberOfShardsCalled != nil { - return n.NumberOfShardsCalled() - } - return 0 -} - -// InitialNodesInfoForShard - -func (n *NodesSetupStub) InitialNodesInfoForShard(shardId uint32) ([]nodesCoordinator.GenesisNodeInfoHandler, []nodesCoordinator.GenesisNodeInfoHandler, error) { - if n.InitialNodesInfoForShardCalled != nil { - return n.InitialNodesInfoForShardCalled(shardId) - } - return nil, nil, nil -} - -// InitialNodesInfo - -func (n *NodesSetupStub) InitialNodesInfo() (map[uint32][]nodesCoordinator.GenesisNodeInfoHandler, map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) { - if n.InitialNodesInfoCalled != nil { - return n.InitialNodesInfoCalled() - } - return nil, nil -} - -// IsInterfaceNil - -func (n *NodesSetupStub) 
IsInterfaceNil() bool { - return n == nil -} diff --git a/factory/mock/validatorStatisticsProcessorStub.go b/factory/mock/validatorStatisticsProcessorStub.go deleted file mode 100644 index 1cb51e79f41..00000000000 --- a/factory/mock/validatorStatisticsProcessorStub.go +++ /dev/null @@ -1,130 +0,0 @@ -package mock - -import ( - "github.com/multiversx/mx-chain-core-go/data" - "github.com/multiversx/mx-chain-go/state" -) - -// ValidatorStatisticsProcessorStub - -type ValidatorStatisticsProcessorStub struct { - UpdatePeerStateCalled func(header data.MetaHeaderHandler) ([]byte, error) - RevertPeerStateCalled func(header data.MetaHeaderHandler) error - GetPeerAccountCalled func(address []byte) (state.PeerAccountHandler, error) - RootHashCalled func() ([]byte, error) - ResetValidatorStatisticsAtNewEpochCalled func(vInfos map[uint32][]*state.ValidatorInfo) error - GetValidatorInfoForRootHashCalled func(rootHash []byte) (map[uint32][]*state.ValidatorInfo, error) - ProcessRatingsEndOfEpochCalled func(validatorInfos map[uint32][]*state.ValidatorInfo, epoch uint32) error - ProcessCalled func(validatorInfo data.ShardValidatorInfoHandler) error - CommitCalled func() ([]byte, error) - PeerAccountToValidatorInfoCalled func(peerAccount state.PeerAccountHandler) *state.ValidatorInfo - SaveNodesCoordinatorUpdatesCalled func(epoch uint32) (bool, error) -} - -// PeerAccountToValidatorInfo - -func (vsp *ValidatorStatisticsProcessorStub) PeerAccountToValidatorInfo(peerAccount state.PeerAccountHandler) *state.ValidatorInfo { - if vsp.PeerAccountToValidatorInfoCalled != nil { - return vsp.PeerAccountToValidatorInfoCalled(peerAccount) - } - return nil -} - -// Process - -func (vsp *ValidatorStatisticsProcessorStub) Process(validatorInfo data.ShardValidatorInfoHandler) error { - if vsp.ProcessCalled != nil { - return vsp.ProcessCalled(validatorInfo) - } - - return nil -} - -// Commit - -func (vsp *ValidatorStatisticsProcessorStub) Commit() ([]byte, error) { - if vsp.CommitCalled != nil { - return vsp.CommitCalled() - } - - return nil, nil -} - -// ResetValidatorStatisticsAtNewEpoch - -func (vsp *ValidatorStatisticsProcessorStub) ResetValidatorStatisticsAtNewEpoch(vInfos map[uint32][]*state.ValidatorInfo) error { - if vsp.ResetValidatorStatisticsAtNewEpochCalled != nil { - return vsp.ResetValidatorStatisticsAtNewEpochCalled(vInfos) - } - return nil -} - -// GetValidatorInfoForRootHash - -func (vsp *ValidatorStatisticsProcessorStub) GetValidatorInfoForRootHash(rootHash []byte) (map[uint32][]*state.ValidatorInfo, error) { - if vsp.GetValidatorInfoForRootHashCalled != nil { - return vsp.GetValidatorInfoForRootHashCalled(rootHash) - } - return nil, nil -} - -// UpdatePeerState - -func (vsp *ValidatorStatisticsProcessorStub) UpdatePeerState(header data.MetaHeaderHandler, _ map[string]data.HeaderHandler) ([]byte, error) { - if vsp.UpdatePeerStateCalled != nil { - return vsp.UpdatePeerStateCalled(header) - } - return nil, nil -} - -// ProcessRatingsEndOfEpoch - -func (vsp *ValidatorStatisticsProcessorStub) ProcessRatingsEndOfEpoch(validatorInfos map[uint32][]*state.ValidatorInfo, epoch uint32) error { - if vsp.ProcessRatingsEndOfEpochCalled != nil { - return vsp.ProcessRatingsEndOfEpochCalled(validatorInfos, epoch) - } - return nil -} - -// RevertPeerState - -func (vsp *ValidatorStatisticsProcessorStub) RevertPeerState(header data.MetaHeaderHandler) error { - if vsp.RevertPeerStateCalled != nil { - return vsp.RevertPeerStateCalled(header) - } - return nil -} - -// RootHash - -func (vsp *ValidatorStatisticsProcessorStub) 
RootHash() ([]byte, error) { - if vsp.RootHashCalled != nil { - return vsp.RootHashCalled() - } - return nil, nil -} - -// SetLastFinalizedRootHash - -func (vsp *ValidatorStatisticsProcessorStub) SetLastFinalizedRootHash(_ []byte) { -} - -// LastFinalizedRootHash - -func (vsp *ValidatorStatisticsProcessorStub) LastFinalizedRootHash() []byte { - return nil -} - -// GetPeerAccount - -func (vsp *ValidatorStatisticsProcessorStub) GetPeerAccount(address []byte) (state.PeerAccountHandler, error) { - if vsp.GetPeerAccountCalled != nil { - return vsp.GetPeerAccountCalled(address) - } - - return nil, nil -} - -// DisplayRatings - -func (vsp *ValidatorStatisticsProcessorStub) DisplayRatings(_ uint32) { -} - -// SaveNodesCoordinatorUpdates - -func (vsp *ValidatorStatisticsProcessorStub) SaveNodesCoordinatorUpdates(epoch uint32) (bool, error) { - if vsp.SaveNodesCoordinatorUpdatesCalled != nil { - return vsp.SaveNodesCoordinatorUpdatesCalled(epoch) - } - return false, nil -} - -// IsInterfaceNil - -func (vsp *ValidatorStatisticsProcessorStub) IsInterfaceNil() bool { - return false -} diff --git a/factory/mock/validatorsProviderStub.go b/factory/mock/validatorsProviderStub.go deleted file mode 100644 index 98ea652340b..00000000000 --- a/factory/mock/validatorsProviderStub.go +++ /dev/null @@ -1,28 +0,0 @@ -package mock - -import ( - "github.com/multiversx/mx-chain-core-go/data/validator" -) - -// ValidatorsProviderStub - -type ValidatorsProviderStub struct { - GetLatestValidatorsCalled func() map[string]*validator.ValidatorStatistics -} - -// GetLatestValidators - -func (vp *ValidatorsProviderStub) GetLatestValidators() map[string]*validator.ValidatorStatistics { - if vp.GetLatestValidatorsCalled != nil { - return vp.GetLatestValidatorsCalled() - } - return nil -} - -// Close - -func (vp *ValidatorsProviderStub) Close() error { - return nil -} - -// IsInterfaceNil - -func (vp *ValidatorsProviderStub) IsInterfaceNil() bool { - return vp == nil -} diff --git a/factory/processing/blockProcessorCreator.go b/factory/processing/blockProcessorCreator.go index 20421924bfc..7db9e20cf7d 100644 --- a/factory/processing/blockProcessorCreator.go +++ b/factory/processing/blockProcessorCreator.go @@ -12,7 +12,9 @@ import ( debugFactory "github.com/multiversx/mx-chain-go/debug/factory" "github.com/multiversx/mx-chain-go/epochStart" metachainEpochStart "github.com/multiversx/mx-chain-go/epochStart/metachain" + "github.com/multiversx/mx-chain-go/epochStart/notifier" mainFactory "github.com/multiversx/mx-chain-go/factory" + factoryDisabled "github.com/multiversx/mx-chain-go/factory/disabled" "github.com/multiversx/mx-chain-go/genesis" "github.com/multiversx/mx-chain-go/outport" processOutport "github.com/multiversx/mx-chain-go/outport/process" @@ -230,11 +232,7 @@ func (pcf *processComponentsFactory) newShardBlockProcessor( return nil, err } - txFeeHandler, err := postprocess.NewFeeAccumulator() - if err != nil { - return nil, err - } - + txFeeHandler := postprocess.NewFeeAccumulator() argsNewScProcessor := scrCommon.ArgsNewSmartContractProcessor{ VmContainer: vmContainer, ArgsParser: argsParser, @@ -452,10 +450,15 @@ func (pcf *processComponentsFactory) newShardBlockProcessor( return nil, err } - return &blockProcessorAndVmFactories{ + blockProcessorComponents := &blockProcessorAndVmFactories{ blockProcessor: blockProcessor, vmFactoryForProcessing: vmFactory, - }, nil + } + + pcf.stakingDataProviderAPI = factoryDisabled.NewDisabledStakingDataProvider() + pcf.auctionListSelectorAPI = 
factoryDisabled.NewDisabledAuctionListSelector() + + return blockProcessorComponents, nil } func (pcf *processComponentsFactory) newMetaBlockProcessor( @@ -556,11 +559,7 @@ func (pcf *processComponentsFactory) newMetaBlockProcessor( return nil, err } - txFeeHandler, err := postprocess.NewFeeAccumulator() - if err != nil { - return nil, err - } - + txFeeHandler := postprocess.NewFeeAccumulator() enableEpochs := pcf.epochConfig.EnableEpochs argsNewScProcessor := scrCommon.ArgsNewSmartContractProcessor{ VmContainer: vmContainer, @@ -760,8 +759,14 @@ func (pcf *processComponentsFactory) newMetaBlockProcessor( return nil, err } + argsStakingDataProvider := metachainEpochStart.StakingDataProviderArgs{ + EnableEpochsHandler: pcf.coreData.EnableEpochsHandler(), + SystemVM: systemVM, + MinNodePrice: pcf.systemSCConfig.StakingSystemSCConfig.GenesisNodePrice, + } + // TODO: in case of changing the minimum node price, make sure to update the staking data provider - stakingDataProvider, err := metachainEpochStart.NewStakingDataProvider(systemVM, pcf.systemSCConfig.StakingSystemSCConfig.GenesisNodePrice) + stakingDataProvider, err := metachainEpochStart.NewStakingDataProvider(argsStakingDataProvider) if err != nil { return nil, err } @@ -776,6 +781,13 @@ func (pcf *processComponentsFactory) newMetaBlockProcessor( return nil, err } + stakingDataProviderAPI, err := metachainEpochStart.NewStakingDataProvider(argsStakingDataProvider) + if err != nil { + return nil, err + } + + pcf.stakingDataProviderAPI = stakingDataProviderAPI + argsEpochRewards := metachainEpochStart.RewardsCreatorProxyArgs{ BaseRewardsCreatorArgs: metachainEpochStart.BaseRewardsCreatorArgs{ ShardCoordinator: pcf.bootstrapComponents.ShardCoordinator(), @@ -867,25 +879,79 @@ func (pcf *processComponentsFactory) newMetaBlockProcessor( "in processComponentsFactory.newMetaBlockProcessor", err) } + maxNodesChangeConfigProvider, err := notifier.NewNodesConfigProvider( + pcf.epochNotifier, + enableEpochs.MaxNodesChangeEnableEpoch, + ) + if err != nil { + return nil, err + } + + argsAuctionListDisplayer := metachainEpochStart.ArgsAuctionListDisplayer{ + TableDisplayHandler: metachainEpochStart.NewTableDisplayer(), + ValidatorPubKeyConverter: pcf.coreData.ValidatorPubKeyConverter(), + AddressPubKeyConverter: pcf.coreData.AddressPubKeyConverter(), + AuctionConfig: pcf.systemSCConfig.SoftAuctionConfig, + Denomination: pcf.economicsConfig.GlobalSettings.Denomination, + } + auctionListDisplayer, err := metachainEpochStart.NewAuctionListDisplayer(argsAuctionListDisplayer) + if err != nil { + return nil, err + } + + argsAuctionListSelector := metachainEpochStart.AuctionListSelectorArgs{ + ShardCoordinator: pcf.bootstrapComponents.ShardCoordinator(), + StakingDataProvider: stakingDataProvider, + MaxNodesChangeConfigProvider: maxNodesChangeConfigProvider, + AuctionListDisplayHandler: auctionListDisplayer, + SoftAuctionConfig: pcf.systemSCConfig.SoftAuctionConfig, + Denomination: pcf.economicsConfig.GlobalSettings.Denomination, + } + auctionListSelector, err := metachainEpochStart.NewAuctionListSelector(argsAuctionListSelector) + if err != nil { + return nil, err + } + + maxNodesChangeConfigProviderAPI, err := notifier.NewNodesConfigProviderAPI(pcf.epochNotifier, pcf.epochConfig.EnableEpochs) + if err != nil { + return nil, err + } + argsAuctionListSelectorAPI := metachainEpochStart.AuctionListSelectorArgs{ + ShardCoordinator: pcf.bootstrapComponents.ShardCoordinator(), + StakingDataProvider: stakingDataProviderAPI, + MaxNodesChangeConfigProvider: 
maxNodesChangeConfigProviderAPI, + SoftAuctionConfig: pcf.systemSCConfig.SoftAuctionConfig, + Denomination: pcf.economicsConfig.GlobalSettings.Denomination, + AuctionListDisplayHandler: factoryDisabled.NewDisabledAuctionListDisplayer(), + } + auctionListSelectorAPI, err := metachainEpochStart.NewAuctionListSelector(argsAuctionListSelectorAPI) + if err != nil { + return nil, err + } + + pcf.auctionListSelectorAPI = auctionListSelectorAPI + argsEpochSystemSC := metachainEpochStart.ArgsNewEpochStartSystemSCProcessing{ - SystemVM: systemVM, - UserAccountsDB: pcf.state.AccountsAdapter(), - PeerAccountsDB: pcf.state.PeerAccounts(), - Marshalizer: pcf.coreData.InternalMarshalizer(), - StartRating: pcf.coreData.RatingsData().StartRating(), - ValidatorInfoCreator: validatorStatisticsProcessor, - EndOfEpochCallerAddress: vm.EndOfEpochAddress, - StakingSCAddress: vm.StakingSCAddress, - ChanceComputer: pcf.coreData.Rater(), - EpochNotifier: pcf.coreData.EpochNotifier(), - GenesisNodesConfig: pcf.coreData.GenesisNodesSetup(), - MaxNodesEnableConfig: enableEpochs.MaxNodesChangeEnableEpoch, - StakingDataProvider: stakingDataProvider, - NodesConfigProvider: pcf.nodesCoordinator, - ShardCoordinator: pcf.bootstrapComponents.ShardCoordinator(), - ESDTOwnerAddressBytes: esdtOwnerAddress, - EnableEpochsHandler: pcf.coreData.EnableEpochsHandler(), + SystemVM: systemVM, + UserAccountsDB: pcf.state.AccountsAdapter(), + PeerAccountsDB: pcf.state.PeerAccounts(), + Marshalizer: pcf.coreData.InternalMarshalizer(), + StartRating: pcf.coreData.RatingsData().StartRating(), + ValidatorInfoCreator: validatorStatisticsProcessor, + EndOfEpochCallerAddress: vm.EndOfEpochAddress, + StakingSCAddress: vm.StakingSCAddress, + ChanceComputer: pcf.coreData.Rater(), + EpochNotifier: pcf.coreData.EpochNotifier(), + GenesisNodesConfig: pcf.coreData.GenesisNodesSetup(), + StakingDataProvider: stakingDataProvider, + NodesConfigProvider: pcf.nodesCoordinator, + ShardCoordinator: pcf.bootstrapComponents.ShardCoordinator(), + ESDTOwnerAddressBytes: esdtOwnerAddress, + EnableEpochsHandler: pcf.coreData.EnableEpochsHandler(), + MaxNodesChangeConfigProvider: maxNodesChangeConfigProvider, + AuctionListSelector: auctionListSelector, } + epochStartSystemSCProcessor, err := metachainEpochStart.NewSystemSCProcessor(argsEpochSystemSC) if err != nil { return nil, err @@ -1075,6 +1141,7 @@ func (pcf *processComponentsFactory) createVMFactoryMeta( ChanceComputer: pcf.coreData.Rater(), ShardCoordinator: pcf.bootstrapComponents.ShardCoordinator(), EnableEpochsHandler: pcf.coreData.EnableEpochsHandler(), + NodesCoordinator: pcf.nodesCoordinator, } return metachain.NewVMContainerFactory(argsNewVMContainer) } diff --git a/factory/processing/blockProcessorCreator_test.go b/factory/processing/blockProcessorCreator_test.go index 7d8267ca8ec..3ecc3432f9e 100644 --- a/factory/processing/blockProcessorCreator_test.go +++ b/factory/processing/blockProcessorCreator_test.go @@ -20,6 +20,7 @@ import ( "github.com/multiversx/mx-chain-go/testscommon" componentsMock "github.com/multiversx/mx-chain-go/testscommon/components" "github.com/multiversx/mx-chain-go/testscommon/hashingMocks" + "github.com/multiversx/mx-chain-go/testscommon/processMocks" stateMock "github.com/multiversx/mx-chain-go/testscommon/state" storageManager "github.com/multiversx/mx-chain-go/testscommon/storage" trieMock "github.com/multiversx/mx-chain-go/testscommon/trie" @@ -41,10 +42,10 @@ func Test_newBlockProcessorCreatorForShard(t *testing.T) { bp, err := pcf.NewBlockProcessor( 
&testscommon.RequestHandlerStub{}, - &mock.ForkDetectorStub{}, + &processMocks.ForkDetectorStub{}, &mock.EpochStartTriggerStub{}, &mock.BoostrapStorerStub{}, - &mock.ValidatorStatisticsProcessorStub{}, + &testscommon.ValidatorStatisticsProcessorStub{}, &mock.HeaderValidatorStub{}, &mock.BlockTrackerStub{}, &mock.PendingMiniBlocksHandlerStub{}, @@ -167,10 +168,10 @@ func Test_newBlockProcessorCreatorForMeta(t *testing.T) { bp, err := pcf.NewBlockProcessor( &testscommon.RequestHandlerStub{}, - &mock.ForkDetectorStub{}, + &processMocks.ForkDetectorStub{}, &mock.EpochStartTriggerStub{}, &mock.BoostrapStorerStub{}, - &mock.ValidatorStatisticsProcessorStub{}, + &testscommon.ValidatorStatisticsProcessorStub{}, &mock.HeaderValidatorStub{}, &mock.BlockTrackerStub{}, &mock.PendingMiniBlocksHandlerStub{}, diff --git a/factory/processing/processComponents.go b/factory/processing/processComponents.go index e6896dd975c..72d75c69dc3 100644 --- a/factory/processing/processComponents.go +++ b/factory/processing/processComponents.go @@ -140,6 +140,7 @@ type ProcessComponentsFactoryArgs struct { EpochConfig config.EpochConfig PrefConfigs config.Preferences ImportDBConfig config.ImportDbConfig + EconomicsConfig config.EconomicsConfig AccountsParser genesis.AccountsParser SmartContractParser genesis.InitialSmartContractParser GasSchedule core.GasScheduleNotifier @@ -162,6 +163,9 @@ type ProcessComponentsFactoryArgs struct { StatusComponents factory.StatusComponentsHolder StatusCoreComponents factory.StatusCoreComponentsHolder TxExecutionOrderHandler common.TxExecutionOrderHandler + + GenesisNonce uint64 + GenesisRound uint64 } type processComponentsFactory struct { @@ -170,6 +174,7 @@ type processComponentsFactory struct { epochConfig config.EpochConfig prefConfigs config.Preferences importDBConfig config.ImportDbConfig + economicsConfig config.EconomicsConfig accountsParser genesis.AccountsParser smartContractParser genesis.InitialSmartContractParser gasSchedule core.GasScheduleNotifier @@ -186,6 +191,8 @@ type processComponentsFactory struct { importHandler update.ImportHandler flagsConfig config.ContextFlagsConfig esdtNftStorage vmcommon.ESDTNFTStorageHandler + stakingDataProviderAPI peer.StakingDataProviderAPI + auctionListSelectorAPI epochStart.AuctionListSelector data factory.DataComponentsHolder coreData factory.CoreComponentsHolder @@ -196,6 +203,9 @@ type processComponentsFactory struct { statusComponents factory.StatusComponentsHolder statusCoreComponents factory.StatusCoreComponentsHolder txExecutionOrderHandler common.TxExecutionOrderHandler + + genesisNonce uint64 + genesisRound uint64 } // NewProcessComponentsFactory will return a new instance of processComponentsFactory @@ -210,6 +220,7 @@ func NewProcessComponentsFactory(args ProcessComponentsFactoryArgs) (*processCom epochConfig: args.EpochConfig, prefConfigs: args.PrefConfigs, importDBConfig: args.ImportDBConfig, + economicsConfig: args.EconomicsConfig, accountsParser: args.AccountsParser, smartContractParser: args.SmartContractParser, gasSchedule: args.GasSchedule, @@ -232,6 +243,9 @@ func NewProcessComponentsFactory(args ProcessComponentsFactoryArgs) (*processCom statusCoreComponents: args.StatusCoreComponents, flagsConfig: args.FlagsConfig, txExecutionOrderHandler: args.TxExecutionOrderHandler, + genesisNonce: args.GenesisNonce, + genesisRound: args.GenesisRound, + roundConfig: args.RoundConfig, }, nil } @@ -403,30 +417,6 @@ func (pcf *processComponentsFactory) Create() (*processComponents, error) { return nil, err } - startEpochNum := 
pcf.bootstrapComponents.EpochBootstrapParams().Epoch() - if startEpochNum == 0 { - err = pcf.indexGenesisBlocks(genesisBlocks, initialTxs, genesisAccounts) - if err != nil { - return nil, err - } - } - - cacheRefreshDuration := time.Duration(pcf.config.ValidatorStatistics.CacheRefreshIntervalInSec) * time.Second - argVSP := peer.ArgValidatorsProvider{ - NodesCoordinator: pcf.nodesCoordinator, - StartEpoch: startEpochNum, - EpochStartEventNotifier: pcf.coreData.EpochStartNotifierWithConfirm(), - CacheRefreshIntervalDurationInSec: cacheRefreshDuration, - ValidatorStatistics: validatorStatisticsProcessor, - MaxRating: pcf.maxRating, - PubKeyConverter: pcf.coreData.ValidatorPubKeyConverter(), - } - - validatorsProvider, err := peer.NewValidatorsProvider(argVSP) - if err != nil { - return nil, err - } - epochStartTrigger, err := pcf.newEpochStartTrigger(requestHandler) if err != nil { return nil, err @@ -633,6 +623,33 @@ func (pcf *processComponentsFactory) Create() (*processComponents, error) { return nil, err } + startEpochNum := pcf.bootstrapComponents.EpochBootstrapParams().Epoch() + if startEpochNum == 0 { + err = pcf.indexGenesisBlocks(genesisBlocks, initialTxs, genesisAccounts) + if err != nil { + return nil, err + } + } + + cacheRefreshDuration := time.Duration(pcf.config.ValidatorStatistics.CacheRefreshIntervalInSec) * time.Second + argVSP := peer.ArgValidatorsProvider{ + NodesCoordinator: pcf.nodesCoordinator, + StartEpoch: startEpochNum, + EpochStartEventNotifier: pcf.coreData.EpochStartNotifierWithConfirm(), + CacheRefreshIntervalDurationInSec: cacheRefreshDuration, + ValidatorStatistics: validatorStatisticsProcessor, + MaxRating: pcf.maxRating, + ValidatorPubKeyConverter: pcf.coreData.ValidatorPubKeyConverter(), + AddressPubKeyConverter: pcf.coreData.AddressPubKeyConverter(), + AuctionListSelector: pcf.auctionListSelectorAPI, + StakingDataProvider: pcf.stakingDataProviderAPI, + } + + validatorsProvider, err := peer.NewValidatorsProvider(argVSP) + if err != nil { + return nil, err + } + conversionBase := 10 genesisNodePrice, ok := big.NewInt(0).SetString(pcf.systemSCConfig.StakingSystemSCConfig.GenesisNodePrice, conversionBase) if !ok { @@ -746,7 +763,6 @@ func (pcf *processComponentsFactory) Create() (*processComponents, error) { } func (pcf *processComponentsFactory) newValidatorStatisticsProcessor() (process.ValidatorStatisticsProcessor, error) { - storageService := pcf.data.StorageService() var peerDataPool peer.DataPool = pcf.data.Datapool() @@ -881,13 +897,17 @@ func (pcf *processComponentsFactory) generateGenesisHeadersAndApplyInitialBalanc HardForkConfig: pcf.config.Hardfork, TrieStorageManagers: pcf.state.TrieStorageManagers(), SystemSCConfig: *pcf.systemSCConfig, - RoundConfig: &pcf.roundConfig, - EpochConfig: &pcf.epochConfig, + RoundConfig: pcf.roundConfig, + EpochConfig: pcf.epochConfig, + HeaderVersionConfigs: pcf.config.Versions, BlockSignKeyGen: pcf.crypto.BlockSignKeyGen(), HistoryRepository: pcf.historyRepo, GenesisNodePrice: genesisNodePrice, GenesisString: pcf.config.GeneralSettings.GenesisString, TxExecutionOrderHandler: pcf.txExecutionOrderHandler, + GenesisEpoch: pcf.config.EpochStartConfig.GenesisEpoch, + GenesisNonce: pcf.genesisNonce, + GenesisRound: pcf.genesisRound, } gbc, err := processGenesis.NewGenesisBlockCreator(arg) @@ -1366,23 +1386,24 @@ func (pcf *processComponentsFactory) newShardResolverContainerFactory( } resolversContainerFactoryArgs := resolverscontainer.FactoryArgs{ - ShardCoordinator: pcf.bootstrapComponents.ShardCoordinator(), - 
MainMessenger: pcf.network.NetworkMessenger(), - FullArchiveMessenger: pcf.network.FullArchiveNetworkMessenger(), - Store: pcf.data.StorageService(), - Marshalizer: pcf.coreData.InternalMarshalizer(), - DataPools: pcf.data.Datapool(), - Uint64ByteSliceConverter: pcf.coreData.Uint64ByteSliceConverter(), - DataPacker: dataPacker, - TriesContainer: pcf.state.TriesContainer(), - SizeCheckDelta: pcf.config.Marshalizer.SizeCheckDelta, - InputAntifloodHandler: pcf.network.InputAntiFloodHandler(), - OutputAntifloodHandler: pcf.network.OutputAntiFloodHandler(), - NumConcurrentResolvingJobs: pcf.config.Antiflood.NumConcurrentResolverJobs, - IsFullHistoryNode: pcf.prefConfigs.Preferences.FullArchive, - MainPreferredPeersHolder: pcf.network.PreferredPeersHolderHandler(), - FullArchivePreferredPeersHolder: pcf.network.FullArchivePreferredPeersHolderHandler(), - PayloadValidator: payloadValidator, + ShardCoordinator: pcf.bootstrapComponents.ShardCoordinator(), + MainMessenger: pcf.network.NetworkMessenger(), + FullArchiveMessenger: pcf.network.FullArchiveNetworkMessenger(), + Store: pcf.data.StorageService(), + Marshalizer: pcf.coreData.InternalMarshalizer(), + DataPools: pcf.data.Datapool(), + Uint64ByteSliceConverter: pcf.coreData.Uint64ByteSliceConverter(), + DataPacker: dataPacker, + TriesContainer: pcf.state.TriesContainer(), + SizeCheckDelta: pcf.config.Marshalizer.SizeCheckDelta, + InputAntifloodHandler: pcf.network.InputAntiFloodHandler(), + OutputAntifloodHandler: pcf.network.OutputAntiFloodHandler(), + NumConcurrentResolvingJobs: pcf.config.Antiflood.NumConcurrentResolverJobs, + NumConcurrentResolvingTrieNodesJobs: pcf.config.Antiflood.NumConcurrentResolvingTrieNodesJobs, + IsFullHistoryNode: pcf.prefConfigs.Preferences.FullArchive, + MainPreferredPeersHolder: pcf.network.PreferredPeersHolderHandler(), + FullArchivePreferredPeersHolder: pcf.network.FullArchivePreferredPeersHolderHandler(), + PayloadValidator: payloadValidator, } resolversContainerFactory, err := resolverscontainer.NewShardResolversContainerFactory(resolversContainerFactoryArgs) if err != nil { @@ -1402,23 +1423,24 @@ func (pcf *processComponentsFactory) newMetaResolverContainerFactory( } resolversContainerFactoryArgs := resolverscontainer.FactoryArgs{ - ShardCoordinator: pcf.bootstrapComponents.ShardCoordinator(), - MainMessenger: pcf.network.NetworkMessenger(), - FullArchiveMessenger: pcf.network.FullArchiveNetworkMessenger(), - Store: pcf.data.StorageService(), - Marshalizer: pcf.coreData.InternalMarshalizer(), - DataPools: pcf.data.Datapool(), - Uint64ByteSliceConverter: pcf.coreData.Uint64ByteSliceConverter(), - DataPacker: dataPacker, - TriesContainer: pcf.state.TriesContainer(), - SizeCheckDelta: pcf.config.Marshalizer.SizeCheckDelta, - InputAntifloodHandler: pcf.network.InputAntiFloodHandler(), - OutputAntifloodHandler: pcf.network.OutputAntiFloodHandler(), - NumConcurrentResolvingJobs: pcf.config.Antiflood.NumConcurrentResolverJobs, - IsFullHistoryNode: pcf.prefConfigs.Preferences.FullArchive, - MainPreferredPeersHolder: pcf.network.PreferredPeersHolderHandler(), - FullArchivePreferredPeersHolder: pcf.network.FullArchivePreferredPeersHolderHandler(), - PayloadValidator: payloadValidator, + ShardCoordinator: pcf.bootstrapComponents.ShardCoordinator(), + MainMessenger: pcf.network.NetworkMessenger(), + FullArchiveMessenger: pcf.network.FullArchiveNetworkMessenger(), + Store: pcf.data.StorageService(), + Marshalizer: pcf.coreData.InternalMarshalizer(), + DataPools: pcf.data.Datapool(), + Uint64ByteSliceConverter: 
pcf.coreData.Uint64ByteSliceConverter(), + DataPacker: dataPacker, + TriesContainer: pcf.state.TriesContainer(), + SizeCheckDelta: pcf.config.Marshalizer.SizeCheckDelta, + InputAntifloodHandler: pcf.network.InputAntiFloodHandler(), + OutputAntifloodHandler: pcf.network.OutputAntiFloodHandler(), + NumConcurrentResolvingJobs: pcf.config.Antiflood.NumConcurrentResolverJobs, + NumConcurrentResolvingTrieNodesJobs: pcf.config.Antiflood.NumConcurrentResolvingTrieNodesJobs, + IsFullHistoryNode: pcf.prefConfigs.Preferences.FullArchive, + MainPreferredPeersHolder: pcf.network.PreferredPeersHolderHandler(), + FullArchivePreferredPeersHolder: pcf.network.FullArchivePreferredPeersHolderHandler(), + PayloadValidator: payloadValidator, } return resolverscontainer.NewMetaResolversContainerFactory(resolversContainerFactoryArgs) diff --git a/factory/processing/processComponents_test.go b/factory/processing/processComponents_test.go index dbbd8fff853..a1654ce3ba3 100644 --- a/factory/processing/processComponents_test.go +++ b/factory/processing/processComponents_test.go @@ -44,6 +44,7 @@ import ( "github.com/multiversx/mx-chain-go/testscommon/epochNotifier" factoryMocks "github.com/multiversx/mx-chain-go/testscommon/factory" "github.com/multiversx/mx-chain-go/testscommon/genericMocks" + nodesSetupMock "github.com/multiversx/mx-chain-go/testscommon/genesisMocks" "github.com/multiversx/mx-chain-go/testscommon/guardianMocks" "github.com/multiversx/mx-chain-go/testscommon/mainFactoryMocks" "github.com/multiversx/mx-chain-go/testscommon/marshallerMock" @@ -79,8 +80,19 @@ var ( func createMockProcessComponentsFactoryArgs() processComp.ProcessComponentsFactoryArgs { args := processComp.ProcessComponentsFactoryArgs{ - Config: testscommon.GetGeneralConfig(), - EpochConfig: config.EpochConfig{}, + Config: testscommon.GetGeneralConfig(), + EpochConfig: config.EpochConfig{ + EnableEpochs: config.EnableEpochs{ + MaxNodesChangeEnableEpoch: []config.MaxNodesChangeConfig{ + { + EpochEnable: 0, + MaxNumNodes: 100, + NodesToShufflePerShard: 2, + }, + }, + }, + }, + RoundConfig: testscommon.GetDefaultRoundsConfig(), PrefConfigs: config.Preferences{}, ImportDBConfig: config.ImportDbConfig{}, FlagsConfig: config.ContextFlagsConfig{ @@ -127,7 +139,7 @@ func createMockProcessComponentsFactoryArgs() processComp.ProcessComponentsFacto OwnerAddress: "erd1vxy22x0fj4zv6hktmydg8vpfh6euv02cz4yg0aaws6rrad5a5awqgqky80", }, StakingSystemSCConfig: config.StakingSystemSCConfig{ - GenesisNodePrice: "2500000000000000000000", + GenesisNodePrice: "2500", MinStakeValue: "1", UnJailValue: "1", MinStepValue: "1", @@ -138,6 +150,8 @@ func createMockProcessComponentsFactoryArgs() processComp.ProcessComponentsFacto MaxNumberOfNodesForStake: 10, ActivateBLSPubKeyMessageVerification: false, MinUnstakeTokensValue: "1", + NodeLimitPercentage: 100.0, + StakeLimitPercentage: 100.0, }, DelegationManagerSystemSCConfig: config.DelegationManagerSystemSCConfig{ MinCreationDeposit: "100", @@ -148,6 +162,12 @@ func createMockProcessComponentsFactoryArgs() processComp.ProcessComponentsFacto MinServiceFee: 0, MaxServiceFee: 100, }, + SoftAuctionConfig: config.SoftAuctionConfig{ + TopUpStep: "10", + MinTopUp: "1", + MaxTopUp: "32000000", + MaxNumberOfIterations: 100000, + }, }, ImportStartHandler: &testscommon.ImportStartHandlerStub{}, HistoryRepo: &dblookupext.HistoryRepositoryStub{}, @@ -170,7 +190,7 @@ func createMockProcessComponentsFactoryArgs() processComp.ProcessComponentsFacto UInt64ByteSliceConv: &testsMocks.Uint64ByteSliceConverterMock{}, AddrPubKeyConv: 
addrPubKeyConv, ValPubKeyConv: valPubKeyConv, - NodesConfig: &testscommon.NodesSetupStub{ + NodesConfig: &nodesSetupMock.NodesSetupStub{ GetShardConsensusGroupSizeCalled: func() uint32 { return 2 }, @@ -244,7 +264,7 @@ func createMockProcessComponentsFactoryArgs() processComp.ProcessComponentsFacto TxExecutionOrderHandler: &txExecOrderStub.TxExecutionOrderHandlerStub{}, } - args.State = components.GetStateComponents(args.CoreData) + args.State = components.GetStateComponents(args.CoreData, args.StatusCoreComponents) return args } @@ -353,7 +373,7 @@ func TestNewProcessComponentsFactory(t *testing.T) { args := createMockProcessComponentsFactoryArgs() args.CoreData = &mock.CoreComponentsMock{ EconomicsHandler: &economicsmocks.EconomicsHandlerStub{}, - NodesConfig: &testscommon.NodesSetupStub{}, + NodesConfig: &nodesSetupMock.NodesSetupStub{}, AddrPubKeyConv: nil, } pcf, err := processComp.NewProcessComponentsFactory(args) @@ -366,7 +386,7 @@ func TestNewProcessComponentsFactory(t *testing.T) { args := createMockProcessComponentsFactoryArgs() args.CoreData = &mock.CoreComponentsMock{ EconomicsHandler: &economicsmocks.EconomicsHandlerStub{}, - NodesConfig: &testscommon.NodesSetupStub{}, + NodesConfig: &nodesSetupMock.NodesSetupStub{}, AddrPubKeyConv: &testscommon.PubkeyConverterStub{}, EpochChangeNotifier: nil, } @@ -380,7 +400,7 @@ func TestNewProcessComponentsFactory(t *testing.T) { args := createMockProcessComponentsFactoryArgs() args.CoreData = &mock.CoreComponentsMock{ EconomicsHandler: &economicsmocks.EconomicsHandlerStub{}, - NodesConfig: &testscommon.NodesSetupStub{}, + NodesConfig: &nodesSetupMock.NodesSetupStub{}, AddrPubKeyConv: &testscommon.PubkeyConverterStub{}, EpochChangeNotifier: &epochNotifier.EpochNotifierStub{}, ValPubKeyConv: nil, @@ -395,7 +415,7 @@ func TestNewProcessComponentsFactory(t *testing.T) { args := createMockProcessComponentsFactoryArgs() args.CoreData = &mock.CoreComponentsMock{ EconomicsHandler: &economicsmocks.EconomicsHandlerStub{}, - NodesConfig: &testscommon.NodesSetupStub{}, + NodesConfig: &nodesSetupMock.NodesSetupStub{}, AddrPubKeyConv: &testscommon.PubkeyConverterStub{}, EpochChangeNotifier: &epochNotifier.EpochNotifierStub{}, ValPubKeyConv: &testscommon.PubkeyConverterStub{}, @@ -411,7 +431,7 @@ func TestNewProcessComponentsFactory(t *testing.T) { args := createMockProcessComponentsFactoryArgs() args.CoreData = &mock.CoreComponentsMock{ EconomicsHandler: &economicsmocks.EconomicsHandlerStub{}, - NodesConfig: &testscommon.NodesSetupStub{}, + NodesConfig: &nodesSetupMock.NodesSetupStub{}, AddrPubKeyConv: &testscommon.PubkeyConverterStub{}, EpochChangeNotifier: &epochNotifier.EpochNotifierStub{}, ValPubKeyConv: &testscommon.PubkeyConverterStub{}, @@ -732,7 +752,7 @@ func TestProcessComponentsFactory_Create(t *testing.T) { args := createMockProcessComponentsFactoryArgs() coreCompStub := factoryMocks.NewCoreComponentsHolderStubFromRealComponent(args.CoreData) coreCompStub.GenesisNodesSetupCalled = func() sharding.GenesisNodesSetupHandler { - return &testscommon.NodesSetupStub{ + return &nodesSetupMock.NodesSetupStub{ AllInitialNodesCalled: func() []nodesCoordinator.GenesisNodeInfoHandler { return []nodesCoordinator.GenesisNodeInfoHandler{ &genesisMocks.GenesisNodeInfoHandlerMock{ diff --git a/factory/processing/txSimulatorProcessComponents.go b/factory/processing/txSimulatorProcessComponents.go index 2a5e8c5a7a2..257a46af1a5 100644 --- a/factory/processing/txSimulatorProcessComponents.go +++ b/factory/processing/txSimulatorProcessComponents.go @@ -79,6 
+79,7 @@ func (pcf *processComponentsFactory) createAPITransactionEvaluator() (factory.Tr Accounts: simulationAccountsDB, ShardCoordinator: pcf.bootstrapComponents.ShardCoordinator(), EnableEpochsHandler: pcf.coreData.EnableEpochsHandler(), + BlockChain: pcf.data.Blockchain(), }) return apiTransactionEvaluator, vmContainerFactory, err @@ -141,6 +142,8 @@ func (pcf *processComponentsFactory) createArgsTxSimulatorProcessorForMeta( return args, nil, nil, err } + args.BlockChainHook = vmContainerFactory.BlockChainHookImpl() + vmContainer, err := vmContainerFactory.Create() if err != nil { return args, nil, nil, err @@ -301,6 +304,8 @@ func (pcf *processComponentsFactory) createArgsTxSimulatorProcessorShard( return args, nil, nil, err } + args.BlockChainHook = vmContainerFactory.BlockChainHookImpl() + err = builtInFuncFactory.SetPayableHandler(vmContainerFactory.BlockChainHookImpl()) if err != nil { return args, nil, nil, err diff --git a/factory/state/stateComponentsHandler_test.go b/factory/state/stateComponentsHandler_test.go index ba552ed416a..e73600180ff 100644 --- a/factory/state/stateComponentsHandler_test.go +++ b/factory/state/stateComponentsHandler_test.go @@ -27,7 +27,7 @@ func TestNewManagedStateComponents(t *testing.T) { t.Parallel() coreComponents := componentsMock.GetCoreComponents() - args := componentsMock.GetStateFactoryArgs(coreComponents) + args := componentsMock.GetStateFactoryArgs(coreComponents, componentsMock.GetStatusCoreComponents()) stateComponentsFactory, _ := stateComp.NewStateComponentsFactory(args) managedStateComponents, err := stateComp.NewManagedStateComponents(stateComponentsFactory) require.NoError(t, err) @@ -42,7 +42,7 @@ func TestManagedStateComponents_Create(t *testing.T) { t.Parallel() coreComponents := componentsMock.GetCoreComponents() - args := componentsMock.GetStateFactoryArgs(coreComponents) + args := componentsMock.GetStateFactoryArgs(coreComponents, componentsMock.GetStatusCoreComponents()) stateComponentsFactory, _ := stateComp.NewStateComponentsFactory(args) managedStateComponents, err := stateComp.NewManagedStateComponents(stateComponentsFactory) require.NoError(t, err) @@ -56,7 +56,7 @@ func TestManagedStateComponents_Create(t *testing.T) { t.Parallel() coreComponents := componentsMock.GetCoreComponents() - args := componentsMock.GetStateFactoryArgs(coreComponents) + args := componentsMock.GetStateFactoryArgs(coreComponents, componentsMock.GetStatusCoreComponents()) stateComponentsFactory, _ := stateComp.NewStateComponentsFactory(args) managedStateComponents, err := stateComp.NewManagedStateComponents(stateComponentsFactory) require.NoError(t, err) @@ -87,7 +87,7 @@ func TestManagedStateComponents_Close(t *testing.T) { t.Parallel() coreComponents := componentsMock.GetCoreComponents() - args := componentsMock.GetStateFactoryArgs(coreComponents) + args := componentsMock.GetStateFactoryArgs(coreComponents, componentsMock.GetStatusCoreComponents()) stateComponentsFactory, _ := stateComp.NewStateComponentsFactory(args) managedStateComponents, _ := stateComp.NewManagedStateComponents(stateComponentsFactory) require.NoError(t, managedStateComponents.Close()) @@ -102,7 +102,7 @@ func TestManagedStateComponents_CheckSubcomponents(t *testing.T) { t.Parallel() coreComponents := componentsMock.GetCoreComponents() - args := componentsMock.GetStateFactoryArgs(coreComponents) + args := componentsMock.GetStateFactoryArgs(coreComponents, componentsMock.GetStatusCoreComponents()) stateComponentsFactory, _ := stateComp.NewStateComponentsFactory(args) 
managedStateComponents, _ := stateComp.NewManagedStateComponents(stateComponentsFactory) err := managedStateComponents.CheckSubcomponents() @@ -121,7 +121,7 @@ func TestManagedStateComponents_Setters(t *testing.T) { t.Parallel() coreComponents := componentsMock.GetCoreComponents() - args := componentsMock.GetStateFactoryArgs(coreComponents) + args := componentsMock.GetStateFactoryArgs(coreComponents, componentsMock.GetStatusCoreComponents()) stateComponentsFactory, _ := stateComp.NewStateComponentsFactory(args) managedStateComponents, _ := stateComp.NewManagedStateComponents(stateComponentsFactory) err := managedStateComponents.Create() @@ -153,7 +153,7 @@ func TestManagedStateComponents_IsInterfaceNil(t *testing.T) { require.True(t, managedStateComponents.IsInterfaceNil()) coreComponents := componentsMock.GetCoreComponents() - args := componentsMock.GetStateFactoryArgs(coreComponents) + args := componentsMock.GetStateFactoryArgs(coreComponents, componentsMock.GetStatusCoreComponents()) stateComponentsFactory, _ := stateComp.NewStateComponentsFactory(args) managedStateComponents, _ = stateComp.NewManagedStateComponents(stateComponentsFactory) require.False(t, managedStateComponents.IsInterfaceNil()) diff --git a/factory/state/stateComponents_test.go b/factory/state/stateComponents_test.go index 177407226d8..bf5068e8dd7 100644 --- a/factory/state/stateComponents_test.go +++ b/factory/state/stateComponents_test.go @@ -20,7 +20,7 @@ func TestNewStateComponentsFactory(t *testing.T) { t.Parallel() coreComponents := componentsMock.GetCoreComponents() - args := componentsMock.GetStateFactoryArgs(coreComponents) + args := componentsMock.GetStateFactoryArgs(coreComponents, componentsMock.GetStatusCoreComponents()) args.Core = nil scf, err := stateComp.NewStateComponentsFactory(args) @@ -31,7 +31,7 @@ func TestNewStateComponentsFactory(t *testing.T) { t.Parallel() coreComponents := componentsMock.GetCoreComponents() - args := componentsMock.GetStateFactoryArgs(coreComponents) + args := componentsMock.GetStateFactoryArgs(coreComponents, componentsMock.GetStatusCoreComponents()) args.StatusCore = nil scf, err := stateComp.NewStateComponentsFactory(args) @@ -42,7 +42,7 @@ func TestNewStateComponentsFactory(t *testing.T) { t.Parallel() coreComponents := componentsMock.GetCoreComponents() - args := componentsMock.GetStateFactoryArgs(coreComponents) + args := componentsMock.GetStateFactoryArgs(coreComponents, componentsMock.GetStatusCoreComponents()) scf, err := stateComp.NewStateComponentsFactory(args) require.NoError(t, err) @@ -57,7 +57,7 @@ func TestStateComponentsFactory_Create(t *testing.T) { t.Parallel() coreComponents := componentsMock.GetCoreComponents() - args := componentsMock.GetStateFactoryArgs(coreComponents) + args := componentsMock.GetStateFactoryArgs(coreComponents, componentsMock.GetStatusCoreComponents()) coreCompStub := factory.NewCoreComponentsHolderStubFromRealComponent(args.Core) coreCompStub.InternalMarshalizerCalled = func() marshal.Marshalizer { return nil @@ -73,7 +73,7 @@ func TestStateComponentsFactory_Create(t *testing.T) { t.Parallel() coreComponents := componentsMock.GetCoreComponents() - args := componentsMock.GetStateFactoryArgs(coreComponents) + args := componentsMock.GetStateFactoryArgs(coreComponents, componentsMock.GetStatusCoreComponents()) args.Config.EvictionWaitingList.RootHashesSize = 0 scf, _ := stateComp.NewStateComponentsFactory(args) @@ -85,7 +85,7 @@ func TestStateComponentsFactory_Create(t *testing.T) { t.Parallel() coreComponents := 
componentsMock.GetCoreComponents() - args := componentsMock.GetStateFactoryArgs(coreComponents) + args := componentsMock.GetStateFactoryArgs(coreComponents, componentsMock.GetStatusCoreComponents()) coreCompStub := factory.NewCoreComponentsHolderStubFromRealComponent(args.Core) cnt := 0 @@ -107,7 +107,7 @@ func TestStateComponentsFactory_Create(t *testing.T) { t.Parallel() coreComponents := componentsMock.GetCoreComponents() - args := componentsMock.GetStateFactoryArgs(coreComponents) + args := componentsMock.GetStateFactoryArgs(coreComponents, componentsMock.GetStatusCoreComponents()) coreCompStub := factory.NewCoreComponentsHolderStubFromRealComponent(args.Core) cnt := 0 @@ -129,7 +129,7 @@ func TestStateComponentsFactory_Create(t *testing.T) { t.Parallel() coreComponents := componentsMock.GetCoreComponents() - args := componentsMock.GetStateFactoryArgs(coreComponents) + args := componentsMock.GetStateFactoryArgs(coreComponents, componentsMock.GetStatusCoreComponents()) scf, _ := stateComp.NewStateComponentsFactory(args) sc, err := scf.Create() @@ -143,7 +143,7 @@ func TestStateComponents_Close(t *testing.T) { t.Parallel() coreComponents := componentsMock.GetCoreComponents() - args := componentsMock.GetStateFactoryArgs(coreComponents) + args := componentsMock.GetStateFactoryArgs(coreComponents, componentsMock.GetStatusCoreComponents()) scf, _ := stateComp.NewStateComponentsFactory(args) sc, err := scf.Create() diff --git a/factory/status/statusComponentsHandler_test.go b/factory/status/statusComponentsHandler_test.go index ee81a353e31..c7252cbf6de 100644 --- a/factory/status/statusComponentsHandler_test.go +++ b/factory/status/statusComponentsHandler_test.go @@ -16,18 +16,14 @@ import ( ) func TestNewManagedStatusComponents(t *testing.T) { - t.Parallel() + // no t.Parallel for these tests as they create real components t.Run("nil factory should error", func(t *testing.T) { - t.Parallel() - managedStatusComponents, err := statusComp.NewManagedStatusComponents(nil) require.Equal(t, errorsMx.ErrNilStatusComponentsFactory, err) require.Nil(t, managedStatusComponents) }) t.Run("should work", func(t *testing.T) { - t.Parallel() - scf, err := statusComp.NewStatusComponentsFactory(createMockStatusComponentsFactoryArgs()) require.Nil(t, err) managedStatusComponents, err := statusComp.NewManagedStatusComponents(scf) @@ -37,11 +33,9 @@ func TestNewManagedStatusComponents(t *testing.T) { } func TestManagedStatusComponents_Create(t *testing.T) { - t.Parallel() + // no t.Parallel for these tests as they create real components t.Run("invalid params should error", func(t *testing.T) { - t.Parallel() - args := createMockStatusComponentsFactoryArgs() args.StatusCoreComponents = &factoryMocks.StatusCoreComponentsStub{ AppStatusHandlerField: nil, @@ -56,8 +50,6 @@ func TestManagedStatusComponents_Create(t *testing.T) { require.Error(t, err) }) t.Run("should work with getters", func(t *testing.T) { - t.Parallel() - scf, err := statusComp.NewStatusComponentsFactory(createMockStatusComponentsFactoryArgs()) require.Nil(t, err) managedStatusComponents, err := statusComp.NewManagedStatusComponents(scf) @@ -78,7 +70,7 @@ func TestManagedStatusComponents_Create(t *testing.T) { } func TestManagedStatusComponents_Close(t *testing.T) { - t.Parallel() + // no t.Parallel for these tests as they create real components scf, _ := statusComp.NewStatusComponentsFactory(createMockStatusComponentsFactoryArgs()) managedStatusComponents, _ := statusComp.NewManagedStatusComponents(scf) @@ -96,7 +88,7 @@ func 
TestManagedStatusComponents_Close(t *testing.T) { } func TestManagedStatusComponents_CheckSubcomponents(t *testing.T) { - t.Parallel() + // no t.Parallel for these tests as they create real components scf, _ := statusComp.NewStatusComponentsFactory(createMockStatusComponentsFactoryArgs()) managedStatusComponents, _ := statusComp.NewManagedStatusComponents(scf) @@ -112,7 +104,7 @@ func TestManagedStatusComponents_CheckSubcomponents(t *testing.T) { } func TestManagedStatusComponents_SetForkDetector(t *testing.T) { - t.Parallel() + // no t.Parallel for these tests as they create real components scf, _ := statusComp.NewStatusComponentsFactory(createMockStatusComponentsFactoryArgs()) managedStatusComponents, _ := statusComp.NewManagedStatusComponents(scf) @@ -126,11 +118,9 @@ func TestManagedStatusComponents_SetForkDetector(t *testing.T) { } func TestManagedStatusComponents_StartPolling(t *testing.T) { - t.Parallel() + // no t.Parallel for these tests as they create real components t.Run("NewAppStatusPolling fails should error", func(t *testing.T) { - t.Parallel() - args := createMockStatusComponentsFactoryArgs() args.Config.GeneralSettings.StatusPollingIntervalSec = 0 scf, _ := statusComp.NewStatusComponentsFactory(args) @@ -142,8 +132,6 @@ func TestManagedStatusComponents_StartPolling(t *testing.T) { require.Equal(t, errorsMx.ErrStatusPollingInit, err) }) t.Run("RegisterPollingFunc fails should error", func(t *testing.T) { - t.Parallel() - args := createMockStatusComponentsFactoryArgs() args.Config.GeneralSettings.StatusPollingIntervalSec = 0 scf, _ := statusComp.NewStatusComponentsFactory(args) @@ -155,8 +143,6 @@ func TestManagedStatusComponents_StartPolling(t *testing.T) { require.Equal(t, errorsMx.ErrStatusPollingInit, err) }) t.Run("should work", func(t *testing.T) { - t.Parallel() - scf, _ := statusComp.NewStatusComponentsFactory(createMockStatusComponentsFactoryArgs()) managedStatusComponents, _ := statusComp.NewManagedStatusComponents(scf) err := managedStatusComponents.Create() @@ -168,7 +154,7 @@ func TestManagedStatusComponents_StartPolling(t *testing.T) { } func TestComputeNumConnectedPeers(t *testing.T) { - t.Parallel() + // no t.Parallel for these tests as they create real components t.Run("main network", testComputeNumConnectedPeers("")) t.Run("full archive network", testComputeNumConnectedPeers(common.FullArchiveMetricSuffix)) @@ -176,8 +162,6 @@ func TestComputeNumConnectedPeers(t *testing.T) { func testComputeNumConnectedPeers(suffix string) func(t *testing.T) { return func(t *testing.T) { - t.Parallel() - netMes := &p2pmocks.MessengerStub{ ConnectedAddressesCalled: func() []string { return []string{"addr1", "addr2", "addr3"} @@ -195,7 +179,7 @@ func testComputeNumConnectedPeers(suffix string) func(t *testing.T) { } func TestComputeConnectedPeers(t *testing.T) { - t.Parallel() + // no t.Parallel for these tests as they create real components t.Run("main network", testComputeConnectedPeers("")) t.Run("full archive network", testComputeConnectedPeers(common.FullArchiveMetricSuffix)) @@ -203,8 +187,6 @@ func TestComputeConnectedPeers(t *testing.T) { func testComputeConnectedPeers(suffix string) func(t *testing.T) { return func(t *testing.T) { - t.Parallel() - netMes := &p2pmocks.MessengerStub{ GetConnectedPeersInfoCalled: func() *p2p.ConnectedPeersInfo { return &p2p.ConnectedPeersInfo{ @@ -294,7 +276,7 @@ func testComputeConnectedPeers(suffix string) func(t *testing.T) { } func TestManagedStatusComponents_IsInterfaceNil(t *testing.T) { - t.Parallel() + // no t.Parallel for 
these tests as they create real components managedStatusComponents, _ := statusComp.NewManagedStatusComponents(nil) require.True(t, managedStatusComponents.IsInterfaceNil()) diff --git a/factory/status/statusComponents_test.go b/factory/status/statusComponents_test.go index 35c7041d844..2b7c3e59379 100644 --- a/factory/status/statusComponents_test.go +++ b/factory/status/statusComponents_test.go @@ -15,6 +15,7 @@ import ( componentsMock "github.com/multiversx/mx-chain-go/testscommon/components" "github.com/multiversx/mx-chain-go/testscommon/epochNotifier" "github.com/multiversx/mx-chain-go/testscommon/factory" + "github.com/multiversx/mx-chain-go/testscommon/genesisMocks" "github.com/multiversx/mx-chain-go/testscommon/shardingMocks" "github.com/multiversx/mx-chain-go/testscommon/statusHandler" "github.com/stretchr/testify/require" @@ -45,7 +46,7 @@ func createMockStatusComponentsFactoryArgs() statusComp.StatusComponentsFactoryA NodesCoordinator: &shardingMocks.NodesCoordinatorMock{}, EpochStartNotifier: &mock.EpochStartNotifierStub{}, CoreComponents: &mock.CoreComponentsMock{ - NodesConfig: &testscommon.NodesSetupStub{ + NodesConfig: &genesisMocks.NodesSetupStub{ GetRoundDurationCalled: func() uint64 { return 1000 }, @@ -66,11 +67,9 @@ func createMockStatusComponentsFactoryArgs() statusComp.StatusComponentsFactoryA } func TestNewStatusComponentsFactory(t *testing.T) { - t.Parallel() + // no t.Parallel for these tests as they create real components t.Run("nil CoreComponents should error", func(t *testing.T) { - t.Parallel() - args := createMockStatusComponentsFactoryArgs() args.CoreComponents = nil scf, err := statusComp.NewStatusComponentsFactory(args) @@ -78,8 +77,6 @@ func TestNewStatusComponentsFactory(t *testing.T) { require.Equal(t, errorsMx.ErrNilCoreComponentsHolder, err) }) t.Run("CoreComponents with nil GenesisNodesSetup should error", func(t *testing.T) { - t.Parallel() - args := createMockStatusComponentsFactoryArgs() args.CoreComponents = &mock.CoreComponentsMock{ NodesConfig: nil, @@ -89,8 +86,6 @@ func TestNewStatusComponentsFactory(t *testing.T) { require.Equal(t, errorsMx.ErrNilGenesisNodesSetupHandler, err) }) t.Run("nil NetworkComponents should error", func(t *testing.T) { - t.Parallel() - args := createMockStatusComponentsFactoryArgs() args.NetworkComponents = nil scf, err := statusComp.NewStatusComponentsFactory(args) @@ -98,8 +93,6 @@ func TestNewStatusComponentsFactory(t *testing.T) { require.Equal(t, errorsMx.ErrNilNetworkComponentsHolder, err) }) t.Run("nil ShardCoordinator should error", func(t *testing.T) { - t.Parallel() - args := createMockStatusComponentsFactoryArgs() args.ShardCoordinator = nil scf, err := statusComp.NewStatusComponentsFactory(args) @@ -107,8 +100,6 @@ func TestNewStatusComponentsFactory(t *testing.T) { require.Equal(t, errorsMx.ErrNilShardCoordinator, err) }) t.Run("nil NodesCoordinator should error", func(t *testing.T) { - t.Parallel() - args := createMockStatusComponentsFactoryArgs() args.NodesCoordinator = nil scf, err := statusComp.NewStatusComponentsFactory(args) @@ -116,8 +107,6 @@ func TestNewStatusComponentsFactory(t *testing.T) { require.Equal(t, errorsMx.ErrNilNodesCoordinator, err) }) t.Run("nil EpochStartNotifier should error", func(t *testing.T) { - t.Parallel() - args := createMockStatusComponentsFactoryArgs() args.EpochStartNotifier = nil scf, err := statusComp.NewStatusComponentsFactory(args) @@ -125,8 +114,6 @@ func TestNewStatusComponentsFactory(t *testing.T) { require.Equal(t, errorsMx.ErrNilEpochStartNotifier, err) }) 
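
The pattern applied throughout these status-component tests: tests that construct real components drop t.Parallel(), and the heaviest ones additionally skip when the -short flag is set. A minimal, self-contained sketch of that shape:

func TestExampleWithRealComponents(t *testing.T) {
	// no t.Parallel() here: the test wires up real components that do not tolerate concurrent runs
	if testing.Short() {
		t.Skip("this is not a short test")
	}
	// build and exercise the real components here
}
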
t.Run("nil StatusCoreComponents should error", func(t *testing.T) { - t.Parallel() - args := createMockStatusComponentsFactoryArgs() args.StatusCoreComponents = nil scf, err := statusComp.NewStatusComponentsFactory(args) @@ -134,8 +121,6 @@ func TestNewStatusComponentsFactory(t *testing.T) { require.Equal(t, errorsMx.ErrNilStatusCoreComponents, err) }) t.Run("nil CryptoComponents should error", func(t *testing.T) { - t.Parallel() - args := createMockStatusComponentsFactoryArgs() args.CryptoComponents = nil scf, err := statusComp.NewStatusComponentsFactory(args) @@ -143,8 +128,6 @@ func TestNewStatusComponentsFactory(t *testing.T) { require.Equal(t, errorsMx.ErrNilCryptoComponents, err) }) t.Run("should work", func(t *testing.T) { - t.Parallel() - scf, err := statusComp.NewStatusComponentsFactory(createMockStatusComponentsFactoryArgs()) require.NotNil(t, scf) require.NoError(t, err) @@ -152,11 +135,11 @@ func TestNewStatusComponentsFactory(t *testing.T) { } func TestStatusComponentsFactory_Create(t *testing.T) { - t.Parallel() + if testing.Short() { + t.Skip("this is not a short test") + } t.Run("NewSoftwareVersionFactory fails should return error", func(t *testing.T) { - t.Parallel() - args := createMockStatusComponentsFactoryArgs() args.StatusCoreComponents = &factory.StatusCoreComponentsStub{ AppStatusHandlerField: nil, // make NewSoftwareVersionFactory fail @@ -169,8 +152,6 @@ func TestStatusComponentsFactory_Create(t *testing.T) { require.Nil(t, sc) }) t.Run("softwareVersionCheckerFactory.Create fails should return error", func(t *testing.T) { - t.Parallel() - args := createMockStatusComponentsFactoryArgs() args.Config.SoftwareVersionConfig.PollingIntervalInMinutes = 0 scf, _ := statusComp.NewStatusComponentsFactory(args) @@ -181,11 +162,9 @@ func TestStatusComponentsFactory_Create(t *testing.T) { require.Nil(t, sc) }) t.Run("invalid round duration should error", func(t *testing.T) { - t.Parallel() - args := createMockStatusComponentsFactoryArgs() args.CoreComponents = &mock.CoreComponentsMock{ - NodesConfig: &testscommon.NodesSetupStub{ + NodesConfig: &genesisMocks.NodesSetupStub{ GetRoundDurationCalled: func() uint64 { return 0 }, @@ -199,8 +178,6 @@ func TestStatusComponentsFactory_Create(t *testing.T) { require.Nil(t, sc) }) t.Run("makeWebSocketDriverArgs fails due to invalid marshaller type should error", func(t *testing.T) { - t.Parallel() - args := createMockStatusComponentsFactoryArgs() args.ExternalConfig.HostDriversConfig[0].Enabled = true args.ExternalConfig.HostDriversConfig[0].MarshallerType = "invalid type" @@ -212,8 +189,6 @@ func TestStatusComponentsFactory_Create(t *testing.T) { require.Nil(t, sc) }) t.Run("should work", func(t *testing.T) { - t.Parallel() - shardCoordinator := mock.NewMultiShardsCoordinatorMock(2) shardCoordinator.SelfIDCalled = func() uint32 { return core.MetachainShardId // coverage @@ -232,7 +207,7 @@ func TestStatusComponentsFactory_Create(t *testing.T) { } func TestStatusComponentsFactory_epochStartEventHandler(t *testing.T) { - t.Parallel() + // no t.Parallel for these tests as they create real components args := createMockStatusComponentsFactoryArgs() args.NodesCoordinator = &shardingMocks.NodesCoordinatorStub{ @@ -252,7 +227,7 @@ func TestStatusComponentsFactory_epochStartEventHandler(t *testing.T) { } func TestStatusComponentsFactory_IsInterfaceNil(t *testing.T) { - t.Parallel() + // no t.Parallel for these tests as they create real components args := createMockStatusComponentsFactoryArgs() args.CoreComponents = nil @@ -264,7 +239,7 @@ func 
TestStatusComponentsFactory_IsInterfaceNil(t *testing.T) { } func TestStatusComponents_Close(t *testing.T) { - t.Parallel() + // no t.Parallel for these tests as they create real components scf, _ := statusComp.NewStatusComponentsFactory(createMockStatusComponentsFactoryArgs()) cc, err := scf.Create() @@ -275,7 +250,7 @@ func TestStatusComponents_Close(t *testing.T) { } func TestMakeHostDriversArgs(t *testing.T) { - t.Parallel() + // no t.Parallel for these tests as they create real components args := createMockStatusComponentsFactoryArgs() args.ExternalConfig.HostDriversConfig = []config.HostDriversConfig{ diff --git a/genesis/interface.go b/genesis/interface.go index 1a618a44efe..e58708a236f 100644 --- a/genesis/interface.go +++ b/genesis/interface.go @@ -84,7 +84,7 @@ type InitialSmartContractHandler interface { } // InitialSmartContractParser contains the parsed genesis initial smart contracts -//json file and has some functionality regarding processed data +// json file and has some functionality regarding processed data type InitialSmartContractParser interface { InitialSmartContractsSplitOnOwnersShards(shardCoordinator sharding.Coordinator) (map[uint32][]InitialSmartContractHandler, error) GetDeployedSCAddresses(scType string) (map[string]struct{}, error) @@ -115,3 +115,9 @@ type DeployProcessor interface { Deploy(sc InitialSmartContractHandler) ([][]byte, error) IsInterfaceNil() bool } + +// VersionedHeaderFactory creates versioned headers +type VersionedHeaderFactory interface { + Create(epoch uint32) data.HeaderHandler + IsInterfaceNil() bool +} diff --git a/genesis/process/argGenesisBlockCreator.go b/genesis/process/argGenesisBlockCreator.go index e4374b7f6f0..19b5fc9adcc 100644 --- a/genesis/process/argGenesisBlockCreator.go +++ b/genesis/process/argGenesisBlockCreator.go @@ -44,7 +44,10 @@ type dataComponentsHandler interface { // ArgsGenesisBlockCreator holds the arguments which are needed to create a genesis block type ArgsGenesisBlockCreator struct { GenesisTime uint64 + GenesisNonce uint64 + GenesisRound uint64 StartEpochNum uint32 + GenesisEpoch uint32 Data dataComponentsHandler Core coreComponentsHandler Accounts state.AccountsAdapter @@ -60,8 +63,9 @@ type ArgsGenesisBlockCreator struct { HardForkConfig config.HardforkConfig TrieStorageManagers map[string]common.StorageManager SystemSCConfig config.SystemSmartContractsConfig - RoundConfig *config.RoundConfig - EpochConfig *config.EpochConfig + RoundConfig config.RoundConfig + EpochConfig config.EpochConfig + HeaderVersionConfigs config.VersionsConfig WorkingDir string BlockSignKeyGen crypto.KeyGenerator HistoryRepository dblookupext.HistoryRepository @@ -69,6 +73,8 @@ type ArgsGenesisBlockCreator struct { GenesisNodePrice *big.Int GenesisString string + // created components - importHandler update.ImportHandler + importHandler update.ImportHandler + versionedHeaderFactory genesis.VersionedHeaderFactory } diff --git a/genesis/process/disabled/nodesCoordinator.go b/genesis/process/disabled/nodesCoordinator.go new file mode 100644 index 00000000000..610230dd56f --- /dev/null +++ b/genesis/process/disabled/nodesCoordinator.go @@ -0,0 +1,15 @@ +package disabled + +// NodesCoordinator implements the NodesCoordinator interface, it does nothing as it is disabled +type NodesCoordinator struct { +} + +// GetNumTotalEligible - +func (n *NodesCoordinator) GetNumTotalEligible() uint64 { + return 1600 +} + +// IsInterfaceNil - +func (n *NodesCoordinator) IsInterfaceNil() bool { + return n == nil +} diff --git 
a/genesis/process/genesisBlockCreator.go b/genesis/process/genesisBlockCreator.go index 2e9b14d7db3..3f5e559888f 100644 --- a/genesis/process/genesisBlockCreator.go +++ b/genesis/process/genesisBlockCreator.go @@ -16,6 +16,7 @@ import ( "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/dataRetriever/blockchain" + factoryBlock "github.com/multiversx/mx-chain-go/factory/block" "github.com/multiversx/mx-chain-go/genesis" "github.com/multiversx/mx-chain-go/genesis/process/disabled" "github.com/multiversx/mx-chain-go/genesis/process/intermediate" @@ -82,7 +83,7 @@ func getGenesisBlocksRoundNonceEpoch(arg ArgsGenesisBlockCreator) (uint64, uint6 if arg.HardForkConfig.AfterHardFork { return arg.HardForkConfig.StartRound, arg.HardForkConfig.StartNonce, arg.HardForkConfig.StartEpoch } - return 0, 0, 0 + return arg.GenesisRound, arg.GenesisNonce, arg.GenesisEpoch } func (gbc *genesisBlockCreator) createHardForkImportHandler() error { @@ -131,8 +132,7 @@ func createStorer(storageConfig config.StorageConfig, folder string) (storage.St dbConfig := factory.GetDBFromConfig(storageConfig.DB) dbConfig.FilePath = path.Join(folder, storageConfig.DB.FilePath) - dbConfigHandler := factory.NewDBConfigHandler(storageConfig.DB) - persisterFactory, err := factory.NewPersisterFactory(dbConfigHandler) + persisterFactory, err := factory.NewPersisterFactory(storageConfig.DB) if err != nil { return nil, err } @@ -195,12 +195,6 @@ func checkArgumentsForBlockCreator(arg ArgsGenesisBlockCreator) error { if arg.TrieStorageManagers == nil { return genesis.ErrNilTrieStorageManager } - if arg.EpochConfig == nil { - return genesis.ErrNilEpochConfig - } - if arg.RoundConfig == nil { - return genesis.ErrNilRoundConfig - } if check.IfNil(arg.HistoryRepository) { return process.ErrNilHistoryRepository } @@ -212,7 +206,7 @@ func checkArgumentsForBlockCreator(arg ArgsGenesisBlockCreator) error { } func mustDoGenesisProcess(arg ArgsGenesisBlockCreator) bool { - genesisEpoch := uint32(0) + genesisEpoch := arg.GenesisEpoch if arg.HardForkConfig.AfterHardFork { genesisEpoch = arg.HardForkConfig.StartEpoch } @@ -225,7 +219,7 @@ func mustDoGenesisProcess(arg ArgsGenesisBlockCreator) bool { } func (gbc *genesisBlockCreator) createEmptyGenesisBlocks() (map[uint32]data.HeaderHandler, error) { - err := gbc.computeDNSAddresses(createGenesisConfig()) + err := gbc.computeDNSAddresses(createGenesisConfig(gbc.arg.EpochConfig.EnableEpochs)) if err != nil { return nil, err } @@ -486,12 +480,17 @@ func (gbc *genesisBlockCreator) getNewArgForShard(shardID uint32) (ArgsGenesisBl var err error isCurrentShard := shardID == gbc.arg.ShardCoordinator.SelfId() + newArgument := gbc.arg // copy the arguments + newArgument.versionedHeaderFactory, err = gbc.createVersionedHeaderFactory() + if err != nil { + return ArgsGenesisBlockCreator{}, fmt.Errorf("'%w' while generating a VersionedHeaderFactory instance for shard %d", + err, shardID) + } + if isCurrentShard { - newArgument := gbc.arg // copy the arguments newArgument.Data = newArgument.Data.Clone().(dataComponentsHandler) return newArgument, nil } - newArgument := gbc.arg // copy the arguments argsAccCreator := factoryState.ArgsAccountCreator{ Hasher: newArgument.Core.Hasher(), @@ -530,6 +529,25 @@ func (gbc *genesisBlockCreator) getNewArgForShard(shardID uint32) (ArgsGenesisBl return newArgument, err } +func (gbc *genesisBlockCreator) createVersionedHeaderFactory() (genesis.VersionedHeaderFactory, error) { + cacheConfig := 
factory.GetCacherFromConfig(gbc.arg.HeaderVersionConfigs.Cache) + cache, err := storageunit.NewCache(cacheConfig) + if err != nil { + return nil, err + } + + headerVersionHandler, err := factoryBlock.NewHeaderVersionHandler( + gbc.arg.HeaderVersionConfigs.VersionsByEpochs, + gbc.arg.HeaderVersionConfigs.DefaultVersion, + cache, + ) + if err != nil { + return nil, err + } + + return factoryBlock.NewShardHeaderFactory(headerVersionHandler) +} + func (gbc *genesisBlockCreator) saveGenesisBlock(header data.HeaderHandler) error { blockBuff, err := gbc.arg.Core.InternalMarshalizer().Marshal(header) if err != nil { diff --git a/genesis/process/genesisBlockCreator_test.go b/genesis/process/genesisBlockCreator_test.go index 90b46757a86..68c93b87f51 100644 --- a/genesis/process/genesisBlockCreator_test.go +++ b/genesis/process/genesisBlockCreator_test.go @@ -1,7 +1,5 @@ //go:build !race -// TODO reinstate test after Wasm VM pointer fix - package process import ( @@ -13,6 +11,8 @@ import ( "testing" "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-core-go/data/block" "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/dataRetriever" @@ -151,6 +151,8 @@ func createMockArgument( MaxNumberOfNodesForStake: 10, ActivateBLSPubKeyMessageVerification: false, MinUnstakeTokensValue: "1", + StakeLimitPercentage: 100.0, + NodeLimitPercentage: 100.0, }, DelegationManagerSystemSCConfig: config.DelegationManagerSystemSCConfig{ MinCreationDeposit: "100", @@ -161,27 +163,33 @@ func createMockArgument( MinServiceFee: 0, MaxServiceFee: 100, }, + SoftAuctionConfig: config.SoftAuctionConfig{ + TopUpStep: "10", + MinTopUp: "1", + MaxTopUp: "32000000", + MaxNumberOfIterations: 100000, + }, }, TrieStorageManagers: trieStorageManagers, BlockSignKeyGen: &mock.KeyGenMock{}, GenesisNodePrice: nodePrice, - EpochConfig: &config.EpochConfig{ + EpochConfig: config.EpochConfig{ EnableEpochs: config.EnableEpochs{ - BuiltInFunctionsEnableEpoch: 0, - SCDeployEnableEpoch: 0, - RelayedTransactionsEnableEpoch: 0, - PenalizedTooMuchGasEnableEpoch: 0, - }, - }, - RoundConfig: &config.RoundConfig{ - RoundActivations: map[string]config.ActivationRoundByName{ - "DisableAsyncCallV1": { - Round: "18446744073709551615", - }, + SCDeployEnableEpoch: unreachableEpoch, + CleanUpInformativeSCRsEnableEpoch: unreachableEpoch, + SCProcessorV2EnableEpoch: unreachableEpoch, + StakeLimitsEnableEpoch: 10, }, }, + RoundConfig: testscommon.GetDefaultRoundsConfig(), + HeaderVersionConfigs: testscommon.GetDefaultHeaderVersionConfig(), HistoryRepository: &dblookupext.HistoryRepositoryStub{}, TxExecutionOrderHandler: &commonMocks.TxExecutionOrderHandlerStub{}, + versionedHeaderFactory: &testscommon.VersionedHeaderFactoryStub{ + CreateCalled: func(epoch uint32) data.HeaderHandler { + return &block.Header{} + }, + }, } arg.ShardCoordinator = &mock.ShardCoordinatorMock{ @@ -427,16 +435,6 @@ func TestNewGenesisBlockCreator(t *testing.T) { require.True(t, errors.Is(err, genesis.ErrNilTrieStorageManager)) require.Nil(t, gbc) }) - t.Run("nil EpochConfig should error", func(t *testing.T) { - t.Parallel() - - arg := createMockArgument(t, "testdata/genesisTest1.json", &mock.InitialNodesHandlerStub{}, big.NewInt(22000)) - arg.EpochConfig = nil - - gbc, err := NewGenesisBlockCreator(arg) - require.True(t, errors.Is(err, genesis.ErrNilEpochConfig)) - require.Nil(t, gbc) - }) t.Run("invalid GenesisNodePrice should error", func(t 
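
Putting the new pieces together, the genesis block creator can now produce a header whose version matches what is configured for the genesis epoch. The sketch below only recombines the APIs introduced above; the wrapper function name is assumed.

func buildGenesisShardHeader(cfg config.VersionsConfig, epoch uint32) (data.HeaderHandler, error) {
	cache, err := storageunit.NewCache(factory.GetCacherFromConfig(cfg.Cache))
	if err != nil {
		return nil, err
	}
	versionHandler, err := factoryBlock.NewHeaderVersionHandler(cfg.VersionsByEpochs, cfg.DefaultVersion, cache)
	if err != nil {
		return nil, err
	}
	shardHeaderFactory, err := factoryBlock.NewShardHeaderFactory(versionHandler)
	if err != nil {
		return nil, err
	}
	// Create returns the header implementation registered for the given epoch
	return shardHeaderFactory.Create(epoch), nil
}
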
*testing.T) { t.Parallel() @@ -897,9 +895,9 @@ func TestCreateArgsGenesisBlockCreator_ShouldWorkAndCreateEmpty(t *testing.T) { blocks, err := gbc.CreateGenesisBlocks() assert.Nil(t, err) assert.Equal(t, 3, len(blocks)) - for _, block := range blocks { - assert.Zero(t, block.GetNonce()) - assert.Zero(t, block.GetRound()) - assert.Zero(t, block.GetEpoch()) + for _, blockInstance := range blocks { + assert.Zero(t, blockInstance.GetNonce()) + assert.Zero(t, blockInstance.GetRound()) + assert.Zero(t, blockInstance.GetEpoch()) } } diff --git a/genesis/process/metaGenesisBlockCreator.go b/genesis/process/metaGenesisBlockCreator.go index 40b5f606241..de3500d2e2f 100644 --- a/genesis/process/metaGenesisBlockCreator.go +++ b/genesis/process/metaGenesisBlockCreator.go @@ -48,9 +48,6 @@ import ( "github.com/multiversx/mx-chain-vm-common-go/parsers" ) -const unreachableEpoch = ^uint32(0) -const unreachableRound = ^uint64(0) - // CreateMetaGenesisBlock will create a metachain genesis block func CreateMetaGenesisBlock( arg ArgsGenesisBlockCreator, @@ -70,7 +67,11 @@ func CreateMetaGenesisBlock( DeployInitialScTxs: make([]data.TransactionHandler, 0), } - processors, err := createProcessorsForMetaGenesisBlock(arg, createGenesisConfig(), createGenesisRoundConfig()) + processors, err := createProcessorsForMetaGenesisBlock( + arg, + createGenesisConfig(arg.EpochConfig.EnableEpochs), + createGenesisRoundConfig(arg.RoundConfig), + ) if err != nil { return nil, nil, nil, err } @@ -295,7 +296,7 @@ func saveGenesisMetaToStorage( return nil } -func createProcessorsForMetaGenesisBlock(arg ArgsGenesisBlockCreator, enableEpochsConfig config.EnableEpochs, roundConfig *config.RoundConfig) (*genesisProcessors, error) { +func createProcessorsForMetaGenesisBlock(arg ArgsGenesisBlockCreator, enableEpochsConfig config.EnableEpochs, roundConfig config.RoundConfig) (*genesisProcessors, error) { epochNotifier := forking.NewGenericEpochNotifier() temporaryMetaHeader := &block.MetaBlock{ Epoch: arg.StartEpochNum, @@ -308,7 +309,7 @@ func createProcessorsForMetaGenesisBlock(arg ArgsGenesisBlockCreator, enableEpoc epochNotifier.CheckEpoch(temporaryMetaHeader) roundNotifier := forking.NewGenericRoundNotifier() - enableRoundsHandler, err := enablers.NewEnableRoundsHandler(*roundConfig, roundNotifier) + enableRoundsHandler, err := enablers.NewEnableRoundsHandler(roundConfig, roundNotifier) if err != nil { return nil, err } @@ -360,6 +361,7 @@ func createProcessorsForMetaGenesisBlock(arg ArgsGenesisBlockCreator, enableEpoc ChanceComputer: &disabled.Rater{}, ShardCoordinator: arg.ShardCoordinator, EnableEpochsHandler: enableEpochsHandler, + NodesCoordinator: &disabled.NodesCoordinator{}, } virtualMachineFactory, err := metachain.NewVMContainerFactory(argsNewVMContainerFactory) if err != nil { diff --git a/genesis/process/shardGenesisBlockCreator.go b/genesis/process/shardGenesisBlockCreator.go index 9fef8f05569..3c7e47070c7 100644 --- a/genesis/process/shardGenesisBlockCreator.go +++ b/genesis/process/shardGenesisBlockCreator.go @@ -5,7 +5,6 @@ import ( "fmt" "math" "math/big" - "strconv" "sync" "github.com/multiversx/mx-chain-core-go/core/check" @@ -45,8 +44,9 @@ import ( "github.com/multiversx/mx-chain-vm-common-go/parsers" ) -var log = logger.GetOrCreate("genesis/process") +const unreachableEpoch = ^uint32(0) +var log = logger.GetOrCreate("genesis/process") var zero = big.NewInt(0) type deployedScMetrics struct { @@ -54,112 +54,26 @@ type deployedScMetrics struct { numOtherTypes int } -func createGenesisConfig() config.EnableEpochs { 
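
The metachain VM container factory now also needs a nodes coordinator, which does not exist yet at genesis time, so the wiring above passes the new disabled.NodesCoordinator stand-in. A compile-time conformance check against what is assumed to be the minimal surface the factory requires would look like this:

// nodesCoordinatorLite is an assumption about the minimal interface the VM factory consumes
type nodesCoordinatorLite interface {
	GetNumTotalEligible() uint64
	IsInterfaceNil() bool
}

// fails to compile if the disabled stand-in stops satisfying that interface
var _ nodesCoordinatorLite = (*disabled.NodesCoordinator)(nil)
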
- blsMultiSignerEnableEpoch := []config.MultiSignerConfig{ +func createGenesisConfig(providedEnableEpochs config.EnableEpochs) config.EnableEpochs { + clonedConfig := providedEnableEpochs + clonedConfig.BuiltInFunctionsEnableEpoch = 0 + clonedConfig.PenalizedTooMuchGasEnableEpoch = unreachableEpoch + clonedConfig.MaxNodesChangeEnableEpoch = []config.MaxNodesChangeConfig{ { - EnableEpoch: 0, - Type: "no-KOSK", + EpochEnable: unreachableEpoch, + MaxNumNodes: 0, + NodesToShufflePerShard: 0, }, } + clonedConfig.DoubleKeyProtectionEnableEpoch = 0 - return config.EnableEpochs{ - SCDeployEnableEpoch: unreachableEpoch, - BuiltInFunctionsEnableEpoch: 0, - RelayedTransactionsEnableEpoch: unreachableEpoch, - PenalizedTooMuchGasEnableEpoch: unreachableEpoch, - SwitchJailWaitingEnableEpoch: unreachableEpoch, - SwitchHysteresisForMinNodesEnableEpoch: unreachableEpoch, - BelowSignedThresholdEnableEpoch: unreachableEpoch, - TransactionSignedWithTxHashEnableEpoch: unreachableEpoch, - MetaProtectionEnableEpoch: unreachableEpoch, - AheadOfTimeGasUsageEnableEpoch: unreachableEpoch, - GasPriceModifierEnableEpoch: unreachableEpoch, - RepairCallbackEnableEpoch: unreachableEpoch, - MaxNodesChangeEnableEpoch: []config.MaxNodesChangeConfig{ - { - EpochEnable: unreachableEpoch, - MaxNumNodes: 0, - NodesToShufflePerShard: 0, - }, - }, - BlockGasAndFeesReCheckEnableEpoch: unreachableEpoch, - StakingV2EnableEpoch: unreachableEpoch, - StakeEnableEpoch: unreachableEpoch, // no need to enable this, we have builtin exceptions in staking system SC - DoubleKeyProtectionEnableEpoch: 0, - ESDTEnableEpoch: unreachableEpoch, - GovernanceEnableEpoch: unreachableEpoch, - DelegationManagerEnableEpoch: unreachableEpoch, - DelegationSmartContractEnableEpoch: unreachableEpoch, - CorrectLastUnjailedEnableEpoch: unreachableEpoch, - BalanceWaitingListsEnableEpoch: unreachableEpoch, - ReturnDataToLastTransferEnableEpoch: unreachableEpoch, - SenderInOutTransferEnableEpoch: unreachableEpoch, - RelayedTransactionsV2EnableEpoch: unreachableEpoch, - UnbondTokensV2EnableEpoch: unreachableEpoch, - SaveJailedAlwaysEnableEpoch: unreachableEpoch, - ValidatorToDelegationEnableEpoch: unreachableEpoch, - ReDelegateBelowMinCheckEnableEpoch: unreachableEpoch, - WaitingListFixEnableEpoch: unreachableEpoch, - IncrementSCRNonceInMultiTransferEnableEpoch: unreachableEpoch, - ESDTMultiTransferEnableEpoch: unreachableEpoch, - GlobalMintBurnDisableEpoch: unreachableEpoch, - ESDTTransferRoleEnableEpoch: unreachableEpoch, - BuiltInFunctionOnMetaEnableEpoch: unreachableEpoch, - ComputeRewardCheckpointEnableEpoch: unreachableEpoch, - SCRSizeInvariantCheckEnableEpoch: unreachableEpoch, - BackwardCompSaveKeyValueEnableEpoch: unreachableEpoch, - ESDTNFTCreateOnMultiShardEnableEpoch: unreachableEpoch, - MetaESDTSetEnableEpoch: unreachableEpoch, - AddTokensToDelegationEnableEpoch: unreachableEpoch, - MultiESDTTransferFixOnCallBackOnEnableEpoch: unreachableEpoch, - OptimizeGasUsedInCrossMiniBlocksEnableEpoch: unreachableEpoch, - CorrectFirstQueuedEpoch: unreachableEpoch, - CorrectJailedNotUnstakedEmptyQueueEpoch: unreachableEpoch, - FixOOGReturnCodeEnableEpoch: unreachableEpoch, - RemoveNonUpdatedStorageEnableEpoch: unreachableEpoch, - DeleteDelegatorAfterClaimRewardsEnableEpoch: unreachableEpoch, - OptimizeNFTStoreEnableEpoch: unreachableEpoch, - CreateNFTThroughExecByCallerEnableEpoch: unreachableEpoch, - StopDecreasingValidatorRatingWhenStuckEnableEpoch: unreachableEpoch, - FrontRunningProtectionEnableEpoch: unreachableEpoch, - IsPayableBySCEnableEpoch: 
unreachableEpoch, - CleanUpInformativeSCRsEnableEpoch: unreachableEpoch, - StorageAPICostOptimizationEnableEpoch: unreachableEpoch, - TransformToMultiShardCreateEnableEpoch: unreachableEpoch, - ESDTRegisterAndSetAllRolesEnableEpoch: unreachableEpoch, - ScheduledMiniBlocksEnableEpoch: unreachableEpoch, - FailExecutionOnEveryAPIErrorEnableEpoch: unreachableEpoch, - AddFailedRelayedTxToInvalidMBsDisableEpoch: unreachableEpoch, - SCRSizeInvariantOnBuiltInResultEnableEpoch: unreachableEpoch, - ManagedCryptoAPIsEnableEpoch: unreachableEpoch, - CheckCorrectTokenIDForTransferRoleEnableEpoch: unreachableEpoch, - DisableExecByCallerEnableEpoch: unreachableEpoch, - RefactorContextEnableEpoch: unreachableEpoch, - CheckFunctionArgumentEnableEpoch: unreachableEpoch, - CheckExecuteOnReadOnlyEnableEpoch: unreachableEpoch, - MiniBlockPartialExecutionEnableEpoch: unreachableEpoch, - ESDTMetadataContinuousCleanupEnableEpoch: unreachableEpoch, - FixAsyncCallBackArgsListEnableEpoch: unreachableEpoch, - FixOldTokenLiquidityEnableEpoch: unreachableEpoch, - SetSenderInEeiOutputTransferEnableEpoch: unreachableEpoch, - RefactorPeersMiniBlocksEnableEpoch: unreachableEpoch, - SCProcessorV2EnableEpoch: unreachableEpoch, - DoNotReturnOldBlockInBlockchainHookEnableEpoch: unreachableEpoch, - MaxBlockchainHookCountersEnableEpoch: unreachableEpoch, - BLSMultiSignerEnableEpoch: blsMultiSignerEnableEpoch, - SetGuardianEnableEpoch: unreachableEpoch, - ScToScLogEventEnableEpoch: unreachableEpoch, - } + return clonedConfig } -func createGenesisRoundConfig() *config.RoundConfig { - return &config.RoundConfig{ - RoundActivations: map[string]config.ActivationRoundByName{ - "DisableAsyncCallV1": { - Round: strconv.FormatUint(unreachableRound, 10), - }, - }, - } +func createGenesisRoundConfig(providedEnableRounds config.RoundConfig) config.RoundConfig { + clonedConfig := providedEnableRounds + + return clonedConfig } // CreateShardGenesisBlock will create a shard genesis block @@ -181,7 +95,11 @@ func CreateShardGenesisBlock( DeployInitialScTxs: make([]data.TransactionHandler, 0), } - processors, err := createProcessorsForShardGenesisBlock(arg, createGenesisConfig(), createGenesisRoundConfig()) + processors, err := createProcessorsForShardGenesisBlock( + arg, + createGenesisConfig(arg.EpochConfig.EnableEpochs), + createGenesisRoundConfig(arg.RoundConfig), + ) if err != nil { return nil, nil, nil, err } @@ -241,22 +159,10 @@ func CreateShardGenesisBlock( ) round, nonce, epoch := getGenesisBlocksRoundNonceEpoch(arg) - header := &block.Header{ - Epoch: epoch, - Round: round, - Nonce: nonce, - ShardID: arg.ShardCoordinator.SelfId(), - BlockBodyType: block.StateBlock, - PubKeysBitmap: []byte{1}, - Signature: rootHash, - RootHash: rootHash, - PrevRandSeed: rootHash, - RandSeed: rootHash, - TimeStamp: arg.GenesisTime, - AccumulatedFees: big.NewInt(0), - DeveloperFees: big.NewInt(0), - ChainID: []byte(arg.Core.ChainID()), - SoftwareVersion: []byte(""), + headerHandler := arg.versionedHeaderFactory.Create(epoch) + err = setInitialDataInHeader(headerHandler, arg, epoch, nonce, round, rootHash) + if err != nil { + return nil, nil, nil, err } err = processors.vmContainer.Close() @@ -269,7 +175,46 @@ func CreateShardGenesisBlock( return nil, nil, nil, err } - return header, scAddresses, indexingData, nil + return headerHandler, scAddresses, indexingData, nil +} + +func setInitialDataInHeader( + headerHandler data.HeaderHandler, + arg ArgsGenesisBlockCreator, + epoch uint32, + nonce uint64, + round uint64, + rootHash []byte, +) error { + 
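
A short note on why the new createGenesisConfig is safe (illustrative, restating the code above): config.EnableEpochs is copied by value, so overriding fields on the clone never touches the caller's configuration; slice fields, however, still share their backing array, which is why the clone assigns a brand-new MaxNodesChangeEnableEpoch slice rather than mutating the provided one. The function name below is illustrative; the diff names it createGenesisConfig.

func cloneGenesisEnableEpochs(provided config.EnableEpochs) config.EnableEpochs {
	cloned := provided                     // value copy: scalar overrides stay local to the clone
	cloned.BuiltInFunctionsEnableEpoch = 0 // active from epoch 0 for genesis processing
	cloned.PenalizedTooMuchGasEnableEpoch = unreachableEpoch
	cloned.MaxNodesChangeEnableEpoch = []config.MaxNodesChangeConfig{ // replace, do not mutate, the shared slice
		{EpochEnable: unreachableEpoch, MaxNumNodes: 0, NodesToShufflePerShard: 0},
	}
	cloned.DoubleKeyProtectionEnableEpoch = 0
	return cloned
}
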
shardHeaderHandler, ok := headerHandler.(data.ShardHeaderHandler) + if !ok { + return process.ErrWrongTypeAssertion + } + + setErrors := make([]error, 0) + setErrors = append(setErrors, shardHeaderHandler.SetEpoch(epoch)) + setErrors = append(setErrors, shardHeaderHandler.SetNonce(nonce)) + setErrors = append(setErrors, shardHeaderHandler.SetRound(round)) + setErrors = append(setErrors, shardHeaderHandler.SetShardID(arg.ShardCoordinator.SelfId())) + setErrors = append(setErrors, shardHeaderHandler.SetBlockBodyTypeInt32(int32(block.StateBlock))) + setErrors = append(setErrors, shardHeaderHandler.SetPubKeysBitmap([]byte{1})) + setErrors = append(setErrors, shardHeaderHandler.SetSignature(rootHash)) + setErrors = append(setErrors, shardHeaderHandler.SetRootHash(rootHash)) + setErrors = append(setErrors, shardHeaderHandler.SetPrevRandSeed(rootHash)) + setErrors = append(setErrors, shardHeaderHandler.SetRandSeed(rootHash)) + setErrors = append(setErrors, shardHeaderHandler.SetTimeStamp(arg.GenesisTime)) + setErrors = append(setErrors, shardHeaderHandler.SetAccumulatedFees(big.NewInt(0))) + setErrors = append(setErrors, shardHeaderHandler.SetDeveloperFees(big.NewInt(0))) + setErrors = append(setErrors, shardHeaderHandler.SetChainID([]byte(arg.Core.ChainID()))) + setErrors = append(setErrors, shardHeaderHandler.SetSoftwareVersion([]byte(""))) + + for _, err := range setErrors { + if err != nil { + return err + } + } + + return nil } func createShardGenesisBlockAfterHardFork( @@ -399,7 +344,7 @@ func setBalanceToTrie(arg ArgsGenesisBlockCreator, accnt genesis.InitialAccountH return arg.Accounts.SaveAccount(account) } -func createProcessorsForShardGenesisBlock(arg ArgsGenesisBlockCreator, enableEpochsConfig config.EnableEpochs, roundConfig *config.RoundConfig) (*genesisProcessors, error) { +func createProcessorsForShardGenesisBlock(arg ArgsGenesisBlockCreator, enableEpochsConfig config.EnableEpochs, roundConfig config.RoundConfig) (*genesisProcessors, error) { genesisWasmVMLocker := &sync.RWMutex{} // use a local instance as to not run in concurrent issues when doing bootstrap epochNotifier := forking.NewGenericEpochNotifier() enableEpochsHandler, err := enablers.NewEnableEpochsHandler(enableEpochsConfig, epochNotifier) @@ -408,7 +353,7 @@ func createProcessorsForShardGenesisBlock(arg ArgsGenesisBlockCreator, enableEpo } roundNotifier := forking.NewGenericRoundNotifier() - enableRoundsHandler, err := enablers.NewEnableRoundsHandler(*roundConfig, roundNotifier) + enableRoundsHandler, err := enablers.NewEnableRoundsHandler(roundConfig, roundNotifier) if err != nil { return nil, err } diff --git a/go.mod b/go.mod index 4652af67770..9de88775caa 100644 --- a/go.mod +++ b/go.mod @@ -14,18 +14,18 @@ require ( github.com/gorilla/websocket v1.5.0 github.com/klauspost/cpuid/v2 v2.2.5 github.com/mitchellh/mapstructure v1.5.0 - github.com/multiversx/mx-chain-communication-go v1.0.13-0.20240126150131-2ac5bc749b40 - github.com/multiversx/mx-chain-core-go v1.2.19-0.20240130114525-969a1a41a404 + github.com/multiversx/mx-chain-communication-go v1.0.13-0.20240321151517-2fffad77c605 + github.com/multiversx/mx-chain-core-go v1.2.20-0.20240328090024-e88291d59ace github.com/multiversx/mx-chain-crypto-go v1.2.10-0.20231206065052-38843c1f1479 - github.com/multiversx/mx-chain-es-indexer-go v1.4.19-0.20240129150813-a772c480d33a + github.com/multiversx/mx-chain-es-indexer-go v1.4.20-0.20240228094052-28a36809b9b8 github.com/multiversx/mx-chain-logger-go v1.0.14-0.20240129144507-d00e967c890c 
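
setInitialDataInHeader gathers every setter error into a slice and returns the first non-nil one, which keeps the happy path free of a long chain of if-blocks. On Go 1.20 or newer the same collection could be folded with errors.Join; this is only an alternative sketch, not what the change does:

import "errors"

// combineSetErrors returns nil only when every setter call succeeded
func combineSetErrors(setErrors []error) error {
	return errors.Join(setErrors...)
}
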
github.com/multiversx/mx-chain-scenario-go v1.4.3-0.20240212160120-cc32d1580157 - github.com/multiversx/mx-chain-storage-go v1.0.15-0.20240129144933-b1c0d642d7f8 - github.com/multiversx/mx-chain-vm-common-go v1.5.12-0.20240306133710-91798f2f9baa - github.com/multiversx/mx-chain-vm-go v1.5.28-0.20240306131314-329c0fcd5ce1 - github.com/multiversx/mx-chain-vm-v1_2-go v1.2.66-0.20240129145751-f814f5525edb - github.com/multiversx/mx-chain-vm-v1_3-go v1.3.67-0.20240129150004-536a22d9c618 - github.com/multiversx/mx-chain-vm-v1_4-go v1.4.95-0.20240129150215-43996b664ada + github.com/multiversx/mx-chain-storage-go v1.0.15-0.20240321150623-3974ec1d6474 + github.com/multiversx/mx-chain-vm-common-go v1.5.12-0.20240328091908-c46c76dac779 + github.com/multiversx/mx-chain-vm-go v1.5.28-0.20240328092329-b5f2c7c059eb + github.com/multiversx/mx-chain-vm-v1_2-go v1.2.66-0.20240321152247-79521988c8e6 + github.com/multiversx/mx-chain-vm-v1_3-go v1.3.67-0.20240321152532-45da5eabdc38 + github.com/multiversx/mx-chain-vm-v1_4-go v1.4.96-0.20240321152756-16110ce9d968 github.com/pelletier/go-toml v1.9.3 github.com/pkg/errors v0.9.1 github.com/prometheus/client_golang v1.14.0 diff --git a/go.sum b/go.sum index 8363b441c57..96da81e0efb 100644 --- a/go.sum +++ b/go.sum @@ -385,30 +385,30 @@ github.com/multiformats/go-varint v0.0.7 h1:sWSGR+f/eu5ABZA2ZpYKBILXTTs9JWpdEM/n github.com/multiformats/go-varint v0.0.7/go.mod h1:r8PUYw/fD/SjBCiKOoDlGF6QawOELpZAu9eioSos/OU= github.com/multiversx/concurrent-map v0.1.4 h1:hdnbM8VE4b0KYJaGY5yJS2aNIW9TFFsUYwbO0993uPI= github.com/multiversx/concurrent-map v0.1.4/go.mod h1:8cWFRJDOrWHOTNSqgYCUvwT7c7eFQ4U2vKMOp4A/9+o= -github.com/multiversx/mx-chain-communication-go v1.0.13-0.20240126150131-2ac5bc749b40 h1:bMFxkbb1EOQs0+JMM0G0/Kv9v4Jjjla5MSVhVk6scTA= -github.com/multiversx/mx-chain-communication-go v1.0.13-0.20240126150131-2ac5bc749b40/go.mod h1:n4E8BWIV0g3AcNGe1gf+vcjUC8A2QCJ4ARQSbiUDGrI= -github.com/multiversx/mx-chain-core-go v1.2.19-0.20240130114525-969a1a41a404 h1:6abf4zfA/L2KQM7twd2guVmYPiXWG83yfJUHwuRz/tg= -github.com/multiversx/mx-chain-core-go v1.2.19-0.20240130114525-969a1a41a404/go.mod h1:B5zU4MFyJezmEzCsAHE9YNULmGCm2zbPHvl9hazNxmE= +github.com/multiversx/mx-chain-communication-go v1.0.13-0.20240321151517-2fffad77c605 h1:WYPdDmxL5rk9O6wUYVW4Fpw/QtwkWiIzFHeH2F5Zap4= +github.com/multiversx/mx-chain-communication-go v1.0.13-0.20240321151517-2fffad77c605/go.mod h1:wUM/1NFfgeTjovQMaaXghynwXgOyoPchMquu2wnCHz8= +github.com/multiversx/mx-chain-core-go v1.2.20-0.20240328090024-e88291d59ace h1:sCXg0IlWmi0k5mC3BmUVCKVrxatGRQKGmqVS/froLDw= +github.com/multiversx/mx-chain-core-go v1.2.20-0.20240328090024-e88291d59ace/go.mod h1:B5zU4MFyJezmEzCsAHE9YNULmGCm2zbPHvl9hazNxmE= github.com/multiversx/mx-chain-crypto-go v1.2.10-0.20231206065052-38843c1f1479 h1:beVIhs5ysylwNplQ/bZ0h5DoDlqKNWgpWE/NMHHNmAw= github.com/multiversx/mx-chain-crypto-go v1.2.10-0.20231206065052-38843c1f1479/go.mod h1:Ap6p7QZFtwPlb++OvCG+85BfuZ+bLP/JtQp6EwjWJsI= -github.com/multiversx/mx-chain-es-indexer-go v1.4.19-0.20240129150813-a772c480d33a h1:mOMUhbsjTq7n5oAv4KkVnL67ngS0+wkqmkiv1XJfBIY= -github.com/multiversx/mx-chain-es-indexer-go v1.4.19-0.20240129150813-a772c480d33a/go.mod h1:3aSGRJNvfUuPQkZUGHWuF11rPPxphsKGuAuIB+eD3is= +github.com/multiversx/mx-chain-es-indexer-go v1.4.20-0.20240228094052-28a36809b9b8 h1:z9ePQGALhPCs9Fv7cQsnsScbEq8KuOJ9xrJEEEOiHyI= +github.com/multiversx/mx-chain-es-indexer-go v1.4.20-0.20240228094052-28a36809b9b8/go.mod h1:3aSGRJNvfUuPQkZUGHWuF11rPPxphsKGuAuIB+eD3is= 
github.com/multiversx/mx-chain-logger-go v1.0.14-0.20240129144507-d00e967c890c h1:QIUOn8FgNRa5cir4BCWHZi/Qcr6Gg0eGNhns4+jy6+k= github.com/multiversx/mx-chain-logger-go v1.0.14-0.20240129144507-d00e967c890c/go.mod h1:fH/fR/GEBsDjPkBoZDVJMoYo2HhlA7++DP6QfITJ1N8= github.com/multiversx/mx-chain-scenario-go v1.4.3-0.20240212160120-cc32d1580157 h1:ydzN3f+Y7H0InXuxAcNUSyVc+omNYL8uYtLqVzqaaX4= github.com/multiversx/mx-chain-scenario-go v1.4.3-0.20240212160120-cc32d1580157/go.mod h1:ndk45i9J9McuCJpTcgiaK4ocd0yhnBBCPrlFwO6GRcs= -github.com/multiversx/mx-chain-storage-go v1.0.15-0.20240129144933-b1c0d642d7f8 h1:/EYv/HGX0OKbeNFt667J0yZRtuJiZH0lEK8YtobuH/c= -github.com/multiversx/mx-chain-storage-go v1.0.15-0.20240129144933-b1c0d642d7f8/go.mod h1:zl1A6teNe39T8yhdZlkX3ckm5aLYrMIJJZ6Ord1E71M= -github.com/multiversx/mx-chain-vm-common-go v1.5.12-0.20240306133710-91798f2f9baa h1:lBvEkooZE6xIZiPc9TTNkgN3pz+qbmuGvcW0Hcc/Ir8= -github.com/multiversx/mx-chain-vm-common-go v1.5.12-0.20240306133710-91798f2f9baa/go.mod h1:aOuG7j+RoifbyJNzmCeY2yT3y0zUTpW2LQoq8giUTwk= -github.com/multiversx/mx-chain-vm-go v1.5.28-0.20240306131314-329c0fcd5ce1 h1:nTI2TKn1CatNJDh6pmqTvtWSTI8xq96lN+ylZJ4pJYQ= -github.com/multiversx/mx-chain-vm-go v1.5.28-0.20240306131314-329c0fcd5ce1/go.mod h1:nG0NywN7JMXckwXn17qTVLaIklZiWOX+vQxrXML5gpU= -github.com/multiversx/mx-chain-vm-v1_2-go v1.2.66-0.20240129145751-f814f5525edb h1:UtiY8X73llF9OLtGb2CM7Xewae1chvPjLc8B+ZmDLjw= -github.com/multiversx/mx-chain-vm-v1_2-go v1.2.66-0.20240129145751-f814f5525edb/go.mod h1:8uugq3HUeDiE6G4AS3F8/B3zA1Pabzbl7SSD6Cebwz8= -github.com/multiversx/mx-chain-vm-v1_3-go v1.3.67-0.20240129150004-536a22d9c618 h1:1uMlT5TjiHUlx81fEH/WQANWlY0PjF3opMlW+E3L3GI= -github.com/multiversx/mx-chain-vm-v1_3-go v1.3.67-0.20240129150004-536a22d9c618/go.mod h1:4uezxguZiX42kUaYMK/x46LLbgpYqn/iQXbcGM7zdM0= -github.com/multiversx/mx-chain-vm-v1_4-go v1.4.95-0.20240129150215-43996b664ada h1:NZLV2QmNPW+QTefuAhC24sOuGbOsAQEXzfv2CWoRJKc= -github.com/multiversx/mx-chain-vm-v1_4-go v1.4.95-0.20240129150215-43996b664ada/go.mod h1:tCjtWeBEZCfjEjlBcgLIRDGJbVmdV8dsmG6ydtiUtSo= +github.com/multiversx/mx-chain-storage-go v1.0.15-0.20240321150623-3974ec1d6474 h1:x65Su8ojHwA+NICp9DrSVGLDDcAlW04DafkqCHY1QPE= +github.com/multiversx/mx-chain-storage-go v1.0.15-0.20240321150623-3974ec1d6474/go.mod h1:hnc6H4D5Ge1haRAQ6QHTXhyh+CT2DRiNJ0U0HQYI3DY= +github.com/multiversx/mx-chain-vm-common-go v1.5.12-0.20240328091908-c46c76dac779 h1:FSgAtNcml8kWdIEn8MxCfPkZ8ZE/wIFNKI5TZLEfcT0= +github.com/multiversx/mx-chain-vm-common-go v1.5.12-0.20240328091908-c46c76dac779/go.mod h1:G6daPJC6bFsvAw45RPMCRi2rP+8LjFxa8G+3alHuJow= +github.com/multiversx/mx-chain-vm-go v1.5.28-0.20240328092329-b5f2c7c059eb h1:0WvWXqzliYS1yKW+6uTxZGMjQd08IQNPzlNNxxyNWHM= +github.com/multiversx/mx-chain-vm-go v1.5.28-0.20240328092329-b5f2c7c059eb/go.mod h1:mZNRILxq51LVqwqE9jMJyDHgmy9W3x7otOGuFjOm82Q= +github.com/multiversx/mx-chain-vm-v1_2-go v1.2.66-0.20240321152247-79521988c8e6 h1:7HqUo9YmpsfN/y9px6RmzREJm5O6ZzP9NqvFSrHTw24= +github.com/multiversx/mx-chain-vm-v1_2-go v1.2.66-0.20240321152247-79521988c8e6/go.mod h1:H2H/zoskiZC0lEokq9qMFVxRkB0RWVDPLjHbG/NrGUU= +github.com/multiversx/mx-chain-vm-v1_3-go v1.3.67-0.20240321152532-45da5eabdc38 h1:SAKjOByxXkZ5Sys5O4IkrrSGCKLoPvD+cCJJEvbev4w= +github.com/multiversx/mx-chain-vm-v1_3-go v1.3.67-0.20240321152532-45da5eabdc38/go.mod h1:3dhvJ5/SgEMKAaIYHAOzo3nmOmJik/DDXaQW21PUno4= +github.com/multiversx/mx-chain-vm-v1_4-go v1.4.96-0.20240321152756-16110ce9d968 
h1:14A3e5rqaXXXOFGC0DjOWtGFiVLx20TNghsaja0u4E0= +github.com/multiversx/mx-chain-vm-v1_4-go v1.4.96-0.20240321152756-16110ce9d968/go.mod h1:XJt8jbyLtP1+pPSzQmHwQG45hH/qazz1H+Xk2wasfTs= github.com/multiversx/mx-components-big-int v1.0.0 h1:Wkr8lSzK2nDqixOrrBa47VNuqdhV1m/aJhaP1EMaiS8= github.com/multiversx/mx-components-big-int v1.0.0/go.mod h1:maIEMgHlNE2u78JaDD0oLzri+ShgU4okHfzP3LWGdQM= github.com/multiversx/protobuf v1.3.2 h1:RaNkxvGTGbA0lMcnHAN24qE1G1i+Xs5yHA6MDvQ4mSM= diff --git a/heartbeat/interface.go b/heartbeat/interface.go index 12eb29a5d61..3652170d8ba 100644 --- a/heartbeat/interface.go +++ b/heartbeat/interface.go @@ -83,6 +83,7 @@ type ManagedPeersHolder interface { IncrementRoundsWithoutReceivedMessages(pkBytes []byte) ResetRoundsWithoutReceivedMessages(pkBytes []byte, pid core.PeerID) GetManagedKeysByCurrentNode() map[string]crypto.PrivateKey + GetLoadedKeysByCurrentNode() [][]byte IsKeyManagedByCurrentNode(pkBytes []byte) bool IsKeyRegistered(pkBytes []byte) bool IsPidManagedByCurrentNode(pid core.PeerID) bool diff --git a/heartbeat/monitor/crossShardPeerTopicNotifier.go b/heartbeat/monitor/crossShardPeerTopicNotifier.go deleted file mode 100644 index aa25995fc71..00000000000 --- a/heartbeat/monitor/crossShardPeerTopicNotifier.go +++ /dev/null @@ -1,111 +0,0 @@ -package monitor - -import ( - "fmt" - "strconv" - "strings" - - "github.com/multiversx/mx-chain-core-go/core" - "github.com/multiversx/mx-chain-core-go/core/check" - "github.com/multiversx/mx-chain-go/common" - "github.com/multiversx/mx-chain-go/heartbeat" - "github.com/multiversx/mx-chain-go/sharding" -) - -const topicSeparator = "_" - -// ArgsCrossShardPeerTopicNotifier represents the arguments for the cross shard peer topic notifier -type ArgsCrossShardPeerTopicNotifier struct { - ShardCoordinator sharding.Coordinator - PeerShardMapper heartbeat.PeerShardMapper -} - -type crossShardPeerTopicNotifier struct { - shardCoordinator sharding.Coordinator - peerShardMapper heartbeat.PeerShardMapper -} - -// NewCrossShardPeerTopicNotifier create a new cross shard peer topic notifier instance -func NewCrossShardPeerTopicNotifier(args ArgsCrossShardPeerTopicNotifier) (*crossShardPeerTopicNotifier, error) { - err := checkArgsCrossShardPeerTopicNotifier(args) - if err != nil { - return nil, err - } - - notifier := &crossShardPeerTopicNotifier{ - shardCoordinator: args.ShardCoordinator, - peerShardMapper: args.PeerShardMapper, - } - - return notifier, nil -} - -func checkArgsCrossShardPeerTopicNotifier(args ArgsCrossShardPeerTopicNotifier) error { - if check.IfNil(args.PeerShardMapper) { - return heartbeat.ErrNilPeerShardMapper - } - if check.IfNil(args.ShardCoordinator) { - return heartbeat.ErrNilShardCoordinator - } - - return nil -} - -// NewPeerFound is called whenever a new peer was found -func (notifier *crossShardPeerTopicNotifier) NewPeerFound(pid core.PeerID, topic string) { - splt := strings.Split(topic, topicSeparator) - if len(splt) != 3 { - // not a cross shard peer or the topic is global - return - } - - shardID1, err := notifier.getShardID(splt[1]) - if err != nil { - log.Error("failed to extract first shard for topic", "topic", topic, "error", err.Error()) - return - } - - shardID2, err := notifier.getShardID(splt[2]) - if err != nil { - log.Error("failed to extract second shard for topic", "topic", topic, "error", err.Error()) - return - } - if shardID1 == shardID2 { - return - } - notifier.checkAndAddShardID(pid, shardID1, topic, shardID2) - notifier.checkAndAddShardID(pid, shardID2, topic, shardID1) -} - -// 
TODO make a standalone component out of this -func (notifier *crossShardPeerTopicNotifier) getShardID(data string) (uint32, error) { - if data == common.MetachainTopicIdentifier { - return common.MetachainShardId, nil - } - val, err := strconv.Atoi(data) - if err != nil { - return 0, err - } - if uint32(val) >= notifier.shardCoordinator.NumberOfShards() || val < 0 { - return 0, fmt.Errorf("invalid value in crossShardPeerTopicNotifier.getShardID %d", val) - } - - return uint32(val), nil -} - -func (notifier *crossShardPeerTopicNotifier) checkAndAddShardID(pid core.PeerID, shardID1 uint32, topic string, shardID2 uint32) { - if shardID1 != notifier.shardCoordinator.SelfId() { - return - } - - log.Trace("crossShardPeerTopicNotifier.NewPeerFound found a cross shard peer", - "topic", topic, - "pid", pid.Pretty(), - "shard", shardID2) - notifier.peerShardMapper.PutPeerIdShardId(pid, shardID2) -} - -// IsInterfaceNil returns true if there is no value under the interface -func (notifier *crossShardPeerTopicNotifier) IsInterfaceNil() bool { - return notifier == nil -} diff --git a/heartbeat/monitor/crossShardPeerTopicNotifier_test.go b/heartbeat/monitor/crossShardPeerTopicNotifier_test.go deleted file mode 100644 index e4951586852..00000000000 --- a/heartbeat/monitor/crossShardPeerTopicNotifier_test.go +++ /dev/null @@ -1,273 +0,0 @@ -package monitor - -import ( - "math" - "testing" - - "github.com/multiversx/mx-chain-core-go/core" - "github.com/multiversx/mx-chain-core-go/core/check" - "github.com/multiversx/mx-chain-go/common" - "github.com/multiversx/mx-chain-go/heartbeat" - "github.com/multiversx/mx-chain-go/process/mock" - "github.com/multiversx/mx-chain-go/testscommon" - "github.com/stretchr/testify/assert" -) - -func createMockArgsCrossShardPeerTopicNotifier() ArgsCrossShardPeerTopicNotifier { - return ArgsCrossShardPeerTopicNotifier{ - ShardCoordinator: &testscommon.ShardsCoordinatorMock{ - NoShards: 3, - CurrentShard: 1, - }, - PeerShardMapper: &mock.PeerShardMapperStub{}, - } -} - -func TestNewCrossShardPeerTopicNotifier(t *testing.T) { - t.Parallel() - - t.Run("nil sharding coordinator should error", func(t *testing.T) { - t.Parallel() - - args := createMockArgsCrossShardPeerTopicNotifier() - args.ShardCoordinator = nil - - notifier, err := NewCrossShardPeerTopicNotifier(args) - assert.True(t, check.IfNil(notifier)) - assert.Equal(t, heartbeat.ErrNilShardCoordinator, err) - }) - t.Run("nil peer shard mapper should error", func(t *testing.T) { - t.Parallel() - - args := createMockArgsCrossShardPeerTopicNotifier() - args.PeerShardMapper = nil - - notifier, err := NewCrossShardPeerTopicNotifier(args) - assert.True(t, check.IfNil(notifier)) - assert.Equal(t, heartbeat.ErrNilPeerShardMapper, err) - }) - t.Run("should work", func(t *testing.T) { - t.Parallel() - - args := createMockArgsCrossShardPeerTopicNotifier() - - notifier, err := NewCrossShardPeerTopicNotifier(args) - assert.False(t, check.IfNil(notifier)) - assert.Nil(t, err) - }) -} - -func TestCrossShardPeerTopicNotifier_NewPeerFound(t *testing.T) { - t.Parallel() - - testTopic := "test" - t.Run("global topic should not notice", func(t *testing.T) { - t.Parallel() - - args := createMockArgsCrossShardPeerTopicNotifier() - args.PeerShardMapper = &mock.PeerShardMapperStub{ - PutPeerIdShardIdCalled: func(pid core.PeerID, shardId uint32) { - assert.Fail(t, "should have not called PutPeerIdShardId") - }, - } - - notifier, _ := NewCrossShardPeerTopicNotifier(args) - notifier.NewPeerFound("pid", "random topic") - }) - t.Run("intra-shard topic 
should not notice", func(t *testing.T) { - t.Parallel() - - args := createMockArgsCrossShardPeerTopicNotifier() - args.PeerShardMapper = &mock.PeerShardMapperStub{ - PutPeerIdShardIdCalled: func(pid core.PeerID, shardId uint32) { - assert.Fail(t, "should have not called PutPeerIdShardId") - }, - } - - notifier, _ := NewCrossShardPeerTopicNotifier(args) - topic := testTopic + core.CommunicationIdentifierBetweenShards(0, 0) - notifier.NewPeerFound("pid", topic) - }) - t.Run("cross-shard topic but not relevant to current node should not notice", func(t *testing.T) { - t.Parallel() - - args := createMockArgsCrossShardPeerTopicNotifier() - args.PeerShardMapper = &mock.PeerShardMapperStub{ - PutPeerIdShardIdCalled: func(pid core.PeerID, shardId uint32) { - assert.Fail(t, "should have not called PutPeerIdShardId") - }, - } - - notifier, _ := NewCrossShardPeerTopicNotifier(args) - topic := testTopic + core.CommunicationIdentifierBetweenShards(0, 2) - notifier.NewPeerFound("pid", topic) - }) - t.Run("first shard ID is a NaN should not notice", func(t *testing.T) { - t.Parallel() - - args := createMockArgsCrossShardPeerTopicNotifier() - args.PeerShardMapper = &mock.PeerShardMapperStub{ - PutPeerIdShardIdCalled: func(pid core.PeerID, shardId uint32) { - assert.Fail(t, "should have not called PutPeerIdShardId") - }, - } - - notifier, _ := NewCrossShardPeerTopicNotifier(args) - topic := testTopic + "_NaN_1" - notifier.NewPeerFound("pid", topic) - }) - t.Run("second shard ID is a NaN should not notice", func(t *testing.T) { - t.Parallel() - - args := createMockArgsCrossShardPeerTopicNotifier() - args.PeerShardMapper = &mock.PeerShardMapperStub{ - PutPeerIdShardIdCalled: func(pid core.PeerID, shardId uint32) { - assert.Fail(t, "should have not called PutPeerIdShardId") - }, - } - - notifier, _ := NewCrossShardPeerTopicNotifier(args) - topic := testTopic + "_1_NaN" - notifier.NewPeerFound("pid", topic) - }) - t.Run("second shard ID is a negative value should not notice", func(t *testing.T) { - t.Parallel() - - args := createMockArgsCrossShardPeerTopicNotifier() - args.PeerShardMapper = &mock.PeerShardMapperStub{ - PutPeerIdShardIdCalled: func(pid core.PeerID, shardId uint32) { - assert.Fail(t, "should have not called PutPeerIdShardId") - }, - } - - notifier, _ := NewCrossShardPeerTopicNotifier(args) - topic := testTopic + "_1_-1" - notifier.NewPeerFound("pid", topic) - }) - t.Run("second shard ID is an out of range value should not notice", func(t *testing.T) { - t.Parallel() - - args := createMockArgsCrossShardPeerTopicNotifier() - args.PeerShardMapper = &mock.PeerShardMapperStub{ - PutPeerIdShardIdCalled: func(pid core.PeerID, shardId uint32) { - assert.Fail(t, "should have not called PutPeerIdShardId") - }, - } - - notifier, _ := NewCrossShardPeerTopicNotifier(args) - topic := testTopic + "_1_4" - notifier.NewPeerFound("pid", topic) - }) - t.Run("same shard IDs should not notice", func(t *testing.T) { - t.Parallel() - - args := createMockArgsCrossShardPeerTopicNotifier() - args.PeerShardMapper = &mock.PeerShardMapperStub{ - PutPeerIdShardIdCalled: func(pid core.PeerID, shardId uint32) { - assert.Fail(t, "should have not called PutPeerIdShardId") - }, - } - - notifier, _ := NewCrossShardPeerTopicNotifier(args) - topic := testTopic + "_0_0" - notifier.NewPeerFound("pid", topic) - }) - t.Run("cross-shard between 0 and 1 should notice", func(t *testing.T) { - t.Parallel() - - expectedPid := core.PeerID("pid") - notifiedShardID := uint32(math.MaxUint32) - args := createMockArgsCrossShardPeerTopicNotifier() 
- args.PeerShardMapper = &mock.PeerShardMapperStub{ - PutPeerIdShardIdCalled: func(pid core.PeerID, shardId uint32) { - assert.Equal(t, pid, expectedPid) - notifiedShardID = shardId - }, - } - - notifier, _ := NewCrossShardPeerTopicNotifier(args) - topic := testTopic + core.CommunicationIdentifierBetweenShards(0, 1) - notifier.NewPeerFound("pid", topic) - assert.Equal(t, uint32(0), notifiedShardID) - }) - t.Run("cross-shard between 1 and 2 should notice", func(t *testing.T) { - t.Parallel() - - expectedPid := core.PeerID("pid") - notifiedShardID := uint32(math.MaxUint32) - args := createMockArgsCrossShardPeerTopicNotifier() - args.PeerShardMapper = &mock.PeerShardMapperStub{ - PutPeerIdShardIdCalled: func(pid core.PeerID, shardId uint32) { - assert.Equal(t, pid, expectedPid) - notifiedShardID = shardId - }, - } - - notifier, _ := NewCrossShardPeerTopicNotifier(args) - topic := testTopic + core.CommunicationIdentifierBetweenShards(1, 2) - notifier.NewPeerFound("pid", topic) - assert.Equal(t, uint32(2), notifiedShardID) - }) - t.Run("cross-shard between 1 and META should notice", func(t *testing.T) { - t.Parallel() - - expectedPid := core.PeerID("pid") - notifiedShardID := uint32(math.MaxUint32) - args := createMockArgsCrossShardPeerTopicNotifier() - args.PeerShardMapper = &mock.PeerShardMapperStub{ - PutPeerIdShardIdCalled: func(pid core.PeerID, shardId uint32) { - assert.Equal(t, pid, expectedPid) - notifiedShardID = shardId - }, - } - - notifier, _ := NewCrossShardPeerTopicNotifier(args) - topic := testTopic + core.CommunicationIdentifierBetweenShards(1, common.MetachainShardId) - notifier.NewPeerFound("pid", topic) - assert.Equal(t, common.MetachainShardId, notifiedShardID) - }) - t.Run("cross-shard between META and 1 should notice", func(t *testing.T) { - t.Parallel() - - expectedPid := core.PeerID("pid") - notifiedShardID := uint32(math.MaxUint32) - args := createMockArgsCrossShardPeerTopicNotifier() - args.ShardCoordinator = &testscommon.ShardsCoordinatorMock{ - NoShards: 3, - CurrentShard: common.MetachainShardId, - } - args.PeerShardMapper = &mock.PeerShardMapperStub{ - PutPeerIdShardIdCalled: func(pid core.PeerID, shardId uint32) { - assert.Equal(t, pid, expectedPid) - notifiedShardID = shardId - }, - } - - notifier, _ := NewCrossShardPeerTopicNotifier(args) - topic := testTopic + core.CommunicationIdentifierBetweenShards(common.MetachainShardId, 1) - notifier.NewPeerFound("pid", topic) - assert.Equal(t, uint32(1), notifiedShardID) - }) -} - -func BenchmarkCrossShardPeerTopicNotifier_NewPeerFound(b *testing.B) { - args := createMockArgsCrossShardPeerTopicNotifier() - notifier, _ := NewCrossShardPeerTopicNotifier(args) - - for i := 0; i < b.N; i++ { - switch i % 6 { - case 0: - notifier.NewPeerFound("pid", "global") - case 2: - notifier.NewPeerFound("pid", "intrashard_1") - case 3: - notifier.NewPeerFound("pid", "crossshard_1_2") - case 4: - notifier.NewPeerFound("pid", "crossshard_1_META") - case 5: - notifier.NewPeerFound("pid", "crossshard_META_1") - case 6: - notifier.NewPeerFound("pid", "crossshard_2_META") - } - } -} diff --git a/integrationTests/api/transaction_test.go b/integrationTests/api/transaction_test.go index c4267676343..2ecb27b850c 100644 --- a/integrationTests/api/transaction_test.go +++ b/integrationTests/api/transaction_test.go @@ -14,6 +14,10 @@ import ( ) func TestTransactionGroup(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + node := integrationTests.NewTestProcessorNodeWithTestWebServer(3, 0, 0) 
testTransactionGasCostWithMissingFields(t, node) diff --git a/integrationTests/benchmarks/loadFromTrie_test.go b/integrationTests/benchmarks/loadFromTrie_test.go index c3c7a99f573..576326bbc0d 100644 --- a/integrationTests/benchmarks/loadFromTrie_test.go +++ b/integrationTests/benchmarks/loadFromTrie_test.go @@ -32,6 +32,10 @@ func TestTrieLoadTime(t *testing.T) { } func TestTrieLoadTimeForOneLevel(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + numTrieLevels := 1 numTries := 10000 numChildrenPerBranch := 8 diff --git a/integrationTests/chainSimulator/interface.go b/integrationTests/chainSimulator/interface.go new file mode 100644 index 00000000000..6d66b9d62c0 --- /dev/null +++ b/integrationTests/chainSimulator/interface.go @@ -0,0 +1,24 @@ +package chainSimulator + +import ( + "math/big" + + "github.com/multiversx/mx-chain-core-go/data/api" + "github.com/multiversx/mx-chain-core-go/data/transaction" + "github.com/multiversx/mx-chain-go/node/chainSimulator/dtos" + "github.com/multiversx/mx-chain-go/node/chainSimulator/process" +) + +// ChainSimulator defines the operations for an entity that can simulate operations of a chain +type ChainSimulator interface { + GenerateBlocks(numOfBlocks int) error + GenerateBlocksUntilEpochIsReached(targetEpoch int32) error + AddValidatorKeys(validatorsPrivateKeys [][]byte) error + GetNodeHandler(shardID uint32) process.NodeHandler + SendTxAndGenerateBlockTilTxIsExecuted(txToSend *transaction.Transaction, maxNumOfBlockToGenerateWhenExecutingTx int) (*transaction.ApiTransactionResult, error) + SendTxsAndGenerateBlocksTilAreExecuted(txsToSend []*transaction.Transaction, maxNumOfBlocksToGenerateWhenExecutingTx int) ([]*transaction.ApiTransactionResult, error) + SetStateMultiple(stateSlice []*dtos.AddressState) error + GenerateAndMintWalletAddress(targetShardID uint32, value *big.Int) (dtos.WalletAddress, error) + GetInitialWalletKeys() *dtos.InitialWalletKeys + GetAccount(address dtos.WalletAddress) (api.AccountResponse, error) +} diff --git a/integrationTests/chainSimulator/staking/delegation_test.go b/integrationTests/chainSimulator/staking/delegation_test.go new file mode 100644 index 00000000000..1ed12f29fd9 --- /dev/null +++ b/integrationTests/chainSimulator/staking/delegation_test.go @@ -0,0 +1,1739 @@ +package staking + +import ( + "crypto/rand" + "encoding/hex" + "fmt" + "math/big" + "strings" + "testing" + "time" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/data/transaction" + "github.com/multiversx/mx-chain-core-go/data/validator" + dataVm "github.com/multiversx/mx-chain-core-go/data/vm" + "github.com/multiversx/mx-chain-crypto-go/signing" + "github.com/multiversx/mx-chain-crypto-go/signing/mcl" + mclsig "github.com/multiversx/mx-chain-crypto-go/signing/mcl/singlesig" + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/config" + chainSimulatorIntegrationTests "github.com/multiversx/mx-chain-go/integrationTests/chainSimulator" + "github.com/multiversx/mx-chain-go/node/chainSimulator" + "github.com/multiversx/mx-chain-go/node/chainSimulator/components/api" + "github.com/multiversx/mx-chain-go/node/chainSimulator/configs" + "github.com/multiversx/mx-chain-go/node/chainSimulator/dtos" + chainSimulatorProcess "github.com/multiversx/mx-chain-go/node/chainSimulator/process" + "github.com/multiversx/mx-chain-go/process" + "github.com/multiversx/mx-chain-go/vm" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + 
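(Editor's note, not part of the patch: the new ChainSimulator interface added above in integrationTests/chainSimulator/interface.go is the only surface the staking scenarios below rely on. A minimal sketch of a helper written purely against that interface is shown here for orientation; the helper name fundWallet is illustrative and assumes the surrounding file's imports and the oneEGLD constant defined further down in delegation_test.go.)

// fundWallet is an illustrative helper, not part of this diff: it mints a fresh
// wallet on any shard through the ChainSimulator interface and returns the wallet
// together with its current account nonce, ready to be used as a transaction sender.
func fundWallet(t *testing.T, cs chainSimulatorIntegrationTests.ChainSimulator, egld int64) (dtos.WalletAddress, uint64) {
	// mint egld * 10^18 (oneEGLD is the denomination constant declared in this file)
	mintValue := big.NewInt(0).Mul(oneEGLD, big.NewInt(egld))

	wallet, err := cs.GenerateAndMintWalletAddress(core.AllShardId, mintValue)
	require.Nil(t, err)

	// GetAccount reflects the freshly minted balance and the starting nonce
	account, err := cs.GetAccount(wallet)
	require.Nil(t, err)

	return wallet, account.Nonce
}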
+const mockBLSSignature = "010101" +const gasLimitForStakeOperation = 50_000_000 +const gasLimitForConvertOperation = 510_000_000 +const gasLimitForDelegationContractCreationOperation = 500_000_000 +const gasLimitForAddNodesOperation = 500_000_000 +const gasLimitForUndelegateOperation = 500_000_000 +const gasLimitForMergeOperation = 600_000_000 +const gasLimitForDelegate = 12_000_000 +const gasLimitForUnBond = 12_000_000 +const minGasPrice = 1000000000 +const txVersion = 1 +const mockTxSignature = "sig" +const queuedStatus = "queued" +const stakedStatus = "staked" +const notStakedStatus = "notStaked" +const unStakedStatus = "unStaked" +const auctionStatus = "auction" +const okReturnCode = "ok" +const maxCap = "00" // no cap +const hexServiceFee = "0ea1" // 37.45% +const walletAddressBytesLen = 32 + +var initialDelegationValue = big.NewInt(0).Mul(oneEGLD, big.NewInt(1250)) +var zeroValue = big.NewInt(0) +var oneEGLD = big.NewInt(1000000000000000000) +var minimumStakeValue = big.NewInt(0).Mul(oneEGLD, big.NewInt(2500)) + +// Test description: +// Test that delegation contract created with MakeNewContractFromValidatorData works properly +// Also check that delegate and undelegate works properly and the top-up remain the same if every delegator undelegates. +// Test that the top-up from normal stake will be transferred after creating the contract and will be used in auction list computing + +// Internal test scenario #10 +func TestChainSimulator_MakeNewContractFromValidatorData(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + roundDurationInMillis := uint64(6000) + roundsPerEpoch := core.OptionalUint64{ + HasValue: true, + Value: 30, + } + + // Test scenario done in staking 3.5 phase (staking v4 is not active) + // 1. Add a new validator private key in the multi key handler + // 2. Set the initial state for the owner and the 2 delegators + // 3. Do a stake transaction for the validator key and test that the new key is on queue and topup is 500 + // 4. Execute the MakeNewContractFromValidatorData transaction and test that the key is on queue and topup is 500 + // 5. Execute 2 delegation operations of 100 EGLD each, check the topup is 700 + // 6. Execute 2 unDelegate operations of 100 EGLD each, check the topup is back to 500 + t.Run("staking ph 4 is not active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 100 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 101 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 102 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 102 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorMakeNewContractFromValidatorData(t, cs, 1) + }) + + // Test scenario done in staking v4 phase step 1 + // 1. Add a new validator private key in the multi key handler + // 2. Set the initial state for the owner and the 2 delegators + // 3. 
Do a stake transaction for the validator key and test that the new key is on auction list and topup is 500 + // 4. Execute the MakeNewContractFromValidatorData transaction and test that the key is on auction list and topup is 500 + // 5. Execute 2 delegation operations of 100 EGLD each, check the topup is 700 + // 6. Execute 2 unDelegate operations of 100 EGLD each, check the topup is back to 500 + t.Run("staking ph 4 step 1 is active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorMakeNewContractFromValidatorData(t, cs, 2) + }) + + // Test scenario done in staking v4 phase step 2 + // 1. Add a new validator private key in the multi key handler + // 2. Set the initial state for the owner and the 2 delegators + // 3. Do a stake transaction for the validator key and test that the new key is on auction list and topup is 500 + // 4. Execute the MakeNewContractFromValidatorData transaction and test that the key is on auction list and topup is 500 + // 5. Execute 2 delegation operations of 100 EGLD each, check the topup is 700 + // 6. Execute 2 unDelegate operations of 100 EGLD each, check the topup is back to 500 + t.Run("staking ph 4 step 2 is active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorMakeNewContractFromValidatorData(t, cs, 3) + }) + + // Test scenario done in staking v4 phase step 3 + // 1. Add a new validator private key in the multi key handler + // 2. Set the initial state for the owner and the 2 delegators + // 3. Do a stake transaction for the validator key and test that the new key is on auction list and topup is 500 + // 4. Execute the MakeNewContractFromValidatorData transaction and test that the key is on auction list and topup is 500 + // 5. Execute 2 delegation operations of 100 EGLD each, check the topup is 700 + // 6. 
Execute 2 unDelegate operations of 100 EGLD each, check the topup is back to 500 + t.Run("staking ph 4 step 3 is active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorMakeNewContractFromValidatorData(t, cs, 4) + }) +} + +func testChainSimulatorMakeNewContractFromValidatorData(t *testing.T, cs chainSimulatorIntegrationTests.ChainSimulator, targetEpoch int32) { + err := cs.GenerateBlocksUntilEpochIsReached(targetEpoch) + require.Nil(t, err) + + log.Info("Step 1. Add a new validator private key in the multi key handler") + privateKey, blsKeys, err := chainSimulator.GenerateBlsPrivateKeys(1) + require.Nil(t, err) + + err = cs.AddValidatorKeys(privateKey) + require.Nil(t, err) + metachainNode := cs.GetNodeHandler(core.MetachainShardId) + + log.Info("Step 2. Set the initial state for the owner and the 2 delegators") + mintValue := big.NewInt(3010) + mintValue = mintValue.Mul(oneEGLD, mintValue) + + validatorOwner, err := cs.GenerateAndMintWalletAddress(core.AllShardId, mintValue) + require.Nil(t, err) + + delegator1, err := cs.GenerateAndMintWalletAddress(core.AllShardId, mintValue) + require.Nil(t, err) + + delegator2, err := cs.GenerateAndMintWalletAddress(core.AllShardId, mintValue) + require.Nil(t, err) + + log.Info("working with the following addresses", + "newValidatorOwner", validatorOwner.Bech32, "delegator1", delegator1.Bech32, "delegator2", delegator2.Bech32) + + log.Info("Step 3. Do a stake transaction for the validator key and test that the new key is on queue / auction list and the correct topup") + stakeValue := big.NewInt(0).Set(minimumStakeValue) + addedStakedValue := big.NewInt(0).Mul(oneEGLD, big.NewInt(500)) + stakeValue.Add(stakeValue, addedStakedValue) + txDataField := fmt.Sprintf("stake@01@%s@%s", blsKeys[0], mockBLSSignature) + txStake := generateTransaction(validatorOwner.Bytes, 0, vm.ValidatorSCAddress, stakeValue, txDataField, gasLimitForStakeOperation) + stakeTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txStake, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, stakeTx) + + err = cs.GenerateBlocks(2) // allow the metachain to finalize the block that contains the staking of the node + assert.Nil(t, err) + + testBLSKeyIsInQueueOrAuction(t, metachainNode, validatorOwner.Bytes, blsKeys[0], addedStakedValue, 1) + + log.Info("Step 4. 
Execute the MakeNewContractFromValidatorData transaction and test that the key is on queue / auction list and the correct topup") + txDataField = fmt.Sprintf("makeNewContractFromValidatorData@%s@%s", maxCap, hexServiceFee) + txConvert := generateTransaction(validatorOwner.Bytes, 1, vm.DelegationManagerSCAddress, zeroValue, txDataField, gasLimitForConvertOperation) + convertTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txConvert, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, convertTx) + + delegationAddress := convertTx.Logs.Events[0].Topics[1] + delegationAddressBech32 := metachainNode.GetCoreComponents().AddressPubKeyConverter().SilentEncode(delegationAddress, log) + log.Info("generated delegation address", "address", delegationAddressBech32) + + err = metachainNode.GetProcessComponents().ValidatorsProvider().ForceUpdate() + require.Nil(t, err) + + testBLSKeyIsInQueueOrAuction(t, metachainNode, delegationAddress, blsKeys[0], addedStakedValue, 1) + + log.Info("Step 5. Execute 2 delegation operations of 100 EGLD each, check the topup is 700") + delegateValue := big.NewInt(0).Mul(oneEGLD, big.NewInt(100)) + txDelegate1 := generateTransaction(delegator1.Bytes, 0, delegationAddress, delegateValue, "delegate", gasLimitForDelegate) + delegate1Tx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txDelegate1, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, delegate1Tx) + + txDelegate2 := generateTransaction(delegator2.Bytes, 0, delegationAddress, delegateValue, "delegate", gasLimitForDelegate) + delegate2Tx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txDelegate2, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, delegate2Tx) + + expectedTopUp := big.NewInt(0).Mul(oneEGLD, big.NewInt(700)) + testBLSKeyIsInQueueOrAuction(t, metachainNode, delegationAddress, blsKeys[0], expectedTopUp, 1) + + log.Info("6. 
Execute 2 unDelegate operations of 100 EGLD each, check the topup is back to 500") + unDelegateValue := big.NewInt(0).Mul(oneEGLD, big.NewInt(100)) + txDataField = fmt.Sprintf("unDelegate@%s", hex.EncodeToString(unDelegateValue.Bytes())) + txUnDelegate1 := generateTransaction(delegator1.Bytes, 1, delegationAddress, zeroValue, txDataField, gasLimitForDelegate) + unDelegate1Tx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txUnDelegate1, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, unDelegate1Tx) + + txDataField = fmt.Sprintf("unDelegate@%s", hex.EncodeToString(unDelegateValue.Bytes())) + txUnDelegate2 := generateTransaction(delegator2.Bytes, 1, delegationAddress, zeroValue, txDataField, gasLimitForDelegate) + unDelegate2Tx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txUnDelegate2, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, unDelegate2Tx) + + expectedTopUp = big.NewInt(0).Mul(oneEGLD, big.NewInt(500)) + testBLSKeyIsInQueueOrAuction(t, metachainNode, delegationAddress, blsKeys[0], expectedTopUp, 1) +} + +func testBLSKeyIsInQueueOrAuction(t *testing.T, metachainNode chainSimulatorProcess.NodeHandler, address []byte, blsKey string, expectedTopUp *big.Int, actionListSize int) { + decodedBLSKey, _ := hex.DecodeString(blsKey) + err := metachainNode.GetProcessComponents().ValidatorsProvider().ForceUpdate() + require.Nil(t, err) + statistics, err := metachainNode.GetFacadeHandler().ValidatorStatisticsApi() + require.Nil(t, err) + assert.Equal(t, expectedTopUp, getBLSTopUpValue(t, metachainNode, address)) + + activationEpoch := metachainNode.GetCoreComponents().EnableEpochsHandler().GetActivationEpoch(common.StakingV4Step1Flag) + if activationEpoch <= metachainNode.GetCoreComponents().EnableEpochsHandler().GetCurrentEpoch() { + testBLSKeyIsInAuction(t, metachainNode, decodedBLSKey, blsKey, expectedTopUp, actionListSize, statistics, 1, address) + return + } + + // in staking ph 2/3.5 we do not find the bls key on the validator statistics + _, found := statistics[blsKey] + require.False(t, found) + require.Equal(t, queuedStatus, getBLSKeyStatus(t, metachainNode, decodedBLSKey)) +} + +func testBLSKeyIsInAuction( + t *testing.T, + metachainNode chainSimulatorProcess.NodeHandler, + blsKeyBytes []byte, + blsKey string, + topUpInAuctionList *big.Int, + actionListSize int, + validatorStatistics map[string]*validator.ValidatorStatistics, + numNodes int, + owner []byte, +) { + require.Equal(t, stakedStatus, getBLSKeyStatus(t, metachainNode, blsKeyBytes)) + + err := metachainNode.GetProcessComponents().ValidatorsProvider().ForceUpdate() + require.Nil(t, err) + auctionList, err := metachainNode.GetProcessComponents().ValidatorsProvider().GetAuctionList() + require.Nil(t, err) + + currentEpoch := metachainNode.GetCoreComponents().EnableEpochsHandler().GetCurrentEpoch() + if metachainNode.GetCoreComponents().EnableEpochsHandler().GetActivationEpoch(common.StakingV4Step2Flag) == currentEpoch { + // starting from phase 2, we have the shuffled out nodes from the previous epoch in the auction list + actionListSize += 8 + } + if metachainNode.GetCoreComponents().EnableEpochsHandler().GetActivationEpoch(common.StakingV4Step3Flag) <= currentEpoch { + // starting from phase 3, we have the shuffled out nodes from the previous epoch in the auction list + actionListSize += 4 + } + + require.Equal(t, actionListSize, len(auctionList)) + ownerAsBech32, err := metachainNode.GetCoreComponents().AddressPubKeyConverter().Encode(owner) + require.Nil(t,
err) + if actionListSize != 0 { + nodeWasFound := false + for _, item := range auctionList { + if item.Owner != ownerAsBech32 { + continue + } + + require.Equal(t, numNodes, len(auctionList[0].Nodes)) + for _, node := range item.Nodes { + if node.BlsKey == blsKey { + require.Equal(t, topUpInAuctionList.String(), item.TopUpPerNode) + nodeWasFound = true + } + } + } + require.True(t, nodeWasFound) + } + + // in staking ph 4 we should find the key in the validators statics + validatorInfo, found := validatorStatistics[blsKey] + require.True(t, found) + require.Equal(t, auctionStatus, validatorInfo.ValidatorStatus) +} + +func testBLSKeysAreInQueueOrAuction(t *testing.T, metachainNode chainSimulatorProcess.NodeHandler, address []byte, blsKeys []string, totalTopUp *big.Int, actionListSize int) { + err := metachainNode.GetProcessComponents().ValidatorsProvider().ForceUpdate() + require.Nil(t, err) + statistics, err := metachainNode.GetFacadeHandler().ValidatorStatisticsApi() + require.Nil(t, err) + assert.Equal(t, totalTopUp, getBLSTopUpValue(t, metachainNode, address)) + + individualTopup := big.NewInt(0).Set(totalTopUp) + individualTopup.Div(individualTopup, big.NewInt(int64(len(blsKeys)))) + + for _, blsKey := range blsKeys { + decodedBLSKey, _ := hex.DecodeString(blsKey) + activationEpoch := metachainNode.GetCoreComponents().EnableEpochsHandler().GetActivationEpoch(common.StakingV4Step1Flag) + if activationEpoch <= metachainNode.GetCoreComponents().EnableEpochsHandler().GetCurrentEpoch() { + testBLSKeyIsInAuction(t, metachainNode, decodedBLSKey, blsKey, individualTopup, actionListSize, statistics, len(blsKeys), address) + continue + } + + // in staking ph 2/3.5 we do not find the bls key on the validator statistics + _, found := statistics[blsKey] + require.False(t, found) + require.Equal(t, queuedStatus, getBLSKeyStatus(t, metachainNode, decodedBLSKey)) + } +} + +// Test description: +// Test that 2 different contracts with different topups that came from the normal stake will be considered in auction list computing in the correct order +// 1. Add 2 new validator private keys in the multi key handler +// 2. Set the initial state for 2 owners (mint 2 new wallets) +// 3. Do 2 stake transactions and test that the new keys are on queue / auction list and have the correct topup - 100 and 200 EGLD, respectively +// 4. Convert both validators into staking providers and test that the new keys are on queue / auction list and have the correct topup +// 5. 
If the staking v4 is activated (regardless the steps), check that the auction list sorted the 2 BLS keys based on topup + +// Internal test scenario #11 +func TestChainSimulator_MakeNewContractFromValidatorDataWith2StakingContracts(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + roundDurationInMillis := uint64(6000) + roundsPerEpoch := core.OptionalUint64{ + HasValue: true, + Value: 30, + } + + t.Run("staking ph 4 is not active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 100 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 101 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 102 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 102 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorMakeNewContractFromValidatorDataWith2StakingContracts(t, cs, 1) + }) + t.Run("staking ph 4 step 1 is active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorMakeNewContractFromValidatorDataWith2StakingContracts(t, cs, 2) + }) + t.Run("staking ph 4 step 2 is active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorMakeNewContractFromValidatorDataWith2StakingContracts(t, cs, 3) + }) + t.Run("staking ph 4 step 3 is active", func(t 
*testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorMakeNewContractFromValidatorDataWith2StakingContracts(t, cs, 4) + }) +} + +func testChainSimulatorMakeNewContractFromValidatorDataWith2StakingContracts(t *testing.T, cs chainSimulatorIntegrationTests.ChainSimulator, targetEpoch int32) { + err := cs.GenerateBlocksUntilEpochIsReached(targetEpoch) + require.Nil(t, err) + + log.Info("Step 1. Add 2 new validator private keys in the multi key handler") + privateKeys, blsKeys, err := chainSimulator.GenerateBlsPrivateKeys(2) + require.Nil(t, err) + + err = cs.AddValidatorKeys(privateKeys) + require.Nil(t, err) + metachainNode := cs.GetNodeHandler(core.MetachainShardId) + + log.Info("Step 2. Set the initial state for 2 owners") + mintValue := big.NewInt(3010) + mintValue = mintValue.Mul(oneEGLD, mintValue) + + validatorOwnerA, err := cs.GenerateAndMintWalletAddress(core.AllShardId, mintValue) + require.Nil(t, err) + + validatorOwnerB, err := cs.GenerateAndMintWalletAddress(core.AllShardId, mintValue) + require.Nil(t, err) + + log.Info("working with the following addresses", + "validatorOwnerA", validatorOwnerA.Bech32, "validatorOwnerB", validatorOwnerB.Bech32) + + log.Info("Step 3. Do 2 stake transactions and test that the new keys are on queue / auction list and have the correct topup") + + topupA := big.NewInt(0).Mul(oneEGLD, big.NewInt(100)) + stakeValueA := big.NewInt(0).Add(minimumStakeValue, topupA) + txStakeA := generateStakeTransaction(t, cs, validatorOwnerA, blsKeys[0], stakeValueA) + + topupB := big.NewInt(0).Mul(oneEGLD, big.NewInt(200)) + stakeValueB := big.NewInt(0).Add(minimumStakeValue, topupB) + txStakeB := generateStakeTransaction(t, cs, validatorOwnerB, blsKeys[1], stakeValueB) + + stakeTxs, err := cs.SendTxsAndGenerateBlocksTilAreExecuted([]*transaction.Transaction{txStakeA, txStakeB}, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.Equal(t, 2, len(stakeTxs)) + + err = cs.GenerateBlocks(2) // allow the metachain to finalize the block that contains the staking of the node + assert.Nil(t, err) + + testBLSKeyIsInQueueOrAuction(t, metachainNode, validatorOwnerA.Bytes, blsKeys[0], topupA, 2) + testBLSKeyIsInQueueOrAuction(t, metachainNode, validatorOwnerB.Bytes, blsKeys[1], topupB, 2) + + log.Info("Step 4. 
Convert both validators into staking providers and test that the new keys are on queue / auction list and have the correct topup") + + txConvertA := generateConvertToStakingProviderTransaction(t, cs, validatorOwnerA) + txConvertB := generateConvertToStakingProviderTransaction(t, cs, validatorOwnerB) + + convertTxs, err := cs.SendTxsAndGenerateBlocksTilAreExecuted([]*transaction.Transaction{txConvertA, txConvertB}, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.Equal(t, 2, len(convertTxs)) + + err = cs.GenerateBlocks(2) // allow the metachain to finalize the block that contains the conversion of the validators + assert.Nil(t, err) + + delegationAddressA := convertTxs[0].Logs.Events[0].Topics[1] + delegationAddressB := convertTxs[1].Logs.Events[0].Topics[1] + + testBLSKeyIsInQueueOrAuction(t, metachainNode, delegationAddressA, blsKeys[0], topupA, 2) + testBLSKeyIsInQueueOrAuction(t, metachainNode, delegationAddressB, blsKeys[1], topupB, 2) + + log.Info("Step 5. If the staking v4 is activated, check that the auction list sorted the 2 BLS keys based on topup") + step1ActivationEpoch := metachainNode.GetCoreComponents().EnableEpochsHandler().GetActivationEpoch(common.StakingV4Step1Flag) + if step1ActivationEpoch > metachainNode.GetCoreComponents().EnableEpochsHandler().GetCurrentEpoch() { + // we are in staking v3.5, the test ends here + return + } + + auctionList, err := metachainNode.GetProcessComponents().ValidatorsProvider().GetAuctionList() + require.Nil(t, err) + + firstAuctionPosition := auctionList[0] + secondAuctionPosition := auctionList[1] + // check the correct order of the nodes in the auction list based on topup + require.Equal(t, blsKeys[1], firstAuctionPosition.Nodes[0].BlsKey) + require.Equal(t, topupB.String(), firstAuctionPosition.TopUpPerNode) + + require.Equal(t, blsKeys[0], secondAuctionPosition.Nodes[0].BlsKey) + require.Equal(t, topupA.String(), secondAuctionPosition.TopUpPerNode) +} + +// Test description: +// Test that 1 contract having 3 BLS keys properly handles the stakeNodes-unStakeNodes-unBondNodes sequence for 2 of the BLS keys +// 1. Add 3 new validator private keys in the multi key handler +// 2. Set the initial state for 1 owner and 1 delegator +// 3. Do a stake transaction and test that the new key is on queue / auction list and has the correct topup +// 4. Convert the validator into a staking provider and test that the key is on queue / auction list and has the correct topup +// 5. Add 2 nodes in the staking contract +// 6. Delegate 5000 EGLD to the contract +// 7. Stake the 2 nodes +// 8. UnStake 2 nodes (latest staked) +// 9.
Unbond the 2 nodes (that were un staked) + +// Internal test scenario #85 +func TestChainSimulatorMakeNewContractFromValidatorDataWith1StakingContractUnstakeAndUnbond(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + roundDurationInMillis := uint64(6000) + roundsPerEpoch := core.OptionalUint64{ + HasValue: true, + Value: 80, + } + + t.Run("staking ph 4 is not active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakeLimitsEnableEpoch = 100 + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 100 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 101 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 102 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 102 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + // unbond succeeded because the nodes were on queue + testChainSimulatorMakeNewContractFromValidatorDataWith1StakingContractUnstakeAndUnbond(t, cs, 1, notStakedStatus) + }) + t.Run("staking ph 4 step 1 is active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakeLimitsEnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + cfg.EpochConfig.EnableEpochs.AlwaysMergeContextsInEEIEnableEpoch = 1 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + cfg.SystemSCConfig.StakingSystemSCConfig.NodeLimitPercentage = 1 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorMakeNewContractFromValidatorDataWith1StakingContractUnstakeAndUnbond(t, cs, 2, unStakedStatus) + }) + t.Run("staking ph 4 step 2 is active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakeLimitsEnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 
3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + cfg.EpochConfig.EnableEpochs.AlwaysMergeContextsInEEIEnableEpoch = 1 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + cfg.SystemSCConfig.StakingSystemSCConfig.NodeLimitPercentage = 1 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorMakeNewContractFromValidatorDataWith1StakingContractUnstakeAndUnbond(t, cs, 3, unStakedStatus) + }) + t.Run("staking ph 4 step 3 is active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakeLimitsEnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + cfg.EpochConfig.EnableEpochs.AlwaysMergeContextsInEEIEnableEpoch = 1 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + cfg.SystemSCConfig.StakingSystemSCConfig.NodeLimitPercentage = 1 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorMakeNewContractFromValidatorDataWith1StakingContractUnstakeAndUnbond(t, cs, 4, unStakedStatus) + }) +} + +func testChainSimulatorMakeNewContractFromValidatorDataWith1StakingContractUnstakeAndUnbond( + t *testing.T, + cs chainSimulatorIntegrationTests.ChainSimulator, + targetEpoch int32, + nodesStatusAfterUnBondTx string, +) { + err := cs.GenerateBlocksUntilEpochIsReached(targetEpoch) + require.Nil(t, err) + + log.Info("Step 1. Add 3 new validator private keys in the multi key handler") + privateKeys, blsKeys, err := chainSimulator.GenerateBlsPrivateKeys(3) + require.Nil(t, err) + + err = cs.AddValidatorKeys(privateKeys) + require.Nil(t, err) + metachainNode := cs.GetNodeHandler(core.MetachainShardId) + + log.Info("Step 2. Set the initial state for 1 owner and 1 delegator") + mintValue := big.NewInt(10001) + mintValue = mintValue.Mul(oneEGLD, mintValue) + + owner, err := cs.GenerateAndMintWalletAddress(core.AllShardId, mintValue) + require.Nil(t, err) + + delegator, err := cs.GenerateAndMintWalletAddress(core.AllShardId, mintValue) + require.Nil(t, err) + + log.Info("working with the following addresses", + "owner", owner.Bech32, "", delegator.Bech32) + + log.Info("Step 3. Do a stake transaction and test that the new key is on queue / auction list and has the correct topup") + + topup := big.NewInt(0).Mul(oneEGLD, big.NewInt(99)) + stakeValue := big.NewInt(0).Add(minimumStakeValue, topup) + txStake := generateStakeTransaction(t, cs, owner, blsKeys[0], stakeValue) + + stakeTxs, err := cs.SendTxsAndGenerateBlocksTilAreExecuted([]*transaction.Transaction{txStake}, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.Equal(t, 1, len(stakeTxs)) + + err = cs.GenerateBlocks(2) // allow the metachain to finalize the block that contains the staking of the node + assert.Nil(t, err) + + testBLSKeyIsInQueueOrAuction(t, metachainNode, owner.Bytes, blsKeys[0], topup, 1) + + log.Info("Step 4. 
Convert the validator into a staking providers and test that the key is on queue / auction list and has the correct topup") + + txConvert := generateConvertToStakingProviderTransaction(t, cs, owner) + + convertTxs, err := cs.SendTxsAndGenerateBlocksTilAreExecuted([]*transaction.Transaction{txConvert}, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.Equal(t, 1, len(convertTxs)) + + err = cs.GenerateBlocks(2) // allow the metachain to finalize the block that contains the staking of the node + assert.Nil(t, err) + + delegationAddress := convertTxs[0].Logs.Events[0].Topics[1] + + testBLSKeyIsInQueueOrAuction(t, metachainNode, delegationAddress, blsKeys[0], topup, 1) + + log.Info("Step 5. Add 2 nodes in the staking contract") + txDataFieldAddNodes := fmt.Sprintf("addNodes@%s@%s@%s@%s", blsKeys[1], mockBLSSignature+"02", blsKeys[2], mockBLSSignature+"03") + ownerNonce := getNonce(t, cs, owner) + txAddNodes := generateTransaction(owner.Bytes, ownerNonce, delegationAddress, big.NewInt(0), txDataFieldAddNodes, gasLimitForStakeOperation) + + addNodesTxs, err := cs.SendTxsAndGenerateBlocksTilAreExecuted([]*transaction.Transaction{txAddNodes}, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.Equal(t, 1, len(addNodesTxs)) + + log.Info("Step 6. Delegate 5000 EGLD to the contract") + delegateValue := big.NewInt(0).Mul(oneEGLD, big.NewInt(5000)) + txDataFieldDelegate := "delegate" + delegatorNonce := getNonce(t, cs, delegator) + txDelegate := generateTransaction(delegator.Bytes, delegatorNonce, delegationAddress, delegateValue, txDataFieldDelegate, gasLimitForStakeOperation) + + delegateTxs, err := cs.SendTxsAndGenerateBlocksTilAreExecuted([]*transaction.Transaction{txDelegate}, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.Equal(t, 1, len(delegateTxs)) + + log.Info("Step 7. Stake the 2 nodes") + txDataFieldStakeNodes := fmt.Sprintf("stakeNodes@%s@%s", blsKeys[1], blsKeys[2]) + ownerNonce = getNonce(t, cs, owner) + txStakeNodes := generateTransaction(owner.Bytes, ownerNonce, delegationAddress, big.NewInt(0), txDataFieldStakeNodes, gasLimitForStakeOperation) + + stakeNodesTxs, err := cs.SendTxsAndGenerateBlocksTilAreExecuted([]*transaction.Transaction{txStakeNodes}, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.Equal(t, 1, len(stakeNodesTxs)) + + err = cs.GenerateBlocks(2) // allow the metachain to finalize the block that contains the staking of the nodes + assert.Nil(t, err) + + // all 3 nodes should be staked (auction list is 1 as there is one delegation SC with 3 BLS keys in the auction list) + testBLSKeysAreInQueueOrAuction(t, metachainNode, delegationAddress, blsKeys, topup, 1) + + log.Info("Step 8. 
UnStake 2 nodes (latest staked)") + + txDataFieldUnStakeNodes := fmt.Sprintf("unStakeNodes@%s@%s", blsKeys[1], blsKeys[2]) + ownerNonce = getNonce(t, cs, owner) + txUnStakeNodes := generateTransaction(owner.Bytes, ownerNonce, delegationAddress, big.NewInt(0), txDataFieldUnStakeNodes, gasLimitForStakeOperation) + + unStakeNodesTxs, err := cs.SendTxsAndGenerateBlocksTilAreExecuted([]*transaction.Transaction{txUnStakeNodes}, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.Equal(t, 1, len(unStakeNodesTxs)) + + err = cs.GenerateBlocks(2) // allow the metachain to finalize the block that contains the un-staking of the nodes + assert.Nil(t, err) + + // check that only one node remains staked (auction list size is 1 as there is one delegation SC with 1 BLS key in the auction list) + expectedTopUp := big.NewInt(0) + expectedTopUp.Add(topup, delegateValue) // 99 + 5000 = 5099 + testBLSKeyIsInQueueOrAuction(t, metachainNode, delegationAddress, blsKeys[0], expectedTopUp, 1) + + log.Info("Step 9. Unbond the 2 nodes (that were un staked)") + + txDataFieldUnBondNodes := fmt.Sprintf("unBondNodes@%s@%s", blsKeys[1], blsKeys[2]) + ownerNonce = getNonce(t, cs, owner) + txUnBondNodes := generateTransaction(owner.Bytes, ownerNonce, delegationAddress, big.NewInt(0), txDataFieldUnBondNodes, gasLimitForStakeOperation) + + unBondNodesTxs, err := cs.SendTxsAndGenerateBlocksTilAreExecuted([]*transaction.Transaction{txUnBondNodes}, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.Equal(t, 1, len(unBondNodesTxs)) + + err = cs.GenerateBlocks(2) // allow the metachain to finalize the block that contains the un-bonding of the nodes + assert.Nil(t, err) + + keyStatus := getAllNodeStates(t, metachainNode, delegationAddress) + require.Equal(t, len(blsKeys), len(keyStatus)) + // key[0] should be staked + require.Equal(t, stakedStatus, keyStatus[blsKeys[0]]) + // key[1] and key[2] should have the expected post-unBond status: notStaked if they were still queued (unBond succeeded), unStaked otherwise + require.Equal(t, nodesStatusAfterUnBondTx, keyStatus[blsKeys[1]]) + require.Equal(t, nodesStatusAfterUnBondTx, keyStatus[blsKeys[2]]) +} + +func getNonce(t *testing.T, cs chainSimulatorIntegrationTests.ChainSimulator, address dtos.WalletAddress) uint64 { + account, err := cs.GetAccount(address) + require.Nil(t, err) + + return account.Nonce +} + +func getAllNodeStates(t *testing.T, metachainNode chainSimulatorProcess.NodeHandler, address []byte) map[string]string { + scQuery := &process.SCQuery{ + ScAddress: address, + FuncName: "getAllNodeStates", + CallerAddr: vm.StakingSCAddress, + CallValue: big.NewInt(0), + } + result, _, err := metachainNode.GetFacadeHandler().ExecuteSCQuery(scQuery) + require.Nil(t, err) + require.Equal(t, okReturnCode, result.ReturnCode) + + m := make(map[string]string) + status := "" + for _, resultData := range result.ReturnData { + if len(resultData) != 96 { + // not a BLS key + status = string(resultData) + continue + } + + m[hex.EncodeToString(resultData)] = status + } + + return m +} + +func generateStakeTransaction( + t *testing.T, + cs chainSimulatorIntegrationTests.ChainSimulator, + owner dtos.WalletAddress, + blsKeyHex string, + stakeValue *big.Int, +) *transaction.Transaction { + account, err := cs.GetAccount(owner) + require.Nil(t, err) + + txDataField := fmt.Sprintf("stake@01@%s@%s", blsKeyHex, mockBLSSignature) + return generateTransaction(owner.Bytes, account.Nonce, vm.ValidatorSCAddress, stakeValue, txDataField, gasLimitForStakeOperation) +} + +func generateConvertToStakingProviderTransaction( + t *testing.T, + cs
chainSimulatorIntegrationTests.ChainSimulator, + owner dtos.WalletAddress, +) *transaction.Transaction { + account, err := cs.GetAccount(owner) + require.Nil(t, err) + + txDataField := fmt.Sprintf("makeNewContractFromValidatorData@%s@%s", maxCap, hexServiceFee) + return generateTransaction(owner.Bytes, account.Nonce, vm.DelegationManagerSCAddress, zeroValue, txDataField, gasLimitForConvertOperation) +} + +// Test description +// Test the creation of a new delegation contract, adding nodes to it, delegating, and undelegating. + +// Test scenario +// 1. Initialize the chain simulator +// 2. Generate blocks to activate staking phases +// 3. Create a new delegation contract +// 4. Add validator nodes to the delegation contract +// 5. Perform delegation operations +// 6. Perform undelegation operations +// 7. Validate the results at each step +func TestChainSimulator_CreateNewDelegationContract(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + roundDurationInMillis := uint64(6000) + roundsPerEpoch := core.OptionalUint64{ + HasValue: true, + Value: 30, + } + + // Test scenario done in staking 3.5 phase (staking v4 is not active) + // 1. Add a new validator private key in the multi key handler + // 2. Set the initial state for the owner and the 2 delegators + // 3. Create a new delegation contract with 1250 egld + // 3. Add node to the delegation contract + // 4. Execute 2 delegation operations of 1250 EGLD each, check the topup is 3750 + // 5. Stake node, check the topup is 1250, check the node is staked + // 5. Execute 2 unDelegate operations of 100 EGLD each, check the topup is back to 1250 + // 6. Check the node is unstaked in the next epoch + t.Run("staking ph 4 is not active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 100 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 101 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 102 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 102 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorCreateNewDelegationContract(t, cs, 1) + }) + + // Test scenario done in staking v4 phase step 1 + // 1. Add a new validator private key in the multi key handler + // 2. Set the initial state for the owner and the 2 delegators + // 3. Create a new delegation contract with 1250 egld + // 3. Add node to the delegation contract + // 4. Execute 2 delegation operations of 1250 EGLD each, check the topup is 3750 + // 5. Stake node, check the topup is 1250, check the node is in action list + // 5. Execute 2 unDelegate operations of 100 EGLD each, check the topup is back to 1250 + // 6. 
Check the node is unstaked in the next epoch + t.Run("staking ph 4 step 1 is active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorCreateNewDelegationContract(t, cs, 2) + }) + + // Test scenario done in staking v4 phase step 2 + // 1. Add a new validator private key in the multi key handler + // 2. Set the initial state for the owner and the 2 delegators + // 3. Create a new delegation contract with 1250 egld + // 3. Add node to the delegation contract + // 4. Execute 2 delegation operations of 1250 EGLD each, check the topup is 3750 + // 5. Stake node, check the topup is 1250, check the node is in action list + // 5. Execute 2 unDelegate operations of 100 EGLD each, check the topup is back to 1250 + // 6. Check the node is unstaked in the next epoch + t.Run("staking ph 4 step 2 is active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorCreateNewDelegationContract(t, cs, 3) + }) + + // Test scenario done in staking v4 phase step 3 + // 1. Add a new validator private key in the multi key handler + // 2. Set the initial state for the owner and the 2 delegators + // 3. Create a new delegation contract with 1250 egld + // 3. Add node to the delegation contract + // 4. Execute 2 delegation operations of 1250 EGLD each, check the topup is 3750 + // 5. Stake node, check the topup is 1250, check the node is in action list + // 5. Execute 2 unDelegate operations of 100 EGLD each, check the topup is back to 1250 + // 6. 
Check the node is unstaked in the next epoch + t.Run("staking ph 4 step 3 is active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorCreateNewDelegationContract(t, cs, 4) + }) + +} + +func testChainSimulatorCreateNewDelegationContract(t *testing.T, cs chainSimulatorIntegrationTests.ChainSimulator, targetEpoch int32) { + err := cs.GenerateBlocksUntilEpochIsReached(targetEpoch) + require.Nil(t, err) + metachainNode := cs.GetNodeHandler(core.MetachainShardId) + + // Create new validator owner and delegators with initial funds + validatorOwnerBytes := generateWalletAddressBytes() + validatorOwner, _ := cs.GetNodeHandler(0).GetCoreComponents().AddressPubKeyConverter().Encode(validatorOwnerBytes) + delegator1Bytes := generateWalletAddressBytes() + delegator1, _ := cs.GetNodeHandler(0).GetCoreComponents().AddressPubKeyConverter().Encode(delegator1Bytes) + delegator2Bytes := generateWalletAddressBytes() + delegator2, _ := cs.GetNodeHandler(0).GetCoreComponents().AddressPubKeyConverter().Encode(delegator2Bytes) + initialFunds := big.NewInt(0).Mul(oneEGLD, big.NewInt(10000)) // 10000 EGLD for each + addresses := []*dtos.AddressState{ + {Address: validatorOwner, Balance: initialFunds.String()}, + {Address: delegator1, Balance: initialFunds.String()}, + {Address: delegator2, Balance: initialFunds.String()}, + } + err = cs.SetStateMultiple(addresses) + require.Nil(t, err) + + // Step 3: Create a new delegation contract + maxDelegationCap := big.NewInt(0).Mul(oneEGLD, big.NewInt(51000)) // 51000 EGLD cap + txCreateDelegationContract := generateTransaction(validatorOwnerBytes, 0, vm.DelegationManagerSCAddress, initialDelegationValue, + fmt.Sprintf("createNewDelegationContract@%s@%s", hex.EncodeToString(maxDelegationCap.Bytes()), hexServiceFee), + gasLimitForDelegationContractCreationOperation) + createDelegationContractTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txCreateDelegationContract, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, createDelegationContractTx) + + // Check delegation contract creation was successful + data := createDelegationContractTx.SmartContractResults[0].Data + parts := strings.Split(data, "@") + require.Equal(t, 3, len(parts)) + + require.Equal(t, hex.EncodeToString([]byte("ok")), parts[1]) + delegationContractAddressHex, _ := hex.DecodeString(parts[2]) + delegationContractAddress, _ := cs.GetNodeHandler(0).GetCoreComponents().AddressPubKeyConverter().Encode(delegationContractAddressHex) + + output, err := executeQuery(cs, core.MetachainShardId, vm.DelegationManagerSCAddress, "getAllContractAddresses", nil) + require.Nil(t, err) + returnAddress, err := 
cs.GetNodeHandler(0).GetCoreComponents().AddressPubKeyConverter().Encode(output.ReturnData[0]) + require.Nil(t, err) + require.Equal(t, delegationContractAddress, returnAddress) + delegationContractAddressBytes := output.ReturnData[0] + + // Step 2: Add validator nodes to the delegation contract + // This step requires generating BLS keys for validators, signing messages, and sending the "addNodes" transaction. + // Add checks to verify nodes are added successfully. + validatorSecretKeysBytes, blsKeys, err := chainSimulator.GenerateBlsPrivateKeys(1) + require.Nil(t, err) + err = cs.AddValidatorKeys(validatorSecretKeysBytes) + require.Nil(t, err) + + signatures := getSignatures(delegationContractAddressBytes, validatorSecretKeysBytes) + txAddNodes := generateTransaction(validatorOwnerBytes, 1, delegationContractAddressBytes, zeroValue, addNodesTxData(blsKeys, signatures), gasLimitForAddNodesOperation) + addNodesTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txAddNodes, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, addNodesTx) + + output, err = executeQuery(cs, core.MetachainShardId, delegationContractAddressBytes, "getAllNodeStates", nil) + require.Nil(t, err) + stakedKeys, notStakedKeys, unStakedKeys := getNodesFromContract(output.ReturnData) + require.Equal(t, 0, len(stakedKeys)) + require.Equal(t, 1, len(notStakedKeys)) + require.Equal(t, blsKeys[0], hex.EncodeToString(notStakedKeys[0])) + require.Equal(t, 0, len(unStakedKeys)) + + expectedTopUp := big.NewInt(0).Set(initialDelegationValue) + expectedTotalStaked := big.NewInt(0).Set(initialDelegationValue) + output, err = executeQuery(cs, core.MetachainShardId, delegationContractAddressBytes, "getTotalActiveStake", nil) + require.Nil(t, err) + require.Equal(t, expectedTotalStaked, big.NewInt(0).SetBytes(output.ReturnData[0])) + require.Equal(t, expectedTopUp, getBLSTopUpValue(t, metachainNode, delegationContractAddressBytes)) + + output, err = executeQuery(cs, core.MetachainShardId, delegationContractAddressBytes, "getUserActiveStake", [][]byte{validatorOwnerBytes}) + require.Nil(t, err) + require.Equal(t, initialDelegationValue, big.NewInt(0).SetBytes(output.ReturnData[0])) + + // Step 3: Perform delegation operations + txDelegate1 := generateTransaction(delegator1Bytes, 0, delegationContractAddressBytes, initialDelegationValue, "delegate", gasLimitForDelegate) + delegate1Tx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txDelegate1, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, delegate1Tx) + + expectedTopUp = expectedTopUp.Add(expectedTopUp, initialDelegationValue) + expectedTotalStaked = expectedTotalStaked.Add(expectedTotalStaked, initialDelegationValue) + output, err = executeQuery(cs, core.MetachainShardId, delegationContractAddressBytes, "getTotalActiveStake", nil) + require.Nil(t, err) + require.Equal(t, expectedTotalStaked, big.NewInt(0).SetBytes(output.ReturnData[0])) + require.Equal(t, expectedTopUp, getBLSTopUpValue(t, metachainNode, delegationContractAddressBytes)) + + output, err = executeQuery(cs, core.MetachainShardId, delegationContractAddressBytes, "getUserActiveStake", [][]byte{delegator1Bytes}) + require.Nil(t, err) + require.Equal(t, initialDelegationValue, big.NewInt(0).SetBytes(output.ReturnData[0])) + + txDelegate2 := generateTransaction(delegator2Bytes, 0, delegationContractAddressBytes, initialDelegationValue, "delegate", gasLimitForDelegate) + delegate2Tx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txDelegate2, 
maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, delegate2Tx) + + expectedTopUp = expectedTopUp.Add(expectedTopUp, initialDelegationValue) + expectedTotalStaked = expectedTotalStaked.Add(expectedTotalStaked, initialDelegationValue) + output, err = executeQuery(cs, core.MetachainShardId, delegationContractAddressBytes, "getTotalActiveStake", nil) + require.Nil(t, err) + require.Equal(t, expectedTotalStaked, big.NewInt(0).SetBytes(output.ReturnData[0])) + require.Equal(t, expectedTopUp, getBLSTopUpValue(t, metachainNode, delegationContractAddressBytes)) + + output, err = executeQuery(cs, core.MetachainShardId, delegationContractAddressBytes, "getUserActiveStake", [][]byte{delegator2Bytes}) + require.Nil(t, err) + require.Equal(t, initialDelegationValue, big.NewInt(0).SetBytes(output.ReturnData[0])) + + // Step 4: Perform stakeNodes + + txStakeNodes := generateTransaction(validatorOwnerBytes, 2, delegationContractAddressBytes, zeroValue, fmt.Sprintf("stakeNodes@%s", blsKeys[0]), gasLimitForStakeOperation) + stakeNodesTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txStakeNodes, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, stakeNodesTx) + + expectedTopUp = expectedTopUp.Sub(expectedTopUp, initialDelegationValue) + expectedTopUp = expectedTopUp.Sub(expectedTopUp, initialDelegationValue) + require.Equal(t, expectedTopUp, getBLSTopUpValue(t, metachainNode, delegationContractAddressBytes)) + + output, err = executeQuery(cs, core.MetachainShardId, delegationContractAddressBytes, "getAllNodeStates", nil) + require.Nil(t, err) + stakedKeys, notStakedKeys, unStakedKeys = getNodesFromContract(output.ReturnData) + require.Equal(t, 1, len(stakedKeys)) + require.Equal(t, blsKeys[0], hex.EncodeToString(stakedKeys[0])) + require.Equal(t, 0, len(notStakedKeys)) + require.Equal(t, 0, len(unStakedKeys)) + + // Make block finalized + err = cs.GenerateBlocks(1) + require.Nil(t, err) + + testBLSKeyIsInQueueOrAuction(t, metachainNode, delegationContractAddressBytes, blsKeys[0], expectedTopUp, 1) + + // Step 5: Perform unDelegate from 1 user + // The nodes should remain in the staked state + // The total active stake should be reduced by the amount undelegated + + txUndelegate1 := generateTransaction(delegator1Bytes, 1, delegationContractAddressBytes, zeroValue, fmt.Sprintf("unDelegate@%s", hex.EncodeToString(initialDelegationValue.Bytes())), gasLimitForUndelegateOperation) + undelegate1Tx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txUndelegate1, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, undelegate1Tx) + + expectedTopUp = expectedTopUp.Sub(expectedTopUp, initialDelegationValue) + expectedTotalStaked = expectedTotalStaked.Sub(expectedTotalStaked, initialDelegationValue) + output, err = executeQuery(cs, core.MetachainShardId, delegationContractAddressBytes, "getTotalActiveStake", nil) + require.Nil(t, err) + require.Equal(t, expectedTotalStaked, big.NewInt(0).SetBytes(output.ReturnData[0])) + require.Equal(t, expectedTopUp.String(), getBLSTopUpValue(t, metachainNode, delegationContractAddressBytes).String()) + + output, err = executeQuery(cs, core.MetachainShardId, delegationContractAddressBytes, "getUserActiveStake", [][]byte{delegator1Bytes}) + require.Nil(t, err) + require.Equal(t, zeroValue, big.NewInt(0).SetBytes(output.ReturnData[0])) + + output, err = executeQuery(cs, core.MetachainShardId, delegationContractAddressBytes, "getAllNodeStates", nil) + require.Nil(t, err) + stakedKeys, 
notStakedKeys, unStakedKeys = getNodesFromContract(output.ReturnData) + require.Equal(t, 1, len(stakedKeys)) + require.Equal(t, blsKeys[0], hex.EncodeToString(stakedKeys[0])) + require.Equal(t, 0, len(notStakedKeys)) + require.Equal(t, 0, len(unStakedKeys)) + + // Step 6: Perform unDelegate from last user + // The nodes should remain in the unStaked state + // The total active stake should be reduced by the amount undelegated + + txUndelegate2 := generateTransaction(delegator2Bytes, 1, delegationContractAddressBytes, zeroValue, fmt.Sprintf("unDelegate@%s", hex.EncodeToString(initialDelegationValue.Bytes())), gasLimitForUndelegateOperation) + undelegate2Tx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txUndelegate2, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, undelegate2Tx) + + output, err = executeQuery(cs, core.MetachainShardId, delegationContractAddressBytes, "getTotalActiveStake", nil) + require.Nil(t, err) + require.Equal(t, "1250000000000000000000", big.NewInt(0).SetBytes(output.ReturnData[0]).String()) + require.Equal(t, zeroValue, getBLSTopUpValue(t, metachainNode, delegationContractAddressBytes)) + + output, err = executeQuery(cs, core.MetachainShardId, delegationContractAddressBytes, "getUserActiveStake", [][]byte{delegator2Bytes}) + require.Nil(t, err) + require.Equal(t, "0", big.NewInt(0).SetBytes(output.ReturnData[0]).String()) + + // still staked until epoch change + output, err = executeQuery(cs, core.MetachainShardId, delegationContractAddressBytes, "getAllNodeStates", nil) + require.Nil(t, err) + stakedKeys, notStakedKeys, unStakedKeys = getNodesFromContract(output.ReturnData) + require.Equal(t, 1, len(stakedKeys)) + require.Equal(t, blsKeys[0], hex.EncodeToString(stakedKeys[0])) + require.Equal(t, 0, len(notStakedKeys)) + require.Equal(t, 0, len(unStakedKeys)) + + err = cs.GenerateBlocksUntilEpochIsReached(targetEpoch + 1) + require.Nil(t, err) + + output, err = executeQuery(cs, core.MetachainShardId, delegationContractAddressBytes, "getAllNodeStates", nil) + require.Nil(t, err) + stakedKeys, notStakedKeys, unStakedKeys = getNodesFromContract(output.ReturnData) + require.Equal(t, 0, len(stakedKeys)) + require.Equal(t, 0, len(notStakedKeys)) + require.Equal(t, 1, len(unStakedKeys)) + require.Equal(t, blsKeys[0], hex.EncodeToString(unStakedKeys[0])) +} + +func generateWalletAddressBytes() []byte { + buff := make([]byte, walletAddressBytesLen) + _, _ = rand.Read(buff) + + return buff +} + +func executeQuery(cs chainSimulatorIntegrationTests.ChainSimulator, shardID uint32, scAddress []byte, funcName string, args [][]byte) (*dataVm.VMOutputApi, error) { + output, _, err := cs.GetNodeHandler(shardID).GetFacadeHandler().ExecuteSCQuery(&process.SCQuery{ + ScAddress: scAddress, + FuncName: funcName, + Arguments: args, + }) + return output, err +} + +func addNodesTxData(blsKeys []string, sigs [][]byte) string { + txData := "addNodes" + + for i := range blsKeys { + txData = txData + "@" + blsKeys[i] + "@" + hex.EncodeToString(sigs[i]) + } + + return txData +} + +func getSignatures(msg []byte, blsKeys [][]byte) [][]byte { + signer := mclsig.NewBlsSigner() + + signatures := make([][]byte, len(blsKeys)) + for i, blsKey := range blsKeys { + sk, _ := signing.NewKeyGenerator(mcl.NewSuiteBLS12()).PrivateKeyFromByteArray(blsKey) + signatures[i], _ = signer.Sign(sk, msg) + } + + return signatures +} + +func getNodesFromContract(returnData [][]byte) ([][]byte, [][]byte, [][]byte) { + var stakedKeys, notStakedKeys, unStakedKeys [][]byte + + for i := 0; i 
< len(returnData); i += 2 { + switch string(returnData[i]) { + case "staked": + stakedKeys = append(stakedKeys, returnData[i+1]) + case "notStaked": + notStakedKeys = append(notStakedKeys, returnData[i+1]) + case "unStaked": + unStakedKeys = append(unStakedKeys, returnData[i+1]) + } + } + return stakedKeys, notStakedKeys, unStakedKeys +} + +func getBLSKeyStatus(t *testing.T, metachainNode chainSimulatorProcess.NodeHandler, blsKey []byte) string { + scQuery := &process.SCQuery{ + ScAddress: vm.StakingSCAddress, + FuncName: "getBLSKeyStatus", + CallerAddr: vm.StakingSCAddress, + CallValue: big.NewInt(0), + Arguments: [][]byte{blsKey}, + } + result, _, err := metachainNode.GetFacadeHandler().ExecuteSCQuery(scQuery) + require.Nil(t, err) + require.Equal(t, okReturnCode, result.ReturnCode) + + return string(result.ReturnData[0]) +} + +func getBLSTopUpValue(t *testing.T, metachainNode chainSimulatorProcess.NodeHandler, address []byte) *big.Int { + scQuery := &process.SCQuery{ + ScAddress: vm.ValidatorSCAddress, + FuncName: "getTotalStakedTopUpStakedBlsKeys", + CallerAddr: vm.StakingSCAddress, + CallValue: big.NewInt(0), + Arguments: [][]byte{address}, + } + result, _, err := metachainNode.GetFacadeHandler().ExecuteSCQuery(scQuery) + require.Nil(t, err) + require.Equal(t, okReturnCode, result.ReturnCode) + + if len(result.ReturnData[0]) == 0 { + return big.NewInt(0) + } + + return big.NewInt(0).SetBytes(result.ReturnData[0]) +} + +func generateTransaction(sender []byte, nonce uint64, receiver []byte, value *big.Int, data string, gasLimit uint64) *transaction.Transaction { + return &transaction.Transaction{ + Nonce: nonce, + Value: value, + SndAddr: sender, + RcvAddr: receiver, + Data: []byte(data), + GasLimit: gasLimit, + GasPrice: minGasPrice, + ChainID: []byte(configs.ChainID), + Version: txVersion, + Signature: []byte(mockTxSignature), + } +} + +// Test description: +// Test that merging delegation with whiteListForMerge and mergeValidatorToDelegationWithWhitelist contracts still works properly +// Test that their topups will merge too and will be used by auction list computing. +// +// Internal test scenario #12 +func TestChainSimulator_MergeDelegation(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + roundDurationInMillis := uint64(6000) + roundsPerEpoch := core.OptionalUint64{ + HasValue: true, + Value: 30, + } + + // Test steps: + // 1. User A - Stake 1 node to have 100 egld more than minimum required stake value + // 2. User A - Execute `makeNewContractFromValidatorData` to create delegation contract based on User A account + // 3. User B - Stake 1 node with more than 2500 egld + // 4. User A - Execute `whiteListForMerge@addressA` in order to whitelist for merge User B + // 5. User B - Execute `mergeValidatorToDelegationWithWhitelist@delegationContract` in order to merge User B to delegation contract created at step 2. 
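+	// Note on the transaction data fields used in the steps below: calls to the staking /
+	// delegation system smart contracts are ordinary transactions whose data field is the
+	// function name followed by "@"-separated, hex-encoded arguments. The test builds these
+	// strings inline with fmt.Sprintf; an equivalent, purely illustrative helper (not part of
+	// this change) would be:
+	//
+	//	func buildCallData(function string, args ...[]byte) string {
+	//		data := function
+	//		for _, arg := range args {
+	//			data += "@" + hex.EncodeToString(arg)
+	//		}
+	//		return data
+	//	}
+	//
+	// For example, step 4 sends "whitelistForMerge@<hex of validator B's address>" to the
+	// delegation contract, and step 5 sends "mergeValidatorToDelegationWithWhitelist@<hex of
+	// the delegation contract address>" to the delegation manager system contract.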
+ + t.Run("staking ph 4 is not active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakeLimitsEnableEpoch = 100 + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 100 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 101 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 102 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 102 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorMergingDelegation(t, cs, 1) + }) + + t.Run("staking ph 4 step 1 is active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakeLimitsEnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + cfg.SystemSCConfig.StakingSystemSCConfig.NodeLimitPercentage = 1 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorMergingDelegation(t, cs, 2) + }) + + t.Run("staking ph 4 step 2 is active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakeLimitsEnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + cfg.SystemSCConfig.StakingSystemSCConfig.NodeLimitPercentage = 1 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorMergingDelegation(t, cs, 3) + }) + + t.Run("staking ph 4 step 3 is active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + 
GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakeLimitsEnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + cfg.SystemSCConfig.StakingSystemSCConfig.NodeLimitPercentage = 1 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorMergingDelegation(t, cs, 4) + }) +} + +func testChainSimulatorMergingDelegation(t *testing.T, cs chainSimulatorIntegrationTests.ChainSimulator, targetEpoch int32) { + err := cs.GenerateBlocksUntilEpochIsReached(targetEpoch) + require.Nil(t, err) + + privateKeys, blsKeys, err := chainSimulator.GenerateBlsPrivateKeys(3) + require.Nil(t, err) + + err = cs.AddValidatorKeys(privateKeys) + require.Nil(t, err) + metachainNode := cs.GetNodeHandler(core.MetachainShardId) + + mintValue := big.NewInt(3000) + mintValue = mintValue.Mul(oneEGLD, mintValue) + + validatorA, err := cs.GenerateAndMintWalletAddress(core.AllShardId, mintValue) + require.Nil(t, err) + + validatorB, err := cs.GenerateAndMintWalletAddress(core.AllShardId, mintValue) + require.Nil(t, err) + + log.Info("Step 1. User A: - stake 1 node to have 100 egld more than minimum stake value") + stakeValue := big.NewInt(0).Set(minimumStakeValue) + addedStakedValue := big.NewInt(0).Mul(oneEGLD, big.NewInt(100)) + stakeValue.Add(stakeValue, addedStakedValue) + txDataField := fmt.Sprintf("stake@01@%s@%s", blsKeys[0], mockBLSSignature) + txStake := generateTransaction(validatorA.Bytes, 0, vm.ValidatorSCAddress, stakeValue, txDataField, gasLimitForStakeOperation) + stakeTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txStake, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, stakeTx) + + err = cs.GenerateBlocks(2) // allow the metachain to finalize the block that contains the staking of the node + require.Nil(t, err) + + testBLSKeyIsInQueueOrAuction(t, metachainNode, validatorA.Bytes, blsKeys[0], addedStakedValue, 1) + require.Equal(t, addedStakedValue, getBLSTopUpValue(t, metachainNode, validatorA.Bytes)) + + log.Info("Step 2. Execute MakeNewContractFromValidatorData for User A") + txDataField = fmt.Sprintf("makeNewContractFromValidatorData@%s@%s", maxCap, hexServiceFee) + txConvert := generateTransaction(validatorA.Bytes, 1, vm.DelegationManagerSCAddress, zeroValue, txDataField, gasLimitForConvertOperation) + convertTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txConvert, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, convertTx) + + delegationAddress := convertTx.Logs.Events[0].Topics[1] + + err = cs.GenerateBlocks(2) // allow the metachain to finalize the block that contains the staking of the node + require.Nil(t, err) + + testBLSKeyIsInQueueOrAuction(t, metachainNode, delegationAddress, blsKeys[0], addedStakedValue, 1) + + log.Info("Step 3. 
User B: - stake 1 node to have 100 egld more") + stakeValue = big.NewInt(0).Set(minimumStakeValue) + addedStakedValue = big.NewInt(0).Mul(oneEGLD, big.NewInt(100)) + stakeValue.Add(stakeValue, addedStakedValue) + txDataField = fmt.Sprintf("stake@01@%s@%s", blsKeys[1], mockBLSSignature) + txStake = generateTransaction(validatorB.Bytes, 0, vm.ValidatorSCAddress, stakeValue, txDataField, gasLimitForStakeOperation) + stakeTx, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(txStake, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, stakeTx) + + err = cs.GenerateBlocks(2) // allow the metachain to finalize the block that contains the staking of the node + require.Nil(t, err) + + testBLSKeyIsInQueueOrAuction(t, metachainNode, validatorB.Bytes, blsKeys[1], addedStakedValue, 2) + require.Equal(t, addedStakedValue, getBLSTopUpValue(t, metachainNode, validatorB.Bytes)) + + decodedBLSKey0, _ := hex.DecodeString(blsKeys[0]) + require.Equal(t, delegationAddress, getBLSKeyOwner(t, metachainNode, decodedBLSKey0)) + + decodedBLSKey1, _ := hex.DecodeString(blsKeys[1]) + require.Equal(t, validatorB.Bytes, getBLSKeyOwner(t, metachainNode, decodedBLSKey1)) + + log.Info("Step 4. User A : whitelistForMerge@addressB") + txDataField = fmt.Sprintf("whitelistForMerge@%s", hex.EncodeToString(validatorB.Bytes)) + whitelistForMerge := generateTransaction(validatorA.Bytes, 2, delegationAddress, zeroValue, txDataField, gasLimitForDelegate) + whitelistForMergeTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(whitelistForMerge, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, whitelistForMergeTx) + + err = cs.GenerateBlocks(2) // allow the metachain to finalize the block that contains the staking of the node + require.Nil(t, err) + + log.Info("Step 5. 
User A : mergeValidatorToDelegationWithWhitelist") + txDataField = fmt.Sprintf("mergeValidatorToDelegationWithWhitelist@%s", hex.EncodeToString(delegationAddress)) + + txConvert = generateTransaction(validatorB.Bytes, 1, vm.DelegationManagerSCAddress, zeroValue, txDataField, gasLimitForMergeOperation) + convertTx, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(txConvert, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, convertTx) + + err = cs.GenerateBlocks(2) // allow the metachain to finalize the block that contains the staking of the node + require.Nil(t, err) + + decodedBLSKey0, _ = hex.DecodeString(blsKeys[0]) + require.Equal(t, delegationAddress, getBLSKeyOwner(t, metachainNode, decodedBLSKey0)) + + decodedBLSKey1, _ = hex.DecodeString(blsKeys[1]) + require.Equal(t, delegationAddress, getBLSKeyOwner(t, metachainNode, decodedBLSKey1)) + + expectedTopUpValue := big.NewInt(0).Mul(oneEGLD, big.NewInt(200)) + require.Equal(t, expectedTopUpValue, getBLSTopUpValue(t, metachainNode, delegationAddress)) +} + +func getBLSKeyOwner(t *testing.T, metachainNode chainSimulatorProcess.NodeHandler, blsKey []byte) []byte { + scQuery := &process.SCQuery{ + ScAddress: vm.StakingSCAddress, + FuncName: "getOwner", + CallerAddr: vm.ValidatorSCAddress, + CallValue: big.NewInt(0), + Arguments: [][]byte{blsKey}, + } + result, _, err := metachainNode.GetFacadeHandler().ExecuteSCQuery(scQuery) + require.Nil(t, err) + require.Equal(t, okReturnCode, result.ReturnCode) + + return result.ReturnData[0] +} diff --git a/integrationTests/chainSimulator/staking/jail_test.go b/integrationTests/chainSimulator/staking/jail_test.go new file mode 100644 index 00000000000..185365912b1 --- /dev/null +++ b/integrationTests/chainSimulator/staking/jail_test.go @@ -0,0 +1,257 @@ +package staking + +import ( + "encoding/hex" + "fmt" + "math/big" + "testing" + "time" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/data/transaction" + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/config" + chainSimulatorIntegrationTests "github.com/multiversx/mx-chain-go/integrationTests/chainSimulator" + "github.com/multiversx/mx-chain-go/node/chainSimulator" + "github.com/multiversx/mx-chain-go/node/chainSimulator/components/api" + "github.com/multiversx/mx-chain-go/node/chainSimulator/configs" + "github.com/multiversx/mx-chain-go/vm" + "github.com/stretchr/testify/require" +) + +const ( + stakingV4JailUnJailStep1EnableEpoch = 5 + + epochWhenNodeIsJailed = 4 +) + +// Test description +// All test cases will do a stake transaction and wait till the new node is jailed +// testcase1 -- unJail transaction will be sent when staking v3.5 is still action --> node status should be `new` after unjail +// testcase2 -- unJail transaction will be sent when staking v4 step1 is action --> node status should be `auction` after unjail +// testcase3 -- unJail transaction will be sent when staking v4 step2 is action --> node status should be `auction` after unjail +// testcase4 -- unJail transaction will be sent when staking v4 step3 is action --> node status should be `auction` after unjail +func TestChainSimulator_ValidatorJailUnJail(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + t.Run("staking ph 4 is not active", func(t *testing.T) { + testChainSimulatorJailAndUnJail(t, 4, "new") + }) + + t.Run("staking ph 4 step 1 active", func(t *testing.T) { + testChainSimulatorJailAndUnJail(t, 5, "auction") + }) + + 
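+	// The targetEpoch passed to each sub-test selects the staking phase in which the unJail
+	// transaction is sent: epoch 4 is still staking v3.5, while epochs 5, 6 and 7 presumably
+	// map to staking v4 steps 1, 2 and 3 (SetStakingV4ActivationEpochs is called with
+	// stakingV4JailUnJailStep1EnableEpoch = 5, and the following steps are expected to
+	// activate in the next consecutive epochs). This is why the expected node status after
+	// unJail changes from "new" to "auction" once step 1 is active.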
t.Run("staking ph 4 step 2 active", func(t *testing.T) { + testChainSimulatorJailAndUnJail(t, 6, "auction") + }) + + t.Run("staking ph 4 step 3 active", func(t *testing.T) { + testChainSimulatorJailAndUnJail(t, 7, "auction") + }) +} + +func testChainSimulatorJailAndUnJail(t *testing.T, targetEpoch int32, nodeStatusAfterUnJail string) { + startTime := time.Now().Unix() + roundDurationInMillis := uint64(6000) + roundsPerEpoch := core.OptionalUint64{ + HasValue: true, + Value: 20, + } + + numOfShards := uint32(3) + + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: numOfShards, + GenesisTimestamp: startTime, + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 2, + MetaChainMinNodes: 2, + AlterConfigsFunction: func(cfg *config.Configs) { + configs.SetStakingV4ActivationEpochs(cfg, stakingV4JailUnJailStep1EnableEpoch) + newNumNodes := cfg.SystemSCConfig.StakingSystemSCConfig.MaxNumberOfNodesForStake + 8 // 8 nodes until new nodes will be placed on queue + configs.SetMaxNumberOfNodesInConfigs(cfg, newNumNodes, numOfShards) + configs.SetQuickJailRatingConfig(cfg) + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + defer cs.Close() + + metachainNode := cs.GetNodeHandler(core.MetachainShardId) + err = cs.GenerateBlocksUntilEpochIsReached(1) + require.Nil(t, err) + + _, blsKeys, err := chainSimulator.GenerateBlsPrivateKeys(1) + require.Nil(t, err) + + mintValue := big.NewInt(0).Mul(oneEGLD, big.NewInt(3000)) + walletAddress, err := cs.GenerateAndMintWalletAddress(core.AllShardId, mintValue) + require.Nil(t, err) + + txDataField := fmt.Sprintf("stake@01@%s@%s", blsKeys[0], mockBLSSignature) + txStake := generateTransaction(walletAddress.Bytes, 0, vm.ValidatorSCAddress, minimumStakeValue, txDataField, gasLimitForStakeOperation) + stakeTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txStake, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, stakeTx) + + // wait node to be jailed + err = cs.GenerateBlocksUntilEpochIsReached(epochWhenNodeIsJailed) + require.Nil(t, err) + + decodedBLSKey, _ := hex.DecodeString(blsKeys[0]) + status := getBLSKeyStatus(t, metachainNode, decodedBLSKey) + require.Equal(t, "jailed", status) + + // do an unjail transaction + unJailValue, _ := big.NewInt(0).SetString("2500000000000000000", 10) + txUnJailDataField := fmt.Sprintf("unJail@%s", blsKeys[0]) + txUnJail := generateTransaction(walletAddress.Bytes, 1, vm.ValidatorSCAddress, unJailValue, txUnJailDataField, gasLimitForStakeOperation) + + err = cs.GenerateBlocksUntilEpochIsReached(targetEpoch) + require.Nil(t, err) + + unJailTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txUnJail, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, unJailTx) + require.Equal(t, transaction.TxStatusSuccess, unJailTx.Status) + + err = cs.GenerateBlocks(1) + require.Nil(t, err) + + status = getBLSKeyStatus(t, metachainNode, decodedBLSKey) + require.Equal(t, "staked", status) + + checkValidatorStatus(t, cs, blsKeys[0], nodeStatusAfterUnJail) + + err = cs.GenerateBlocksUntilEpochIsReached(targetEpoch + 1) + require.Nil(t, err) + + checkValidatorStatus(t, cs, blsKeys[0], "waiting") + + err = cs.GenerateBlocksUntilEpochIsReached(targetEpoch + 2) + require.Nil(t, err) + + checkValidatorStatus(t, cs, blsKeys[0], "eligible") +} + +// 
Test description +// Add a new node and wait until the node get jailed +// Add a second node to take the place of the jailed node +// UnJail the first node --> should go in queue +// Activate staking v4 step 1 --> node should be unstaked as the queue is cleaned up + +// Internal test scenario #2 +func TestChainSimulator_FromQueueToAuctionList(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + startTime := time.Now().Unix() + roundDurationInMillis := uint64(6000) + roundsPerEpoch := core.OptionalUint64{ + HasValue: true, + Value: 20, + } + + numOfShards := uint32(3) + + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: numOfShards, + GenesisTimestamp: startTime, + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + configs.SetStakingV4ActivationEpochs(cfg, stakingV4JailUnJailStep1EnableEpoch) + configs.SetQuickJailRatingConfig(cfg) + + newNumNodes := cfg.SystemSCConfig.StakingSystemSCConfig.MaxNumberOfNodesForStake + 1 + configs.SetMaxNumberOfNodesInConfigs(cfg, newNumNodes, numOfShards) + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + defer cs.Close() + + metachainNode := cs.GetNodeHandler(core.MetachainShardId) + err = cs.GenerateBlocks(30) + require.Nil(t, err) + + privateKeys, blsKeys, err := chainSimulator.GenerateBlsPrivateKeys(2) + require.Nil(t, err) + + err = cs.AddValidatorKeys([][]byte{privateKeys[1]}) + require.Nil(t, err) + + mintValue := big.NewInt(0).Mul(oneEGLD, big.NewInt(6000)) + walletAddress, err := cs.GenerateAndMintWalletAddress(core.AllShardId, mintValue) + require.Nil(t, err) + + txDataField := fmt.Sprintf("stake@01@%s@%s", blsKeys[0], mockBLSSignature) + txStake := generateTransaction(walletAddress.Bytes, 0, vm.ValidatorSCAddress, minimumStakeValue, txDataField, gasLimitForStakeOperation) + stakeTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txStake, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, stakeTx) + + // wait node to be jailed + err = cs.GenerateBlocksUntilEpochIsReached(epochWhenNodeIsJailed) + require.Nil(t, err) + + decodedBLSKey0, _ := hex.DecodeString(blsKeys[0]) + status := getBLSKeyStatus(t, metachainNode, decodedBLSKey0) + require.Equal(t, "jailed", status) + + // add one more node + txDataField = fmt.Sprintf("stake@01@%s@%s", blsKeys[1], mockBLSSignature) + txStake = generateTransaction(walletAddress.Bytes, 1, vm.ValidatorSCAddress, minimumStakeValue, txDataField, gasLimitForStakeOperation) + stakeTx, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(txStake, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, stakeTx) + + decodedBLSKey1, _ := hex.DecodeString(blsKeys[1]) + status = getBLSKeyStatus(t, metachainNode, decodedBLSKey1) + require.Equal(t, "staked", status) + + // unJail the first node + unJailValue, _ := big.NewInt(0).SetString("2500000000000000000", 10) + txUnJailDataField := fmt.Sprintf("unJail@%s", blsKeys[0]) + txUnJail := generateTransaction(walletAddress.Bytes, 2, vm.ValidatorSCAddress, unJailValue, txUnJailDataField, gasLimitForStakeOperation) + + unJailTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txUnJail, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, unJailTx) + 
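+	// The 2500000000000000000 value attached to the unJail transaction above is 2.5 EGLD,
+	// assumed here to match the configured unJail price. From this point the test follows the
+	// expected lifecycle described above: the unjailed key should first re-enter the staking
+	// queue (its slot was taken by the second node staked above), and once staking v4 step 1
+	// activates and the queue is dissolved, it should end up unStaked / inactive.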
require.Equal(t, transaction.TxStatusSuccess, unJailTx.Status) + + status = getBLSKeyStatus(t, metachainNode, decodedBLSKey0) + require.Equal(t, "queued", status) + + err = cs.GenerateBlocksUntilEpochIsReached(stakingV4JailUnJailStep1EnableEpoch) + require.Nil(t, err) + + status = getBLSKeyStatus(t, metachainNode, decodedBLSKey0) + require.Equal(t, unStakedStatus, status) + + checkValidatorStatus(t, cs, blsKeys[0], string(common.InactiveList)) +} + +func checkValidatorStatus(t *testing.T, cs chainSimulatorIntegrationTests.ChainSimulator, blsKey string, expectedStatus string) { + err := cs.GetNodeHandler(core.MetachainShardId).GetProcessComponents().ValidatorsProvider().ForceUpdate() + require.Nil(t, err) + + validatorsStatistics, err := cs.GetNodeHandler(core.MetachainShardId).GetFacadeHandler().ValidatorStatisticsApi() + require.Nil(t, err) + require.Equal(t, expectedStatus, validatorsStatistics[blsKey].ValidatorStatus) +} diff --git a/integrationTests/chainSimulator/staking/simpleStake_test.go b/integrationTests/chainSimulator/staking/simpleStake_test.go new file mode 100644 index 00000000000..f738b2c7ff6 --- /dev/null +++ b/integrationTests/chainSimulator/staking/simpleStake_test.go @@ -0,0 +1,289 @@ +package staking + +import ( + "encoding/hex" + "fmt" + "math/big" + "testing" + "time" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/data/transaction" + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/node/chainSimulator" + "github.com/multiversx/mx-chain-go/node/chainSimulator/components/api" + "github.com/multiversx/mx-chain-go/node/chainSimulator/configs" + "github.com/multiversx/mx-chain-go/node/chainSimulator/process" + "github.com/multiversx/mx-chain-go/vm" + "github.com/stretchr/testify/require" +) + +// Test scenarios +// Do 3 stake transactions from 3 different wallets - tx value 2499, 2500, 2501 +// testcase1 -- staking v3.5 --> tx1 fail, tx2 - node in queue, tx3 - node in queue with topUp 1 +// testcase2 -- staking v4 step1 --> tx1 fail, tx2 - node in auction, tx3 - node in auction with topUp 1 +// testcase3 -- staking v4 step2 --> tx1 fail, tx2 - node in auction, tx3 - node in auction with topUp 1 +// testcase4 -- staking v3.step3 --> tx1 fail, tx2 - node in auction, tx3 - node in auction with topUp 1 + +// // Internal test scenario #3 +func TestChainSimulator_SimpleStake(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + t.Run("staking ph 4 is not active", func(t *testing.T) { + testChainSimulatorSimpleStake(t, 1, "queued") + }) + + t.Run("staking ph 4 step1", func(t *testing.T) { + testChainSimulatorSimpleStake(t, 2, "auction") + }) + + t.Run("staking ph 4 step2", func(t *testing.T) { + testChainSimulatorSimpleStake(t, 3, "auction") + }) + + t.Run("staking ph 4 step3", func(t *testing.T) { + testChainSimulatorSimpleStake(t, 4, "auction") + }) +} + +func testChainSimulatorSimpleStake(t *testing.T, targetEpoch int32, nodesStatus string) { + if testing.Short() { + t.Skip("this is not a short test") + } + startTime := time.Now().Unix() + roundDurationInMillis := uint64(6000) + roundsPerEpoch := core.OptionalUint64{ + HasValue: true, + Value: 20, + } + + numOfShards := uint32(3) + + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: numOfShards, + GenesisTimestamp: startTime, + 
RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + configs.SetStakingV4ActivationEpochs(cfg, 2) + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + defer cs.Close() + + mintValue := big.NewInt(0).Mul(oneEGLD, big.NewInt(3000)) + wallet1, err := cs.GenerateAndMintWalletAddress(0, mintValue) + require.Nil(t, err) + wallet2, err := cs.GenerateAndMintWalletAddress(0, mintValue) + require.Nil(t, err) + wallet3, err := cs.GenerateAndMintWalletAddress(0, mintValue) + require.Nil(t, err) + + _, blsKeys, err := chainSimulator.GenerateBlsPrivateKeys(3) + require.Nil(t, err) + + err = cs.GenerateBlocksUntilEpochIsReached(targetEpoch) + require.Nil(t, err) + + dataFieldTx1 := fmt.Sprintf("stake@01@%s@%s", blsKeys[0], mockBLSSignature) + tx1Value := big.NewInt(0).Mul(big.NewInt(2499), oneEGLD) + tx1 := generateTransaction(wallet1.Bytes, 0, vm.ValidatorSCAddress, tx1Value, dataFieldTx1, gasLimitForStakeOperation) + + dataFieldTx2 := fmt.Sprintf("stake@01@%s@%s", blsKeys[1], mockBLSSignature) + tx2 := generateTransaction(wallet3.Bytes, 0, vm.ValidatorSCAddress, minimumStakeValue, dataFieldTx2, gasLimitForStakeOperation) + + dataFieldTx3 := fmt.Sprintf("stake@01@%s@%s", blsKeys[2], mockBLSSignature) + tx3Value := big.NewInt(0).Mul(big.NewInt(2501), oneEGLD) + tx3 := generateTransaction(wallet2.Bytes, 0, vm.ValidatorSCAddress, tx3Value, dataFieldTx3, gasLimitForStakeOperation) + + results, err := cs.SendTxsAndGenerateBlocksTilAreExecuted([]*transaction.Transaction{tx1, tx2, tx3}, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.Equal(t, 3, len(results)) + require.NotNil(t, results) + + // tx1 should fail + require.Equal(t, "insufficient stake value: expected 2500000000000000000000, got 2499000000000000000000", string(results[0].Logs.Events[0].Topics[1])) + + _ = cs.GenerateBlocks(1) + + metachainNode := cs.GetNodeHandler(core.MetachainShardId) + if targetEpoch < 2 { + bls1, _ := hex.DecodeString(blsKeys[1]) + bls2, _ := hex.DecodeString(blsKeys[2]) + + blsKeyStatus := getBLSKeyStatus(t, metachainNode, bls1) + require.Equal(t, nodesStatus, blsKeyStatus) + + blsKeyStatus = getBLSKeyStatus(t, metachainNode, bls2) + require.Equal(t, nodesStatus, blsKeyStatus) + } else { + // tx2 -- validator should be in queue + checkValidatorStatus(t, cs, blsKeys[1], nodesStatus) + // tx3 -- validator should be in queue + checkValidatorStatus(t, cs, blsKeys[2], nodesStatus) + } +} + +// Test auction list api calls during stakingV4 step 2 and onwards. +// Nodes configuration at genesis consisting of a total of 32 nodes, distributed on 3 shards + meta: +// - 4 eligible nodes/shard +// - 4 waiting nodes/shard +// - 2 nodes to shuffle per shard +// - max num nodes config for stakingV4 step3 = 24 (being downsized from previously 32 nodes) +// Steps: +// 1. Stake 1 node and check that in stakingV4 step1 it is unstaked +// 2. Re-stake the node to enter the auction list +// 3. 
From stakingV4 step2 onwards, check that api returns 8 qualified + 1 unqualified nodes +func TestChainSimulator_StakingV4Step2APICalls(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + stakingV4Step1Epoch := uint32(2) + stakingV4Step2Epoch := uint32(3) + stakingV4Step3Epoch := uint32(4) + + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: uint64(6000), + RoundsPerEpoch: core.OptionalUint64{ + HasValue: true, + Value: 30, + }, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 4, + MetaChainMinNodes: 4, + NumNodesWaitingListMeta: 4, + NumNodesWaitingListShard: 4, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = stakingV4Step1Epoch + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = stakingV4Step2Epoch + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = stakingV4Step3Epoch + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[1].MaxNumNodes = 32 + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[1].NodesToShufflePerShard = 2 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = stakingV4Step3Epoch + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].MaxNumNodes = 24 + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].NodesToShufflePerShard = 2 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + privateKey, blsKeys, err := chainSimulator.GenerateBlsPrivateKeys(1) + require.Nil(t, err) + err = cs.AddValidatorKeys(privateKey) + require.Nil(t, err) + + mintValue := big.NewInt(0).Add(minimumStakeValue, oneEGLD) + validatorOwner, err := cs.GenerateAndMintWalletAddress(core.AllShardId, mintValue) + require.Nil(t, err) + + // Stake a new validator that should end up in auction in step 1 + txDataField := fmt.Sprintf("stake@01@%s@%s", blsKeys[0], mockBLSSignature) + txStake := generateTransaction(validatorOwner.Bytes, 0, vm.ValidatorSCAddress, minimumStakeValue, txDataField, gasLimitForStakeOperation) + stakeTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txStake, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, stakeTx) + + metachainNode := cs.GetNodeHandler(core.MetachainShardId) + err = cs.GenerateBlocksUntilEpochIsReached(int32(stakingV4Step1Epoch)) + require.Nil(t, err) + err = cs.GenerateBlocks(2) + require.Nil(t, err) + + // In step 1, only the previously staked node should be in auction list + err = metachainNode.GetProcessComponents().ValidatorsProvider().ForceUpdate() + require.Nil(t, err) + auctionList, err := metachainNode.GetProcessComponents().ValidatorsProvider().GetAuctionList() + require.Nil(t, err) + require.Empty(t, auctionList) + + // re-stake the node + txDataField = fmt.Sprintf("reStakeUnStakedNodes@%s", blsKeys[0]) + txReStake := generateTransaction(validatorOwner.Bytes, 1, vm.ValidatorSCAddress, big.NewInt(0), txDataField, gasLimitForStakeOperation) + reStakeTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txReStake, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, reStakeTx) + + err = cs.GenerateBlocks(2) + require.Nil(t, err) + + // after the re-stake process, the node should be in auction list + err = metachainNode.GetProcessComponents().ValidatorsProvider().ForceUpdate() + require.Nil(t, err) 
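+	// ForceUpdate (called just above) presumably refreshes the validators provider's internal
+	// cache, so the auction list fetched below already reflects the re-stake instead of waiting
+	// for the periodic cache refresh. The expected result is a single auction entry owned by
+	// validatorOwner, with the re-staked BLS key marked as qualified and zero top-up.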
+ auctionList, err = metachainNode.GetProcessComponents().ValidatorsProvider().GetAuctionList() + require.Nil(t, err) + require.Equal(t, []*common.AuctionListValidatorAPIResponse{ + { + Owner: validatorOwner.Bech32, + NumStakedNodes: 1, + TotalTopUp: "0", + TopUpPerNode: "0", + QualifiedTopUp: "0", + Nodes: []*common.AuctionNode{ + { + BlsKey: blsKeys[0], + Qualified: true, + }, + }, + }, + }, auctionList) + + // For steps 2,3 and onwards, when making API calls, we'll be using the api nodes config provider to mimic the max number of + // nodes as it will be in step 3. This means we'll see the 8 nodes that were shuffled out from the eligible list, + // plus the additional node that was staked manually. + // Since those 8 shuffled out nodes will be replaced only with another 8 nodes, and the auction list size = 9, + // the outcome should show 8 nodes qualifying and 1 node not qualifying + for epochToSimulate := int32(stakingV4Step2Epoch); epochToSimulate < int32(stakingV4Step3Epoch)+3; epochToSimulate++ { + err = cs.GenerateBlocksUntilEpochIsReached(epochToSimulate) + require.Nil(t, err) + err = cs.GenerateBlocks(2) + require.Nil(t, err) + + numQualified, numUnQualified := getNumQualifiedAndUnqualified(t, metachainNode) + require.Equal(t, 8, numQualified) + require.Equal(t, 1, numUnQualified) + } +} + +func getNumQualifiedAndUnqualified(t *testing.T, metachainNode process.NodeHandler) (int, int) { + err := metachainNode.GetProcessComponents().ValidatorsProvider().ForceUpdate() + require.Nil(t, err) + auctionList, err := metachainNode.GetProcessComponents().ValidatorsProvider().GetAuctionList() + require.Nil(t, err) + + numQualified := 0 + numUnQualified := 0 + + for _, auctionOwnerData := range auctionList { + for _, auctionNode := range auctionOwnerData.Nodes { + if auctionNode.Qualified { + numQualified++ + } else { + numUnQualified++ + } + } + } + + return numQualified, numUnQualified +} diff --git a/integrationTests/chainSimulator/staking/stakeAndUnStake_test.go b/integrationTests/chainSimulator/staking/stakeAndUnStake_test.go new file mode 100644 index 00000000000..b4c3fb6cf70 --- /dev/null +++ b/integrationTests/chainSimulator/staking/stakeAndUnStake_test.go @@ -0,0 +1,2302 @@ +package staking + +import ( + "encoding/hex" + "fmt" + "math/big" + "testing" + "time" + + "github.com/multiversx/mx-chain-core-go/core" + coreAPI "github.com/multiversx/mx-chain-core-go/data/api" + "github.com/multiversx/mx-chain-core-go/data/transaction" + "github.com/multiversx/mx-chain-core-go/data/validator" + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/config" + chainSimulatorIntegrationTests "github.com/multiversx/mx-chain-go/integrationTests/chainSimulator" + "github.com/multiversx/mx-chain-go/node/chainSimulator" + "github.com/multiversx/mx-chain-go/node/chainSimulator/components/api" + "github.com/multiversx/mx-chain-go/node/chainSimulator/configs" + "github.com/multiversx/mx-chain-go/node/chainSimulator/dtos" + chainSimulatorProcess "github.com/multiversx/mx-chain-go/node/chainSimulator/process" + "github.com/multiversx/mx-chain-go/process" + "github.com/multiversx/mx-chain-go/vm" + logger "github.com/multiversx/mx-chain-logger-go" + "github.com/stretchr/testify/require" +) + +const ( + defaultPathToInitialConfig = "../../../cmd/node/config/" + maxNumOfBlockToGenerateWhenExecutingTx = 7 +) + +var log = logger.GetOrCreate("integrationTests/chainSimulator") + +// TODO scenarios +// Make a staking provider with max num of nodes +// DO a merge transaction + +// Test 
scenario +// 1. Add a new validator private key in the multi key handler +// 2. Do a stake transaction for the validator key +// 3. Do an unstake transaction (to make a place for the new validator) +// 4. Check if the new validator has generated rewards +func TestChainSimulator_AddValidatorKey(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + startTime := time.Now().Unix() + roundDurationInMillis := uint64(6000) + roundsPerEpoch := core.OptionalUint64{ + HasValue: true, + Value: 20, + } + + numOfShards := uint32(3) + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: numOfShards, + GenesisTimestamp: startTime, + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 0, + NumNodesWaitingListShard: 0, + AlterConfigsFunction: func(cfg *config.Configs) { + newNumNodes := cfg.SystemSCConfig.StakingSystemSCConfig.MaxNumberOfNodesForStake + 8 // 8 nodes until new nodes will be placed on queue + configs.SetMaxNumberOfNodesInConfigs(cfg, newNumNodes, numOfShards) + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + err = cs.GenerateBlocks(30) + require.Nil(t, err) + + // Step 1 --- add a new validator key in the chain simulator + privateKey, blsKeys, err := chainSimulator.GenerateBlsPrivateKeys(1) + require.Nil(t, err) + + err = cs.AddValidatorKeys(privateKey) + require.Nil(t, err) + + newValidatorOwner := "erd1l6xt0rqlyzw56a3k8xwwshq2dcjwy3q9cppucvqsmdyw8r98dz3sae0kxl" + newValidatorOwnerBytes, _ := cs.GetNodeHandler(0).GetCoreComponents().AddressPubKeyConverter().Decode(newValidatorOwner) + rcv := "erd1qqqqqqqqqqqqqqqpqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqplllst77y4l" + rcvAddrBytes, _ := cs.GetNodeHandler(0).GetCoreComponents().AddressPubKeyConverter().Decode(rcv) + + // Step 2 --- set an initial balance for the address that will initialize all the transactions + err = cs.SetStateMultiple([]*dtos.AddressState{ + { + Address: "erd1l6xt0rqlyzw56a3k8xwwshq2dcjwy3q9cppucvqsmdyw8r98dz3sae0kxl", + Balance: "10000000000000000000000", + }, + }) + require.Nil(t, err) + + // Step 3 --- generate and send a stake transaction with the BLS key of the validator key that was added at step 1 + stakeValue, _ := big.NewInt(0).SetString("2500000000000000000000", 10) + tx := &transaction.Transaction{ + Nonce: 0, + Value: stakeValue, + SndAddr: newValidatorOwnerBytes, + RcvAddr: rcvAddrBytes, + Data: []byte(fmt.Sprintf("stake@01@%s@010101", blsKeys[0])), + GasLimit: 50_000_000, + GasPrice: 1000000000, + Signature: []byte("dummy"), + ChainID: []byte(configs.ChainID), + Version: 1, + } + stakeTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, stakeTx) + + shardIDValidatorOwner := cs.GetNodeHandler(0).GetShardCoordinator().ComputeId(newValidatorOwnerBytes) + accountValidatorOwner, _, err := cs.GetNodeHandler(shardIDValidatorOwner).GetFacadeHandler().GetAccount(newValidatorOwner, coreAPI.AccountQueryOptions{}) + require.Nil(t, err) + balanceBeforeActiveValidator := accountValidatorOwner.Balance + + // Step 5 --- create an unStake transaction with the bls key of an initial validator and execute the transaction to make place for the validator that was added at step 3 + firstValidatorKey, err := 
cs.GetValidatorPrivateKeys()[0].GeneratePublic().ToByteArray() + require.Nil(t, err) + + initialAddressWithValidators := cs.GetInitialWalletKeys().StakeWallets[0].Address + shardID := cs.GetNodeHandler(0).GetShardCoordinator().ComputeId(initialAddressWithValidators.Bytes) + initialAccount, _, err := cs.GetNodeHandler(shardID).GetFacadeHandler().GetAccount(initialAddressWithValidators.Bech32, coreAPI.AccountQueryOptions{}) + require.Nil(t, err) + tx = &transaction.Transaction{ + Nonce: initialAccount.Nonce, + Value: big.NewInt(0), + SndAddr: initialAddressWithValidators.Bytes, + RcvAddr: rcvAddrBytes, + Data: []byte(fmt.Sprintf("unStake@%s", hex.EncodeToString(firstValidatorKey))), + GasLimit: 50_000_000, + GasPrice: 1000000000, + Signature: []byte("dummy"), + ChainID: []byte(configs.ChainID), + Version: 1, + } + _, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + + // Step 6 --- generate 8 epochs to get rewards + err = cs.GenerateBlocksUntilEpochIsReached(8) + require.Nil(t, err) + + metachainNode := cs.GetNodeHandler(core.MetachainShardId) + err = metachainNode.GetProcessComponents().ValidatorsProvider().ForceUpdate() + require.Nil(t, err) + validatorStatistics, err := metachainNode.GetFacadeHandler().ValidatorStatisticsApi() + require.Nil(t, err) + checkValidatorsRating(t, validatorStatistics) + + accountValidatorOwner, _, err = cs.GetNodeHandler(shardIDValidatorOwner).GetFacadeHandler().GetAccount(newValidatorOwner, coreAPI.AccountQueryOptions{}) + require.Nil(t, err) + balanceAfterActiveValidator := accountValidatorOwner.Balance + + log.Info("balance before validator", "value", balanceBeforeActiveValidator) + log.Info("balance after validator", "value", balanceAfterActiveValidator) + + balanceBeforeBig, _ := big.NewInt(0).SetString(balanceBeforeActiveValidator, 10) + balanceAfterBig, _ := big.NewInt(0).SetString(balanceAfterActiveValidator, 10) + diff := balanceAfterBig.Sub(balanceAfterBig, balanceBeforeBig) + log.Info("difference", "value", diff.String()) + + // Step 7 --- check the balance of the validator owner has been increased + require.True(t, diff.Cmp(big.NewInt(0)) > 0) +} + +func TestChainSimulator_AddANewValidatorAfterStakingV4(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + startTime := time.Now().Unix() + roundDurationInMillis := uint64(6000) + roundsPerEpoch := core.OptionalUint64{ + HasValue: true, + Value: 20, + } + numOfShards := uint32(3) + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: numOfShards, + GenesisTimestamp: startTime, + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 100, + MetaChainMinNodes: 100, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.SystemSCConfig.StakingSystemSCConfig.NodeLimitPercentage = 1 + cfg.GeneralConfig.ValidatorStatistics.CacheRefreshIntervalInSec = 1 + newNumNodes := cfg.SystemSCConfig.StakingSystemSCConfig.MaxNumberOfNodesForStake + 8 // 8 nodes until new nodes will be placed on queue + configs.SetMaxNumberOfNodesInConfigs(cfg, newNumNodes, numOfShards) + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + err = cs.GenerateBlocks(150) + require.Nil(t, err) + + // Step 1 --- add a new validator key in the chain simulator + numOfNodes := 20 + 
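+	// The stake transaction assembled below registers all 20 BLS keys in a single call. Its
+	// data field has the form "stake@<numOfNodes in hex>@<blsKey1>@<sig1>@<blsKey2>@<sig2>...",
+	// so for 20 nodes the first argument is "14" (0x14). "010101" is the same placeholder BLS
+	// signature used by the other tests in this package; the simulator is assumed to accept it
+	// without performing a real BLS signature check.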
validatorSecretKeysBytes, blsKeys, err := chainSimulator.GenerateBlsPrivateKeys(numOfNodes) + require.Nil(t, err) + err = cs.AddValidatorKeys(validatorSecretKeysBytes) + require.Nil(t, err) + + newValidatorOwner := "erd1l6xt0rqlyzw56a3k8xwwshq2dcjwy3q9cppucvqsmdyw8r98dz3sae0kxl" + newValidatorOwnerBytes, _ := cs.GetNodeHandler(0).GetCoreComponents().AddressPubKeyConverter().Decode(newValidatorOwner) + rcv := "erd1qqqqqqqqqqqqqqqpqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqplllst77y4l" + rcvAddrBytes, _ := cs.GetNodeHandler(0).GetCoreComponents().AddressPubKeyConverter().Decode(rcv) + + // Step 2 --- set an initial balance for the address that will initialize all the transactions + err = cs.SetStateMultiple([]*dtos.AddressState{ + { + Address: "erd1l6xt0rqlyzw56a3k8xwwshq2dcjwy3q9cppucvqsmdyw8r98dz3sae0kxl", + Balance: "1000000000000000000000000", + }, + }) + require.Nil(t, err) + + // Step 3 --- generate and send a stake transaction with the BLS keys of the validators key that were added at step 1 + validatorData := "" + for _, blsKey := range blsKeys { + validatorData += fmt.Sprintf("@%s@010101", blsKey) + } + + numOfNodesHex := hex.EncodeToString(big.NewInt(int64(numOfNodes)).Bytes()) + stakeValue, _ := big.NewInt(0).SetString("51000000000000000000000", 10) + tx := &transaction.Transaction{ + Nonce: 0, + Value: stakeValue, + SndAddr: newValidatorOwnerBytes, + RcvAddr: rcvAddrBytes, + Data: []byte(fmt.Sprintf("stake@%s%s", numOfNodesHex, validatorData)), + GasLimit: 500_000_000, + GasPrice: 1000000000, + Signature: []byte("dummy"), + ChainID: []byte(configs.ChainID), + Version: 1, + } + + txFromNetwork, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txFromNetwork) + + err = cs.GenerateBlocks(1) + require.Nil(t, err) + + metachainNode := cs.GetNodeHandler(core.MetachainShardId) + err = metachainNode.GetProcessComponents().ValidatorsProvider().ForceUpdate() + require.Nil(t, err) + results, err := metachainNode.GetFacadeHandler().AuctionListApi() + require.Nil(t, err) + require.Equal(t, newValidatorOwner, results[0].Owner) + require.Equal(t, 20, len(results[0].Nodes)) + checkTotalQualified(t, results, 8) + + err = cs.GenerateBlocks(100) + require.Nil(t, err) + + results, err = cs.GetNodeHandler(core.MetachainShardId).GetFacadeHandler().AuctionListApi() + require.Nil(t, err) + checkTotalQualified(t, results, 0) +} + +// Internal test scenario #4 #5 #6 +// do stake +// do unStake +// do unBondNodes +// do unBondTokens +func TestChainSimulatorStakeUnStakeUnBond(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + t.Run("staking ph 4 is not active", func(t *testing.T) { + testStakeUnStakeUnBond(t, 1) + }) + + t.Run("staking ph 4 step 1 active", func(t *testing.T) { + testStakeUnStakeUnBond(t, 4) + }) + + t.Run("staking ph 4 step 2 active", func(t *testing.T) { + testStakeUnStakeUnBond(t, 5) + }) + + t.Run("staking ph 4 step 3 active", func(t *testing.T) { + testStakeUnStakeUnBond(t, 6) + }) +} + +func testStakeUnStakeUnBond(t *testing.T, targetEpoch int32) { + startTime := time.Now().Unix() + roundDurationInMillis := uint64(6000) + roundsPerEpoch := core.OptionalUint64{ + HasValue: true, + Value: 30, + } + numOfShards := uint32(3) + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: numOfShards, + GenesisTimestamp: startTime, + 
RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.SystemSCConfig.StakingSystemSCConfig.UnBondPeriod = 1 + cfg.SystemSCConfig.StakingSystemSCConfig.UnBondPeriodInEpochs = 1 + newNumNodes := cfg.SystemSCConfig.StakingSystemSCConfig.MaxNumberOfNodesForStake + 10 + configs.SetMaxNumberOfNodesInConfigs(cfg, newNumNodes, numOfShards) + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + err = cs.GenerateBlocksUntilEpochIsReached(targetEpoch) + require.Nil(t, err) + + privateKeys, blsKeys, err := chainSimulator.GenerateBlsPrivateKeys(1) + require.Nil(t, err) + err = cs.AddValidatorKeys(privateKeys) + require.Nil(t, err) + + mintValue := big.NewInt(0).Mul(oneEGLD, big.NewInt(2600)) + walletAddressShardID := uint32(0) + walletAddress, err := cs.GenerateAndMintWalletAddress(walletAddressShardID, mintValue) + require.Nil(t, err) + + txDataField := fmt.Sprintf("stake@01@%s@%s", blsKeys[0], mockBLSSignature) + txStake := generateTransaction(walletAddress.Bytes, 0, vm.ValidatorSCAddress, minimumStakeValue, txDataField, gasLimitForStakeOperation) + stakeTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txStake, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, stakeTx) + + metachainNode := cs.GetNodeHandler(core.MetachainShardId) + bls0, _ := hex.DecodeString(blsKeys[0]) + blsKeyStatus := getBLSKeyStatus(t, metachainNode, bls0) + require.Equal(t, "staked", blsKeyStatus) + + // do unStake + txUnStake := generateTransaction(walletAddress.Bytes, 1, vm.ValidatorSCAddress, zeroValue, fmt.Sprintf("unStake@%s", blsKeys[0]), gasLimitForStakeOperation) + unStakeTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txUnStake, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, unStakeTx) + + blsKeyStatus = getBLSKeyStatus(t, metachainNode, bls0) + require.Equal(t, "unStaked", blsKeyStatus) + + err = cs.GenerateBlocksUntilEpochIsReached(targetEpoch + 1) + require.Nil(t, err) + + // do unBond + txUnBond := generateTransaction(walletAddress.Bytes, 2, vm.ValidatorSCAddress, zeroValue, fmt.Sprintf("unBondNodes@%s", blsKeys[0]), gasLimitForStakeOperation) + unBondTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txUnBond, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, unBondTx) + + // do claim + txClaim := generateTransaction(walletAddress.Bytes, 3, vm.ValidatorSCAddress, zeroValue, "unBondTokens", gasLimitForStakeOperation) + claimTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txClaim, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, claimTx) + + err = cs.GenerateBlocks(5) + require.Nil(t, err) + + // check tokens are in the wallet balance + walletAccount, _, err := cs.GetNodeHandler(walletAddressShardID).GetFacadeHandler().GetAccount(walletAddress.Bech32, coreAPI.AccountQueryOptions{}) + require.Nil(t, err) + walletBalanceBig, _ := big.NewInt(0).SetString(walletAccount.Balance, 10) + require.True(t, walletBalanceBig.Cmp(minimumStakeValue) > 0) +} + +func checkTotalQualified(t *testing.T, auctionList []*common.AuctionListValidatorAPIResponse, expected int) { + totalQualified := 0 + for _, res := range auctionList { + for _, node := range res.Nodes { + if node.Qualified { + totalQualified++ + } + } + } + require.Equal(t, expected, totalQualified) +} + +func checkValidatorsRating(t 
*testing.T, validatorStatistics map[string]*validator.ValidatorStatistics) { + countRatingIncreased := 0 + for _, validatorInfo := range validatorStatistics { + validatorSignedAtLeastOneBlock := validatorInfo.NumValidatorSuccess > 0 || validatorInfo.NumLeaderSuccess > 0 + if !validatorSignedAtLeastOneBlock { + continue + } + countRatingIncreased++ + require.Greater(t, validatorInfo.TempRating, validatorInfo.Rating) + } + require.Greater(t, countRatingIncreased, 0) +} + +// Test description +// Stake funds - happy flow +// +// Preconditions: have an account with egld and 2 staked nodes (2500 stake per node) - directly staked, and no unstake +// +// 1. Check the stake amount for the owner of the staked nodes with the vmquery "getTotalStaked", and the account current EGLD balance +// 2. Create from the owner of staked nodes a transaction to stake 1 EGLD and send it to the network +// 3. Check the outcome of the TX & verify new stake state with vmquery + +// Internal test scenario #24 +func TestChainSimulator_DirectStakingNodes_StakeFunds(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + roundDurationInMillis := uint64(6000) + roundsPerEpoch := core.OptionalUint64{ + HasValue: true, + Value: 30, + } + + t.Run("staking ph 4 is not active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 100 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 101 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 102 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 102 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorDirectStakedNodesStakingFunds(t, cs, 1) + }) + + t.Run("staking ph 4 step 1 is active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorDirectStakedNodesStakingFunds(t, cs, 2) + }) + + t.Run("staking ph 4 step 2 is active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: 
time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorDirectStakedNodesStakingFunds(t, cs, 3) + }) + + t.Run("staking ph 4 step 3 is active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorDirectStakedNodesStakingFunds(t, cs, 4) + }) +} + +func testChainSimulatorDirectStakedNodesStakingFunds(t *testing.T, cs chainSimulatorIntegrationTests.ChainSimulator, targetEpoch int32) { + err := cs.GenerateBlocksUntilEpochIsReached(targetEpoch) + require.Nil(t, err) + + log.Info("Preconditions. Have an account with 2 staked nodes") + privateKeys, blsKeys, err := chainSimulator.GenerateBlsPrivateKeys(2) + require.Nil(t, err) + + err = cs.AddValidatorKeys(privateKeys) + require.Nil(t, err) + metachainNode := cs.GetNodeHandler(core.MetachainShardId) + + mintValue := big.NewInt(5010) + mintValue = mintValue.Mul(oneEGLD, mintValue) + + validatorOwner, err := cs.GenerateAndMintWalletAddress(core.AllShardId, mintValue) + require.Nil(t, err) + + stakeValue := big.NewInt(0).Set(minimumStakeValue) + txDataField := fmt.Sprintf("stake@01@%s@%s", blsKeys[0], mockBLSSignature) + txStake := generateTransaction(validatorOwner.Bytes, 0, vm.ValidatorSCAddress, stakeValue, txDataField, gasLimitForStakeOperation) + stakeTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txStake, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, stakeTx) + + err = cs.GenerateBlocks(2) // allow the metachain to finalize the block that contains the staking of the node + require.Nil(t, err) + + stakeValue = big.NewInt(0).Set(minimumStakeValue) + txDataField = fmt.Sprintf("stake@01@%s@%s", blsKeys[1], mockBLSSignature) + txStake = generateTransaction(validatorOwner.Bytes, 1, vm.ValidatorSCAddress, stakeValue, txDataField, gasLimitForStakeOperation) + stakeTx, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(txStake, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, stakeTx) + + err = cs.GenerateBlocks(2) // allow the metachain to finalize the block that contains the staking of the node + require.Nil(t, err) + + log.Info("Step 1. 
Check the stake amount for the owner of the staked nodes") + checkExpectedStakedValue(t, metachainNode, validatorOwner.Bytes, 5000) + + log.Info("Step 2. Create from the owner of the staked nodes a tx to stake 1 EGLD") + + stakeValue = big.NewInt(0).Mul(oneEGLD, big.NewInt(1)) + txDataField = fmt.Sprintf("stake@01@%s@%s", blsKeys[0], mockBLSSignature) + txStake = generateTransaction(validatorOwner.Bytes, 2, vm.ValidatorSCAddress, stakeValue, txDataField, gasLimitForStakeOperation) + stakeTx, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(txStake, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, stakeTx) + + err = cs.GenerateBlocks(2) // allow the metachain to finalize the block that contains the staking of the node + require.Nil(t, err) + + log.Info("Step 3. Check the stake amount for the owner of the staked nodes") + checkExpectedStakedValue(t, metachainNode, validatorOwner.Bytes, 5001) +} + +func checkExpectedStakedValue(t *testing.T, metachainNode chainSimulatorProcess.NodeHandler, blsKey []byte, expectedValue int64) { + totalStaked := getTotalStaked(t, metachainNode, blsKey) + + expectedStaked := big.NewInt(expectedValue) + expectedStaked = expectedStaked.Mul(oneEGLD, expectedStaked) + require.Equal(t, expectedStaked.String(), string(totalStaked)) +} + +func getTotalStaked(t *testing.T, metachainNode chainSimulatorProcess.NodeHandler, blsKey []byte) []byte { + scQuery := &process.SCQuery{ + ScAddress: vm.ValidatorSCAddress, + FuncName: "getTotalStaked", + CallerAddr: vm.ValidatorSCAddress, + CallValue: big.NewInt(0), + Arguments: [][]byte{blsKey}, + } + result, _, err := metachainNode.GetFacadeHandler().ExecuteSCQuery(scQuery) + require.Nil(t, err) + require.Equal(t, okReturnCode, result.ReturnCode) + + return result.ReturnData[0] +} + +// Test description: +// Unstake funds with deactivation of node if below 2500 -> the rest of funds are distributed as topup at epoch change +// +// Internal test scenario #26 +func TestChainSimulator_DirectStakingNodes_UnstakeFundsWithDeactivation(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + roundDurationInMillis := uint64(6000) + roundsPerEpoch := core.OptionalUint64{ + HasValue: true, + Value: 30, + } + + // Test Steps + // 1. Check the stake amount and number of nodes for the owner of the staked nodes with the vmquery "getTotalStaked", and the account current EGLD balance + // 2. Create from the owner of staked nodes a transaction to unstake 10 EGLD and send it to the network + // 3. Check the outcome of the TX & verify new stake state with vmquery "getTotalStaked" and "getUnStakedTokensList" + // 4. 
Wait for change of epoch and check the outcome + + t.Run("staking ph 4 is not active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakeLimitsEnableEpoch = 100 + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 100 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 101 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 102 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 102 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorDirectStakedUnstakeFundsWithDeactivation(t, cs, 1) + }) + + t.Run("staking ph 4 step 1 is active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakeLimitsEnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + cfg.SystemSCConfig.StakingSystemSCConfig.NodeLimitPercentage = 1 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorDirectStakedUnstakeFundsWithDeactivation(t, cs, 2) + }) + + t.Run("staking ph 4 step 2 is active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakeLimitsEnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + cfg.SystemSCConfig.StakingSystemSCConfig.NodeLimitPercentage = 1 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorDirectStakedUnstakeFundsWithDeactivation(t, cs, 3) + }) + + t.Run("staking ph 4 step 3 is active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + 
BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakeLimitsEnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + cfg.SystemSCConfig.StakingSystemSCConfig.NodeLimitPercentage = 1 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorDirectStakedUnstakeFundsWithDeactivation(t, cs, 4) + }) +} + +func testChainSimulatorDirectStakedUnstakeFundsWithDeactivation(t *testing.T, cs chainSimulatorIntegrationTests.ChainSimulator, targetEpoch int32) { + err := cs.GenerateBlocksUntilEpochIsReached(targetEpoch) + require.Nil(t, err) + + privateKeys, blsKeys, err := chainSimulator.GenerateBlsPrivateKeys(2) + require.Nil(t, err) + + err = cs.AddValidatorKeys(privateKeys) + require.Nil(t, err) + metachainNode := cs.GetNodeHandler(core.MetachainShardId) + + mintValue := big.NewInt(5010) + mintValue = mintValue.Mul(oneEGLD, mintValue) + + validatorOwner, err := cs.GenerateAndMintWalletAddress(core.AllShardId, mintValue) + require.Nil(t, err) + + stakeValue := big.NewInt(0).Set(minimumStakeValue) + txDataField := fmt.Sprintf("stake@01@%s@%s", blsKeys[0], mockBLSSignature) + txStake := generateTransaction(validatorOwner.Bytes, 0, vm.ValidatorSCAddress, stakeValue, txDataField, gasLimitForStakeOperation) + stakeTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txStake, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, stakeTx) + + err = cs.GenerateBlocks(2) // allow the metachain to finalize the block that contains the staking of the node + require.Nil(t, err) + + testBLSKeyStaked(t, metachainNode, blsKeys[0]) + + stakeValue = big.NewInt(0).Set(minimumStakeValue) + txDataField = fmt.Sprintf("stake@01@%s@%s", blsKeys[1], mockBLSSignature) + txStake = generateTransaction(validatorOwner.Bytes, 1, vm.ValidatorSCAddress, stakeValue, txDataField, gasLimitForStakeOperation) + stakeTx, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(txStake, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, stakeTx) + + err = cs.GenerateBlocks(2) // allow the metachain to finalize the block that contains the staking of the node + require.Nil(t, err) + + testBLSKeyStaked(t, metachainNode, blsKeys[1]) + + log.Info("Step 1. Check the stake amount for the owner of the staked nodes") + checkExpectedStakedValue(t, metachainNode, validatorOwner.Bytes, 5000) + + log.Info("Step 2. 
Create from the owner of staked nodes a transaction to unstake 10 EGLD and send it to the network")
+
+	unStakeValue := big.NewInt(10)
+	unStakeValue = unStakeValue.Mul(oneEGLD, unStakeValue)
+	txDataField = fmt.Sprintf("unStakeTokens@%s", hex.EncodeToString(unStakeValue.Bytes()))
+	txUnStake := generateTransaction(validatorOwner.Bytes, 2, vm.ValidatorSCAddress, zeroValue, txDataField, gasLimitForStakeOperation)
+	unStakeTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txUnStake, maxNumOfBlockToGenerateWhenExecutingTx)
+	require.Nil(t, err)
+	require.NotNil(t, unStakeTx)
+
+	err = cs.GenerateBlocks(2)
+	require.Nil(t, err)
+
+	log.Info("Step 3. Check the outcome of the TX & verify new stake state with vmquery getTotalStaked and getUnStakedTokensList")
+	checkExpectedStakedValue(t, metachainNode, validatorOwner.Bytes, 4990)
+
+	unStakedTokensAmount := getUnStakedTokensList(t, metachainNode, validatorOwner.Bytes)
+
+	expectedUnStaked := big.NewInt(10)
+	expectedUnStaked = expectedUnStaked.Mul(oneEGLD, expectedUnStaked)
+	require.Equal(t, expectedUnStaked.String(), big.NewInt(0).SetBytes(unStakedTokensAmount).String())
+
+	log.Info("Step 4. Wait for change of epoch and check the outcome")
+	err = cs.GenerateBlocksUntilEpochIsReached(targetEpoch + 1)
+	require.Nil(t, err)
+
+	checkOneOfTheNodesIsUnstaked(t, metachainNode, blsKeys[:2])
+}
+
+func getUnStakedTokensList(t *testing.T, metachainNode chainSimulatorProcess.NodeHandler, blsKey []byte) []byte {
+	scQuery := &process.SCQuery{
+		ScAddress:  vm.ValidatorSCAddress,
+		FuncName:   "getUnStakedTokensList",
+		CallerAddr: vm.ValidatorSCAddress,
+		CallValue:  big.NewInt(0),
+		Arguments:  [][]byte{blsKey},
+	}
+	result, _, err := metachainNode.GetFacadeHandler().ExecuteSCQuery(scQuery)
+	require.Nil(t, err)
+	require.Equal(t, okReturnCode, result.ReturnCode)
+
+	return result.ReturnData[0]
+}
+
+func checkOneOfTheNodesIsUnstaked(t *testing.T,
+	metachainNode chainSimulatorProcess.NodeHandler,
+	blsKeys []string,
+) {
+	decodedBLSKey0, _ := hex.DecodeString(blsKeys[0])
+	keyStatus0 := getBLSKeyStatus(t, metachainNode, decodedBLSKey0)
+	log.Info("Key info", "key", blsKeys[0], "status", keyStatus0)
+
+	isNotStaked0 := keyStatus0 == unStakedStatus
+
+	decodedBLSKey1, _ := hex.DecodeString(blsKeys[1])
+	keyStatus1 := getBLSKeyStatus(t, metachainNode, decodedBLSKey1)
+	log.Info("Key info", "key", blsKeys[1], "status", keyStatus1)
+
+	isNotStaked1 := keyStatus1 == unStakedStatus
+
+	require.True(t, isNotStaked0 != isNotStaked1)
+}
+
+func testBLSKeyStaked(t *testing.T,
+	metachainNode chainSimulatorProcess.NodeHandler,
+	blsKey string,
+) {
+	decodedBLSKey, _ := hex.DecodeString(blsKey)
+	err := metachainNode.GetProcessComponents().ValidatorsProvider().ForceUpdate()
+	require.Nil(t, err)
+
+	validatorStatistics, err := metachainNode.GetFacadeHandler().ValidatorStatisticsApi()
+	require.Nil(t, err)
+
+	activationEpoch := metachainNode.GetCoreComponents().EnableEpochsHandler().GetActivationEpoch(common.StakingV4Step1Flag)
+	if activationEpoch <= metachainNode.GetCoreComponents().EnableEpochsHandler().GetCurrentEpoch() {
+		require.Equal(t, stakedStatus, getBLSKeyStatus(t, metachainNode, decodedBLSKey))
+		return
+	}
+
+	// in staking ph 2/3.5 we do not find the bls key on the validator statistics
+	_, found := validatorStatistics[blsKey]
+	require.False(t, found)
+	require.Equal(t, queuedStatus, getBLSKeyStatus(t, metachainNode, decodedBLSKey))
+}
+
+// Test description:
+// Unstake funds with deactivation of node, followed by stake with sufficient amount
does not unstake node at end of epoch +// +// Internal test scenario #27 +func TestChainSimulator_DirectStakingNodes_UnstakeFundsWithDeactivation_WithReactivation(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + roundDurationInMillis := uint64(6000) + roundsPerEpoch := core.OptionalUint64{ + HasValue: true, + Value: 30, + } + + // Test Steps + // 1. Check the stake amount and number of nodes for the owner of the staked nodes with the vmquery "getTotalStaked", and the account current EGLD balance + // 2. Create from the owner of staked nodes a transaction to unstake 10 EGLD and send it to the network + // 3. Check the outcome of the TX & verify new stake state with vmquery + // 4. Create from the owner of staked nodes a transaction to stake 10 EGLD and send it to the network + // 5. Check the outcome of the TX & verify new stake state with vmquery + // 6. Wait for change of epoch and check the outcome + + t.Run("staking ph 4 is not active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakeLimitsEnableEpoch = 100 + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 100 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 101 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 102 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 102 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorDirectStakedUnstakeFundsWithDeactivationAndReactivation(t, cs, 1) + }) + + t.Run("staking ph 4 step 1 is active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakeLimitsEnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + cfg.SystemSCConfig.StakingSystemSCConfig.NodeLimitPercentage = 1 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorDirectStakedUnstakeFundsWithDeactivationAndReactivation(t, cs, 2) + }) + + t.Run("staking ph 4 step 2 is active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + 
RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakeLimitsEnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + cfg.SystemSCConfig.StakingSystemSCConfig.NodeLimitPercentage = 1 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorDirectStakedUnstakeFundsWithDeactivationAndReactivation(t, cs, 3) + }) + + t.Run("staking ph 4 step 3 is active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakeLimitsEnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + cfg.SystemSCConfig.StakingSystemSCConfig.NodeLimitPercentage = 1 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorDirectStakedUnstakeFundsWithDeactivationAndReactivation(t, cs, 4) + }) +} + +func testChainSimulatorDirectStakedUnstakeFundsWithDeactivationAndReactivation(t *testing.T, cs chainSimulatorIntegrationTests.ChainSimulator, targetEpoch int32) { + err := cs.GenerateBlocksUntilEpochIsReached(targetEpoch) + require.Nil(t, err) + + privateKeys, blsKeys, err := chainSimulator.GenerateBlsPrivateKeys(2) + require.Nil(t, err) + + err = cs.AddValidatorKeys(privateKeys) + require.Nil(t, err) + metachainNode := cs.GetNodeHandler(core.MetachainShardId) + + mintValue := big.NewInt(6000) + mintValue = mintValue.Mul(oneEGLD, mintValue) + + validatorOwner, err := cs.GenerateAndMintWalletAddress(core.AllShardId, mintValue) + require.Nil(t, err) + + stakeValue := big.NewInt(0).Set(minimumStakeValue) + txDataField := fmt.Sprintf("stake@01@%s@%s", blsKeys[0], mockBLSSignature) + txStake := generateTransaction(validatorOwner.Bytes, 0, vm.ValidatorSCAddress, stakeValue, txDataField, gasLimitForStakeOperation) + stakeTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txStake, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, stakeTx) + + err = cs.GenerateBlocks(2) // allow the metachain to finalize the block that contains the staking of the node + require.Nil(t, err) + + testBLSKeyStaked(t, metachainNode, blsKeys[0]) + + stakeValue = big.NewInt(0).Set(minimumStakeValue) + txDataField = fmt.Sprintf("stake@01@%s@%s", blsKeys[1], mockBLSSignature) + txStake = generateTransaction(validatorOwner.Bytes, 1, vm.ValidatorSCAddress, stakeValue, txDataField, gasLimitForStakeOperation) + stakeTx, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(txStake, 
maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, stakeTx) + + err = cs.GenerateBlocks(2) // allow the metachain to finalize the block that contains the staking of the node + require.Nil(t, err) + + testBLSKeyStaked(t, metachainNode, blsKeys[1]) + + log.Info("Step 1. Check the stake amount for the owner of the staked nodes") + checkExpectedStakedValue(t, metachainNode, validatorOwner.Bytes, 5000) + + log.Info("Step 2. Create from the owner of staked nodes a transaction to unstake 10 EGLD and send it to the network") + + unStakeValue := big.NewInt(10) + unStakeValue = unStakeValue.Mul(oneEGLD, unStakeValue) + txDataField = fmt.Sprintf("unStakeTokens@%s", hex.EncodeToString(unStakeValue.Bytes())) + txUnStake := generateTransaction(validatorOwner.Bytes, 2, vm.ValidatorSCAddress, zeroValue, txDataField, gasLimitForStakeOperation) + unStakeTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txUnStake, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, unStakeTx) + + err = cs.GenerateBlocks(2) + require.Nil(t, err) + + log.Info("Step 3. Check the outcome of the TX & verify new stake state with vmquery getTotalStaked and getUnStakedTokensList") + checkExpectedStakedValue(t, metachainNode, validatorOwner.Bytes, 4990) + + unStakedTokensAmount := getUnStakedTokensList(t, metachainNode, validatorOwner.Bytes) + + expectedUnStaked := big.NewInt(10) + expectedUnStaked = expectedUnStaked.Mul(oneEGLD, expectedUnStaked) + require.Equal(t, expectedUnStaked.String(), big.NewInt(0).SetBytes(unStakedTokensAmount).String()) + + log.Info("Step 4. Create from the owner of staked nodes a transaction to stake 10 EGLD and send it to the network") + + newStakeValue := big.NewInt(10) + newStakeValue = newStakeValue.Mul(oneEGLD, newStakeValue) + txDataField = fmt.Sprintf("stake@01@%s@%s", blsKeys[0], mockBLSSignature) + txStake = generateTransaction(validatorOwner.Bytes, 3, vm.ValidatorSCAddress, newStakeValue, txDataField, gasLimitForStakeOperation) + stakeTx, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(txStake, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, stakeTx) + + err = cs.GenerateBlocks(2) + require.Nil(t, err) + + log.Info("5. Check the outcome of the TX & verify new stake state with vmquery") + checkExpectedStakedValue(t, metachainNode, validatorOwner.Bytes, 5000) + + log.Info("Step 6. Wait for change of epoch and check the outcome") + err = cs.GenerateBlocksUntilEpochIsReached(targetEpoch + 1) + require.Nil(t, err) + + testBLSKeyStaked(t, metachainNode, blsKeys[0]) + testBLSKeyStaked(t, metachainNode, blsKeys[1]) +} + +// Test description: +// Withdraw unstaked funds before unbonding period should return error +// +// Internal test scenario #28 +func TestChainSimulator_DirectStakingNodes_WithdrawUnstakedFundsBeforeUnbonding(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + roundDurationInMillis := uint64(6000) + roundsPerEpoch := core.OptionalUint64{ + HasValue: true, + Value: 30, + } + + // Test Steps + // 1. Create from the owner of staked nodes a transaction to withdraw the unstaked funds + // 2. 
Check the outcome of the TX & verify new stake state with vmquery ("getUnStakedTokensList") + + t.Run("staking ph 4 is not active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 100 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 101 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 102 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 102 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorDirectStakedWithdrawUnstakedFundsBeforeUnbonding(t, cs, 1) + }) + + t.Run("staking ph 4 step 1 is active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorDirectStakedWithdrawUnstakedFundsBeforeUnbonding(t, cs, 2) + }) + + t.Run("staking ph 4 step 2 is active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorDirectStakedWithdrawUnstakedFundsBeforeUnbonding(t, cs, 3) + }) + + t.Run("staking ph 4 step 3 is active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + 
ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorDirectStakedWithdrawUnstakedFundsBeforeUnbonding(t, cs, 4) + }) +} + +func testChainSimulatorDirectStakedWithdrawUnstakedFundsBeforeUnbonding(t *testing.T, cs chainSimulatorIntegrationTests.ChainSimulator, targetEpoch int32) { + err := cs.GenerateBlocksUntilEpochIsReached(targetEpoch) + require.Nil(t, err) + + privateKey, blsKeys, err := chainSimulator.GenerateBlsPrivateKeys(1) + require.Nil(t, err) + + err = cs.AddValidatorKeys(privateKey) + require.Nil(t, err) + metachainNode := cs.GetNodeHandler(core.MetachainShardId) + + mintValue := big.NewInt(10000) + mintValue = mintValue.Mul(oneEGLD, mintValue) + + validatorOwner, err := cs.GenerateAndMintWalletAddress(core.AllShardId, mintValue) + require.Nil(t, err) + + stakeValue := big.NewInt(0).Mul(oneEGLD, big.NewInt(2600)) + txDataField := fmt.Sprintf("stake@01@%s@%s", blsKeys[0], mockBLSSignature) + txStake := generateTransaction(validatorOwner.Bytes, 0, vm.ValidatorSCAddress, stakeValue, txDataField, gasLimitForStakeOperation) + stakeTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txStake, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, stakeTx) + + err = cs.GenerateBlocks(2) // allow the metachain to finalize the block that contains the staking of the node + require.Nil(t, err) + + testBLSKeyStaked(t, metachainNode, blsKeys[0]) + + shardIDValidatorOwner := cs.GetNodeHandler(0).GetShardCoordinator().ComputeId(validatorOwner.Bytes) + accountValidatorOwner, _, err := cs.GetNodeHandler(shardIDValidatorOwner).GetFacadeHandler().GetAccount(validatorOwner.Bech32, coreAPI.AccountQueryOptions{}) + require.Nil(t, err) + balanceBeforeUnbonding, _ := big.NewInt(0).SetString(accountValidatorOwner.Balance, 10) + + log.Info("Step 1. Create from the owner of staked nodes a transaction to withdraw the unstaked funds") + + unStakeValue := big.NewInt(10) + unStakeValue = unStakeValue.Mul(oneEGLD, unStakeValue) + txDataField = fmt.Sprintf("unStakeTokens@%s", hex.EncodeToString(unStakeValue.Bytes())) + txUnStake := generateTransaction(validatorOwner.Bytes, 1, vm.ValidatorSCAddress, zeroValue, txDataField, gasLimitForStakeOperation) + unStakeTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txUnStake, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, unStakeTx) + + err = cs.GenerateBlocks(2) + require.Nil(t, err) + + // check bls key is still staked + testBLSKeyStaked(t, metachainNode, blsKeys[0]) + + txDataField = fmt.Sprintf("unBondTokens@%s", blsKeys[0]) + txUnBond := generateTransaction(validatorOwner.Bytes, 2, vm.ValidatorSCAddress, zeroValue, txDataField, gasLimitForUnBond) + unBondTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txUnBond, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, unBondTx) + + err = cs.GenerateBlocks(2) + require.Nil(t, err) + + log.Info("Step 2. 
Check the outcome of the TX & verify new stake state with vmquery (`getUnStakedTokensList`)") + + scQuery := &process.SCQuery{ + ScAddress: vm.ValidatorSCAddress, + FuncName: "getUnStakedTokensList", + CallerAddr: vm.ValidatorSCAddress, + CallValue: big.NewInt(0), + Arguments: [][]byte{validatorOwner.Bytes}, + } + result, _, err := metachainNode.GetFacadeHandler().ExecuteSCQuery(scQuery) + require.Nil(t, err) + require.Equal(t, okReturnCode, result.ReturnCode) + + expectedUnStaked := big.NewInt(10) + expectedUnStaked = expectedUnStaked.Mul(oneEGLD, expectedUnStaked) + require.Equal(t, expectedUnStaked.String(), big.NewInt(0).SetBytes(result.ReturnData[0]).String()) + + // the owner balance should decrease only with the txs fee + accountValidatorOwner, _, err = cs.GetNodeHandler(shardIDValidatorOwner).GetFacadeHandler().GetAccount(validatorOwner.Bech32, coreAPI.AccountQueryOptions{}) + require.Nil(t, err) + balanceAfterUnbonding, _ := big.NewInt(0).SetString(accountValidatorOwner.Balance, 10) + + txsFee, _ := big.NewInt(0).SetString(unBondTx.Fee, 10) + balanceAfterUnbondingWithFee := big.NewInt(0).Add(balanceAfterUnbonding, txsFee) + + txsFee, _ = big.NewInt(0).SetString(unStakeTx.Fee, 10) + balanceAfterUnbondingWithFee.Add(balanceAfterUnbondingWithFee, txsFee) + + txsFee, _ = big.NewInt(0).SetString(stakeTx.Fee, 10) + balanceAfterUnbondingWithFee.Add(balanceAfterUnbondingWithFee, txsFee) + + require.Equal(t, 1, balanceAfterUnbondingWithFee.Cmp(balanceBeforeUnbonding)) +} + +// Test description: +// Withdraw unstaked funds in first available withdraw epoch +// +// Internal test scenario #29 +func TestChainSimulator_DirectStakingNodes_WithdrawUnstakedInWithdrawEpoch(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + roundDurationInMillis := uint64(6000) + roundsPerEpoch := core.OptionalUint64{ + HasValue: true, + Value: 30, + } + + // Test Steps + // 1. Wait for the unbonding epoch to start + // 2. Create from the owner of staked nodes a transaction to withdraw the unstaked funds + // 3. 
Check the outcome of the TX & verify new stake state with vmquery ("getUnStakedTokensList") + + t.Run("staking ph 4 is not active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 100 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 101 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 102 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 102 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorDirectStakedWithdrawUnstakedFundsInFirstEpoch(t, cs, 1) + }) + + t.Run("staking ph 4 step 1 is active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorDirectStakedWithdrawUnstakedFundsInFirstEpoch(t, cs, 2) + }) + + t.Run("staking ph 4 step 2 is active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorDirectStakedWithdrawUnstakedFundsInFirstEpoch(t, cs, 3) + }) + + t.Run("staking ph 4 step 3 is active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: 
api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorDirectStakedWithdrawUnstakedFundsInFirstEpoch(t, cs, 4) + }) +} + +func testChainSimulatorDirectStakedWithdrawUnstakedFundsInFirstEpoch(t *testing.T, cs chainSimulatorIntegrationTests.ChainSimulator, targetEpoch int32) { + err := cs.GenerateBlocksUntilEpochIsReached(targetEpoch) + require.Nil(t, err) + + privateKey, blsKeys, err := chainSimulator.GenerateBlsPrivateKeys(1) + require.Nil(t, err) + + err = cs.AddValidatorKeys(privateKey) + require.Nil(t, err) + metachainNode := cs.GetNodeHandler(core.MetachainShardId) + + mintValue := big.NewInt(10000) + mintValue = mintValue.Mul(oneEGLD, mintValue) + + validatorOwner, err := cs.GenerateAndMintWalletAddress(core.AllShardId, mintValue) + require.Nil(t, err) + + stakeValue := big.NewInt(0).Mul(oneEGLD, big.NewInt(2600)) + txDataField := fmt.Sprintf("stake@01@%s@%s", blsKeys[0], mockBLSSignature) + txStake := generateTransaction(validatorOwner.Bytes, 0, vm.ValidatorSCAddress, stakeValue, txDataField, gasLimitForStakeOperation) + stakeTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txStake, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, stakeTx) + + err = cs.GenerateBlocks(2) // allow the metachain to finalize the block that contains the staking of the node + require.Nil(t, err) + + testBLSKeyStaked(t, metachainNode, blsKeys[0]) + + shardIDValidatorOwner := cs.GetNodeHandler(0).GetShardCoordinator().ComputeId(validatorOwner.Bytes) + accountValidatorOwner, _, err := cs.GetNodeHandler(shardIDValidatorOwner).GetFacadeHandler().GetAccount(validatorOwner.Bech32, coreAPI.AccountQueryOptions{}) + require.Nil(t, err) + balanceBeforeUnbonding, _ := big.NewInt(0).SetString(accountValidatorOwner.Balance, 10) + + unStakeValue := big.NewInt(10) + unStakeValue = unStakeValue.Mul(oneEGLD, unStakeValue) + txDataField = fmt.Sprintf("unStakeTokens@%s", hex.EncodeToString(unStakeValue.Bytes())) + txUnStake := generateTransaction(validatorOwner.Bytes, 1, vm.ValidatorSCAddress, zeroValue, txDataField, gasLimitForStakeOperation) + unStakeTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txUnStake, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, unStakeTx) + + err = cs.GenerateBlocks(2) + require.Nil(t, err) + + // check bls key is still staked + testBLSKeyStaked(t, metachainNode, blsKeys[0]) + + scQuery := &process.SCQuery{ + ScAddress: vm.ValidatorSCAddress, + FuncName: "getUnStakedTokensList", + CallerAddr: vm.ValidatorSCAddress, + CallValue: big.NewInt(0), + Arguments: [][]byte{validatorOwner.Bytes}, + } + result, _, err := metachainNode.GetFacadeHandler().ExecuteSCQuery(scQuery) + require.Nil(t, err) + require.Equal(t, okReturnCode, result.ReturnCode) + + expectedUnStaked := big.NewInt(10) + expectedUnStaked = expectedUnStaked.Mul(oneEGLD, expectedUnStaked) + require.Equal(t, expectedUnStaked.String(), big.NewInt(0).SetBytes(result.ReturnData[0]).String()) + + log.Info("Step 1. 
Wait for the unbonding epoch to start")
+
+	err = cs.GenerateBlocksUntilEpochIsReached(targetEpoch + 1)
+	require.Nil(t, err)
+
+	log.Info("Step 2. Create from the owner of staked nodes a transaction to withdraw the unstaked funds")
+
+	txDataField = fmt.Sprintf("unBondTokens@%s", blsKeys[0])
+	txUnBond := generateTransaction(validatorOwner.Bytes, 2, vm.ValidatorSCAddress, zeroValue, txDataField, gasLimitForUnBond)
+	unBondTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txUnBond, maxNumOfBlockToGenerateWhenExecutingTx)
+	require.Nil(t, err)
+	require.NotNil(t, unBondTx)
+
+	err = cs.GenerateBlocks(2)
+	require.Nil(t, err)
+
+	log.Info("Step 3. Check the outcome of the TX & verify new stake state with vmquery (`getUnStakedTokensList`)")
+
+	scQuery = &process.SCQuery{
+		ScAddress:  vm.ValidatorSCAddress,
+		FuncName:   "getTotalStaked",
+		CallerAddr: vm.ValidatorSCAddress,
+		CallValue:  big.NewInt(0),
+		Arguments:  [][]byte{validatorOwner.Bytes},
+	}
+	result, _, err = metachainNode.GetFacadeHandler().ExecuteSCQuery(scQuery)
+	require.Nil(t, err)
+	require.Equal(t, okReturnCode, result.ReturnCode)
+
+	expectedStaked := big.NewInt(2590)
+	expectedStaked = expectedStaked.Mul(oneEGLD, expectedStaked)
+	require.Equal(t, expectedStaked.String(), string(result.ReturnData[0]))
+
+	// the owner balance should increase by (10 EGLD - tx fees)
+	accountValidatorOwner, _, err = cs.GetNodeHandler(shardIDValidatorOwner).GetFacadeHandler().GetAccount(validatorOwner.Bech32, coreAPI.AccountQueryOptions{})
+	require.Nil(t, err)
+	balanceAfterUnbonding, _ := big.NewInt(0).SetString(accountValidatorOwner.Balance, 10)
+
+	// subtract the unbonded value
+	balanceAfterUnbonding.Sub(balanceAfterUnbonding, unStakeValue)
+
+	txsFee, _ := big.NewInt(0).SetString(unBondTx.Fee, 10)
+	balanceAfterUnbondingWithFee := big.NewInt(0).Add(balanceAfterUnbonding, txsFee)
+
+	txsFee, _ = big.NewInt(0).SetString(unStakeTx.Fee, 10)
+	balanceAfterUnbondingWithFee.Add(balanceAfterUnbondingWithFee, txsFee)
+
+	txsFee, _ = big.NewInt(0).SetString(stakeTx.Fee, 10)
+	balanceAfterUnbondingWithFee.Add(balanceAfterUnbondingWithFee, txsFee)
+
+	require.Equal(t, 1, balanceAfterUnbondingWithFee.Cmp(balanceBeforeUnbonding))
+}
+
+// Test description:
+// Unstaking funds in different batches allows correct withdrawal for each batch
+// at the corresponding epoch.
+//
+// Internal test scenario #30
+func TestChainSimulator_DirectStakingNodes_WithdrawUnstakedInBatches(t *testing.T) {
+	if testing.Short() {
+		t.Skip("this is not a short test")
+	}
+
+	roundDurationInMillis := uint64(6000)
+	roundsPerEpoch := core.OptionalUint64{
+		HasValue: true,
+		Value:    30,
+	}
+
+	// Test Steps
+	// 1. Create 3 transactions for unstaking: the first one unstaking 11 EGLD, the second one unstaking 12 EGLD and the third one unstaking 13 EGLD.
+	// 2. Send the transactions in consecutive epochs, one TX in each epoch.
+	// 3. Wait for the epoch when the first tx's unbonding period ends.
+	// 4. Create a transaction for withdraw and send it to the network
+	// 5. Wait for an epoch
+	// 6. Create another transaction for withdraw and send it to the network
+	// 7. Wait for an epoch
+	// 8. Create another transaction for withdraw and send it to the network
+
+	t.Run("staking ph 4 is not active", func(t *testing.T) {
+		cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{
+			BypassTxSignatureCheck:   false,
+			TempDir:                  t.TempDir(),
+			PathToInitialConfig:      defaultPathToInitialConfig,
+			NumOfShards:              3,
+			GenesisTimestamp:         time.Now().Unix(),
+			RoundDurationInMillis:    roundDurationInMillis,
+			RoundsPerEpoch:           roundsPerEpoch,
+			ApiInterface:             api.NewNoApiInterface(),
+			MinNodesPerShard:         3,
+			MetaChainMinNodes:        3,
+			NumNodesWaitingListMeta:  3,
+			NumNodesWaitingListShard: 3,
+			AlterConfigsFunction: func(cfg *config.Configs) {
+				cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 100
+				cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 101
+				cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 102
+
+				cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 102
+
+				cfg.SystemSCConfig.StakingSystemSCConfig.UnBondPeriodInEpochs = 6
+			},
+		})
+		require.Nil(t, err)
+		require.NotNil(t, cs)
+
+		defer cs.Close()
+
+		testChainSimulatorDirectStakedWithdrawUnstakedFundsInBatches(t, cs, 1)
+	})
+
+	t.Run("staking ph 4 step 1 is active", func(t *testing.T) {
+		cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{
+			BypassTxSignatureCheck:   false,
+			TempDir:                  t.TempDir(),
+			PathToInitialConfig:      defaultPathToInitialConfig,
+			NumOfShards:              3,
+			GenesisTimestamp:         time.Now().Unix(),
+			RoundDurationInMillis:    roundDurationInMillis,
+			RoundsPerEpoch:           roundsPerEpoch,
+			ApiInterface:             api.NewNoApiInterface(),
+			MinNodesPerShard:         3,
+			MetaChainMinNodes:        3,
+			NumNodesWaitingListMeta:  3,
+			NumNodesWaitingListShard: 3,
+			AlterConfigsFunction: func(cfg *config.Configs) {
+				cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2
+				cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3
+				cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4
+
+				cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4
+
+				cfg.SystemSCConfig.StakingSystemSCConfig.UnBondPeriodInEpochs = 6
+			},
+		})
+		require.Nil(t, err)
+		require.NotNil(t, cs)
+
+		defer cs.Close()
+
+		testChainSimulatorDirectStakedWithdrawUnstakedFundsInBatches(t, cs, 2)
+	})
+
+	t.Run("staking ph 4 step 2 is active", func(t *testing.T) {
+		cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{
+			BypassTxSignatureCheck:   false,
+			TempDir:                  t.TempDir(),
+			PathToInitialConfig:      defaultPathToInitialConfig,
+			NumOfShards:              3,
+			GenesisTimestamp:         time.Now().Unix(),
+			RoundDurationInMillis:    roundDurationInMillis,
+			RoundsPerEpoch:           roundsPerEpoch,
+			ApiInterface:             api.NewNoApiInterface(),
+			MinNodesPerShard:         3,
+			MetaChainMinNodes:        3,
+			NumNodesWaitingListMeta:  3,
+			NumNodesWaitingListShard: 3,
+			AlterConfigsFunction: func(cfg *config.Configs) {
+				cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2
+				cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3
+				cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4
+
+				cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4
+
+				cfg.SystemSCConfig.StakingSystemSCConfig.UnBondPeriodInEpochs = 6
+			},
+		})
+		require.Nil(t, err)
+		require.NotNil(t, cs)
+
+		defer cs.Close()
+
+		testChainSimulatorDirectStakedWithdrawUnstakedFundsInBatches(t, cs, 3)
+	})
+
+	t.Run("staking ph 4 step 3 is active", func(t *testing.T) {
+		cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{
+			BypassTxSignatureCheck: false,
+			TempDir:                t.TempDir(),
+			PathToInitialConfig:
defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + + cfg.SystemSCConfig.StakingSystemSCConfig.UnBondPeriodInEpochs = 6 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorDirectStakedWithdrawUnstakedFundsInBatches(t, cs, 4) + }) +} + +func testChainSimulatorDirectStakedWithdrawUnstakedFundsInBatches(t *testing.T, cs chainSimulatorIntegrationTests.ChainSimulator, targetEpoch int32) { + err := cs.GenerateBlocksUntilEpochIsReached(targetEpoch) + require.Nil(t, err) + + privateKey, blsKeys, err := chainSimulator.GenerateBlsPrivateKeys(1) + require.Nil(t, err) + + err = cs.AddValidatorKeys(privateKey) + require.Nil(t, err) + metachainNode := cs.GetNodeHandler(core.MetachainShardId) + + mintValue := big.NewInt(2700) + mintValue = mintValue.Mul(oneEGLD, mintValue) + + validatorOwner, err := cs.GenerateAndMintWalletAddress(core.AllShardId, mintValue) + require.Nil(t, err) + + stakeValue := big.NewInt(0).Mul(oneEGLD, big.NewInt(2600)) + txDataField := fmt.Sprintf("stake@01@%s@%s", blsKeys[0], mockBLSSignature) + txStake := generateTransaction(validatorOwner.Bytes, 0, vm.ValidatorSCAddress, stakeValue, txDataField, gasLimitForStakeOperation) + stakeTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txStake, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, stakeTx) + + stakeTxFee, _ := big.NewInt(0).SetString(stakeTx.Fee, 10) + + err = cs.GenerateBlocks(2) + require.Nil(t, err) + + testBLSKeyStaked(t, metachainNode, blsKeys[0]) + + shardIDValidatorOwner := cs.GetNodeHandler(0).GetShardCoordinator().ComputeId(validatorOwner.Bytes) + accountValidatorOwner, _, err := cs.GetNodeHandler(shardIDValidatorOwner).GetFacadeHandler().GetAccount(validatorOwner.Bech32, coreAPI.AccountQueryOptions{}) + require.Nil(t, err) + balanceBeforeUnbonding, _ := big.NewInt(0).SetString(accountValidatorOwner.Balance, 10) + + log.Info("Step 1. Create 3 transactions for unstaking: first one unstaking 11 egld, second one unstaking 12 egld and third one unstaking 13 egld.") + log.Info("Step 2. 
Send the transactions in consecutive epochs, one TX in each epoch.") + + unStakeValue1 := big.NewInt(11) + unStakeValue1 = unStakeValue1.Mul(oneEGLD, unStakeValue1) + txDataField = fmt.Sprintf("unStakeTokens@%s", hex.EncodeToString(unStakeValue1.Bytes())) + txUnStake := generateTransaction(validatorOwner.Bytes, 1, vm.ValidatorSCAddress, zeroValue, txDataField, gasLimitForStakeOperation) + unStakeTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txUnStake, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, unStakeTx) + + unStakeTxFee, _ := big.NewInt(0).SetString(unStakeTx.Fee, 10) + + testEpoch := targetEpoch + 1 + err = cs.GenerateBlocksUntilEpochIsReached(testEpoch) + require.Nil(t, err) + + unStakeValue2 := big.NewInt(12) + unStakeValue2 = unStakeValue2.Mul(oneEGLD, unStakeValue2) + txDataField = fmt.Sprintf("unStakeTokens@%s", hex.EncodeToString(unStakeValue2.Bytes())) + txUnStake = generateTransaction(validatorOwner.Bytes, 2, vm.ValidatorSCAddress, zeroValue, txDataField, gasLimitForStakeOperation) + unStakeTx, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(txUnStake, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, unStakeTx) + + testEpoch++ + err = cs.GenerateBlocksUntilEpochIsReached(testEpoch) + require.Nil(t, err) + + unStakeValue3 := big.NewInt(13) + unStakeValue3 = unStakeValue3.Mul(oneEGLD, unStakeValue3) + txDataField = fmt.Sprintf("unStakeTokens@%s", hex.EncodeToString(unStakeValue3.Bytes())) + txUnStake = generateTransaction(validatorOwner.Bytes, 3, vm.ValidatorSCAddress, zeroValue, txDataField, gasLimitForStakeOperation) + unStakeTx, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(txUnStake, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, unStakeTx) + + testEpoch++ + err = cs.GenerateBlocksUntilEpochIsReached(testEpoch) + require.Nil(t, err) + + // check bls key is still staked + testBLSKeyStaked(t, metachainNode, blsKeys[0]) + + scQuery := &process.SCQuery{ + ScAddress: vm.ValidatorSCAddress, + FuncName: "getUnStakedTokensList", + CallerAddr: vm.ValidatorSCAddress, + CallValue: big.NewInt(0), + Arguments: [][]byte{validatorOwner.Bytes}, + } + result, _, err := metachainNode.GetFacadeHandler().ExecuteSCQuery(scQuery) + require.Nil(t, err) + require.Equal(t, okReturnCode, result.ReturnCode) + + expectedUnStaked := big.NewInt(11) + expectedUnStaked = expectedUnStaked.Mul(oneEGLD, expectedUnStaked) + require.Equal(t, expectedUnStaked.String(), big.NewInt(0).SetBytes(result.ReturnData[0]).String()) + + scQuery = &process.SCQuery{ + ScAddress: vm.ValidatorSCAddress, + FuncName: "getTotalStaked", + CallerAddr: vm.ValidatorSCAddress, + CallValue: big.NewInt(0), + Arguments: [][]byte{validatorOwner.Bytes}, + } + result, _, err = metachainNode.GetFacadeHandler().ExecuteSCQuery(scQuery) + require.Nil(t, err) + require.Equal(t, okReturnCode, result.ReturnCode) + + expectedStaked := big.NewInt(2600 - 11 - 12 - 13) + expectedStaked = expectedStaked.Mul(oneEGLD, expectedStaked) + require.Equal(t, expectedStaked.String(), string(result.ReturnData[0])) + + log.Info("Step 3. Wait for the unbonding epoch to start") + + testEpoch += 3 + err = cs.GenerateBlocksUntilEpochIsReached(testEpoch) + require.Nil(t, err) + + log.Info("Step 4.1. 
Create from the owner of staked nodes a transaction to withdraw the unstaked funds") + + txDataField = fmt.Sprintf("unBondTokens@%s", blsKeys[0]) + txUnBond := generateTransaction(validatorOwner.Bytes, 4, vm.ValidatorSCAddress, zeroValue, txDataField, gasLimitForUnBond) + unBondTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txUnBond, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, unBondTx) + + unBondTxFee, _ := big.NewInt(0).SetString(unBondTx.Fee, 10) + + err = cs.GenerateBlocks(2) + require.Nil(t, err) + + // the owner balance should increase with the (11 EGLD - tx fee) + accountValidatorOwner, _, err = cs.GetNodeHandler(shardIDValidatorOwner).GetFacadeHandler().GetAccount(validatorOwner.Bech32, coreAPI.AccountQueryOptions{}) + require.Nil(t, err) + balanceAfterUnbonding, _ := big.NewInt(0).SetString(accountValidatorOwner.Balance, 10) + + balanceAfterUnbonding.Sub(balanceAfterUnbonding, unStakeValue1) + + txsFee := big.NewInt(0) + + txsFee.Add(txsFee, stakeTxFee) + txsFee.Add(txsFee, unBondTxFee) + txsFee.Add(txsFee, unStakeTxFee) + txsFee.Add(txsFee, unStakeTxFee) + txsFee.Add(txsFee, unStakeTxFee) + + balanceAfterUnbonding.Add(balanceAfterUnbonding, txsFee) + + require.Equal(t, 1, balanceAfterUnbonding.Cmp(balanceBeforeUnbonding)) + + log.Info("Step 4.2. Create from the owner of staked nodes a transaction to withdraw the unstaked funds") + + testEpoch++ + err = cs.GenerateBlocksUntilEpochIsReached(testEpoch) + require.Nil(t, err) + + txDataField = fmt.Sprintf("unBondTokens@%s", blsKeys[0]) + txUnBond = generateTransaction(validatorOwner.Bytes, 5, vm.ValidatorSCAddress, zeroValue, txDataField, gasLimitForUnBond) + unBondTx, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(txUnBond, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, unBondTx) + + err = cs.GenerateBlocks(2) + require.Nil(t, err) + + // the owner balance should increase with the (11+12 EGLD - tx fee) + accountValidatorOwner, _, err = cs.GetNodeHandler(shardIDValidatorOwner).GetFacadeHandler().GetAccount(validatorOwner.Bech32, coreAPI.AccountQueryOptions{}) + require.Nil(t, err) + balanceAfterUnbonding, _ = big.NewInt(0).SetString(accountValidatorOwner.Balance, 10) + + balanceAfterUnbonding.Sub(balanceAfterUnbonding, unStakeValue1) + balanceAfterUnbonding.Sub(balanceAfterUnbonding, unStakeValue2) + + txsFee.Add(txsFee, unBondTxFee) + balanceAfterUnbonding.Add(balanceAfterUnbonding, txsFee) + + require.Equal(t, 1, balanceAfterUnbonding.Cmp(balanceBeforeUnbonding)) + + log.Info("Step 4.3. 
Create from the owner of staked nodes a transaction to withdraw the unstaked funds") + + testEpoch++ + err = cs.GenerateBlocksUntilEpochIsReached(testEpoch) + require.Nil(t, err) + + txDataField = fmt.Sprintf("unBondTokens@%s", blsKeys[0]) + txUnBond = generateTransaction(validatorOwner.Bytes, 6, vm.ValidatorSCAddress, zeroValue, txDataField, gasLimitForUnBond) + unBondTx, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(txUnBond, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, unBondTx) + + err = cs.GenerateBlocks(2) + require.Nil(t, err) + + // the owner balance should increase with the (11+12+13 EGLD - tx fee) + accountValidatorOwner, _, err = cs.GetNodeHandler(shardIDValidatorOwner).GetFacadeHandler().GetAccount(validatorOwner.Bech32, coreAPI.AccountQueryOptions{}) + require.Nil(t, err) + balanceAfterUnbonding, _ = big.NewInt(0).SetString(accountValidatorOwner.Balance, 10) + + balanceAfterUnbonding.Sub(balanceAfterUnbonding, unStakeValue1) + balanceAfterUnbonding.Sub(balanceAfterUnbonding, unStakeValue2) + balanceAfterUnbonding.Sub(balanceAfterUnbonding, unStakeValue3) + + txsFee.Add(txsFee, unBondTxFee) + balanceAfterUnbonding.Add(balanceAfterUnbonding, txsFee) + + require.Equal(t, 1, balanceAfterUnbonding.Cmp(balanceBeforeUnbonding)) +} + +// Test description: +// Unstaking funds in different batches in the same epoch allows correct withdrawal in the correct epoch +// +// Internal test scenario #31 +func TestChainSimulator_DirectStakingNodes_WithdrawUnstakedInEpoch(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + roundDurationInMillis := uint64(6000) + roundsPerEpoch := core.OptionalUint64{ + HasValue: true, + Value: 30, + } + + // Test Steps + // 1. Create 3 transactions for unstaking: first one unstaking 11 egld, second one unstaking 12 egld and third one unstaking 13 egld. + // 2. Send the transactions consecutively in the same epoch + // 3. Wait for the epoch when unbonding period ends. + // 4. 
Create a transaction for withdraw and send it to the network + + t.Run("staking ph 4 is not active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 100 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 101 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 102 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 102 + + cfg.SystemSCConfig.StakingSystemSCConfig.UnBondPeriodInEpochs = 3 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorDirectStakedWithdrawUnstakedFundsInEpoch(t, cs, 1) + }) + + t.Run("staking ph 4 step 1 is active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + + cfg.SystemSCConfig.StakingSystemSCConfig.UnBondPeriodInEpochs = 3 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorDirectStakedWithdrawUnstakedFundsInEpoch(t, cs, 2) + }) + + t.Run("staking ph 4 step 2 is active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + + cfg.SystemSCConfig.StakingSystemSCConfig.UnBondPeriodInEpochs = 3 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorDirectStakedWithdrawUnstakedFundsInEpoch(t, cs, 3) + }) + + t.Run("staking ph 4 step 3 is active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: 
defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + + cfg.SystemSCConfig.StakingSystemSCConfig.UnBondPeriodInEpochs = 3 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorDirectStakedWithdrawUnstakedFundsInEpoch(t, cs, 4) + }) +} + +func testChainSimulatorDirectStakedWithdrawUnstakedFundsInEpoch(t *testing.T, cs chainSimulatorIntegrationTests.ChainSimulator, targetEpoch int32) { + err := cs.GenerateBlocksUntilEpochIsReached(targetEpoch) + require.Nil(t, err) + + privateKey, blsKeys, err := chainSimulator.GenerateBlsPrivateKeys(1) + require.Nil(t, err) + + err = cs.AddValidatorKeys(privateKey) + require.Nil(t, err) + metachainNode := cs.GetNodeHandler(core.MetachainShardId) + + mintValue := big.NewInt(2700) + mintValue = mintValue.Mul(oneEGLD, mintValue) + + validatorOwner, err := cs.GenerateAndMintWalletAddress(core.AllShardId, mintValue) + require.Nil(t, err) + + stakeValue := big.NewInt(0).Mul(oneEGLD, big.NewInt(2600)) + txDataField := fmt.Sprintf("stake@01@%s@%s", blsKeys[0], mockBLSSignature) + txStake := generateTransaction(validatorOwner.Bytes, 0, vm.ValidatorSCAddress, stakeValue, txDataField, gasLimitForStakeOperation) + stakeTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txStake, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, stakeTx) + + stakeTxFee, _ := big.NewInt(0).SetString(stakeTx.Fee, 10) + + err = cs.GenerateBlocks(2) + require.Nil(t, err) + + testBLSKeyStaked(t, metachainNode, blsKeys[0]) + + shardIDValidatorOwner := cs.GetNodeHandler(0).GetShardCoordinator().ComputeId(validatorOwner.Bytes) + accountValidatorOwner, _, err := cs.GetNodeHandler(shardIDValidatorOwner).GetFacadeHandler().GetAccount(validatorOwner.Bech32, coreAPI.AccountQueryOptions{}) + require.Nil(t, err) + balanceBeforeUnbonding, _ := big.NewInt(0).SetString(accountValidatorOwner.Balance, 10) + + log.Info("Step 1. Create 3 transactions for unstaking: first one unstaking 11 egld, second one unstaking 12 egld and third one unstaking 13 egld.") + log.Info("Step 2. 
Send the transactions consecutively in the same epoch.") + + unStakeValue1 := big.NewInt(11) + unStakeValue1 = unStakeValue1.Mul(oneEGLD, unStakeValue1) + txDataField = fmt.Sprintf("unStakeTokens@%s", hex.EncodeToString(unStakeValue1.Bytes())) + txUnStake := generateTransaction(validatorOwner.Bytes, 1, vm.ValidatorSCAddress, zeroValue, txDataField, gasLimitForStakeOperation) + unStakeTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txUnStake, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, unStakeTx) + + unStakeTxFee, _ := big.NewInt(0).SetString(unStakeTx.Fee, 10) + + unStakeValue2 := big.NewInt(12) + unStakeValue2 = unStakeValue2.Mul(oneEGLD, unStakeValue2) + txDataField = fmt.Sprintf("unStakeTokens@%s", hex.EncodeToString(unStakeValue2.Bytes())) + txUnStake = generateTransaction(validatorOwner.Bytes, 2, vm.ValidatorSCAddress, zeroValue, txDataField, gasLimitForStakeOperation) + unStakeTx, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(txUnStake, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, unStakeTx) + + unStakeValue3 := big.NewInt(13) + unStakeValue3 = unStakeValue3.Mul(oneEGLD, unStakeValue3) + txDataField = fmt.Sprintf("unStakeTokens@%s", hex.EncodeToString(unStakeValue3.Bytes())) + txUnStake = generateTransaction(validatorOwner.Bytes, 3, vm.ValidatorSCAddress, zeroValue, txDataField, gasLimitForStakeOperation) + unStakeTx, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(txUnStake, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, unStakeTx) + + // check bls key is still staked + testBLSKeyStaked(t, metachainNode, blsKeys[0]) + + scQuery := &process.SCQuery{ + ScAddress: vm.ValidatorSCAddress, + FuncName: "getUnStakedTokensList", + CallerAddr: vm.ValidatorSCAddress, + CallValue: big.NewInt(0), + Arguments: [][]byte{validatorOwner.Bytes}, + } + result, _, err := metachainNode.GetFacadeHandler().ExecuteSCQuery(scQuery) + require.Nil(t, err) + require.Equal(t, okReturnCode, result.ReturnCode) + + expectedUnStaked := big.NewInt(11 + 12 + 13) + expectedUnStaked = expectedUnStaked.Mul(oneEGLD, expectedUnStaked) + require.Equal(t, expectedUnStaked.String(), big.NewInt(0).SetBytes(result.ReturnData[0]).String()) + + scQuery = &process.SCQuery{ + ScAddress: vm.ValidatorSCAddress, + FuncName: "getTotalStaked", + CallerAddr: vm.ValidatorSCAddress, + CallValue: big.NewInt(0), + Arguments: [][]byte{validatorOwner.Bytes}, + } + result, _, err = metachainNode.GetFacadeHandler().ExecuteSCQuery(scQuery) + require.Nil(t, err) + require.Equal(t, okReturnCode, result.ReturnCode) + + expectedStaked := big.NewInt(2600 - 11 - 12 - 13) + expectedStaked = expectedStaked.Mul(oneEGLD, expectedStaked) + require.Equal(t, expectedStaked.String(), string(result.ReturnData[0])) + + log.Info("Step 3. Wait for the unbonding epoch to start") + + testEpoch := targetEpoch + 3 + err = cs.GenerateBlocksUntilEpochIsReached(testEpoch) + require.Nil(t, err) + + log.Info("Step 4.1. 
Create from the owner of staked nodes a transaction to withdraw the unstaked funds") + + txDataField = fmt.Sprintf("unBondTokens@%s", blsKeys[0]) + txUnBond := generateTransaction(validatorOwner.Bytes, 4, vm.ValidatorSCAddress, zeroValue, txDataField, gasLimitForUnBond) + unBondTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txUnBond, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, unBondTx) + + unBondTxFee, _ := big.NewInt(0).SetString(unBondTx.Fee, 10) + + err = cs.GenerateBlocks(2) + require.Nil(t, err) + + // the owner balance should increase with the (11+12+13 EGLD - tx fee) + accountValidatorOwner, _, err = cs.GetNodeHandler(shardIDValidatorOwner).GetFacadeHandler().GetAccount(validatorOwner.Bech32, coreAPI.AccountQueryOptions{}) + require.Nil(t, err) + balanceAfterUnbonding, _ := big.NewInt(0).SetString(accountValidatorOwner.Balance, 10) + + balanceAfterUnbonding.Sub(balanceAfterUnbonding, unStakeValue1) + balanceAfterUnbonding.Sub(balanceAfterUnbonding, unStakeValue2) + balanceAfterUnbonding.Sub(balanceAfterUnbonding, unStakeValue3) + + txsFee := big.NewInt(0) + + txsFee.Add(txsFee, stakeTxFee) + txsFee.Add(txsFee, unBondTxFee) + txsFee.Add(txsFee, unStakeTxFee) + txsFee.Add(txsFee, unStakeTxFee) + txsFee.Add(txsFee, unStakeTxFee) + + balanceAfterUnbonding.Add(balanceAfterUnbonding, txsFee) + + require.Equal(t, 1, balanceAfterUnbonding.Cmp(balanceBeforeUnbonding)) +} diff --git a/integrationTests/common.go b/integrationTests/common.go new file mode 100644 index 00000000000..e4365471cd7 --- /dev/null +++ b/integrationTests/common.go @@ -0,0 +1,38 @@ +package integrationTests + +import ( + "github.com/multiversx/mx-chain-go/process" + "github.com/multiversx/mx-chain-go/state" + "github.com/multiversx/mx-chain-go/testscommon/stakingcommon" + vmcommon "github.com/multiversx/mx-chain-vm-common-go" +) + +// ProcessSCOutputAccounts will save account changes in accounts db from vmOutput +func ProcessSCOutputAccounts(vmOutput *vmcommon.VMOutput, accountsDB state.AccountsAdapter) error { + outputAccounts := process.SortVMOutputInsideData(vmOutput) + for _, outAcc := range outputAccounts { + acc := stakingcommon.LoadUserAccount(accountsDB, outAcc.Address) + + storageUpdates := process.GetSortedStorageUpdates(outAcc) + for _, storeUpdate := range storageUpdates { + err := acc.SaveKeyValue(storeUpdate.Offset, storeUpdate.Data) + if err != nil { + return err + } + + if outAcc.BalanceDelta != nil && outAcc.BalanceDelta.Cmp(zero) != 0 { + err = acc.AddToBalance(outAcc.BalanceDelta) + if err != nil { + return err + } + } + + err = accountsDB.SaveAccount(acc) + if err != nil { + return err + } + } + } + + return nil +} diff --git a/integrationTests/factory/consensusComponents/consensusComponents_test.go b/integrationTests/factory/consensusComponents/consensusComponents_test.go index 5be694c740d..f560f099705 100644 --- a/integrationTests/factory/consensusComponents/consensusComponents_test.go +++ b/integrationTests/factory/consensusComponents/consensusComponents_test.go @@ -67,6 +67,7 @@ func TestConsensusComponents_Close_ShouldWork(t *testing.T) { managedCoreComponents.NodeTypeProvider(), managedCoreComponents.EnableEpochsHandler(), managedDataComponents.Datapool().CurrentEpochValidatorInfo(), + managedBootstrapComponents.NodesCoordinatorRegistryFactory(), ) require.Nil(t, err) managedStatusComponents, err := nr.CreateManagedStatusComponents( diff --git a/integrationTests/factory/dataComponents/dataComponents_test.go 
b/integrationTests/factory/dataComponents/dataComponents_test.go index 9ebc4a49fc5..c28a41c6543 100644 --- a/integrationTests/factory/dataComponents/dataComponents_test.go +++ b/integrationTests/factory/dataComponents/dataComponents_test.go @@ -13,6 +13,10 @@ import ( ) func TestDataComponents_Create_Close_ShouldWork(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + time.Sleep(time.Second * 4) gc := goroutines.NewGoCounter(goroutines.TestsRelevantGoRoutines) diff --git a/integrationTests/factory/heartbeatComponents/heartbeatComponents_test.go b/integrationTests/factory/heartbeatComponents/heartbeatComponents_test.go index 0bd34fd45e4..9082ce63c06 100644 --- a/integrationTests/factory/heartbeatComponents/heartbeatComponents_test.go +++ b/integrationTests/factory/heartbeatComponents/heartbeatComponents_test.go @@ -67,6 +67,7 @@ func TestHeartbeatComponents_Close_ShouldWork(t *testing.T) { managedCoreComponents.NodeTypeProvider(), managedCoreComponents.EnableEpochsHandler(), managedDataComponents.Datapool().CurrentEpochValidatorInfo(), + managedBootstrapComponents.NodesCoordinatorRegistryFactory(), ) require.Nil(t, err) managedStatusComponents, err := nr.CreateManagedStatusComponents( diff --git a/integrationTests/factory/processComponents/processComponents_test.go b/integrationTests/factory/processComponents/processComponents_test.go index d81d921e74c..2f2c859bc94 100644 --- a/integrationTests/factory/processComponents/processComponents_test.go +++ b/integrationTests/factory/processComponents/processComponents_test.go @@ -68,6 +68,7 @@ func TestProcessComponents_Close_ShouldWork(t *testing.T) { managedCoreComponents.NodeTypeProvider(), managedCoreComponents.EnableEpochsHandler(), managedDataComponents.Datapool().CurrentEpochValidatorInfo(), + managedBootstrapComponents.NodesCoordinatorRegistryFactory(), ) require.Nil(t, err) managedStatusComponents, err := nr.CreateManagedStatusComponents( diff --git a/integrationTests/factory/statusComponents/statusComponents_test.go b/integrationTests/factory/statusComponents/statusComponents_test.go index 9865ce593ce..62e2ad1e289 100644 --- a/integrationTests/factory/statusComponents/statusComponents_test.go +++ b/integrationTests/factory/statusComponents/statusComponents_test.go @@ -68,6 +68,7 @@ func TestStatusComponents_Create_Close_ShouldWork(t *testing.T) { managedCoreComponents.NodeTypeProvider(), managedCoreComponents.EnableEpochsHandler(), managedDataComponents.Datapool().CurrentEpochValidatorInfo(), + managedBootstrapComponents.NodesCoordinatorRegistryFactory(), ) require.Nil(t, err) managedStatusComponents, err := nr.CreateManagedStatusComponents( diff --git a/integrationTests/frontend/wallet/txInterception_test.go b/integrationTests/frontend/wallet/txInterception_test.go index 1cb60ea8a46..1eeacc61f94 100644 --- a/integrationTests/frontend/wallet/txInterception_test.go +++ b/integrationTests/frontend/wallet/txInterception_test.go @@ -15,6 +15,10 @@ import ( const mintingValue = "100000000" func TestInterceptedTxWithoutDataField(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + value := big.NewInt(0) value.SetString("999", 10) @@ -35,6 +39,10 @@ func TestInterceptedTxWithoutDataField(t *testing.T) { } func TestInterceptedTxWithDataField(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + value := big.NewInt(0) value.SetString("999", 10) @@ -55,6 +63,10 @@ func TestInterceptedTxWithDataField(t *testing.T) { } func TestInterceptedTxWithSigningOverTxHash(t 
*testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + value := big.NewInt(0) value.SetString("1000000000000000000", 10) diff --git a/integrationTests/interface.go b/integrationTests/interface.go index abe0b1a7be8..e4be7fe388c 100644 --- a/integrationTests/interface.go +++ b/integrationTests/interface.go @@ -96,6 +96,7 @@ type Facade interface { EncodeAddressPubkey(pk []byte) (string, error) GetThrottlerForEndpoint(endpoint string) (core.Throttler, bool) ValidatorStatisticsApi() (map[string]*validator.ValidatorStatistics, error) + AuctionListApi() ([]*common.AuctionListValidatorAPIResponse, error) ExecuteSCQuery(*process.SCQuery) (*vm.VMOutputApi, api.BlockInfo, error) DecodeAddressPubkey(pk string) ([]byte, error) GetProof(rootHash string, address string) (*common.GetProofResponse, error) @@ -113,6 +114,7 @@ type Facade interface { IsDataTrieMigrated(address string, options api.AccountQueryOptions) (bool, error) GetManagedKeysCount() int GetManagedKeys() []string + GetLoadedKeys() []string GetEligibleManagedKeys() ([]string, error) GetWaitingManagedKeys() ([]string, error) GetWaitingEpochsLeftForPublicKey(publicKey string) (uint32, error) diff --git a/integrationTests/miniNetwork.go b/integrationTests/miniNetwork.go new file mode 100644 index 00000000000..e9c64f5606d --- /dev/null +++ b/integrationTests/miniNetwork.go @@ -0,0 +1,113 @@ +package integrationTests + +import ( + "encoding/hex" + "fmt" + "math/big" + "testing" + + "github.com/multiversx/mx-chain-core-go/data/transaction" +) + +// MiniNetwork is a mini network, useful for some integration tests +type MiniNetwork struct { + Round uint64 + Nonce uint64 + + Nodes []*TestProcessorNode + ShardNode *TestProcessorNode + MetachainNode *TestProcessorNode + Users map[string]*TestWalletAccount +} + +// NewMiniNetwork creates a MiniNetwork +func NewMiniNetwork() *MiniNetwork { + n := &MiniNetwork{} + + nodes := CreateNodes( + 1, + 1, + 1, + ) + + n.Nodes = nodes + n.ShardNode = nodes[0] + n.MetachainNode = nodes[1] + n.Users = make(map[string]*TestWalletAccount) + + return n +} + +// Stop stops the mini network +func (n *MiniNetwork) Stop() { + n.ShardNode.Close() + n.MetachainNode.Close() +} + +// FundAccount funds an account +func (n *MiniNetwork) FundAccount(address []byte, value *big.Int) { + shard := n.MetachainNode.ShardCoordinator.ComputeId(address) + + if shard == n.MetachainNode.ShardCoordinator.SelfId() { + MintAddress(n.MetachainNode.AccntState, address, value) + } else { + MintAddress(n.ShardNode.AccntState, address, value) + } +} + +// AddUser adds a user (account) to the mini network +func (n *MiniNetwork) AddUser(balance *big.Int) *TestWalletAccount { + user := CreateTestWalletAccount(n.ShardNode.ShardCoordinator, 0) + n.Users[string(user.Address)] = user + n.FundAccount(user.Address, balance) + return user +} + +// Start starts the mini network +func (n *MiniNetwork) Start() { + n.Round = 1 + n.Nonce = 1 +} + +// Continue advances processing with a number of rounds +func (n *MiniNetwork) Continue(t *testing.T, numRounds int) { + idxProposers := []int{0, 1} + + for i := int64(0); i < int64(numRounds); i++ { + n.Nonce, n.Round = ProposeAndSyncOneBlock(t, n.Nodes, idxProposers, n.Round, n.Nonce) + } +} + +// SendTransaction sends a transaction +func (n *MiniNetwork) SendTransaction( + senderPubkey []byte, + receiverPubkey []byte, + value *big.Int, + data string, + additionalGasLimit uint64, +) (string, error) { + sender, ok := n.Users[string(senderPubkey)] + if !ok { + return "", fmt.Errorf("unknown 
sender: %s", hex.EncodeToString(senderPubkey)) + } + + tx := &transaction.Transaction{ + Nonce: sender.Nonce, + Value: new(big.Int).Set(value), + SndAddr: sender.Address, + RcvAddr: receiverPubkey, + Data: []byte(data), + GasPrice: MinTxGasPrice, + GasLimit: MinTxGasLimit + uint64(len(data)) + additionalGasLimit, + ChainID: ChainID, + Version: MinTransactionVersion, + } + + txBuff, _ := tx.GetDataForSigning(TestAddressPubkeyConverter, TestTxSignMarshalizer, TestTxSignHasher) + tx.Signature, _ = sender.SingleSigner.Sign(sender.SkTxSign, txBuff) + txHash, err := n.ShardNode.SendTransaction(tx) + + sender.Nonce++ + + return txHash, err +} diff --git a/integrationTests/mock/builtInCostHandlerStub.go b/integrationTests/mock/builtInCostHandlerStub.go deleted file mode 100644 index 4ee3b23b062..00000000000 --- a/integrationTests/mock/builtInCostHandlerStub.go +++ /dev/null @@ -1,24 +0,0 @@ -package mock - -import ( - "github.com/multiversx/mx-chain-core-go/data" -) - -// BuiltInCostHandlerStub - -type BuiltInCostHandlerStub struct { -} - -// ComputeBuiltInCost - -func (b *BuiltInCostHandlerStub) ComputeBuiltInCost(_ data.TransactionWithFeeHandler) uint64 { - return 1 -} - -// IsBuiltInFuncCall - -func (b *BuiltInCostHandlerStub) IsBuiltInFuncCall(_ data.TransactionWithFeeHandler) bool { - return false -} - -// IsInterfaceNil - -func (b *BuiltInCostHandlerStub) IsInterfaceNil() bool { - return b == nil -} diff --git a/integrationTests/mock/epochRewardsCreatorStub.go b/integrationTests/mock/epochRewardsCreatorStub.go deleted file mode 100644 index 22c425f3e41..00000000000 --- a/integrationTests/mock/epochRewardsCreatorStub.go +++ /dev/null @@ -1,109 +0,0 @@ -package mock - -import ( - "math/big" - - "github.com/multiversx/mx-chain-core-go/data" - "github.com/multiversx/mx-chain-core-go/data/block" - "github.com/multiversx/mx-chain-go/epochStart" - "github.com/multiversx/mx-chain-go/state" -) - -// EpochRewardsCreatorStub - -type EpochRewardsCreatorStub struct { - CreateRewardsMiniBlocksCalled func( - metaBlock data.MetaHeaderHandler, validatorsInfo map[uint32][]*state.ValidatorInfo, computedEconomics *block.Economics, - ) (block.MiniBlockSlice, error) - VerifyRewardsMiniBlocksCalled func( - metaBlock data.MetaHeaderHandler, validatorsInfo map[uint32][]*state.ValidatorInfo, computedEconomics *block.Economics, - ) error - CreateMarshalledDataCalled func(body *block.Body) map[string][][]byte - SaveBlockDataToStorageCalled func(metaBlock data.MetaHeaderHandler, body *block.Body) - DeleteBlockDataFromStorageCalled func(metaBlock data.MetaHeaderHandler, body *block.Body) - RemoveBlockDataFromPoolsCalled func(metaBlock data.MetaHeaderHandler, body *block.Body) - GetRewardsTxsCalled func(body *block.Body) map[string]data.TransactionHandler - GetProtocolSustainCalled func() *big.Int - GetLocalTxCacheCalled func() epochStart.TransactionCacher -} - -// GetProtocolSustainabilityRewards - -func (e *EpochRewardsCreatorStub) GetProtocolSustainabilityRewards() *big.Int { - if e.GetProtocolSustainCalled != nil { - return e.GetProtocolSustainCalled() - } - return big.NewInt(0) -} - -// GetLocalTxCache - -func (e *EpochRewardsCreatorStub) GetLocalTxCache() epochStart.TransactionCacher { - if e.GetLocalTxCacheCalled != nil { - return e.GetLocalTxCacheCalled() - } - return &TxForCurrentBlockStub{} -} - -// CreateRewardsMiniBlocks - -func (e *EpochRewardsCreatorStub) CreateRewardsMiniBlocks( - metaBlock data.MetaHeaderHandler, - validatorsInfo map[uint32][]*state.ValidatorInfo, - computedEconomics *block.Economics, -) 
(block.MiniBlockSlice, error) { - if e.CreateRewardsMiniBlocksCalled != nil { - return e.CreateRewardsMiniBlocksCalled(metaBlock, validatorsInfo, computedEconomics) - } - return nil, nil -} - -// GetRewardsTxs - -func (e *EpochRewardsCreatorStub) GetRewardsTxs(body *block.Body) map[string]data.TransactionHandler { - if e.GetRewardsTxsCalled != nil { - return e.GetRewardsTxsCalled(body) - } - return nil -} - -// VerifyRewardsMiniBlocks - -func (e *EpochRewardsCreatorStub) VerifyRewardsMiniBlocks( - metaBlock data.MetaHeaderHandler, - validatorsInfo map[uint32][]*state.ValidatorInfo, - computedEconomics *block.Economics, -) error { - if e.VerifyRewardsMiniBlocksCalled != nil { - return e.VerifyRewardsMiniBlocksCalled(metaBlock, validatorsInfo, computedEconomics) - } - return nil -} - -// CreateMarshalledData - -func (e *EpochRewardsCreatorStub) CreateMarshalledData(body *block.Body) map[string][][]byte { - if e.CreateMarshalledDataCalled != nil { - return e.CreateMarshalledDataCalled(body) - } - return nil -} - -// SaveBlockDataToStorage - -func (e *EpochRewardsCreatorStub) SaveBlockDataToStorage(metaBlock data.MetaHeaderHandler, body *block.Body) { - if e.SaveBlockDataToStorageCalled != nil { - e.SaveBlockDataToStorageCalled(metaBlock, body) - } -} - -// DeleteBlockDataFromStorage - -func (e *EpochRewardsCreatorStub) DeleteBlockDataFromStorage(metaBlock data.MetaHeaderHandler, body *block.Body) { - if e.DeleteBlockDataFromStorageCalled != nil { - e.DeleteBlockDataFromStorageCalled(metaBlock, body) - } -} - -// IsInterfaceNil - -func (e *EpochRewardsCreatorStub) IsInterfaceNil() bool { - return e == nil -} - -// RemoveBlockDataFromPools - -func (e *EpochRewardsCreatorStub) RemoveBlockDataFromPools(metaBlock data.MetaHeaderHandler, body *block.Body) { - if e.RemoveBlockDataFromPoolsCalled != nil { - e.RemoveBlockDataFromPoolsCalled(metaBlock, body) - } -} diff --git a/integrationTests/mock/epochValidatorInfoCreatorStub.go b/integrationTests/mock/epochValidatorInfoCreatorStub.go deleted file mode 100644 index 445d305596e..00000000000 --- a/integrationTests/mock/epochValidatorInfoCreatorStub.go +++ /dev/null @@ -1,86 +0,0 @@ -package mock - -import ( - "github.com/multiversx/mx-chain-core-go/data" - "github.com/multiversx/mx-chain-core-go/data/block" - "github.com/multiversx/mx-chain-go/epochStart" - "github.com/multiversx/mx-chain-go/state" -) - -// EpochValidatorInfoCreatorStub - -type EpochValidatorInfoCreatorStub struct { - CreateValidatorInfoMiniBlocksCalled func(validatorsInfo map[uint32][]*state.ValidatorInfo) (block.MiniBlockSlice, error) - VerifyValidatorInfoMiniBlocksCalled func(miniblocks []*block.MiniBlock, validatorsInfo map[uint32][]*state.ValidatorInfo) error - GetLocalValidatorInfoCacheCalled func() epochStart.ValidatorInfoCacher - CreateMarshalledDataCalled func(body *block.Body) map[string][][]byte - GetValidatorInfoTxsCalled func(body *block.Body) map[string]*state.ShardValidatorInfo - SaveBlockDataToStorageCalled func(metaBlock data.HeaderHandler, body *block.Body) - DeleteBlockDataFromStorageCalled func(metaBlock data.HeaderHandler, body *block.Body) - RemoveBlockDataFromPoolsCalled func(metaBlock data.HeaderHandler, body *block.Body) -} - -// CreateValidatorInfoMiniBlocks - -func (e *EpochValidatorInfoCreatorStub) CreateValidatorInfoMiniBlocks(validatorInfo map[uint32][]*state.ValidatorInfo) (block.MiniBlockSlice, error) { - if e.CreateValidatorInfoMiniBlocksCalled != nil { - return e.CreateValidatorInfoMiniBlocksCalled(validatorInfo) - } - return 
make(block.MiniBlockSlice, 0), nil -} - -// VerifyValidatorInfoMiniBlocks - -func (e *EpochValidatorInfoCreatorStub) VerifyValidatorInfoMiniBlocks(miniBlocks []*block.MiniBlock, validatorsInfo map[uint32][]*state.ValidatorInfo) error { - if e.VerifyValidatorInfoMiniBlocksCalled != nil { - return e.VerifyValidatorInfoMiniBlocksCalled(miniBlocks, validatorsInfo) - } - return nil -} - -// GetLocalValidatorInfoCache - -func (e *EpochValidatorInfoCreatorStub) GetLocalValidatorInfoCache() epochStart.ValidatorInfoCacher { - if e.GetLocalValidatorInfoCacheCalled != nil { - return e.GetLocalValidatorInfoCacheCalled() - } - return nil -} - -// CreateMarshalledData - -func (e *EpochValidatorInfoCreatorStub) CreateMarshalledData(body *block.Body) map[string][][]byte { - if e.CreateMarshalledDataCalled != nil { - return e.CreateMarshalledDataCalled(body) - } - return nil -} - -// GetValidatorInfoTxs - -func (e *EpochValidatorInfoCreatorStub) GetValidatorInfoTxs(body *block.Body) map[string]*state.ShardValidatorInfo { - if e.GetValidatorInfoTxsCalled != nil { - return e.GetValidatorInfoTxsCalled(body) - } - return nil -} - -// SaveBlockDataToStorage - -func (e *EpochValidatorInfoCreatorStub) SaveBlockDataToStorage(metaBlock data.HeaderHandler, body *block.Body) { - if e.SaveBlockDataToStorageCalled != nil { - e.SaveBlockDataToStorageCalled(metaBlock, body) - } -} - -// DeleteBlockDataFromStorage - -func (e *EpochValidatorInfoCreatorStub) DeleteBlockDataFromStorage(metaBlock data.HeaderHandler, body *block.Body) { - if e.DeleteBlockDataFromStorageCalled != nil { - e.DeleteBlockDataFromStorageCalled(metaBlock, body) - } -} - -// IsInterfaceNil - -func (e *EpochValidatorInfoCreatorStub) IsInterfaceNil() bool { - return e == nil -} - -// RemoveBlockDataFromPools - -func (e *EpochValidatorInfoCreatorStub) RemoveBlockDataFromPools(metaBlock data.HeaderHandler, body *block.Body) { - if e.RemoveBlockDataFromPoolsCalled != nil { - e.RemoveBlockDataFromPoolsCalled(metaBlock, body) - } -} diff --git a/integrationTests/mock/validatorStatisticsProcessorStub.go b/integrationTests/mock/validatorStatisticsProcessorStub.go deleted file mode 100644 index 34a0e35cad1..00000000000 --- a/integrationTests/mock/validatorStatisticsProcessorStub.go +++ /dev/null @@ -1,130 +0,0 @@ -package mock - -import ( - "github.com/multiversx/mx-chain-core-go/data" - "github.com/multiversx/mx-chain-go/state" -) - -// ValidatorStatisticsProcessorStub - -type ValidatorStatisticsProcessorStub struct { - UpdatePeerStateCalled func(header data.MetaHeaderHandler) ([]byte, error) - RevertPeerStateCalled func(header data.MetaHeaderHandler) error - GetPeerAccountCalled func(address []byte) (state.PeerAccountHandler, error) - RootHashCalled func() ([]byte, error) - ResetValidatorStatisticsAtNewEpochCalled func(vInfos map[uint32][]*state.ValidatorInfo) error - GetValidatorInfoForRootHashCalled func(rootHash []byte) (map[uint32][]*state.ValidatorInfo, error) - ProcessRatingsEndOfEpochCalled func(validatorInfos map[uint32][]*state.ValidatorInfo, epoch uint32) error - ProcessCalled func(validatorInfo data.ShardValidatorInfoHandler) error - CommitCalled func() ([]byte, error) - PeerAccountToValidatorInfoCalled func(peerAccount state.PeerAccountHandler) *state.ValidatorInfo - SaveNodesCoordinatorUpdatesCalled func(epoch uint32) (bool, error) -} - -// SaveNodesCoordinatorUpdates - -func (vsp *ValidatorStatisticsProcessorStub) SaveNodesCoordinatorUpdates(epoch uint32) (bool, error) { - if vsp.SaveNodesCoordinatorUpdatesCalled != nil { - return 
vsp.SaveNodesCoordinatorUpdatesCalled(epoch) - } - return false, nil -} - -// PeerAccountToValidatorInfo - -func (vsp *ValidatorStatisticsProcessorStub) PeerAccountToValidatorInfo(peerAccount state.PeerAccountHandler) *state.ValidatorInfo { - if vsp.PeerAccountToValidatorInfoCalled != nil { - return vsp.PeerAccountToValidatorInfoCalled(peerAccount) - } - return nil -} - -// Process - -func (vsp *ValidatorStatisticsProcessorStub) Process(validatorInfo data.ShardValidatorInfoHandler) error { - if vsp.ProcessCalled != nil { - return vsp.ProcessCalled(validatorInfo) - } - - return nil -} - -// Commit - -func (vsp *ValidatorStatisticsProcessorStub) Commit() ([]byte, error) { - if vsp.CommitCalled != nil { - return vsp.CommitCalled() - } - - return nil, nil -} - -// ProcessRatingsEndOfEpoch - -func (vsp *ValidatorStatisticsProcessorStub) ProcessRatingsEndOfEpoch(validatorInfos map[uint32][]*state.ValidatorInfo, epoch uint32) error { - if vsp.ProcessRatingsEndOfEpochCalled != nil { - return vsp.ProcessRatingsEndOfEpochCalled(validatorInfos, epoch) - } - return nil -} - -// ResetValidatorStatisticsAtNewEpoch - -func (vsp *ValidatorStatisticsProcessorStub) ResetValidatorStatisticsAtNewEpoch(vInfos map[uint32][]*state.ValidatorInfo) error { - if vsp.ResetValidatorStatisticsAtNewEpochCalled != nil { - return vsp.ResetValidatorStatisticsAtNewEpochCalled(vInfos) - } - return nil -} - -// GetValidatorInfoForRootHash - -func (vsp *ValidatorStatisticsProcessorStub) GetValidatorInfoForRootHash(rootHash []byte) (map[uint32][]*state.ValidatorInfo, error) { - if vsp.GetValidatorInfoForRootHashCalled != nil { - return vsp.GetValidatorInfoForRootHashCalled(rootHash) - } - return nil, nil -} - -// UpdatePeerState - -func (vsp *ValidatorStatisticsProcessorStub) UpdatePeerState(header data.MetaHeaderHandler, _ map[string]data.HeaderHandler) ([]byte, error) { - if vsp.UpdatePeerStateCalled != nil { - return vsp.UpdatePeerStateCalled(header) - } - return nil, nil -} - -// RevertPeerState - -func (vsp *ValidatorStatisticsProcessorStub) RevertPeerState(header data.MetaHeaderHandler) error { - if vsp.RevertPeerStateCalled != nil { - return vsp.RevertPeerStateCalled(header) - } - return nil -} - -// RootHash - -func (vsp *ValidatorStatisticsProcessorStub) RootHash() ([]byte, error) { - if vsp.RootHashCalled != nil { - return vsp.RootHashCalled() - } - return nil, nil -} - -// GetExistingPeerAccount - -func (vsp *ValidatorStatisticsProcessorStub) GetExistingPeerAccount(address []byte) (state.PeerAccountHandler, error) { - if vsp.GetPeerAccountCalled != nil { - return vsp.GetPeerAccountCalled(address) - } - - return nil, nil -} - -// DisplayRatings - -func (vsp *ValidatorStatisticsProcessorStub) DisplayRatings(_ uint32) { -} - -// SetLastFinalizedRootHash - -func (vsp *ValidatorStatisticsProcessorStub) SetLastFinalizedRootHash(_ []byte) { -} - -// LastFinalizedRootHash - -func (vsp *ValidatorStatisticsProcessorStub) LastFinalizedRootHash() []byte { - return nil -} - -// IsInterfaceNil - -func (vsp *ValidatorStatisticsProcessorStub) IsInterfaceNil() bool { - return false -} diff --git a/integrationTests/mock/validatorsProviderStub.go b/integrationTests/mock/validatorsProviderStub.go deleted file mode 100644 index 98ea652340b..00000000000 --- a/integrationTests/mock/validatorsProviderStub.go +++ /dev/null @@ -1,28 +0,0 @@ -package mock - -import ( - "github.com/multiversx/mx-chain-core-go/data/validator" -) - -// ValidatorsProviderStub - -type ValidatorsProviderStub struct { - GetLatestValidatorsCalled func() 
map[string]*validator.ValidatorStatistics -} - -// GetLatestValidators - -func (vp *ValidatorsProviderStub) GetLatestValidators() map[string]*validator.ValidatorStatistics { - if vp.GetLatestValidatorsCalled != nil { - return vp.GetLatestValidatorsCalled() - } - return nil -} - -// Close - -func (vp *ValidatorsProviderStub) Close() error { - return nil -} - -// IsInterfaceNil - -func (vp *ValidatorsProviderStub) IsInterfaceNil() bool { - return vp == nil -} diff --git a/integrationTests/multiShard/block/executingMiniblocks/executingMiniblocks_test.go b/integrationTests/multiShard/block/executingMiniblocks/executingMiniblocks_test.go index cf104b736db..eec61878296 100644 --- a/integrationTests/multiShard/block/executingMiniblocks/executingMiniblocks_test.go +++ b/integrationTests/multiShard/block/executingMiniblocks/executingMiniblocks_test.go @@ -5,7 +5,6 @@ import ( "encoding/hex" "fmt" "math/big" - "sync" "testing" "time" @@ -14,13 +13,14 @@ import ( "github.com/multiversx/mx-chain-core-go/data/block" "github.com/multiversx/mx-chain-core-go/data/transaction" "github.com/multiversx/mx-chain-crypto-go" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/integrationTests" "github.com/multiversx/mx-chain-go/process/factory" "github.com/multiversx/mx-chain-go/sharding" "github.com/multiversx/mx-chain-go/state" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) func TestShouldProcessBlocksInMultiShardArchitecture(t *testing.T) { @@ -61,15 +61,15 @@ func TestShouldProcessBlocksInMultiShardArchitecture(t *testing.T) { proposerNode := nodes[0] - //sender shard keys, receivers keys + // sender shard keys, receivers keys sendersPrivateKeys := make([]crypto.PrivateKey, 3) receiversPublicKeys := make(map[uint32][]crypto.PublicKey) for i := 0; i < txToGenerateInEachMiniBlock; i++ { sendersPrivateKeys[i], _, _ = integrationTests.GenerateSkAndPkInShard(generateCoordinator, senderShard) - //receivers in same shard with the sender + // receivers in same shard with the sender _, pk, _ := integrationTests.GenerateSkAndPkInShard(generateCoordinator, senderShard) receiversPublicKeys[senderShard] = append(receiversPublicKeys[senderShard], pk) - //receivers in other shards + // receivers in other shards for _, shardId := range recvShards { _, pk, _ = integrationTests.GenerateSkAndPkInShard(generateCoordinator, shardId) receiversPublicKeys[shardId] = append(receiversPublicKeys[shardId], pk) @@ -111,13 +111,13 @@ func TestShouldProcessBlocksInMultiShardArchitecture(t *testing.T) { continue } - //test sender balances + // test sender balances for _, sk := range sendersPrivateKeys { valTransferred := big.NewInt(0).Mul(totalValuePerTx, big.NewInt(int64(len(receiversPublicKeys)))) valRemaining := big.NewInt(0).Sub(valMinting, valTransferred) integrationTests.TestPrivateKeyHasBalance(t, n, sk, valRemaining) } - //test receiver balances from same shard + // test receiver balances from same shard for _, pk := range receiversPublicKeys[proposerNode.ShardCoordinator.SelfId()] { integrationTests.TestPublicKeyHasBalance(t, n, pk, valToTransferPerTx) } @@ -136,7 +136,7 @@ func TestShouldProcessBlocksInMultiShardArchitecture(t *testing.T) { continue } - //test receiver balances from same shard + // test receiver balances from same shard for _, pk := range receiversPublicKeys[n.ShardCoordinator.SelfId()] { integrationTests.TestPublicKeyHasBalance(t, n, pk, valToTransferPerTx) } @@ 
-352,87 +352,6 @@ func TestSimpleTransactionsWithMoreValueThanBalanceYieldReceiptsInMultiShardedEn } } -func TestExecuteBlocksWithGapsBetweenBlocks(t *testing.T) { - //TODO fix this test - t.Skip("TODO fix this test") - if testing.Short() { - t.Skip("this is not a short test") - } - nodesPerShard := 2 - shardConsensusGroupSize := 2 - nbMetaNodes := 400 - nbShards := 1 - consensusGroupSize := 400 - - cacheMut := &sync.Mutex{} - - putCounter := 0 - cacheMap := make(map[string]interface{}) - - // create map of shard - testNodeProcessors for metachain and shard chain - nodesMap := integrationTests.CreateNodesWithNodesCoordinatorWithCacher( - nodesPerShard, - nbMetaNodes, - nbShards, - shardConsensusGroupSize, - consensusGroupSize, - ) - - roundsPerEpoch := uint64(1000) - maxGasLimitPerBlock := uint64(100000) - gasPrice := uint64(10) - gasLimit := uint64(100) - for _, nodes := range nodesMap { - integrationTests.SetEconomicsParameters(nodes, maxGasLimitPerBlock, gasPrice, gasLimit) - integrationTests.DisplayAndStartNodes(nodes[0:1]) - - for _, node := range nodes { - node.EpochStartTrigger.SetRoundsPerEpoch(roundsPerEpoch) - } - } - - defer func() { - for _, nodes := range nodesMap { - for _, n := range nodes { - n.Close() - } - } - }() - - round := uint64(1) - roundDifference := 10 - nonce := uint64(1) - - firstNodeOnMeta := nodesMap[core.MetachainShardId][0] - body, header, _ := firstNodeOnMeta.ProposeBlock(round, nonce) - - // set bitmap for all consensus nodes signing - bitmap := make([]byte, consensusGroupSize/8+1) - for i := range bitmap { - bitmap[i] = 0xFF - } - - bitmap[consensusGroupSize/8] >>= uint8(8 - (consensusGroupSize % 8)) - err := header.SetPubKeysBitmap(bitmap) - assert.Nil(t, err) - - firstNodeOnMeta.CommitBlock(body, header) - - round += uint64(roundDifference) - nonce++ - putCounter = 0 - - cacheMut.Lock() - for k := range cacheMap { - delete(cacheMap, k) - } - cacheMut.Unlock() - - firstNodeOnMeta.ProposeBlock(round, nonce) - - assert.Equal(t, roundDifference, putCounter) -} - // TestShouldSubtractTheCorrectTxFee uses the mock VM as it's gas model is predictable // The test checks the tx fee subtraction from the sender account when deploying a SC // It also checks the fee obtained by the leader is correct diff --git a/integrationTests/multiShard/endOfEpoch/epochStartChangeWithContinuousTransactionsInMultiShardedEnvironment/epochStartChangeWithContinuousTransactionsInMultiShardedEnvironment_test.go b/integrationTests/multiShard/endOfEpoch/epochStartChangeWithContinuousTransactionsInMultiShardedEnvironment/epochStartChangeWithContinuousTransactionsInMultiShardedEnvironment_test.go index d89abd3aae5..dd964aeb745 100644 --- a/integrationTests/multiShard/endOfEpoch/epochStartChangeWithContinuousTransactionsInMultiShardedEnvironment/epochStartChangeWithContinuousTransactionsInMultiShardedEnvironment_test.go +++ b/integrationTests/multiShard/endOfEpoch/epochStartChangeWithContinuousTransactionsInMultiShardedEnvironment/epochStartChangeWithContinuousTransactionsInMultiShardedEnvironment_test.go @@ -23,6 +23,9 @@ func TestEpochStartChangeWithContinuousTransactionsInMultiShardedEnvironment(t * StakingV2EnableEpoch: integrationTests.UnreachableEpoch, ScheduledMiniBlocksEnableEpoch: integrationTests.UnreachableEpoch, MiniBlockPartialExecutionEnableEpoch: integrationTests.UnreachableEpoch, + StakingV4Step1EnableEpoch: integrationTests.UnreachableEpoch, + StakingV4Step2EnableEpoch: integrationTests.UnreachableEpoch, + StakingV4Step3EnableEpoch: integrationTests.UnreachableEpoch, } 
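// The same configuration pattern recurs throughout these tests: staking v4 rolls out in three
// consecutive steps, so a test either schedules the three step epochs in increasing order (for
// example 2, 3, 4, as in the chain simulator tests above) or pins all of them to
// integrationTests.UnreachableEpoch to keep the feature switched off. The sketch below is a
// minimal, hypothetical illustration of that idea; the helper name, package name and the epoch
// arithmetic are assumptions for illustration only and are not part of this patch.
package example

import (
	"github.com/multiversx/mx-chain-go/config"
	"github.com/multiversx/mx-chain-go/integrationTests"
)

// stakingV4Steps returns an EnableEpochs value with the three staking v4 steps either scheduled
// at consecutive epochs starting from firstStepEpoch, or pushed beyond any reachable epoch.
func stakingV4Steps(enabled bool, firstStepEpoch uint32) config.EnableEpochs {
	cfg := config.EnableEpochs{}
	if !enabled {
		cfg.StakingV4Step1EnableEpoch = integrationTests.UnreachableEpoch
		cfg.StakingV4Step2EnableEpoch = integrationTests.UnreachableEpoch
		cfg.StakingV4Step3EnableEpoch = integrationTests.UnreachableEpoch
		return cfg
	}

	// the three steps are consecutive by convention in these tests
	cfg.StakingV4Step1EnableEpoch = firstStepEpoch
	cfg.StakingV4Step2EnableEpoch = firstStepEpoch + 1
	cfg.StakingV4Step3EnableEpoch = firstStepEpoch + 2
	return cfg
}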
nodes := integrationTests.CreateNodesWithEnableEpochs( diff --git a/integrationTests/multiShard/endOfEpoch/epochStartChangeWithoutTransactionInMultiShardedEnvironment/epochStartChangeWithoutTransactionInMultiShardedEnvironment_test.go b/integrationTests/multiShard/endOfEpoch/epochStartChangeWithoutTransactionInMultiShardedEnvironment/epochStartChangeWithoutTransactionInMultiShardedEnvironment_test.go index b7b658e4ca2..d14eb086de6 100644 --- a/integrationTests/multiShard/endOfEpoch/epochStartChangeWithoutTransactionInMultiShardedEnvironment/epochStartChangeWithoutTransactionInMultiShardedEnvironment_test.go +++ b/integrationTests/multiShard/endOfEpoch/epochStartChangeWithoutTransactionInMultiShardedEnvironment/epochStartChangeWithoutTransactionInMultiShardedEnvironment_test.go @@ -22,6 +22,9 @@ func TestEpochStartChangeWithoutTransactionInMultiShardedEnvironment(t *testing. StakingV2EnableEpoch: integrationTests.UnreachableEpoch, ScheduledMiniBlocksEnableEpoch: integrationTests.UnreachableEpoch, MiniBlockPartialExecutionEnableEpoch: integrationTests.UnreachableEpoch, + StakingV4Step1EnableEpoch: integrationTests.UnreachableEpoch, + StakingV4Step2EnableEpoch: integrationTests.UnreachableEpoch, + StakingV4Step3EnableEpoch: integrationTests.UnreachableEpoch, } nodes := integrationTests.CreateNodesWithEnableEpochs( diff --git a/integrationTests/multiShard/endOfEpoch/startInEpoch/startInEpoch_test.go b/integrationTests/multiShard/endOfEpoch/startInEpoch/startInEpoch_test.go index c423b75354c..ce933a22666 100644 --- a/integrationTests/multiShard/endOfEpoch/startInEpoch/startInEpoch_test.go +++ b/integrationTests/multiShard/endOfEpoch/startInEpoch/startInEpoch_test.go @@ -33,6 +33,8 @@ import ( "github.com/multiversx/mx-chain-go/testscommon" epochNotifierMock "github.com/multiversx/mx-chain-go/testscommon/epochNotifier" "github.com/multiversx/mx-chain-go/testscommon/genericMocks" + "github.com/multiversx/mx-chain-go/testscommon/genesisMocks" + "github.com/multiversx/mx-chain-go/testscommon/marshallerMock" "github.com/multiversx/mx-chain-go/testscommon/nodeTypeProviderMock" "github.com/multiversx/mx-chain-go/testscommon/p2pmocks" "github.com/multiversx/mx-chain-go/testscommon/scheduledDataSyncer" @@ -67,6 +69,9 @@ func testNodeStartsInEpoch(t *testing.T, shardID uint32, expectedHighestRound ui ScheduledMiniBlocksEnableEpoch: integrationTests.UnreachableEpoch, MiniBlockPartialExecutionEnableEpoch: integrationTests.UnreachableEpoch, RefactorPeersMiniBlocksEnableEpoch: integrationTests.UnreachableEpoch, + StakingV4Step1EnableEpoch: integrationTests.UnreachableEpoch, + StakingV4Step2EnableEpoch: integrationTests.UnreachableEpoch, + StakingV4Step3EnableEpoch: integrationTests.UnreachableEpoch, } nodes := integrationTests.CreateNodesWithEnableEpochs( @@ -149,7 +154,7 @@ func testNodeStartsInEpoch(t *testing.T, shardID uint32, expectedHighestRound ui pksBytes := integrationTests.CreatePkBytes(uint32(numOfShards)) address := []byte("afafafafafafafafafafafafafafafaf") - nodesConfig := &mock.NodesSetupStub{ + nodesConfig := &genesisMocks.NodesSetupStub{ InitialNodesInfoCalled: func() (m map[uint32][]nodesCoordinator.GenesisNodeInfoHandler, m2 map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) { oneMap := make(map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) for i := uint32(0); i < uint32(numOfShards); i++ { @@ -181,7 +186,6 @@ func testNodeStartsInEpoch(t *testing.T, shardID uint32, expectedHighestRound ui return integrationTests.MinTransactionVersion }, } - defer func() { errRemoveDir := 
os.RemoveAll("Epoch_0") assert.NoError(t, errRemoveDir) @@ -231,12 +235,17 @@ func testNodeStartsInEpoch(t *testing.T, shardID uint32, expectedHighestRound ui coreComponents.ChanStopNodeProcessField = endProcess.GetDummyEndProcessChannel() coreComponents.HardforkTriggerPubKeyField = []byte("provided hardfork pub key") + nodesCoordinatorRegistryFactory, _ := nodesCoordinator.NewNodesCoordinatorRegistryFactory( + &marshallerMock.MarshalizerMock{}, + 444, + ) argsBootstrapHandler := bootstrap.ArgsEpochStartBootstrap{ - CryptoComponentsHolder: cryptoComponents, - CoreComponentsHolder: coreComponents, - MainMessenger: nodeToJoinLate.MainMessenger, - FullArchiveMessenger: nodeToJoinLate.FullArchiveMessenger, - GeneralConfig: generalConfig, + NodesCoordinatorRegistryFactory: nodesCoordinatorRegistryFactory, + CryptoComponentsHolder: cryptoComponents, + CoreComponentsHolder: coreComponents, + MainMessenger: nodeToJoinLate.MainMessenger, + FullArchiveMessenger: nodeToJoinLate.FullArchiveMessenger, + GeneralConfig: generalConfig, PrefsConfig: config.PreferencesConfig{ FullArchive: false, }, diff --git a/integrationTests/multiShard/hardFork/hardFork_test.go b/integrationTests/multiShard/hardFork/hardFork_test.go index 4cbf4cc92d0..6686aa5b5c2 100644 --- a/integrationTests/multiShard/hardFork/hardFork_test.go +++ b/integrationTests/multiShard/hardFork/hardFork_test.go @@ -21,6 +21,7 @@ import ( "github.com/multiversx/mx-chain-go/integrationTests/vm/wasm" vmFactory "github.com/multiversx/mx-chain-go/process/factory" "github.com/multiversx/mx-chain-go/state" + "github.com/multiversx/mx-chain-go/testscommon" commonMocks "github.com/multiversx/mx-chain-go/testscommon/common" "github.com/multiversx/mx-chain-go/testscommon/cryptoMocks" "github.com/multiversx/mx-chain-go/testscommon/dblookupext" @@ -406,8 +407,6 @@ func hardForkImport( dataComponents.DataPool = node.DataPool dataComponents.BlockChain = node.BlockChain - roundConfig := integrationTests.GetDefaultRoundsConfig() - argsGenesis := process.ArgsGenesisBlockCreator{ GenesisTime: 0, StartEpochNum: 100, @@ -465,6 +464,8 @@ func hardForkImport( MaxNumberOfNodesForStake: 100, ActivateBLSPubKeyMessageVerification: false, MinUnstakeTokensValue: "1", + StakeLimitPercentage: 100.0, + NodeLimitPercentage: 100.0, }, DelegationManagerSystemSCConfig: config.DelegationManagerSystemSCConfig{ MinCreationDeposit: "100", @@ -475,11 +476,17 @@ func hardForkImport( MinServiceFee: 0, MaxServiceFee: 100, }, + SoftAuctionConfig: config.SoftAuctionConfig{ + TopUpStep: "10", + MinTopUp: "1", + MaxTopUp: "32000000", + MaxNumberOfIterations: 100000, + }, }, AccountsParser: &genesisMocks.AccountsParserStub{}, SmartContractParser: &mock.SmartContractParserStub{}, BlockSignKeyGen: &mock.KeyGenMock{}, - EpochConfig: &config.EpochConfig{ + EpochConfig: config.EpochConfig{ EnableEpochs: config.EnableEpochs{ BuiltInFunctionsEnableEpoch: 0, SCDeployEnableEpoch: 0, @@ -491,7 +498,8 @@ func hardForkImport( DelegationSmartContractEnableEpoch: 0, }, }, - RoundConfig: &roundConfig, + RoundConfig: testscommon.GetDefaultRoundsConfig(), + HeaderVersionConfigs: testscommon.GetDefaultHeaderVersionConfig(), HistoryRepository: &dblookupext.HistoryRepositoryStub{}, TxExecutionOrderHandler: &commonMocks.TxExecutionOrderHandlerStub{}, } diff --git a/integrationTests/multiShard/smartContract/polynetworkbridge/bridge_test.go b/integrationTests/multiShard/smartContract/polynetworkbridge/bridge_test.go index d01f900d5e2..e09c0fe12c2 100644 --- 
a/integrationTests/multiShard/smartContract/polynetworkbridge/bridge_test.go +++ b/integrationTests/multiShard/smartContract/polynetworkbridge/bridge_test.go @@ -29,7 +29,6 @@ func TestBridgeSetupAndBurn(t *testing.T) { enableEpochs := config.EnableEpochs{ GlobalMintBurnDisableEpoch: integrationTests.UnreachableEpoch, - BuiltInFunctionOnMetaEnableEpoch: integrationTests.UnreachableEpoch, SCProcessorV2EnableEpoch: integrationTests.UnreachableEpoch, FixAsyncCallBackArgsListEnableEpoch: integrationTests.UnreachableEpoch, } diff --git a/integrationTests/multiShard/softfork/scDeploy_test.go b/integrationTests/multiShard/softfork/scDeploy_test.go index dc735b26abd..8af125f5797 100644 --- a/integrationTests/multiShard/softfork/scDeploy_test.go +++ b/integrationTests/multiShard/softfork/scDeploy_test.go @@ -26,7 +26,6 @@ func TestScDeploy(t *testing.T) { t.Skip("this is not a short test") } - builtinEnableEpoch := uint32(0) deployEnableEpoch := uint32(1) relayedTxEnableEpoch := uint32(0) penalizedTooMuchGasEnableEpoch := uint32(0) @@ -34,11 +33,13 @@ func TestScDeploy(t *testing.T) { scProcessorV2EnableEpoch := integrationTests.UnreachableEpoch enableEpochs := integrationTests.CreateEnableEpochsConfig() - enableEpochs.BuiltInFunctionOnMetaEnableEpoch = builtinEnableEpoch enableEpochs.SCDeployEnableEpoch = deployEnableEpoch enableEpochs.RelayedTransactionsEnableEpoch = relayedTxEnableEpoch enableEpochs.PenalizedTooMuchGasEnableEpoch = penalizedTooMuchGasEnableEpoch enableEpochs.SCProcessorV2EnableEpoch = scProcessorV2EnableEpoch + enableEpochs.StakingV4Step1EnableEpoch = integrationTests.StakingV4Step1EnableEpoch + enableEpochs.StakingV4Step2EnableEpoch = integrationTests.StakingV4Step2EnableEpoch + enableEpochs.StakingV4Step3EnableEpoch = integrationTests.StakingV4Step3EnableEpoch shardNode := integrationTests.NewTestProcessorNode(integrationTests.ArgTestProcessorNode{ MaxShards: 1, diff --git a/integrationTests/node/getAccount/getAccount_test.go b/integrationTests/node/getAccount/getAccount_test.go index 16fa37909c3..487c8b1a15a 100644 --- a/integrationTests/node/getAccount/getAccount_test.go +++ b/integrationTests/node/getAccount/getAccount_test.go @@ -31,7 +31,9 @@ func createAccountsRepository(accDB state.AccountsAdapter, blockchain chainData. 
} func TestNode_GetAccountAccountDoesNotExistsShouldRetEmpty(t *testing.T) { - t.Parallel() + if testing.Short() { + t.Skip("this is not a short test") + } trieStorage, _ := integrationTests.CreateTrieStorageManager(testscommon.CreateMemUnit()) accDB, _ := integrationTests.CreateAccountsDB(0, trieStorage) @@ -67,7 +69,9 @@ func TestNode_GetAccountAccountDoesNotExistsShouldRetEmpty(t *testing.T) { } func TestNode_GetAccountAccountExistsShouldReturn(t *testing.T) { - t.Parallel() + if testing.Short() { + t.Skip("this is not a short test") + } testNonce := uint64(7) testBalance := big.NewInt(100) diff --git a/integrationTests/nodesCoordinatorFactory.go b/integrationTests/nodesCoordinatorFactory.go index 08bf6f0f3dd..28267d44c5a 100644 --- a/integrationTests/nodesCoordinatorFactory.go +++ b/integrationTests/nodesCoordinatorFactory.go @@ -11,8 +11,8 @@ import ( "github.com/multiversx/mx-chain-go/sharding" "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" "github.com/multiversx/mx-chain-go/storage" - "github.com/multiversx/mx-chain-go/testscommon" "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" + "github.com/multiversx/mx-chain-go/testscommon/genesisMocks" "github.com/multiversx/mx-chain-go/testscommon/nodeTypeProviderMock" vic "github.com/multiversx/mx-chain-go/testscommon/validatorInfoCacher" ) @@ -54,7 +54,12 @@ func (tpn *IndexHashedNodesCoordinatorFactory) CreateNodesCoordinator(arg ArgInd MaxNodesEnableConfig: nil, EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, } + nodeShuffler, _ := nodesCoordinator.NewHashValidatorsShuffler(nodeShufflerArgs) + nodesCoordinatorRegistryFactory, _ := nodesCoordinator.NewNodesCoordinatorRegistryFactory( + TestMarshalizer, + StakingV4Step2EnableEpoch, + ) argumentsNodesCoordinator := nodesCoordinator.ArgNodesCoordinator{ ShardConsensusGroupSize: arg.shardConsensusGroupSize, MetaConsensusGroupSize: arg.metaConsensusGroupSize, @@ -75,14 +80,15 @@ func (tpn *IndexHashedNodesCoordinatorFactory) CreateNodesCoordinator(arg ArgInd IsFullArchive: false, EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{ GetActivationEpochCalled: func(flag core.EnableEpochFlag) uint32 { - if flag == common.RefactorPeersMiniBlocksFlag { + if flag == common.RefactorPeersMiniBlocksFlag || flag == common.StakingV4Step2Flag { return UnreachableEpoch } return 0 }, }, - ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, - GenesisNodesSetupHandler: &testscommon.NodesSetupStub{}, + ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + GenesisNodesSetupHandler: &genesisMocks.NodesSetupStub{}, + NodesCoordinatorRegistryFactory: nodesCoordinatorRegistryFactory, } nodesCoord, err := nodesCoordinator.NewIndexHashedNodesCoordinator(argumentsNodesCoordinator) if err != nil { @@ -114,7 +120,12 @@ func (ihncrf *IndexHashedNodesCoordinatorWithRaterFactory) CreateNodesCoordinato MaxNodesEnableConfig: nil, EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, } + nodeShuffler, _ := nodesCoordinator.NewHashValidatorsShuffler(shufflerArgs) + nodesCoordinatorRegistryFactory, _ := nodesCoordinator.NewNodesCoordinatorRegistryFactory( + TestMarshalizer, + StakingV4Step2EnableEpoch, + ) argumentsNodesCoordinator := nodesCoordinator.ArgNodesCoordinator{ ShardConsensusGroupSize: arg.shardConsensusGroupSize, MetaConsensusGroupSize: arg.metaConsensusGroupSize, @@ -141,8 +152,9 @@ func (ihncrf *IndexHashedNodesCoordinatorWithRaterFactory) CreateNodesCoordinato return 0 }, }, - ValidatorInfoCacher: 
&vic.ValidatorInfoCacherStub{}, - GenesisNodesSetupHandler: &testscommon.NodesSetupStub{}, + ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + GenesisNodesSetupHandler: &genesisMocks.NodesSetupStub{}, + NodesCoordinatorRegistryFactory: nodesCoordinatorRegistryFactory, } baseCoordinator, err := nodesCoordinator.NewIndexHashedNodesCoordinator(argumentsNodesCoordinator) diff --git a/integrationTests/oneNodeNetwork.go b/integrationTests/oneNodeNetwork.go deleted file mode 100644 index 720ff0529c6..00000000000 --- a/integrationTests/oneNodeNetwork.go +++ /dev/null @@ -1,70 +0,0 @@ -package integrationTests - -import ( - "math/big" - "testing" - - "github.com/multiversx/mx-chain-core-go/core" - "github.com/multiversx/mx-chain-core-go/data/transaction" - "github.com/multiversx/mx-chain-go/process" -) - -type oneNodeNetwork struct { - Round uint64 - Nonce uint64 - - Node *TestProcessorNode -} - -// NewOneNodeNetwork creates a one-node network, useful for some integration tests -func NewOneNodeNetwork() *oneNodeNetwork { - n := &oneNodeNetwork{} - - nodes := CreateNodes( - 1, - 1, - 0, - ) - - n.Node = nodes[0] - return n -} - -// Stop stops the test network -func (n *oneNodeNetwork) Stop() { - n.Node.Close() -} - -// Mint mints the given address -func (n *oneNodeNetwork) Mint(address []byte, value *big.Int) { - MintAddress(n.Node.AccntState, address, value) -} - -// GetMinGasPrice returns the min gas price -func (n *oneNodeNetwork) GetMinGasPrice() uint64 { - return n.Node.EconomicsData.GetMinGasPrice() -} - -// MaxGasLimitPerBlock returns the max gas per block -func (n *oneNodeNetwork) MaxGasLimitPerBlock() uint64 { - return n.Node.EconomicsData.MaxGasLimitPerBlock(0) - 1 -} - -// GoToRoundOne advances processing to block and round 1 -func (n *oneNodeNetwork) GoToRoundOne() { - n.Round = IncrementAndPrintRound(n.Round) - n.Nonce++ -} - -// Continue advances processing with a number of rounds -func (n *oneNodeNetwork) Continue(t *testing.T, numRounds int) { - n.Nonce, n.Round = WaitOperationToBeDone(t, []*TestProcessorNode{n.Node}, numRounds, n.Nonce, n.Round, []int{0}) -} - -// AddTxToPool adds a transaction to the pool (skips signature checks and interceptors) -func (n *oneNodeNetwork) AddTxToPool(tx *transaction.Transaction) { - txHash, _ := core.CalculateHash(TestMarshalizer, TestHasher, tx) - sourceShard := n.Node.ShardCoordinator.ComputeId(tx.SndAddr) - cacheIdentifier := process.ShardCacherIdentifier(sourceShard, sourceShard) - n.Node.DataPool.Transactions().AddData(txHash, tx, tx.Size(), cacheIdentifier) -} diff --git a/integrationTests/p2p/networkSharding-hbv2/networkSharding_test.go b/integrationTests/p2p/networkSharding-hbv2/networkSharding_test.go index b458b3f779f..d74d999779a 100644 --- a/integrationTests/p2p/networkSharding-hbv2/networkSharding_test.go +++ b/integrationTests/p2p/networkSharding-hbv2/networkSharding_test.go @@ -39,6 +39,10 @@ func createDefaultConfig() p2pConfig.P2PConfig { } func TestConnectionsInNetworkShardingWithShardingWithLists(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + p2pCfg := createDefaultConfig() p2pCfg.Sharding = p2pConfig.ShardingConfig{ TargetPeerCount: 12, @@ -54,10 +58,6 @@ func TestConnectionsInNetworkShardingWithShardingWithLists(t *testing.T) { } func testConnectionsInNetworkSharding(t *testing.T, p2pConfig p2pConfig.P2PConfig) { - if testing.Short() { - t.Skip("this is not a short test") - } - nodesPerShard := 8 numMetaNodes := 8 numObserversOnShard := 2 diff --git 
a/integrationTests/realcomponents/processorRunner.go b/integrationTests/realcomponents/processorRunner.go index 6881284899b..f788de20f84 100644 --- a/integrationTests/realcomponents/processorRunner.go +++ b/integrationTests/realcomponents/processorRunner.go @@ -304,6 +304,7 @@ func (pr *ProcessorRunner) createStatusComponents(tb testing.TB) { pr.CoreComponents.NodeTypeProvider(), pr.CoreComponents.EnableEpochsHandler(), pr.DataComponents.Datapool().CurrentEpochValidatorInfo(), + pr.BootstrapComponents.NodesCoordinatorRegistryFactory(), ) require.Nil(tb, err) @@ -406,6 +407,7 @@ func (pr *ProcessorRunner) createProcessComponents(tb testing.TB) { argsProcess := factoryProcessing.ProcessComponentsFactoryArgs{ Config: *pr.Config.GeneralConfig, EpochConfig: *pr.Config.EpochConfig, + RoundConfig: *pr.Config.RoundConfig, PrefConfigs: *pr.Config.PreferencesConfig, ImportDBConfig: *pr.Config.ImportDbConfig, FlagsConfig: config.ContextFlagsConfig{ diff --git a/integrationTests/singleShard/smartContract/dns_test.go b/integrationTests/singleShard/smartContract/dns_test.go index 94319e2ef7a..bdfd26da827 100644 --- a/integrationTests/singleShard/smartContract/dns_test.go +++ b/integrationTests/singleShard/smartContract/dns_test.go @@ -13,9 +13,8 @@ import ( ) func TestDNS_Register(t *testing.T) { - // TODO reinstate test after Wasm VM pointer fix if testing.Short() { - t.Skip("cannot run with -race -short; requires Wasm VM fix") + t.Skip("this is not a short test") } expectedDNSAddress := []byte{0, 0, 0, 0, 0, 0, 0, 0, 5, 0, 180, 108, 178, 102, 195, 67, 184, 127, 204, 159, 104, 123, 190, 33, 224, 91, 255, 244, 118, 95, 24, 217} diff --git a/integrationTests/state/genesisState/genesisState_test.go b/integrationTests/state/genesisState/genesisState_test.go index 306980f2ce6..811ae1a4901 100644 --- a/integrationTests/state/genesisState/genesisState_test.go +++ b/integrationTests/state/genesisState/genesisState_test.go @@ -70,7 +70,9 @@ func TestCreationOfTheGenesisState(t *testing.T) { } func TestExtensionNodeToBranchEdgeCaseSet1(t *testing.T) { - t.Parallel() + if testing.Short() { + t.Skip("this is not a short test") + } tr1 := integrationTests.CreateNewDefaultTrie() tr2 := integrationTests.CreateNewDefaultTrie() @@ -105,7 +107,9 @@ func TestExtensionNodeToBranchEdgeCaseSet1(t *testing.T) { } func TestExtensionNodeToBranchEdgeCaseSet2(t *testing.T) { - t.Parallel() + if testing.Short() { + t.Skip("this is not a short test") + } tr1 := integrationTests.CreateNewDefaultTrie() tr2 := integrationTests.CreateNewDefaultTrie() diff --git a/integrationTests/state/stateExecTransaction/stateExecTransaction_test.go b/integrationTests/state/stateExecTransaction/stateExecTransaction_test.go index c97b9ad52b6..f79e0ff22cc 100644 --- a/integrationTests/state/stateExecTransaction/stateExecTransaction_test.go +++ b/integrationTests/state/stateExecTransaction/stateExecTransaction_test.go @@ -52,7 +52,9 @@ func TestExecTransaction_SelfTransactionShouldWork(t *testing.T) { } func TestExecTransaction_SelfTransactionWithRevertShouldWork(t *testing.T) { - t.Parallel() + if testing.Short() { + t.Skip("this is not a short test") + } trieStorage, _ := integrationTests.CreateTrieStorageManager(integrationTests.CreateMemUnit()) accnts, _ := integrationTests.CreateAccountsDB(0, trieStorage) @@ -182,7 +184,6 @@ func TestExecTransaction_MoreTransactionsMoreIterationsWithRevertShouldWork(t *t if testing.Short() { t.Skip("this is not a short test") } - t.Parallel() trieStorage, _ := 
integrationTests.CreateTrieStorageManager(integrationTests.CreateMemUnit()) accnts, _ := integrationTests.CreateAccountsDB(0, trieStorage) diff --git a/integrationTests/state/stateTrie/stateTrie_test.go b/integrationTests/state/stateTrie/stateTrie_test.go index ecb1b9b8ee0..688adc61353 100644 --- a/integrationTests/state/stateTrie/stateTrie_test.go +++ b/integrationTests/state/stateTrie/stateTrie_test.go @@ -2480,11 +2480,14 @@ func startNodesAndIssueToken( enableEpochs := config.EnableEpochs{ GlobalMintBurnDisableEpoch: integrationTests.UnreachableEpoch, - BuiltInFunctionOnMetaEnableEpoch: integrationTests.UnreachableEpoch, OptimizeGasUsedInCrossMiniBlocksEnableEpoch: integrationTests.UnreachableEpoch, ScheduledMiniBlocksEnableEpoch: integrationTests.UnreachableEpoch, MiniBlockPartialExecutionEnableEpoch: integrationTests.UnreachableEpoch, StakingV2EnableEpoch: integrationTests.UnreachableEpoch, + StakeLimitsEnableEpoch: integrationTests.UnreachableEpoch, + StakingV4Step1EnableEpoch: integrationTests.UnreachableEpoch, + StakingV4Step2EnableEpoch: integrationTests.UnreachableEpoch, + StakingV4Step3EnableEpoch: integrationTests.UnreachableEpoch, AutoBalanceDataTriesEnableEpoch: 1, } nodes := integrationTests.CreateNodesWithEnableEpochs( diff --git a/integrationTests/state/stateTrieSync/stateTrieSync_test.go b/integrationTests/state/stateTrieSync/stateTrieSync_test.go index 8bfbd584a70..4833c99f4fe 100644 --- a/integrationTests/state/stateTrieSync/stateTrieSync_test.go +++ b/integrationTests/state/stateTrieSync/stateTrieSync_test.go @@ -59,6 +59,10 @@ func createTestProcessorNodeAndTrieStorage( } func TestNode_RequestInterceptTrieNodesWithMessenger(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + t.Run("test with double lists version", func(t *testing.T) { testNodeRequestInterceptTrieNodesWithMessenger(t, 2) }) @@ -180,6 +184,10 @@ func printStatistics(ctx context.Context, stats common.SizeSyncStatisticsHandler } func TestNode_RequestInterceptTrieNodesWithMessengerNotSyncingShouldErr(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + t.Run("test with double lists version", func(t *testing.T) { testNodeRequestInterceptTrieNodesWithMessengerNotSyncingShouldErr(t, 2) }) diff --git a/integrationTests/testConsensusNode.go b/integrationTests/testConsensusNode.go index b97b9f511e7..5f5987b11cf 100644 --- a/integrationTests/testConsensusNode.go +++ b/integrationTests/testConsensusNode.go @@ -43,6 +43,7 @@ import ( dataRetrieverMock "github.com/multiversx/mx-chain-go/testscommon/dataRetriever" "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" testFactory "github.com/multiversx/mx-chain-go/testscommon/factory" + "github.com/multiversx/mx-chain-go/testscommon/genesisMocks" "github.com/multiversx/mx-chain-go/testscommon/nodeTypeProviderMock" "github.com/multiversx/mx-chain-go/testscommon/p2pmocks" "github.com/multiversx/mx-chain-go/testscommon/shardingMocks" @@ -244,7 +245,7 @@ func (tcn *TestConsensusNode) initNode(args ArgsTestConsensusNode) { return string(ChainID) } coreComponents.GenesisTimeField = time.Unix(args.StartTime, 0) - coreComponents.GenesisNodesSetupField = &testscommon.NodesSetupStub{ + coreComponents.GenesisNodesSetupField = &genesisMocks.NodesSetupStub{ GetShardConsensusGroupSizeCalled: func() uint32 { return uint32(args.ConsensusSize) }, @@ -367,26 +368,27 @@ func (tcn *TestConsensusNode) initNodesCoordinator( cache storage.Cacher, ) { argumentsNodesCoordinator := 
nodesCoordinator.ArgNodesCoordinator{ - ShardConsensusGroupSize: consensusSize, - MetaConsensusGroupSize: consensusSize, - Marshalizer: TestMarshalizer, - Hasher: hasher, - Shuffler: &shardingMocks.NodeShufflerMock{}, - EpochStartNotifier: epochStartRegistrationHandler, - BootStorer: CreateMemUnit(), - NbShards: maxShards, - EligibleNodes: eligibleMap, - WaitingNodes: waitingMap, - SelfPublicKey: pkBytes, - ConsensusGroupCache: cache, - ShuffledOutHandler: &chainShardingMocks.ShuffledOutHandlerStub{}, - ChanStopNode: endProcess.GetDummyEndProcessChannel(), - NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, - IsFullArchive: false, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, - ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, - ShardIDAsObserver: tcn.ShardCoordinator.SelfId(), - GenesisNodesSetupHandler: &testscommon.NodesSetupStub{}, + ShardConsensusGroupSize: consensusSize, + MetaConsensusGroupSize: consensusSize, + Marshalizer: TestMarshalizer, + Hasher: hasher, + Shuffler: &shardingMocks.NodeShufflerMock{}, + EpochStartNotifier: epochStartRegistrationHandler, + BootStorer: CreateMemUnit(), + NbShards: maxShards, + EligibleNodes: eligibleMap, + WaitingNodes: waitingMap, + SelfPublicKey: pkBytes, + ConsensusGroupCache: cache, + ShuffledOutHandler: &chainShardingMocks.ShuffledOutHandlerStub{}, + ChanStopNode: endProcess.GetDummyEndProcessChannel(), + NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, + IsFullArchive: false, + EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + ShardIDAsObserver: tcn.ShardCoordinator.SelfId(), + GenesisNodesSetupHandler: &genesisMocks.NodesSetupStub{}, + NodesCoordinatorRegistryFactory: &shardingMocks.NodesCoordinatorRegistryFactoryMock{}, } tcn.NodesCoordinator, _ = nodesCoordinator.NewIndexHashedNodesCoordinator(argumentsNodesCoordinator) diff --git a/integrationTests/testHeartbeatNode.go b/integrationTests/testHeartbeatNode.go index 25ab4a21e6e..1ba488b9e12 100644 --- a/integrationTests/testHeartbeatNode.go +++ b/integrationTests/testHeartbeatNode.go @@ -29,7 +29,6 @@ import ( "github.com/multiversx/mx-chain-go/dataRetriever/factory/resolverscontainer" "github.com/multiversx/mx-chain-go/dataRetriever/requestHandlers" "github.com/multiversx/mx-chain-go/epochStart/notifier" - "github.com/multiversx/mx-chain-go/heartbeat/monitor" "github.com/multiversx/mx-chain-go/heartbeat/processor" "github.com/multiversx/mx-chain-go/heartbeat/sender" "github.com/multiversx/mx-chain-go/integrationTests/mock" @@ -52,6 +51,7 @@ import ( "github.com/multiversx/mx-chain-go/testscommon/cryptoMocks" dataRetrieverMock "github.com/multiversx/mx-chain-go/testscommon/dataRetriever" "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" + "github.com/multiversx/mx-chain-go/testscommon/genesisMocks" "github.com/multiversx/mx-chain-go/testscommon/nodeTypeProviderMock" "github.com/multiversx/mx-chain-go/testscommon/p2pmocks" "github.com/multiversx/mx-chain-go/testscommon/shardingMocks" @@ -349,27 +349,28 @@ func CreateNodesWithTestHeartbeatNode( suCache, _ := storageunit.NewCache(cacherCfg) for shardId, validatorList := range validatorsMap { argumentsNodesCoordinator := nodesCoordinator.ArgNodesCoordinator{ - ShardConsensusGroupSize: shardConsensusGroupSize, - MetaConsensusGroupSize: metaConsensusGroupSize, - Marshalizer: TestMarshalizer, - Hasher: TestHasher, - ShardIDAsObserver: shardId, - NbShards: uint32(numShards), - 
EligibleNodes: validatorsForNodesCoordinator, - SelfPublicKey: []byte(strconv.Itoa(int(shardId))), - ConsensusGroupCache: suCache, - Shuffler: &shardingMocks.NodeShufflerMock{}, - BootStorer: CreateMemUnit(), - WaitingNodes: make(map[uint32][]nodesCoordinator.Validator), - Epoch: 0, - EpochStartNotifier: notifier.NewEpochStartSubscriptionHandler(), - ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, - ChanStopNode: endProcess.GetDummyEndProcessChannel(), - NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, - IsFullArchive: false, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, - ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, - GenesisNodesSetupHandler: &testscommon.NodesSetupStub{}, + ShardConsensusGroupSize: shardConsensusGroupSize, + MetaConsensusGroupSize: metaConsensusGroupSize, + Marshalizer: TestMarshalizer, + Hasher: TestHasher, + ShardIDAsObserver: shardId, + NbShards: uint32(numShards), + EligibleNodes: validatorsForNodesCoordinator, + SelfPublicKey: []byte(strconv.Itoa(int(shardId))), + ConsensusGroupCache: suCache, + Shuffler: &shardingMocks.NodeShufflerMock{}, + BootStorer: CreateMemUnit(), + WaitingNodes: make(map[uint32][]nodesCoordinator.Validator), + Epoch: 0, + EpochStartNotifier: notifier.NewEpochStartSubscriptionHandler(), + ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, + ChanStopNode: endProcess.GetDummyEndProcessChannel(), + NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, + IsFullArchive: false, + EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + GenesisNodesSetupHandler: &genesisMocks.NodesSetupStub{}, + NodesCoordinatorRegistryFactory: &shardingMocks.NodesCoordinatorRegistryFactoryMock{}, } nodesCoordinatorInstance, err := nodesCoordinator.NewIndexHashedNodesCoordinator(argumentsNodesCoordinator) log.LogIfError(err) @@ -396,27 +397,28 @@ func CreateNodesWithTestHeartbeatNode( } argumentsNodesCoordinator := nodesCoordinator.ArgNodesCoordinator{ - ShardConsensusGroupSize: shardConsensusGroupSize, - MetaConsensusGroupSize: metaConsensusGroupSize, - Marshalizer: TestMarshalizer, - Hasher: TestHasher, - ShardIDAsObserver: shardId, - NbShards: uint32(numShards), - EligibleNodes: validatorsForNodesCoordinator, - SelfPublicKey: []byte(strconv.Itoa(int(shardId))), - ConsensusGroupCache: suCache, - Shuffler: &shardingMocks.NodeShufflerMock{}, - BootStorer: CreateMemUnit(), - WaitingNodes: make(map[uint32][]nodesCoordinator.Validator), - Epoch: 0, - EpochStartNotifier: notifier.NewEpochStartSubscriptionHandler(), - ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, - ChanStopNode: endProcess.GetDummyEndProcessChannel(), - NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, - IsFullArchive: false, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, - ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, - GenesisNodesSetupHandler: &testscommon.NodesSetupStub{}, + ShardConsensusGroupSize: shardConsensusGroupSize, + MetaConsensusGroupSize: metaConsensusGroupSize, + Marshalizer: TestMarshalizer, + Hasher: TestHasher, + ShardIDAsObserver: shardId, + NbShards: uint32(numShards), + EligibleNodes: validatorsForNodesCoordinator, + SelfPublicKey: []byte(strconv.Itoa(int(shardId))), + ConsensusGroupCache: suCache, + Shuffler: &shardingMocks.NodeShufflerMock{}, + BootStorer: CreateMemUnit(), + WaitingNodes: make(map[uint32][]nodesCoordinator.Validator), + Epoch: 0, + EpochStartNotifier: 
notifier.NewEpochStartSubscriptionHandler(), + ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, + ChanStopNode: endProcess.GetDummyEndProcessChannel(), + NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, + IsFullArchive: false, + EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + GenesisNodesSetupHandler: &genesisMocks.NodesSetupStub{}, + NodesCoordinatorRegistryFactory: &shardingMocks.NodesCoordinatorRegistryFactoryMock{}, } nodesCoordinatorInstance, err := nodesCoordinator.NewIndexHashedNodesCoordinator(argumentsNodesCoordinator) log.LogIfError(err) @@ -447,7 +449,6 @@ func (thn *TestHeartbeatNode) InitTestHeartbeatNode(tb testing.TB, minPeersWaiti thn.initResolversAndRequesters() thn.initInterceptors() thn.initShardSender(tb) - thn.initCrossShardPeerTopicNotifier(tb) thn.initDirectConnectionProcessor(tb) for len(thn.MainMessenger.Peers()) < minPeersWaiting { @@ -527,13 +528,14 @@ func (thn *TestHeartbeatNode) initResolversAndRequesters() { return &trieMock.TrieStub{} }, }, - SizeCheckDelta: 100, - InputAntifloodHandler: &mock.NilAntifloodHandler{}, - OutputAntifloodHandler: &mock.NilAntifloodHandler{}, - NumConcurrentResolvingJobs: 10, - MainPreferredPeersHolder: &p2pmocks.PeersHolderStub{}, - FullArchivePreferredPeersHolder: &p2pmocks.PeersHolderStub{}, - PayloadValidator: payloadValidator, + SizeCheckDelta: 100, + InputAntifloodHandler: &mock.NilAntifloodHandler{}, + OutputAntifloodHandler: &mock.NilAntifloodHandler{}, + NumConcurrentResolvingJobs: 10, + NumConcurrentResolvingTrieNodesJobs: 3, + MainPreferredPeersHolder: &p2pmocks.PeersHolderStub{}, + FullArchivePreferredPeersHolder: &p2pmocks.PeersHolderStub{}, + PayloadValidator: payloadValidator, } requestersContainerFactoryArgs := requesterscontainer.FactoryArgs{ @@ -793,29 +795,6 @@ func (thn *TestHeartbeatNode) initDirectConnectionProcessor(tb testing.TB) { require.Nil(tb, err) } -func (thn *TestHeartbeatNode) initCrossShardPeerTopicNotifier(tb testing.TB) { - argsCrossShardPeerTopicNotifier := monitor.ArgsCrossShardPeerTopicNotifier{ - ShardCoordinator: thn.ShardCoordinator, - PeerShardMapper: thn.MainPeerShardMapper, - } - crossShardPeerTopicNotifier, err := monitor.NewCrossShardPeerTopicNotifier(argsCrossShardPeerTopicNotifier) - require.Nil(tb, err) - - err = thn.MainMessenger.AddPeerTopicNotifier(crossShardPeerTopicNotifier) - require.Nil(tb, err) - - argsCrossShardPeerTopicNotifier = monitor.ArgsCrossShardPeerTopicNotifier{ - ShardCoordinator: thn.ShardCoordinator, - PeerShardMapper: thn.FullArchivePeerShardMapper, - } - fullArchiveCrossShardPeerTopicNotifier, err := monitor.NewCrossShardPeerTopicNotifier(argsCrossShardPeerTopicNotifier) - require.Nil(tb, err) - - err = thn.FullArchiveMessenger.AddPeerTopicNotifier(fullArchiveCrossShardPeerTopicNotifier) - require.Nil(tb, err) - -} - // ConnectOnMain will try to initiate a connection to the provided parameter on the main messenger func (thn *TestHeartbeatNode) ConnectOnMain(connectable Connectable) error { if check.IfNil(connectable) { @@ -861,13 +840,19 @@ func MakeDisplayTableForHeartbeatNodes(nodes map[uint32][]*TestHeartbeatNode) st for _, n := range nodesList { buffPk, _ := n.NodeKeys.MainKey.Pk.ToByteArray() + validatorMarker := "" + v, _, _ := n.NodesCoordinator.GetValidatorWithPublicKey(buffPk) + if v != nil { + validatorMarker = "*" + } + peerInfo := n.MainMessenger.GetConnectedPeersInfo() pid := n.MainMessenger.ID().Pretty() lineData := display.NewLineData( false, 
[]string{ - core.GetTrimmedPk(hex.EncodeToString(buffPk)), + core.GetTrimmedPk(hex.EncodeToString(buffPk)) + validatorMarker, pid[len(pid)-6:], fmt.Sprintf("%d", shardId), fmt.Sprintf("%d", n.CountGlobalMessages()), diff --git a/integrationTests/testInitializer.go b/integrationTests/testInitializer.go index 9ba3d5d25a3..ca2ed8dcd25 100644 --- a/integrationTests/testInitializer.go +++ b/integrationTests/testInitializer.go @@ -69,6 +69,7 @@ import ( "github.com/multiversx/mx-chain-go/testscommon/guardianMocks" "github.com/multiversx/mx-chain-go/testscommon/marshallerMock" "github.com/multiversx/mx-chain-go/testscommon/p2pmocks" + "github.com/multiversx/mx-chain-go/testscommon/stakingcommon" testStorage "github.com/multiversx/mx-chain-go/testscommon/state" statusHandlerMock "github.com/multiversx/mx-chain-go/testscommon/statusHandler" testcommonStorage "github.com/multiversx/mx-chain-go/testscommon/storage" @@ -109,7 +110,6 @@ const ( adaptivity = false hysteresis = float32(0.2) maxTrieLevelInMemory = uint(5) - delegationManagementKey = "delegationManagement" delegationContractsList = "delegationContracts" ) @@ -665,8 +665,6 @@ func CreateFullGenesisBlocks( dataComponents.DataPool = dataPool dataComponents.BlockChain = blkc - roundsConfig := GetDefaultRoundsConfig() - argsGenesis := genesisProcess.ArgsGenesisBlockCreator{ Core: coreComponents, Data: dataComponents, @@ -715,6 +713,8 @@ func CreateFullGenesisBlocks( MaxNumberOfNodesForStake: 100, ActivateBLSPubKeyMessageVerification: false, MinUnstakeTokensValue: "1", + StakeLimitPercentage: 100.0, + NodeLimitPercentage: 100.0, }, DelegationManagerSystemSCConfig: config.DelegationManagerSystemSCConfig{ MinCreationDeposit: "100", @@ -725,14 +725,21 @@ func CreateFullGenesisBlocks( MinServiceFee: 0, MaxServiceFee: 100, }, + SoftAuctionConfig: config.SoftAuctionConfig{ + TopUpStep: "10", + MinTopUp: "1", + MaxTopUp: "32000000", + MaxNumberOfIterations: 100000, + }, }, AccountsParser: accountsParser, SmartContractParser: smartContractParser, BlockSignKeyGen: &mock.KeyGenMock{}, - EpochConfig: &config.EpochConfig{ + EpochConfig: config.EpochConfig{ EnableEpochs: enableEpochsConfig, }, - RoundConfig: &roundsConfig, + RoundConfig: testscommon.GetDefaultRoundsConfig(), + HeaderVersionConfigs: testscommon.GetDefaultHeaderVersionConfig(), HistoryRepository: &dblookupext.HistoryRepositoryStub{}, TxExecutionOrderHandler: &commonMocks.TxExecutionOrderHandlerStub{}, } @@ -822,6 +829,8 @@ func CreateGenesisMetaBlock( MaxNumberOfNodesForStake: 100, ActivateBLSPubKeyMessageVerification: false, MinUnstakeTokensValue: "1", + StakeLimitPercentage: 100.0, + NodeLimitPercentage: 100.0, }, DelegationManagerSystemSCConfig: config.DelegationManagerSystemSCConfig{ MinCreationDeposit: "100", @@ -832,12 +841,20 @@ func CreateGenesisMetaBlock( MinServiceFee: 0, MaxServiceFee: 100, }, + SoftAuctionConfig: config.SoftAuctionConfig{ + TopUpStep: "10", + MinTopUp: "1", + MaxTopUp: "32000000", + MaxNumberOfIterations: 100000, + }, }, BlockSignKeyGen: &mock.KeyGenMock{}, GenesisNodePrice: big.NewInt(1000), - EpochConfig: &config.EpochConfig{ + EpochConfig: config.EpochConfig{ EnableEpochs: enableEpochsConfig, }, + RoundConfig: testscommon.GetDefaultRoundsConfig(), + HeaderVersionConfigs: testscommon.GetDefaultHeaderVersionConfig(), HistoryRepository: &dblookupext.HistoryRepositoryStub{}, TxExecutionOrderHandler: &commonMocks.TxExecutionOrderHandlerStub{}, } @@ -1379,7 +1396,7 @@ func CreateNodesWithEnableEpochsAndVmConfig( nodesPerShard, numMetaChainNodes, epochConfig, - 
GetDefaultRoundsConfig(), + testscommon.GetDefaultRoundsConfig(), vmConfig, ) } @@ -1519,6 +1536,9 @@ func CreateNodesWithFullGenesis( ) ([]*TestProcessorNode, *TestProcessorNode) { enableEpochsConfig := GetDefaultEnableEpochsConfig() enableEpochsConfig.StakingV2EnableEpoch = UnreachableEpoch + enableEpochsConfig.StakingV4Step1EnableEpoch = UnreachableEpoch + enableEpochsConfig.StakingV4Step2EnableEpoch = UnreachableEpoch + enableEpochsConfig.StakingV4Step3EnableEpoch = UnreachableEpoch return CreateNodesWithFullGenesisCustomEnableEpochs(numOfShards, nodesPerShard, numMetaChainNodes, genesisFile, enableEpochsConfig) } @@ -2607,18 +2627,7 @@ func SaveDelegationManagerConfig(nodes []*TestProcessorNode) { continue } - acc, _ := n.AccntState.LoadAccount(vm.DelegationManagerSCAddress) - userAcc, _ := acc.(state.UserAccountHandler) - - managementData := &systemSmartContracts.DelegationManagement{ - MinDeposit: big.NewInt(100), - LastAddress: vm.FirstDelegationSCAddress, - MinDelegationAmount: big.NewInt(1), - } - marshaledData, _ := TestMarshalizer.Marshal(managementData) - _ = userAcc.SaveKeyValue([]byte(delegationManagementKey), marshaledData) - _ = n.AccntState.SaveAccount(userAcc) - _, _ = n.AccntState.Commit() + stakingcommon.SaveDelegationManagerConfig(n.AccntState, TestMarshalizer) } } diff --git a/integrationTests/testNetwork.go b/integrationTests/testNetwork.go index e22222d41a7..a08b3aa85c7 100644 --- a/integrationTests/testNetwork.go +++ b/integrationTests/testNetwork.go @@ -34,7 +34,7 @@ type GasScheduleMap = map[string]map[string]uint64 // TestNetwork wraps a set of TestProcessorNodes along with a set of test // Wallets, instantiates them, controls them and provides operations with them; // designed to be used in integration tests. -// TODO combine TestNetwork with the preexisting TestContext and OneNodeNetwork +// TODO combine TestNetwork with the preexisting TestContext and MiniNetwork // into a single struct containing the functionality of all three type TestNetwork struct { NumShards int diff --git a/integrationTests/testProcessorNode.go b/integrationTests/testProcessorNode.go index 7704b9c1029..b52cc3585a8 100644 --- a/integrationTests/testProcessorNode.go +++ b/integrationTests/testProcessorNode.go @@ -115,6 +115,7 @@ import ( "github.com/multiversx/mx-chain-go/testscommon/outport" "github.com/multiversx/mx-chain-go/testscommon/p2pmocks" "github.com/multiversx/mx-chain-go/testscommon/shardingMocks" + "github.com/multiversx/mx-chain-go/testscommon/stakingcommon" stateMock "github.com/multiversx/mx-chain-go/testscommon/state" statusHandlerMock "github.com/multiversx/mx-chain-go/testscommon/statusHandler" storageStubs "github.com/multiversx/mx-chain-go/testscommon/storage" @@ -222,6 +223,18 @@ const sizeCheckDelta = 100 // UnreachableEpoch defines an unreachable epoch for integration tests const UnreachableEpoch = uint32(1000000) +// StakingV4Step1EnableEpoch defines the epoch for integration tests when stakingV4 init is enabled +const StakingV4Step1EnableEpoch = 4443 + +// StakingV4Step2EnableEpoch defines the epoch for integration tests when stakingV4 is enabled; should be greater than StakingV2Epoch +const StakingV4Step2EnableEpoch = 4444 + +// StakingV4Step3EnableEpoch defines the epoch for integration tests when nodes distribution from auction to waiting list is enabled in staking v4 +const StakingV4Step3EnableEpoch = 4445 + +// ScheduledMiniBlocksEnableEpoch defines the epoch for integration tests when scheduled mini blocks are enabled +const ScheduledMiniBlocksEnableEpoch
= 1000 + // TestSingleSigner defines a Ed25519Signer var TestSingleSigner = &ed25519SingleSig.Ed25519Signer{} @@ -483,7 +496,7 @@ func newBaseTestProcessorNode(args ArgTestProcessorNode) *TestProcessorNode { } if args.RoundsConfig == nil { - defaultRoundsConfig := GetDefaultRoundsConfig() + defaultRoundsConfig := testscommon.GetDefaultRoundsConfig() args.RoundsConfig = &defaultRoundsConfig } genericRoundNotifier := forking.NewGenericRoundNotifier() @@ -653,7 +666,7 @@ func (tpn *TestProcessorNode) initValidatorStatistics() { rater, _ := rating.NewBlockSigningRater(tpn.RatingsData) if check.IfNil(tpn.NodesSetup) { - tpn.NodesSetup = &mock.NodesSetupStub{ + tpn.NodesSetup = &genesisMocks.NodesSetupStub{ MinNumberOfNodesCalled: func() uint32 { return tpn.ShardCoordinator.NumberOfShards() * 2 }, @@ -942,6 +955,8 @@ func (tpn *TestProcessorNode) createFullSCQueryService(gasMap map[string]map[str MaxNumberOfNodesForStake: 100, ActivateBLSPubKeyMessageVerification: false, MinUnstakeTokensValue: "1", + StakeLimitPercentage: 100.0, + NodeLimitPercentage: 100.0, }, DelegationManagerSystemSCConfig: config.DelegationManagerSystemSCConfig{ MinCreationDeposit: "100", @@ -952,12 +967,19 @@ func (tpn *TestProcessorNode) createFullSCQueryService(gasMap map[string]map[str MinServiceFee: 0, MaxServiceFee: 100000, }, + SoftAuctionConfig: config.SoftAuctionConfig{ + TopUpStep: "10", + MinTopUp: "1", + MaxTopUp: "32000000", + MaxNumberOfIterations: 100000, + }, }, ValidatorAccountsDB: tpn.PeerState, UserAccountsDB: tpn.AccntState, ChanceComputer: tpn.NodesCoordinator, ShardCoordinator: tpn.ShardCoordinator, EnableEpochsHandler: tpn.EnableEpochsHandler, + NodesCoordinator: tpn.NodesCoordinator, } tpn.EpochNotifier.CheckEpoch(&testscommon.HeaderHandlerStub{ EpochField: tpn.EnableEpochs.DelegationSmartContractEnableEpoch, @@ -1081,11 +1103,10 @@ func (tpn *TestProcessorNode) initChainHandler() { func (tpn *TestProcessorNode) initEconomicsData(economicsConfig *config.EconomicsConfig) { tpn.EnableEpochs.PenalizedTooMuchGasEnableEpoch = 0 argsNewEconomicsData := economics.ArgsNewEconomicsData{ - Economics: economicsConfig, - EpochNotifier: tpn.EpochNotifier, - EnableEpochsHandler: tpn.EnableEpochsHandler, - BuiltInFunctionsCostHandler: &mock.BuiltInCostHandlerStub{}, - TxVersionChecker: &testscommon.TxVersionCheckerStub{}, + Economics: economicsConfig, + EpochNotifier: tpn.EpochNotifier, + EnableEpochsHandler: tpn.EnableEpochsHandler, + TxVersionChecker: &testscommon.TxVersionCheckerStub{}, } economicsData, _ := economics.NewEconomicsData(argsNewEconomicsData) tpn.EconomicsData = economics.NewTestEconomicsData(economicsData) @@ -1434,22 +1455,23 @@ func (tpn *TestProcessorNode) initResolvers() { fullArchivePreferredPeersHolder, _ := p2pFactory.NewPeersHolder([]string{}) resolverContainerFactory := resolverscontainer.FactoryArgs{ - ShardCoordinator: tpn.ShardCoordinator, - MainMessenger: tpn.MainMessenger, - FullArchiveMessenger: tpn.FullArchiveMessenger, - Store: tpn.Storage, - Marshalizer: TestMarshalizer, - DataPools: tpn.DataPool, - Uint64ByteSliceConverter: TestUint64Converter, - DataPacker: dataPacker, - TriesContainer: tpn.TrieContainer, - SizeCheckDelta: 100, - InputAntifloodHandler: &mock.NilAntifloodHandler{}, - OutputAntifloodHandler: &mock.NilAntifloodHandler{}, - NumConcurrentResolvingJobs: 10, - MainPreferredPeersHolder: preferredPeersHolder, - FullArchivePreferredPeersHolder: fullArchivePreferredPeersHolder, - PayloadValidator: payloadValidator, + ShardCoordinator: tpn.ShardCoordinator, + MainMessenger: 
tpn.MainMessenger, + FullArchiveMessenger: tpn.FullArchiveMessenger, + Store: tpn.Storage, + Marshalizer: TestMarshalizer, + DataPools: tpn.DataPool, + Uint64ByteSliceConverter: TestUint64Converter, + DataPacker: dataPacker, + TriesContainer: tpn.TrieContainer, + SizeCheckDelta: 100, + InputAntifloodHandler: &mock.NilAntifloodHandler{}, + OutputAntifloodHandler: &mock.NilAntifloodHandler{}, + NumConcurrentResolvingJobs: 10, + NumConcurrentResolvingTrieNodesJobs: 3, + MainPreferredPeersHolder: preferredPeersHolder, + FullArchivePreferredPeersHolder: fullArchivePreferredPeersHolder, + PayloadValidator: payloadValidator, } var err error @@ -1526,7 +1548,7 @@ func (tpn *TestProcessorNode) initInnerProcessors(gasMap map[string]map[string]u } if tpn.ValidatorStatisticsProcessor == nil { - tpn.ValidatorStatisticsProcessor = &mock.ValidatorStatisticsProcessorStub{ + tpn.ValidatorStatisticsProcessor = &testscommon.ValidatorStatisticsProcessorStub{ UpdatePeerStateCalled: func(header data.MetaHeaderHandler) ([]byte, error) { return []byte("validator statistics root hash"), nil }, @@ -1651,7 +1673,7 @@ func (tpn *TestProcessorNode) initInnerProcessors(gasMap map[string]map[string]u mockVM.GasForOperation = OpGasValueForMockVm _ = tpn.VMContainer.Add(procFactory.InternalTestingVM, mockVM) - tpn.FeeAccumulator, _ = postprocess.NewFeeAccumulator() + tpn.FeeAccumulator = postprocess.NewFeeAccumulator() tpn.ArgsParser = smartContract.NewArgumentParser() argsTxTypeHandler := coordinator.ArgNewTxTypeHandler{ @@ -1897,6 +1919,8 @@ func (tpn *TestProcessorNode) initMetaInnerProcessors(gasMap map[string]map[stri MaxNumberOfNodesForStake: 100, ActivateBLSPubKeyMessageVerification: false, MinUnstakeTokensValue: "1", + StakeLimitPercentage: 100.0, + NodeLimitPercentage: 100.0, }, DelegationManagerSystemSCConfig: config.DelegationManagerSystemSCConfig{ MinCreationDeposit: "100", @@ -1907,12 +1931,19 @@ func (tpn *TestProcessorNode) initMetaInnerProcessors(gasMap map[string]map[stri MinServiceFee: 0, MaxServiceFee: 100000, }, + SoftAuctionConfig: config.SoftAuctionConfig{ + TopUpStep: "10", + MinTopUp: "1", + MaxTopUp: "32000000", + MaxNumberOfIterations: 100000, + }, }, ValidatorAccountsDB: tpn.PeerState, UserAccountsDB: tpn.AccntState, ChanceComputer: &mock.RaterMock{}, ShardCoordinator: tpn.ShardCoordinator, EnableEpochsHandler: tpn.EnableEpochsHandler, + NodesCoordinator: tpn.NodesCoordinator, } vmFactory, _ := metaProcess.NewVMContainerFactory(argsVMContainerFactory) @@ -1922,7 +1953,7 @@ func (tpn *TestProcessorNode) initMetaInnerProcessors(gasMap map[string]map[stri tpn.SystemSCFactory = vmFactory.SystemSmartContractContainerFactory() tpn.addMockVm(tpn.BlockchainHook) - tpn.FeeAccumulator, _ = postprocess.NewFeeAccumulator() + tpn.FeeAccumulator = postprocess.NewFeeAccumulator() tpn.ArgsParser = smartContract.NewArgumentParser() esdtTransferParser, _ := parsers.NewESDTTransferParser(TestMarshalizer) argsTxTypeHandler := coordinator.ArgNewTxTypeHandler{ @@ -2069,7 +2100,7 @@ func (tpn *TestProcessorNode) InitDelegationManager() { log.Error("error while initializing system SC", "return code", vmOutput.ReturnCode) } - err = tpn.processSCOutputAccounts(vmOutput) + err = ProcessSCOutputAccounts(vmOutput, tpn.AccntState) log.LogIfError(err) err = tpn.updateSystemSCContractsCode(vmInput.ContractCodeMetadata, vm.DelegationManagerSCAddress) @@ -2092,39 +2123,6 @@ func (tpn *TestProcessorNode) updateSystemSCContractsCode(contractMetadata []byt return tpn.AccntState.SaveAccount(userAcc) } -// save account changes in 
state from vmOutput - protected by VM - every output can be treated as is. -func (tpn *TestProcessorNode) processSCOutputAccounts(vmOutput *vmcommon.VMOutput) error { - outputAccounts := process.SortVMOutputInsideData(vmOutput) - for _, outAcc := range outputAccounts { - acc, err := tpn.getUserAccount(outAcc.Address) - if err != nil { - return err - } - - storageUpdates := process.GetSortedStorageUpdates(outAcc) - for _, storeUpdate := range storageUpdates { - err = acc.SaveKeyValue(storeUpdate.Offset, storeUpdate.Data) - if err != nil { - return err - } - } - - if outAcc.BalanceDelta != nil && outAcc.BalanceDelta.Cmp(zero) != 0 { - err = acc.AddToBalance(outAcc.BalanceDelta) - if err != nil { - return err - } - } - - err = tpn.AccntState.SaveAccount(acc) - if err != nil { - return err - } - } - - return nil -} - func (tpn *TestProcessorNode) getUserAccount(address []byte) (state.UserAccountHandler, error) { acnt, err := tpn.AccntState.LoadAccount(address) if err != nil { @@ -2287,7 +2285,13 @@ func (tpn *TestProcessorNode) initBlockProcessor() { if errGet != nil { log.Error("initBlockProcessor tpn.VMContainer.Get", "error", errGet) } - stakingDataProvider, errRsp := metachain.NewStakingDataProvider(systemVM, "1000") + + argsStakingDataProvider := metachain.StakingDataProviderArgs{ + EnableEpochsHandler: coreComponents.EnableEpochsHandler(), + SystemVM: systemVM, + MinNodePrice: "1000", + } + stakingDataProvider, errRsp := metachain.NewStakingDataProvider(argsStakingDataProvider) if errRsp != nil { log.Error("initBlockProcessor NewRewardsStakingProvider", "error", errRsp) } @@ -2326,23 +2330,52 @@ func (tpn *TestProcessorNode) initBlockProcessor() { EnableEpochsHandler: tpn.EnableEpochsHandler, } epochStartValidatorInfo, _ := metachain.NewValidatorInfoCreator(argsEpochValidatorInfo) + + maxNodesChangeConfigProvider, _ := notifier.NewNodesConfigProvider( + tpn.EpochNotifier, + nil, + ) + auctionCfg := config.SoftAuctionConfig{ + TopUpStep: "10", + MinTopUp: "1", + MaxTopUp: "32000000", + MaxNumberOfIterations: 100000, + } + ald, _ := metachain.NewAuctionListDisplayer(metachain.ArgsAuctionListDisplayer{ + TableDisplayHandler: metachain.NewTableDisplayer(), + ValidatorPubKeyConverter: &testscommon.PubkeyConverterMock{}, + AddressPubKeyConverter: &testscommon.PubkeyConverterMock{}, + AuctionConfig: auctionCfg, + }) + + argsAuctionListSelector := metachain.AuctionListSelectorArgs{ + ShardCoordinator: tpn.ShardCoordinator, + StakingDataProvider: stakingDataProvider, + MaxNodesChangeConfigProvider: maxNodesChangeConfigProvider, + AuctionListDisplayHandler: ald, + SoftAuctionConfig: auctionCfg, + } + auctionListSelector, _ := metachain.NewAuctionListSelector(argsAuctionListSelector) + argsEpochSystemSC := metachain.ArgsNewEpochStartSystemSCProcessing{ - SystemVM: systemVM, - UserAccountsDB: tpn.AccntState, - PeerAccountsDB: tpn.PeerState, - Marshalizer: TestMarshalizer, - StartRating: tpn.RatingsData.StartRating(), - ValidatorInfoCreator: tpn.ValidatorStatisticsProcessor, - EndOfEpochCallerAddress: vm.EndOfEpochAddress, - StakingSCAddress: vm.StakingSCAddress, - ChanceComputer: tpn.NodesCoordinator, - EpochNotifier: tpn.EpochNotifier, - GenesisNodesConfig: tpn.NodesSetup, - StakingDataProvider: stakingDataProvider, - NodesConfigProvider: tpn.NodesCoordinator, - ShardCoordinator: tpn.ShardCoordinator, - ESDTOwnerAddressBytes: vm.EndOfEpochAddress, - EnableEpochsHandler: tpn.EnableEpochsHandler, + SystemVM: systemVM, + UserAccountsDB: tpn.AccntState, + PeerAccountsDB: tpn.PeerState, + Marshalizer: 
TestMarshalizer, + StartRating: tpn.RatingsData.StartRating(), + ValidatorInfoCreator: tpn.ValidatorStatisticsProcessor, + EndOfEpochCallerAddress: vm.EndOfEpochAddress, + StakingSCAddress: vm.StakingSCAddress, + ChanceComputer: tpn.NodesCoordinator, + EpochNotifier: tpn.EpochNotifier, + GenesisNodesConfig: tpn.NodesSetup, + StakingDataProvider: stakingDataProvider, + NodesConfigProvider: tpn.NodesCoordinator, + ShardCoordinator: tpn.ShardCoordinator, + ESDTOwnerAddressBytes: vm.EndOfEpochAddress, + EnableEpochsHandler: tpn.EnableEpochsHandler, + AuctionListSelector: auctionListSelector, + MaxNodesChangeConfigProvider: maxNodesChangeConfigProvider, } epochStartSystemSCProcessor, _ := metachain.NewSystemSCProcessor(argsEpochSystemSC) tpn.EpochStartSystemSCProcessor = epochStartSystemSCProcessor @@ -3064,14 +3097,14 @@ func (tpn *TestProcessorNode) createHeartbeatWithHardforkTrigger() { processComponents.ShardCoord = tpn.ShardCoordinator processComponents.IntContainer = tpn.MainInterceptorsContainer processComponents.FullArchiveIntContainer = tpn.FullArchiveInterceptorsContainer - processComponents.ValidatorStatistics = &mock.ValidatorStatisticsProcessorStub{ - GetValidatorInfoForRootHashCalled: func(_ []byte) (map[uint32][]*state.ValidatorInfo, error) { - return map[uint32][]*state.ValidatorInfo{ - 0: {{PublicKey: []byte("pk0")}}, - }, nil + processComponents.ValidatorStatistics = &testscommon.ValidatorStatisticsProcessorStub{ + GetValidatorInfoForRootHashCalled: func(_ []byte) (state.ShardValidatorsInfoMapHandler, error) { + ret := state.NewShardValidatorsInfoMap() + _ = ret.Add(&state.ValidatorInfo{PublicKey: []byte("pk0")}) + return ret, nil }, } - processComponents.ValidatorProvider = &mock.ValidatorsProviderStub{} + processComponents.ValidatorProvider = &stakingcommon.ValidatorsProviderStub{} processComponents.EpochTrigger = tpn.EpochStartTrigger processComponents.EpochNotifier = tpn.EpochStartNotifier processComponents.WhiteListerVerifiedTxsInternal = tpn.WhiteListerVerifiedTxs @@ -3184,12 +3217,10 @@ func CreateEnableEpochsConfig() config.EnableEpochs { SaveJailedAlwaysEnableEpoch: UnreachableEpoch, ValidatorToDelegationEnableEpoch: UnreachableEpoch, ReDelegateBelowMinCheckEnableEpoch: UnreachableEpoch, - WaitingListFixEnableEpoch: UnreachableEpoch, IncrementSCRNonceInMultiTransferEnableEpoch: UnreachableEpoch, ESDTMultiTransferEnableEpoch: UnreachableEpoch, GlobalMintBurnDisableEpoch: UnreachableEpoch, ESDTTransferRoleEnableEpoch: UnreachableEpoch, - BuiltInFunctionOnMetaEnableEpoch: UnreachableEpoch, ComputeRewardCheckpointEnableEpoch: UnreachableEpoch, SCRSizeInvariantCheckEnableEpoch: UnreachableEpoch, BackwardCompSaveKeyValueEnableEpoch: UnreachableEpoch, @@ -3252,7 +3283,7 @@ func GetDefaultCoreComponents(enableEpochsConfig config.EnableEpochs) *mock.Core EconomicsDataField: &economicsmocks.EconomicsHandlerStub{}, RatingsDataField: &testscommon.RatingsInfoMock{}, RaterField: &testscommon.RaterMock{}, - GenesisNodesSetupField: &testscommon.NodesSetupStub{}, + GenesisNodesSetupField: &genesisMocks.NodesSetupStub{}, GenesisTimeField: time.Time{}, EpochNotifierField: genericEpochNotifier, EnableRoundsHandlerField: &testscommon.EnableRoundsHandlerStub{}, @@ -3281,8 +3312,8 @@ func GetDefaultProcessComponents() *mock.ProcessComponentsStub { BootSore: &mock.BoostrapStorerMock{}, HeaderSigVerif: &mock.HeaderSigVerifierStub{}, HeaderIntegrVerif: &mock.HeaderIntegrityVerifierStub{}, - ValidatorStatistics: &mock.ValidatorStatisticsProcessorStub{}, - ValidatorProvider: 
&mock.ValidatorsProviderStub{}, + ValidatorStatistics: &testscommon.ValidatorStatisticsProcessorStub{}, + ValidatorProvider: &stakingcommon.ValidatorsProviderStub{}, BlockTrack: &mock.BlockTrackerStub{}, PendingMiniBlocksHdl: &mock.PendingMiniBlocksHandlerStub{}, ReqHandler: &testscommon.RequestHandlerStub{}, @@ -3469,7 +3500,7 @@ func getDefaultVMConfig() *config.VirtualMachineConfig { } func getDefaultNodesSetup(maxShards, numNodes uint32, address []byte, pksBytes map[uint32][]byte) sharding.GenesisNodesSetupHandler { - return &mock.NodesSetupStub{ + return &genesisMocks.NodesSetupStub{ InitialNodesInfoCalled: func() (m map[uint32][]nodesCoordinator.GenesisNodeInfoHandler, m2 map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) { oneMap := make(map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) for i := uint32(0); i < maxShards; i++ { @@ -3492,7 +3523,7 @@ func getDefaultNodesSetup(maxShards, numNodes uint32, address []byte, pksBytes m func getDefaultNodesCoordinator(maxShards uint32, pksBytes map[uint32][]byte) nodesCoordinator.NodesCoordinator { return &shardingMocks.NodesCoordinatorStub{ - ComputeValidatorsGroupCalled: func(randomness []byte, round uint64, shardId uint32, epoch uint32) (validators []nodesCoordinator.Validator, err error) { + ComputeConsensusGroupCalled: func(randomness []byte, round uint64, shardId uint32, epoch uint32) (validators []nodesCoordinator.Validator, err error) { v, _ := nodesCoordinator.NewValidator(pksBytes[shardId], 1, defaultChancesSelection) return []nodesCoordinator.Validator{v}, nil }, @@ -3522,16 +3553,8 @@ func GetDefaultEnableEpochsConfig() *config.EnableEpochs { MiniBlockPartialExecutionEnableEpoch: UnreachableEpoch, FailExecutionOnEveryAPIErrorEnableEpoch: UnreachableEpoch, DynamicGasCostForDataTrieStorageLoadEnableEpoch: UnreachableEpoch, - } -} - -// GetDefaultRoundsConfig - -func GetDefaultRoundsConfig() config.RoundConfig { - return config.RoundConfig{ - RoundActivations: map[string]config.ActivationRoundByName{ - "DisableAsyncCallV1": { - Round: "18446744073709551615", - }, - }, + StakingV4Step1EnableEpoch: UnreachableEpoch, + StakingV4Step2EnableEpoch: UnreachableEpoch, + StakingV4Step3EnableEpoch: UnreachableEpoch, } } diff --git a/integrationTests/testProcessorNodeWithCoordinator.go b/integrationTests/testProcessorNodeWithCoordinator.go index 71f6c3afd51..63392658a76 100644 --- a/integrationTests/testProcessorNodeWithCoordinator.go +++ b/integrationTests/testProcessorNodeWithCoordinator.go @@ -13,8 +13,8 @@ import ( "github.com/multiversx/mx-chain-go/sharding" "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" "github.com/multiversx/mx-chain-go/storage/cache" - "github.com/multiversx/mx-chain-go/testscommon" "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" + "github.com/multiversx/mx-chain-go/testscommon/genesisMocks" vic "github.com/multiversx/mx-chain-go/testscommon/validatorInfoCacher" ) @@ -48,7 +48,7 @@ func CreateProcessorNodesWithNodesCoordinator( waitingMap := GenValidatorsFromPubKeys(pubKeysWaiting, nbShards) waitingMapForNodesCoordinator, _ := nodesCoordinator.NodesInfoToValidators(waitingMap) - nodesSetup := &mock.NodesSetupStub{InitialNodesInfoCalled: func() (m map[uint32][]nodesCoordinator.GenesisNodeInfoHandler, m2 map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) { + nodesSetup := &genesisMocks.NodesSetupStub{InitialNodesInfoCalled: func() (m map[uint32][]nodesCoordinator.GenesisNodeInfoHandler, m2 map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) { return validatorsMap, 
waitingMap }} @@ -76,7 +76,7 @@ func CreateProcessorNodesWithNodesCoordinator( IsFullArchive: false, EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, - GenesisNodesSetupHandler: &testscommon.NodesSetupStub{}, + GenesisNodesSetupHandler: &genesisMocks.NodesSetupStub{}, } nodesCoordinatorInstance, err := nodesCoordinator.NewIndexHashedNodesCoordinator(argumentsNodesCoordinator) diff --git a/integrationTests/testProcessorNodeWithMultisigner.go b/integrationTests/testProcessorNodeWithMultisigner.go index d7de5cc05cc..42f08a62b39 100644 --- a/integrationTests/testProcessorNodeWithMultisigner.go +++ b/integrationTests/testProcessorNodeWithMultisigner.go @@ -31,6 +31,8 @@ import ( "github.com/multiversx/mx-chain-go/testscommon" "github.com/multiversx/mx-chain-go/testscommon/cryptoMocks" "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" + "github.com/multiversx/mx-chain-go/testscommon/genesisMocks" + "github.com/multiversx/mx-chain-go/testscommon/marshallerMock" "github.com/multiversx/mx-chain-go/testscommon/nodeTypeProviderMock" "github.com/multiversx/mx-chain-go/testscommon/shardingMocks" vic "github.com/multiversx/mx-chain-go/testscommon/validatorInfoCacher" @@ -89,7 +91,7 @@ func CreateNodesWithNodesCoordinatorAndTxKeys( } waitingMapForNodesCoordinator[core.MetachainShardId] = make([]nodesCoordinator.Validator, 0) - nodesSetup := &mock.NodesSetupStub{InitialNodesInfoCalled: func() (m map[uint32][]nodesCoordinator.GenesisNodeInfoHandler, m2 map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) { + nodesSetup := &genesisMocks.NodesSetupStub{InitialNodesInfoCalled: func() (m map[uint32][]nodesCoordinator.GenesisNodeInfoHandler, m2 map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) { return validatorsMap, waitingMap }} @@ -221,7 +223,7 @@ func CreateNodesWithNodesCoordinatorFactory( numNodes := nbShards*nodesPerShard + nbMetaNodes - nodesSetup := &mock.NodesSetupStub{ + nodesSetup := &genesisMocks.NodesSetupStub{ InitialNodesInfoCalled: func() (m map[uint32][]nodesCoordinator.GenesisNodeInfoHandler, m2 map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) { return validatorsMap, waitingMap }, @@ -236,6 +238,9 @@ func CreateNodesWithNodesCoordinatorFactory( MiniBlockPartialExecutionEnableEpoch: UnreachableEpoch, RefactorPeersMiniBlocksEnableEpoch: UnreachableEpoch, DynamicGasCostForDataTrieStorageLoadEnableEpoch: UnreachableEpoch, + StakingV4Step1EnableEpoch: UnreachableEpoch, + StakingV4Step2EnableEpoch: UnreachableEpoch, + StakingV4Step3EnableEpoch: UnreachableEpoch, } nodesMap := make(map[uint32][]*TestProcessorNode) @@ -406,34 +411,39 @@ func CreateNodesWithNodesCoordinatorAndHeaderSigVerifier( epochStartSubscriber := notifier.NewEpochStartSubscriptionHandler() bootStorer := CreateMemUnit() - nodesSetup := &mock.NodesSetupStub{InitialNodesInfoCalled: func() (m map[uint32][]nodesCoordinator.GenesisNodeInfoHandler, m2 map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) { + nodesSetup := &genesisMocks.NodesSetupStub{InitialNodesInfoCalled: func() (m map[uint32][]nodesCoordinator.GenesisNodeInfoHandler, m2 map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) { return validatorsMap, nil }} + nodesCoordinatorRegistryFactory, _ := nodesCoordinator.NewNodesCoordinatorRegistryFactory( + &marshallerMock.MarshalizerMock{}, + StakingV4Step2EnableEpoch, + ) completeNodesList := make([]Connectable, 0) for shardId, validatorList := range validatorsMap { consensusCache, _ := cache.NewLRUCache(10000) 
argumentsNodesCoordinator := nodesCoordinator.ArgNodesCoordinator{ - ShardConsensusGroupSize: shardConsensusGroupSize, - MetaConsensusGroupSize: metaConsensusGroupSize, - Marshalizer: TestMarshalizer, - Hasher: TestHasher, - Shuffler: nodeShuffler, - BootStorer: bootStorer, - EpochStartNotifier: epochStartSubscriber, - ShardIDAsObserver: shardId, - NbShards: uint32(nbShards), - EligibleNodes: validatorsMapForNodesCoordinator, - WaitingNodes: make(map[uint32][]nodesCoordinator.Validator), - SelfPublicKey: []byte(strconv.Itoa(int(shardId))), - ConsensusGroupCache: consensusCache, - ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, - ChanStopNode: endProcess.GetDummyEndProcessChannel(), - NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, - IsFullArchive: false, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, - ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, - GenesisNodesSetupHandler: &testscommon.NodesSetupStub{}, + ShardConsensusGroupSize: shardConsensusGroupSize, + MetaConsensusGroupSize: metaConsensusGroupSize, + Marshalizer: TestMarshalizer, + Hasher: TestHasher, + Shuffler: nodeShuffler, + BootStorer: bootStorer, + EpochStartNotifier: epochStartSubscriber, + ShardIDAsObserver: shardId, + NbShards: uint32(nbShards), + EligibleNodes: validatorsMapForNodesCoordinator, + WaitingNodes: make(map[uint32][]nodesCoordinator.Validator), + SelfPublicKey: []byte(strconv.Itoa(int(shardId))), + ConsensusGroupCache: consensusCache, + ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, + ChanStopNode: endProcess.GetDummyEndProcessChannel(), + NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, + IsFullArchive: false, + EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + GenesisNodesSetupHandler: &genesisMocks.NodesSetupStub{}, + NodesCoordinatorRegistryFactory: nodesCoordinatorRegistryFactory, } nodesCoordinatorInstance, err := nodesCoordinator.NewIndexHashedNodesCoordinator(argumentsNodesCoordinator) @@ -519,37 +529,42 @@ func CreateNodesWithNodesCoordinatorKeygenAndSingleSigner( epochStartSubscriber := notifier.NewEpochStartSubscriptionHandler() nodeShuffler := &shardingMocks.NodeShufflerMock{} - nodesSetup := &mock.NodesSetupStub{ + nodesSetup := &genesisMocks.NodesSetupStub{ InitialNodesInfoCalled: func() (m map[uint32][]nodesCoordinator.GenesisNodeInfoHandler, m2 map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) { return validatorsMap, waitingMap }, } + nodesCoordinatorRegistryFactory, _ := nodesCoordinator.NewNodesCoordinatorRegistryFactory( + &marshallerMock.MarshalizerMock{}, + StakingV4Step2EnableEpoch, + ) completeNodesList := make([]Connectable, 0) for shardId, validatorList := range validatorsMap { bootStorer := CreateMemUnit() lruCache, _ := cache.NewLRUCache(10000) argumentsNodesCoordinator := nodesCoordinator.ArgNodesCoordinator{ - ShardConsensusGroupSize: shardConsensusGroupSize, - MetaConsensusGroupSize: metaConsensusGroupSize, - Marshalizer: TestMarshalizer, - Hasher: TestHasher, - Shuffler: nodeShuffler, - EpochStartNotifier: epochStartSubscriber, - BootStorer: bootStorer, - ShardIDAsObserver: shardId, - NbShards: uint32(nbShards), - EligibleNodes: validatorsMapForNodesCoordinator, - WaitingNodes: waitingMapForNodesCoordinator, - SelfPublicKey: []byte(strconv.Itoa(int(shardId))), - ConsensusGroupCache: lruCache, - ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, - ChanStopNode: endProcess.GetDummyEndProcessChannel(), - NodeTypeProvider: 
&nodeTypeProviderMock.NodeTypeProviderStub{}, - IsFullArchive: false, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, - ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, - GenesisNodesSetupHandler: &testscommon.NodesSetupStub{}, + ShardConsensusGroupSize: shardConsensusGroupSize, + MetaConsensusGroupSize: metaConsensusGroupSize, + Marshalizer: TestMarshalizer, + Hasher: TestHasher, + Shuffler: nodeShuffler, + EpochStartNotifier: epochStartSubscriber, + BootStorer: bootStorer, + ShardIDAsObserver: shardId, + NbShards: uint32(nbShards), + EligibleNodes: validatorsMapForNodesCoordinator, + WaitingNodes: waitingMapForNodesCoordinator, + SelfPublicKey: []byte(strconv.Itoa(int(shardId))), + ConsensusGroupCache: lruCache, + ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, + ChanStopNode: endProcess.GetDummyEndProcessChannel(), + NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, + IsFullArchive: false, + EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + GenesisNodesSetupHandler: &genesisMocks.NodesSetupStub{}, + NodesCoordinatorRegistryFactory: nodesCoordinatorRegistryFactory, } nodesCoord, err := nodesCoordinator.NewIndexHashedNodesCoordinator(argumentsNodesCoordinator) diff --git a/integrationTests/testProcessorNodeWithTestWebServer.go b/integrationTests/testProcessorNodeWithTestWebServer.go index f1a11c9d72a..592d7d1bdba 100644 --- a/integrationTests/testProcessorNodeWithTestWebServer.go +++ b/integrationTests/testProcessorNodeWithTestWebServer.go @@ -101,7 +101,7 @@ func createFacadeArg(tpn *TestProcessorNode) nodeFacade.ArgNodeFacade { func createTestApiConfig() config.ApiRoutesConfig { routes := map[string][]string{ - "node": {"/status", "/metrics", "/heartbeatstatus", "/statistics", "/p2pstatus", "/debug", "/peerinfo", "/bootstrapstatus", "/connected-peers-ratings", "/managed-keys/count", "/managed-keys", "/managed-keys/eligible", "/managed-keys/waiting", "/waiting-epochs-left/:key"}, + "node": {"/status", "/metrics", "/heartbeatstatus", "/statistics", "/p2pstatus", "/debug", "/peerinfo", "/bootstrapstatus", "/connected-peers-ratings", "/managed-keys/count", "/managed-keys", "/loaded-keys", "/managed-keys/eligible", "/managed-keys/waiting", "/waiting-epochs-left/:key"}, "address": {"/:address", "/:address/balance", "/:address/username", "/:address/code-hash", "/:address/key/:key", "/:address/esdt", "/:address/esdt/:tokenIdentifier"}, "hardfork": {"/trigger"}, "network": {"/status", "/total-staked", "/economics", "/config"}, @@ -179,6 +179,7 @@ func createFacadeComponents(tpn *TestProcessorNode) nodeFacade.ApiResolver { Hasher: TestHasher, VMOutputCacher: &testscommon.CacherMock{}, DataFieldParser: dataFieldParser, + BlockChainHook: tpn.BlockchainHook, } txSimulator, err := transactionEvaluator.NewTransactionSimulator(argSimulator) @@ -194,6 +195,7 @@ func createFacadeComponents(tpn *TestProcessorNode) nodeFacade.ApiResolver { Accounts: wrappedAccounts, ShardCoordinator: tpn.ShardCoordinator, EnableEpochsHandler: tpn.EnableEpochsHandler, + BlockChain: tpn.BlockChain, } apiTransactionEvaluator, err := transactionEvaluator.NewAPITransactionEvaluator(argsTransactionEvaluator) log.LogIfError(err) @@ -273,7 +275,7 @@ func createFacadeComponents(tpn *TestProcessorNode) nodeFacade.ApiResolver { APITransactionHandler: apiTransactionHandler, APIBlockHandler: blockAPIHandler, APIInternalBlockHandler: apiInternalBlockProcessor, - GenesisNodesSetupHandler: 
&mock.NodesSetupStub{}, + GenesisNodesSetupHandler: &genesisMocks.NodesSetupStub{}, ValidatorPubKeyConverter: &testscommon.PubkeyConverterMock{}, AccountsParser: &genesisMocks.AccountsParserStub{}, GasScheduleNotifier: &testscommon.GasScheduleNotifierMock{}, diff --git a/integrationTests/testSyncNode.go b/integrationTests/testSyncNode.go index b7783e7f872..b28d5e3f953 100644 --- a/integrationTests/testSyncNode.go +++ b/integrationTests/testSyncNode.go @@ -117,14 +117,14 @@ func (tpn *TestProcessorNode) initBlockProcessorWithSync() { PendingMiniBlocksHandler: &mock.PendingMiniBlocksHandlerStub{}, EpochStartDataCreator: &mock.EpochStartDataCreatorStub{}, EpochEconomics: &mock.EpochEconomicsStub{}, - EpochRewardsCreator: &mock.EpochRewardsCreatorStub{}, - EpochValidatorInfoCreator: &mock.EpochValidatorInfoCreatorStub{}, - ValidatorStatisticsProcessor: &mock.ValidatorStatisticsProcessorStub{ + EpochRewardsCreator: &testscommon.RewardsCreatorStub{}, + EpochValidatorInfoCreator: &testscommon.EpochValidatorInfoCreatorStub{}, + ValidatorStatisticsProcessor: &testscommon.ValidatorStatisticsProcessorStub{ UpdatePeerStateCalled: func(header data.MetaHeaderHandler) ([]byte, error) { return []byte("validator stats root hash"), nil }, }, - EpochSystemSCProcessor: &mock.EpochStartSystemSCStub{}, + EpochSystemSCProcessor: &testscommon.EpochStartSystemSCStub{}, } tpn.BlockProcessor, err = block.NewMetaProcessor(arguments) diff --git a/integrationTests/vm/delegation/changeOwner_test.go b/integrationTests/vm/delegation/changeOwner_test.go index 2b23993882d..c634452ea9c 100644 --- a/integrationTests/vm/delegation/changeOwner_test.go +++ b/integrationTests/vm/delegation/changeOwner_test.go @@ -1,5 +1,3 @@ -//go:build !race - package delegation import ( @@ -23,6 +21,10 @@ var ( ) func TestDelegationChangeOwnerOnAccountHandler(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + t.Run("fix flag not activated, should not save - backwards compatibility", func(t *testing.T) { _, _, userAccount := testDelegationChangeOwnerOnAccountHandler(t, 1) diff --git a/integrationTests/vm/delegation/delegationMulti_test.go b/integrationTests/vm/delegation/delegationMulti_test.go index 90d307c741d..b0eef67dcaa 100644 --- a/integrationTests/vm/delegation/delegationMulti_test.go +++ b/integrationTests/vm/delegation/delegationMulti_test.go @@ -1,5 +1,3 @@ -//go:build !race - package delegation import ( @@ -19,6 +17,10 @@ import ( ) func TestDelegationSystemClaimMulti(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + tpn := integrationTests.NewTestProcessorNode(integrationTests.ArgTestProcessorNode{ MaxShards: 1, NodeShardId: core.MetachainShardId, @@ -127,6 +129,10 @@ func TestDelegationSystemClaimMulti(t *testing.T) { } func TestDelegationSystemRedelegateMulti(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + tpn := integrationTests.NewTestProcessorNode(integrationTests.ArgTestProcessorNode{ MaxShards: 1, NodeShardId: core.MetachainShardId, diff --git a/integrationTests/vm/delegation/delegationScenarios_test.go b/integrationTests/vm/delegation/delegationScenarios_test.go index e1d58b12d6d..4b9dbd07fba 100644 --- a/integrationTests/vm/delegation/delegationScenarios_test.go +++ b/integrationTests/vm/delegation/delegationScenarios_test.go @@ -1,5 +1,3 @@ -//go:build !race - package delegation import ( @@ -32,6 +30,10 @@ import ( ) func TestDelegationSystemNodesOperationsTestBackwardComp(t *testing.T) { + if testing.Short() { + t.Skip("this is 
not a short test") + } + tpn := integrationTests.NewTestProcessorNode(integrationTests.ArgTestProcessorNode{ MaxShards: 1, NodeShardId: core.MetachainShardId, @@ -82,6 +84,10 @@ func TestDelegationSystemNodesOperationsTestBackwardComp(t *testing.T) { } func TestDelegationSystemNodesOperations(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + tpn := integrationTests.NewTestProcessorNode(integrationTests.ArgTestProcessorNode{ MaxShards: 1, NodeShardId: core.MetachainShardId, @@ -163,6 +169,10 @@ func TestDelegationSystemNodesOperations(t *testing.T) { } func TestDelegationSystemReStakeNodes(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + tpn := integrationTests.NewTestProcessorNode(integrationTests.ArgTestProcessorNode{ MaxShards: 1, NodeShardId: core.MetachainShardId, @@ -230,6 +240,10 @@ func TestDelegationSystemReStakeNodes(t *testing.T) { } func TestDelegationChangeConfig(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + tpn := integrationTests.NewTestProcessorNode(integrationTests.ArgTestProcessorNode{ MaxShards: 1, NodeShardId: core.MetachainShardId, @@ -288,6 +302,10 @@ func TestDelegationChangeConfig(t *testing.T) { } func TestDelegationSystemDelegateUnDelegateFromTopUpWithdraw(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + tpn := integrationTests.NewTestProcessorNode(integrationTests.ArgTestProcessorNode{ MaxShards: 1, NodeShardId: core.MetachainShardId, @@ -348,6 +366,10 @@ func TestDelegationSystemDelegateUnDelegateFromTopUpWithdraw(t *testing.T) { } func TestDelegationSystemDelegateUnDelegateOnlyPartOfDelegation(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + tpn := integrationTests.NewTestProcessorNode(integrationTests.ArgTestProcessorNode{ MaxShards: 1, NodeShardId: core.MetachainShardId, @@ -409,6 +431,10 @@ func TestDelegationSystemDelegateUnDelegateOnlyPartOfDelegation(t *testing.T) { } func TestDelegationSystemMultipleDelegationContractsAndSameBlsKeysShouldNotWork(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + tpn := integrationTests.NewTestProcessorNode(integrationTests.ArgTestProcessorNode{ MaxShards: 1, NodeShardId: core.MetachainShardId, @@ -483,6 +509,10 @@ func TestDelegationSystemMultipleDelegationContractsAndSameBlsKeysShouldNotWork( } func TestDelegationSystemMultipleDelegationContractsAndSameDelegators(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + tpn := integrationTests.NewTestProcessorNode(integrationTests.ArgTestProcessorNode{ MaxShards: 1, NodeShardId: core.MetachainShardId, @@ -551,6 +581,10 @@ func TestDelegationSystemMultipleDelegationContractsAndSameDelegators(t *testing } func TestDelegationRewardsComputationAfterChangeServiceFee(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + tpn := integrationTests.NewTestProcessorNode(integrationTests.ArgTestProcessorNode{ MaxShards: 1, NodeShardId: core.MetachainShardId, @@ -655,6 +689,10 @@ func TestDelegationRewardsComputationAfterChangeServiceFee(t *testing.T) { } func TestDelegationUnJail(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + tpn := integrationTests.NewTestProcessorNode(integrationTests.ArgTestProcessorNode{ MaxShards: 1, NodeShardId: core.MetachainShardId, @@ -718,6 +756,10 @@ func TestDelegationUnJail(t *testing.T) { } func TestDelegationSystemDelegateSameUsersAFewTimes(t *testing.T) { + if 
testing.Short() { + t.Skip("this is not a short test") + } + tpn := integrationTests.NewTestProcessorNode(integrationTests.ArgTestProcessorNode{ MaxShards: 1, NodeShardId: core.MetachainShardId, @@ -779,6 +821,10 @@ func TestDelegationSystemDelegateSameUsersAFewTimes(t *testing.T) { } func TestDelegationSystemMultipleDelegationContractsAndSameDelegatorsClaimRewardsMultipleTimeUndelegateClaimRewardsMultipleTime(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + tpn := integrationTests.NewTestProcessorNode(integrationTests.ArgTestProcessorNode{ MaxShards: 1, NodeShardId: core.MetachainShardId, @@ -931,6 +977,10 @@ func TestDelegationSystemMultipleDelegationContractsAndSameDelegatorsClaimReward } func TestDelegationSystemDelegateUnDelegateReceiveRewardsWhenAllIsUndelegated(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + tpn := integrationTests.NewTestProcessorNode(integrationTests.ArgTestProcessorNode{ MaxShards: 1, NodeShardId: core.MetachainShardId, @@ -1069,6 +1119,10 @@ func TestDelegationSystemDelegateUnDelegateReceiveRewardsWhenAllIsUndelegated(t } func TestDelegationSystemCleanUpContract(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + tpn := integrationTests.NewTestProcessorNode(integrationTests.ArgTestProcessorNode{ MaxShards: 1, NodeShardId: core.MetachainShardId, diff --git a/integrationTests/vm/delegation/delegation_test.go b/integrationTests/vm/delegation/delegation_test.go index 65ff98aab2f..9bae5235076 100644 --- a/integrationTests/vm/delegation/delegation_test.go +++ b/integrationTests/vm/delegation/delegation_test.go @@ -1,5 +1,3 @@ -//go:build !race - package delegation import ( diff --git a/integrationTests/vm/esdt/common.go b/integrationTests/vm/esdt/common.go index 3287641d0e6..2d04331a85f 100644 --- a/integrationTests/vm/esdt/common.go +++ b/integrationTests/vm/esdt/common.go @@ -16,6 +16,7 @@ import ( "github.com/multiversx/mx-chain-go/process" vmFactory "github.com/multiversx/mx-chain-go/process/factory" "github.com/multiversx/mx-chain-go/state" + "github.com/multiversx/mx-chain-go/testscommon" "github.com/multiversx/mx-chain-go/testscommon/txDataBuilder" "github.com/multiversx/mx-chain-go/vm" vmcommon "github.com/multiversx/mx-chain-vm-common-go" @@ -170,7 +171,7 @@ func CreateNodesAndPrepareBalances(numOfShards int) ([]*integrationTests.TestPro ScheduledMiniBlocksEnableEpoch: integrationTests.UnreachableEpoch, MiniBlockPartialExecutionEnableEpoch: integrationTests.UnreachableEpoch, } - roundsConfig := integrationTests.GetDefaultRoundsConfig() + roundsConfig := testscommon.GetDefaultRoundsConfig() return CreateNodesAndPrepareBalancesWithEpochsAndRoundsConfig( numOfShards, enableEpochs, @@ -178,7 +179,7 @@ func CreateNodesAndPrepareBalances(numOfShards int) ([]*integrationTests.TestPro ) } -// CreateNodesAndPrepareBalances - +// CreateNodesAndPrepareBalancesWithEpochsAndRoundsConfig - func CreateNodesAndPrepareBalancesWithEpochsAndRoundsConfig(numOfShards int, enableEpochs config.EnableEpochs, roundsConfig config.RoundConfig) ([]*integrationTests.TestProcessorNode, []int) { nodesPerShard := 1 numMetachainNodes := 1 @@ -230,6 +231,7 @@ func IssueTestToken(nodes []*integrationTests.TestProcessorNode, initialSupply i issueTestToken(nodes, initialSupply, ticker, core.MinMetaTxExtraGasCost) } +// IssueTestTokenWithIssuerAccount - func IssueTestTokenWithIssuerAccount(nodes []*integrationTests.TestProcessorNode, issuerAccount *integrationTests.TestWalletAccount, initialSupply 
int64, ticker string) { issueTestTokenWithIssuerAccount(nodes, issuerAccount, initialSupply, ticker, core.MinMetaTxExtraGasCost) } @@ -302,6 +304,7 @@ func CheckNumCallBacks( } } +// CheckForwarderRawSavedCallbackArgs - func CheckForwarderRawSavedCallbackArgs( t *testing.T, address []byte, @@ -338,13 +341,14 @@ func CheckForwarderRawSavedCallbackArgs( } } -/// ForwarderRawSavedPaymentInfo contains token data to be checked in the forwarder-raw contract. +// ForwarderRawSavedPaymentInfo contains token data to be checked in the forwarder-raw contract. type ForwarderRawSavedPaymentInfo struct { TokenId string Nonce uint64 Payment *big.Int } +// CheckForwarderRawSavedCallbackPayments - func CheckForwarderRawSavedCallbackPayments( t *testing.T, address []byte, diff --git a/integrationTests/vm/esdt/localFuncs/esdtLocalFunsSC_MockContracts_test.go b/integrationTests/vm/esdt/localFuncs/esdtLocalFunsSC_MockContracts_test.go index e5abb053058..c088215b3c0 100644 --- a/integrationTests/vm/esdt/localFuncs/esdtLocalFunsSC_MockContracts_test.go +++ b/integrationTests/vm/esdt/localFuncs/esdtLocalFunsSC_MockContracts_test.go @@ -1,5 +1,3 @@ -//go:build !race - package localFuncs import ( diff --git a/integrationTests/vm/esdt/localFuncs/esdtLocalFunsSC_test.go b/integrationTests/vm/esdt/localFuncs/esdtLocalFunsSC_test.go index c5e9da76d9b..742531fb801 100644 --- a/integrationTests/vm/esdt/localFuncs/esdtLocalFunsSC_test.go +++ b/integrationTests/vm/esdt/localFuncs/esdtLocalFunsSC_test.go @@ -1,5 +1,3 @@ -//go:build !race - package localFuncs import ( @@ -265,17 +263,22 @@ func TestESDTSetTransferRoles(t *testing.T) { } func TestESDTSetTransferRolesForwardAsyncCallFailsIntra(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testESDTWithTransferRoleAndForwarder(t, 1) } func TestESDTSetTransferRolesForwardAsyncCallFailsCross(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testESDTWithTransferRoleAndForwarder(t, 2) } func testESDTWithTransferRoleAndForwarder(t *testing.T, numShards int) { - if testing.Short() { - t.Skip("this is not a short test") - } nodes, idxProposers := esdtCommon.CreateNodesAndPrepareBalances(numShards) defer func() { @@ -325,18 +328,22 @@ func testESDTWithTransferRoleAndForwarder(t *testing.T, numShards int) { } func TestAsyncCallsAndCallBacksArgumentsIntra(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testAsyncCallAndCallBacksArguments(t, 1) } func TestAsyncCallsAndCallBacksArgumentsCross(t *testing.T) { - testAsyncCallAndCallBacksArguments(t, 2) -} - -func testAsyncCallAndCallBacksArguments(t *testing.T, numShards int) { if testing.Short() { t.Skip("this is not a short test") } + testAsyncCallAndCallBacksArguments(t, 2) +} + +func testAsyncCallAndCallBacksArguments(t *testing.T, numShards int) { nodes, idxProposers := esdtCommon.CreateNodesAndPrepareBalances(numShards) defer func() { for _, n := range nodes { diff --git a/integrationTests/vm/esdt/multisign/esdtMultisign_test.go b/integrationTests/vm/esdt/multisign/esdtMultisign_test.go index 42b2bcacbdc..2beb0fa319c 100644 --- a/integrationTests/vm/esdt/multisign/esdtMultisign_test.go +++ b/integrationTests/vm/esdt/multisign/esdtMultisign_test.go @@ -1,5 +1,3 @@ -//go:build !race - package multisign import ( diff --git a/integrationTests/vm/esdt/nft/esdtNFT/esdtNft_test.go b/integrationTests/vm/esdt/nft/esdtNFT/esdtNft_test.go index 99138f77ce5..a1db92372bd 100644 --- a/integrationTests/vm/esdt/nft/esdtNFT/esdtNft_test.go +++ 
b/integrationTests/vm/esdt/nft/esdtNFT/esdtNft_test.go @@ -1,5 +1,3 @@ -//go:build !race - package esdtNFT import ( @@ -908,6 +906,10 @@ func testESDTSemiFungibleTokenTransferRole(t *testing.T, numOfShards int) { } func TestESDTSFTWithEnhancedTransferRole(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + nodesPerShard := 2 numMetachainNodes := 2 numOfShards := 3 diff --git a/integrationTests/vm/esdt/nft/esdtNFTSCs/esdtNFTSCs_test.go b/integrationTests/vm/esdt/nft/esdtNFTSCs/esdtNFTSCs_test.go index 8f62294a776..534c1c7435e 100644 --- a/integrationTests/vm/esdt/nft/esdtNFTSCs/esdtNFTSCs_test.go +++ b/integrationTests/vm/esdt/nft/esdtNFTSCs/esdtNFTSCs_test.go @@ -1,5 +1,3 @@ -//go:build !race - package esdtNFTSCs import ( diff --git a/integrationTests/vm/esdt/process/esdtProcess_test.go b/integrationTests/vm/esdt/process/esdtProcess_test.go index 470280c2f81..5a1a2414fb3 100644 --- a/integrationTests/vm/esdt/process/esdtProcess_test.go +++ b/integrationTests/vm/esdt/process/esdtProcess_test.go @@ -1,5 +1,3 @@ -//go:build !race - package process import ( @@ -42,7 +40,6 @@ func TestESDTIssueAndTransactionsOnMultiShardEnvironment(t *testing.T) { enableEpochs := config.EnableEpochs{ GlobalMintBurnDisableEpoch: integrationTests.UnreachableEpoch, - BuiltInFunctionOnMetaEnableEpoch: integrationTests.UnreachableEpoch, OptimizeGasUsedInCrossMiniBlocksEnableEpoch: integrationTests.UnreachableEpoch, ScheduledMiniBlocksEnableEpoch: integrationTests.UnreachableEpoch, MiniBlockPartialExecutionEnableEpoch: integrationTests.UnreachableEpoch, @@ -174,7 +171,6 @@ func TestESDTCallBurnOnANonBurnableToken(t *testing.T) { enableEpochs := config.EnableEpochs{ GlobalMintBurnDisableEpoch: integrationTests.UnreachableEpoch, - BuiltInFunctionOnMetaEnableEpoch: integrationTests.UnreachableEpoch, OptimizeGasUsedInCrossMiniBlocksEnableEpoch: integrationTests.UnreachableEpoch, ScheduledMiniBlocksEnableEpoch: integrationTests.UnreachableEpoch, MiniBlockPartialExecutionEnableEpoch: integrationTests.UnreachableEpoch, @@ -333,6 +329,10 @@ func TestESDTIssueAndSelfTransferShouldNotChangeBalance(t *testing.T) { } func TestESDTIssueFromASmartContractSimulated(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + metaNode := integrationTests.NewTestProcessorNode(integrationTests.ArgTestProcessorNode{ MaxShards: 1, NodeShardId: core.MetachainShardId, @@ -878,133 +878,6 @@ func TestCallbackPaymentEgld(t *testing.T) { }) } -func TestScCallsScWithEsdtCrossShard(t *testing.T) { - t.Skip("test is not ready yet") - - numOfShards := 2 - nodesPerShard := 2 - numMetachainNodes := 2 - - nodes := integrationTests.CreateNodes( - numOfShards, - nodesPerShard, - numMetachainNodes, - ) - - idxProposers := make([]int, numOfShards+1) - for i := 0; i < numOfShards; i++ { - idxProposers[i] = i * nodesPerShard - } - idxProposers[numOfShards] = numOfShards * nodesPerShard - - integrationTests.DisplayAndStartNodes(nodes) - - defer func() { - for _, n := range nodes { - n.Close() - } - }() - - initialVal := big.NewInt(10000000000) - integrationTests.MintAllNodes(nodes, initialVal) - - round := uint64(0) - nonce := uint64(0) - round = integrationTests.IncrementAndPrintRound(round) - nonce++ - - // send token issue - - initialSupply := int64(10000000000) - ticker := "TCK" - esdtCommon.IssueTestToken(nodes, initialSupply, ticker) - tokenIssuer := nodes[0] - - time.Sleep(time.Second) - nrRoundsToPropagateMultiShard := 12 - nonce, round = integrationTests.WaitOperationToBeDone(t, nodes, 
nrRoundsToPropagateMultiShard, nonce, round, idxProposers) - time.Sleep(time.Second) - - tokenIdentifier := string(integrationTests.GetTokenIdentifier(nodes, []byte(ticker))) - esdtCommon.CheckAddressHasTokens(t, tokenIssuer.OwnAccount.Address, nodes, []byte(tokenIdentifier), 0, initialSupply) - - // deploy the smart contracts - - vaultCode := wasm.GetSCCode("../testdata/vault.wasm") - secondScAddress, _ := tokenIssuer.BlockchainHook.NewAddress(tokenIssuer.OwnAccount.Address, tokenIssuer.OwnAccount.Nonce, vmFactory.WasmVirtualMachine) - - integrationTests.CreateAndSendTransaction( - nodes[0], - nodes, - big.NewInt(0), - testVm.CreateEmptyAddress(), - wasm.CreateDeployTxData(vaultCode), - integrationTests.AdditionalGasLimit, - ) - - nonce, round = integrationTests.WaitOperationToBeDone(t, nodes, 4, nonce, round, idxProposers) - _, err := nodes[0].AccntState.GetExistingAccount(secondScAddress) - require.Nil(t, err) - - forwarderCode := wasm.GetSCCode("../testdata/forwarder-raw.wasm") - forwarder, _ := nodes[2].BlockchainHook.NewAddress(nodes[2].OwnAccount.Address, nodes[2].OwnAccount.Nonce, vmFactory.WasmVirtualMachine) - integrationTests.CreateAndSendTransaction( - nodes[2], - nodes, - big.NewInt(0), - testVm.CreateEmptyAddress(), - wasm.CreateDeployTxData(forwarderCode), - integrationTests.AdditionalGasLimit, - ) - - nonce, round = integrationTests.WaitOperationToBeDone(t, nodes, 4, nonce, round, idxProposers) - _, err = nodes[2].AccntState.GetExistingAccount(forwarder) - require.Nil(t, err) - - txData := txDataBuilder.NewBuilder() - - // call forwarder with esdt, and the forwarder automatically calls second sc - valueToSendToSc := int64(1000) - txData.Clear().TransferESDT(tokenIdentifier, valueToSendToSc) - txData.Str("forward_async_call_half_payment").Bytes(secondScAddress).Str("accept_funds") - integrationTests.CreateAndSendTransaction(tokenIssuer, nodes, big.NewInt(0), forwarder, txData.ToString(), integrationTests.AdditionalGasLimit) - - time.Sleep(time.Second) - nonce, round = integrationTests.WaitOperationToBeDone(t, nodes, nrRoundsToPropagateMultiShard, nonce, round, idxProposers) - time.Sleep(time.Second) - - esdtCommon.CheckAddressHasTokens(t, tokenIssuer.OwnAccount.Address, nodes, []byte(tokenIdentifier), 0, initialSupply-valueToSendToSc) - esdtCommon.CheckAddressHasTokens(t, forwarder, nodes, []byte(tokenIdentifier), 0, valueToSendToSc/2) - esdtCommon.CheckAddressHasTokens(t, secondScAddress, nodes, []byte(tokenIdentifier), 0, valueToSendToSc/2) - - esdtCommon.CheckNumCallBacks(t, forwarder, nodes, 1) - esdtCommon.CheckForwarderRawSavedCallbackArgs(t, forwarder, nodes, 1, vmcommon.Ok, [][]byte{}) - esdtCommon.CheckForwarderRawSavedCallbackPayments(t, forwarder, nodes, []*esdtCommon.ForwarderRawSavedPaymentInfo{}) - - // call forwarder to ask the second one to send it back some esdt - valueToRequest := valueToSendToSc / 4 - txData.Clear().Func("forward_async_call").Bytes(secondScAddress) - txData.Str("retrieve_funds").Str(tokenIdentifier).Int64(0).Int64(valueToRequest) - integrationTests.CreateAndSendTransaction(tokenIssuer, nodes, big.NewInt(0), forwarder, txData.ToString(), integrationTests.AdditionalGasLimit) - - time.Sleep(time.Second) - _, _ = integrationTests.WaitOperationToBeDone(t, nodes, nrRoundsToPropagateMultiShard, nonce, round, idxProposers) - time.Sleep(time.Second) - - esdtCommon.CheckAddressHasTokens(t, forwarder, nodes, []byte(tokenIdentifier), 0, valueToSendToSc*3/4) - esdtCommon.CheckAddressHasTokens(t, secondScAddress, nodes, []byte(tokenIdentifier), 0, 
valueToSendToSc/4) - - esdtCommon.CheckNumCallBacks(t, forwarder, nodes, 2) - esdtCommon.CheckForwarderRawSavedCallbackArgs(t, forwarder, nodes, 2, vmcommon.Ok, [][]byte{}) - esdtCommon.CheckForwarderRawSavedCallbackPayments(t, forwarder, nodes, []*esdtCommon.ForwarderRawSavedPaymentInfo{ - { - TokenId: "EGLD", - Nonce: 0, - Payment: big.NewInt(valueToSendToSc), - }, - }) -} - func TestScCallsScWithEsdtIntraShard_SecondScRefusesPayment(t *testing.T) { if testing.Short() { t.Skip("this is not a short test") @@ -1410,7 +1283,6 @@ func TestExecOnDestWithTokenTransferFromScAtoScBWithIntermediaryExecOnDest_NotEn enableEpochs := config.EnableEpochs{ GlobalMintBurnDisableEpoch: integrationTests.UnreachableEpoch, - BuiltInFunctionOnMetaEnableEpoch: integrationTests.UnreachableEpoch, SCProcessorV2EnableEpoch: integrationTests.UnreachableEpoch, FailExecutionOnEveryAPIErrorEnableEpoch: integrationTests.UnreachableEpoch, } @@ -2106,7 +1978,6 @@ func TestIssueAndBurnESDT_MaxGasPerBlockExceeded(t *testing.T) { enableEpochs := config.EnableEpochs{ GlobalMintBurnDisableEpoch: integrationTests.UnreachableEpoch, - BuiltInFunctionOnMetaEnableEpoch: integrationTests.UnreachableEpoch, MaxBlockchainHookCountersEnableEpoch: integrationTests.UnreachableEpoch, } nodes := integrationTests.CreateNodesWithEnableEpochs( diff --git a/integrationTests/vm/esdt/roles/esdtRoles_test.go b/integrationTests/vm/esdt/roles/esdtRoles_test.go index aa2834062c4..5c117ed4edd 100644 --- a/integrationTests/vm/esdt/roles/esdtRoles_test.go +++ b/integrationTests/vm/esdt/roles/esdtRoles_test.go @@ -1,5 +1,3 @@ -//go:build !race - package roles import ( diff --git a/integrationTests/vm/mockVM/vmDeploy/vmDeploy_test.go b/integrationTests/vm/mockVM/vmDeploy/vmDeploy_test.go index 4390a3eff47..1a53d3ce4e9 100644 --- a/integrationTests/vm/mockVM/vmDeploy/vmDeploy_test.go +++ b/integrationTests/vm/mockVM/vmDeploy/vmDeploy_test.go @@ -15,6 +15,10 @@ import ( ) func TestVmDeployWithoutTransferShouldDeploySCCode(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + vmOpGas := uint64(1) senderAddressBytes := []byte("12345678901234567890123456789012") senderNonce := uint64(11) @@ -70,6 +74,10 @@ func TestVmDeployWithoutTransferShouldDeploySCCode(t *testing.T) { } func TestVmDeployWithTransferShouldDeploySCCode(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + vmOpGas := uint64(1) senderAddressBytes := []byte("12345678901234567890123456789012") senderNonce := uint64(11) @@ -124,6 +132,10 @@ func TestVmDeployWithTransferShouldDeploySCCode(t *testing.T) { } func TestVmDeployWithTransferAndGasShouldDeploySCCode(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + vmOpGas := uint64(1) senderAddressBytes := []byte("12345678901234567890123456789012") senderNonce := uint64(11) @@ -181,6 +193,10 @@ func TestVmDeployWithTransferAndGasShouldDeploySCCode(t *testing.T) { } func TestVMDeployWithTransferWithInsufficientGasShouldReturnErr(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + vmOpGas := uint64(1000) senderAddressBytes := []byte("12345678901234567890123456789012") senderNonce := uint64(11) diff --git a/integrationTests/vm/mockVM/vmGet/vmGet_test.go b/integrationTests/vm/mockVM/vmGet/vmGet_test.go index bd818df6884..5083c44a276 100644 --- a/integrationTests/vm/mockVM/vmGet/vmGet_test.go +++ b/integrationTests/vm/mockVM/vmGet/vmGet_test.go @@ -29,6 +29,10 @@ import ( ) func TestVmGetShouldReturnValue(t *testing.T) { + if 
testing.Short() { + t.Skip("this is not a short test") + } + accnts, destinationAddressBytes, expectedValueForVar := deploySmartContract(t) mockVM := vm.CreateOneSCExecutorMockVM(accnts) diff --git a/integrationTests/vm/mockVM/vmRunContract/vmRunContract_test.go b/integrationTests/vm/mockVM/vmRunContract/vmRunContract_test.go index 00f8ef20610..af7d0e33e47 100644 --- a/integrationTests/vm/mockVM/vmRunContract/vmRunContract_test.go +++ b/integrationTests/vm/mockVM/vmRunContract/vmRunContract_test.go @@ -19,6 +19,10 @@ import ( // TODO add integration and unit tests with generating and broadcasting transaction with empty recv address func TestRunSCWithoutTransferShouldRunSCCode(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + vmOpGas := uint64(1) senderAddressBytes := []byte("12345678901234567890123456789012") senderNonce := uint64(11) @@ -89,6 +93,10 @@ func TestRunSCWithoutTransferShouldRunSCCode(t *testing.T) { } func TestRunSCWithTransferShouldRunSCCode(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + vmOpGas := uint64(1) senderAddressBytes := []byte("12345678901234567890123456789012") senderNonce := uint64(11) @@ -160,6 +168,10 @@ func TestRunSCWithTransferShouldRunSCCode(t *testing.T) { } func TestRunWithTransferAndGasShouldRunSCCode(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + vmOpGas := uint64(1) senderAddressBytes := []byte("12345678901234567890123456789012") senderNonce := uint64(11) @@ -231,6 +243,10 @@ func TestRunWithTransferAndGasShouldRunSCCode(t *testing.T) { } func TestRunWithTransferWithInsufficientGasShouldReturnErr(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + vmOpGas := uint64(1) senderAddressBytes := []byte("12345678901234567890123456789012") senderNonce := uint64(11) diff --git a/integrationTests/vm/staking/baseTestMetaProcessor.go b/integrationTests/vm/staking/baseTestMetaProcessor.go new file mode 100644 index 00000000000..0ae2b5ed2d8 --- /dev/null +++ b/integrationTests/vm/staking/baseTestMetaProcessor.go @@ -0,0 +1,394 @@ +package staking + +import ( + "fmt" + "math/big" + "strconv" + "strings" + "testing" + "time" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-core-go/data/block" + "github.com/multiversx/mx-chain-core-go/display" + "github.com/multiversx/mx-chain-core-go/marshal" + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/dataRetriever" + "github.com/multiversx/mx-chain-go/epochStart" + "github.com/multiversx/mx-chain-go/epochStart/metachain" + "github.com/multiversx/mx-chain-go/factory" + "github.com/multiversx/mx-chain-go/integrationTests" + "github.com/multiversx/mx-chain-go/process" + vmFactory "github.com/multiversx/mx-chain-go/process/factory" + "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" + "github.com/multiversx/mx-chain-go/state" + "github.com/multiversx/mx-chain-go/testscommon" + dataRetrieverMock "github.com/multiversx/mx-chain-go/testscommon/dataRetriever" + "github.com/multiversx/mx-chain-go/testscommon/stakingcommon" + statusHandlerMock "github.com/multiversx/mx-chain-go/testscommon/statusHandler" + "github.com/multiversx/mx-chain-go/vm/systemSmartContracts/defaults" + vmcommon "github.com/multiversx/mx-chain-vm-common-go" + arwenConfig "github.com/multiversx/mx-chain-vm-v1_4-go/config" + "github.com/stretchr/testify/require" +) + 
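For reference, the pattern applied throughout the test-file hunks above replaces the compile-time `//go:build !race` exclusion with a runtime gate: the long delegation, ESDT and mockVM integration tests now skip themselves whenever the suite runs in short mode, instead of being dropped entirely from race-enabled builds. A minimal, self-contained sketch of that pattern (package and test name are hypothetical, not part of the patch):

package delegation // hypothetical placement; each touched test package follows the same shape

import "testing"

func TestSomeLongScenario(t *testing.T) { // hypothetical test name
	if testing.Short() {
		t.Skip("this is not a short test")
	}

	// ... the full multi-round integration scenario runs only without -short ...
}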
+const ( + stakingV4Step1EnableEpoch = 1 + stakingV4Step2EnableEpoch = 2 + stakingV4Step3EnableEpoch = 3 + addressLength = 15 + nodePrice = 1000 +) + +func haveTime() bool { return true } +func noTime() bool { return false } + +type nodesConfig struct { + eligible map[uint32][][]byte + waiting map[uint32][][]byte + leaving map[uint32][][]byte + shuffledOut map[uint32][][]byte + queue [][]byte + auction [][]byte + new [][]byte +} + +// TestMetaProcessor - +type TestMetaProcessor struct { + MetaBlockProcessor process.BlockProcessor + NodesCoordinator nodesCoordinator.NodesCoordinator + ValidatorStatistics process.ValidatorStatisticsProcessor + EpochStartTrigger integrationTests.TestEpochStartTrigger + BlockChainHandler data.ChainHandler + NodesConfig nodesConfig + AccountsAdapter state.AccountsAdapter + Marshaller marshal.Marshalizer + TxCacher dataRetriever.TransactionCacher + TxCoordinator process.TransactionCoordinator + SystemVM vmcommon.VMExecutionHandler + BlockChainHook process.BlockChainHookHandler + StakingDataProvider epochStart.StakingDataProvider + + currentRound uint64 +} + +func newTestMetaProcessor( + coreComponents factory.CoreComponentsHolder, + dataComponents factory.DataComponentsHolder, + bootstrapComponents factory.BootstrapComponentsHolder, + statusComponents factory.StatusComponentsHolder, + stateComponents factory.StateComponentsHandler, + nc nodesCoordinator.NodesCoordinator, + maxNodesConfig []config.MaxNodesChangeConfig, + queue [][]byte, +) *TestMetaProcessor { + saveNodesConfig( + stateComponents.AccountsAdapter(), + coreComponents.InternalMarshalizer(), + nc, + maxNodesConfig, + ) + + stakingcommon.SaveDelegationManagerConfig( + stateComponents.AccountsAdapter(), + coreComponents.InternalMarshalizer(), + ) + + gasScheduleNotifier := createGasScheduleNotifier() + argsBlockChainHook, blockChainHook := createBlockChainHook( + dataComponents, + coreComponents, + stateComponents.AccountsAdapter(), + bootstrapComponents.ShardCoordinator(), + gasScheduleNotifier, + ) + + metaVmFactory := createVMContainerFactory( + coreComponents, + gasScheduleNotifier, + blockChainHook, + argsBlockChainHook, + stateComponents, + bootstrapComponents.ShardCoordinator(), + nc, + maxNodesConfig[0].MaxNumNodes, + ) + vmContainer, _ := metaVmFactory.Create() + systemVM, _ := vmContainer.Get(vmFactory.SystemVirtualMachine) + + validatorStatisticsProcessor := createValidatorStatisticsProcessor( + dataComponents, + coreComponents, + nc, + bootstrapComponents.ShardCoordinator(), + stateComponents.PeerAccounts(), + ) + stakingDataProvider := createStakingDataProvider( + coreComponents.EnableEpochsHandler(), + systemVM, + ) + scp := createSystemSCProcessor( + nc, + coreComponents, + stateComponents, + bootstrapComponents.ShardCoordinator(), + maxNodesConfig, + validatorStatisticsProcessor, + systemVM, + stakingDataProvider, + ) + + txCoordinator := &testscommon.TransactionCoordinatorMock{} + epochStartTrigger := createEpochStartTrigger(coreComponents, dataComponents.StorageService()) + + eligible, _ := nc.GetAllEligibleValidatorsPublicKeys(0) + waiting, _ := nc.GetAllWaitingValidatorsPublicKeys(0) + shuffledOut, _ := nc.GetAllShuffledOutValidatorsPublicKeys(0) + + return &TestMetaProcessor{ + AccountsAdapter: stateComponents.AccountsAdapter(), + Marshaller: coreComponents.InternalMarshalizer(), + NodesConfig: nodesConfig{ + eligible: eligible, + waiting: waiting, + shuffledOut: shuffledOut, + queue: queue, + auction: make([][]byte, 0), + }, + MetaBlockProcessor: createMetaBlockProcessor( + nc, + 
scp, + coreComponents, + dataComponents, + bootstrapComponents, + statusComponents, + stateComponents, + validatorStatisticsProcessor, + blockChainHook, + metaVmFactory, + epochStartTrigger, + vmContainer, + txCoordinator, + ), + currentRound: 1, + NodesCoordinator: nc, + ValidatorStatistics: validatorStatisticsProcessor, + EpochStartTrigger: epochStartTrigger, + BlockChainHandler: dataComponents.Blockchain(), + TxCacher: dataComponents.Datapool().CurrentBlockTxs(), + TxCoordinator: txCoordinator, + SystemVM: systemVM, + BlockChainHook: blockChainHook, + StakingDataProvider: stakingDataProvider, + } +} + +func saveNodesConfig( + accountsDB state.AccountsAdapter, + marshaller marshal.Marshalizer, + nc nodesCoordinator.NodesCoordinator, + maxNodesConfig []config.MaxNodesChangeConfig, +) { + eligibleMap, _ := nc.GetAllEligibleValidatorsPublicKeys(0) + waitingMap, _ := nc.GetAllWaitingValidatorsPublicKeys(0) + allStakedNodes := int64(len(getAllPubKeys(eligibleMap)) + len(getAllPubKeys(waitingMap))) + + maxNumNodes := allStakedNodes + if len(maxNodesConfig) > 0 { + maxNumNodes = int64(maxNodesConfig[0].MaxNumNodes) + } + + stakingcommon.SaveNodesConfig( + accountsDB, + marshaller, + allStakedNodes, + 1, + maxNumNodes, + ) +} + +func createGasScheduleNotifier() core.GasScheduleNotifier { + gasSchedule := arwenConfig.MakeGasMapForTests() + defaults.FillGasMapInternal(gasSchedule, 1) + return testscommon.NewGasScheduleNotifierMock(gasSchedule) +} + +func createEpochStartTrigger( + coreComponents factory.CoreComponentsHolder, + storageService dataRetriever.StorageService, +) integrationTests.TestEpochStartTrigger { + argsEpochStart := &metachain.ArgsNewMetaEpochStartTrigger{ + Settings: &config.EpochStartConfig{ + MinRoundsBetweenEpochs: 10, + RoundsPerEpoch: 10, + }, + Epoch: 0, + EpochStartNotifier: coreComponents.EpochStartNotifierWithConfirm(), + Storage: storageService, + Marshalizer: coreComponents.InternalMarshalizer(), + Hasher: coreComponents.Hasher(), + AppStatusHandler: &statusHandlerMock.AppStatusHandlerStub{}, + DataPool: dataRetrieverMock.NewPoolsHolderMock(), + } + + epochStartTrigger, _ := metachain.NewEpochStartTrigger(argsEpochStart) + testTrigger := &metachain.TestTrigger{} + testTrigger.SetTrigger(epochStartTrigger) + + return testTrigger +} + +// Process - +func (tmp *TestMetaProcessor) Process(t *testing.T, numOfRounds uint64) { + for r := tmp.currentRound; r < tmp.currentRound+numOfRounds; r++ { + header := tmp.createNewHeader(t, r) + tmp.createAndCommitBlock(t, header, haveTime) + } + + tmp.currentRound += numOfRounds +} + +func (tmp *TestMetaProcessor) createNewHeader(t *testing.T, round uint64) *block.MetaBlock { + _, err := tmp.MetaBlockProcessor.CreateNewHeader(round, round) + require.Nil(t, err) + + epoch := tmp.EpochStartTrigger.Epoch() + printNewHeaderRoundEpoch(round, epoch) + + currentHeader, currentHash := tmp.getCurrentHeaderInfo() + header := createMetaBlockToCommit( + epoch, + round, + currentHash, + currentHeader.GetRandSeed(), + tmp.NodesCoordinator.ConsensusGroupSize(core.MetachainShardId), + ) + + return header +} + +func (tmp *TestMetaProcessor) createAndCommitBlock(t *testing.T, header data.HeaderHandler, haveTime func() bool) { + newHeader, blockBody, err := tmp.MetaBlockProcessor.CreateBlock(header, haveTime) + require.Nil(t, err) + + err = tmp.MetaBlockProcessor.CommitBlock(newHeader, blockBody) + require.Nil(t, err) + + time.Sleep(time.Millisecond * 50) + tmp.updateNodesConfig(header.GetEpoch()) + tmp.displayConfig(tmp.NodesConfig) +} + +func 
printNewHeaderRoundEpoch(round uint64, epoch uint32) { + headline := display.Headline( + fmt.Sprintf("Commiting header in epoch %v round %v", epoch, round), + "", + delimiter, + ) + fmt.Println(headline) +} + +func (tmp *TestMetaProcessor) getCurrentHeaderInfo() (data.HeaderHandler, []byte) { + currentHeader := tmp.BlockChainHandler.GetCurrentBlockHeader() + currentHash := tmp.BlockChainHandler.GetCurrentBlockHeaderHash() + if currentHeader == nil { + currentHeader = tmp.BlockChainHandler.GetGenesisHeader() + currentHash = tmp.BlockChainHandler.GetGenesisHeaderHash() + } + + return currentHeader, currentHash +} + +func createMetaBlockToCommit( + epoch uint32, + round uint64, + prevHash []byte, + prevRandSeed []byte, + consensusSize int, +) *block.MetaBlock { + roundStr := strconv.Itoa(int(round)) + hdr := block.MetaBlock{ + Epoch: epoch, + Nonce: round, + Round: round, + PrevHash: prevHash, + Signature: []byte("signature"), + PubKeysBitmap: []byte(strings.Repeat("f", consensusSize)), + RootHash: []byte("roothash" + roundStr), + ShardInfo: make([]block.ShardData, 0), + TxCount: 1, + PrevRandSeed: prevRandSeed, + RandSeed: []byte("randseed" + roundStr), + AccumulatedFeesInEpoch: big.NewInt(0), + AccumulatedFees: big.NewInt(0), + DevFeesInEpoch: big.NewInt(0), + DeveloperFees: big.NewInt(0), + } + + shardMiniBlockHeaders := make([]block.MiniBlockHeader, 0) + shardMiniBlockHeader := block.MiniBlockHeader{ + Hash: []byte("mb_hash" + roundStr), + ReceiverShardID: 0, + SenderShardID: 0, + TxCount: 1, + } + shardMiniBlockHeaders = append(shardMiniBlockHeaders, shardMiniBlockHeader) + shardData := block.ShardData{ + Nonce: round, + ShardID: 0, + HeaderHash: []byte("hdr_hash" + roundStr), + TxCount: 1, + ShardMiniBlockHeaders: shardMiniBlockHeaders, + DeveloperFees: big.NewInt(0), + AccumulatedFees: big.NewInt(0), + } + hdr.ShardInfo = append(hdr.ShardInfo, shardData) + + return &hdr +} + +func (tmp *TestMetaProcessor) updateNodesConfig(epoch uint32) { + eligible, _ := tmp.NodesCoordinator.GetAllEligibleValidatorsPublicKeys(epoch) + waiting, _ := tmp.NodesCoordinator.GetAllWaitingValidatorsPublicKeys(epoch) + leaving, _ := tmp.NodesCoordinator.GetAllLeavingValidatorsPublicKeys(epoch) + shuffledOut, _ := tmp.NodesCoordinator.GetAllShuffledOutValidatorsPublicKeys(epoch) + + rootHash, _ := tmp.ValidatorStatistics.RootHash() + validatorsInfoMap, _ := tmp.ValidatorStatistics.GetValidatorInfoForRootHash(rootHash) + + auction := make([][]byte, 0) + newList := make([][]byte, 0) + for _, validator := range validatorsInfoMap.GetAllValidatorsInfo() { + if validator.GetList() == string(common.AuctionList) { + auction = append(auction, validator.GetPublicKey()) + } + if validator.GetList() == string(common.NewList) { + newList = append(newList, validator.GetPublicKey()) + } + } + + tmp.NodesConfig.eligible = eligible + tmp.NodesConfig.waiting = waiting + tmp.NodesConfig.shuffledOut = shuffledOut + tmp.NodesConfig.leaving = leaving + tmp.NodesConfig.auction = auction + tmp.NodesConfig.new = newList + tmp.NodesConfig.queue = tmp.getWaitingListKeys() +} + +func generateAddresses(startIdx, n uint32) [][]byte { + ret := make([][]byte, 0, n) + + for i := startIdx; i < n+startIdx; i++ { + ret = append(ret, generateAddress(i)) + } + + return ret +} + +func generateAddress(identifier uint32) []byte { + uniqueIdentifier := fmt.Sprintf("address-%d", identifier) + return []byte(strings.Repeat("0", addressLength-len(uniqueIdentifier)) + uniqueIdentifier) +} diff --git a/integrationTests/vm/staking/componentsHolderCreator.go 
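A worked example (sketch only, not part of the patch) of the address helper defined just above: with addressLength = 15, generateAddress left-pads the textual identifier with '0' bytes to a fixed width, so generated test addresses are equal-length and still human-readable.

// Standalone illustration of generateAddress from baseTestMetaProcessor.go.
package main

import (
	"fmt"
	"strings"
)

const addressLength = 15

func generateAddress(identifier uint32) []byte {
	uniqueIdentifier := fmt.Sprintf("address-%d", identifier)
	return []byte(strings.Repeat("0", addressLength-len(uniqueIdentifier)) + uniqueIdentifier)
}

func main() {
	// "address-7" is 9 bytes, so it is left-padded with six '0' bytes.
	fmt.Printf("%s\n", generateAddress(7)) // prints "000000address-7" (15 bytes)
}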
b/integrationTests/vm/staking/componentsHolderCreator.go new file mode 100644 index 00000000000..e3673b08ec7 --- /dev/null +++ b/integrationTests/vm/staking/componentsHolderCreator.go @@ -0,0 +1,221 @@ +package staking + +import ( + "time" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/core/nodetype" + "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-core-go/data/block" + "github.com/multiversx/mx-chain-core-go/data/endProcess" + "github.com/multiversx/mx-chain-core-go/data/typeConverters/uint64ByteSlice" + "github.com/multiversx/mx-chain-core-go/hashing/sha256" + "github.com/multiversx/mx-chain-core-go/marshal" + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/common/enablers" + "github.com/multiversx/mx-chain-go/common/forking" + "github.com/multiversx/mx-chain-go/common/statistics/disabled" + "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/dataRetriever" + "github.com/multiversx/mx-chain-go/dataRetriever/blockchain" + "github.com/multiversx/mx-chain-go/epochStart/notifier" + "github.com/multiversx/mx-chain-go/factory" + "github.com/multiversx/mx-chain-go/integrationTests" + integrationMocks "github.com/multiversx/mx-chain-go/integrationTests/mock" + mockFactory "github.com/multiversx/mx-chain-go/node/mock/factory" + "github.com/multiversx/mx-chain-go/process/mock" + "github.com/multiversx/mx-chain-go/sharding" + "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" + "github.com/multiversx/mx-chain-go/state" + stateFactory "github.com/multiversx/mx-chain-go/state/factory" + "github.com/multiversx/mx-chain-go/state/storagePruningManager" + "github.com/multiversx/mx-chain-go/state/storagePruningManager/evictionWaitingList" + "github.com/multiversx/mx-chain-go/statusHandler" + "github.com/multiversx/mx-chain-go/testscommon" + dataRetrieverMock "github.com/multiversx/mx-chain-go/testscommon/dataRetriever" + notifierMocks "github.com/multiversx/mx-chain-go/testscommon/epochNotifier" + factoryTests "github.com/multiversx/mx-chain-go/testscommon/factory" + "github.com/multiversx/mx-chain-go/testscommon/mainFactoryMocks" + "github.com/multiversx/mx-chain-go/testscommon/outport" + "github.com/multiversx/mx-chain-go/testscommon/stakingcommon" + stateTests "github.com/multiversx/mx-chain-go/testscommon/state" + statusHandlerMock "github.com/multiversx/mx-chain-go/testscommon/statusHandler" + "github.com/multiversx/mx-chain-go/trie" +) + +const hashSize = 32 + +func createComponentHolders(numOfShards uint32) ( + factory.CoreComponentsHolder, + factory.DataComponentsHolder, + factory.BootstrapComponentsHolder, + factory.StatusComponentsHolder, + factory.StateComponentsHandler, +) { + coreComponents := createCoreComponents() + statusComponents := createStatusComponents() + stateComponents := createStateComponents(coreComponents) + dataComponents := createDataComponents(coreComponents, numOfShards) + bootstrapComponents := createBootstrapComponents(coreComponents.InternalMarshalizer(), numOfShards) + + return coreComponents, dataComponents, bootstrapComponents, statusComponents, stateComponents +} + +func createCoreComponents() factory.CoreComponentsHolder { + epochNotifier := forking.NewGenericEpochNotifier() + configEnableEpochs := config.EnableEpochs{ + StakingV4Step1EnableEpoch: stakingV4Step1EnableEpoch, + StakingV4Step2EnableEpoch: stakingV4Step2EnableEpoch, + StakingV4Step3EnableEpoch: stakingV4Step3EnableEpoch, + GovernanceEnableEpoch: 
integrationTests.UnreachableEpoch, + RefactorPeersMiniBlocksEnableEpoch: integrationTests.UnreachableEpoch, + } + + enableEpochsHandler, _ := enablers.NewEnableEpochsHandler(configEnableEpochs, epochNotifier) + + return &integrationMocks.CoreComponentsStub{ + InternalMarshalizerField: &marshal.GogoProtoMarshalizer{}, + HasherField: sha256.NewSha256(), + Uint64ByteSliceConverterField: uint64ByteSlice.NewBigEndianConverter(), + StatusHandlerField: statusHandler.NewStatusMetrics(), + RoundHandlerField: &mock.RoundHandlerMock{RoundTimeDuration: time.Second}, + EpochStartNotifierWithConfirmField: notifier.NewEpochStartSubscriptionHandler(), + EpochNotifierField: epochNotifier, + RaterField: &testscommon.RaterMock{Chance: 5}, + AddressPubKeyConverterField: testscommon.NewPubkeyConverterMock(addressLength), + EconomicsDataField: stakingcommon.CreateEconomicsData(), + ChanStopNodeProcessField: endProcess.GetDummyEndProcessChannel(), + NodeTypeProviderField: nodetype.NewNodeTypeProvider(core.NodeTypeValidator), + ProcessStatusHandlerInternal: statusHandler.NewProcessStatusHandler(), + EnableEpochsHandlerField: enableEpochsHandler, + EnableRoundsHandlerField: &testscommon.EnableRoundsHandlerStub{}, + RoundNotifierField: ¬ifierMocks.RoundNotifierStub{}, + } +} + +func createDataComponents(coreComponents factory.CoreComponentsHolder, numOfShards uint32) factory.DataComponentsHolder { + genesisBlock := createGenesisMetaBlock() + genesisBlockHash, _ := coreComponents.InternalMarshalizer().Marshal(genesisBlock) + genesisBlockHash = coreComponents.Hasher().Compute(string(genesisBlockHash)) + + blockChain, _ := blockchain.NewMetaChain(&statusHandlerMock.AppStatusHandlerStub{}) + _ = blockChain.SetGenesisHeader(createGenesisMetaBlock()) + blockChain.SetGenesisHeaderHash(genesisBlockHash) + + chainStorer := dataRetriever.NewChainStorer() + chainStorer.AddStorer(dataRetriever.BootstrapUnit, integrationTests.CreateMemUnit()) + chainStorer.AddStorer(dataRetriever.MetaHdrNonceHashDataUnit, integrationTests.CreateMemUnit()) + chainStorer.AddStorer(dataRetriever.MetaBlockUnit, integrationTests.CreateMemUnit()) + chainStorer.AddStorer(dataRetriever.MiniBlockUnit, integrationTests.CreateMemUnit()) + chainStorer.AddStorer(dataRetriever.BlockHeaderUnit, integrationTests.CreateMemUnit()) + for i := uint32(0); i < numOfShards; i++ { + unit := dataRetriever.ShardHdrNonceHashDataUnit + dataRetriever.UnitType(i) + chainStorer.AddStorer(unit, integrationTests.CreateMemUnit()) + } + + return &mockFactory.DataComponentsMock{ + Store: chainStorer, + DataPool: dataRetrieverMock.NewPoolsHolderMock(), + BlockChain: blockChain, + EconomicsData: coreComponents.EconomicsData(), + } +} + +func createBootstrapComponents( + marshaller marshal.Marshalizer, + numOfShards uint32, +) factory.BootstrapComponentsHolder { + shardCoordinator, _ := sharding.NewMultiShardCoordinator(numOfShards, core.MetachainShardId) + ncr, _ := nodesCoordinator.NewNodesCoordinatorRegistryFactory( + marshaller, + stakingV4Step2EnableEpoch, + ) + + return &mainFactoryMocks.BootstrapComponentsStub{ + ShCoordinator: shardCoordinator, + HdrIntegrityVerifier: &mock.HeaderIntegrityVerifierStub{}, + VersionedHdrFactory: &testscommon.VersionedHeaderFactoryStub{ + CreateCalled: func(epoch uint32) data.HeaderHandler { + return &block.MetaBlock{Epoch: epoch} + }, + }, + NodesCoordinatorRegistryFactoryField: ncr, + } +} + +func createStatusComponents() factory.StatusComponentsHolder { + return &integrationMocks.StatusComponentsStub{ + Outport: &outport.OutportStub{}, + 
SoftwareVersionCheck: &integrationMocks.SoftwareVersionCheckerMock{}, + ManagedPeersMonitorField: &testscommon.ManagedPeersMonitorStub{}, + } +} + +func createStateComponents(coreComponents factory.CoreComponentsHolder) factory.StateComponentsHandler { + tsmArgs := getNewTrieStorageManagerArgs(coreComponents) + tsm, _ := trie.CreateTrieStorageManager(tsmArgs, trie.StorageManagerOptions{}) + trieFactoryManager, _ := trie.NewTrieStorageManagerWithoutPruning(tsm) + + argsAccCreator := stateFactory.ArgsAccountCreator{ + Hasher: coreComponents.Hasher(), + Marshaller: coreComponents.InternalMarshalizer(), + EnableEpochsHandler: coreComponents.EnableEpochsHandler(), + } + + accCreator, _ := stateFactory.NewAccountCreator(argsAccCreator) + + userAccountsDB := createAccountsDB(coreComponents, accCreator, trieFactoryManager) + peerAccountsDB := createAccountsDB(coreComponents, stateFactory.NewPeerAccountCreator(), trieFactoryManager) + + _ = userAccountsDB.SetSyncer(&mock.AccountsDBSyncerStub{}) + _ = peerAccountsDB.SetSyncer(&mock.AccountsDBSyncerStub{}) + + return &factoryTests.StateComponentsMock{ + PeersAcc: peerAccountsDB, + Accounts: userAccountsDB, + } +} + +func getNewTrieStorageManagerArgs(coreComponents factory.CoreComponentsHolder) trie.NewTrieStorageManagerArgs { + return trie.NewTrieStorageManagerArgs{ + MainStorer: testscommon.CreateMemUnit(), + Marshalizer: coreComponents.InternalMarshalizer(), + Hasher: coreComponents.Hasher(), + GeneralConfig: config.TrieStorageManagerConfig{SnapshotsGoroutineNum: 1}, + IdleProvider: &testscommon.ProcessStatusHandlerStub{}, + Identifier: "id", + StatsCollector: disabled.NewStateStatistics(), + } +} + +func createAccountsDB( + coreComponents factory.CoreComponentsHolder, + accountFactory state.AccountFactory, + trieStorageManager common.StorageManager, +) *state.AccountsDB { + tr, _ := trie.NewTrie( + trieStorageManager, + coreComponents.InternalMarshalizer(), + coreComponents.Hasher(), + coreComponents.EnableEpochsHandler(), + 5, + ) + + argsEvictionWaitingList := evictionWaitingList.MemoryEvictionWaitingListArgs{ + RootHashesSize: 10, + HashesSize: hashSize, + } + ewl, _ := evictionWaitingList.NewMemoryEvictionWaitingList(argsEvictionWaitingList) + spm, _ := storagePruningManager.NewStoragePruningManager(ewl, 10) + argsAccountsDb := state.ArgsAccountsDB{ + Trie: tr, + Hasher: coreComponents.Hasher(), + Marshaller: coreComponents.InternalMarshalizer(), + AccountFactory: accountFactory, + StoragePruningManager: spm, + AddressConverter: coreComponents.AddressPubKeyConverter(), + SnapshotsManager: &stateTests.SnapshotsManagerStub{}, + } + adb, _ := state.NewAccountsDB(argsAccountsDb) + return adb +} diff --git a/integrationTests/vm/staking/configDisplayer.go b/integrationTests/vm/staking/configDisplayer.go new file mode 100644 index 00000000000..3ea2a402f7f --- /dev/null +++ b/integrationTests/vm/staking/configDisplayer.go @@ -0,0 +1,132 @@ +package staking + +import ( + "bytes" + "fmt" + "sort" + "strconv" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/display" + "github.com/multiversx/mx-chain-go/state" +) + +const ( + delimiter = "#" + maxPubKeysListLen = 6 +) + +// TODO: Make a subcomponent which will register to epoch notifier to display config only upon epoch change + +func getAllPubKeys(validatorsMap map[uint32][][]byte) [][]byte { + allValidators := make([][]byte, 0) + for _, validatorsInShard := range validatorsMap { + allValidators = append(allValidators, validatorsInShard...) 
+ } + + return allValidators +} + +func getShortPubKeysList(pubKeys [][]byte) [][]byte { + pubKeysToDisplay := pubKeys + sort.SliceStable(pubKeysToDisplay, func(i, j int) bool { + return string(pubKeysToDisplay[i]) < string(pubKeysToDisplay[j]) + }) + + if len(pubKeys) > maxPubKeysListLen { + pubKeysToDisplay = make([][]byte, 0) + pubKeysToDisplay = append(pubKeysToDisplay, pubKeys[:maxPubKeysListLen/2]...) + pubKeysToDisplay = append(pubKeysToDisplay, [][]byte{[]byte("...")}...) + pubKeysToDisplay = append(pubKeysToDisplay, pubKeys[len(pubKeys)-maxPubKeysListLen/2:]...) + } + + return pubKeysToDisplay +} + +func (tmp *TestMetaProcessor) getAllNodeKeys() state.ShardValidatorsInfoMapHandler { + rootHash, _ := tmp.ValidatorStatistics.RootHash() + validatorsMap, _ := tmp.ValidatorStatistics.GetValidatorInfoForRootHash(rootHash) + return validatorsMap +} + +func (tmp *TestMetaProcessor) displayConfig(config nodesConfig) { + lines := make([]*display.LineData, 0) + + allNodes := tmp.getAllNodeKeys() + _ = tmp.StakingDataProvider.PrepareStakingData(allNodes) + + numShards := uint32(len(config.eligible)) + for shardId := uint32(0); shardId < numShards; shardId++ { + shard := getShardId(shardId, numShards) + + lines = append(lines, tmp.getDisplayableValidatorsInShard("eligible", config.eligible[shard], shard)...) + lines = append(lines, tmp.getDisplayableValidatorsInShard("waiting", config.waiting[shard], shard)...) + lines = append(lines, tmp.getDisplayableValidatorsInShard("leaving", config.leaving[shard], shard)...) + lines = append(lines, tmp.getDisplayableValidatorsInShard("shuffled", config.shuffledOut[shard], shard)...) + lines = append(lines, display.NewLineData(true, []string{})) + } + lines = append(lines, display.NewLineData(true, []string{"eligible", fmt.Sprintf("Total: %d", len(getAllPubKeys(config.eligible))), "", "", "All shards"})) + lines = append(lines, display.NewLineData(true, []string{"waiting", fmt.Sprintf("Total: %d", len(getAllPubKeys(config.waiting))), "", "", "All shards"})) + lines = append(lines, display.NewLineData(true, []string{"leaving", fmt.Sprintf("Total: %d", len(getAllPubKeys(config.leaving))), "", "", "All shards"})) + lines = append(lines, display.NewLineData(true, []string{"shuffled", fmt.Sprintf("Total: %d", len(getAllPubKeys(config.shuffledOut))), "", "", "All shards"})) + + tableHeader := []string{"List", "BLS key", "Owner", "TopUp", "Shard ID"} + table, _ := display.CreateTableString(tableHeader, lines) + headline := display.Headline("Nodes config", "", delimiter) + fmt.Printf("%s\n%s\n", headline, table) + + tmp.displayValidators("New", config.new) + tmp.displayValidators("Auction", config.auction) + tmp.displayValidators("Queue", config.queue) + + tmp.StakingDataProvider.Clean() +} + +func getShardId(shardId, numShards uint32) uint32 { + if shardId == numShards-1 { + return core.MetachainShardId + } + + return shardId +} + +func (tmp *TestMetaProcessor) getDisplayableValidatorsInShard(list string, pubKeys [][]byte, shardID uint32) []*display.LineData { + pubKeysToDisplay := getShortPubKeysList(pubKeys) + + lines := make([]*display.LineData, 0) + for idx, pk := range pubKeysToDisplay { + horizontalLineAfter := idx == len(pubKeysToDisplay)-1 + owner, _ := tmp.StakingDataProvider.GetBlsKeyOwner(pk) + topUp, _ := tmp.StakingDataProvider.GetNodeStakedTopUp(pk) + if bytes.Equal(pk, []byte("...")) { + lines = append(lines, display.NewLineData(horizontalLineAfter, []string{list, string(pk), "...", "...", strconv.Itoa(int(shardID))})) + } else { + lines = 
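A worked example (sketch only, not part of the patch) of the truncation rule in getShortPubKeysList above: with maxPubKeysListLen = 6, any sorted list longer than six keys is rendered as its first three entries, a "..." marker, and its last three entries, keeping the displayed tables compact regardless of how many validators a shard holds.

// Standalone illustration of the display truncation used by configDisplayer.go.
package main

import (
	"fmt"
	"sort"
)

const maxPubKeysListLen = 6

func getShortPubKeysList(pubKeys [][]byte) [][]byte {
	pubKeysToDisplay := pubKeys
	sort.SliceStable(pubKeysToDisplay, func(i, j int) bool {
		return string(pubKeysToDisplay[i]) < string(pubKeysToDisplay[j])
	})

	if len(pubKeys) > maxPubKeysListLen {
		pubKeysToDisplay = make([][]byte, 0)
		pubKeysToDisplay = append(pubKeysToDisplay, pubKeys[:maxPubKeysListLen/2]...)
		pubKeysToDisplay = append(pubKeysToDisplay, [][]byte{[]byte("...")}...)
		pubKeysToDisplay = append(pubKeysToDisplay, pubKeys[len(pubKeys)-maxPubKeysListLen/2:]...)
	}

	return pubKeysToDisplay
}

func main() {
	keys := [][]byte{
		[]byte("k1"), []byte("k2"), []byte("k3"), []byte("k4"),
		[]byte("k5"), []byte("k6"), []byte("k7"), []byte("k8"),
	}
	for _, k := range getShortPubKeysList(keys) {
		fmt.Println(string(k)) // prints k1, k2, k3, "...", k6, k7, k8
	}
}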
append(lines, display.NewLineData(horizontalLineAfter, []string{list, string(pk), owner, topUp.String(), strconv.Itoa(int(shardID))})) + } + } + lines = append(lines, display.NewLineData(true, []string{list, fmt.Sprintf("Total: %d", len(pubKeys)), "", "", strconv.Itoa(int(shardID))})) + + return lines +} + +func (tmp *TestMetaProcessor) displayValidators(list string, pubKeys [][]byte) { + pubKeysToDisplay := getShortPubKeysList(pubKeys) + + lines := make([]*display.LineData, 0) + tableHeader := []string{"List", "BLS key", "Owner", "TopUp"} + for idx, pk := range pubKeysToDisplay { + horizontalLineAfter := idx == len(pubKeysToDisplay)-1 + owner, _ := tmp.StakingDataProvider.GetBlsKeyOwner(pk) + topUp, _ := tmp.StakingDataProvider.GetNodeStakedTopUp(pk) + if bytes.Equal(pk, []byte("...")) { + lines = append(lines, display.NewLineData(horizontalLineAfter, []string{list, string(pk), "...", "..."})) + } else { + lines = append(lines, display.NewLineData(horizontalLineAfter, []string{list, string(pk), owner, topUp.String()})) + } + } + lines = append(lines, display.NewLineData(true, []string{list, fmt.Sprintf("Total: %d", len(pubKeys))})) + + headline := display.Headline(fmt.Sprintf("%s list", list), "", delimiter) + table, _ := display.CreateTableString(tableHeader, lines) + fmt.Printf("%s \n%s\n", headline, table) +} diff --git a/integrationTests/vm/staking/metaBlockProcessorCreator.go b/integrationTests/vm/staking/metaBlockProcessorCreator.go new file mode 100644 index 00000000000..759458cf30e --- /dev/null +++ b/integrationTests/vm/staking/metaBlockProcessorCreator.go @@ -0,0 +1,245 @@ +package staking + +import ( + "math/big" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-core-go/data/block" + "github.com/multiversx/mx-chain-go/dataRetriever" + "github.com/multiversx/mx-chain-go/epochStart" + "github.com/multiversx/mx-chain-go/epochStart/metachain" + "github.com/multiversx/mx-chain-go/factory" + integrationMocks "github.com/multiversx/mx-chain-go/integrationTests/mock" + "github.com/multiversx/mx-chain-go/process" + blproc "github.com/multiversx/mx-chain-go/process/block" + "github.com/multiversx/mx-chain-go/process/block/bootstrapStorage" + "github.com/multiversx/mx-chain-go/process/block/postprocess" + "github.com/multiversx/mx-chain-go/process/block/processedMb" + "github.com/multiversx/mx-chain-go/process/mock" + "github.com/multiversx/mx-chain-go/process/scToProtocol" + "github.com/multiversx/mx-chain-go/process/smartContract" + "github.com/multiversx/mx-chain-go/sharding" + "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" + "github.com/multiversx/mx-chain-go/state" + "github.com/multiversx/mx-chain-go/testscommon" + "github.com/multiversx/mx-chain-go/testscommon/dblookupext" + factory2 "github.com/multiversx/mx-chain-go/testscommon/factory" + "github.com/multiversx/mx-chain-go/testscommon/integrationtests" + "github.com/multiversx/mx-chain-go/testscommon/outport" + statusHandlerMock "github.com/multiversx/mx-chain-go/testscommon/statusHandler" +) + +func createMetaBlockProcessor( + nc nodesCoordinator.NodesCoordinator, + systemSCProcessor process.EpochStartSystemSCProcessor, + coreComponents factory.CoreComponentsHolder, + dataComponents factory.DataComponentsHolder, + bootstrapComponents factory.BootstrapComponentsHolder, + statusComponents factory.StatusComponentsHolder, + stateComponents factory.StateComponentsHandler, + validatorsInfoCreator process.ValidatorStatisticsProcessor, + 
blockChainHook process.BlockChainHookHandler, + metaVMFactory process.VirtualMachinesContainerFactory, + epochStartHandler process.EpochStartTriggerHandler, + vmContainer process.VirtualMachinesContainer, + txCoordinator process.TransactionCoordinator, +) process.BlockProcessor { + blockTracker := createBlockTracker( + dataComponents.Blockchain().GetGenesisHeader(), + bootstrapComponents.ShardCoordinator(), + ) + epochStartDataCreator := createEpochStartDataCreator( + coreComponents, + dataComponents, + bootstrapComponents.ShardCoordinator(), + epochStartHandler, + blockTracker, + ) + + accountsDb := make(map[state.AccountsDbIdentifier]state.AccountsAdapter) + accountsDb[state.UserAccountsState] = stateComponents.AccountsAdapter() + accountsDb[state.PeerAccountsState] = stateComponents.PeerAccounts() + + bootStrapStorer, _ := dataComponents.StorageService().GetStorer(dataRetriever.BootstrapUnit) + bootStorer, _ := bootstrapStorage.NewBootstrapStorer( + coreComponents.InternalMarshalizer(), + bootStrapStorer, + ) + + headerValidator := createHeaderValidator(coreComponents) + valInfoCreator := createValidatorInfoCreator(coreComponents, dataComponents, bootstrapComponents.ShardCoordinator()) + stakingToPeer := createSCToProtocol(coreComponents, stateComponents, dataComponents.Datapool().CurrentBlockTxs()) + + args := blproc.ArgMetaProcessor{ + ArgBaseProcessor: blproc.ArgBaseProcessor{ + CoreComponents: coreComponents, + DataComponents: dataComponents, + BootstrapComponents: bootstrapComponents, + StatusComponents: statusComponents, + StatusCoreComponents: &factory2.StatusCoreComponentsStub{ + AppStatusHandlerField: &statusHandlerMock.AppStatusHandlerStub{}, + }, + AccountsDB: accountsDb, + ForkDetector: &integrationMocks.ForkDetectorStub{}, + NodesCoordinator: nc, + FeeHandler: postprocess.NewFeeAccumulator(), + RequestHandler: &testscommon.RequestHandlerStub{}, + BlockChainHook: blockChainHook, + TxCoordinator: txCoordinator, + EpochStartTrigger: epochStartHandler, + HeaderValidator: headerValidator, + BootStorer: bootStorer, + BlockTracker: blockTracker, + BlockSizeThrottler: &mock.BlockSizeThrottlerStub{}, + HistoryRepository: &dblookupext.HistoryRepositoryStub{}, + VMContainersFactory: metaVMFactory, + VmContainer: vmContainer, + GasHandler: &mock.GasHandlerMock{}, + ScheduledTxsExecutionHandler: &testscommon.ScheduledTxsExecutionStub{}, + ScheduledMiniBlocksEnableEpoch: 10000, + ProcessedMiniBlocksTracker: processedMb.NewProcessedMiniBlocksTracker(), + OutportDataProvider: &outport.OutportDataProviderStub{}, + ReceiptsRepository: &testscommon.ReceiptsRepositoryStub{}, + ManagedPeersHolder: &testscommon.ManagedPeersHolderStub{}, + BlockProcessingCutoffHandler: &testscommon.BlockProcessingCutoffStub{}, + SentSignaturesTracker: &testscommon.SentSignatureTrackerStub{}, + }, + SCToProtocol: stakingToPeer, + PendingMiniBlocksHandler: &mock.PendingMiniBlocksHandlerStub{}, + EpochStartDataCreator: epochStartDataCreator, + EpochEconomics: &mock.EpochEconomicsStub{}, + EpochRewardsCreator: &testscommon.RewardsCreatorStub{ + GetLocalTxCacheCalled: func() epochStart.TransactionCacher { + return dataComponents.Datapool().CurrentBlockTxs() + }, + }, + EpochValidatorInfoCreator: valInfoCreator, + ValidatorStatisticsProcessor: validatorsInfoCreator, + EpochSystemSCProcessor: systemSCProcessor, + } + + metaProc, _ := blproc.NewMetaProcessor(args) + return metaProc +} + +func createValidatorInfoCreator( + coreComponents factory.CoreComponentsHolder, + dataComponents factory.DataComponentsHolder, + 
shardCoordinator sharding.Coordinator, +) process.EpochStartValidatorInfoCreator { + mbStorer, _ := dataComponents.StorageService().GetStorer(dataRetriever.MiniBlockUnit) + + args := metachain.ArgsNewValidatorInfoCreator{ + ShardCoordinator: shardCoordinator, + MiniBlockStorage: mbStorer, + Hasher: coreComponents.Hasher(), + Marshalizer: coreComponents.InternalMarshalizer(), + DataPool: dataComponents.Datapool(), + EnableEpochsHandler: coreComponents.EnableEpochsHandler(), + ValidatorInfoStorage: integrationtests.CreateMemUnit(), + } + + valInfoCreator, _ := metachain.NewValidatorInfoCreator(args) + return valInfoCreator +} + +func createEpochStartDataCreator( + coreComponents factory.CoreComponentsHolder, + dataComponents factory.DataComponentsHolder, + shardCoordinator sharding.Coordinator, + epochStartTrigger process.EpochStartTriggerHandler, + blockTracker process.BlockTracker, +) process.EpochStartDataCreator { + argsEpochStartDataCreator := metachain.ArgsNewEpochStartData{ + Marshalizer: coreComponents.InternalMarshalizer(), + Hasher: coreComponents.Hasher(), + Store: dataComponents.StorageService(), + DataPool: dataComponents.Datapool(), + BlockTracker: blockTracker, + ShardCoordinator: shardCoordinator, + EpochStartTrigger: epochStartTrigger, + RequestHandler: &testscommon.RequestHandlerStub{}, + GenesisEpoch: 0, + EnableEpochsHandler: coreComponents.EnableEpochsHandler(), + } + epochStartDataCreator, _ := metachain.NewEpochStartData(argsEpochStartDataCreator) + return epochStartDataCreator +} + +func createBlockTracker( + genesisMetaHeader data.HeaderHandler, + shardCoordinator sharding.Coordinator, +) process.BlockTracker { + genesisBlocks := make(map[uint32]data.HeaderHandler) + for ShardID := uint32(0); ShardID < shardCoordinator.NumberOfShards(); ShardID++ { + genesisBlocks[ShardID] = createGenesisBlock(ShardID) + } + + genesisBlocks[core.MetachainShardId] = genesisMetaHeader + return mock.NewBlockTrackerMock(shardCoordinator, genesisBlocks) +} + +func createGenesisBlock(shardID uint32) *block.Header { + rootHash := []byte("roothash") + return &block.Header{ + Nonce: 0, + Round: 0, + Signature: rootHash, + RandSeed: rootHash, + PrevRandSeed: rootHash, + ShardID: shardID, + PubKeysBitmap: rootHash, + RootHash: rootHash, + PrevHash: rootHash, + AccumulatedFees: big.NewInt(0), + DeveloperFees: big.NewInt(0), + } +} + +func createGenesisMetaBlock() *block.MetaBlock { + rootHash := []byte("roothash") + return &block.MetaBlock{ + Nonce: 0, + Round: 0, + Signature: rootHash, + RandSeed: rootHash, + PrevRandSeed: rootHash, + PubKeysBitmap: rootHash, + RootHash: rootHash, + PrevHash: rootHash, + AccumulatedFees: big.NewInt(0), + DeveloperFees: big.NewInt(0), + AccumulatedFeesInEpoch: big.NewInt(0), + DevFeesInEpoch: big.NewInt(0), + } +} + +func createHeaderValidator(coreComponents factory.CoreComponentsHolder) epochStart.HeaderValidator { + argsHeaderValidator := blproc.ArgsHeaderValidator{ + Hasher: coreComponents.Hasher(), + Marshalizer: coreComponents.InternalMarshalizer(), + } + headerValidator, _ := blproc.NewHeaderValidator(argsHeaderValidator) + return headerValidator +} + +func createSCToProtocol( + coreComponents factory.CoreComponentsHolder, + stateComponents factory.StateComponentsHandler, + txCacher dataRetriever.TransactionCacher, +) process.SmartContractToProtocolHandler { + args := scToProtocol.ArgStakingToPeer{ + PubkeyConv: coreComponents.AddressPubKeyConverter(), + Hasher: coreComponents.Hasher(), + Marshalizer: coreComponents.InternalMarshalizer(), + PeerState: 
stateComponents.PeerAccounts(), + BaseState: stateComponents.AccountsAdapter(), + ArgParser: smartContract.NewArgumentParser(), + CurrTxs: txCacher, + RatingsData: &mock.RatingsInfoMock{}, + EnableEpochsHandler: coreComponents.EnableEpochsHandler(), + } + stakingToPeer, _ := scToProtocol.NewStakingToPeer(args) + return stakingToPeer +} diff --git a/integrationTests/vm/staking/nodesCoordiantorCreator.go b/integrationTests/vm/staking/nodesCoordiantorCreator.go new file mode 100644 index 00000000000..27a54719521 --- /dev/null +++ b/integrationTests/vm/staking/nodesCoordiantorCreator.go @@ -0,0 +1,232 @@ +package staking + +import ( + "math/big" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/marshal" + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/dataRetriever/dataPool" + "github.com/multiversx/mx-chain-go/factory" + integrationMocks "github.com/multiversx/mx-chain-go/integrationTests/mock" + "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" + "github.com/multiversx/mx-chain-go/state/accounts" + "github.com/multiversx/mx-chain-go/storage" + nodesSetupMock "github.com/multiversx/mx-chain-go/testscommon/genesisMocks" + "github.com/multiversx/mx-chain-go/testscommon/stakingcommon" + "github.com/multiversx/mx-chain-storage-go/lrucache" +) + +const ( + shuffleBetweenShards = false + adaptivity = false + hysteresis = float32(0.2) + initialRating = 5 +) + +func createNodesCoordinator( + eligibleMap map[uint32][]nodesCoordinator.Validator, + waitingMap map[uint32][]nodesCoordinator.Validator, + numOfMetaNodes uint32, + numOfShards uint32, + numOfEligibleNodesPerShard uint32, + shardConsensusGroupSize int, + metaConsensusGroupSize int, + coreComponents factory.CoreComponentsHolder, + bootStorer storage.Storer, + nodesCoordinatorRegistryFactory nodesCoordinator.NodesCoordinatorRegistryFactory, + maxNodesConfig []config.MaxNodesChangeConfig, +) nodesCoordinator.NodesCoordinator { + shufflerArgs := &nodesCoordinator.NodesShufflerArgs{ + NodesShard: numOfEligibleNodesPerShard, + NodesMeta: numOfMetaNodes, + Hysteresis: hysteresis, + Adaptivity: adaptivity, + ShuffleBetweenShards: shuffleBetweenShards, + MaxNodesEnableConfig: maxNodesConfig, + EnableEpochs: config.EnableEpochs{ + StakingV4Step2EnableEpoch: stakingV4Step2EnableEpoch, + StakingV4Step3EnableEpoch: stakingV4Step3EnableEpoch, + }, + EnableEpochsHandler: coreComponents.EnableEpochsHandler(), + } + nodeShuffler, _ := nodesCoordinator.NewHashValidatorsShuffler(shufflerArgs) + cache, _ := lrucache.NewCache(10000) + argumentsNodesCoordinator := nodesCoordinator.ArgNodesCoordinator{ + ShardConsensusGroupSize: shardConsensusGroupSize, + MetaConsensusGroupSize: metaConsensusGroupSize, + Marshalizer: coreComponents.InternalMarshalizer(), + Hasher: coreComponents.Hasher(), + ShardIDAsObserver: core.MetachainShardId, + NbShards: numOfShards, + EligibleNodes: eligibleMap, + WaitingNodes: waitingMap, + SelfPublicKey: eligibleMap[core.MetachainShardId][0].PubKey(), + ConsensusGroupCache: cache, + ShuffledOutHandler: &integrationMocks.ShuffledOutHandlerStub{}, + ChanStopNode: coreComponents.ChanStopNodeProcess(), + IsFullArchive: false, + Shuffler: nodeShuffler, + BootStorer: bootStorer, + EpochStartNotifier: coreComponents.EpochStartNotifierWithConfirm(), + NodesCoordinatorRegistryFactory: nodesCoordinatorRegistryFactory, + NodeTypeProvider: coreComponents.NodeTypeProvider(), + EnableEpochsHandler: 
coreComponents.EnableEpochsHandler(), + ValidatorInfoCacher: dataPool.NewCurrentEpochValidatorInfoPool(), + GenesisNodesSetupHandler: &nodesSetupMock.NodesSetupStub{}, + } + + baseNodesCoordinator, _ := nodesCoordinator.NewIndexHashedNodesCoordinator(argumentsNodesCoordinator) + nodesCoord, _ := nodesCoordinator.NewIndexHashedNodesCoordinatorWithRater(baseNodesCoordinator, coreComponents.Rater()) + return nodesCoord +} + +func createGenesisNodes( + numOfMetaNodes uint32, + numOfShards uint32, + numOfNodesPerShard uint32, + numOfWaitingNodesPerShard uint32, + marshaller marshal.Marshalizer, + stateComponents factory.StateComponentsHandler, +) (map[uint32][]nodesCoordinator.Validator, map[uint32][]nodesCoordinator.Validator) { + addressStartIdx := uint32(0) + eligibleGenesisNodes := generateGenesisNodeInfoMap(numOfMetaNodes, numOfShards, numOfNodesPerShard, addressStartIdx) + eligibleValidators, _ := nodesCoordinator.NodesInfoToValidators(eligibleGenesisNodes) + + addressStartIdx = numOfMetaNodes + numOfShards*numOfNodesPerShard + waitingGenesisNodes := generateGenesisNodeInfoMap(numOfWaitingNodesPerShard, numOfShards, numOfWaitingNodesPerShard, addressStartIdx) + waitingValidators, _ := nodesCoordinator.NodesInfoToValidators(waitingGenesisNodes) + + registerValidators(eligibleValidators, stateComponents, marshaller, common.EligibleList) + registerValidators(waitingValidators, stateComponents, marshaller, common.WaitingList) + + return eligibleValidators, waitingValidators +} + +func createGenesisNodesWithCustomConfig( + owners map[string]*OwnerStats, + marshaller marshal.Marshalizer, + stateComponents factory.StateComponentsHandler, +) (map[uint32][]nodesCoordinator.Validator, map[uint32][]nodesCoordinator.Validator) { + eligibleGenesis := make(map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) + waitingGenesis := make(map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) + + for owner, ownerStats := range owners { + registerOwnerKeys( + []byte(owner), + ownerStats.EligibleBlsKeys, + ownerStats.TotalStake, + stateComponents, + marshaller, + common.EligibleList, + eligibleGenesis, + ) + + registerOwnerKeys( + []byte(owner), + ownerStats.WaitingBlsKeys, + ownerStats.TotalStake, + stateComponents, + marshaller, + common.WaitingList, + waitingGenesis, + ) + } + + eligible, _ := nodesCoordinator.NodesInfoToValidators(eligibleGenesis) + waiting, _ := nodesCoordinator.NodesInfoToValidators(waitingGenesis) + + return eligible, waiting +} + +func generateGenesisNodeInfoMap( + numOfMetaNodes uint32, + numOfShards uint32, + numOfNodesPerShard uint32, + addressStartIdx uint32, +) map[uint32][]nodesCoordinator.GenesisNodeInfoHandler { + validatorsMap := make(map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) + id := addressStartIdx + for shardId := uint32(0); shardId < numOfShards; shardId++ { + for n := uint32(0); n < numOfNodesPerShard; n++ { + addr := generateAddress(id) + validator := integrationMocks.NewNodeInfo(addr, addr, shardId, initialRating) + validatorsMap[shardId] = append(validatorsMap[shardId], validator) + id++ + } + } + + for n := uint32(0); n < numOfMetaNodes; n++ { + addr := generateAddress(id) + validator := integrationMocks.NewNodeInfo(addr, addr, core.MetachainShardId, initialRating) + validatorsMap[core.MetachainShardId] = append(validatorsMap[core.MetachainShardId], validator) + id++ + } + + return validatorsMap +} + +func registerOwnerKeys( + owner []byte, + ownerPubKeys map[uint32][][]byte, + totalStake *big.Int, + stateComponents factory.StateComponentsHolder, + 
marshaller marshal.Marshalizer, + list common.PeerType, + allNodes map[uint32][]nodesCoordinator.GenesisNodeInfoHandler, +) { + for shardID, pubKeysInShard := range ownerPubKeys { + for _, pubKey := range pubKeysInShard { + validator := integrationMocks.NewNodeInfo(pubKey, pubKey, shardID, initialRating) + allNodes[shardID] = append(allNodes[shardID], validator) + + savePeerAcc(stateComponents, pubKey, shardID, list) + } + stakingcommon.RegisterValidatorKeys( + stateComponents.AccountsAdapter(), + owner, + owner, + pubKeysInShard, + totalStake, + marshaller, + ) + } +} + +func registerValidators( + validators map[uint32][]nodesCoordinator.Validator, + stateComponents factory.StateComponentsHolder, + marshaller marshal.Marshalizer, + list common.PeerType, +) { + for shardID, validatorsInShard := range validators { + for idx, val := range validatorsInShard { + pubKey := val.PubKey() + savePeerAcc(stateComponents, pubKey, shardID, list) + + stakingcommon.RegisterValidatorKeys( + stateComponents.AccountsAdapter(), + pubKey, + pubKey, + [][]byte{pubKey}, + big.NewInt(nodePrice+int64(idx)), + marshaller, + ) + } + } +} + +func savePeerAcc( + stateComponents factory.StateComponentsHolder, + pubKey []byte, + shardID uint32, + list common.PeerType, +) { + peerAccount, _ := accounts.NewPeerAccount(pubKey) + peerAccount.SetTempRating(initialRating) + peerAccount.ShardId = shardID + peerAccount.BLSPublicKey = pubKey + peerAccount.List = string(list) + _ = stateComponents.PeerAccounts().SaveAccount(peerAccount) +} diff --git a/integrationTests/vm/staking/stakingQueue.go b/integrationTests/vm/staking/stakingQueue.go new file mode 100644 index 00000000000..7544e18cf40 --- /dev/null +++ b/integrationTests/vm/staking/stakingQueue.go @@ -0,0 +1,121 @@ +package staking + +import ( + "math/big" + + "github.com/multiversx/mx-chain-core-go/marshal" + "github.com/multiversx/mx-chain-go/state" + "github.com/multiversx/mx-chain-go/testscommon/stakingcommon" + "github.com/multiversx/mx-chain-go/vm" + "github.com/multiversx/mx-chain-go/vm/systemSmartContracts" +) + +func createStakingQueue( + numOfNodesInStakingQueue uint32, + totalNumOfNodes uint32, + marshaller marshal.Marshalizer, + accountsAdapter state.AccountsAdapter, +) [][]byte { + ownerWaitingNodes := make([][]byte, 0) + if numOfNodesInStakingQueue == 0 { + return ownerWaitingNodes + } + + owner := generateAddress(totalNumOfNodes) + totalNumOfNodes += 1 + for i := totalNumOfNodes; i < totalNumOfNodes+numOfNodesInStakingQueue; i++ { + ownerWaitingNodes = append(ownerWaitingNodes, generateAddress(i)) + } + + stakingcommon.AddKeysToWaitingList( + accountsAdapter, + ownerWaitingNodes, + marshaller, + owner, + owner, + ) + + stakingcommon.RegisterValidatorKeys( + accountsAdapter, + owner, + owner, + ownerWaitingNodes, + big.NewInt(int64(2*nodePrice*numOfNodesInStakingQueue)), + marshaller, + ) + + return ownerWaitingNodes +} + +func createStakingQueueCustomNodes( + owners map[string]*OwnerStats, + marshaller marshal.Marshalizer, + accountsAdapter state.AccountsAdapter, +) [][]byte { + queue := make([][]byte, 0) + + for owner, ownerStats := range owners { + stakingcommon.RegisterValidatorKeys( + accountsAdapter, + []byte(owner), + []byte(owner), + ownerStats.StakingQueueKeys, + ownerStats.TotalStake, + marshaller, + ) + + stakingcommon.AddKeysToWaitingList( + accountsAdapter, + ownerStats.StakingQueueKeys, + marshaller, + []byte(owner), + []byte(owner), + ) + + queue = append(queue, ownerStats.StakingQueueKeys...) 
+ } + + return queue +} + +func (tmp *TestMetaProcessor) getWaitingListKeys() [][]byte { + stakingSCAcc := stakingcommon.LoadUserAccount(tmp.AccountsAdapter, vm.StakingSCAddress) + + waitingList := &systemSmartContracts.WaitingList{ + FirstKey: make([]byte, 0), + LastKey: make([]byte, 0), + Length: 0, + LastJailedKey: make([]byte, 0), + } + marshaledData, _, _ := stakingSCAcc.RetrieveValue([]byte("waitingList")) + if len(marshaledData) == 0 { + return nil + } + + err := tmp.Marshaller.Unmarshal(waitingList, marshaledData) + if err != nil { + return nil + } + + index := uint32(1) + nextKey := make([]byte, len(waitingList.FirstKey)) + copy(nextKey, waitingList.FirstKey) + + allPubKeys := make([][]byte, 0) + for len(nextKey) != 0 && index <= waitingList.Length { + allPubKeys = append(allPubKeys, nextKey[2:]) // remove "w_" prefix + + element, errGet := stakingcommon.GetWaitingListElement(stakingSCAcc, tmp.Marshaller, nextKey) + if errGet != nil { + return nil + } + + nextKey = make([]byte, len(element.NextKey)) + if len(element.NextKey) == 0 { + break + } + index++ + copy(nextKey, element.NextKey) + } + return allPubKeys +} diff --git a/integrationTests/vm/staking/stakingV4_test.go b/integrationTests/vm/staking/stakingV4_test.go new file mode 100644 index 00000000000..67b1f19ab03 --- /dev/null +++ b/integrationTests/vm/staking/stakingV4_test.go @@ -0,0 +1,1526 @@ +package staking + +import ( + "bytes" + "math/big" + "testing" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/marshal" + "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/state" + "github.com/multiversx/mx-chain-go/testscommon/stakingcommon" + "github.com/multiversx/mx-chain-go/vm" + "github.com/multiversx/mx-chain-go/vm/systemSmartContracts" + "github.com/stretchr/testify/require" +) + +func requireSliceContains(t *testing.T, s1, s2 [][]byte) { + for _, elemInS2 := range s2 { + require.Contains(t, s1, elemInS2) + } +} + +func requireSliceContainsNumOfElements(t *testing.T, s1, s2 [][]byte, numOfElements int) { + foundCt := 0 + for _, elemInS2 := range s2 { + if searchInSlice(s1, elemInS2) { + foundCt++ + } + } + + require.Equal(t, numOfElements, foundCt) +} + +func requireSameSliceDifferentOrder(t *testing.T, s1, s2 [][]byte) { + require.Equal(t, len(s1), len(s2)) + + for _, elemInS1 := range s1 { + require.Contains(t, s2, elemInS1) + } +} + +func searchInSlice(s1 [][]byte, s2 []byte) bool { + for _, elemInS1 := range s1 { + if bytes.Equal(elemInS1, s2) { + return true + } + } + + return false +} + +func searchInMap(validatorMap map[uint32][][]byte, pk []byte) bool { + for _, validatorsInShard := range validatorMap { + for _, val := range validatorsInShard { + if bytes.Equal(val, pk) { + return true + } + } + } + return false +} + +func requireMapContains(t *testing.T, m map[uint32][][]byte, s [][]byte) { + for _, elemInSlice := range s { + require.True(t, searchInMap(m, elemInSlice)) + } +} + +func requireMapDoesNotContain(t *testing.T, m map[uint32][][]byte, s [][]byte) { + for _, elemInSlice := range s { + require.False(t, searchInMap(m, elemInSlice)) + } +} + +// remove will remove the item from slice without keeping the order of the original slice +func remove(slice [][]byte, elem []byte) [][]byte { + ret := slice + for i, e := range slice { + if bytes.Equal(elem, e) { + ret[i] = ret[len(slice)-1] + return ret[:len(slice)-1] + } + } + + return ret +} + +func getIntersection(slice1, slice2 [][]byte) [][]byte { + ret := make([][]byte, 0) + for _, value := 
range slice2 { + if searchInSlice(slice1, value) { + copiedVal := make([]byte, len(value)) + copy(copiedVal, value) + ret = append(ret, copiedVal) + } + } + + return ret +} + +func getAllPubKeysFromConfig(nodesCfg nodesConfig) [][]byte { + allPubKeys := getAllPubKeys(nodesCfg.eligible) + allPubKeys = append(allPubKeys, getAllPubKeys(nodesCfg.waiting)...) + allPubKeys = append(allPubKeys, getAllPubKeys(nodesCfg.leaving)...) + allPubKeys = append(allPubKeys, getAllPubKeys(nodesCfg.shuffledOut)...) + allPubKeys = append(allPubKeys, nodesCfg.queue...) + allPubKeys = append(allPubKeys, nodesCfg.auction...) + allPubKeys = append(allPubKeys, nodesCfg.new...) + + return allPubKeys +} + +func unStake(t *testing.T, owner []byte, accountsDB state.AccountsAdapter, marshaller marshal.Marshalizer, stake *big.Int) { + validatorSC := stakingcommon.LoadUserAccount(accountsDB, vm.ValidatorSCAddress) + ownerStoredData, _, err := validatorSC.RetrieveValue(owner) + require.Nil(t, err) + + validatorData := &systemSmartContracts.ValidatorDataV2{} + err = marshaller.Unmarshal(validatorData, ownerStoredData) + require.Nil(t, err) + + validatorData.TotalStakeValue.Sub(validatorData.TotalStakeValue, stake) + marshaledData, _ := marshaller.Marshal(validatorData) + err = validatorSC.SaveKeyValue(owner, marshaledData) + require.Nil(t, err) + + err = accountsDB.SaveAccount(validatorSC) + require.Nil(t, err) + _, err = accountsDB.Commit() + require.Nil(t, err) +} + +type configNum struct { + eligible map[uint32]int + waiting map[uint32]int + leaving map[uint32]int + shuffledOut map[uint32]int + queue int + auction int + new int +} + +func checkConfig(t *testing.T, expectedConfig *configNum, nodesConfig nodesConfig) { + checkNumNodes(t, expectedConfig.eligible, nodesConfig.eligible) + checkNumNodes(t, expectedConfig.waiting, nodesConfig.waiting) + checkNumNodes(t, expectedConfig.leaving, nodesConfig.leaving) + checkNumNodes(t, expectedConfig.shuffledOut, nodesConfig.shuffledOut) + + require.Equal(t, expectedConfig.queue, len(nodesConfig.queue)) + require.Equal(t, expectedConfig.auction, len(nodesConfig.auction)) + require.Equal(t, expectedConfig.new, len(nodesConfig.new)) +} + +func checkNumNodes(t *testing.T, expectedNumNodes map[uint32]int, actualNodes map[uint32][][]byte) { + for shardID, numNodesInShard := range expectedNumNodes { + require.Equal(t, numNodesInShard, len(actualNodes[shardID])) + } +} + +func checkShuffledOutNodes(t *testing.T, currNodesConfig, prevNodesConfig nodesConfig, numShuffledOutNodes int, numRemainingEligible int) { + // Shuffled nodes from previous eligible are sent to waiting and previous waiting list nodes are replacing shuffled nodes + requireSliceContainsNumOfElements(t, getAllPubKeys(currNodesConfig.eligible), getAllPubKeys(prevNodesConfig.waiting), numShuffledOutNodes) + requireSliceContainsNumOfElements(t, getAllPubKeys(currNodesConfig.eligible), getAllPubKeys(prevNodesConfig.eligible), numRemainingEligible) + requireSliceContainsNumOfElements(t, getAllPubKeys(currNodesConfig.waiting), getAllPubKeys(prevNodesConfig.eligible), numShuffledOutNodes) +} + +func checkStakingV4EpochChangeFlow( + t *testing.T, + currNodesConfig, prevNodesConfig nodesConfig, + numOfShuffledOut, numOfUnselectedNodesFromAuction, numOfSelectedNodesFromAuction int) { + + // Nodes which are now in eligible are from previous waiting list + requireSliceContainsNumOfElements(t, getAllPubKeys(currNodesConfig.eligible), getAllPubKeys(prevNodesConfig.waiting), numOfShuffledOut) + + // New auction list also contains 
unselected nodes from previous auction list + requireSliceContainsNumOfElements(t, currNodesConfig.auction, prevNodesConfig.auction, numOfUnselectedNodesFromAuction) + + // All shuffled out are from previous eligible config + requireMapContains(t, prevNodesConfig.eligible, getAllPubKeys(currNodesConfig.shuffledOut)) + + // All shuffled out are now in auction + requireSliceContains(t, currNodesConfig.auction, getAllPubKeys(currNodesConfig.shuffledOut)) + + // Nodes which have been selected from previous auction list are now in waiting + requireSliceContainsNumOfElements(t, getAllPubKeys(currNodesConfig.waiting), prevNodesConfig.auction, numOfSelectedNodesFromAuction) +} + +func TestStakingV4(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + numOfMetaNodes := uint32(400) + numOfShards := uint32(3) + numOfEligibleNodesPerShard := uint32(400) + numOfWaitingNodesPerShard := uint32(400) + numOfNodesToShufflePerShard := uint32(80) + shardConsensusGroupSize := 266 + metaConsensusGroupSize := 266 + numOfNodesInStakingQueue := uint32(60) + + totalEligible := int(numOfEligibleNodesPerShard*numOfShards) + int(numOfMetaNodes) // 1600 + totalWaiting := int(numOfWaitingNodesPerShard*numOfShards) + int(numOfMetaNodes) // 1600 + + node := NewTestMetaProcessor( + numOfMetaNodes, + numOfShards, + numOfEligibleNodesPerShard, + numOfWaitingNodesPerShard, + numOfNodesToShufflePerShard, + shardConsensusGroupSize, + metaConsensusGroupSize, + numOfNodesInStakingQueue, + ) + node.EpochStartTrigger.SetRoundsPerEpoch(4) + + // 1. Check initial config is correct + initialNodes := node.NodesConfig + require.Len(t, getAllPubKeys(initialNodes.eligible), totalEligible) + require.Len(t, getAllPubKeys(initialNodes.waiting), totalWaiting) + require.Len(t, initialNodes.queue, int(numOfNodesInStakingQueue)) + require.Empty(t, initialNodes.shuffledOut) + require.Empty(t, initialNodes.auction) + + // 2. Check config after staking v4 initialization + node.Process(t, 5) + nodesConfigStakingV4Step1 := node.NodesConfig + require.Len(t, getAllPubKeys(nodesConfigStakingV4Step1.eligible), totalEligible) + require.Len(t, getAllPubKeys(nodesConfigStakingV4Step1.waiting), totalWaiting) + require.Empty(t, nodesConfigStakingV4Step1.queue) + require.Empty(t, nodesConfigStakingV4Step1.shuffledOut) + require.Empty(t, nodesConfigStakingV4Step1.auction) // auction is empty, since the queued nodes were unStaked at staking v4 init + + // 3. Re-stake the nodes that were in the queue + node.ProcessReStake(t, initialNodes.queue) + nodesConfigStakingV4Step1 = node.NodesConfig + requireSameSliceDifferentOrder(t, initialNodes.queue, nodesConfigStakingV4Step1.auction) + + // 4. 
Check config after first staking v4 epoch, WITHOUT distribution from auction -> waiting + node.Process(t, 6) + nodesConfigStakingV4Step2 := node.NodesConfig + require.Len(t, getAllPubKeys(nodesConfigStakingV4Step2.eligible), totalEligible) // 1600 + + numOfShuffledOut := int((numOfShards + 1) * numOfNodesToShufflePerShard) // 320 + require.Len(t, getAllPubKeys(nodesConfigStakingV4Step2.shuffledOut), numOfShuffledOut) + + newWaiting := totalWaiting - numOfShuffledOut // 1280 (1600 - 320) + require.Len(t, getAllPubKeys(nodesConfigStakingV4Step2.waiting), newWaiting) + + // 380 (320 from shuffled out + 60 from initial staking queue -> auction from stakingV4 init) + auctionListSize := numOfShuffledOut + len(nodesConfigStakingV4Step1.auction) + require.Len(t, nodesConfigStakingV4Step2.auction, auctionListSize) + requireSliceContains(t, nodesConfigStakingV4Step2.auction, nodesConfigStakingV4Step1.auction) + + require.Empty(t, nodesConfigStakingV4Step2.queue) + require.Empty(t, nodesConfigStakingV4Step2.leaving) + + // 320 nodes which are now in eligible are from previous waiting list + requireSliceContainsNumOfElements(t, getAllPubKeys(nodesConfigStakingV4Step2.eligible), getAllPubKeys(nodesConfigStakingV4Step1.waiting), numOfShuffledOut) + + // All shuffled out are from previous staking v4 init eligible + requireMapContains(t, nodesConfigStakingV4Step1.eligible, getAllPubKeys(nodesConfigStakingV4Step2.shuffledOut)) + + // All shuffled out are in auction + requireSliceContains(t, nodesConfigStakingV4Step2.auction, getAllPubKeys(nodesConfigStakingV4Step2.shuffledOut)) + + // No auction node from previous epoch has been moved to waiting + requireMapDoesNotContain(t, nodesConfigStakingV4Step2.waiting, nodesConfigStakingV4Step1.auction) + + epochs := 0 + prevConfig := nodesConfigStakingV4Step2 + numOfSelectedNodesFromAuction := numOfShuffledOut // 320, since we will always fill shuffled out nodes with this config + numOfUnselectedNodesFromAuction := auctionListSize - numOfShuffledOut // 60 = 380 - 320 + for epochs < 10 { + node.Process(t, 5) + newNodeConfig := node.NodesConfig + + require.Len(t, getAllPubKeys(newNodeConfig.eligible), totalEligible) // 1600 + require.Len(t, getAllPubKeys(newNodeConfig.waiting), newWaiting) // 1280 + require.Len(t, getAllPubKeys(newNodeConfig.shuffledOut), numOfShuffledOut) // 320 + require.Len(t, newNodeConfig.auction, auctionListSize) // 380 + require.Empty(t, newNodeConfig.queue) + require.Empty(t, newNodeConfig.leaving) + + checkStakingV4EpochChangeFlow(t, newNodeConfig, prevConfig, numOfShuffledOut, numOfUnselectedNodesFromAuction, numOfSelectedNodesFromAuction) + prevConfig = newNodeConfig + epochs++ + } +} + +func TestStakingV4MetaProcessor_ProcessMultipleNodesWithSameSetupExpectSameRootHash(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + numOfMetaNodes := uint32(6) + numOfShards := uint32(3) + numOfEligibleNodesPerShard := uint32(6) + numOfWaitingNodesPerShard := uint32(6) + numOfNodesToShufflePerShard := uint32(2) + shardConsensusGroupSize := 2 + metaConsensusGroupSize := 2 + numOfNodesInStakingQueue := uint32(2) + + nodes := make([]*TestMetaProcessor, 0, numOfMetaNodes) + for i := uint32(0); i < numOfMetaNodes; i++ { + nodes = append(nodes, NewTestMetaProcessor( + numOfMetaNodes, + numOfShards, + numOfEligibleNodesPerShard, + numOfWaitingNodesPerShard, + numOfNodesToShufflePerShard, + shardConsensusGroupSize, + metaConsensusGroupSize, + numOfNodesInStakingQueue, + )) + nodes[i].EpochStartTrigger.SetRoundsPerEpoch(4) + } + 
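+ // Determinism check: all processors above were created with an identical setup, so after each processed epoch every node's validator statistics root hash is expected to match the others.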
+ numOfEpochs := uint32(15) + rootHashes := make(map[uint32][][]byte) + for currEpoch := uint32(1); currEpoch <= numOfEpochs; currEpoch++ { + for _, node := range nodes { + rootHash, _ := node.ValidatorStatistics.RootHash() + rootHashes[currEpoch] = append(rootHashes[currEpoch], rootHash) + + node.Process(t, 5) + require.Equal(t, currEpoch, node.EpochStartTrigger.Epoch()) + } + } + + for _, rootHashesInEpoch := range rootHashes { + firstNodeRootHashInEpoch := rootHashesInEpoch[0] + for _, rootHash := range rootHashesInEpoch { + require.Equal(t, firstNodeRootHashInEpoch, rootHash) + } + } +} + +func TestStakingV4_UnStakeNodesWithNotEnoughFunds(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + pubKeys := generateAddresses(0, 20) + + // Owner1 has 8 nodes, but enough stake for just 7 nodes. At the end of the epoch(staking v4 init), + // his last node from staking queue should be unStaked + owner1 := "owner1" + owner1Stats := &OwnerStats{ + EligibleBlsKeys: map[uint32][][]byte{ + core.MetachainShardId: pubKeys[:3], + }, + WaitingBlsKeys: map[uint32][][]byte{ + 0: pubKeys[3:6], + }, + StakingQueueKeys: pubKeys[6:8], + TotalStake: big.NewInt(7 * nodePrice), + } + + // Owner2 has 6 nodes, but enough stake for just 5 nodes. At the end of the epoch(staking v4 init), + // one node from waiting list should be unStaked + owner2 := "owner2" + owner2Stats := &OwnerStats{ + EligibleBlsKeys: map[uint32][][]byte{ + 0: pubKeys[8:11], + }, + WaitingBlsKeys: map[uint32][][]byte{ + core.MetachainShardId: pubKeys[11:14], + }, + TotalStake: big.NewInt(5 * nodePrice), + } + + // Owner3 has 2 nodes in staking queue with topUp = nodePrice + owner3 := "owner3" + owner3Stats := &OwnerStats{ + StakingQueueKeys: pubKeys[14:16], + TotalStake: big.NewInt(3 * nodePrice), + } + + // Owner4 has 1 node in staking queue with topUp = nodePrice + owner4 := "owner4" + owner4Stats := &OwnerStats{ + StakingQueueKeys: pubKeys[16:17], + TotalStake: big.NewInt(2 * nodePrice), + } + + cfg := &InitialNodesConfig{ + MetaConsensusGroupSize: 2, + ShardConsensusGroupSize: 2, + MinNumberOfEligibleShardNodes: 3, + MinNumberOfEligibleMetaNodes: 3, + NumOfShards: 1, + Owners: map[string]*OwnerStats{ + owner1: owner1Stats, + owner2: owner2Stats, + owner3: owner3Stats, + owner4: owner4Stats, + }, + MaxNodesChangeConfig: []config.MaxNodesChangeConfig{ + { + EpochEnable: 0, + MaxNumNodes: 12, + NodesToShufflePerShard: 1, + }, + { + EpochEnable: stakingV4Step3EnableEpoch, + MaxNumNodes: 10, + NodesToShufflePerShard: 1, + }, + }, + } + node := NewTestMetaProcessorWithCustomNodes(cfg) + node.EpochStartTrigger.SetRoundsPerEpoch(4) + + // 1. 
Check initial config is correct + currNodesConfig := node.NodesConfig + require.Len(t, getAllPubKeys(currNodesConfig.eligible), 6) + require.Len(t, getAllPubKeys(currNodesConfig.waiting), 6) + require.Len(t, currNodesConfig.eligible[core.MetachainShardId], 3) + require.Len(t, currNodesConfig.waiting[core.MetachainShardId], 3) + require.Len(t, currNodesConfig.eligible[0], 3) + require.Len(t, currNodesConfig.waiting[0], 3) + + requireSliceContainsNumOfElements(t, currNodesConfig.eligible[core.MetachainShardId], owner1Stats.EligibleBlsKeys[core.MetachainShardId], 3) + requireSliceContainsNumOfElements(t, currNodesConfig.waiting[core.MetachainShardId], owner2Stats.WaitingBlsKeys[core.MetachainShardId], 3) + requireSliceContainsNumOfElements(t, currNodesConfig.eligible[0], owner2Stats.EligibleBlsKeys[0], 3) + requireSliceContainsNumOfElements(t, currNodesConfig.waiting[0], owner1Stats.WaitingBlsKeys[0], 3) + + owner1StakingQueue := owner1Stats.StakingQueueKeys + owner3StakingQueue := owner3Stats.StakingQueueKeys + owner4StakingQueue := owner4Stats.StakingQueueKeys + queue := make([][]byte, 0) + queue = append(queue, owner1StakingQueue...) + queue = append(queue, owner3StakingQueue...) + queue = append(queue, owner4StakingQueue...) + require.Len(t, currNodesConfig.queue, 5) + requireSameSliceDifferentOrder(t, currNodesConfig.queue, queue) + + require.Empty(t, currNodesConfig.shuffledOut) + require.Empty(t, currNodesConfig.auction) + + // 2. Check config after staking v4 initialization + node.Process(t, 5) + currNodesConfig = node.NodesConfig + require.Len(t, getAllPubKeys(currNodesConfig.eligible), 6) + require.Len(t, getAllPubKeys(currNodesConfig.waiting), 5) + require.Len(t, currNodesConfig.eligible[core.MetachainShardId], 3) + require.Len(t, currNodesConfig.waiting[core.MetachainShardId], 2) + require.Len(t, currNodesConfig.eligible[0], 3) + require.Len(t, currNodesConfig.waiting[0], 3) + + // Owner1 will have the second node from queue removed, before adding all the nodes to auction list + queue = remove(queue, owner1StakingQueue[1]) + require.Empty(t, currNodesConfig.queue) + require.Empty(t, currNodesConfig.auction) // all nodes from the queue should be unStaked and the auction list should be empty + + // Owner2 will have one of the nodes in waiting list removed + require.Len(t, getAllPubKeys(currNodesConfig.leaving), 1) + requireSliceContainsNumOfElements(t, getAllPubKeys(currNodesConfig.leaving), getAllPubKeys(owner2Stats.WaitingBlsKeys), 1) + + // Owner1 will unStake some EGLD => at the end of next epoch, he should not be able to reStake all the nodes + unStake(t, []byte(owner1), node.AccountsAdapter, node.Marshaller, big.NewInt(0.1*nodePrice)) + + // 3. ReStake the nodes that were in the queue + queue = remove(queue, owner1StakingQueue[0]) + node.ProcessReStake(t, queue) + currNodesConfig = node.NodesConfig + require.Len(t, currNodesConfig.auction, 3) + requireSameSliceDifferentOrder(t, currNodesConfig.auction, queue) + + // 4. 
Check config in epoch = staking v4 + node.Process(t, 4) + currNodesConfig = node.NodesConfig + require.Len(t, getAllPubKeys(currNodesConfig.eligible), 6) + require.Len(t, getAllPubKeys(currNodesConfig.waiting), 3) + require.Len(t, getAllPubKeys(currNodesConfig.shuffledOut), 2) + + require.Len(t, currNodesConfig.eligible[core.MetachainShardId], 3) + require.Len(t, currNodesConfig.waiting[core.MetachainShardId], 1) + require.Len(t, currNodesConfig.shuffledOut[core.MetachainShardId], 1) + require.Len(t, currNodesConfig.eligible[0], 3) + require.Len(t, currNodesConfig.waiting[0], 2) + require.Len(t, currNodesConfig.shuffledOut[0], 1) + + require.Len(t, currNodesConfig.auction, 3) + requireSameSliceDifferentOrder(t, currNodesConfig.auction, queue) + require.Len(t, getAllPubKeys(currNodesConfig.leaving), 0) + // There are no more unStaked nodes left from owner1 because of insufficient funds + requireSliceContainsNumOfElements(t, getAllPubKeysFromConfig(currNodesConfig), owner1StakingQueue, 0) + + // Owner3 will unStake EGLD => he will have negative top-up at the selection time => one of his nodes will be unStaked. + // His other node should not have been selected => remains in auction. + // Meanwhile, owner4 had never unStaked EGLD => his node from auction list will be distributed to waiting + unStake(t, []byte(owner3), node.AccountsAdapter, node.Marshaller, big.NewInt(2*nodePrice)) + + // 5. Check config in epoch = staking v4 step3 + node.Process(t, 5) + currNodesConfig = node.NodesConfig + requireSliceContainsNumOfElements(t, getAllPubKeys(currNodesConfig.leaving), owner3StakingQueue, 1) + requireSliceContainsNumOfElements(t, currNodesConfig.auction, owner3StakingQueue, 1) + requireSliceContainsNumOfElements(t, getAllPubKeys(currNodesConfig.waiting), owner4StakingQueue, 1) +} + +func TestStakingV4_StakeNewNodes(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + pubKeys := generateAddresses(0, 20) + + // Owner1 has 6 nodes, zero top up + owner1 := "owner1" + owner1Stats := &OwnerStats{ + EligibleBlsKeys: map[uint32][][]byte{ + core.MetachainShardId: pubKeys[:2], + }, + WaitingBlsKeys: map[uint32][][]byte{ + 0: pubKeys[2:4], + }, + StakingQueueKeys: pubKeys[4:6], + TotalStake: big.NewInt(6 * nodePrice), + } + + // Owner2 has 4 nodes, zero top up + owner2 := "owner2" + owner2Stats := &OwnerStats{ + EligibleBlsKeys: map[uint32][][]byte{ + 0: pubKeys[6:8], + }, + WaitingBlsKeys: map[uint32][][]byte{ + core.MetachainShardId: pubKeys[8:10], + }, + TotalStake: big.NewInt(4 * nodePrice), + } + // Owner3 has 1 node in staking queue with topUp = nodePrice + owner3 := "owner3" + owner3Stats := &OwnerStats{ + StakingQueueKeys: pubKeys[10:11], + TotalStake: big.NewInt(2 * nodePrice), + } + + cfg := &InitialNodesConfig{ + MetaConsensusGroupSize: 1, + ShardConsensusGroupSize: 1, + MinNumberOfEligibleShardNodes: 1, + MinNumberOfEligibleMetaNodes: 1, + NumOfShards: 1, + Owners: map[string]*OwnerStats{ + owner1: owner1Stats, + owner2: owner2Stats, + owner3: owner3Stats, + }, + MaxNodesChangeConfig: []config.MaxNodesChangeConfig{ + { + EpochEnable: 0, + MaxNumNodes: 8, + NodesToShufflePerShard: 1, + }, + }, + } + node := NewTestMetaProcessorWithCustomNodes(cfg) + node.EpochStartTrigger.SetRoundsPerEpoch(4) + + // 1.1 Check initial config is correct + currNodesConfig := node.NodesConfig + require.Len(t, getAllPubKeys(currNodesConfig.eligible), 4) + require.Len(t, getAllPubKeys(currNodesConfig.waiting), 4) + require.Len(t, currNodesConfig.eligible[core.MetachainShardId], 2) + 
require.Len(t, currNodesConfig.waiting[core.MetachainShardId], 2) + require.Len(t, currNodesConfig.eligible[0], 2) + require.Len(t, currNodesConfig.waiting[0], 2) + + owner1StakingQueue := owner1Stats.StakingQueueKeys + owner3StakingQueue := owner3Stats.StakingQueueKeys + queue := make([][]byte, 0) + queue = append(queue, owner1StakingQueue...) + queue = append(queue, owner3StakingQueue...) + require.Len(t, currNodesConfig.queue, 3) + requireSameSliceDifferentOrder(t, currNodesConfig.queue, queue) + + require.Empty(t, currNodesConfig.shuffledOut) + require.Empty(t, currNodesConfig.auction) + + // NewOwner0 stakes 1 node with top up = 0 before staking v4; should be sent to staking queue + newOwner0 := "newOwner0" + newNodes0 := map[string]*NodesRegisterData{ + newOwner0: { + BLSKeys: [][]byte{generateAddress(333)}, + TotalStake: big.NewInt(nodePrice), + }, + } + + // 1.2 Check staked node before staking v4 is sent to staking queue + node.ProcessStake(t, newNodes0) + queue = append(queue, newNodes0[newOwner0].BLSKeys...) + currNodesConfig = node.NodesConfig + require.Len(t, currNodesConfig.queue, 4) + requireSameSliceDifferentOrder(t, currNodesConfig.queue, queue) + + // NewOwner1 stakes 1 node with top up = 2*node price; should be sent to auction list + newOwner1 := "newOwner1" + newNodes1 := map[string]*NodesRegisterData{ + newOwner1: { + BLSKeys: [][]byte{generateAddress(444)}, + TotalStake: big.NewInt(3 * nodePrice), + }, + } + // 2. Check config after staking v4 init when a new node is staked + node.Process(t, 4) + node.ProcessStake(t, newNodes1) + node.ProcessReStake(t, queue) + currNodesConfig = node.NodesConfig + queue = append(queue, newNodes1[newOwner1].BLSKeys...) + require.Empty(t, currNodesConfig.queue) + require.Empty(t, currNodesConfig.leaving) + require.Len(t, currNodesConfig.auction, 5) + requireSameSliceDifferentOrder(t, currNodesConfig.auction, queue) + + // NewOwner2 stakes 2 node with top up = 2*node price; should be sent to auction list + newOwner2 := "newOwner2" + newNodes2 := map[string]*NodesRegisterData{ + newOwner2: { + BLSKeys: [][]byte{generateAddress(555), generateAddress(666)}, + TotalStake: big.NewInt(4 * nodePrice), + }, + } + // 2. Check in epoch = staking v4 step2 when 2 new nodes are staked + node.Process(t, 4) + node.ProcessStake(t, newNodes2) + currNodesConfig = node.NodesConfig + queue = append(queue, newNodes2[newOwner2].BLSKeys...) + require.Empty(t, currNodesConfig.queue) + requireSliceContainsNumOfElements(t, currNodesConfig.auction, queue, 7) + + // 3. Epoch = staking v4 step3 + // Only the new 2 owners + owner3 had enough top up to be distributed to waiting. 
+ // Meanwhile, owner1 which had 0 top up, still has his bls keys in auction, along with newOwner0 + node.Process(t, 5) + currNodesConfig = node.NodesConfig + require.Empty(t, currNodesConfig.queue) + requireMapContains(t, currNodesConfig.waiting, newNodes1[newOwner1].BLSKeys) + requireMapContains(t, currNodesConfig.waiting, newNodes2[newOwner2].BLSKeys) + requireMapContains(t, currNodesConfig.waiting, owner3StakingQueue) + requireSliceContains(t, currNodesConfig.auction, owner1StakingQueue) + requireSliceContains(t, currNodesConfig.auction, newNodes0[newOwner0].BLSKeys) +} + +func TestStakingV4_UnStakeNodes(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + pubKeys := generateAddresses(0, 20) + + owner1 := "owner1" + owner1Stats := &OwnerStats{ + EligibleBlsKeys: map[uint32][][]byte{ + core.MetachainShardId: pubKeys[:2], + }, + WaitingBlsKeys: map[uint32][][]byte{ + 0: pubKeys[2:4], + }, + StakingQueueKeys: pubKeys[4:6], + TotalStake: big.NewInt(10 * nodePrice), + } + + owner2 := "owner2" + owner2Stats := &OwnerStats{ + EligibleBlsKeys: map[uint32][][]byte{ + 0: pubKeys[6:8], + }, + WaitingBlsKeys: map[uint32][][]byte{ + core.MetachainShardId: pubKeys[8:12], + }, + StakingQueueKeys: pubKeys[12:15], + TotalStake: big.NewInt(10 * nodePrice), + } + + owner3 := "owner3" + owner3Stats := &OwnerStats{ + StakingQueueKeys: pubKeys[15:17], + TotalStake: big.NewInt(6 * nodePrice), + } + + cfg := &InitialNodesConfig{ + MetaConsensusGroupSize: 1, + ShardConsensusGroupSize: 1, + MinNumberOfEligibleShardNodes: 2, + MinNumberOfEligibleMetaNodes: 2, + NumOfShards: 1, + Owners: map[string]*OwnerStats{ + owner1: owner1Stats, + owner2: owner2Stats, + owner3: owner3Stats, + }, + MaxNodesChangeConfig: []config.MaxNodesChangeConfig{ + { + EpochEnable: 0, + MaxNumNodes: 10, + NodesToShufflePerShard: 1, + }, + }, + } + node := NewTestMetaProcessorWithCustomNodes(cfg) + node.EpochStartTrigger.SetRoundsPerEpoch(4) + + // 1. Check initial config is correct + currNodesConfig := node.NodesConfig + require.Len(t, getAllPubKeys(currNodesConfig.eligible), 4) + require.Len(t, getAllPubKeys(currNodesConfig.waiting), 6) + require.Len(t, currNodesConfig.eligible[core.MetachainShardId], 2) + require.Len(t, currNodesConfig.waiting[core.MetachainShardId], 4) + require.Len(t, currNodesConfig.eligible[0], 2) + require.Len(t, currNodesConfig.waiting[0], 2) + require.Empty(t, currNodesConfig.shuffledOut) + require.Empty(t, currNodesConfig.auction) + + owner1StakingQueue := owner1Stats.StakingQueueKeys + owner2StakingQueue := owner2Stats.StakingQueueKeys + owner3StakingQueue := owner3Stats.StakingQueueKeys + queue := make([][]byte, 0) + queue = append(queue, owner1StakingQueue...) + queue = append(queue, owner2StakingQueue...) + queue = append(queue, owner3StakingQueue...) + require.Len(t, currNodesConfig.queue, 7) + requireSameSliceDifferentOrder(t, currNodesConfig.queue, queue) + + // 1.1 Owner2 unStakes one of his staking queue nodes. Node should be removed from staking queue list + node.ProcessUnStake(t, map[string][][]byte{ + owner2: {owner2Stats.StakingQueueKeys[0]}, + }) + currNodesConfig = node.NodesConfig + queue = remove(queue, owner2Stats.StakingQueueKeys[0]) + require.Len(t, currNodesConfig.queue, 6) + requireSameSliceDifferentOrder(t, currNodesConfig.queue, queue) + require.Empty(t, currNodesConfig.new) + require.Empty(t, currNodesConfig.auction) + + // 1.2 Owner2 unStakes one of his waiting list keys. First node from staking queue should be added to fill its place. 
+ copy(queue, currNodesConfig.queue) // copy queue to local variable so we have the queue in same order + node.ProcessUnStake(t, map[string][][]byte{ + owner2: {owner2Stats.WaitingBlsKeys[core.MetachainShardId][0]}, + }) + currNodesConfig = node.NodesConfig + require.Len(t, currNodesConfig.new, 1) + requireSliceContains(t, queue, currNodesConfig.new) + require.Empty(t, currNodesConfig.auction) + queue = remove(queue, currNodesConfig.new[0]) + require.Len(t, currNodesConfig.queue, 5) + requireSameSliceDifferentOrder(t, queue, currNodesConfig.queue) + + // 2. Check config after staking v4 step1 + node.Process(t, 3) + currNodesConfig = node.NodesConfig + require.Len(t, getAllPubKeys(currNodesConfig.eligible), 4) + require.Len(t, getAllPubKeys(currNodesConfig.waiting), 6) + // Owner2's node from waiting list which was unStaked in previous epoch is now leaving + require.Len(t, currNodesConfig.leaving, 1) + require.Equal(t, owner2Stats.WaitingBlsKeys[core.MetachainShardId][0], currNodesConfig.leaving[core.MetachainShardId][0]) + require.Empty(t, currNodesConfig.auction) // all nodes from queue have been unStaked, the auction list is empty + + // 2.1 restake the nodes that were on the queue + node.ProcessReStake(t, queue) + currNodesConfig = node.NodesConfig + requireSameSliceDifferentOrder(t, queue, currNodesConfig.auction) + + // 2.2 Owner3 unStakes one of his nodes from auction + node.ProcessUnStake(t, map[string][][]byte{ + owner3: {owner3StakingQueue[1]}, + }) + unStakedNodesInStakingV4Step1Epoch := make([][]byte, 0) + unStakedNodesInStakingV4Step1Epoch = append(unStakedNodesInStakingV4Step1Epoch, owner3StakingQueue[1]) + currNodesConfig = node.NodesConfig + queue = remove(queue, owner3StakingQueue[1]) + require.Len(t, currNodesConfig.auction, 4) + requireSameSliceDifferentOrder(t, queue, currNodesConfig.auction) + require.Empty(t, currNodesConfig.queue) + require.Empty(t, currNodesConfig.new) + + // 2.3 Owner1 unStakes 2 nodes: one from auction + one active + node.ProcessUnStake(t, map[string][][]byte{ + owner1: {owner1StakingQueue[1], owner1Stats.WaitingBlsKeys[0][0]}, + }) + unStakedNodesInStakingV4Step1Epoch = append(unStakedNodesInStakingV4Step1Epoch, owner1StakingQueue[1]) + unStakedNodesInStakingV4Step1Epoch = append(unStakedNodesInStakingV4Step1Epoch, owner1Stats.WaitingBlsKeys[0][0]) + currNodesConfig = node.NodesConfig + queue = remove(queue, owner1StakingQueue[1]) + require.Len(t, currNodesConfig.auction, 3) + requireSameSliceDifferentOrder(t, queue, currNodesConfig.auction) + require.Empty(t, currNodesConfig.queue) + require.Empty(t, currNodesConfig.new) + + // 3. Check config in epoch = staking v4 step2 + node.Process(t, 3) + currNodesConfig = node.NodesConfig + require.Len(t, getAllPubKeys(currNodesConfig.eligible), 4) + require.Len(t, getAllPubKeys(currNodesConfig.waiting), 4) + require.Len(t, getAllPubKeys(currNodesConfig.leaving), 3) + // All unStaked nodes in previous epoch are now leaving + requireMapContains(t, currNodesConfig.leaving, unStakedNodesInStakingV4Step1Epoch) + // 3.1 Owner2 unStakes one of his nodes from auction + node.ProcessUnStake(t, map[string][][]byte{ + owner2: {owner2StakingQueue[1]}, + }) + currNodesConfig = node.NodesConfig + queue = remove(queue, owner2StakingQueue[1]) + shuffledOutNodes := getAllPubKeys(currNodesConfig.shuffledOut) + require.Len(t, currNodesConfig.auction, len(shuffledOutNodes)+len(queue)) + requireSliceContains(t, currNodesConfig.auction, shuffledOutNodes) + requireSliceContains(t, currNodesConfig.auction, queue) + + // 4. 
Check config after whole staking v4 chain is ready, when one of the owners unStakes a node + node.Process(t, 4) + currNodesConfig = node.NodesConfig + node.ProcessUnStake(t, map[string][][]byte{ + owner2: {owner2Stats.EligibleBlsKeys[0][0]}, + }) + node.Process(t, 4) + currNodesConfig = node.NodesConfig + require.Len(t, getAllPubKeys(currNodesConfig.leaving), 1) + requireMapContains(t, currNodesConfig.leaving, [][]byte{owner2Stats.EligibleBlsKeys[0][0]}) + require.Empty(t, currNodesConfig.new) + require.Empty(t, currNodesConfig.queue) + + // 4.1 NewOwner stakes 1 node, should be sent to auction + newOwner := "newOwner1" + newNode := map[string]*NodesRegisterData{ + newOwner: { + BLSKeys: [][]byte{generateAddress(444)}, + TotalStake: big.NewInt(2 * nodePrice), + }, + } + node.ProcessStake(t, newNode) + currNodesConfig = node.NodesConfig + requireSliceContains(t, currNodesConfig.auction, newNode[newOwner].BLSKeys) + + // 4.2 NewOwner unStakes his node, he should not be in auction anymore + set to leaving + node.ProcessUnStake(t, map[string][][]byte{ + newOwner: {newNode[newOwner].BLSKeys[0]}, + }) + currNodesConfig = node.NodesConfig + requireSliceContainsNumOfElements(t, currNodesConfig.auction, newNode[newOwner].BLSKeys, 0) + node.Process(t, 3) + currNodesConfig = node.NodesConfig + requireMapContains(t, currNodesConfig.leaving, newNode[newOwner].BLSKeys) +} + +func TestStakingV4_JailAndUnJailNodes(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + pubKeys := generateAddresses(0, 20) + + owner1 := "owner1" + owner1Stats := &OwnerStats{ + EligibleBlsKeys: map[uint32][][]byte{ + core.MetachainShardId: pubKeys[:2], + }, + WaitingBlsKeys: map[uint32][][]byte{ + 0: pubKeys[2:4], + }, + StakingQueueKeys: pubKeys[4:6], + TotalStake: big.NewInt(10 * nodePrice), + } + + owner2 := "owner2" + owner2Stats := &OwnerStats{ + EligibleBlsKeys: map[uint32][][]byte{ + 0: pubKeys[6:8], + }, + WaitingBlsKeys: map[uint32][][]byte{ + core.MetachainShardId: pubKeys[8:12], + }, + StakingQueueKeys: pubKeys[12:15], + TotalStake: big.NewInt(10 * nodePrice), + } + + cfg := &InitialNodesConfig{ + MetaConsensusGroupSize: 1, + ShardConsensusGroupSize: 1, + MinNumberOfEligibleShardNodes: 2, + MinNumberOfEligibleMetaNodes: 2, + NumOfShards: 1, + Owners: map[string]*OwnerStats{ + owner1: owner1Stats, + owner2: owner2Stats, + }, + MaxNodesChangeConfig: []config.MaxNodesChangeConfig{ + { + EpochEnable: 0, + MaxNumNodes: 10, + NodesToShufflePerShard: 1, + }, + { + EpochEnable: stakingV4Step3EnableEpoch, + MaxNumNodes: 4, + NodesToShufflePerShard: 1, + }, + }, + } + node := NewTestMetaProcessorWithCustomNodes(cfg) + node.EpochStartTrigger.SetRoundsPerEpoch(4) + + // 1. Check initial config is correct + currNodesConfig := node.NodesConfig + require.Len(t, getAllPubKeys(currNodesConfig.eligible), 4) + require.Len(t, getAllPubKeys(currNodesConfig.waiting), 6) + require.Len(t, currNodesConfig.eligible[core.MetachainShardId], 2) + require.Len(t, currNodesConfig.waiting[core.MetachainShardId], 4) + require.Len(t, currNodesConfig.eligible[0], 2) + require.Len(t, currNodesConfig.waiting[0], 2) + require.Empty(t, currNodesConfig.shuffledOut) + require.Empty(t, currNodesConfig.auction) + + owner1StakingQueue := owner1Stats.StakingQueueKeys + owner2StakingQueue := owner2Stats.StakingQueueKeys + queue := make([][]byte, 0) + queue = append(queue, owner1StakingQueue...) + queue = append(queue, owner2StakingQueue...) 
+ require.Len(t, currNodesConfig.queue, 5) + requireSameSliceDifferentOrder(t, currNodesConfig.queue, queue) + + // 1.1 Jail 4 nodes: + // - 2 nodes from waiting list shard = 0 + // - 2 nodes from waiting list shard = meta chain + jailedNodes := make([][]byte, 0) + jailedNodes = append(jailedNodes, owner1Stats.WaitingBlsKeys[0]...) + jailedNodes = append(jailedNodes, owner2Stats.WaitingBlsKeys[core.MetachainShardId][:2]...) + node.ProcessJail(t, jailedNodes) + + // 1.2 UnJail 2 nodes from initial jailed nodes: + // - 1 node from waiting list shard = 0 + // - 1 node from waiting list shard = meta chain + unJailedNodes := make([][]byte, 0) + unJailedNodes = append(unJailedNodes, owner1Stats.WaitingBlsKeys[0][0]) + unJailedNodes = append(unJailedNodes, owner2Stats.WaitingBlsKeys[core.MetachainShardId][0]) + jailedNodes = remove(jailedNodes, unJailedNodes[0]) + jailedNodes = remove(jailedNodes, unJailedNodes[1]) + node.ProcessUnJail(t, unJailedNodes) + + // 2. Two jailed nodes are now leaving; the other two unJailed nodes are re-staked and distributed on waiting list + node.Process(t, 3) + currNodesConfig = node.NodesConfig + requireMapContains(t, currNodesConfig.leaving, jailedNodes) + requireMapContains(t, currNodesConfig.waiting, unJailedNodes) + requireSameSliceDifferentOrder(t, currNodesConfig.auction, make([][]byte, 0)) + require.Len(t, getAllPubKeys(currNodesConfig.eligible), 4) + require.Len(t, getAllPubKeys(currNodesConfig.waiting), 4) + require.Empty(t, currNodesConfig.queue) + + // 2.1 ReStake the nodes that were in the queue + // but first, we need to unJail the nodes + node.ProcessUnJail(t, jailedNodes) + node.ProcessReStake(t, queue) + currNodesConfig = node.NodesConfig + queue = append(queue, jailedNodes...) + require.Empty(t, currNodesConfig.queue) + requireSameSliceDifferentOrder(t, currNodesConfig.auction, queue) + + // 3. Epoch = stakingV4Step2 + node.Process(t, 1) + currNodesConfig = node.NodesConfig + queue = append(queue, getAllPubKeys(currNodesConfig.shuffledOut)...) + require.Empty(t, currNodesConfig.queue) + requireSameSliceDifferentOrder(t, currNodesConfig.auction, queue) + + // 3.1 Jail a random node from waiting list + newJailed := getAllPubKeys(currNodesConfig.waiting)[:1] + node.ProcessJail(t, newJailed) + + // 4. Epoch = stakingV4Step3; + // 4.1 Expect jailed node from waiting list is now leaving + node.Process(t, 4) + currNodesConfig = node.NodesConfig + requireMapContains(t, currNodesConfig.leaving, newJailed) + requireSliceContainsNumOfElements(t, currNodesConfig.auction, newJailed, 0) + require.Empty(t, currNodesConfig.queue) + + // 4.2 UnJail previous node and expect it is sent to auction + node.ProcessUnJail(t, newJailed) + currNodesConfig = node.NodesConfig + requireSliceContains(t, currNodesConfig.auction, newJailed) + require.Empty(t, currNodesConfig.queue) + + // 5. 
Epoch is now after whole staking v4 chain is activated + node.Process(t, 3) + currNodesConfig = node.NodesConfig + queue = currNodesConfig.auction + newJailed = queue[:1] + newUnJailed := newJailed[0] + + // 5.1 Take a random node from auction and jail it; expect it is removed from auction list + node.ProcessJail(t, newJailed) + queue = remove(queue, newJailed[0]) + currNodesConfig = node.NodesConfig + requireSameSliceDifferentOrder(t, queue, currNodesConfig.auction) + + // 5.2 UnJail previous node; expect it is sent back to auction + node.ProcessUnJail(t, [][]byte{newUnJailed}) + queue = append(queue, newUnJailed) + currNodesConfig = node.NodesConfig + requireSameSliceDifferentOrder(t, queue, currNodesConfig.auction) + require.Empty(t, node.NodesConfig.queue) +} + +func TestStakingV4_DifferentEdgeCasesWithNotEnoughNodesInWaitingShouldSendShuffledToToWaiting(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + pubKeys := generateAddresses(0, 20) + + owner1 := "owner1" + owner1Stats := &OwnerStats{ + EligibleBlsKeys: map[uint32][][]byte{ + core.MetachainShardId: pubKeys[:4], + 0: pubKeys[4:8], + }, + WaitingBlsKeys: map[uint32][][]byte{ + core.MetachainShardId: pubKeys[8:9], + 0: pubKeys[9:10], + }, + TotalStake: big.NewInt(20 * nodePrice), + } + + cfg := &InitialNodesConfig{ + MetaConsensusGroupSize: 2, + ShardConsensusGroupSize: 2, + MinNumberOfEligibleShardNodes: 4, + MinNumberOfEligibleMetaNodes: 4, + NumOfShards: 1, + Owners: map[string]*OwnerStats{ + owner1: owner1Stats, + }, + MaxNodesChangeConfig: []config.MaxNodesChangeConfig{ + { + EpochEnable: 0, + MaxNumNodes: 12, + NodesToShufflePerShard: 1, + }, + { + EpochEnable: stakingV4Step3EnableEpoch, // epoch 3 + MaxNumNodes: 10, + NodesToShufflePerShard: 1, + }, + { + EpochEnable: 6, + MaxNumNodes: 12, + NodesToShufflePerShard: 1, + }, + }, + } + node := NewTestMetaProcessorWithCustomNodes(cfg) + node.EpochStartTrigger.SetRoundsPerEpoch(4) + + // 1. 
Check initial config is correct + expectedNodesNum := &configNum{ + eligible: map[uint32]int{ + core.MetachainShardId: 4, + 0: 4, + }, + waiting: map[uint32]int{ + core.MetachainShardId: 1, + 0: 1, + }, + } + currNodesConfig := node.NodesConfig + checkConfig(t, expectedNodesNum, currNodesConfig) + + // During these 9 epochs, we will always have: + // - 10 activeNodes (8 eligible + 2 waiting) + // - 1 node to shuffle out per shard + // Meanwhile, maxNumNodes changes from 12-10-12 + // Since activeNodes <= maxNumNodes, shuffled out nodes will always be sent directly to waiting list, + // instead of auction(there is no reason to send them to auction, they will be selected anyway) + epoch := uint32(0) + numOfShuffledOut := 2 + numRemainingEligible := 6 + prevNodesConfig := currNodesConfig + for epoch < 9 { + node.Process(t, 5) + + currNodesConfig = node.NodesConfig + checkConfig(t, expectedNodesNum, currNodesConfig) + checkShuffledOutNodes(t, currNodesConfig, prevNodesConfig, numOfShuffledOut, numRemainingEligible) + + prevNodesConfig = currNodesConfig + epoch++ + } + + require.Equal(t, epoch, node.EpochStartTrigger.Epoch()) + + // Epoch = 9 with: + // - activeNodes = 10 + // - maxNumNodes = 12 + // Owner2 stakes 2 nodes, which should be initially sent to auction list + owner2Nodes := pubKeys[10:12] + node.ProcessStake(t, map[string]*NodesRegisterData{ + "owner2": { + BLSKeys: owner2Nodes, + TotalStake: big.NewInt(5 * nodePrice), + }, + }) + currNodesConfig = node.NodesConfig + expectedNodesNum.auction = 2 + checkConfig(t, expectedNodesNum, currNodesConfig) + requireSameSliceDifferentOrder(t, currNodesConfig.auction, owner2Nodes) + + // Epoch = 10 with: + // - activeNodes = 12 + // - maxNumNodes = 12 + // Owner2's new nodes are selected from auction and distributed to waiting list + node.Process(t, 5) + currNodesConfig = node.NodesConfig + expectedNodesNum.waiting[core.MetachainShardId]++ + expectedNodesNum.waiting[0]++ + expectedNodesNum.auction = 0 + checkConfig(t, expectedNodesNum, currNodesConfig) + checkShuffledOutNodes(t, currNodesConfig, prevNodesConfig, numOfShuffledOut, numRemainingEligible) + requireSliceContains(t, getAllPubKeys(currNodesConfig.waiting), owner2Nodes) + + // During epochs 10-13, we will have: + // - activeNodes = 12 + // - maxNumNodes = 12 + // Since activeNodes == maxNumNodes, shuffled out nodes will always be sent directly to waiting list, instead of auction + epoch = 10 + require.Equal(t, epoch, node.EpochStartTrigger.Epoch()) + prevNodesConfig = currNodesConfig + for epoch < 13 { + node.Process(t, 5) + + currNodesConfig = node.NodesConfig + checkConfig(t, expectedNodesNum, currNodesConfig) + checkShuffledOutNodes(t, currNodesConfig, prevNodesConfig, numOfShuffledOut, numRemainingEligible) + + prevNodesConfig = currNodesConfig + epoch++ + } + + // Epoch = 13 with: + // - activeNodes = 12 + // - maxNumNodes = 12 + // Owner3 stakes 2 nodes, which should be initially sent to auction list + owner3Nodes := pubKeys[12:14] + node.ProcessStake(t, map[string]*NodesRegisterData{ + "owner3": { + BLSKeys: owner3Nodes, + TotalStake: big.NewInt(5 * nodePrice), + }, + }) + currNodesConfig = node.NodesConfig + expectedNodesNum.auction = 2 + checkConfig(t, expectedNodesNum, currNodesConfig) + requireSameSliceDifferentOrder(t, currNodesConfig.auction, owner3Nodes) + + // During epochs 14-18, we will have: + // - activeNodes = 14 + // - maxNumNodes = 12 + // Since activeNodes > maxNumNodes, shuffled out nodes (2) will be sent to auction list + node.Process(t, 5) + prevNodesConfig = 
node.NodesConfig + epoch = 14 + require.Equal(t, epoch, node.EpochStartTrigger.Epoch()) + + numOfUnselectedNodesFromAuction := 0 + numOfSelectedNodesFromAuction := 2 + for epoch < 18 { + checkConfig(t, expectedNodesNum, currNodesConfig) + + node.Process(t, 5) + currNodesConfig = node.NodesConfig + checkStakingV4EpochChangeFlow(t, currNodesConfig, prevNodesConfig, numOfShuffledOut, numOfUnselectedNodesFromAuction, numOfSelectedNodesFromAuction) + + prevNodesConfig = currNodesConfig + epoch++ + } + + // Epoch = 18, with: + // - activeNodes = 14 + // - maxNumNodes = 12 + // Owner3 unStakes one of his nodes + node.ProcessUnStake(t, map[string][][]byte{ + "owner3": {owner3Nodes[0]}, + }) + + // Epoch = 19, with: + // - activeNodes = 13 + // - maxNumNodes = 12 + // Owner3's unStaked node is now leaving + node.Process(t, 5) + currNodesConfig = node.NodesConfig + require.Len(t, currNodesConfig.leaving, 1) + requireMapContains(t, currNodesConfig.leaving, [][]byte{owner3Nodes[0]}) + + epoch = 19 + require.Equal(t, epoch, node.EpochStartTrigger.Epoch()) + + // During epochs 19-23, we will have: + // - activeNodes = 13 + // - maxNumNodes = 12 + // Since activeNodes > maxNumNodes: + // - shuffled out nodes (2) will be sent to auction list + // - waiting lists will be unbalanced (3 in total: 1 + 2 per shard) + // - no node will spend extra epochs in eligible/waiting, since waiting lists will always be refilled + prevNodesConfig = node.NodesConfig + for epoch < 23 { + require.Len(t, getAllPubKeys(currNodesConfig.eligible), 8) + require.Len(t, getAllPubKeys(currNodesConfig.waiting), 3) + require.Len(t, currNodesConfig.eligible[core.MetachainShardId], 4) + require.Len(t, currNodesConfig.eligible[0], 4) + require.Len(t, currNodesConfig.auction, 2) + + node.Process(t, 5) + + currNodesConfig = node.NodesConfig + checkStakingV4EpochChangeFlow(t, currNodesConfig, prevNodesConfig, numOfShuffledOut, numOfUnselectedNodesFromAuction, numOfSelectedNodesFromAuction) + + prevNodesConfig = currNodesConfig + epoch++ + } +} + +func TestStakingV4_NewlyStakedNodesInStakingV4Step2ShouldBeSentToWaitingIfListIsTooLow(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + pubKeys := generateAddresses(0, 20) + + owner1 := "owner1" + owner1Stats := &OwnerStats{ + EligibleBlsKeys: map[uint32][][]byte{ + core.MetachainShardId: pubKeys[:4], + 0: pubKeys[4:8], + }, + WaitingBlsKeys: map[uint32][][]byte{ + core.MetachainShardId: pubKeys[8:9], + 0: pubKeys[9:10], + }, + TotalStake: big.NewInt(20 * nodePrice), + } + + cfg := &InitialNodesConfig{ + MetaConsensusGroupSize: 2, + ShardConsensusGroupSize: 2, + MinNumberOfEligibleShardNodes: 4, + MinNumberOfEligibleMetaNodes: 4, + NumOfShards: 1, + Owners: map[string]*OwnerStats{ + owner1: owner1Stats, + }, + MaxNodesChangeConfig: []config.MaxNodesChangeConfig{ + { + EpochEnable: 0, + MaxNumNodes: 20, + NodesToShufflePerShard: 1, + }, + { + EpochEnable: stakingV4Step3EnableEpoch, + MaxNumNodes: 18, + NodesToShufflePerShard: 1, + }, + }, + } + node := NewTestMetaProcessorWithCustomNodes(cfg) + node.EpochStartTrigger.SetRoundsPerEpoch(4) + + // 1. 
Check initial config is correct + expectedNodesNum := &configNum{ + eligible: map[uint32]int{ + core.MetachainShardId: 4, + 0: 4, + }, + waiting: map[uint32]int{ + core.MetachainShardId: 1, + 0: 1, + }, + } + currNodesConfig := node.NodesConfig + checkConfig(t, expectedNodesNum, currNodesConfig) + + // Epoch = 0, before staking v4, owner2 stakes 2 nodes + // - maxNumNodes = 20 + // - activeNumNodes = 10 + // Newly staked nodes should be sent to new list + owner2Nodes := pubKeys[12:14] + node.ProcessStake(t, map[string]*NodesRegisterData{ + "owner2": { + BLSKeys: owner2Nodes, + TotalStake: big.NewInt(2 * nodePrice), + }, + }) + currNodesConfig = node.NodesConfig + expectedNodesNum.new = 2 + checkConfig(t, expectedNodesNum, currNodesConfig) + requireSameSliceDifferentOrder(t, currNodesConfig.new, owner2Nodes) + + // Epoch = 1, staking v4 step 1 + // - maxNumNodes = 20 + // - activeNumNodes = 12 + // Owner2's new nodes should have been sent to waiting + node.Process(t, 5) + currNodesConfig = node.NodesConfig + expectedNodesNum.new = 0 + expectedNodesNum.waiting[0]++ + expectedNodesNum.waiting[core.MetachainShardId]++ + checkConfig(t, expectedNodesNum, currNodesConfig) + requireSliceContainsNumOfElements(t, getAllPubKeys(currNodesConfig.waiting), owner2Nodes, 2) + + // Epoch = 1, before staking v4, owner3 stakes 2 nodes + // - maxNumNodes = 20 + // - activeNumNodes = 12 + // Newly staked nodes should be sent to auction list + owner3Nodes := pubKeys[15:17] + node.ProcessStake(t, map[string]*NodesRegisterData{ + "owner3": { + BLSKeys: owner3Nodes, + TotalStake: big.NewInt(2 * nodePrice), + }, + }) + currNodesConfig = node.NodesConfig + expectedNodesNum.auction = 2 + checkConfig(t, expectedNodesNum, currNodesConfig) + requireSameSliceDifferentOrder(t, currNodesConfig.auction, owner3Nodes) + + // Epoch = 2, staking v4 step 2 + // - maxNumNodes = 20 + // - activeNumNodes = 14 + // Owner3's auction nodes should have been sent to waiting + node.Process(t, 5) + currNodesConfig = node.NodesConfig + expectedNodesNum.auction = 0 + expectedNodesNum.waiting[0]++ + expectedNodesNum.waiting[core.MetachainShardId]++ + checkConfig(t, expectedNodesNum, currNodesConfig) + requireSliceContainsNumOfElements(t, getAllPubKeys(currNodesConfig.waiting), owner3Nodes, 2) + + // During epochs 2-6, we will have: + // - activeNodes = 14 + // - maxNumNodes = 18-20 + // Since activeNodes < maxNumNodes, shuffled out nodes will always be sent directly to waiting list, instead of auction + epoch := uint32(2) + require.Equal(t, epoch, node.EpochStartTrigger.Epoch()) + + numOfShuffledOut := 2 + numRemainingEligible := 6 + numOfUnselectedNodesFromAuction := 0 + numOfSelectedNodesFromAuction := 0 + + prevNodesConfig := currNodesConfig + for epoch < 6 { + node.Process(t, 5) + + currNodesConfig = node.NodesConfig + checkConfig(t, expectedNodesNum, currNodesConfig) + checkShuffledOutNodes(t, currNodesConfig, prevNodesConfig, numOfShuffledOut, numRemainingEligible) + checkStakingV4EpochChangeFlow(t, currNodesConfig, prevNodesConfig, numOfShuffledOut, numOfUnselectedNodesFromAuction, numOfSelectedNodesFromAuction) + + prevNodesConfig = currNodesConfig + epoch++ + } +} + +func TestStakingV4_LeavingNodesEdgeCases(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + pubKeys := generateAddresses(0, 20) + + owner1 := "owner1" + owner1Stats := &OwnerStats{ + EligibleBlsKeys: map[uint32][][]byte{ + core.MetachainShardId: pubKeys[:3], + 0: pubKeys[3:6], + 1: pubKeys[6:9], + 2: pubKeys[9:12], + }, + TotalStake: 
big.NewInt(12 * nodePrice), + } + + cfg := &InitialNodesConfig{ + MetaConsensusGroupSize: 3, + ShardConsensusGroupSize: 3, + MinNumberOfEligibleShardNodes: 3, + MinNumberOfEligibleMetaNodes: 3, + NumOfShards: 3, + Owners: map[string]*OwnerStats{ + owner1: owner1Stats, + }, + MaxNodesChangeConfig: []config.MaxNodesChangeConfig{ + { + EpochEnable: 0, + MaxNumNodes: 16, + NodesToShufflePerShard: 4, + }, + { + EpochEnable: 1, + MaxNumNodes: 18, + NodesToShufflePerShard: 2, + }, + { + EpochEnable: stakingV4Step3EnableEpoch, + MaxNumNodes: 12, + NodesToShufflePerShard: 2, + }, + }, + } + node := NewTestMetaProcessorWithCustomNodes(cfg) + node.EpochStartTrigger.SetRoundsPerEpoch(5) + + // 1. Check initial config is correct + currNodesConfig := node.NodesConfig + require.Len(t, getAllPubKeys(currNodesConfig.eligible), 12) + require.Len(t, getAllPubKeys(currNodesConfig.waiting), 0) + require.Len(t, currNodesConfig.eligible[core.MetachainShardId], 3) + require.Len(t, currNodesConfig.waiting[core.MetachainShardId], 0) + require.Len(t, currNodesConfig.eligible[0], 3) + require.Len(t, currNodesConfig.waiting[0], 0) + require.Len(t, currNodesConfig.eligible[1], 3) + require.Len(t, currNodesConfig.waiting[1], 0) + require.Len(t, currNodesConfig.eligible[2], 3) + require.Len(t, currNodesConfig.waiting[2], 0) + require.Empty(t, currNodesConfig.shuffledOut) + require.Empty(t, currNodesConfig.auction) + + // NewOwner0 stakes 1 node with top up = 0 before staking v4; should be sent to new nodes, since there are enough slots + newOwner0 := "newOwner0" + newOwner0BlsKeys := [][]byte{generateAddress(101)} + node.ProcessStake(t, map[string]*NodesRegisterData{ + newOwner0: { + BLSKeys: newOwner0BlsKeys, + TotalStake: big.NewInt(nodePrice), + }, + }) + currNodesConfig = node.NodesConfig + requireSameSliceDifferentOrder(t, currNodesConfig.new, newOwner0BlsKeys) + + // UnStake one of the initial nodes + node.ProcessUnStake(t, map[string][][]byte{ + owner1: {owner1Stats.EligibleBlsKeys[core.MetachainShardId][0]}, + }) + + // Fast-forward few epochs such that the whole staking v4 is activated. + // We should have same 12 initial nodes + 1 extra node (because of legacy code where all leaving nodes were + // considered to be eligible and the unStaked node was forced to remain eligible) + node.Process(t, 49) + currNodesConfig = node.NodesConfig + require.Len(t, getAllPubKeys(currNodesConfig.eligible), 12) + require.Len(t, getAllPubKeys(currNodesConfig.waiting), 1) + + // Stake 10 extra nodes and check that they are sent to auction + newOwner1 := "newOwner1" + newOwner1BlsKeys := generateAddresses(303, 10) + node.ProcessStake(t, map[string]*NodesRegisterData{ + newOwner1: { + BLSKeys: newOwner1BlsKeys, + TotalStake: big.NewInt(nodePrice * 10), + }, + }) + currNodesConfig = node.NodesConfig + requireSameSliceDifferentOrder(t, currNodesConfig.auction, newOwner1BlsKeys) + + // After 2 epochs, unStake all previously staked keys. Some of them have been already sent to eligible/waiting, but most + // of them are still in auction. UnStaked nodes' status from auction should be: leaving now, but their previous list was auction. + // We should not force his auction nodes as being eligible in the next epoch. We should only force his existing active + // nodes to remain in the system. 
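To make the distinction in the comment above concrete: an unStaked node that was still in auction simply becomes a leaving node whose previous list is auction, while an unStaked node that was already active (eligible or waiting) may be kept in place until the lists can be refilled. The sketch below is only an illustration of that wording; validatorState and unStakeSketch are hypothetical names, not the node's actual validator-info API.

package main

import "fmt"

// validatorState is a hypothetical, simplified stand-in that only tracks the
// current list and the previous list of a validator.
type validatorState struct {
	list         string // "eligible", "waiting", "auction", "leaving"
	previousList string
}

// unStakeSketch marks a node as leaving and remembers where it came from.
// The boolean is a caricature of the behavior asserted in these tests: only
// nodes that were already active may be forced to remain so that the lists
// stay valid; auction nodes are never forced back into eligible.
func unStakeSketch(v validatorState) (validatorState, bool) {
	next := validatorState{list: "leaving", previousList: v.list}
	forcedToRemain := v.list == "eligible" || v.list == "waiting"
	return next, forcedToRemain
}

func main() {
	fromAuction, forced := unStakeSketch(validatorState{list: "auction"})
	fmt.Println(fromAuction, forced) // {leaving auction} false

	fromEligible, forced := unStakeSketch(validatorState{list: "eligible"})
	fmt.Println(fromEligible, forced) // {leaving eligible} true
}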
+ node.Process(t, 10) + currNodesConfig = node.NodesConfig + newOwner1AuctionNodes := getIntersection(currNodesConfig.auction, newOwner1BlsKeys) + newOwner1EligibleNodes := getIntersection(getAllPubKeys(currNodesConfig.eligible), newOwner1BlsKeys) + newOwner1WaitingNodes := getIntersection(getAllPubKeys(currNodesConfig.waiting), newOwner1BlsKeys) + newOwner1ActiveNodes := append(newOwner1EligibleNodes, newOwner1WaitingNodes...) + require.Equal(t, len(newOwner1AuctionNodes)+len(newOwner1ActiveNodes), len(newOwner1BlsKeys)) // sanity check + + node.ClearStoredMbs() + node.ProcessUnStake(t, map[string][][]byte{ + newOwner1: newOwner1BlsKeys, + }) + + node.Process(t, 5) + currNodesConfig = node.NodesConfig + require.Len(t, getAllPubKeys(currNodesConfig.eligible), 12) + requireMapContains(t, currNodesConfig.leaving, newOwner1AuctionNodes) + requireMapDoesNotContain(t, currNodesConfig.eligible, newOwner1AuctionNodes) + requireMapDoesNotContain(t, currNodesConfig.waiting, newOwner1AuctionNodes) + + allCurrentActiveNodes := append(getAllPubKeys(currNodesConfig.eligible), getAllPubKeys(currNodesConfig.waiting)...) + owner1NodesThatAreStillForcedToRemain := getIntersection(allCurrentActiveNodes, newOwner1ActiveNodes) + require.NotZero(t, len(owner1NodesThatAreStillForcedToRemain)) + + // Fast-forward some epochs, no error should occur, and we should have our initial config of: + // - 12 eligible nodes + // - 1 waiting list + // - some forced nodes to remain from newOwner1 + node.Process(t, 10) + currNodesConfig = node.NodesConfig + require.Len(t, getAllPubKeys(currNodesConfig.eligible), 12) + require.Len(t, getAllPubKeys(currNodesConfig.waiting), 1) + allCurrentActiveNodes = append(getAllPubKeys(currNodesConfig.eligible), getAllPubKeys(currNodesConfig.waiting)...) 
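The epoch-by-epoch comments in the tests above all reduce to the same comparison: while the number of active nodes stays at or below maxNumNodes, shuffled-out validators are re-queued straight into a waiting list; once activeNodes exceeds maxNumNodes they are sent to the auction list instead. A minimal sketch of that comparison, assuming this is the whole decision (the real shuffler also rebalances the per-shard lists), with hypothetical names:

package main

import "fmt"

// shuffledOutDestination only captures the activeNodes vs. maxNumNodes
// comparison described in the test comments; it is not the production
// shuffler, which also keeps per-shard eligible/waiting lists balanced.
func shuffledOutDestination(activeNodes, maxNumNodes int) string {
	if activeNodes <= maxNumNodes {
		// Room for every node: a shuffled-out validator goes straight back
		// to a waiting list.
		return "waiting"
	}
	// More active nodes than allowed: shuffled-out validators must compete
	// in the auction for the remaining waiting slots.
	return "auction"
}

func main() {
	fmt.Println(shuffledOutDestination(10, 12)) // waiting (epochs 0-9 in the first test)
	fmt.Println(shuffledOutDestination(12, 12)) // waiting (epochs 10-13)
	fmt.Println(shuffledOutDestination(14, 12)) // auction (epochs 14-18)
	fmt.Println(shuffledOutDestination(13, 12)) // auction (epochs 19-23)
}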
+ owner1NodesThatAreStillForcedToRemain = getIntersection(allCurrentActiveNodes, newOwner1ActiveNodes) + require.NotZero(t, len(owner1NodesThatAreStillForcedToRemain)) + + // Stake 10 extra nodes such that the forced eligible nodes from previous newOwner1 can leave the system + // and are replaced by new nodes + newOwner2 := "newOwner2" + newOwner2BlsKeys := generateAddresses(403, 10) + node.ProcessStake(t, map[string]*NodesRegisterData{ + newOwner2: { + BLSKeys: newOwner2BlsKeys, + TotalStake: big.NewInt(nodePrice * 10), + }, + }) + currNodesConfig = node.NodesConfig + requireSliceContains(t, currNodesConfig.auction, newOwner2BlsKeys) + + // Fast-forward multiple epochs and check that newOwner1's forced nodes from previous epochs left + node.Process(t, 20) + currNodesConfig = node.NodesConfig + allCurrentNodesInSystem := getAllPubKeysFromConfig(currNodesConfig) + owner1LeftNodes := getIntersection(owner1NodesThatAreStillForcedToRemain, allCurrentNodesInSystem) + require.Zero(t, len(owner1LeftNodes)) +} diff --git a/integrationTests/vm/staking/systemSCCreator.go b/integrationTests/vm/staking/systemSCCreator.go new file mode 100644 index 00000000000..cf18140797a --- /dev/null +++ b/integrationTests/vm/staking/systemSCCreator.go @@ -0,0 +1,264 @@ +package staking + +import ( + "bytes" + "strconv" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/epochStart" + "github.com/multiversx/mx-chain-go/epochStart/metachain" + epochStartMock "github.com/multiversx/mx-chain-go/epochStart/mock" + "github.com/multiversx/mx-chain-go/epochStart/notifier" + "github.com/multiversx/mx-chain-go/factory" + "github.com/multiversx/mx-chain-go/genesis/process/disabled" + "github.com/multiversx/mx-chain-go/process" + metaProcess "github.com/multiversx/mx-chain-go/process/factory/metachain" + "github.com/multiversx/mx-chain-go/process/peer" + "github.com/multiversx/mx-chain-go/process/smartContract/builtInFunctions" + "github.com/multiversx/mx-chain-go/process/smartContract/hooks" + "github.com/multiversx/mx-chain-go/process/smartContract/hooks/counters" + "github.com/multiversx/mx-chain-go/sharding" + "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" + "github.com/multiversx/mx-chain-go/state" + "github.com/multiversx/mx-chain-go/testscommon" + "github.com/multiversx/mx-chain-go/testscommon/cryptoMocks" + "github.com/multiversx/mx-chain-go/testscommon/genesisMocks" + "github.com/multiversx/mx-chain-go/testscommon/guardianMocks" + "github.com/multiversx/mx-chain-go/vm" + vmcommon "github.com/multiversx/mx-chain-vm-common-go" + vmcommonMock "github.com/multiversx/mx-chain-vm-common-go/mock" +) + +func createSystemSCProcessor( + nc nodesCoordinator.NodesCoordinator, + coreComponents factory.CoreComponentsHolder, + stateComponents factory.StateComponentsHandler, + shardCoordinator sharding.Coordinator, + maxNodesConfig []config.MaxNodesChangeConfig, + validatorStatisticsProcessor process.ValidatorStatisticsProcessor, + systemVM vmcommon.VMExecutionHandler, + stakingDataProvider epochStart.StakingDataProvider, +) process.EpochStartSystemSCProcessor { + maxNodesChangeConfigProvider, _ := notifier.NewNodesConfigProvider( + coreComponents.EpochNotifier(), + maxNodesConfig, + ) + + auctionCfg := config.SoftAuctionConfig{ + TopUpStep: "10", + MinTopUp: "1", + MaxTopUp: "32000000", + MaxNumberOfIterations: 100000, + } + ald, _ := 
metachain.NewAuctionListDisplayer(metachain.ArgsAuctionListDisplayer{ + TableDisplayHandler: metachain.NewTableDisplayer(), + ValidatorPubKeyConverter: &testscommon.PubkeyConverterMock{}, + AddressPubKeyConverter: &testscommon.PubkeyConverterMock{}, + AuctionConfig: auctionCfg, + }) + + argsAuctionListSelector := metachain.AuctionListSelectorArgs{ + ShardCoordinator: shardCoordinator, + StakingDataProvider: stakingDataProvider, + MaxNodesChangeConfigProvider: maxNodesChangeConfigProvider, + AuctionListDisplayHandler: ald, + SoftAuctionConfig: auctionCfg, + } + auctionListSelector, _ := metachain.NewAuctionListSelector(argsAuctionListSelector) + + args := metachain.ArgsNewEpochStartSystemSCProcessing{ + SystemVM: systemVM, + UserAccountsDB: stateComponents.AccountsAdapter(), + PeerAccountsDB: stateComponents.PeerAccounts(), + Marshalizer: coreComponents.InternalMarshalizer(), + StartRating: initialRating, + ValidatorInfoCreator: validatorStatisticsProcessor, + EndOfEpochCallerAddress: vm.EndOfEpochAddress, + StakingSCAddress: vm.StakingSCAddress, + ChanceComputer: &epochStartMock.ChanceComputerStub{}, + EpochNotifier: coreComponents.EpochNotifier(), + GenesisNodesConfig: &genesisMocks.NodesSetupStub{}, + StakingDataProvider: stakingDataProvider, + NodesConfigProvider: nc, + ShardCoordinator: shardCoordinator, + ESDTOwnerAddressBytes: bytes.Repeat([]byte{1}, 32), + EnableEpochsHandler: coreComponents.EnableEpochsHandler(), + MaxNodesChangeConfigProvider: maxNodesChangeConfigProvider, + AuctionListSelector: auctionListSelector, + } + + systemSCProcessor, _ := metachain.NewSystemSCProcessor(args) + return systemSCProcessor +} + +func createStakingDataProvider( + enableEpochsHandler common.EnableEpochsHandler, + systemVM vmcommon.VMExecutionHandler, +) epochStart.StakingDataProvider { + argsStakingDataProvider := metachain.StakingDataProviderArgs{ + EnableEpochsHandler: enableEpochsHandler, + SystemVM: systemVM, + MinNodePrice: strconv.Itoa(nodePrice), + } + stakingSCProvider, _ := metachain.NewStakingDataProvider(argsStakingDataProvider) + + return stakingSCProvider +} + +func createValidatorStatisticsProcessor( + dataComponents factory.DataComponentsHolder, + coreComponents factory.CoreComponentsHolder, + nc nodesCoordinator.NodesCoordinator, + shardCoordinator sharding.Coordinator, + peerAccounts state.AccountsAdapter, +) process.ValidatorStatisticsProcessor { + argsValidatorsProcessor := peer.ArgValidatorStatisticsProcessor{ + Marshalizer: coreComponents.InternalMarshalizer(), + NodesCoordinator: nc, + ShardCoordinator: shardCoordinator, + DataPool: dataComponents.Datapool(), + StorageService: dataComponents.StorageService(), + PubkeyConv: coreComponents.AddressPubKeyConverter(), + PeerAdapter: peerAccounts, + Rater: coreComponents.Rater(), + RewardsHandler: &epochStartMock.RewardsHandlerStub{}, + NodesSetup: &genesisMocks.NodesSetupStub{}, + MaxComputableRounds: 1, + MaxConsecutiveRoundsOfRatingDecrease: 2000, + EnableEpochsHandler: coreComponents.EnableEpochsHandler(), + } + validatorStatisticsProcessor, _ := peer.NewValidatorStatisticsProcessor(argsValidatorsProcessor) + return validatorStatisticsProcessor +} + +func createBlockChainHook( + dataComponents factory.DataComponentsHolder, + coreComponents factory.CoreComponentsHolder, + accountsAdapter state.AccountsAdapter, + shardCoordinator sharding.Coordinator, + gasScheduleNotifier core.GasScheduleNotifier, +) (hooks.ArgBlockChainHook, process.BlockChainHookWithAccountsAdapter) { + argsBuiltIn := 
builtInFunctions.ArgsCreateBuiltInFunctionContainer{ + GasSchedule: gasScheduleNotifier, + MapDNSAddresses: make(map[string]struct{}), + Marshalizer: coreComponents.InternalMarshalizer(), + Accounts: accountsAdapter, + ShardCoordinator: shardCoordinator, + EpochNotifier: coreComponents.EpochNotifier(), + EnableEpochsHandler: coreComponents.EnableEpochsHandler(), + AutomaticCrawlerAddresses: [][]byte{core.SystemAccountAddress}, + MaxNumNodesInTransferRole: 1, + GuardedAccountHandler: &guardianMocks.GuardedAccountHandlerStub{}, + MapDNSV2Addresses: make(map[string]struct{}), + } + + builtInFunctionsContainer, _ := builtInFunctions.CreateBuiltInFunctionsFactory(argsBuiltIn) + _ = builtInFunctionsContainer.CreateBuiltInFunctionContainer() + builtInFunctionsContainer.BuiltInFunctionContainer() + + argsHook := hooks.ArgBlockChainHook{ + Accounts: accountsAdapter, + PubkeyConv: coreComponents.AddressPubKeyConverter(), + StorageService: dataComponents.StorageService(), + BlockChain: dataComponents.Blockchain(), + ShardCoordinator: shardCoordinator, + Marshalizer: coreComponents.InternalMarshalizer(), + Uint64Converter: coreComponents.Uint64ByteSliceConverter(), + NFTStorageHandler: &testscommon.SimpleNFTStorageHandlerStub{}, + BuiltInFunctions: builtInFunctionsContainer.BuiltInFunctionContainer(), + DataPool: dataComponents.Datapool(), + CompiledSCPool: dataComponents.Datapool().SmartContracts(), + EpochNotifier: coreComponents.EpochNotifier(), + GlobalSettingsHandler: &vmcommonMock.GlobalSettingsHandlerStub{}, + NilCompiledSCStore: true, + EnableEpochsHandler: coreComponents.EnableEpochsHandler(), + GasSchedule: gasScheduleNotifier, + Counter: counters.NewDisabledCounter(), + MissingTrieNodesNotifier: &testscommon.MissingTrieNodesNotifierStub{}, + } + + blockChainHook, _ := hooks.NewBlockChainHookImpl(argsHook) + return argsHook, blockChainHook +} + +func createVMContainerFactory( + coreComponents factory.CoreComponentsHolder, + gasScheduleNotifier core.GasScheduleNotifier, + blockChainHook process.BlockChainHookWithAccountsAdapter, + argsBlockChainHook hooks.ArgBlockChainHook, + stateComponents factory.StateComponentsHandler, + shardCoordinator sharding.Coordinator, + nc nodesCoordinator.NodesCoordinator, + maxNumNodes uint32, +) process.VirtualMachinesContainerFactory { + signVerifer, _ := disabled.NewMessageSignVerifier(&cryptoMocks.KeyGenStub{}) + + argsNewVMContainerFactory := metaProcess.ArgsNewVMContainerFactory{ + BlockChainHook: blockChainHook, + PubkeyConv: coreComponents.AddressPubKeyConverter(), + Economics: coreComponents.EconomicsData(), + MessageSignVerifier: signVerifer, + GasSchedule: gasScheduleNotifier, + NodesConfigProvider: &genesisMocks.NodesSetupStub{}, + Hasher: coreComponents.Hasher(), + Marshalizer: coreComponents.InternalMarshalizer(), + SystemSCConfig: &config.SystemSmartContractsConfig{ + ESDTSystemSCConfig: config.ESDTSystemSCConfig{ + BaseIssuingCost: "1000", + OwnerAddress: "aaaaaa", + }, + GovernanceSystemSCConfig: config.GovernanceSystemSCConfig{ + V1: config.GovernanceSystemSCConfigV1{ + NumNodes: 2000, + ProposalCost: "500", + MinQuorum: 50, + MinPassThreshold: 10, + MinVetoThreshold: 10, + }, + OwnerAddress: "3132333435363738393031323334353637383930313233343536373839303234", + }, + StakingSystemSCConfig: config.StakingSystemSCConfig{ + GenesisNodePrice: strconv.Itoa(nodePrice), + UnJailValue: "10", + MinStepValue: "10", + MinStakeValue: "1", + UnBondPeriod: 1, + NumRoundsWithoutBleed: 1, + MaximumPercentageToBleed: 1, + BleedPercentagePerRound: 1, + 
MaxNumberOfNodesForStake: uint64(maxNumNodes), + ActivateBLSPubKeyMessageVerification: false, + MinUnstakeTokensValue: "1", + StakeLimitPercentage: 100.0, + NodeLimitPercentage: 100.0, + }, + DelegationManagerSystemSCConfig: config.DelegationManagerSystemSCConfig{ + MinCreationDeposit: "100", + MinStakeAmount: "100", + ConfigChangeAddress: "3132333435363738393031323334353637383930313233343536373839303234", + }, + DelegationSystemSCConfig: config.DelegationSystemSCConfig{ + MinServiceFee: 0, + MaxServiceFee: 100, + }, + SoftAuctionConfig: config.SoftAuctionConfig{ + TopUpStep: "10", + MinTopUp: "1", + MaxTopUp: "32000000", + MaxNumberOfIterations: 100000, + }, + }, + ValidatorAccountsDB: stateComponents.PeerAccounts(), + ChanceComputer: coreComponents.Rater(), + EnableEpochsHandler: coreComponents.EnableEpochsHandler(), + ShardCoordinator: shardCoordinator, + NodesCoordinator: nc, + UserAccountsDB: stateComponents.AccountsAdapter(), + ArgBlockChainHook: argsBlockChainHook, + } + + metaVmFactory, _ := metaProcess.NewVMContainerFactory(argsNewVMContainerFactory) + return metaVmFactory +} diff --git a/integrationTests/vm/staking/testMetaProcessor.go b/integrationTests/vm/staking/testMetaProcessor.go new file mode 100644 index 00000000000..168287b66bc --- /dev/null +++ b/integrationTests/vm/staking/testMetaProcessor.go @@ -0,0 +1,99 @@ +package staking + +import ( + "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/dataRetriever" +) + +// NewTestMetaProcessor - +func NewTestMetaProcessor( + numOfMetaNodes uint32, + numOfShards uint32, + numOfEligibleNodesPerShard uint32, + numOfWaitingNodesPerShard uint32, + numOfNodesToShufflePerShard uint32, + shardConsensusGroupSize int, + metaConsensusGroupSize int, + numOfNodesInStakingQueue uint32, +) *TestMetaProcessor { + coreComponents, dataComponents, bootstrapComponents, statusComponents, stateComponents := createComponentHolders(numOfShards) + + maxNodesConfig := createMaxNodesConfig( + numOfMetaNodes, + numOfShards, + numOfEligibleNodesPerShard, + numOfWaitingNodesPerShard, + numOfNodesToShufflePerShard, + ) + + queue := createStakingQueue( + numOfNodesInStakingQueue, + maxNodesConfig[0].MaxNumNodes, + coreComponents.InternalMarshalizer(), + stateComponents.AccountsAdapter(), + ) + + eligibleMap, waitingMap := createGenesisNodes( + numOfMetaNodes, + numOfShards, + numOfEligibleNodesPerShard, + numOfWaitingNodesPerShard, + coreComponents.InternalMarshalizer(), + stateComponents, + ) + + bootStrapStorer, _ := dataComponents.StorageService().GetStorer(dataRetriever.BootstrapUnit) + nc := createNodesCoordinator( + eligibleMap, + waitingMap, + numOfMetaNodes, + numOfShards, + numOfEligibleNodesPerShard, + shardConsensusGroupSize, + metaConsensusGroupSize, + coreComponents, + bootStrapStorer, + bootstrapComponents.NodesCoordinatorRegistryFactory(), + maxNodesConfig, + ) + + return newTestMetaProcessor( + coreComponents, + dataComponents, + bootstrapComponents, + statusComponents, + stateComponents, + nc, + maxNodesConfig, + queue, + ) +} + +func createMaxNodesConfig( + numOfMetaNodes uint32, + numOfShards uint32, + numOfEligibleNodesPerShard uint32, + numOfWaitingNodesPerShard uint32, + numOfNodesToShufflePerShard uint32, +) []config.MaxNodesChangeConfig { + totalEligible := numOfMetaNodes + numOfShards*numOfEligibleNodesPerShard + totalWaiting := (numOfShards + 1) * numOfWaitingNodesPerShard + totalNodes := totalEligible + totalWaiting + + maxNodesConfig := make([]config.MaxNodesChangeConfig, 0) + maxNodesConfig = 
append(maxNodesConfig, config.MaxNodesChangeConfig{ + EpochEnable: 0, + MaxNumNodes: totalNodes, + NodesToShufflePerShard: numOfNodesToShufflePerShard, + }, + ) + + maxNodesConfig = append(maxNodesConfig, config.MaxNodesChangeConfig{ + EpochEnable: stakingV4Step3EnableEpoch, + MaxNumNodes: totalNodes - numOfNodesToShufflePerShard*(numOfShards+1), + NodesToShufflePerShard: numOfNodesToShufflePerShard, + }, + ) + + return maxNodesConfig +} diff --git a/integrationTests/vm/staking/testMetaProcessorWithCustomNodesConfig.go b/integrationTests/vm/staking/testMetaProcessorWithCustomNodesConfig.go new file mode 100644 index 00000000000..841a2b77b43 --- /dev/null +++ b/integrationTests/vm/staking/testMetaProcessorWithCustomNodesConfig.go @@ -0,0 +1,358 @@ +package staking + +import ( + "bytes" + "encoding/hex" + "math/big" + "testing" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-core-go/data/block" + "github.com/multiversx/mx-chain-core-go/data/smartContractResult" + "github.com/multiversx/mx-chain-core-go/marshal" + "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/dataRetriever" + "github.com/multiversx/mx-chain-go/integrationTests" + "github.com/multiversx/mx-chain-go/process" + "github.com/multiversx/mx-chain-go/process/smartContract" + "github.com/multiversx/mx-chain-go/testscommon" + "github.com/multiversx/mx-chain-go/vm" + vmcommon "github.com/multiversx/mx-chain-vm-common-go" + "github.com/stretchr/testify/require" +) + +// OwnerStats - +type OwnerStats struct { + EligibleBlsKeys map[uint32][][]byte + WaitingBlsKeys map[uint32][][]byte + StakingQueueKeys [][]byte + TotalStake *big.Int +} + +// InitialNodesConfig - +type InitialNodesConfig struct { + Owners map[string]*OwnerStats + MaxNodesChangeConfig []config.MaxNodesChangeConfig + NumOfShards uint32 + MinNumberOfEligibleShardNodes uint32 + MinNumberOfEligibleMetaNodes uint32 + ShardConsensusGroupSize int + MetaConsensusGroupSize int +} + +// NewTestMetaProcessorWithCustomNodes - +func NewTestMetaProcessorWithCustomNodes(config *InitialNodesConfig) *TestMetaProcessor { + coreComponents, dataComponents, bootstrapComponents, statusComponents, stateComponents := createComponentHolders(config.NumOfShards) + + queue := createStakingQueueCustomNodes( + config.Owners, + coreComponents.InternalMarshalizer(), + stateComponents.AccountsAdapter(), + ) + + eligibleMap, waitingMap := createGenesisNodesWithCustomConfig( + config.Owners, + coreComponents.InternalMarshalizer(), + stateComponents, + ) + + bootstrapStorer, _ := dataComponents.StorageService().GetStorer(dataRetriever.BootstrapUnit) + nc := createNodesCoordinator( + eligibleMap, + waitingMap, + config.MinNumberOfEligibleMetaNodes, + config.NumOfShards, + config.MinNumberOfEligibleShardNodes, + config.ShardConsensusGroupSize, + config.MetaConsensusGroupSize, + coreComponents, + bootstrapStorer, + bootstrapComponents.NodesCoordinatorRegistryFactory(), + config.MaxNodesChangeConfig, + ) + + return newTestMetaProcessor( + coreComponents, + dataComponents, + bootstrapComponents, + statusComponents, + stateComponents, + nc, + config.MaxNodesChangeConfig, + queue, + ) +} + +// NodesRegisterData - +type NodesRegisterData struct { + BLSKeys [][]byte + TotalStake *big.Int +} + +// ProcessStake will create a block containing mini blocks with staking txs using provided nodes. 
+// Block will be committed + call to validator system sc will be made to stake all nodes +func (tmp *TestMetaProcessor) ProcessStake(t *testing.T, nodes map[string]*NodesRegisterData) { + header := tmp.createNewHeader(t, tmp.currentRound) + tmp.BlockChainHook.SetCurrentHeader(header) + + txHashes := make([][]byte, 0) + for owner, registerData := range nodes { + scrs := tmp.doStake(t, []byte(owner), registerData) + txHashes = append(txHashes, tmp.addTxsToCacher(scrs)...) + } + + tmp.commitBlockTxs(t, txHashes, header) +} + +func (tmp *TestMetaProcessor) doStake( + t *testing.T, + owner []byte, + registerData *NodesRegisterData, +) map[string]*smartContractResult.SmartContractResult { + arguments := &vmcommon.ContractCallInput{ + VMInput: vmcommon.VMInput{ + CallerAddr: owner, + Arguments: createStakeArgs(registerData.BLSKeys), + CallValue: registerData.TotalStake, + GasProvided: 10, + }, + RecipientAddr: vm.ValidatorSCAddress, + Function: "stake", + } + + return tmp.runSC(t, arguments) +} + +// ProcessReStake will create a block containing mini blocks with re-staking txs using provided nodes. +// Block will be committed + call to validator system sc will be made to stake all nodes +func (tmp *TestMetaProcessor) ProcessReStake(t *testing.T, blsKeys [][]byte) { + header := tmp.createNewHeader(t, tmp.currentRound) + tmp.BlockChainHook.SetCurrentHeader(header) + + txHashes := make([][]byte, 0) + for _, blsKey := range blsKeys { + scrs := tmp.doReStake(t, blsKey) + txHashes = append(txHashes, tmp.addTxsToCacher(scrs)...) + } + + tmp.commitBlockTxs(t, txHashes, header) +} + +func (tmp *TestMetaProcessor) doReStake( + t *testing.T, + blsKey []byte, +) map[string]*smartContractResult.SmartContractResult { + owner := tmp.getOwnerOfBLSKey(t, blsKey) + + arguments := &vmcommon.ContractCallInput{ + VMInput: vmcommon.VMInput{ + CallerAddr: owner, + Arguments: [][]byte{blsKey}, + CallValue: big.NewInt(0), + GasProvided: 10, + }, + RecipientAddr: vm.ValidatorSCAddress, + Function: "reStakeUnStakedNodes", + } + + return tmp.runSC(t, arguments) +} + +func (tmp *TestMetaProcessor) getOwnerOfBLSKey(t *testing.T, blsKey []byte) []byte { + arguments := &vmcommon.ContractCallInput{ + VMInput: vmcommon.VMInput{ + CallerAddr: vm.ValidatorSCAddress, + Arguments: [][]byte{blsKey}, + CallValue: big.NewInt(0), + GasProvided: 10, + }, + RecipientAddr: vm.StakingSCAddress, + Function: "getOwner", + } + + vmOutput, err := tmp.SystemVM.RunSmartContractCall(arguments) + require.Nil(t, err) + require.Equal(t, vmcommon.Ok, vmOutput.ReturnCode) + + return vmOutput.ReturnData[0] +} + +func createStakeArgs(blsKeys [][]byte) [][]byte { + numBLSKeys := int64(len(blsKeys)) + numBLSKeysBytes := big.NewInt(numBLSKeys).Bytes() + argsStake := [][]byte{numBLSKeysBytes} + + for _, blsKey := range blsKeys { + signature := append([]byte("signature-"), blsKey...) + argsStake = append(argsStake, blsKey, signature) + } + + return argsStake +} + +// ProcessUnStake will create a block containing mini blocks with unStaking txs using provided nodes. +// Block will be committed + call to validator system sc will be made to unStake all nodes +func (tmp *TestMetaProcessor) ProcessUnStake(t *testing.T, nodes map[string][][]byte) { + header := tmp.createNewHeader(t, tmp.currentRound) + tmp.BlockChainHook.SetCurrentHeader(header) + + txHashes := make([][]byte, 0) + for owner, blsKeys := range nodes { + scrs := tmp.doUnStake(t, []byte(owner), blsKeys) + txHashes = append(txHashes, tmp.addTxsToCacher(scrs)...) 
+ } + + tmp.commitBlockTxs(t, txHashes, header) +} + +func (tmp *TestMetaProcessor) doUnStake( + t *testing.T, + owner []byte, + blsKeys [][]byte, +) map[string]*smartContractResult.SmartContractResult { + arguments := &vmcommon.ContractCallInput{ + VMInput: vmcommon.VMInput{ + CallerAddr: owner, + Arguments: blsKeys, + CallValue: big.NewInt(0), + GasProvided: 100, + }, + RecipientAddr: vm.ValidatorSCAddress, + Function: "unStake", + } + + return tmp.runSC(t, arguments) +} + +// ProcessJail will create a block containing mini blocks with jail txs using provided nodes. +// Block will be committed + call to validator system sc will be made to jail all nodes +func (tmp *TestMetaProcessor) ProcessJail(t *testing.T, blsKeys [][]byte) { + header := tmp.createNewHeader(t, tmp.currentRound) + tmp.BlockChainHook.SetCurrentHeader(header) + + scrs := tmp.doJail(t, blsKeys) + txHashes := tmp.addTxsToCacher(scrs) + tmp.commitBlockTxs(t, txHashes, header) +} + +func (tmp *TestMetaProcessor) doJail( + t *testing.T, + blsKeys [][]byte, +) map[string]*smartContractResult.SmartContractResult { + arguments := &vmcommon.ContractCallInput{ + VMInput: vmcommon.VMInput{ + CallerAddr: vm.JailingAddress, + Arguments: blsKeys, + CallValue: big.NewInt(0), + GasProvided: 10, + }, + RecipientAddr: vm.StakingSCAddress, + Function: "jail", + } + + return tmp.runSC(t, arguments) +} + +// ProcessUnJail will create a block containing mini blocks with unJail txs using provided nodes. +// Block will be committed + call to validator system sc will be made to unJail all nodes +func (tmp *TestMetaProcessor) ProcessUnJail(t *testing.T, blsKeys [][]byte) { + header := tmp.createNewHeader(t, tmp.currentRound) + tmp.BlockChainHook.SetCurrentHeader(header) + + txHashes := make([][]byte, 0) + for _, blsKey := range blsKeys { + scrs := tmp.doUnJail(t, blsKey) + txHashes = append(txHashes, tmp.addTxsToCacher(scrs)...) 
+ } + + tmp.commitBlockTxs(t, txHashes, header) +} + +// ClearStoredMbs clears the stored miniblocks +func (tmp *TestMetaProcessor) ClearStoredMbs() { + txCoordMock, _ := tmp.TxCoordinator.(*testscommon.TransactionCoordinatorMock) + txCoordMock.ClearStoredMbs() +} + +func (tmp *TestMetaProcessor) doUnJail( + t *testing.T, + blsKey []byte, +) map[string]*smartContractResult.SmartContractResult { + arguments := &vmcommon.ContractCallInput{ + VMInput: vmcommon.VMInput{ + CallerAddr: vm.ValidatorSCAddress, + Arguments: [][]byte{blsKey}, + CallValue: big.NewInt(0), + GasProvided: 10, + }, + RecipientAddr: vm.StakingSCAddress, + Function: "unJail", + } + + return tmp.runSC(t, arguments) +} + +func (tmp *TestMetaProcessor) addTxsToCacher(scrs map[string]*smartContractResult.SmartContractResult) [][]byte { + txHashes := make([][]byte, 0) + for scrHash, scr := range scrs { + txHashes = append(txHashes, []byte(scrHash)) + tmp.TxCacher.AddTx([]byte(scrHash), scr) + } + + return txHashes +} + +func (tmp *TestMetaProcessor) commitBlockTxs(t *testing.T, txHashes [][]byte, header data.HeaderHandler) { + _, err := tmp.AccountsAdapter.Commit() + require.Nil(t, err) + + miniBlocks := block.MiniBlockSlice{ + { + TxHashes: txHashes, + SenderShardID: core.MetachainShardId, + ReceiverShardID: core.MetachainShardId, + Type: block.SmartContractResultBlock, + }, + } + tmp.TxCoordinator.AddTxsFromMiniBlocks(miniBlocks) + tmp.createAndCommitBlock(t, header, noTime) + tmp.currentRound += 1 +} + +func (tmp *TestMetaProcessor) runSC(t *testing.T, arguments *vmcommon.ContractCallInput) map[string]*smartContractResult.SmartContractResult { + vmOutput, err := tmp.SystemVM.RunSmartContractCall(arguments) + require.Nil(t, err) + require.Equal(t, vmcommon.Ok, vmOutput.ReturnCode) + + err = integrationTests.ProcessSCOutputAccounts(vmOutput, tmp.AccountsAdapter) + require.Nil(t, err) + + return createSCRsFromStakingSCOutput(vmOutput, tmp.Marshaller) +} + +func createSCRsFromStakingSCOutput( + vmOutput *vmcommon.VMOutput, + marshaller marshal.Marshalizer, +) map[string]*smartContractResult.SmartContractResult { + allSCR := make(map[string]*smartContractResult.SmartContractResult) + parser := smartContract.NewArgumentParser() + outputAccounts := process.SortVMOutputInsideData(vmOutput) + for _, outAcc := range outputAccounts { + storageUpdates := process.GetSortedStorageUpdates(outAcc) + + if bytes.Equal(outAcc.Address, vm.StakingSCAddress) { + scrData := parser.CreateDataFromStorageUpdate(storageUpdates) + scr := &smartContractResult.SmartContractResult{ + RcvAddr: vm.StakingSCAddress, + Data: []byte(scrData), + } + scrBytes, _ := marshaller.Marshal(scr) + scrHash := hex.EncodeToString(scrBytes) + + allSCR[scrHash] = scr + } + } + + return allSCR +} diff --git a/integrationTests/vm/systemVM/stakingSC_test.go b/integrationTests/vm/systemVM/stakingSC_test.go index 69ad5d15a6e..75e958f926b 100644 --- a/integrationTests/vm/systemVM/stakingSC_test.go +++ b/integrationTests/vm/systemVM/stakingSC_test.go @@ -35,6 +35,9 @@ func TestStakingUnstakingAndUnbondingOnMultiShardEnvironment(t *testing.T) { StakingV2EnableEpoch: integrationTests.UnreachableEpoch, ScheduledMiniBlocksEnableEpoch: integrationTests.UnreachableEpoch, MiniBlockPartialExecutionEnableEpoch: integrationTests.UnreachableEpoch, + StakingV4Step1EnableEpoch: integrationTests.UnreachableEpoch, + StakingV4Step2EnableEpoch: integrationTests.UnreachableEpoch, + StakingV4Step3EnableEpoch: integrationTests.UnreachableEpoch, } nodes := integrationTests.CreateNodesWithEnableEpochs( 
diff --git a/integrationTests/vm/testInitializer.go b/integrationTests/vm/testInitializer.go index 0c9fa15b273..7d44d945e14 100644 --- a/integrationTests/vm/testInitializer.go +++ b/integrationTests/vm/testInitializer.go @@ -60,6 +60,7 @@ import ( "github.com/multiversx/mx-chain-go/testscommon/economicsmocks" "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" "github.com/multiversx/mx-chain-go/testscommon/epochNotifier" + "github.com/multiversx/mx-chain-go/testscommon/genesisMocks" "github.com/multiversx/mx-chain-go/testscommon/integrationtests" "github.com/multiversx/mx-chain-go/testscommon/shardingMocks" storageStubs "github.com/multiversx/mx-chain-go/testscommon/storage" @@ -322,11 +323,6 @@ func createEconomicsData(enableEpochsConfig config.EnableEpochs) (process.Econom minGasLimit := strconv.FormatUint(1, 10) testProtocolSustainabilityAddress := "erd1932eft30w753xyvme8d49qejgkjc09n5e49w4mwdjtm0neld797su0dlxp" - builtInCost, _ := economics.NewBuiltInFunctionsCost(&economics.ArgsBuiltInFunctionCost{ - ArgsParser: smartContract.NewArgumentParser(), - GasSchedule: mock.NewGasScheduleNotifierMock(defaults.FillGasMapInternal(map[string]map[string]uint64{}, 1)), - }) - realEpochNotifier := forking.NewGenericEpochNotifier() enableEpochsHandler, _ := enablers.NewEnableEpochsHandler(enableEpochsConfig, realEpochNotifier) @@ -371,10 +367,9 @@ func createEconomicsData(enableEpochsConfig config.EnableEpochs) (process.Econom MaxGasPriceSetGuardian: "2000000000", }, }, - EpochNotifier: realEpochNotifier, - EnableEpochsHandler: enableEpochsHandler, - BuiltInFunctionsCostHandler: builtInCost, - TxVersionChecker: versioning.NewTxVersionChecker(minTransactionVersion), + EpochNotifier: realEpochNotifier, + EnableEpochsHandler: enableEpochsHandler, + TxVersionChecker: versioning.NewTxVersionChecker(minTransactionVersion), } return economics.NewEconomicsData(argsNewEconomicsData) @@ -702,7 +697,8 @@ func CreateVMAndBlockchainHookMeta( Economics: economicsData, MessageSignVerifier: &mock.MessageSignVerifierMock{}, GasSchedule: gasSchedule, - NodesConfigProvider: &mock.NodesSetupStub{}, + ArgBlockChainHook: args, + NodesConfigProvider: &genesisMocks.NodesSetupStub{}, Hasher: integrationtests.TestHasher, Marshalizer: integrationtests.TestMarshalizer, SystemSCConfig: createSystemSCConfig(), @@ -711,6 +707,7 @@ func CreateVMAndBlockchainHookMeta( ChanceComputer: &shardingMocks.NodesCoordinatorMock{}, ShardCoordinator: mock.NewMultiShardsCoordinatorMock(1), EnableEpochsHandler: enableEpochsHandler, + NodesCoordinator: &shardingMocks.NodesCoordinatorMock{}, } vmFactory, err := metachain.NewVMContainerFactory(argVMContainer) if err != nil { @@ -764,6 +761,8 @@ func createSystemSCConfig() *config.SystemSmartContractsConfig { BleedPercentagePerRound: 0.00001, MaxNumberOfNodesForStake: 36, ActivateBLSPubKeyMessageVerification: false, + StakeLimitPercentage: 100.0, + NodeLimitPercentage: 100.0, }, DelegationManagerSystemSCConfig: config.DelegationManagerSystemSCConfig{ MinCreationDeposit: "1250000000000000000000", @@ -774,6 +773,12 @@ func createSystemSCConfig() *config.SystemSmartContractsConfig { MinServiceFee: 1, MaxServiceFee: 20, }, + SoftAuctionConfig: config.SoftAuctionConfig{ + TopUpStep: "10", + MinTopUp: "1", + MaxTopUp: "32000000", + MaxNumberOfIterations: 100000, + }, } } @@ -818,6 +823,7 @@ func CreateTxProcessorWithOneSCExecutorWithVMs( epochNotifierInstance process.EpochNotifier, guardianChecker process.GuardianChecker, roundNotifierInstance process.RoundNotifier, + chainHandler 
data.ChainHandler, ) (*ResultsCreateTxProcessor, error) { if check.IfNil(poolsHolder) { poolsHolder = dataRetrieverMock.NewPoolsHolderMock() @@ -980,6 +986,7 @@ func CreateTxProcessorWithOneSCExecutorWithVMs( Marshalizer: integrationtests.TestMarshalizer, Hasher: integrationtests.TestHasher, DataFieldParser: dataFieldParser, + BlockChainHook: blockChainHook, } argsNewSCProcessor.VMOutputCacher = txSimulatorProcessorArgs.VMOutputCacher @@ -1006,6 +1013,7 @@ func CreateTxProcessorWithOneSCExecutorWithVMs( Accounts: simulationAccountsDB, ShardCoordinator: shardCoordinator, EnableEpochsHandler: argsNewSCProcessor.EnableEpochsHandler, + BlockChain: chainHandler, } apiTransactionEvaluator, err := transactionEvaluator.NewAPITransactionEvaluator(argsTransactionEvaluator) if err != nil { @@ -1077,7 +1085,7 @@ func CreatePreparedTxProcessorAndAccountsWithVMs( senderAddressBytes, senderBalance, enableEpochsConfig, - integrationTests.GetDefaultRoundsConfig()) + testscommon.GetDefaultRoundsConfig()) } // CreatePreparedTxProcessorAndAccountsWithVMsWithRoundsConfig - @@ -1088,7 +1096,7 @@ func CreatePreparedTxProcessorAndAccountsWithVMsWithRoundsConfig( enableEpochsConfig config.EnableEpochs, roundsConfig config.RoundConfig, ) (*VMTestContext, error) { - feeAccumulator, _ := postprocess.NewFeeAccumulator() + feeAccumulator := postprocess.NewFeeAccumulator() accounts := integrationtests.CreateInMemoryShardAccountsDB() _, _ = CreateAccount(accounts, senderAddressBytes, senderNonce, senderBalance) vmConfig := createDefaultVMConfig() @@ -1128,6 +1136,7 @@ func CreatePreparedTxProcessorAndAccountsWithVMsWithRoundsConfig( epochNotifierInstance, guardedAccountHandler, roundNotifierInstance, + chainHandler, ) if err != nil { return nil, err @@ -1174,13 +1183,13 @@ func CreatePreparedTxProcessorWithVMsAndCustomGasSchedule( mock.NewMultiShardsCoordinatorMock(2), integrationtests.CreateMemUnit(), createMockGasScheduleNotifierWithCustomGasSchedule(updateGasSchedule), - integrationTests.GetDefaultRoundsConfig(), + testscommon.GetDefaultRoundsConfig(), ) } // CreatePreparedTxProcessorWithVMsWithShardCoordinator - func CreatePreparedTxProcessorWithVMsWithShardCoordinator(enableEpochsConfig config.EnableEpochs, shardCoordinator sharding.Coordinator) (*VMTestContext, error) { - return CreatePreparedTxProcessorWithVMsWithShardCoordinatorAndRoundConfig(enableEpochsConfig, integrationTests.GetDefaultRoundsConfig(), shardCoordinator) + return CreatePreparedTxProcessorWithVMsWithShardCoordinatorAndRoundConfig(enableEpochsConfig, testscommon.GetDefaultRoundsConfig(), shardCoordinator) } // CreatePreparedTxProcessorWithVMsWithShardCoordinatorAndRoundConfig - @@ -1207,7 +1216,7 @@ func CreatePreparedTxProcessorWithVMsWithShardCoordinatorDBAndGas( shardCoordinator, db, gasScheduleNotifier, - integrationTests.GetDefaultRoundsConfig(), + testscommon.GetDefaultRoundsConfig(), vmConfig, ) } @@ -1240,7 +1249,7 @@ func CreatePreparedTxProcessorWithVMConfigWithShardCoordinatorDBAndGasAndRoundCo roundsConfig config.RoundConfig, vmConfig *config.VirtualMachineConfig, ) (*VMTestContext, error) { - feeAccumulator, _ := postprocess.NewFeeAccumulator() + feeAccumulator := postprocess.NewFeeAccumulator() epochNotifierInstance := forking.NewGenericEpochNotifier() enableEpochsHandler, _ := enablers.NewEnableEpochsHandler(enableEpochsConfig, epochNotifierInstance) accounts := integrationtests.CreateAccountsDB(db, enableEpochsHandler) @@ -1279,6 +1288,7 @@ func CreatePreparedTxProcessorWithVMConfigWithShardCoordinatorDBAndGasAndRoundCo 
epochNotifierInstance, guardedAccountHandler, roundNotifierInstance, + chainHandler, ) if err != nil { return nil, err @@ -1319,7 +1329,7 @@ func CreateTxProcessorWasmVMWithGasSchedule( senderBalance, gasScheduleMap, enableEpochsConfig, - integrationTests.GetDefaultRoundsConfig(), + testscommon.GetDefaultRoundsConfig(), ) } @@ -1332,7 +1342,7 @@ func CreateTxProcessorArwenVMWithGasScheduleAndRoundConfig( enableEpochsConfig config.EnableEpochs, roundsConfig config.RoundConfig, ) (*VMTestContext, error) { - feeAccumulator, _ := postprocess.NewFeeAccumulator() + feeAccumulator := postprocess.NewFeeAccumulator() accounts := integrationtests.CreateInMemoryShardAccountsDB() _, _ = CreateAccount(accounts, senderAddressBytes, senderNonce, senderBalance) vmConfig := createDefaultVMConfig() @@ -1374,6 +1384,7 @@ func CreateTxProcessorArwenVMWithGasScheduleAndRoundConfig( epochNotifierInstance, guardedAccountHandler, roundNotifierInstance, + chainHandler, ) if err != nil { return nil, err @@ -1403,7 +1414,7 @@ func CreateTxProcessorWasmVMWithVMConfig( ) (*VMTestContext, error) { return CreateTxProcessorArwenWithVMConfigAndRoundConfig( enableEpochsConfig, - integrationTests.GetDefaultRoundsConfig(), + testscommon.GetDefaultRoundsConfig(), vmConfig, gasSchedule, ) @@ -1416,7 +1427,7 @@ func CreateTxProcessorArwenWithVMConfigAndRoundConfig( vmConfig *config.VirtualMachineConfig, gasSchedule map[string]map[string]uint64, ) (*VMTestContext, error) { - feeAccumulator, _ := postprocess.NewFeeAccumulator() + feeAccumulator := postprocess.NewFeeAccumulator() accounts := integrationtests.CreateInMemoryShardAccountsDB() wasmVMChangeLocker := &sync.RWMutex{} gasScheduleNotifier := mock.NewGasScheduleNotifierMock(gasSchedule) @@ -1455,6 +1466,7 @@ func CreateTxProcessorArwenWithVMConfigAndRoundConfig( epochNotifierInstance, guardedAccountHandler, roundNotifierInstance, + chainHandler, ) if err != nil { return nil, err @@ -1492,7 +1504,7 @@ func CreatePreparedTxProcessorAndAccountsWithMockedVM( senderAddressBytes, senderBalance, enableEpochs, - integrationTests.GetDefaultRoundsConfig(), + testscommon.GetDefaultRoundsConfig(), wasmVMChangeLocker, ) } @@ -1823,7 +1835,7 @@ func GetNodeIndex(nodeList []*integrationTests.TestProcessorNode, node *integrat // CreatePreparedTxProcessorWithVMsMultiShard - func CreatePreparedTxProcessorWithVMsMultiShard(selfShardID uint32, enableEpochsConfig config.EnableEpochs) (*VMTestContext, error) { - return CreatePreparedTxProcessorWithVMsMultiShardAndRoundConfig(selfShardID, enableEpochsConfig, integrationTests.GetDefaultRoundsConfig()) + return CreatePreparedTxProcessorWithVMsMultiShardAndRoundConfig(selfShardID, enableEpochsConfig, testscommon.GetDefaultRoundsConfig()) } // CreatePreparedTxProcessorWithVMsMultiShardAndRoundConfig - @@ -1840,7 +1852,7 @@ func CreatePreparedTxProcessorWithVMsMultiShardRoundVMConfig( ) (*VMTestContext, error) { shardCoordinator, _ := sharding.NewMultiShardCoordinator(3, selfShardID) - feeAccumulator, _ := postprocess.NewFeeAccumulator() + feeAccumulator := postprocess.NewFeeAccumulator() accounts := integrationtests.CreateInMemoryShardAccountsDB() wasmVMChangeLocker := &sync.RWMutex{} @@ -1885,6 +1897,7 @@ func CreatePreparedTxProcessorWithVMsMultiShardRoundVMConfig( epochNotifierInstance, guardedAccountHandler, roundNotifierInstance, + chainHandler, ) if err != nil { return nil, err diff --git a/integrationTests/vm/txsFee/apiTransactionEvaluator_test.go b/integrationTests/vm/txsFee/apiTransactionEvaluator_test.go index e5b6661d02e..6c3f6844403 
100644 --- a/integrationTests/vm/txsFee/apiTransactionEvaluator_test.go +++ b/integrationTests/vm/txsFee/apiTransactionEvaluator_test.go @@ -27,7 +27,7 @@ func getZeroGasAndFees() scheduled.GasAndFees { func TestSCCallCostTransactionCost(t *testing.T) { if testing.Short() { - t.Skip("cannot run with -race -short; requires Wasm VM fix") + t.Skip("this is not a short test") } testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{ @@ -54,7 +54,7 @@ func TestSCCallCostTransactionCost(t *testing.T) { func TestScDeployTransactionCost(t *testing.T) { if testing.Short() { - t.Skip("cannot run with -race -short; requires Wasm VM fix") + t.Skip("this is not a short test") } testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) @@ -74,7 +74,7 @@ func TestScDeployTransactionCost(t *testing.T) { func TestAsyncCallsTransactionCost(t *testing.T) { if testing.Short() { - t.Skip("cannot run with -race -short; requires Wasm VM fix") + t.Skip("this is not a short test") } testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) @@ -105,7 +105,7 @@ func TestAsyncCallsTransactionCost(t *testing.T) { func TestBuiltInFunctionTransactionCost(t *testing.T) { if testing.Short() { - t.Skip("cannot run with -race -short; requires Wasm VM fix") + t.Skip("this is not a short test") } testContext, err := vm.CreatePreparedTxProcessorWithVMs( @@ -131,7 +131,7 @@ func TestBuiltInFunctionTransactionCost(t *testing.T) { func TestESDTTransfer(t *testing.T) { if testing.Short() { - t.Skip("cannot run with -race -short; requires Wasm VM fix") + t.Skip("this is not a short test") } testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) @@ -154,7 +154,7 @@ func TestESDTTransfer(t *testing.T) { func TestAsyncESDTTransfer(t *testing.T) { if testing.Short() { - t.Skip("cannot run with -race -short; requires Wasm VM fix") + t.Skip("this is not a short test") } testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{ diff --git a/integrationTests/vm/txsFee/asyncCall_multi_test.go b/integrationTests/vm/txsFee/asyncCall_multi_test.go index 289f440efa3..61886be4da3 100644 --- a/integrationTests/vm/txsFee/asyncCall_multi_test.go +++ b/integrationTests/vm/txsFee/asyncCall_multi_test.go @@ -1,5 +1,3 @@ -//go:build !race - package txsFee import ( @@ -23,6 +21,10 @@ var egldBalance = big.NewInt(50000000000) var esdtBalance = big.NewInt(100) func TestAsyncCallLegacy(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) require.Nil(t, err) defer testContext.Close() @@ -66,6 +68,10 @@ func TestAsyncCallLegacy(t *testing.T) { } func TestAsyncCallMulti(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) require.Nil(t, err) defer testContext.Close() @@ -113,6 +119,10 @@ func TestAsyncCallMulti(t *testing.T) { } func TestAsyncCallTransferAndExecute(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) require.Nil(t, err) defer testContext.Close() @@ -164,6 +174,10 @@ func TestAsyncCallTransferAndExecute(t *testing.T) { } func TestAsyncCallTransferESDTAndExecute_Success(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + numberOfCallsFromParent := 3 numberOfBackTransfers := 2 
transferESDTAndExecute(t, numberOfCallsFromParent, numberOfBackTransfers) @@ -280,6 +294,10 @@ func deployForwarderAndTestContract( } func TestAsyncCallMulti_CrossShard(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContextFirstContract, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(0, config.EnableEpochs{}) require.Nil(t, err) defer testContextFirstContract.Close() @@ -366,6 +384,10 @@ func TestAsyncCallMulti_CrossShard(t *testing.T) { } func TestAsyncCallTransferAndExecute_CrossShard(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + childShard, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(0, config.EnableEpochs{}) require.Nil(t, err) defer childShard.Close() @@ -448,6 +470,10 @@ func TestAsyncCallTransferAndExecute_CrossShard(t *testing.T) { } func TestAsyncCallTransferESDTAndExecute_CrossShard_Success(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + numberOfCallsFromParent := 3 numberOfBackTransfers := 2 transferESDTAndExecuteCrossShard(t, numberOfCallsFromParent, numberOfBackTransfers) diff --git a/integrationTests/vm/txsFee/asyncCall_test.go b/integrationTests/vm/txsFee/asyncCall_test.go index cedf9ad825b..19a966e2fa8 100644 --- a/integrationTests/vm/txsFee/asyncCall_test.go +++ b/integrationTests/vm/txsFee/asyncCall_test.go @@ -1,7 +1,3 @@ -//go:build !race - -// TODO remove build condition above to allow -race -short, after Wasm VM fix - package txsFee import ( @@ -22,6 +18,7 @@ import ( "github.com/multiversx/mx-chain-go/integrationTests/vm/wasm" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/sharding" + "github.com/multiversx/mx-chain-go/testscommon" "github.com/multiversx/mx-chain-go/testscommon/integrationtests" vmcommon "github.com/multiversx/mx-chain-vm-common-go" wasmConfig "github.com/multiversx/mx-chain-vm-go/config" @@ -32,6 +29,10 @@ import ( const upgradeContractFunction = "upgradeContract" func TestAsyncCallShouldWork(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) require.Nil(t, err) defer testContext.Close() @@ -84,6 +85,10 @@ func TestAsyncCallShouldWork(t *testing.T) { } func TestMinterContractWithAsyncCalls(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMsAndCustomGasSchedule(config.EnableEpochs{}, func(gasMap wasmConfig.GasScheduleMap) { // if `MaxBuiltInCallsPerTx` is 200 test will fail gasMap[common.MaxPerTransaction]["MaxBuiltInCallsPerTx"] = 199 @@ -140,6 +145,10 @@ func TestMinterContractWithAsyncCalls(t *testing.T) { } func TestAsyncCallsOnInitFunctionOnUpgrade(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + firstContractCode := wasm.GetSCCode("./testdata/first/output/first.wasm") newContractCode := wasm.GetSCCode("./testdata/asyncOnInit/asyncOnInitAndUpgrade.wasm") @@ -191,7 +200,7 @@ func testAsyncCallsOnInitFunctionOnUpgrade( shardCoordinatorForShard1, integrationtests.CreateMemUnit(), gasScheduleNotifier, - integrationTests.GetDefaultRoundsConfig(), + testscommon.GetDefaultRoundsConfig(), vm.CreateVMConfigWithVersion("v1.4"), ) require.Nil(t, err) @@ -200,7 +209,7 @@ func testAsyncCallsOnInitFunctionOnUpgrade( shardCoordinatorForShardMeta, integrationtests.CreateMemUnit(), gasScheduleNotifier, - integrationTests.GetDefaultRoundsConfig(), + 
testscommon.GetDefaultRoundsConfig(), vm.CreateVMConfigWithVersion("v1.4"), ) require.Nil(t, err) @@ -275,6 +284,10 @@ func testAsyncCallsOnInitFunctionOnUpgrade( } func TestAsyncCallsOnInitFunctionOnDeploy(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + firstSCCode := wasm.GetSCCode("./testdata/first/output/first.wasm") pathToSecondSC := "./testdata/asyncOnInit/asyncOnInitAndUpgrade.wasm" secondSCCode := wasm.GetSCCode(pathToSecondSC) @@ -325,7 +338,7 @@ func testAsyncCallsOnInitFunctionOnDeploy(t *testing.T, shardCoordinatorForShard1, integrationtests.CreateMemUnit(), gasScheduleNotifier, - integrationTests.GetDefaultRoundsConfig(), + testscommon.GetDefaultRoundsConfig(), vm.CreateVMConfigWithVersion("v1.4"), ) require.Nil(t, err) @@ -334,7 +347,7 @@ func testAsyncCallsOnInitFunctionOnDeploy(t *testing.T, shardCoordinatorForShardMeta, integrationtests.CreateMemUnit(), gasScheduleNotifier, - integrationTests.GetDefaultRoundsConfig(), + testscommon.GetDefaultRoundsConfig(), vm.CreateVMConfigWithVersion("v1.4"), ) require.Nil(t, err) diff --git a/integrationTests/vm/txsFee/asyncESDT_test.go b/integrationTests/vm/txsFee/asyncESDT_test.go index 2c2dfce4c71..289926f96db 100644 --- a/integrationTests/vm/txsFee/asyncESDT_test.go +++ b/integrationTests/vm/txsFee/asyncESDT_test.go @@ -1,7 +1,3 @@ -//go:build !race - -// TODO remove build condition above to allow -race -short, after Wasm VM fix - package txsFee import ( @@ -25,6 +21,10 @@ import ( ) func TestAsyncESDTCallShouldWork(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{ DynamicGasCostForDataTrieStorageLoadEnableEpoch: integrationTests.UnreachableEpoch, }) @@ -79,6 +79,10 @@ func TestAsyncESDTCallShouldWork(t *testing.T) { } func TestAsyncESDTCallSecondScRefusesPayment(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) require.Nil(t, err) defer testContext.Close() @@ -132,6 +136,10 @@ func TestAsyncESDTCallSecondScRefusesPayment(t *testing.T) { } func TestAsyncESDTCallsOutOfGas(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) require.Nil(t, err) defer testContext.Close() @@ -184,6 +192,10 @@ func TestAsyncESDTCallsOutOfGas(t *testing.T) { } func TestAsyncMultiTransferOnCallback(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{ DynamicGasCostForDataTrieStorageLoadEnableEpoch: integrationTests.UnreachableEpoch, }) @@ -279,6 +291,10 @@ func TestAsyncMultiTransferOnCallback(t *testing.T) { } func TestAsyncMultiTransferOnCallAndOnCallback(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) require.Nil(t, err) defer testContext.Close() @@ -379,6 +395,10 @@ func TestAsyncMultiTransferOnCallAndOnCallback(t *testing.T) { } func TestSendNFTToContractWith0Function(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) require.Nil(t, err) defer testContext.Close() @@ -428,6 +448,10 @@ func TestSendNFTToContractWith0Function(t *testing.T) { } func 
TestSendNFTToContractWith0FunctionNonPayable(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) require.Nil(t, err) defer testContext.Close() @@ -478,6 +502,10 @@ func TestSendNFTToContractWith0FunctionNonPayable(t *testing.T) { } func TestAsyncESDTCallForThirdContractShouldWork(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(0, config.EnableEpochs{}) require.Nil(t, err) defer testContext.Close() diff --git a/integrationTests/vm/txsFee/backwardsCompatibility_test.go b/integrationTests/vm/txsFee/backwardsCompatibility_test.go index b4a73596edb..2b160d342cd 100644 --- a/integrationTests/vm/txsFee/backwardsCompatibility_test.go +++ b/integrationTests/vm/txsFee/backwardsCompatibility_test.go @@ -17,12 +17,15 @@ import ( // minGasPrice = 1, gasPerDataByte = 1, minGasLimit = 1 func TestMoveBalanceSelfShouldWorkAndConsumeTxFeeWhenAllFlagsAreDisabled(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{ - PenalizedTooMuchGasEnableEpoch: 100, - BuiltInFunctionOnMetaEnableEpoch: 100, - SCDeployEnableEpoch: 100, - MetaProtectionEnableEpoch: 100, - RelayedTransactionsEnableEpoch: 100, + PenalizedTooMuchGasEnableEpoch: 100, + SCDeployEnableEpoch: 100, + MetaProtectionEnableEpoch: 100, + RelayedTransactionsEnableEpoch: 100, }) require.Nil(t, err) defer testContext.Close() @@ -58,6 +61,10 @@ func TestMoveBalanceSelfShouldWorkAndConsumeTxFeeWhenAllFlagsAreDisabled(t *test // minGasPrice = 1, gasPerDataByte = 1, minGasLimit = 1 func TestMoveBalanceAllFlagsDisabledLessBalanceThanGasLimitMulGasPrice(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{ PenalizedTooMuchGasEnableEpoch: integrationTests.UnreachableEpoch, BuiltInFunctionsEnableEpoch: integrationTests.UnreachableEpoch, @@ -81,6 +88,10 @@ func TestMoveBalanceAllFlagsDisabledLessBalanceThanGasLimitMulGasPrice(t *testin } func TestMoveBalanceSelfShouldWorkAndConsumeTxFeeWhenSomeFlagsAreDisabled(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs( config.EnableEpochs{ PenalizedTooMuchGasEnableEpoch: 0, diff --git a/integrationTests/vm/txsFee/builtInFunctions_test.go b/integrationTests/vm/txsFee/builtInFunctions_test.go index 6a9b31bb674..5f0ae16ebc3 100644 --- a/integrationTests/vm/txsFee/builtInFunctions_test.go +++ b/integrationTests/vm/txsFee/builtInFunctions_test.go @@ -1,7 +1,3 @@ -//go:build !race - -// TODO remove build condition above to allow -race -short, after Wasm VM fix - package txsFee import ( @@ -20,6 +16,7 @@ import ( "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/sharding" "github.com/multiversx/mx-chain-go/state" + "github.com/multiversx/mx-chain-go/testscommon" "github.com/multiversx/mx-chain-go/testscommon/integrationtests" vmcommon "github.com/multiversx/mx-chain-vm-common-go" "github.com/stretchr/testify/assert" @@ -27,6 +24,10 @@ import ( ) func TestBuildInFunctionChangeOwnerCallShouldWorkV1(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs( config.EnableEpochs{ PenalizedTooMuchGasEnableEpoch: 
integrationTests.UnreachableEpoch, @@ -65,6 +66,10 @@ func TestBuildInFunctionChangeOwnerCallShouldWorkV1(t *testing.T) { } func TestBuildInFunctionChangeOwnerCallShouldWork(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs( config.EnableEpochs{ PenalizedTooMuchGasEnableEpoch: integrationTests.UnreachableEpoch, @@ -102,6 +107,10 @@ func TestBuildInFunctionChangeOwnerCallShouldWork(t *testing.T) { } func TestBuildInFunctionChangeOwnerCallWrongOwnerShouldConsumeGas(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) require.Nil(t, err) defer testContext.Close() @@ -139,6 +148,10 @@ func TestBuildInFunctionChangeOwnerCallWrongOwnerShouldConsumeGas(t *testing.T) } func TestBuildInFunctionChangeOwnerInvalidAddressShouldConsumeGas(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) require.Nil(t, err) defer testContext.Close() @@ -173,6 +186,10 @@ func TestBuildInFunctionChangeOwnerInvalidAddressShouldConsumeGas(t *testing.T) } func TestBuildInFunctionChangeOwnerCallInsufficientGasLimitShouldNotConsumeGas(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) require.Nil(t, err) defer testContext.Close() @@ -209,6 +226,10 @@ func TestBuildInFunctionChangeOwnerCallInsufficientGasLimitShouldNotConsumeGas(t } func TestBuildInFunctionChangeOwnerOutOfGasShouldConsumeGas(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) require.Nil(t, err) defer testContext.Close() @@ -244,6 +265,10 @@ func TestBuildInFunctionChangeOwnerOutOfGasShouldConsumeGas(t *testing.T) { } func TestBuildInFunctionSaveKeyValue_WrongDestination(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + shardCoord, _ := sharding.NewMultiShardCoordinator(2, 0) testContext, err := vm.CreatePreparedTxProcessorWithVMsWithShardCoordinator( @@ -279,6 +304,10 @@ func TestBuildInFunctionSaveKeyValue_WrongDestination(t *testing.T) { } func TestBuildInFunctionSaveKeyValue_NotEnoughGasFor3rdSave(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + shardCoord, _ := sharding.NewMultiShardCoordinator(2, 0) testContext, err := vm.CreatePreparedTxProcessorWithVMsWithShardCoordinator( @@ -307,6 +336,10 @@ func TestBuildInFunctionSaveKeyValue_NotEnoughGasFor3rdSave(t *testing.T) { } func TestBuildInFunctionSaveKeyValue_NotEnoughGasForTheSameKeyValue(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + shardCoord, _ := sharding.NewMultiShardCoordinator(2, 0) gasScheduleNotifier := vm.CreateMockGasScheduleNotifier() @@ -321,7 +354,7 @@ func TestBuildInFunctionSaveKeyValue_NotEnoughGasForTheSameKeyValue(t *testing.T shardCoord, integrationtests.CreateMemUnit(), gasScheduleNotifier, - integrationTests.GetDefaultRoundsConfig(), + testscommon.GetDefaultRoundsConfig(), vm.CreateVMConfigWithVersion("v1.5"), ) require.Nil(t, err) diff --git a/integrationTests/vm/txsFee/dns_test.go b/integrationTests/vm/txsFee/dns_test.go index 53c6644b679..a859341d1d4 100644 --- a/integrationTests/vm/txsFee/dns_test.go +++ b/integrationTests/vm/txsFee/dns_test.go @@ -1,7 +1,3 @@ 
-//go:build !race - -// TODO remove build condition above to allow -race -short, after Wasm VM fix - package txsFee import ( @@ -19,6 +15,7 @@ import ( "github.com/multiversx/mx-chain-go/integrationTests/vm" "github.com/multiversx/mx-chain-go/integrationTests/vm/txsFee/utils" "github.com/multiversx/mx-chain-go/state" + "github.com/multiversx/mx-chain-go/testscommon" logger "github.com/multiversx/mx-chain-logger-go" vmcommon "github.com/multiversx/mx-chain-vm-common-go" "github.com/stretchr/testify/assert" @@ -28,6 +25,10 @@ import ( const returnOkData = "@6f6b" func TestDeployDNSContract_TestRegisterAndResolveAndSendTxWithSndAndRcvUserName(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{ DynamicGasCostForDataTrieStorageLoadEnableEpoch: 10, }) @@ -115,6 +116,10 @@ func TestDeployDNSContract_TestRegisterAndResolveAndSendTxWithSndAndRcvUserName( // relayer address is in shard 2, creates a transaction on the behalf of the user from shard 2, that will call the DNS contract // from shard 1. func TestDeployDNSContract_TestGasWhenSaveUsernameFailsCrossShardBackwardsCompatibility(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + enableEpochs := config.EnableEpochs{ ChangeUsernameEnableEpoch: 1000, // flag disabled, backwards compatibility SCProcessorV2EnableEpoch: 1000, @@ -124,7 +129,7 @@ func TestDeployDNSContract_TestGasWhenSaveUsernameFailsCrossShardBackwardsCompat testContextForDNSContract, err := vm.CreatePreparedTxProcessorWithVMsMultiShardRoundVMConfig( 1, enableEpochs, - integrationTests.GetDefaultRoundsConfig(), + testscommon.GetDefaultRoundsConfig(), vmConfig, ) require.Nil(t, err) @@ -133,7 +138,7 @@ func TestDeployDNSContract_TestGasWhenSaveUsernameFailsCrossShardBackwardsCompat testContextForRelayerAndUser, err := vm.CreatePreparedTxProcessorWithVMsMultiShardRoundVMConfig( 2, enableEpochs, - integrationTests.GetDefaultRoundsConfig(), + testscommon.GetDefaultRoundsConfig(), vmConfig, ) require.Nil(t, err) @@ -191,6 +196,10 @@ func TestDeployDNSContract_TestGasWhenSaveUsernameFailsCrossShardBackwardsCompat } func TestDeployDNSContract_TestGasWhenSaveUsernameAfterDNSv2IsActivated(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContextForDNSContract, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(1, config.EnableEpochs{ DynamicGasCostForDataTrieStorageLoadEnableEpoch: integrationTests.UnreachableEpoch, }) diff --git a/integrationTests/vm/txsFee/dynamicGasCost_test.go b/integrationTests/vm/txsFee/dynamicGasCost_test.go index a8c8a8eb9eb..e1fca367f3f 100644 --- a/integrationTests/vm/txsFee/dynamicGasCost_test.go +++ b/integrationTests/vm/txsFee/dynamicGasCost_test.go @@ -1,7 +1,3 @@ -//go:build !race - -// TODO remove build condition above to allow -race -short, after Wasm VM fix - package txsFee import ( @@ -23,6 +19,10 @@ import ( ) func TestDynamicGasCostForDataTrieStorageLoad(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + enableEpochs := config.EnableEpochs{ DynamicGasCostForDataTrieStorageLoadEnableEpoch: 0, } diff --git a/integrationTests/vm/txsFee/esdtLocalBurn_test.go b/integrationTests/vm/txsFee/esdtLocalBurn_test.go index c76957928a5..29c4fc26320 100644 --- a/integrationTests/vm/txsFee/esdtLocalBurn_test.go +++ b/integrationTests/vm/txsFee/esdtLocalBurn_test.go @@ -14,6 +14,10 @@ import ( ) func TestESDTLocalBurnShouldWork(t *testing.T) { + if testing.Short() { + 
t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) require.Nil(t, err) defer testContext.Close() @@ -44,6 +48,10 @@ func TestESDTLocalBurnShouldWork(t *testing.T) { } func TestESDTLocalBurnMoreThanTotalBalanceShouldErr(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) require.Nil(t, err) defer testContext.Close() @@ -74,6 +82,10 @@ func TestESDTLocalBurnMoreThanTotalBalanceShouldErr(t *testing.T) { } func TestESDTLocalBurnNotAllowedShouldErr(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) require.Nil(t, err) defer testContext.Close() diff --git a/integrationTests/vm/txsFee/esdtLocalMint_test.go b/integrationTests/vm/txsFee/esdtLocalMint_test.go index 491d9102372..f2104f4c341 100644 --- a/integrationTests/vm/txsFee/esdtLocalMint_test.go +++ b/integrationTests/vm/txsFee/esdtLocalMint_test.go @@ -14,6 +14,10 @@ import ( ) func TestESDTLocalMintShouldWork(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) require.Nil(t, err) defer testContext.Close() @@ -44,6 +48,10 @@ func TestESDTLocalMintShouldWork(t *testing.T) { } func TestESDTLocalMintNotAllowedShouldErr(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) require.Nil(t, err) defer testContext.Close() diff --git a/integrationTests/vm/txsFee/esdt_test.go b/integrationTests/vm/txsFee/esdt_test.go index da865619d4e..07871a87750 100644 --- a/integrationTests/vm/txsFee/esdt_test.go +++ b/integrationTests/vm/txsFee/esdt_test.go @@ -18,6 +18,10 @@ import ( ) func TestESDTTransferShouldWork(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) require.Nil(t, err) defer testContext.Close() @@ -54,6 +58,10 @@ func TestESDTTransferShouldWork(t *testing.T) { } func TestESDTTransferShouldWorkToMuchGasShouldConsumeAllGas(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) require.Nil(t, err) defer testContext.Close() @@ -90,6 +98,10 @@ func TestESDTTransferShouldWorkToMuchGasShouldConsumeAllGas(t *testing.T) { } func TestESDTTransferInvalidESDTValueShouldConsumeGas(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) require.Nil(t, err) defer testContext.Close() @@ -126,6 +138,10 @@ func TestESDTTransferInvalidESDTValueShouldConsumeGas(t *testing.T) { } func TestESDTTransferCallBackOnErrorShouldNotGenerateSCRsFurther(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + shardC, _ := sharding.NewMultiShardCoordinator(2, 0) testContext, err := vm.CreatePreparedTxProcessorWithVMsWithShardCoordinator(config.EnableEpochs{}, shardC) require.Nil(t, err) diff --git a/integrationTests/vm/txsFee/guardAccount_test.go b/integrationTests/vm/txsFee/guardAccount_test.go index 2baa497f991..6ccde4df164 100644 --- a/integrationTests/vm/txsFee/guardAccount_test.go +++ b/integrationTests/vm/txsFee/guardAccount_test.go @@ 
-1,7 +1,3 @@ -//go:build !race - -// TODO remove build condition above to allow -race -short, after Wasm VM fix - package txsFee import ( @@ -99,14 +95,13 @@ func prepareTestContextForGuardedAccounts(tb testing.TB) *vm.VMTestContext { testContext, err := vm.CreatePreparedTxProcessorWithVMsWithShardCoordinatorDBAndGasAndRoundConfig( config.EnableEpochs{ GovernanceEnableEpoch: unreachableEpoch, - WaitingListFixEnableEpoch: unreachableEpoch, SetSenderInEeiOutputTransferEnableEpoch: unreachableEpoch, RefactorPeersMiniBlocksEnableEpoch: unreachableEpoch, }, testscommon.NewMultiShardsCoordinatorMock(2), db, gasScheduleNotifier, - integrationTests.GetDefaultRoundsConfig(), + testscommon.GetDefaultRoundsConfig(), ) require.Nil(tb, err) @@ -351,6 +346,10 @@ func setNewEpochOnContext(testContext *vm.VMTestContext, epoch uint32) { } func TestGuardAccount_ShouldErrorIfInstantSetIsDoneOnANotProtectedAccount(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext := prepareTestContextForGuardedAccounts(t) defer testContext.Close() @@ -368,6 +367,10 @@ func TestGuardAccount_ShouldErrorIfInstantSetIsDoneOnANotProtectedAccount(t *tes } func TestGuardAccount_ShouldSetGuardianOnANotProtectedAccount(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext := prepareTestContextForGuardedAccounts(t) defer testContext.Close() @@ -468,6 +471,10 @@ func TestGuardAccount_ShouldSetGuardianOnANotProtectedAccount(t *testing.T) { } func TestGuardAccount_SendingFundsWhileProtectedAndNotProtected(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext := prepareTestContextForGuardedAccounts(t) defer testContext.Close() @@ -593,6 +600,10 @@ func TestGuardAccount_SendingFundsWhileProtectedAndNotProtected(t *testing.T) { // 14. 
alice un-guards the accounts immediately using a cosigned transaction and then sends a guarded transaction -> should error // 14.1 alice sends unguarded transaction -> should work func TestGuardAccount_Scenario1(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext := prepareTestContextForGuardedAccounts(t) defer testContext.Close() @@ -917,6 +928,10 @@ func TestGuardAccount_Scenario1(t *testing.T) { // 3.1 cosigned transaction should work // 3.2 single signed transaction should not work func TestGuardAccounts_RelayedTransactionV1(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext := prepareTestContextForGuardedAccounts(t) defer testContext.Close() @@ -1037,6 +1052,10 @@ func TestGuardAccounts_RelayedTransactionV1(t *testing.T) { // 3.1 cosigned transaction should not work // 3.2 single signed transaction should not work func TestGuardAccounts_RelayedTransactionV2(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext := prepareTestContextForGuardedAccounts(t) defer testContext.Close() diff --git a/integrationTests/vm/txsFee/migrateDataTrie_test.go b/integrationTests/vm/txsFee/migrateDataTrie_test.go index 9c62a4f30fd..02eecc0e1c3 100644 --- a/integrationTests/vm/txsFee/migrateDataTrie_test.go +++ b/integrationTests/vm/txsFee/migrateDataTrie_test.go @@ -1,7 +1,3 @@ -//go:build !race - -// TODO remove build condition above to allow -race -short, after Wasm VM fix - package txsFee import ( @@ -31,7 +27,9 @@ type dataTrie interface { } func TestMigrateDataTrieBuiltInFunc(t *testing.T) { - t.Parallel() + if testing.Short() { + t.Skip("this is not a short test") + } enableEpochs := config.EnableEpochs{ AutoBalanceDataTriesEnableEpoch: 0, diff --git a/integrationTests/vm/txsFee/moveBalance_test.go b/integrationTests/vm/txsFee/moveBalance_test.go index 8a119084cff..28907f5a2c6 100644 --- a/integrationTests/vm/txsFee/moveBalance_test.go +++ b/integrationTests/vm/txsFee/moveBalance_test.go @@ -18,6 +18,10 @@ import ( // minGasPrice = 1, gasPerDataByte = 1, minGasLimit = 1 func TestMoveBalanceSelfShouldWorkAndConsumeTxFee(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) require.Nil(t, err) defer testContext.Close() @@ -53,6 +57,10 @@ func TestMoveBalanceSelfShouldWorkAndConsumeTxFee(t *testing.T) { // minGasPrice = 1, gasPerDataByte = 1, minGasLimit = 1 func TestMoveBalanceAllFlagsEnabledLessBalanceThanGasLimitMulGasPrice(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) require.Nil(t, err) defer testContext.Close() @@ -70,6 +78,10 @@ func TestMoveBalanceAllFlagsEnabledLessBalanceThanGasLimitMulGasPrice(t *testing } func TestMoveBalanceShouldWork(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) require.Nil(t, err) defer testContext.Close() @@ -110,6 +122,10 @@ func TestMoveBalanceShouldWork(t *testing.T) { } func TestMoveBalanceInvalidHasGasButNoValueShouldConsumeGas(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) require.Nil(t, err) defer testContext.Close() @@ -139,6 +155,10 @@ func TestMoveBalanceInvalidHasGasButNoValueShouldConsumeGas(t 
*testing.T) { } func TestMoveBalanceHigherNonceShouldNotConsumeGas(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) require.Nil(t, err) defer testContext.Close() @@ -169,6 +189,10 @@ func TestMoveBalanceHigherNonceShouldNotConsumeGas(t *testing.T) { } func TestMoveBalanceMoreGasThanGasLimitPerMiniBlockForSafeCrossShard(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) require.Nil(t, err) defer testContext.Close() @@ -200,6 +224,10 @@ func TestMoveBalanceMoreGasThanGasLimitPerMiniBlockForSafeCrossShard(t *testing. } func TestMoveBalanceInvalidUserNames(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) require.Nil(t, err) defer testContext.Close() diff --git a/integrationTests/vm/txsFee/multiESDTTransfer_test.go b/integrationTests/vm/txsFee/multiESDTTransfer_test.go index d9457da31c5..c85a1a2bc1b 100644 --- a/integrationTests/vm/txsFee/multiESDTTransfer_test.go +++ b/integrationTests/vm/txsFee/multiESDTTransfer_test.go @@ -15,6 +15,10 @@ import ( ) func TestMultiESDTTransferShouldWork(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) require.Nil(t, err) defer testContext.Close() @@ -69,6 +73,10 @@ func TestMultiESDTTransferShouldWork(t *testing.T) { } func TestMultiESDTTransferFailsBecauseOfMaxLimit(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMsAndCustomGasSchedule(config.EnableEpochs{}, func(gasMap wasmConfig.GasScheduleMap) { gasMap[common.MaxPerTransaction]["MaxNumberOfTransfersPerTx"] = 1 diff --git a/integrationTests/vm/txsFee/multiShard/asyncCallWithChangeOwner_test.go b/integrationTests/vm/txsFee/multiShard/asyncCallWithChangeOwner_test.go index aac3723f294..28130046e11 100644 --- a/integrationTests/vm/txsFee/multiShard/asyncCallWithChangeOwner_test.go +++ b/integrationTests/vm/txsFee/multiShard/asyncCallWithChangeOwner_test.go @@ -17,7 +17,7 @@ import ( func TestDoChangeOwnerCrossShardFromAContract(t *testing.T) { if testing.Short() { - t.Skip("cannot run with -race -short; requires Wasm VM fix") + t.Skip("this is not a short test") } enableEpochs := config.EnableEpochs{ diff --git a/integrationTests/vm/txsFee/multiShard/asyncCall_test.go b/integrationTests/vm/txsFee/multiShard/asyncCall_test.go index 181d937e55e..9a0297de698 100644 --- a/integrationTests/vm/txsFee/multiShard/asyncCall_test.go +++ b/integrationTests/vm/txsFee/multiShard/asyncCall_test.go @@ -1,5 +1,3 @@ -//go:build !race - package multiShard import ( @@ -11,14 +9,14 @@ import ( "github.com/multiversx/mx-chain-go/integrationTests" "github.com/multiversx/mx-chain-go/integrationTests/vm" "github.com/multiversx/mx-chain-go/integrationTests/vm/txsFee/utils" + "github.com/multiversx/mx-chain-go/testscommon" vmcommon "github.com/multiversx/mx-chain-vm-common-go" "github.com/stretchr/testify/require" ) func TestAsyncCallShouldWork(t *testing.T) { - // TODO reinstate test after Wasm VM pointer fix if testing.Short() { - t.Skip("cannot run with -race -short; requires Wasm VM fix") + t.Skip("this is not a short test") } enableEpochs := config.EnableEpochs{ @@ -118,7 +116,7 @@ func 
TestAsyncCallShouldWork(t *testing.T) { func TestAsyncCallDisabled(t *testing.T) { if testing.Short() { - t.Skip("cannot run with -race -short; requires Arwen fix") + t.Skip("this is not a short test") } enableEpochs := config.EnableEpochs{ @@ -128,7 +126,7 @@ func TestAsyncCallDisabled(t *testing.T) { SCProcessorV2EnableEpoch: integrationTests.UnreachableEpoch, } - roundsConfig := integrationTests.GetDefaultRoundsConfig() + roundsConfig := testscommon.GetDefaultRoundsConfig() activationRound := roundsConfig.RoundActivations["DisableAsyncCallV1"] activationRound.Round = "0" roundsConfig.RoundActivations["DisableAsyncCallV1"] = activationRound diff --git a/integrationTests/vm/txsFee/multiShard/asyncESDT_test.go b/integrationTests/vm/txsFee/multiShard/asyncESDT_test.go index 114859ac5bf..e7d78430350 100644 --- a/integrationTests/vm/txsFee/multiShard/asyncESDT_test.go +++ b/integrationTests/vm/txsFee/multiShard/asyncESDT_test.go @@ -1,7 +1,3 @@ -//go:build !race - -// TODO remove build condition above to allow -race -short, after Wasm VM fix - package multiShard import ( @@ -18,6 +14,10 @@ import ( ) func TestAsyncESDTTransferWithSCCallShouldWork(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + enableEpochs := config.EnableEpochs{ DynamicGasCostForDataTrieStorageLoadEnableEpoch: integrationTests.UnreachableEpoch, } @@ -130,6 +130,10 @@ func TestAsyncESDTTransferWithSCCallShouldWork(t *testing.T) { } func TestAsyncESDTTransferWithSCCallSecondContractAnotherToken(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + enableEpochs := config.EnableEpochs{ DynamicGasCostForDataTrieStorageLoadEnableEpoch: integrationTests.UnreachableEpoch, } diff --git a/integrationTests/vm/txsFee/multiShard/builtInFunctions_test.go b/integrationTests/vm/txsFee/multiShard/builtInFunctions_test.go index ea14882730b..dc6172eeef8 100644 --- a/integrationTests/vm/txsFee/multiShard/builtInFunctions_test.go +++ b/integrationTests/vm/txsFee/multiShard/builtInFunctions_test.go @@ -33,7 +33,7 @@ func getZeroGasAndFees() scheduled.GasAndFees { // 4. 
Execute SCR from context destination on context source ( the new owner will receive the developer rewards) func TestBuiltInFunctionExecuteOnSourceAndDestinationShouldWork(t *testing.T) { if testing.Short() { - t.Skip("cannot run with -race -short; requires Wasm VM fix") + t.Skip("this is not a short test") } testContextSource, err := vm.CreatePreparedTxProcessorWithVMsMultiShard( diff --git a/integrationTests/vm/txsFee/multiShard/esdtLiquidity_test.go b/integrationTests/vm/txsFee/multiShard/esdtLiquidity_test.go index a18a62003e3..036c17d9cef 100644 --- a/integrationTests/vm/txsFee/multiShard/esdtLiquidity_test.go +++ b/integrationTests/vm/txsFee/multiShard/esdtLiquidity_test.go @@ -18,6 +18,10 @@ import ( ) func TestSystemAccountLiquidityAfterCrossShardTransferAndBurn(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + tokenID := []byte("MYNFT") sh0Addr := []byte("12345678901234567890123456789010") sh1Addr := []byte("12345678901234567890123456789011") @@ -66,6 +70,10 @@ func TestSystemAccountLiquidityAfterCrossShardTransferAndBurn(t *testing.T) { } func TestSystemAccountLiquidityAfterNFTWipe(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + tokenID := []byte("MYNFT-0a0a0a") sh0Addr := bytes.Repeat([]byte{1}, 31) sh0Addr = append(sh0Addr, 0) @@ -112,6 +120,10 @@ func TestSystemAccountLiquidityAfterNFTWipe(t *testing.T) { } func TestSystemAccountLiquidityAfterSFTWipe(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + tokenID := []byte("MYSFT-0a0a0a") sh0Addr := bytes.Repeat([]byte{1}, 31) sh0Addr = append(sh0Addr, 0) diff --git a/integrationTests/vm/txsFee/multiShard/esdt_test.go b/integrationTests/vm/txsFee/multiShard/esdt_test.go index f224b528ef6..8f978daee1c 100644 --- a/integrationTests/vm/txsFee/multiShard/esdt_test.go +++ b/integrationTests/vm/txsFee/multiShard/esdt_test.go @@ -16,6 +16,10 @@ import ( ) func TestESDTTransferShouldWork(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(1, config.EnableEpochs{}) require.Nil(t, err) defer testContext.Close() @@ -46,6 +50,10 @@ func TestESDTTransferShouldWork(t *testing.T) { } func TestMultiESDTNFTTransferViaRelayedV2(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + tokenID1 := []byte("MYNFT1") tokenID2 := []byte("MYNFT2") sh0Addr := []byte("12345678901234567890123456789010") diff --git a/integrationTests/vm/txsFee/multiShard/moveBalance_test.go b/integrationTests/vm/txsFee/multiShard/moveBalance_test.go index 41e404d4af7..8c5f6bd6015 100644 --- a/integrationTests/vm/txsFee/multiShard/moveBalance_test.go +++ b/integrationTests/vm/txsFee/multiShard/moveBalance_test.go @@ -14,6 +14,10 @@ import ( ) func TestMoveBalanceShouldWork(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(1, config.EnableEpochs{}) require.Nil(t, err) defer testContext.Close() @@ -49,7 +53,9 @@ func TestMoveBalanceShouldWork(t *testing.T) { } func TestMoveBalanceContractAddressDataFieldNilShouldConsumeGas(t *testing.T) { - t.Parallel() + if testing.Short() { + t.Skip("this is not a short test") + } testContext, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(1, config.EnableEpochs{}) require.Nil(t, err) @@ -89,7 +95,9 @@ func TestMoveBalanceContractAddressDataFieldNilShouldConsumeGas(t *testing.T) { } func 
TestMoveBalanceContractAddressDataFieldNotNilShouldConsumeGas(t *testing.T) { - t.Parallel() + if testing.Short() { + t.Skip("this is not a short test") + } testContext, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(1, config.EnableEpochs{}) require.Nil(t, err) @@ -129,6 +137,10 @@ func TestMoveBalanceContractAddressDataFieldNotNilShouldConsumeGas(t *testing.T) } func TestMoveBalanceExecuteOneSourceAndDestinationShard(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContextSource, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(0, config.EnableEpochs{}) require.Nil(t, err) defer testContextSource.Close() diff --git a/integrationTests/vm/txsFee/multiShard/nftTransferUpdate_test.go b/integrationTests/vm/txsFee/multiShard/nftTransferUpdate_test.go index 3a0b19b0b24..1fdd2f6f78f 100644 --- a/integrationTests/vm/txsFee/multiShard/nftTransferUpdate_test.go +++ b/integrationTests/vm/txsFee/multiShard/nftTransferUpdate_test.go @@ -15,6 +15,10 @@ import ( ) func TestNFTTransferAndUpdateOnOldTypeToken(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + enableEpochs := config.EnableEpochs{ CheckCorrectTokenIDForTransferRoleEnableEpoch: 3, DisableExecByCallerEnableEpoch: 3, diff --git a/integrationTests/vm/txsFee/multiShard/relayedBuiltInFunctions_test.go b/integrationTests/vm/txsFee/multiShard/relayedBuiltInFunctions_test.go index a97a5bfd7fe..e987d4dbc74 100644 --- a/integrationTests/vm/txsFee/multiShard/relayedBuiltInFunctions_test.go +++ b/integrationTests/vm/txsFee/multiShard/relayedBuiltInFunctions_test.go @@ -15,9 +15,8 @@ import ( ) func TestRelayedBuiltInFunctionExecuteOnRelayerAndDstShardShouldWork(t *testing.T) { - // TODO reinstate test after Wasm VM pointer fix if testing.Short() { - t.Skip("cannot run with -race -short; requires Wasm VM fix") + t.Skip("this is not a short test") } testContextRelayer, err := vm.CreatePreparedTxProcessorWithVMsMultiShard( diff --git a/integrationTests/vm/txsFee/multiShard/relayedMoveBalance_test.go b/integrationTests/vm/txsFee/multiShard/relayedMoveBalance_test.go index 2dd36161143..aa206c591b4 100644 --- a/integrationTests/vm/txsFee/multiShard/relayedMoveBalance_test.go +++ b/integrationTests/vm/txsFee/multiShard/relayedMoveBalance_test.go @@ -14,6 +14,10 @@ import ( ) func TestRelayedMoveBalanceRelayerShard0InnerTxSenderAndReceiverShard1ShouldWork(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(1, config.EnableEpochs{}) require.Nil(t, err) defer testContext.Close() @@ -58,6 +62,10 @@ func TestRelayedMoveBalanceRelayerShard0InnerTxSenderAndReceiverShard1ShouldWork } func TestRelayedMoveBalanceRelayerAndInnerTxSenderShard0ReceiverShard1(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(1, config.EnableEpochs{}) require.Nil(t, err) defer testContext.Close() @@ -103,6 +111,10 @@ func TestRelayedMoveBalanceRelayerAndInnerTxSenderShard0ReceiverShard1(t *testin } func TestRelayedMoveBalanceExecuteOnSourceAndDestination(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContextSource, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(0, config.EnableEpochs{}) require.Nil(t, err) defer testContextSource.Close() @@ -167,6 +179,10 @@ func TestRelayedMoveBalanceExecuteOnSourceAndDestination(t *testing.T) { } func 
TestRelayedMoveBalanceExecuteOnSourceAndDestinationRelayerAndInnerTxSenderShard0InnerTxReceiverShard1ShouldWork(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContextSource, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(0, config.EnableEpochs{}) require.Nil(t, err) defer testContextSource.Close() @@ -227,6 +243,10 @@ func TestRelayedMoveBalanceExecuteOnSourceAndDestinationRelayerAndInnerTxSenderS } func TestRelayedMoveBalanceRelayerAndInnerTxReceiverShard0SenderShard1(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContextSource, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(0, config.EnableEpochs{}) require.Nil(t, err) defer testContextSource.Close() @@ -299,6 +319,10 @@ func TestRelayedMoveBalanceRelayerAndInnerTxReceiverShard0SenderShard1(t *testin } func TestMoveBalanceRelayerShard0InnerTxSenderShard1InnerTxReceiverShard2ShouldWork(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContextRelayer, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(0, config.EnableEpochs{}) require.Nil(t, err) defer testContextRelayer.Close() diff --git a/integrationTests/vm/txsFee/multiShard/relayedScDeploy_test.go b/integrationTests/vm/txsFee/multiShard/relayedScDeploy_test.go index 499fbe5c6ee..7700c55b0f4 100644 --- a/integrationTests/vm/txsFee/multiShard/relayedScDeploy_test.go +++ b/integrationTests/vm/txsFee/multiShard/relayedScDeploy_test.go @@ -1,5 +1,3 @@ -//go:build !race - package multiShard import ( @@ -16,6 +14,10 @@ import ( ) func TestRelayedSCDeployShouldWork(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContextRelayer, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(2, config.EnableEpochs{}) require.Nil(t, err) defer testContextRelayer.Close() diff --git a/integrationTests/vm/txsFee/multiShard/relayedTxScCalls_test.go b/integrationTests/vm/txsFee/multiShard/relayedTxScCalls_test.go index 8e0229fef08..4e0f0d983fa 100644 --- a/integrationTests/vm/txsFee/multiShard/relayedTxScCalls_test.go +++ b/integrationTests/vm/txsFee/multiShard/relayedTxScCalls_test.go @@ -1,7 +1,3 @@ -//go:build !race - -// TODO remove build condition above to allow -race -short, after Wasm VM fix - package multiShard import ( @@ -27,6 +23,10 @@ import ( // 4. Execute SCR with the smart contract call on shard 1 // 5. 
Execute SCR with refund on relayer shard (shard 2) func TestRelayedTxScCallMultiShardShouldWork(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + enableEpochs := config.EnableEpochs{ DynamicGasCostForDataTrieStorageLoadEnableEpoch: integrationTests.UnreachableEpoch, } @@ -136,6 +136,10 @@ func TestRelayedTxScCallMultiShardShouldWork(t *testing.T) { } func TestRelayedTxScCallMultiShardFailOnInnerTxDst(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContextRelayer, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(2, config.EnableEpochs{}) require.Nil(t, err) defer testContextRelayer.Close() diff --git a/integrationTests/vm/txsFee/multiShard/scCallWithValueTransfer_test.go b/integrationTests/vm/txsFee/multiShard/scCallWithValueTransfer_test.go index bcb14308bab..8f66a649a3b 100644 --- a/integrationTests/vm/txsFee/multiShard/scCallWithValueTransfer_test.go +++ b/integrationTests/vm/txsFee/multiShard/scCallWithValueTransfer_test.go @@ -1,5 +1,3 @@ -//go:build !race - package multiShard import ( @@ -16,10 +14,18 @@ import ( ) func TestDeployContractAndTransferValueSCProcessorV1(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testDeployContractAndTransferValue(t, 1000) } func TestDeployContractAndTransferValueSCProcessorV2(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testDeployContractAndTransferValue(t, 0) } diff --git a/integrationTests/vm/txsFee/multiShard/scCalls_test.go b/integrationTests/vm/txsFee/multiShard/scCalls_test.go index 42e1dc824c1..1338e280c65 100644 --- a/integrationTests/vm/txsFee/multiShard/scCalls_test.go +++ b/integrationTests/vm/txsFee/multiShard/scCalls_test.go @@ -1,7 +1,3 @@ -//go:build !race - -// TODO remove build condition above to allow -race -short, after Wasm VM fix - package multiShard import ( @@ -17,6 +13,10 @@ import ( ) func TestScCallExecuteOnSourceAndDstShardShouldWork(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + enableEpochs := config.EnableEpochs{ DynamicGasCostForDataTrieStorageLoadEnableEpoch: integrationTests.UnreachableEpoch, } @@ -97,6 +97,10 @@ func TestScCallExecuteOnSourceAndDstShardShouldWork(t *testing.T) { } func TestScCallExecuteOnSourceAndDstShardInvalidOnDst(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContextSource, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(0, config.EnableEpochs{}) require.Nil(t, err) defer testContextSource.Close() diff --git a/integrationTests/vm/txsFee/relayedAsyncCall_test.go b/integrationTests/vm/txsFee/relayedAsyncCall_test.go index b782f318432..d98a440b648 100644 --- a/integrationTests/vm/txsFee/relayedAsyncCall_test.go +++ b/integrationTests/vm/txsFee/relayedAsyncCall_test.go @@ -1,7 +1,3 @@ -//go:build !race - -// TODO remove build condition above to allow -race -short, after Wasm VM fix - package txsFee import ( @@ -19,6 +15,10 @@ import ( ) func TestRelayedAsyncCallShouldWork(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + senderAddr := []byte("12345678901234567890123456789011") t.Run("nonce fix is disabled, should increase the sender's nonce", func(t *testing.T) { diff --git a/integrationTests/vm/txsFee/relayedAsyncESDT_test.go b/integrationTests/vm/txsFee/relayedAsyncESDT_test.go index 061a884b268..5e3ca24d999 100644 --- a/integrationTests/vm/txsFee/relayedAsyncESDT_test.go +++ b/integrationTests/vm/txsFee/relayedAsyncESDT_test.go @@ -1,7 +1,3 @@ 
-//go:build !race - -// TODO remove build condition above to allow -race -short, after Wasm VM fix - package txsFee import ( @@ -18,6 +14,10 @@ import ( ) func TestRelayedAsyncESDTCallShouldWork(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{ DynamicGasCostForDataTrieStorageLoadEnableEpoch: integrationTests.UnreachableEpoch, }) @@ -78,6 +78,10 @@ func TestRelayedAsyncESDTCallShouldWork(t *testing.T) { } func TestRelayedAsyncESDTCall_InvalidCallFirstContract(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) require.Nil(t, err) defer testContext.Close() @@ -136,6 +140,10 @@ func TestRelayedAsyncESDTCall_InvalidCallFirstContract(t *testing.T) { } func TestRelayedAsyncESDTCall_InvalidOutOfGas(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) require.Nil(t, err) defer testContext.Close() diff --git a/integrationTests/vm/txsFee/relayedBuiltInFunctions_test.go b/integrationTests/vm/txsFee/relayedBuiltInFunctions_test.go index dd82f276e27..115dc545244 100644 --- a/integrationTests/vm/txsFee/relayedBuiltInFunctions_test.go +++ b/integrationTests/vm/txsFee/relayedBuiltInFunctions_test.go @@ -1,7 +1,3 @@ -//go:build !race - -// TODO remove build condition above to allow -race -short, after Wasm VM fix - package txsFee import ( @@ -20,6 +16,10 @@ import ( ) func TestRelayedBuildInFunctionChangeOwnerCallShouldWork(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs( config.EnableEpochs{ PenalizedTooMuchGasEnableEpoch: integrationTests.UnreachableEpoch, @@ -68,6 +68,10 @@ func TestRelayedBuildInFunctionChangeOwnerCallShouldWork(t *testing.T) { } func TestRelayedBuildInFunctionChangeOwnerCallWrongOwnerShouldConsumeGas(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) require.Nil(t, err) defer testContext.Close() @@ -114,6 +118,10 @@ func TestRelayedBuildInFunctionChangeOwnerCallWrongOwnerShouldConsumeGas(t *test } func TestRelayedBuildInFunctionChangeOwnerInvalidAddressShouldConsumeGas(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) require.Nil(t, err) defer testContext.Close() @@ -158,6 +166,10 @@ func TestRelayedBuildInFunctionChangeOwnerInvalidAddressShouldConsumeGas(t *test } func TestRelayedBuildInFunctionChangeOwnerCallInsufficientGasLimitShouldConsumeGas(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + t.Run("nonce fix is disabled, should increase the sender's nonce", func(t *testing.T) { testRelayedBuildInFunctionChangeOwnerCallInsufficientGasLimitShouldConsumeGas(t, config.EnableEpochs{ @@ -220,6 +232,10 @@ func testRelayedBuildInFunctionChangeOwnerCallInsufficientGasLimitShouldConsumeG } func TestRelayedBuildInFunctionChangeOwnerCallOutOfGasShouldConsumeGas(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) require.Nil(t, err) defer testContext.Close() diff --git a/integrationTests/vm/txsFee/relayedDns_test.go 
b/integrationTests/vm/txsFee/relayedDns_test.go index e71c02622f1..54c70be0ee8 100644 --- a/integrationTests/vm/txsFee/relayedDns_test.go +++ b/integrationTests/vm/txsFee/relayedDns_test.go @@ -1,7 +1,3 @@ -//go:build !race - -// TODO remove build condition above to allow -race -short, after Wasm VM fix - package txsFee import ( @@ -18,6 +14,10 @@ import ( ) func TestRelayedTxDnsTransaction_ShouldWork(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) require.Nil(t, err) defer testContext.Close() diff --git a/integrationTests/vm/txsFee/relayedESDT_test.go b/integrationTests/vm/txsFee/relayedESDT_test.go index eba6eedb384..c9837fb7075 100644 --- a/integrationTests/vm/txsFee/relayedESDT_test.go +++ b/integrationTests/vm/txsFee/relayedESDT_test.go @@ -1,7 +1,3 @@ -//go:build !race - -// TODO remove build condition above to allow -race -short, after Wasm VM fix - package txsFee import ( @@ -17,6 +13,10 @@ import ( ) func TestRelayedESDTTransferShouldWork(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) require.Nil(t, err) defer testContext.Close() @@ -62,6 +62,10 @@ func TestRelayedESDTTransferShouldWork(t *testing.T) { } func TestTestRelayedESTTransferNotEnoughESTValueShouldConsumeGas(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) require.Nil(t, err) defer testContext.Close() diff --git a/integrationTests/vm/txsFee/relayedMoveBalance_test.go b/integrationTests/vm/txsFee/relayedMoveBalance_test.go index 2c7e230941d..accdffbfb4e 100644 --- a/integrationTests/vm/txsFee/relayedMoveBalance_test.go +++ b/integrationTests/vm/txsFee/relayedMoveBalance_test.go @@ -19,6 +19,10 @@ import ( ) func TestRelayedMoveBalanceShouldWork(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) require.Nil(t, err) defer testContext.Close() @@ -65,6 +69,10 @@ func TestRelayedMoveBalanceShouldWork(t *testing.T) { } func TestRelayedMoveBalanceInvalidGasLimitShouldConsumeGas(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) require.Nil(t, err) defer testContext.Close() @@ -97,6 +105,10 @@ func TestRelayedMoveBalanceInvalidGasLimitShouldConsumeGas(t *testing.T) { } func TestRelayedMoveBalanceInvalidUserTxShouldConsumeGas(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) require.Nil(t, err) defer testContext.Close() @@ -129,6 +141,10 @@ func TestRelayedMoveBalanceInvalidUserTxShouldConsumeGas(t *testing.T) { } func TestRelayedMoveBalanceInvalidUserTxValueShouldConsumeGas(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{ RelayedNonceFixEnableEpoch: 1, }) @@ -163,6 +179,10 @@ func TestRelayedMoveBalanceInvalidUserTxValueShouldConsumeGas(t *testing.T) { } func TestRelayedMoveBalanceHigherNonce(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{ 
RelayedNonceFixEnableEpoch: 1, }) @@ -215,6 +235,10 @@ func TestRelayedMoveBalanceHigherNonce(t *testing.T) { } func TestRelayedMoveBalanceLowerNonce(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{ RelayedNonceFixEnableEpoch: 1, }) @@ -267,6 +291,10 @@ func TestRelayedMoveBalanceLowerNonce(t *testing.T) { } func TestRelayedMoveBalanceHigherNonceWithActivatedFixCrossShard(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + enableEpochs := config.EnableEpochs{ RelayedNonceFixEnableEpoch: 0, } diff --git a/integrationTests/vm/txsFee/relayedScCalls_test.go b/integrationTests/vm/txsFee/relayedScCalls_test.go index d5e0e46179e..36febda356e 100644 --- a/integrationTests/vm/txsFee/relayedScCalls_test.go +++ b/integrationTests/vm/txsFee/relayedScCalls_test.go @@ -1,7 +1,3 @@ -//go:build !race - -// TODO remove build condition above to allow -race -short, after Wasm VM fix - package txsFee import ( @@ -19,6 +15,10 @@ import ( ) func TestRelayedScCallShouldWork(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{ DynamicGasCostForDataTrieStorageLoadEnableEpoch: integrationTests.UnreachableEpoch, }) @@ -63,6 +63,10 @@ func TestRelayedScCallShouldWork(t *testing.T) { } func TestRelayedScCallContractNotFoundShouldConsumeGas(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) require.Nil(t, err) defer testContext.Close() @@ -102,6 +106,10 @@ func TestRelayedScCallContractNotFoundShouldConsumeGas(t *testing.T) { } func TestRelayedScCallInvalidMethodShouldConsumeGas(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) require.Nil(t, err) defer testContext.Close() @@ -141,6 +149,10 @@ func TestRelayedScCallInvalidMethodShouldConsumeGas(t *testing.T) { } func TestRelayedScCallInsufficientGasLimitShouldConsumeGas(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) require.Nil(t, err) defer testContext.Close() @@ -179,6 +191,10 @@ func TestRelayedScCallInsufficientGasLimitShouldConsumeGas(t *testing.T) { } func TestRelayedScCallOutOfGasShouldConsumeGas(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) require.Nil(t, err) defer testContext.Close() @@ -218,6 +234,10 @@ func TestRelayedScCallOutOfGasShouldConsumeGas(t *testing.T) { } func TestRelayedDeployInvalidContractShouldIncrementNonceOnSender(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + senderAddr := []byte("12345678901234567890123456789011") t.Run("nonce fix is disabled, should increase the sender's nonce if inner tx has correct nonce", func(t *testing.T) { diff --git a/integrationTests/vm/txsFee/relayedScDeploy_test.go b/integrationTests/vm/txsFee/relayedScDeploy_test.go index 8a8f7f52d8c..15d6d677b44 100644 --- a/integrationTests/vm/txsFee/relayedScDeploy_test.go +++ b/integrationTests/vm/txsFee/relayedScDeploy_test.go @@ -1,7 +1,3 @@ -//go:build !race - -// TODO remove build condition above to allow -race -short, after Wasm VM fix - package 
txsFee import ( @@ -17,6 +13,10 @@ import ( ) func TestRelayedScDeployShouldWork(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) require.Nil(t, err) defer testContext.Close() @@ -57,6 +57,10 @@ func TestRelayedScDeployShouldWork(t *testing.T) { } func TestRelayedScDeployInvalidCodeShouldConsumeGas(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) require.Nil(t, err) defer testContext.Close() @@ -98,6 +102,10 @@ func TestRelayedScDeployInvalidCodeShouldConsumeGas(t *testing.T) { } func TestRelayedScDeployInsufficientGasLimitShouldConsumeGas(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) require.Nil(t, err) defer testContext.Close() @@ -137,6 +145,10 @@ func TestRelayedScDeployInsufficientGasLimitShouldConsumeGas(t *testing.T) { } func TestRelayedScDeployOutOfGasShouldConsumeGas(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) require.Nil(t, err) defer testContext.Close() diff --git a/integrationTests/vm/txsFee/scCalls_test.go b/integrationTests/vm/txsFee/scCalls_test.go index db01a33cd11..2a523825f96 100644 --- a/integrationTests/vm/txsFee/scCalls_test.go +++ b/integrationTests/vm/txsFee/scCalls_test.go @@ -1,7 +1,3 @@ -//go:build !race - -// TODO remove build condition above to allow -race -short, after Wasm VM fix - package txsFee import ( @@ -23,6 +19,7 @@ import ( "github.com/multiversx/mx-chain-go/integrationTests/vm" "github.com/multiversx/mx-chain-go/integrationTests/vm/txsFee/utils" "github.com/multiversx/mx-chain-go/process" + "github.com/multiversx/mx-chain-go/testscommon" "github.com/multiversx/mx-chain-go/testscommon/integrationtests" "github.com/multiversx/mx-chain-go/testscommon/txDataBuilder" "github.com/multiversx/mx-chain-go/vm/systemSmartContracts/defaults" @@ -60,7 +57,6 @@ func prepareTestContextForEpoch836(tb testing.TB) (*vm.VMTestContext, []byte) { testContext, err := vm.CreatePreparedTxProcessorWithVMsWithShardCoordinatorDBAndGasAndRoundConfig( config.EnableEpochs{ GovernanceEnableEpoch: unreachableEpoch, - WaitingListFixEnableEpoch: unreachableEpoch, SetSenderInEeiOutputTransferEnableEpoch: unreachableEpoch, RefactorPeersMiniBlocksEnableEpoch: unreachableEpoch, MaxBlockchainHookCountersEnableEpoch: unreachableEpoch, @@ -69,7 +65,7 @@ func prepareTestContextForEpoch836(tb testing.TB) (*vm.VMTestContext, []byte) { mock.NewMultiShardsCoordinatorMock(2), db, gasScheduleNotifier, - integrationTests.GetDefaultRoundsConfig(), + testscommon.GetDefaultRoundsConfig(), ) require.Nil(tb, err) @@ -90,6 +86,10 @@ func prepareTestContextForEpoch836(tb testing.TB) (*vm.VMTestContext, []byte) { } func TestScCallShouldWork(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{ DynamicGasCostForDataTrieStorageLoadEnableEpoch: integrationTests.UnreachableEpoch, }) @@ -134,6 +134,10 @@ func TestScCallShouldWork(t *testing.T) { } func TestScCallContractNotFoundShouldConsumeGas(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) 
require.Nil(t, err) defer testContext.Close() @@ -163,6 +167,10 @@ func TestScCallContractNotFoundShouldConsumeGas(t *testing.T) { } func TestScCallInvalidMethodToCallShouldConsumeGas(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) require.Nil(t, err) defer testContext.Close() @@ -196,6 +204,10 @@ func TestScCallInvalidMethodToCallShouldConsumeGas(t *testing.T) { } func TestScCallInsufficientGasLimitShouldNotConsumeGas(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) require.Nil(t, err) defer testContext.Close() @@ -230,6 +242,10 @@ func TestScCallInsufficientGasLimitShouldNotConsumeGas(t *testing.T) { } func TestScCallOutOfGasShouldConsumeGas(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) require.Nil(t, err) defer testContext.Close() @@ -263,6 +279,10 @@ func TestScCallOutOfGasShouldConsumeGas(t *testing.T) { } func TestScCallAndGasChangeShouldWork(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{ DynamicGasCostForDataTrieStorageLoadEnableEpoch: integrationTests.UnreachableEpoch, }) @@ -308,6 +328,10 @@ func TestScCallAndGasChangeShouldWork(t *testing.T) { } func TestESDTScCallAndGasChangeShouldWork(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) require.Nil(t, err) defer testContext.Close() @@ -368,7 +392,6 @@ func prepareTestContextForEpoch460(tb testing.TB) (*vm.VMTestContext, []byte) { testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{ GovernanceEnableEpoch: unreachableEpoch, - WaitingListFixEnableEpoch: unreachableEpoch, ScheduledMiniBlocksEnableEpoch: unreachableEpoch, CorrectJailedNotUnstakedEmptyQueueEpoch: unreachableEpoch, OptimizeNFTStoreEnableEpoch: unreachableEpoch, @@ -419,6 +442,10 @@ func prepareTestContextForEpoch460(tb testing.TB) (*vm.VMTestContext, []byte) { } func TestScCallBuyNFT_OneFailedTxAndOneOkTx(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, scAddress := prepareTestContextForEpoch460(t) defer testContext.Close() @@ -488,6 +515,10 @@ func TestScCallBuyNFT_OneFailedTxAndOneOkTx(t *testing.T) { } func TestScCallBuyNFT_TwoOkTxs(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, scAddress := prepareTestContextForEpoch460(t) defer testContext.Close() @@ -557,6 +588,10 @@ func TestScCallBuyNFT_TwoOkTxs(t *testing.T) { } func TestScCallDistributeStakingRewards_ShouldWork(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, scAddress := prepareTestContextForEpoch836(t) defer testContext.Close() diff --git a/integrationTests/vm/txsFee/scDeploy_test.go b/integrationTests/vm/txsFee/scDeploy_test.go index 875fde2fe58..8410bcf4917 100644 --- a/integrationTests/vm/txsFee/scDeploy_test.go +++ b/integrationTests/vm/txsFee/scDeploy_test.go @@ -1,7 +1,3 @@ -//go:build !race - -// TODO remove build condition above to allow -race -short, after Wasm VM fix - package txsFee import ( @@ -17,6 +13,10 @@ import ( ) func TestScDeployShouldWork(t *testing.T) { + if testing.Short() 
{ + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) require.Nil(t, err) defer testContext.Close() @@ -48,6 +48,10 @@ func TestScDeployShouldWork(t *testing.T) { } func TestScDeployInvalidContractCodeShouldConsumeGas(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) require.Nil(t, err) defer testContext.Close() @@ -80,6 +84,10 @@ func TestScDeployInvalidContractCodeShouldConsumeGas(t *testing.T) { } func TestScDeployInsufficientGasLimitShouldNotConsumeGas(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) require.Nil(t, err) defer testContext.Close() @@ -111,6 +119,10 @@ func TestScDeployInsufficientGasLimitShouldNotConsumeGas(t *testing.T) { } func TestScDeployOutOfGasShouldConsumeGas(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) require.Nil(t, err) defer testContext.Close() diff --git a/integrationTests/vm/txsFee/validatorSC_test.go b/integrationTests/vm/txsFee/validatorSC_test.go index 31fbaea8dae..6de545c5c93 100644 --- a/integrationTests/vm/txsFee/validatorSC_test.go +++ b/integrationTests/vm/txsFee/validatorSC_test.go @@ -10,12 +10,12 @@ import ( "github.com/multiversx/mx-chain-core-go/data/block" "github.com/multiversx/mx-chain-core-go/data/smartContractResult" "github.com/multiversx/mx-chain-core-go/data/transaction" - "github.com/multiversx/mx-chain-core-go/marshal" "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/integrationTests/vm" "github.com/multiversx/mx-chain-go/integrationTests/vm/txsFee/utils" "github.com/multiversx/mx-chain-go/process/smartContract/hooks" "github.com/multiversx/mx-chain-go/state" + "github.com/multiversx/mx-chain-go/testscommon/stakingcommon" vmAddr "github.com/multiversx/mx-chain-go/vm" "github.com/multiversx/mx-chain-go/vm/systemSmartContracts" vmcommon "github.com/multiversx/mx-chain-vm-common-go" @@ -28,6 +28,9 @@ const ( validatorStakeData = "stake@01@" + validatorBLSKey + "@0b823739887c40e9331f70c5a140623dfaf4558a9138b62f4473b26bbafdd4f58cb5889716a71c561c9e20e7a280e985@b2a11555ce521e4944e09ab17549d85b487dcd26c84b5017a39e31a3670889ba" cannotUnBondTokensMessage = "cannot unBond tokens, the validator would remain without min deposit, nodes are still active" noTokensToUnBondMessage = "no tokens that can be unbond at this time" + delegationManagementKey = "delegationManagement" + stakingV4Step1EnableEpoch = 4443 + stakingV4Step2EnableEpoch = 4444 ) var ( @@ -36,8 +39,6 @@ var ( value200EGLD, _ = big.NewInt(0).SetString("200000000000000000000", 10) ) -const delegationManagementKey = "delegationManagement" - func saveDelegationManagerConfig(testContext *vm.VMTestContext) { acc, _ := testContext.Accounts.LoadAccount(vmAddr.DelegationManagerSCAddress) userAcc, _ := acc.(state.UserAccountHandler) @@ -49,12 +50,16 @@ func saveDelegationManagerConfig(testContext *vm.VMTestContext) { } func TestValidatorsSC_DoStakePutInQueueUnStakeAndUnBondShouldRefund(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContextMeta, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(core.MetachainShardId, config.EnableEpochs{}) require.Nil(t, err) defer testContextMeta.Close() - saveNodesConfig(t, testContextMeta, 
1, 1, 1) + stakingcommon.SaveNodesConfig(testContextMeta.Accounts, testContextMeta.Marshalizer, 1, 1, 1) testContextMeta.BlockchainHook.(*hooks.BlockChainHookImpl).SetCurrentHeader(&block.MetaBlock{Epoch: 1}) saveDelegationManagerConfig(testContextMeta) @@ -105,12 +110,22 @@ func checkReturnLog(t *testing.T, testContextMeta *vm.VMTestContext, subStr stri } func TestValidatorsSC_DoStakePutInQueueUnStakeAndUnBondTokensShouldRefund(t *testing.T) { - testContextMeta, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(core.MetachainShardId, config.EnableEpochs{}) + if testing.Short() { + t.Skip("this is not a short test") + } + + testContextMeta, err := vm.CreatePreparedTxProcessorWithVMsMultiShard( + core.MetachainShardId, + config.EnableEpochs{ + StakingV4Step1EnableEpoch: stakingV4Step1EnableEpoch, + StakingV4Step2EnableEpoch: stakingV4Step2EnableEpoch, + }, + ) require.Nil(t, err) defer testContextMeta.Close() - saveNodesConfig(t, testContextMeta, 1, 1, 1) + stakingcommon.SaveNodesConfig(testContextMeta.Accounts, testContextMeta.Marshalizer, 1, 1, 1) saveDelegationManagerConfig(testContextMeta) testContextMeta.BlockchainHook.(*hooks.BlockChainHookImpl).SetCurrentHeader(&block.MetaBlock{Epoch: 1}) @@ -135,13 +150,21 @@ func TestValidatorsSC_DoStakePutInQueueUnStakeAndUnBondTokensShouldRefund(t *tes } func TestValidatorsSC_DoStakeWithTopUpValueTryToUnStakeTokensAndUnBondTokens(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + argUnbondTokensV1 := config.EnableEpochs{ UnbondTokensV2EnableEpoch: 20000, + StakingV4Step1EnableEpoch: stakingV4Step1EnableEpoch, + StakingV4Step2EnableEpoch: stakingV4Step2EnableEpoch, } testValidatorsSCDoStakeWithTopUpValueTryToUnStakeTokensAndUnBondTokens(t, argUnbondTokensV1) argUnbondTokensV2 := config.EnableEpochs{ UnbondTokensV2EnableEpoch: 0, + StakingV4Step1EnableEpoch: stakingV4Step1EnableEpoch, + StakingV4Step2EnableEpoch: stakingV4Step2EnableEpoch, } testValidatorsSCDoStakeWithTopUpValueTryToUnStakeTokensAndUnBondTokens(t, argUnbondTokensV2) } @@ -152,7 +175,7 @@ func testValidatorsSCDoStakeWithTopUpValueTryToUnStakeTokensAndUnBondTokens(t *t require.Nil(t, err) defer testContextMeta.Close() - saveNodesConfig(t, testContextMeta, 1, 1, 1) + stakingcommon.SaveNodesConfig(testContextMeta.Accounts, testContextMeta.Marshalizer, 1, 1, 1) saveDelegationManagerConfig(testContextMeta) testContextMeta.BlockchainHook.(*hooks.BlockChainHookImpl).SetCurrentHeader(&block.MetaBlock{Epoch: 0}) @@ -174,12 +197,22 @@ func testValidatorsSCDoStakeWithTopUpValueTryToUnStakeTokensAndUnBondTokens(t *t } func TestValidatorsSC_ToStakePutInQueueUnStakeAndUnBondShouldRefundUnBondTokens(t *testing.T) { - testContextMeta, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(core.MetachainShardId, config.EnableEpochs{}) + if testing.Short() { + t.Skip("this is not a short test") + } + + testContextMeta, err := vm.CreatePreparedTxProcessorWithVMsMultiShard( + core.MetachainShardId, + config.EnableEpochs{ + StakingV4Step1EnableEpoch: stakingV4Step1EnableEpoch, + StakingV4Step2EnableEpoch: stakingV4Step2EnableEpoch, + }, + ) require.Nil(t, err) defer testContextMeta.Close() - saveNodesConfig(t, testContextMeta, 1, 1, 1) + stakingcommon.SaveNodesConfig(testContextMeta.Accounts, testContextMeta.Marshalizer, 1, 1, 1) saveDelegationManagerConfig(testContextMeta) testContextMeta.BlockchainHook.(*hooks.BlockChainHookImpl).SetCurrentHeader(&block.MetaBlock{Epoch: 1}) @@ -220,12 +253,22 @@ func 
TestValidatorsSC_ToStakePutInQueueUnStakeAndUnBondShouldRefundUnBondTokens( } func TestValidatorsSC_ToStakePutInQueueUnStakeNodesAndUnBondNodesShouldRefund(t *testing.T) { - testContextMeta, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(core.MetachainShardId, config.EnableEpochs{}) + if testing.Short() { + t.Skip("this is not a short test") + } + + testContextMeta, err := vm.CreatePreparedTxProcessorWithVMsMultiShard( + core.MetachainShardId, + config.EnableEpochs{ + StakingV4Step1EnableEpoch: stakingV4Step1EnableEpoch, + StakingV4Step2EnableEpoch: stakingV4Step2EnableEpoch, + }, + ) require.Nil(t, err) defer testContextMeta.Close() - saveNodesConfig(t, testContextMeta, 1, 1, 1) + stakingcommon.SaveNodesConfig(testContextMeta.Accounts, testContextMeta.Marshalizer, 1, 1, 1) saveDelegationManagerConfig(testContextMeta) testContextMeta.BlockchainHook.(*hooks.BlockChainHookImpl).SetCurrentHeader(&block.MetaBlock{Epoch: 1}) @@ -278,22 +321,3 @@ func executeTxAndCheckResults( require.Equal(t, vmCodeExpected, recCode) require.Equal(t, expectedErr, err) } - -func saveNodesConfig(t *testing.T, testContext *vm.VMTestContext, stakedNodes, minNumNodes, maxNumNodes int64) { - protoMarshalizer := &marshal.GogoProtoMarshalizer{} - - account, err := testContext.Accounts.LoadAccount(vmAddr.StakingSCAddress) - require.Nil(t, err) - userAccount, _ := account.(state.UserAccountHandler) - - nodesConfigData := &systemSmartContracts.StakingNodesConfig{ - StakedNodes: stakedNodes, - MinNumNodes: minNumNodes, - MaxNumNodes: maxNumNodes, - } - nodesDataBytes, _ := protoMarshalizer.Marshal(nodesConfigData) - - _ = userAccount.SaveKeyValue([]byte("nodesConfig"), nodesDataBytes) - _ = testContext.Accounts.SaveAccount(account) - _, _ = testContext.Accounts.Commit() -} diff --git a/integrationTests/vm/wasm/badcontracts/badcontracts_test.go b/integrationTests/vm/wasm/badcontracts/badcontracts_test.go index e4b3b1b7ab7..3ccd475e739 100644 --- a/integrationTests/vm/wasm/badcontracts/badcontracts_test.go +++ b/integrationTests/vm/wasm/badcontracts/badcontracts_test.go @@ -1,5 +1,3 @@ -//go:build !race - package badcontracts import ( @@ -11,9 +9,8 @@ import ( ) func Test_Bad_C_NoPanic(t *testing.T) { - // TODO reinstate test after Wasm VM pointer fix if testing.Short() { - t.Skip("cannot run with -race -short; requires Wasm VM fix") + t.Skip("this is not a short test") } context := wasm.SetupTestContext(t) @@ -53,6 +50,10 @@ func Test_Bad_C_NoPanic(t *testing.T) { } func Test_Empty_C_NoPanic(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + context := wasm.SetupTestContext(t) defer context.Close() @@ -63,6 +64,10 @@ func Test_Empty_C_NoPanic(t *testing.T) { } func Test_Corrupt_NoPanic(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + context := wasm.SetupTestContext(t) defer context.Close() @@ -73,6 +78,10 @@ func Test_Corrupt_NoPanic(t *testing.T) { } func Test_NoMemoryDeclaration_NoPanic(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + context := wasm.SetupTestContext(t) defer context.Close() @@ -83,6 +92,10 @@ func Test_NoMemoryDeclaration_NoPanic(t *testing.T) { } func Test_BadFunctionNames_NoPanic(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + context := wasm.SetupTestContext(t) defer context.Close() @@ -91,6 +104,10 @@ func Test_BadFunctionNames_NoPanic(t *testing.T) { } func Test_BadReservedFunctions(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + 
context := wasm.SetupTestContext(t) defer context.Close() diff --git a/integrationTests/vm/wasm/delegation/delegationSimulation_test.go b/integrationTests/vm/wasm/delegation/delegationSimulation_test.go index be67b8d32b1..55be9681586 100644 --- a/integrationTests/vm/wasm/delegation/delegationSimulation_test.go +++ b/integrationTests/vm/wasm/delegation/delegationSimulation_test.go @@ -1,5 +1,3 @@ -//go:build !race - package delegation import ( diff --git a/integrationTests/vm/wasm/delegation/delegation_test.go b/integrationTests/vm/wasm/delegation/delegation_test.go index 9f4d3501c1c..9e9f394122f 100644 --- a/integrationTests/vm/wasm/delegation/delegation_test.go +++ b/integrationTests/vm/wasm/delegation/delegation_test.go @@ -1,5 +1,3 @@ -//go:build !race - package delegation import ( @@ -33,9 +31,8 @@ var NewBalanceBig = wasm.NewBalanceBig var RequireAlmostEquals = wasm.RequireAlmostEquals func TestDelegation_Claims(t *testing.T) { - // TODO reinstate test after Wasm VM pointer fix if testing.Short() { - t.Skip("cannot run with -race -short; requires Wasm VM fix") + t.Skip("this is not a short test") } context := wasm.SetupTestContext(t) diff --git a/integrationTests/vm/wasm/delegation/testRunner.go b/integrationTests/vm/wasm/delegation/testRunner.go index 343f3dace0f..e7bcb516b45 100644 --- a/integrationTests/vm/wasm/delegation/testRunner.go +++ b/integrationTests/vm/wasm/delegation/testRunner.go @@ -53,8 +53,7 @@ func RunDelegationStressTest( MaxBatchSize: 45000, MaxOpenFiles: 10, } - dbConfigHandler := factory.NewDBConfigHandler(dbConfig) - persisterFactory, err := factory.NewPersisterFactory(dbConfigHandler) + persisterFactory, err := factory.NewPersisterFactory(dbConfig) if err != nil { return nil, err } diff --git a/integrationTests/vm/wasm/erc20/erc20_test.go b/integrationTests/vm/wasm/erc20/erc20_test.go index 7eed879eb50..ef4f45bf02c 100644 --- a/integrationTests/vm/wasm/erc20/erc20_test.go +++ b/integrationTests/vm/wasm/erc20/erc20_test.go @@ -1,5 +1,3 @@ -//go:build !race - package erc20 import ( @@ -10,9 +8,8 @@ import ( ) func Test_C_001(t *testing.T) { - // TODO reinstate test after Wasm VM pointer fix if testing.Short() { - t.Skip("cannot run with -race -short; requires Wasm VM fix") + t.Skip("this is not a short test") } context := wasm.SetupTestContext(t) diff --git a/integrationTests/vm/wasm/queries/queries_test.go b/integrationTests/vm/wasm/queries/queries_test.go new file mode 100644 index 00000000000..e83170e6e0b --- /dev/null +++ b/integrationTests/vm/wasm/queries/queries_test.go @@ -0,0 +1,242 @@ +package queries + +import ( + "context" + "encoding/hex" + "fmt" + "math/big" + "testing" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-go/integrationTests" + "github.com/multiversx/mx-chain-go/integrationTests/vm/wasm" + "github.com/multiversx/mx-chain-go/process" + "github.com/multiversx/mx-chain-go/process/factory" + "github.com/multiversx/mx-chain-go/vm" + vmcommon "github.com/multiversx/mx-chain-vm-common-go" + + "github.com/stretchr/testify/require" +) + +type now struct { + blockNonce uint64 + stateRootHash []byte +} + +func TestQueries(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + snapshotsOfGetNow := make(map[uint64]now) + snapshotsOfGetState := make(map[uint64]int) + historyOfGetNow := make(map[uint64]now) + historyOfGetState := make(map[uint64]int) + + network := integrationTests.NewMiniNetwork() + defer network.Stop() + + scOwner := network.AddUser(big.NewInt(10000000000000)) + + 
network.Start() + + // Block 1 + + scAddress := deploy(t, network, scOwner.Address, "../testdata/history/output/history.wasm") + network.Continue(t, 1) + + // Block 2 + + now := queryHistoryGetNow(t, network.ShardNode, scAddress, core.OptionalUint64{}) + snapshotsOfGetNow[1] = now + network.Continue(t, 1) + + // Block 3 + + now = queryHistoryGetNow(t, network.ShardNode, scAddress, core.OptionalUint64{}) + snapshotsOfGetNow[2] = now + setState(t, network, scAddress, scOwner.Address, 42) + network.Continue(t, 1) + + // Block 4 + + state := getState(t, network.ShardNode, scAddress, core.OptionalUint64{}) + snapshotsOfGetState[3] = state + now = queryHistoryGetNow(t, network.ShardNode, scAddress, core.OptionalUint64{}) + snapshotsOfGetNow[3] = now + setState(t, network, scAddress, scOwner.Address, 43) + network.Continue(t, 1) + + // Block 5 + + state = getState(t, network.ShardNode, scAddress, core.OptionalUint64{}) + snapshotsOfGetState[4] = state + now = queryHistoryGetNow(t, network.ShardNode, scAddress, core.OptionalUint64{}) + snapshotsOfGetNow[4] = now + network.Continue(t, 1) + + // Check snapshots + block1, _ := network.ShardNode.GetShardHeader(1) + block2, _ := network.ShardNode.GetShardHeader(2) + block3, _ := network.ShardNode.GetShardHeader(3) + + require.Equal(t, uint64(1), snapshotsOfGetNow[1].blockNonce) + require.Equal(t, uint64(2), snapshotsOfGetNow[2].blockNonce) + require.Equal(t, uint64(3), snapshotsOfGetNow[3].blockNonce) + require.Equal(t, uint64(4), snapshotsOfGetNow[4].blockNonce) + + require.Equal(t, block1.GetRootHash(), snapshotsOfGetNow[1].stateRootHash) + require.Equal(t, block1.GetRootHash(), snapshotsOfGetNow[2].stateRootHash) + require.NotEqual(t, block2.GetRootHash(), snapshotsOfGetNow[3].stateRootHash) + require.NotEqual(t, block3.GetRootHash(), snapshotsOfGetNow[4].stateRootHash) + + require.Equal(t, 42, snapshotsOfGetState[3]) + require.Equal(t, 43, snapshotsOfGetState[4]) + + // Check history + historyOfGetState[1] = getState(t, network.ShardNode, scAddress, core.OptionalUint64{HasValue: true, Value: 1}) + historyOfGetNow[1] = queryHistoryGetNow(t, network.ShardNode, scAddress, core.OptionalUint64{HasValue: true, Value: 1}) + + historyOfGetState[2] = getState(t, network.ShardNode, scAddress, core.OptionalUint64{HasValue: true, Value: 2}) + historyOfGetNow[2] = queryHistoryGetNow(t, network.ShardNode, scAddress, core.OptionalUint64{HasValue: true, Value: 2}) + + historyOfGetState[3] = getState(t, network.ShardNode, scAddress, core.OptionalUint64{HasValue: true, Value: 3}) + historyOfGetNow[3] = queryHistoryGetNow(t, network.ShardNode, scAddress, core.OptionalUint64{HasValue: true, Value: 3}) + + historyOfGetState[4] = getState(t, network.ShardNode, scAddress, core.OptionalUint64{HasValue: true, Value: 4}) + historyOfGetNow[4] = queryHistoryGetNow(t, network.ShardNode, scAddress, core.OptionalUint64{HasValue: true, Value: 4}) + + require.Equal(t, snapshotsOfGetState[1], historyOfGetState[1]) + require.Equal(t, snapshotsOfGetNow[1].blockNonce, historyOfGetNow[1].blockNonce) + + require.Equal(t, snapshotsOfGetState[2], historyOfGetState[2]) + require.Equal(t, snapshotsOfGetNow[2].blockNonce, historyOfGetNow[2].blockNonce) + + require.Equal(t, snapshotsOfGetState[3], historyOfGetState[3]) + require.Equal(t, snapshotsOfGetNow[3].blockNonce, historyOfGetNow[3].blockNonce) + + require.Equal(t, snapshotsOfGetState[4], historyOfGetState[4]) + require.Equal(t, snapshotsOfGetNow[4].blockNonce, historyOfGetNow[4].blockNonce) +} + +func deploy(t *testing.T, network 
*integrationTests.MiniNetwork, sender []byte, codePath string) []byte { + code := wasm.GetSCCode(codePath) + data := fmt.Sprintf("%s@%s@0100", code, hex.EncodeToString(factory.WasmVirtualMachine)) + + _, err := network.SendTransaction( + sender, + make([]byte, 32), + big.NewInt(0), + data, + 1000, + ) + require.NoError(t, err) + + scAddress, _ := network.ShardNode.BlockchainHook.NewAddress(sender, 0, factory.WasmVirtualMachine) + return scAddress +} + +func setState(t *testing.T, network *integrationTests.MiniNetwork, scAddress []byte, sender []byte, value uint64) { + data := fmt.Sprintf("setState@%x", value) + + _, err := network.SendTransaction( + sender, + scAddress, + big.NewInt(0), + data, + 1000, + ) + + require.NoError(t, err) +} + +func getState(t *testing.T, node *integrationTests.TestProcessorNode, scAddress []byte, blockNonce core.OptionalUint64) int { + vmOutput, _, err := node.SCQueryService.ExecuteQuery(&process.SCQuery{ + ScAddress: scAddress, + FuncName: "getState", + Arguments: [][]byte{}, + BlockNonce: blockNonce, + }) + + require.Nil(t, err) + require.Equal(t, vmcommon.Ok, vmOutput.ReturnCode) + data := vmOutput.ReturnData + + return int(big.NewInt(0).SetBytes(data[0]).Uint64()) +} + +func queryHistoryGetNow(t *testing.T, node *integrationTests.TestProcessorNode, scAddress []byte, blockNonce core.OptionalUint64) now { + vmOutput, _, err := node.SCQueryService.ExecuteQuery(&process.SCQuery{ + ScAddress: scAddress, + FuncName: "getNow", + Arguments: [][]byte{}, + BlockNonce: blockNonce, + }) + + require.Nil(t, err) + require.Equal(t, vmcommon.Ok, vmOutput.ReturnCode) + data := vmOutput.ReturnData + + return now{ + blockNonce: big.NewInt(0).SetBytes(data[0]).Uint64(), + stateRootHash: data[1], + } +} + +func TestQueries_Metachain(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + network := integrationTests.NewMiniNetwork() + defer network.Stop() + + network.Start() + + alice := network.AddUser(big.NewInt(10000000000000)) + + // Issue fungible token + issueCost := big.NewInt(1000) + tokenNameHex := hex.EncodeToString([]byte("Test")) + tokenTickerHex := hex.EncodeToString([]byte("TEST")) + txData := fmt.Sprintf("issue@%s@%s@64@00", tokenNameHex, tokenTickerHex) + + _, err := network.SendTransaction( + alice.Address, + vm.ESDTSCAddress, + issueCost, + txData, + core.MinMetaTxExtraGasCost, + ) + + require.NoError(t, err) + network.Continue(t, 5) + + tokens, err := network.MetachainNode.Node.GetAllIssuedESDTs(core.FungibleESDT, context.Background()) + require.NoError(t, err) + require.Len(t, tokens, 1) + + // Query token on older block (should fail) + vmOutput, _, err := network.MetachainNode.SCQueryService.ExecuteQuery(&process.SCQuery{ + ScAddress: vm.ESDTSCAddress, + FuncName: "getTokenProperties", + Arguments: [][]byte{[]byte(tokens[0])}, + BlockNonce: core.OptionalUint64{HasValue: true, Value: 2}, + }) + + require.Nil(t, err) + require.Equal(t, vmcommon.UserError, vmOutput.ReturnCode) + require.Equal(t, "no ticker with given name", vmOutput.ReturnMessage) + + // Query token on newer block (should succeed) + vmOutput, _, err = network.MetachainNode.SCQueryService.ExecuteQuery(&process.SCQuery{ + ScAddress: vm.ESDTSCAddress, + FuncName: "getTokenProperties", + Arguments: [][]byte{[]byte(tokens[0])}, + BlockNonce: core.OptionalUint64{HasValue: true, Value: 4}, + }) + + require.Nil(t, err) + require.Equal(t, vmcommon.Ok, vmOutput.ReturnCode) + require.Equal(t, "Test", string(vmOutput.ReturnData[0])) +} diff --git 
a/integrationTests/vm/wasm/testdata/history/history.c b/integrationTests/vm/wasm/testdata/history/history.c new file mode 100644 index 00000000000..322e216aca8 --- /dev/null +++ b/integrationTests/vm/wasm/testdata/history/history.c @@ -0,0 +1,51 @@ +typedef unsigned char byte; +typedef unsigned int i32; +typedef unsigned long long i64; + +int getArgument(int argumentIndex, byte *argument); +long long int64getArgument(int argumentIndex); +long long getBlockNonce(); +long long getBlockEpoch(); +void getStateRootHash(byte *hash); + +int int64storageStore(byte *key, int keyLength, long long value); +long long int64storageLoad(byte *key, int keyLength); + +void finish(byte *data, int length); +void int64finish(long long value); + +byte zero32_buffer_a[32] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; +byte zero32_buffer_b[32] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; +byte zero32_buffer_c[32] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; +byte storageKey[] = "state"; + +void init() +{ +} + +void upgrade() +{ +} + +void setState() +{ + i64 state = int64getArgument(0); + int64storageStore(storageKey, sizeof(storageKey) - 1, state); +} + +void getState() +{ + i64 state = int64storageLoad(storageKey, sizeof(storageKey) - 1); + int64finish(state); +} + +void getNow() +{ + i64 nonce = getBlockNonce(); + + byte *stateRootHash = zero32_buffer_a; + getStateRootHash(stateRootHash); + + int64finish(nonce); + finish(stateRootHash, 32); +} diff --git a/integrationTests/vm/wasm/testdata/history/history.export b/integrationTests/vm/wasm/testdata/history/history.export new file mode 100644 index 00000000000..b6646aa3aef --- /dev/null +++ b/integrationTests/vm/wasm/testdata/history/history.export @@ -0,0 +1,5 @@ +init +upgrade +getNow +setState +getState diff --git a/integrationTests/vm/wasm/testdata/history/output/history.wasm b/integrationTests/vm/wasm/testdata/history/output/history.wasm new file mode 100755 index 00000000000..5e34d9a0ab0 Binary files /dev/null and b/integrationTests/vm/wasm/testdata/history/output/history.wasm differ diff --git a/integrationTests/vm/wasm/testdata/transferValue/output/transferValue.wasm b/integrationTests/vm/wasm/testdata/transferValue/output/transferValue.wasm new file mode 100755 index 00000000000..cea133a3b2f Binary files /dev/null and b/integrationTests/vm/wasm/testdata/transferValue/output/transferValue.wasm differ diff --git a/integrationTests/vm/wasm/testdata/transferValue/transferValue.c b/integrationTests/vm/wasm/testdata/transferValue/transferValue.c new file mode 100644 index 00000000000..e82fc4054d8 --- /dev/null +++ b/integrationTests/vm/wasm/testdata/transferValue/transferValue.c @@ -0,0 +1,58 @@ +typedef unsigned char byte; +typedef unsigned int i32; +typedef unsigned long long i64; + +int getArgument(int argumentIndex, byte *argument); +int transferValueExecute(byte *destination, byte *value, long long gas, byte *function, int functionLength, int numArguments, byte *argumentsLengths, byte *arguments); +void getCaller(byte *callerAddress); +i32 createAsyncCall(byte *destination, byte *value, byte *data, int dataLength, byte *success, int successLength, byte *error, int errorLength, long long gas, long long extraGasForCallback); + +byte zero32_a[32] = {0}; +byte zero32_b[32] = {0}; +byte zero32_c[32] = {0}; + +byte oneAtomOfEGLD[32] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1}; +byte functionNameAskMoney[] = "askMoney"; +byte functionNameMyCallback[] = "myCallback"; + +void init() +{ +} + +void upgrade() +{ +} + +void fund() +{ +} + +void forwardAskMoney() +{ + byte *otherContract = zero32_a; + getArgument(0, otherContract); + + createAsyncCall( + otherContract, + 0, + functionNameAskMoney, + sizeof(functionNameAskMoney) - 1, + functionNameMyCallback, + sizeof(functionNameMyCallback) - 1, + functionNameMyCallback, + sizeof(functionNameMyCallback) - 1, + 15000000, + 0); +} + +void askMoney() +{ + byte *caller = zero32_a; + + getCaller(caller); + transferValueExecute(caller, oneAtomOfEGLD, 0, 0, 0, 0, 0, 0); +} + +void myCallback() +{ +} diff --git a/integrationTests/vm/wasm/testdata/transferValue/transferValue.export b/integrationTests/vm/wasm/testdata/transferValue/transferValue.export new file mode 100644 index 00000000000..c9613a09af3 --- /dev/null +++ b/integrationTests/vm/wasm/testdata/transferValue/transferValue.export @@ -0,0 +1,6 @@ +init +upgrade +fund +forwardAskMoney +askMoney +myCallback diff --git a/integrationTests/vm/wasm/transfers/transfers_test.go b/integrationTests/vm/wasm/transfers/transfers_test.go new file mode 100644 index 00000000000..63e4b120f02 --- /dev/null +++ b/integrationTests/vm/wasm/transfers/transfers_test.go @@ -0,0 +1,64 @@ +package transfers + +import ( + "encoding/hex" + "fmt" + "math/big" + "testing" + + "github.com/multiversx/mx-chain-go/integrationTests/vm/wasm" + "github.com/stretchr/testify/require" +) + +func TestTransfers_DuplicatedTransferValueEvents(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + context := wasm.SetupTestContext(t) + defer context.Close() + + err := context.DeploySC("../testdata/transferValue/output/transferValue.wasm", "") + require.Nil(t, err) + vault := context.ScAddress + + err = context.DeploySC("../testdata/transferValue/output/transferValue.wasm", "") + require.Nil(t, err) + forwarder := context.ScAddress + + // Add money to the vault + context.ScAddress = vault + err = context.ExecuteSCWithValue(&context.Owner, "fund", big.NewInt(42)) + require.Nil(t, err) + + // Ask money from the vault, via the forwarder + context.ScAddress = forwarder + err = context.ExecuteSC(&context.Owner, fmt.Sprintf("forwardAskMoney@%s", hex.EncodeToString(vault))) + require.Nil(t, err) + require.Len(t, context.LastLogs, 1) + require.Len(t, context.LastLogs[0].GetLogEvents(), 5) + + events := context.LastLogs[0].GetLogEvents() + + require.Equal(t, "transferValueOnly", string(events[0].GetIdentifier())) + require.Equal(t, "AsyncCall", string(events[0].GetData())) + require.Equal(t, []byte{}, events[0].GetTopics()[0]) + require.Equal(t, forwarder, events[0].GetAddress()) + require.Equal(t, vault, events[0].GetTopics()[1]) + + require.Equal(t, "transferValueOnly", string(events[1].GetIdentifier())) + require.Equal(t, "BackTransfer", string(events[1].GetData())) + require.Equal(t, []byte{0x01}, events[1].GetTopics()[0]) + require.Equal(t, vault, events[1].GetAddress()) + require.Equal(t, forwarder, events[1].GetTopics()[1]) + + // Duplicated "transferValueOnly" events are fixed in #5936. 
+ require.Equal(t, "transferValueOnly", string(events[2].GetIdentifier())) + require.Equal(t, "AsyncCallback", string(events[2].GetData())) + require.Equal(t, []byte{}, events[2].GetTopics()[0]) + require.Equal(t, vault, events[2].GetAddress()) + require.Equal(t, forwarder, events[2].GetTopics()[1]) + + require.Equal(t, "writeLog", string(events[3].GetIdentifier())) + require.Equal(t, "completedTxEvent", string(events[4].GetIdentifier())) +} diff --git a/integrationTests/vm/wasm/upgrades/upgrades_test.go b/integrationTests/vm/wasm/upgrades/upgrades_test.go index c989498c955..4a01b67a4ec 100644 --- a/integrationTests/vm/wasm/upgrades/upgrades_test.go +++ b/integrationTests/vm/wasm/upgrades/upgrades_test.go @@ -1,7 +1,3 @@ -//go:build !race - -// TODO remove build condition above to allow -race -short, after Wasm VM fix - package upgrades import ( @@ -10,9 +6,7 @@ import ( "math/big" "testing" - "github.com/multiversx/mx-chain-core-go/data/transaction" "github.com/multiversx/mx-chain-go/integrationTests" - "github.com/multiversx/mx-chain-go/integrationTests/vm" "github.com/multiversx/mx-chain-go/integrationTests/vm/wasm" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/process/factory" @@ -21,6 +15,10 @@ import ( ) func TestUpgrades_Hello(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + context := wasm.SetupTestContext(t) defer context.Close() @@ -45,6 +43,10 @@ func TestUpgrades_Hello(t *testing.T) { } func TestUpgrades_HelloDoesNotUpgradeWhenNotUpgradeable(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + context := wasm.SetupTestContext(t) defer context.Close() @@ -63,6 +65,10 @@ func TestUpgrades_HelloDoesNotUpgradeWhenNotUpgradeable(t *testing.T) { } func TestUpgrades_HelloUpgradesToNotUpgradeable(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + context := wasm.SetupTestContext(t) defer context.Close() @@ -88,6 +94,10 @@ func TestUpgrades_HelloUpgradesToNotUpgradeable(t *testing.T) { } func TestUpgrades_ParentAndChildContracts(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + context := wasm.SetupTestContext(t) defer context.Close() @@ -127,6 +137,10 @@ func TestUpgrades_ParentAndChildContracts(t *testing.T) { } func TestUpgrades_HelloCannotBeUpgradedByNonOwner(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + context := wasm.SetupTestContext(t) defer context.Close() @@ -147,6 +161,10 @@ func TestUpgrades_HelloCannotBeUpgradedByNonOwner(t *testing.T) { } func TestUpgrades_CounterCannotBeUpgradedByNonOwner(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + context := wasm.SetupTestContext(t) defer context.Close() @@ -172,61 +190,56 @@ func TestUpgrades_HelloTrialAndError(t *testing.T) { t.Skip("this is not a short test") } - network := integrationTests.NewOneNodeNetwork() + network := integrationTests.NewMiniNetwork() defer network.Stop() - alice := []byte("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") - bob := []byte("bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb") - network.Mint(alice, big.NewInt(10000000000000)) - network.Mint(bob, big.NewInt(10000000000000)) + alice := network.AddUser(big.NewInt(10000000000000)) + bob := network.AddUser(big.NewInt(10000000000000)) - network.GoToRoundOne() + network.Start() deployTxData := fmt.Sprintf("%s@%s@0100", wasm.GetSCCode("../testdata/hello-v1/output/answer.wasm"), hex.EncodeToString(factory.WasmVirtualMachine)) upgradeTxData := 
fmt.Sprintf("upgradeContract@%s@0100", wasm.GetSCCode("../testdata/hello-v2/output/answer.wasm")) // Deploy the smart contract. Alice is the owner - network.AddTxToPool(&transaction.Transaction{ - Nonce: 0, - Value: big.NewInt(0), - RcvAddr: vm.CreateEmptyAddress(), - SndAddr: alice, - GasPrice: network.GetMinGasPrice(), - GasLimit: network.MaxGasLimitPerBlock(), - Data: []byte(deployTxData), - }) + _, err := network.SendTransaction( + alice.Address, + make([]byte, 32), + big.NewInt(0), + deployTxData, + 1000, + ) + require.Nil(t, err) - scAddress, _ := network.Node.BlockchainHook.NewAddress(alice, 0, factory.WasmVirtualMachine) + scAddress, _ := network.ShardNode.BlockchainHook.NewAddress(alice.Address, 0, factory.WasmVirtualMachine) network.Continue(t, 1) - require.Equal(t, []byte{24}, query(t, network.Node, scAddress, "getUltimateAnswer")) + require.Equal(t, []byte{24}, query(t, network.ShardNode, scAddress, "getUltimateAnswer")) // Upgrade as Bob - upgrade should fail, since Alice is the owner - network.AddTxToPool(&transaction.Transaction{ - Nonce: 0, - Value: big.NewInt(0), - RcvAddr: scAddress, - SndAddr: bob, - GasPrice: network.GetMinGasPrice(), - GasLimit: network.MaxGasLimitPerBlock(), - Data: []byte(upgradeTxData), - }) + _, err = network.SendTransaction( + bob.Address, + scAddress, + big.NewInt(0), + upgradeTxData, + 1000, + ) + require.Nil(t, err) network.Continue(t, 1) - require.Equal(t, []byte{24}, query(t, network.Node, scAddress, "getUltimateAnswer")) + require.Equal(t, []byte{24}, query(t, network.ShardNode, scAddress, "getUltimateAnswer")) // Now upgrade as Alice, should work - network.AddTxToPool(&transaction.Transaction{ - Nonce: 1, - Value: big.NewInt(0), - RcvAddr: scAddress, - SndAddr: alice, - GasPrice: network.GetMinGasPrice(), - GasLimit: network.MaxGasLimitPerBlock(), - Data: []byte(upgradeTxData), - }) + _, err = network.SendTransaction( + alice.Address, + scAddress, + big.NewInt(0), + upgradeTxData, + 1000, + ) + require.Nil(t, err) network.Continue(t, 1) - require.Equal(t, []byte{42}, query(t, network.Node, scAddress, "getUltimateAnswer")) + require.Equal(t, []byte{42}, query(t, network.ShardNode, scAddress, "getUltimateAnswer")) } func TestUpgrades_CounterTrialAndError(t *testing.T) { @@ -234,75 +247,69 @@ func TestUpgrades_CounterTrialAndError(t *testing.T) { t.Skip("this is not a short test") } - network := integrationTests.NewOneNodeNetwork() + network := integrationTests.NewMiniNetwork() defer network.Stop() - alice := []byte("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") - bob := []byte("bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb") - network.Mint(alice, big.NewInt(10000000000000)) - network.Mint(bob, big.NewInt(10000000000000)) + alice := network.AddUser(big.NewInt(10000000000000)) + bob := network.AddUser(big.NewInt(10000000000000)) - network.GoToRoundOne() + network.Start() deployTxData := fmt.Sprintf("%s@%s@0100", wasm.GetSCCode("../testdata/counter/output/counter.wasm"), hex.EncodeToString(factory.WasmVirtualMachine)) upgradeTxData := fmt.Sprintf("upgradeContract@%s@0100", wasm.GetSCCode("../testdata/counter/output/counter.wasm")) // Deploy the smart contract. 
Alice is the owner - network.AddTxToPool(&transaction.Transaction{ - Nonce: 0, - Value: big.NewInt(0), - RcvAddr: vm.CreateEmptyAddress(), - SndAddr: alice, - GasPrice: network.GetMinGasPrice(), - GasLimit: network.MaxGasLimitPerBlock(), - Data: []byte(deployTxData), - }) + _, err := network.SendTransaction( + alice.Address, + make([]byte, 32), + big.NewInt(0), + deployTxData, + 1000, + ) + require.Nil(t, err) - scAddress, _ := network.Node.BlockchainHook.NewAddress(alice, 0, factory.WasmVirtualMachine) + scAddress, _ := network.ShardNode.BlockchainHook.NewAddress(alice.Address, 0, factory.WasmVirtualMachine) network.Continue(t, 1) - require.Equal(t, []byte{1}, query(t, network.Node, scAddress, "get")) + require.Equal(t, []byte{1}, query(t, network.ShardNode, scAddress, "get")) // Increment the counter (could be either Bob or Alice) - network.AddTxToPool(&transaction.Transaction{ - Nonce: 1, - Value: big.NewInt(0), - RcvAddr: scAddress, - SndAddr: alice, - GasPrice: network.GetMinGasPrice(), - GasLimit: network.MaxGasLimitPerBlock(), - Data: []byte("increment"), - }) + _, err = network.SendTransaction( + alice.Address, + scAddress, + big.NewInt(0), + "increment", + 1000, + ) + require.Nil(t, err) network.Continue(t, 1) - require.Equal(t, []byte{2}, query(t, network.Node, scAddress, "get")) + require.Equal(t, []byte{2}, query(t, network.ShardNode, scAddress, "get")) // Upgrade as Bob - upgrade should fail, since Alice is the owner (counter.init() not executed, state not reset) - network.AddTxToPool(&transaction.Transaction{ - Nonce: 0, - Value: big.NewInt(0), - RcvAddr: scAddress, - SndAddr: bob, - GasPrice: network.GetMinGasPrice(), - GasLimit: network.MaxGasLimitPerBlock(), - Data: []byte(upgradeTxData), - }) + _, err = network.SendTransaction( + bob.Address, + scAddress, + big.NewInt(0), + upgradeTxData, + 1000, + ) + require.Nil(t, err) network.Continue(t, 1) - require.Equal(t, []byte{2}, query(t, network.Node, scAddress, "get")) + require.Equal(t, []byte{2}, query(t, network.ShardNode, scAddress, "get")) // Now upgrade as Alice, should work (state is reset by counter.init()) - network.AddTxToPool(&transaction.Transaction{ - Nonce: 2, - Value: big.NewInt(0), - RcvAddr: scAddress, - SndAddr: alice, - GasPrice: network.GetMinGasPrice(), - GasLimit: network.MaxGasLimitPerBlock(), - Data: []byte(upgradeTxData), - }) + _, err = network.SendTransaction( + alice.Address, + scAddress, + big.NewInt(0), + upgradeTxData, + 1000, + ) + require.Nil(t, err) network.Continue(t, 1) - require.Equal(t, []byte{1}, query(t, network.Node, scAddress, "get")) + require.Equal(t, []byte{1}, query(t, network.ShardNode, scAddress, "get")) } func query(t *testing.T, node *integrationTests.TestProcessorNode, scAddress []byte, function string) []byte { diff --git a/integrationTests/vm/wasm/utils.go b/integrationTests/vm/wasm/utils.go index e58d3e25c7b..d4f4207662d 100644 --- a/integrationTests/vm/wasm/utils.go +++ b/integrationTests/vm/wasm/utils.go @@ -16,6 +16,7 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/pubkeyConverter" + "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-core-go/data/block" "github.com/multiversx/mx-chain-core-go/data/rewardTx" "github.com/multiversx/mx-chain-core-go/data/smartContractResult" @@ -102,6 +103,7 @@ type TestContext struct { ScAddress []byte ScCodeMetadata vmcommon.CodeMetadata Accounts *state.AccountsDB + TxLogsProcessor process.TransactionLogProcessor TxProcessor process.TransactionProcessor 
ScProcessor scrCommon.TestSmartContractProcessor QueryService external.SCQueryService @@ -112,6 +114,7 @@ type TestContext struct { LastTxHash []byte SCRForwarder *mock.IntermediateTransactionHandlerMock LastSCResults []*smartContractResult.SmartContractResult + LastLogs []*data.LogData } type testParticipant struct { @@ -154,7 +157,7 @@ func SetupTestContextWithGasSchedule(t *testing.T, gasSchedule map[string]map[st DynamicGasCostForDataTrieStorageLoadEnableEpoch: integrationTests.UnreachableEpoch, }, context.EpochNotifier) context.RoundNotifier = &epochNotifier.RoundNotifierStub{} - context.EnableRoundsHandler, _ = enablers.NewEnableRoundsHandler(integrationTests.GetDefaultRoundsConfig(), context.RoundNotifier) + context.EnableRoundsHandler, _ = enablers.NewEnableRoundsHandler(testscommon.GetDefaultRoundsConfig(), context.RoundNotifier) context.WasmVMChangeLocker = &sync.RWMutex{} context.initAccounts() @@ -164,7 +167,7 @@ func SetupTestContextWithGasSchedule(t *testing.T, gasSchedule map[string]map[st context.initFeeHandlers() context.initVMAndBlockchainHook() context.initTxProcessorWithOneSCExecutorWithVMs() - context.ScAddress, _ = context.BlockchainHook.NewAddress(context.Owner.Address, context.Owner.Nonce, factory.WasmVirtualMachine) + argsNewSCQueryService := smartContract.ArgsNewSCQueryService{ VmContainer: context.VMContainer, EconomicsFee: context.EconomicsFee, @@ -247,10 +250,9 @@ func (context *TestContext) initFeeHandlers() { MaxGasPriceSetGuardian: "2000000000", }, }, - EpochNotifier: context.EpochNotifier, - EnableEpochsHandler: context.EnableEpochsHandler, - BuiltInFunctionsCostHandler: &mock.BuiltInCostHandlerStub{}, - TxVersionChecker: &testscommon.TxVersionCheckerStub{}, + EpochNotifier: context.EpochNotifier, + EnableEpochsHandler: context.EnableEpochsHandler, + TxVersionChecker: &testscommon.TxVersionCheckerStub{}, } economicsData, _ := economics.NewEconomicsData(argsNewEconomicsData) @@ -364,8 +366,11 @@ func (context *TestContext) initTxProcessorWithOneSCExecutorWithVMs() { defaults.FillGasMapInternal(gasSchedule, 1) argsLogProcessor := transactionLog.ArgTxLogProcessor{Marshalizer: marshalizer} - logsProcessor, _ := transactionLog.NewTxLogProcessor(argsLogProcessor) + context.TxLogsProcessor, err = transactionLog.NewTxLogProcessor(argsLogProcessor) + require.Nil(context.T, err) + context.SCRForwarder = &mock.IntermediateTransactionHandlerMock{} + argsNewSCProcessor := scrCommon.ArgsNewSmartContractProcessor{ VmContainer: context.VMContainer, ArgsParser: smartContract.NewArgumentParser(), @@ -385,14 +390,14 @@ func (context *TestContext) initTxProcessorWithOneSCExecutorWithVMs() { SetGasRefundedCalled: func(gasRefunded uint64, hash []byte) {}, }, GasSchedule: mock.NewGasScheduleNotifierMock(gasSchedule), - TxLogsProcessor: logsProcessor, + TxLogsProcessor: context.TxLogsProcessor, EnableRoundsHandler: context.EnableRoundsHandler, EnableEpochsHandler: context.EnableEpochsHandler, WasmVMChangeLocker: context.WasmVMChangeLocker, VMOutputCacher: txcache.NewDisabledCache(), } - context.ScProcessor, _ = processProxy.NewTestSmartContractProcessorProxy(argsNewSCProcessor, context.EpochNotifier) + context.ScProcessor, err = processProxy.NewTestSmartContractProcessorProxy(argsNewSCProcessor, context.EpochNotifier) require.Nil(context.T, err) argsNewTxProcessor := processTransaction.ArgsNewTxProcessor{ @@ -414,7 +419,7 @@ func (context *TestContext) initTxProcessorWithOneSCExecutorWithVMs() { EnableEpochsHandler: context.EnableEpochsHandler, TxVersionChecker: 
&testscommon.TxVersionCheckerStub{}, GuardianChecker: &guardianMocks.GuardedAccountHandlerStub{}, - TxLogsProcessor: logsProcessor, + TxLogsProcessor: context.TxLogsProcessor, } context.TxProcessor, err = processTransaction.NewTxProcessor(argsNewTxProcessor) @@ -544,20 +549,20 @@ func (context *TestContext) DeploySC(wasmPath string, parametersString string) e return err } + context.ScAddress, _ = context.BlockchainHook.NewAddress(context.Owner.Address, context.Owner.Nonce, factory.WasmVirtualMachine) + owner.Nonce++ _, err = context.Accounts.Commit() if err != nil { return err } - err = context.GetCompositeTestError() + err = context.acquireOutcome() if err != nil { return err } - _ = context.UpdateLastSCResults() - - return nil + return context.GetCompositeTestError() } // UpgradeSC - @@ -604,14 +609,12 @@ func (context *TestContext) UpgradeSC(wasmPath string, parametersString string) return err } - err = context.GetCompositeTestError() + err = context.acquireOutcome() if err != nil { return err } - _ = context.UpdateLastSCResults() - - return nil + return context.GetCompositeTestError() } // GetSCCode - @@ -680,18 +683,16 @@ func (context *TestContext) ExecuteSCWithValue(sender *testParticipant, txData s return err } - err = context.GetCompositeTestError() + err = context.acquireOutcome() if err != nil { return err } - _ = context.UpdateLastSCResults() - - return nil + return context.GetCompositeTestError() } -// UpdateLastSCResults -- -func (context *TestContext) UpdateLastSCResults() error { +// acquireOutcome - +func (context *TestContext) acquireOutcome() error { transactions := context.SCRForwarder.GetIntermediateTransactions() context.LastSCResults = make([]*smartContractResult.SmartContractResult, len(transactions)) for i, tx := range transactions { @@ -703,6 +704,8 @@ func (context *TestContext) UpdateLastSCResults() error { } } + context.LastLogs = context.TxLogsProcessor.GetAllCurrentLogs() + return nil } diff --git a/integrationTests/vm/wasm/wasmer/wasmer_test.go b/integrationTests/vm/wasm/wasmer/wasmer_test.go index f73bceae6b5..d7eeb9260a4 100644 --- a/integrationTests/vm/wasm/wasmer/wasmer_test.go +++ b/integrationTests/vm/wasm/wasmer/wasmer_test.go @@ -1,7 +1,3 @@ -//go:build !race - -// TODO remove build condition above to allow -race -short, after Wasm VM fix - package wasmer import ( @@ -21,6 +17,10 @@ import ( var ownerAddressBytes = []byte("12345678901234567890123456789012") func TestAllowNonFloatingPointSC(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + wasmvm, scAddress := deploy(t, "../testdata/floating_point/non_fp.wasm") defer closeVM(wasmvm) @@ -37,6 +37,10 @@ func TestAllowNonFloatingPointSC(t *testing.T) { } func TestDisallowFloatingPointSC(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + wasmvm, scAddress := deploy(t, "../testdata/floating_point/fp.wasm") defer closeVM(wasmvm) @@ -53,6 +57,10 @@ func TestDisallowFloatingPointSC(t *testing.T) { } func TestSCAbortExecution_DontAbort(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + wasmvm, scAddress := deploy(t, "../testdata/misc/test_abort/test_abort.wasm") defer closeVM(wasmvm) @@ -74,6 +82,10 @@ func TestSCAbortExecution_DontAbort(t *testing.T) { } func TestSCAbortExecution_Abort(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + wasmvm, scAddress := deploy(t, "../testdata/misc/test_abort/test_abort.wasm") defer closeVM(wasmvm) diff --git 
a/integrationTests/vm/wasm/wasmvm/asyncMockContracts_test.go b/integrationTests/vm/wasm/wasmvm/asyncMockContracts_test.go index 393ef51f5de..f7a3eece169 100644 --- a/integrationTests/vm/wasm/wasmvm/asyncMockContracts_test.go +++ b/integrationTests/vm/wasm/wasmvm/asyncMockContracts_test.go @@ -59,17 +59,22 @@ func TestMockContract_AsyncLegacy_InShard(t *testing.T) { } func TestMockContract_AsyncLegacy_CrossShard(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testMockContract_CrossShard(t, LegacyAsyncCallType) } func TestMockContract_NewAsync_CrossShard(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testMockContract_CrossShard(t, NewAsyncCallType) } func testMockContract_CrossShard(t *testing.T, asyncCallType []byte) { - if testing.Short() { - t.Skip("this is not a short test") - } transferEGLD := big.NewInt(42) numberOfShards := 2 diff --git a/integrationTests/vm/wasm/wasmvm/deployment/deploy_test.go b/integrationTests/vm/wasm/wasmvm/deployment/deploy_test.go index a4cfb755b76..a57599d2866 100644 --- a/integrationTests/vm/wasm/wasmvm/deployment/deploy_test.go +++ b/integrationTests/vm/wasm/wasmvm/deployment/deploy_test.go @@ -22,7 +22,7 @@ var senderBalance = big.NewInt(1000000000000) func TestScDeployShouldManageCorrectlyTheCodeMetadata(t *testing.T) { if testing.Short() { - t.Skip("cannot run with -race -short; requires Wasm VM fix") + t.Skip("this is not a short test") } testContext, err := vm.CreatePreparedTxProcessorAndAccountsWithVMs( diff --git a/integrationTests/vm/wasm/wasmvm/deployment/upgrade_test.go b/integrationTests/vm/wasm/wasmvm/deployment/upgrade_test.go index 6d52f68acf2..22d2fc48a3f 100644 --- a/integrationTests/vm/wasm/wasmvm/deployment/upgrade_test.go +++ b/integrationTests/vm/wasm/wasmvm/deployment/upgrade_test.go @@ -20,7 +20,7 @@ const gasLimit = uint64(10000000) func TestScUpgradeShouldManageCorrectlyTheCodeMetadata(t *testing.T) { if testing.Short() { - t.Skip("cannot run with -race -short; requires Wasm VM fix") + t.Skip("this is not a short test") } testContext, err := vm.CreatePreparedTxProcessorAndAccountsWithVMs( diff --git a/integrationTests/vm/wasm/wasmvm/executeViaBlockchainhook_test.go b/integrationTests/vm/wasm/wasmvm/executeViaBlockchainhook_test.go index e36c4bb744d..9d12746bff5 100644 --- a/integrationTests/vm/wasm/wasmvm/executeViaBlockchainhook_test.go +++ b/integrationTests/vm/wasm/wasmvm/executeViaBlockchainhook_test.go @@ -1,5 +1,3 @@ -//go:build !race - package wasmvm import ( @@ -17,6 +15,9 @@ import ( ) func TestExecuteOnDestCtx_BlockchainHook(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } net := integrationTests.NewTestNetworkSized(t, 1, 1, 1) net.Start() diff --git a/integrationTests/vm/wasm/wasmvm/gasSchedule_test.go b/integrationTests/vm/wasm/wasmvm/gasSchedule_test.go index 496a31c0c06..735fbdc2ac3 100644 --- a/integrationTests/vm/wasm/wasmvm/gasSchedule_test.go +++ b/integrationTests/vm/wasm/wasmvm/gasSchedule_test.go @@ -1,7 +1,3 @@ -//go:build !race - -// TODO remove build condition above to allow -race -short, after Wasm VM fix - package wasmvm import ( @@ -17,22 +13,37 @@ import ( ) func Benchmark_VmDeployWithFibbonacciAndExecute(b *testing.B) { + if testing.Short() { + b.Skip("this is not a short benchmark") + } + runWASMVMBenchmark(b, "../testdata/misc/fib_wasm/output/fib_wasm.wasm", 32, "_main", nil, b.N, nil) } func Benchmark_searchingForPanic(b *testing.B) { + if testing.Short() { + b.Skip("this is not a short benchmark") + } 
for i := 0; i < 10; i++ { runWASMVMBenchmark(b, "../testdata/misc/fib_wasm/output/fib_wasm.wasm", 100, "_main", nil, b.N, nil) } } func Test_searchingForPanic(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + for i := 0; i < 10; i++ { runWASMVMBenchmark(t, "../testdata/misc/fib_wasm/output/fib_wasm.wasm", 100, "_main", nil, 1, nil) } } func Benchmark_VmDeployWithBadContractAndExecute(b *testing.B) { + if testing.Short() { + b.Skip("this is not a short benchmark") + } + gasSchedule, _ := common.LoadGasScheduleConfig("../../../../cmd/node/config/gasSchedules/gasScheduleV4.toml") result, err := RunTest("../testdata/misc/bad.wasm", 0, "bigLoop", nil, b.N, gasSchedule, 1500000000) @@ -47,6 +58,10 @@ func Benchmark_VmDeployWithBadContractAndExecute(b *testing.B) { } func Benchmark_VmDeployWithBadContractAndExecute2(b *testing.B) { + if testing.Short() { + b.Skip("this is not a short benchmark") + } + gasSchedule, _ := common.LoadGasScheduleConfig("../../../../cmd/node/config/gasSchedules/gasScheduleV4.toml") arg, _ := hex.DecodeString("012c") @@ -62,100 +77,196 @@ func Benchmark_VmDeployWithBadContractAndExecute2(b *testing.B) { } func Benchmark_VmDeployWithCPUCalculateAndExecute(b *testing.B) { + if testing.Short() { + b.Skip("this is not a short benchmark") + } + runWASMVMBenchmark(b, "../testdata/misc/cpucalculate_wasm/output/cpucalculate.wasm", 8000, "cpuCalculate", nil, b.N, nil) } func Benchmark_VmDeployWithStringConcatAndExecute(b *testing.B) { + if testing.Short() { + b.Skip("this is not a short benchmark") + } + runWASMVMBenchmark(b, "../testdata/misc/stringconcat_wasm/stringconcat_wasm.wasm", 10000, "_main", nil, b.N, nil) } func Benchmark_TestStore100(b *testing.B) { + if testing.Short() { + b.Skip("this is not a short benchmark") + } + runWASMVMBenchmark(b, "../testdata/storage100/output/storage100.wasm", 0, "store100", nil, b.N, nil) } func Benchmark_TestStorageBigIntNew(b *testing.B) { + if testing.Short() { + b.Skip("this is not a short benchmark") + } + runWASMVMBenchmark(b, "../testdata/c-api-tests/bigInt/output/cApiTest.wasm", 0, "bigIntNewTest", nil, b.N, nil) } func Benchmark_TestBigIntGetUnSignedBytes(b *testing.B) { + if testing.Short() { + b.Skip("this is not a short benchmark") + } + runWASMVMBenchmark(b, "../testdata/c-api-tests/bigInt/output/cApiTest.wasm", 0, "bigIntGetUnsignedBytesTest", nil, b.N, nil) } func Benchmark_TestBigIntAdd(b *testing.B) { + if testing.Short() { + b.Skip("this is not a short benchmark") + } + runWASMVMBenchmark(b, "../testdata/c-api-tests/bigInt/output/cApiTest.wasm", 0, "bigIntAddTest", nil, b.N, nil) } func Benchmark_TestBigIntMul(b *testing.B) { + if testing.Short() { + b.Skip("this is not a short benchmark") + } + runWASMVMBenchmark(b, "../testdata/c-api-tests/bigInt/output/cApiTest.wasm", 0, "bigIntMulTest", nil, b.N, nil) } func Benchmark_TestBigIntMul25(b *testing.B) { + if testing.Short() { + b.Skip("this is not a short benchmark") + } + runWASMVMBenchmark(b, "../testdata/c-api-tests/bigInt/output/cApiTest.wasm", 0, "bigIntMul25Test", nil, b.N, nil) } func Benchmark_TestBigIntMul32(b *testing.B) { + if testing.Short() { + b.Skip("this is not a short benchmark") + } + runWASMVMBenchmark(b, "../testdata/c-api-tests/bigInt/output/cApiTest.wasm", 0, "bigIntMul32Test", nil, b.N, nil) } func Benchmark_TestBigIntTDiv(b *testing.B) { + if testing.Short() { + b.Skip("this is not a short benchmark") + } + runWASMVMBenchmark(b, "../testdata/c-api-tests/bigInt/output/cApiTest.wasm", 0, "bigIntTDivTest", nil, b.N, 
nil) } func Benchmark_TestBigIntTMod(b *testing.B) { + if testing.Short() { + b.Skip("this is not a short benchmark") + } + runWASMVMBenchmark(b, "../testdata/c-api-tests/bigInt/output/cApiTest.wasm", 0, "bigIntTModTest", nil, b.N, nil) } func Benchmark_TestBigIntEDiv(b *testing.B) { + if testing.Short() { + b.Skip("this is not a short benchmark") + } + runWASMVMBenchmark(b, "../testdata/c-api-tests/bigInt/output/cApiTest.wasm", 0, "bigIntEDivTest", nil, b.N, nil) } func Benchmark_TestBigIntEMod(b *testing.B) { + if testing.Short() { + b.Skip("this is not a short benchmark") + } + runWASMVMBenchmark(b, "../testdata/c-api-tests/bigInt/output/cApiTest.wasm", 0, "bigIntEModTest", nil, b.N, nil) } func Benchmark_TestBigIntShr(b *testing.B) { + if testing.Short() { + b.Skip("this is not a short benchmark") + } + runWASMVMBenchmark(b, "../testdata/c-api-tests/bigInt/output/cApiTest.wasm", 0, "bigIntShrTest", nil, b.N, nil) } func Benchmark_TestBigIntSetup(b *testing.B) { + if testing.Short() { + b.Skip("this is not a short benchmark") + } + runWASMVMBenchmark(b, "../testdata/c-api-tests/bigInt/output/cApiTest.wasm", 0, "bigIntInitSetup", nil, b.N, nil) } func Benchmark_TestCryptoSHA256(b *testing.B) { + if testing.Short() { + b.Skip("this is not a short benchmark") + } + runWASMVMBenchmark(b, "../testdata/c-api-tests/crypto/output/cryptoTest.wasm", 0, "sha256Test", nil, b.N, nil) } func Benchmark_TestCryptoKeccak256(b *testing.B) { + if testing.Short() { + b.Skip("this is not a short benchmark") + } + runWASMVMBenchmark(b, "../testdata/c-api-tests/crypto/output/cryptoTest.wasm", 0, "keccak256Test", nil, b.N, nil) } func Benchmark_TestCryptoRipMed160(b *testing.B) { + if testing.Short() { + b.Skip("this is not a short benchmark") + } + runWASMVMBenchmark(b, "../testdata/c-api-tests/crypto/output/cryptoTest.wasm", 0, "ripemd160Test", nil, b.N, nil) } func Benchmark_TestCryptoBLS(b *testing.B) { + if testing.Short() { + b.Skip("this is not a short benchmark") + } + runWASMVMBenchmark(b, "../testdata/c-api-tests/crypto/output/cryptoTest.wasm", 0, "verifyBLSTest", nil, b.N, nil) } func Benchmark_TestCryptoVerifyED25519(b *testing.B) { + if testing.Short() { + b.Skip("this is not a short benchmark") + } + runWASMVMBenchmark(b, "../testdata/c-api-tests/crypto/output/cryptoTest.wasm", 0, "verifyEd25519Test", nil, b.N, nil) } func Benchmark_TestCryptoSecp256k1UnCompressed(b *testing.B) { + if testing.Short() { + b.Skip("this is not a short benchmark") + } + runWASMVMBenchmark(b, "../testdata/c-api-tests/crypto/output/cryptoTest.wasm", 0, "verifySecp256k1UncompressedKeyTest", nil, b.N, nil) } func Benchmark_TestCryptoSecp256k1Compressed(b *testing.B) { + if testing.Short() { + b.Skip("this is not a short benchmark") + } + runWASMVMBenchmark(b, "../testdata/c-api-tests/crypto/output/cryptoTest.wasm", 0, "verifySecp256k1CompressedKeyTest", nil, b.N, nil) } func Benchmark_TestEllipticCurveInitialVariablesAndCalls(b *testing.B) { + if testing.Short() { + b.Skip("this is not a short benchmark") + } + runWASMVMBenchmark(b, "../testdata/c-api-tests/ecBenchmark/output/ecBenchmark.wasm", 0, "initialVariablesAndCallsTest", nil, b.N, nil) } // elliptic curves func Benchmark_TestEllipticCurve(b *testing.B) { + if testing.Short() { + b.Skip("this is not a short benchmark") + } + testEllipticCurve(b, "p224Add") testEllipticCurve(b, "p256Add") testEllipticCurve(b, "p384Add") @@ -191,21 +302,37 @@ func Benchmark_TestEllipticCurve(b *testing.B) { } func Benchmark_TestEllipticCurveScalarMultP224(b *testing.B) { + if 
testing.Short() { + b.Skip("this is not a short benchmark") + } + gasSchedule, _ := common.LoadGasScheduleConfig(integrationTests.GasSchedulePath) runWASMVMBenchmark(b, "../testdata/c-api-tests/ecBenchmark/output/ecBenchmark.wasm", 0, "p224ScalarMultEcTest", getNumberOfRepsAndScalarLengthArgs(10), b.N, gasSchedule) } func Benchmark_TestEllipticCurveScalarMultP256(b *testing.B) { + if testing.Short() { + b.Skip("this is not a short benchmark") + } + gasSchedule, _ := common.LoadGasScheduleConfig(integrationTests.GasSchedulePath) runWASMVMBenchmark(b, "../testdata/c-api-tests/ecBenchmark/output/ecBenchmark.wasm", 0, "p256ScalarMultEcTest", getNumberOfRepsAndScalarLengthArgs(10), b.N, gasSchedule) } func Benchmark_TestEllipticCurveScalarMultP384(b *testing.B) { + if testing.Short() { + b.Skip("this is not a short benchmark") + } + gasSchedule, _ := common.LoadGasScheduleConfig(integrationTests.GasSchedulePath) runWASMVMBenchmark(b, "../testdata/c-api-tests/ecBenchmark/output/ecBenchmark.wasm", 0, "p384ScalarMultEcTest", getNumberOfRepsAndScalarLengthArgs(10), b.N, gasSchedule) } func Benchmark_TestEllipticCurveScalarMultP521(b *testing.B) { + if testing.Short() { + b.Skip("this is not a short benchmark") + } + gasSchedule, _ := common.LoadGasScheduleConfig(integrationTests.GasSchedulePath) runWASMVMBenchmark(b, "../testdata/c-api-tests/ecBenchmark/output/ecBenchmark.wasm", 0, "p521ScalarMultEcTest", getNumberOfRepsAndScalarLengthArgs(10), b.N, gasSchedule) } @@ -216,10 +343,18 @@ func testEllipticCurve(b *testing.B, function string) { } func Benchmark_TestCryptoDoNothing(b *testing.B) { + if testing.Short() { + b.Skip("this is not a short benchmark") + } + runWASMVMBenchmark(b, "../testdata/c-api-tests/crypto/output/cryptoTest.wasm", 0, "doNothing", nil, b.N, nil) } func Benchmark_TestStorageRust(b *testing.B) { + if testing.Short() { + b.Skip("this is not a short benchmark") + } + gasSchedule, _ := common.LoadGasScheduleConfig(integrationTests.GasSchedulePath) buff := make([]byte, 100) _, _ = rand.Read(buff) @@ -228,6 +363,10 @@ func Benchmark_TestStorageRust(b *testing.B) { } func TestGasModel(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + gasSchedule, _ := common.LoadGasScheduleConfig(integrationTests.GasSchedulePath) totalOp := uint64(0) diff --git a/integrationTests/vm/wasm/wasmvm/scenariosConverter/scenariosTests/adder/converterAdder_test.go b/integrationTests/vm/wasm/wasmvm/scenariosConverter/scenariosTests/adder/converterAdder_test.go index b5d99257277..bf0fc2436fa 100644 --- a/integrationTests/vm/wasm/wasmvm/scenariosConverter/scenariosTests/adder/converterAdder_test.go +++ b/integrationTests/vm/wasm/wasmvm/scenariosConverter/scenariosTests/adder/converterAdder_test.go @@ -7,9 +7,17 @@ import ( ) func TestScenariosConverter_AdderWithExternalSteps(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + mc.CheckConverter(t, "./adder_with_external_steps.scen.json") } func Benchmark_ScenariosConverter_AdderWithExternalSteps(b *testing.B) { + if testing.Short() { + b.Skip("this is not a short benchmark") + } + mc.BenchmarkScenariosSpecificTx(b, "./adder_with_external_steps.scen.json") } diff --git a/integrationTests/vm/wasm/wasmvm/scenariosConverter/scenariosTests/ecBenchmarks/converterEllipticCurves_test.go b/integrationTests/vm/wasm/wasmvm/scenariosConverter/scenariosTests/ecBenchmarks/converterEllipticCurves_test.go index 1978b6c0794..1f7b260e707 100644 --- 
a/integrationTests/vm/wasm/wasmvm/scenariosConverter/scenariosTests/ecBenchmarks/converterEllipticCurves_test.go +++ b/integrationTests/vm/wasm/wasmvm/scenariosConverter/scenariosTests/ecBenchmarks/converterEllipticCurves_test.go @@ -7,9 +7,17 @@ import ( ) func TestScenariosConverter_EllipticCurves(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + mc.CheckConverter(t, "./elliptic_curves.scen.json") } func Benchmark_ScenariosConverter_EllipticCurves(b *testing.B) { + if testing.Short() { + b.Skip("this is not a short benchmark") + } + mc.BenchmarkScenariosSpecificTx(b, "./elliptic_curves.scen.json") } diff --git a/integrationTests/vm/wasm/wasmvm/scenariosConverter/scenariosTests/mex/converterMex_test.go b/integrationTests/vm/wasm/wasmvm/scenariosConverter/scenariosTests/mex/converterMex_test.go index bff4906aca6..c1719095a24 100644 --- a/integrationTests/vm/wasm/wasmvm/scenariosConverter/scenariosTests/mex/converterMex_test.go +++ b/integrationTests/vm/wasm/wasmvm/scenariosConverter/scenariosTests/mex/converterMex_test.go @@ -7,8 +7,16 @@ import ( ) func TestScenariosConverter_MexState(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + mc.CheckConverter(t, "./swap_fixed_input.scen.json") } func Benchmark_ScenariosConverter_SwapFixedInput(b *testing.B) { + if testing.Short() { + b.Skip("this is not a short benchmark") + } + mc.BenchmarkScenariosSpecificTx(b, "./swap_fixed_input.scen.json") } diff --git a/integrationTests/vm/wasm/wasmvm/versionswitch/vm_test.go b/integrationTests/vm/wasm/wasmvm/versionswitch/vm_test.go index 45565934c77..e69b329162e 100644 --- a/integrationTests/vm/wasm/wasmvm/versionswitch/vm_test.go +++ b/integrationTests/vm/wasm/wasmvm/versionswitch/vm_test.go @@ -1,7 +1,3 @@ -//go:build !race - -// TODO remove build condition above to allow -race -short, after Wasm VM fix - package versionswitch import ( diff --git a/integrationTests/vm/wasm/wasmvm/versionswitch_revert/vm_test.go b/integrationTests/vm/wasm/wasmvm/versionswitch_revert/vm_test.go index dac92a24a75..9563bc24615 100644 --- a/integrationTests/vm/wasm/wasmvm/versionswitch_revert/vm_test.go +++ b/integrationTests/vm/wasm/wasmvm/versionswitch_revert/vm_test.go @@ -1,7 +1,3 @@ -//go:build !race - -// TODO remove build condition above to allow -race -short, after Wasm VM fix - package versionswitch_revert import ( diff --git a/integrationTests/vm/wasm/wasmvm/versionswitch_vmquery/vm_test.go b/integrationTests/vm/wasm/wasmvm/versionswitch_vmquery/vm_test.go index 4af3688e4fa..52cf2ccb190 100644 --- a/integrationTests/vm/wasm/wasmvm/versionswitch_vmquery/vm_test.go +++ b/integrationTests/vm/wasm/wasmvm/versionswitch_vmquery/vm_test.go @@ -1,7 +1,3 @@ -//go:build !race - -// TODO remove build condition above to allow -race -short, after Wasm VM fix - package versionswitch_vmquery import ( diff --git a/integrationTests/vm/wasm/wasmvm/wasmVM_test.go b/integrationTests/vm/wasm/wasmvm/wasmVM_test.go index 9df0d4e22b5..53ace932675 100644 --- a/integrationTests/vm/wasm/wasmvm/wasmVM_test.go +++ b/integrationTests/vm/wasm/wasmvm/wasmVM_test.go @@ -1,7 +1,3 @@ -//go:build !race - -// TODO remove build condition above to allow -race -short, after Wasm VM fix - package wasmvm import ( @@ -46,6 +42,10 @@ import ( var log = logger.GetOrCreate("wasmVMtest") func TestVmDeployWithTransferAndGasShouldDeploySCCode(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + senderAddressBytes := []byte("12345678901234567890123456789012") senderNonce 
:= uint64(0) senderBalance := big.NewInt(100000000) @@ -92,6 +92,10 @@ func TestVmDeployWithTransferAndGasShouldDeploySCCode(t *testing.T) { } func TestVmSCDeployFactory(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + senderAddressBytes := []byte("12345678901234567890123456789012") senderNonce := uint64(0) senderBalance := big.NewInt(100000000) @@ -148,6 +152,10 @@ func TestVmSCDeployFactory(t *testing.T) { } func TestSCMoveBalanceBeforeSCDeployV1(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + ownerAddressBytes := []byte("12345678901234567890123456789012") ownerNonce := uint64(0) ownerBalance := big.NewInt(100000000) @@ -228,6 +236,10 @@ func TestSCMoveBalanceBeforeSCDeployV1(t *testing.T) { } func TestSCMoveBalanceBeforeSCDeploy(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + ownerAddressBytes := []byte("12345678901234567890123456789012") ownerNonce := uint64(0) ownerBalance := big.NewInt(100000000) @@ -307,6 +319,10 @@ func TestSCMoveBalanceBeforeSCDeploy(t *testing.T) { } func TestWASMMetering(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + ownerAddressBytes := []byte("12345678901234567890123456789012") ownerNonce := uint64(11) ownerBalance := big.NewInt(0xfffffffffffffff) @@ -408,6 +424,7 @@ func TestMultipleTimesERC20RustBigIntInBatches(t *testing.T) { if testing.Short() { t.Skip("this is not a short test") } + gasSchedule, _ := common.LoadGasScheduleConfig(integrationTests.GasSchedulePath) durations, err := DeployAndExecuteERC20WithBigInt(3, 1000, gasSchedule, "../testdata/erc20-c-03/rust-simple-erc20.wasm", "transfer") require.Nil(t, err) @@ -446,6 +463,10 @@ func displayBenchmarksResults(durations []time.Duration) { } func TestDeployERC20WithNotEnoughGasShouldReturnOutOfGas(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + gasSchedule, _ := common.LoadGasScheduleConfig(integrationTests.GasSchedulePath) ownerAddressBytes := []byte("12345678901234567890123456789011") ownerNonce := uint64(11) @@ -480,8 +501,7 @@ func TestDeployERC20WithNotEnoughGasShouldReturnOutOfGas(t *testing.T) { } func TestJournalizingAndTimeToProcessChange(t *testing.T) { - // Only a test to benchmark jurnalizing and getting data from trie - t.Skip() + t.Skip("Only a test to benchmark journalizing and getting data from trie") numRun := 1000 ownerAddressBytes := []byte("12345678901234567890123456789011") @@ -577,8 +597,7 @@ func TestJournalizingAndTimeToProcessChange(t *testing.T) { } func TestExecuteTransactionAndTimeToProcessChange(t *testing.T) { - // Only a test to benchmark transaction processing - t.Skip() + t.Skip("Only a test to benchmark transaction processing") testMarshalizer := &marshal.JsonMarshalizer{} testHasher := sha256.NewSha256() @@ -817,6 +836,10 @@ func TestAndCatchTrieError(t *testing.T) { } func TestCommunityContract_InShard(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + zero := big.NewInt(0) transferEGLD := big.NewInt(42) @@ -859,6 +882,10 @@ func TestCommunityContract_InShard(t *testing.T) { } func TestCommunityContract_CrossShard(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + zero := big.NewInt(0) transferEGLD := big.NewInt(42) @@ -904,6 +931,10 @@ func TestCommunityContract_CrossShard(t *testing.T) { } func TestCommunityContract_CrossShard_TxProcessor(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + // Scenario: // 1. 
Deploy FunderSC on shard 0, owned by funderOwner // 2. Deploy ParentSC on shard 1, owned by parentOwner; deployment needs address of FunderSC @@ -1018,6 +1049,10 @@ func TestCommunityContract_CrossShard_TxProcessor(t *testing.T) { } func TestDeployDNSV2SetDeleteUserNames(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + senderAddressBytes, _ := vm.TestAddressPubkeyConverter.Decode(vm.DNSV2DeployerAddress) senderNonce := uint64(0) senderBalance := big.NewInt(100000000) diff --git a/keysManagement/managedPeersHolder.go b/keysManagement/managedPeersHolder.go index a347f4f2a53..8156b64c8eb 100644 --- a/keysManagement/managedPeersHolder.go +++ b/keysManagement/managedPeersHolder.go @@ -5,6 +5,7 @@ import ( "crypto/rand" "encoding/hex" "fmt" + "sort" "sync" "time" @@ -281,7 +282,7 @@ func (holder *managedPeersHolder) ResetRoundsWithoutReceivedMessages(pkBytes []b pInfo.resetRoundsWithoutReceivedMessages() } -// GetManagedKeysByCurrentNode returns all keys that will be managed by this node +// GetManagedKeysByCurrentNode returns all keys that should act as validator(main or backup that took over) and will be managed by this node func (holder *managedPeersHolder) GetManagedKeysByCurrentNode() map[string]crypto.PrivateKey { holder.mut.RLock() defer holder.mut.RUnlock() @@ -299,6 +300,23 @@ func (holder *managedPeersHolder) GetManagedKeysByCurrentNode() map[string]crypt return allManagedKeys } +// GetLoadedKeysByCurrentNode returns all keys that were loaded and will be managed by this node +func (holder *managedPeersHolder) GetLoadedKeysByCurrentNode() [][]byte { + holder.mut.RLock() + defer holder.mut.RUnlock() + + allLoadedKeys := make([][]byte, 0, len(holder.data)) + for pk := range holder.data { + allLoadedKeys = append(allLoadedKeys, []byte(pk)) + } + + sort.Slice(allLoadedKeys, func(i, j int) bool { + return string(allLoadedKeys[i]) < string(allLoadedKeys[j]) + }) + + return allLoadedKeys +} + // IsKeyManagedByCurrentNode returns true if the key is managed by the current node func (holder *managedPeersHolder) IsKeyManagedByCurrentNode(pkBytes []byte) bool { pInfo := holder.getPeerInfo(pkBytes) diff --git a/keysManagement/managedPeersHolder_test.go b/keysManagement/managedPeersHolder_test.go index 81f0dfff86b..9a8c66fb849 100644 --- a/keysManagement/managedPeersHolder_test.go +++ b/keysManagement/managedPeersHolder_test.go @@ -6,6 +6,7 @@ import ( "encoding/hex" "errors" "fmt" + "runtime" "strings" "sync" "testing" @@ -13,7 +14,7 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" - "github.com/multiversx/mx-chain-crypto-go" + crypto "github.com/multiversx/mx-chain-crypto-go" "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/keysManagement" @@ -751,6 +752,24 @@ func TestManagedPeersHolder_GetManagedKeysByCurrentNode(t *testing.T) { }) } +func TestManagedPeersHolder_GetLoadedKeysByCurrentNode(t *testing.T) { + t.Parallel() + + args := createMockArgsManagedPeersHolder() + holder, _ := keysManagement.NewManagedPeersHolder(args) + _ = holder.AddManagedPeer(skBytes1) + _ = holder.AddManagedPeer(skBytes0) + + for i := 0; i < 10; i++ { + holder.IncrementRoundsWithoutReceivedMessages(pkBytes0) + } + + result := holder.GetLoadedKeysByCurrentNode() + assert.Equal(t, 2, len(result)) + assert.Equal(t, pkBytes0, result[0]) + assert.Equal(t, pkBytes1, result[1]) +} + func TestManagedPeersHolder_IsKeyManagedByCurrentNode(t *testing.T) { t.Parallel() 
@@ -887,6 +906,10 @@ func TestManagedPeersHolder_IsKeyValidator(t *testing.T) { } func TestManagedPeersHolder_GetNextPeerAuthenticationTime(t *testing.T) { + if runtime.GOOS == "darwin" { + t.Skip("skipping on darwin") + } + t.Parallel() holder, _ := keysManagement.NewManagedPeersHolder(createMockArgsManagedPeersHolder()) diff --git a/keysManagement/managedPeersMonitor.go b/keysManagement/managedPeersMonitor.go index 2c2eef290b4..5f9f117cc2b 100644 --- a/keysManagement/managedPeersMonitor.go +++ b/keysManagement/managedPeersMonitor.go @@ -60,7 +60,7 @@ func (monitor *managedPeersMonitor) GetManagedKeysCount() int { return len(monitor.managedPeersHolder.GetManagedKeysByCurrentNode()) } -// GetManagedKeys returns all keys managed by the current node +// GetManagedKeys returns all keys that should act as validator(main or backup that took over) and will be managed by this node func (monitor *managedPeersMonitor) GetManagedKeys() [][]byte { managedKeysMap := monitor.managedPeersHolder.GetManagedKeysByCurrentNode() managedKeys := make([][]byte, 0, len(managedKeysMap)) @@ -75,6 +75,11 @@ func (monitor *managedPeersMonitor) GetManagedKeys() [][]byte { return managedKeys } +// GetLoadedKeys returns all keys that were loaded and will be managed by this node +func (monitor *managedPeersMonitor) GetLoadedKeys() [][]byte { + return monitor.managedPeersHolder.GetLoadedKeysByCurrentNode() +} + // GetEligibleManagedKeys returns eligible keys that are managed by the current node in the current epoch func (monitor *managedPeersMonitor) GetEligibleManagedKeys() ([][]byte, error) { epoch := monitor.epochProvider.CurrentEpoch() diff --git a/keysManagement/managedPeersMonitor_test.go b/keysManagement/managedPeersMonitor_test.go index 9ec9dbcd8ad..4be6a5282ca 100644 --- a/keysManagement/managedPeersMonitor_test.go +++ b/keysManagement/managedPeersMonitor_test.go @@ -281,3 +281,20 @@ func TestManagedPeersMonitor_GetManagedKeys(t *testing.T) { keys := monitor.GetManagedKeys() require.Equal(t, expectedManagedKeys, keys) } + +func TestManagedPeersMonitor_GetLoadedKeys(t *testing.T) { + t.Parallel() + + loadedKeys := [][]byte{[]byte("pk1"), []byte("pk2"), []byte("pk3")} + args := createMockArgManagedPeersMonitor() + args.ManagedPeersHolder = &testscommon.ManagedPeersHolderStub{ + GetLoadedKeysByCurrentNodeCalled: func() [][]byte { + return loadedKeys + }, + } + monitor, err := NewManagedPeersMonitor(args) + require.NoError(t, err) + + keys := monitor.GetLoadedKeys() + require.Equal(t, loadedKeys, keys) +} diff --git a/node/chainSimulator/chainSimulator.go b/node/chainSimulator/chainSimulator.go index dcd09ce4b65..a5292d72e40 100644 --- a/node/chainSimulator/chainSimulator.go +++ b/node/chainSimulator/chainSimulator.go @@ -2,36 +2,60 @@ package chainSimulator import ( "bytes" + "crypto/rand" + "encoding/hex" + "errors" "fmt" + "math/big" "sync" "time" "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-core-go/core/sharding" + "github.com/multiversx/mx-chain-core-go/data/api" "github.com/multiversx/mx-chain-core-go/data/endProcess" + "github.com/multiversx/mx-chain-core-go/data/transaction" crypto "github.com/multiversx/mx-chain-crypto-go" + "github.com/multiversx/mx-chain-crypto-go/signing" + "github.com/multiversx/mx-chain-crypto-go/signing/mcl" + "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/node/chainSimulator/components" "github.com/multiversx/mx-chain-go/node/chainSimulator/configs" 
"github.com/multiversx/mx-chain-go/node/chainSimulator/dtos" "github.com/multiversx/mx-chain-go/node/chainSimulator/process" + mxChainSharding "github.com/multiversx/mx-chain-go/sharding" logger "github.com/multiversx/mx-chain-logger-go" ) +const delaySendTxs = time.Millisecond + var log = logger.GetOrCreate("chainSimulator") +type transactionWithResult struct { + hexHash string + tx *transaction.Transaction + result *transaction.ApiTransactionResult +} + // ArgsChainSimulator holds the arguments needed to create a new instance of simulator type ArgsChainSimulator struct { - BypassTxSignatureCheck bool - TempDir string - PathToInitialConfig string - NumOfShards uint32 - MinNodesPerShard uint32 - MetaChainMinNodes uint32 - GenesisTimestamp int64 - InitialRound int64 - RoundDurationInMillis uint64 - RoundsPerEpoch core.OptionalUint64 - ApiInterface components.APIConfigurator + BypassTxSignatureCheck bool + TempDir string + PathToInitialConfig string + NumOfShards uint32 + MinNodesPerShard uint32 + MetaChainMinNodes uint32 + NumNodesWaitingListShard uint32 + NumNodesWaitingListMeta uint32 + GenesisTimestamp int64 + InitialRound int64 + InitialEpoch uint32 + InitialNonce uint64 + RoundDurationInMillis uint64 + RoundsPerEpoch core.OptionalUint64 + ApiInterface components.APIConfigurator + AlterConfigsFunction func(cfg *config.Configs) } type simulator struct { @@ -39,6 +63,7 @@ type simulator struct { syncedBroadcastNetwork components.SyncedBroadcastNetworkHandler handlers []ChainHandler initialWalletKeys *dtos.InitialWalletKeys + initialStakedKeys map[string]*dtos.BLSKey validatorsPrivateKeys []crypto.PrivateKey nodes map[uint32]process.NodeHandler numOfShards uint32 @@ -56,6 +81,7 @@ func NewChainSimulator(args ArgsChainSimulator) (*simulator, error) { numOfShards: args.NumOfShards, chanStopNodeProcess: make(chan endProcess.ArgEndProcess), mutex: sync.RWMutex{}, + initialStakedKeys: make(map[string]*dtos.BLSKey), } err := instance.createChainHandlers(args) @@ -68,14 +94,18 @@ func NewChainSimulator(args ArgsChainSimulator) (*simulator, error) { func (s *simulator) createChainHandlers(args ArgsChainSimulator) error { outputConfigs, err := configs.CreateChainSimulatorConfigs(configs.ArgsChainSimulatorConfigs{ - NumOfShards: args.NumOfShards, - OriginalConfigsPath: args.PathToInitialConfig, - GenesisTimeStamp: computeStartTimeBaseOnInitialRound(args), - RoundDurationInMillis: args.RoundDurationInMillis, - TempDir: args.TempDir, - MinNodesPerShard: args.MinNodesPerShard, - MetaChainMinNodes: args.MetaChainMinNodes, - RoundsPerEpoch: args.RoundsPerEpoch, + NumOfShards: args.NumOfShards, + OriginalConfigsPath: args.PathToInitialConfig, + GenesisTimeStamp: computeStartTimeBaseOnInitialRound(args), + RoundDurationInMillis: args.RoundDurationInMillis, + TempDir: args.TempDir, + MinNodesPerShard: args.MinNodesPerShard, + MetaChainMinNodes: args.MetaChainMinNodes, + RoundsPerEpoch: args.RoundsPerEpoch, + InitialEpoch: args.InitialEpoch, + AlterConfigsFunction: args.AlterConfigsFunction, + NumNodesWaitingListShard: args.NumNodesWaitingListShard, + NumNodesWaitingListMeta: args.NumNodesWaitingListMeta, }) if err != nil { return err @@ -133,8 +163,10 @@ func (s *simulator) createTestNode( APIInterface: args.ApiInterface, BypassTxSignatureCheck: args.BypassTxSignatureCheck, InitialRound: args.InitialRound, + InitialNonce: args.InitialNonce, MinNodesPerShard: args.MinNodesPerShard, MinNodesMeta: args.MetaChainMinNodes, + RoundDurationInMillis: args.RoundDurationInMillis, } return 
components.NewTestOnlyProcessingNode(argsTestOnlyProcessorNode) @@ -155,6 +187,51 @@ func (s *simulator) GenerateBlocks(numOfBlocks int) error { return nil } +// GenerateBlocksUntilEpochIsReached will generate blocks until the epoch is reached +func (s *simulator) GenerateBlocksUntilEpochIsReached(targetEpoch int32) error { + s.mutex.Lock() + defer s.mutex.Unlock() + + maxNumberOfRounds := 10000 + for idx := 0; idx < maxNumberOfRounds; idx++ { + s.incrementRoundOnAllValidators() + err := s.allNodesCreateBlocks() + if err != nil { + return err + } + + epochReachedOnAllNodes, err := s.isTargetEpochReached(targetEpoch) + if err != nil { + return err + } + + if epochReachedOnAllNodes { + return nil + } + } + return fmt.Errorf("exceeded rounds to generate blocks") +} + +func (s *simulator) isTargetEpochReached(targetEpoch int32) (bool, error) { + metachainNode := s.nodes[core.MetachainShardId] + metachainEpoch := metachainNode.GetCoreComponents().EnableEpochsHandler().GetCurrentEpoch() + + for shardID, n := range s.nodes { + if shardID != core.MetachainShardId { + if int32(n.GetCoreComponents().EnableEpochsHandler().GetCurrentEpoch()) < int32(metachainEpoch-1) { + return false, fmt.Errorf("shard %d is with at least 2 epochs behind metachain shard node epoch %d, metachain node epoch %d", + shardID, n.GetCoreComponents().EnableEpochsHandler().GetCurrentEpoch(), metachainEpoch) + } + } + + if int32(n.GetCoreComponents().EnableEpochsHandler().GetCurrentEpoch()) < targetEpoch { + return false, nil + } + } + + return true, nil +} + func (s *simulator) incrementRoundOnAllValidators() { for _, node := range s.handlers { node.IncrementRound() @@ -165,6 +242,7 @@ func (s *simulator) allNodesCreateBlocks() error { for _, node := range s.handlers { // TODO MX-15150 remove this when we remove all goroutines time.Sleep(2 * time.Millisecond) + err := node.CreateNewBlock() if err != nil { return err @@ -215,6 +293,53 @@ func (s *simulator) AddValidatorKeys(validatorsPrivateKeys [][]byte) error { return nil } +// GenerateAndMintWalletAddress will generate an address in the provided shard and will mint that address with the provided value +// if the target shard ID value does not correspond to a node handled by the chain simulator, the address will be generated in a random shard ID +func (s *simulator) GenerateAndMintWalletAddress(targetShardID uint32, value *big.Int) (dtos.WalletAddress, error) { + addressConverter := s.nodes[core.MetachainShardId].GetCoreComponents().AddressPubKeyConverter() + nodeHandler := s.GetNodeHandler(targetShardID) + var buff []byte + if check.IfNil(nodeHandler) { + buff = generateAddress(addressConverter.Len()) + } else { + buff = generateAddressInShard(nodeHandler.GetShardCoordinator(), addressConverter.Len()) + } + + address, err := addressConverter.Encode(buff) + if err != nil { + return dtos.WalletAddress{}, err + } + + err = s.SetStateMultiple([]*dtos.AddressState{ + { + Address: address, + Balance: value.String(), + }, + }) + + return dtos.WalletAddress{ + Bech32: address, + Bytes: buff, + }, err +} + +func generateAddressInShard(shardCoordinator mxChainSharding.Coordinator, len int) []byte { + for { + buff := generateAddress(len) + shardID := shardCoordinator.ComputeId(buff) + if shardID == shardCoordinator.SelfId() { + return buff + } + } +} + +func generateAddress(len int) []byte { + buff := make([]byte, len) + _, _ = rand.Read(buff) + + return buff +} + func (s *simulator) setValidatorKeysForNode(node process.NodeHandler, validatorsPrivateKeys [][]byte) error { for idx, 
privateKey := range validatorsPrivateKeys { @@ -296,6 +421,120 @@ func (s *simulator) SetStateMultiple(stateSlice []*dtos.AddressState) error { return nil } +// SendTxAndGenerateBlockTilTxIsExecuted will send the provided transaction and generate block until the transaction is executed +func (s *simulator) SendTxAndGenerateBlockTilTxIsExecuted(txToSend *transaction.Transaction, maxNumOfBlocksToGenerateWhenExecutingTx int) (*transaction.ApiTransactionResult, error) { + result, err := s.SendTxsAndGenerateBlocksTilAreExecuted([]*transaction.Transaction{txToSend}, maxNumOfBlocksToGenerateWhenExecutingTx) + if err != nil { + return nil, err + } + + return result[0], nil +} + +// SendTxsAndGenerateBlocksTilAreExecuted will send the provided transactions and generate block until all transactions are executed +func (s *simulator) SendTxsAndGenerateBlocksTilAreExecuted(txsToSend []*transaction.Transaction, maxNumOfBlocksToGenerateWhenExecutingTx int) ([]*transaction.ApiTransactionResult, error) { + if len(txsToSend) == 0 { + return nil, errEmptySliceOfTxs + } + if maxNumOfBlocksToGenerateWhenExecutingTx == 0 { + return nil, errInvalidMaxNumOfBlocks + } + + transactionStatus := make([]*transactionWithResult, 0, len(txsToSend)) + for idx, tx := range txsToSend { + if tx == nil { + return nil, fmt.Errorf("%w on position %d", errNilTransaction, idx) + } + + txHashHex, err := s.sendTx(tx) + if err != nil { + return nil, err + } + + transactionStatus = append(transactionStatus, &transactionWithResult{ + hexHash: txHashHex, + tx: tx, + }) + } + + time.Sleep(delaySendTxs) + + for count := 0; count < maxNumOfBlocksToGenerateWhenExecutingTx; count++ { + err := s.GenerateBlocks(1) + if err != nil { + return nil, err + } + + txsAreExecuted := s.computeTransactionsStatus(transactionStatus) + if txsAreExecuted { + return getApiTransactionsFromResult(transactionStatus), nil + } + } + + return nil, errors.New("something went wrong. 
Transaction(s) is/are still in pending") +} + +func (s *simulator) computeTransactionsStatus(txsWithResult []*transactionWithResult) bool { + allAreExecuted := true + for _, resultTx := range txsWithResult { + if resultTx.result != nil { + continue + } + + sentTx := resultTx.tx + destinationShardID := s.GetNodeHandler(0).GetShardCoordinator().ComputeId(sentTx.RcvAddr) + result, errGet := s.GetNodeHandler(destinationShardID).GetFacadeHandler().GetTransaction(resultTx.hexHash, true) + if errGet == nil && result.Status != transaction.TxStatusPending { + log.Info("############## transaction was executed ##############", "txHash", resultTx.hexHash) + resultTx.result = result + continue + } + + allAreExecuted = false + } + + return allAreExecuted +} + +func getApiTransactionsFromResult(txWithResult []*transactionWithResult) []*transaction.ApiTransactionResult { + result := make([]*transaction.ApiTransactionResult, 0, len(txWithResult)) + for _, tx := range txWithResult { + result = append(result, tx.result) + } + + return result +} + +func (s *simulator) sendTx(tx *transaction.Transaction) (string, error) { + shardID := s.GetNodeHandler(0).GetShardCoordinator().ComputeId(tx.SndAddr) + err := s.GetNodeHandler(shardID).GetFacadeHandler().ValidateTransaction(tx) + if err != nil { + return "", err + } + + node := s.GetNodeHandler(shardID) + txHash, err := core.CalculateHash(node.GetCoreComponents().InternalMarshalizer(), node.GetCoreComponents().Hasher(), tx) + if err != nil { + return "", err + } + + txHashHex := hex.EncodeToString(txHash) + _, err = node.GetFacadeHandler().SendBulkTransactions([]*transaction.Transaction{tx}) + if err != nil { + return "", err + } + + for { + recoveredTx, _ := node.GetFacadeHandler().GetTransaction(txHashHex, false) + if recoveredTx != nil { + log.Info("############## send transaction ##############", "txHash", txHashHex) + return txHashHex, nil + } + + time.Sleep(delaySendTxs) + } +} + func (s *simulator) setStateSystemAccount(state *dtos.AddressState) error { for shard, node := range s.nodes { err := node.SetStateForAddress(core.SystemAccountAddress, state) @@ -307,8 +546,16 @@ func (s *simulator) setStateSystemAccount(state *dtos.AddressState) error { return nil } +// GetAccount will fetch the account of the provided address +func (s *simulator) GetAccount(address dtos.WalletAddress) (api.AccountResponse, error) { + destinationShardID := s.GetNodeHandler(0).GetShardCoordinator().ComputeId(address.Bytes) + + account, _, err := s.GetNodeHandler(destinationShardID).GetFacadeHandler().GetAccount(address.Bech32, api.AccountQueryOptions{}) + return account, err +} + // Close will stop and close the simulator -func (s *simulator) Close() error { +func (s *simulator) Close() { s.mutex.Lock() defer s.mutex.Unlock() @@ -320,14 +567,39 @@ func (s *simulator) Close() error { } } - if len(errorStrings) == 0 { - return nil + if len(errorStrings) != 0 { + log.Error("error closing chain simulator", "error", components.AggregateErrors(errorStrings, components.ErrClose)) } - - return components.AggregateErrors(errorStrings, components.ErrClose) } // IsInterfaceNil returns true if there is no value under the interface func (s *simulator) IsInterfaceNil() bool { return s == nil } + +// GenerateBlsPrivateKeys will generate bls keys +func GenerateBlsPrivateKeys(numOfKeys int) ([][]byte, []string, error) { + blockSigningGenerator := signing.NewKeyGenerator(mcl.NewSuiteBLS12()) + + secretKeysBytes := make([][]byte, 0, numOfKeys) + blsKeysHex := make([]string, 0, numOfKeys) + for idx := 
0; idx < numOfKeys; idx++ { + secretKey, publicKey := blockSigningGenerator.GeneratePair() + + secretKeyBytes, err := secretKey.ToByteArray() + if err != nil { + return nil, nil, err + } + + secretKeysBytes = append(secretKeysBytes, secretKeyBytes) + + publicKeyBytes, err := publicKey.ToByteArray() + if err != nil { + return nil, nil, err + } + + blsKeysHex = append(blsKeysHex, hex.EncodeToString(publicKeyBytes)) + } + + return secretKeysBytes, blsKeysHex, nil +} diff --git a/node/chainSimulator/chainSimulator_test.go b/node/chainSimulator/chainSimulator_test.go index 17eebfc81d7..1a65b37ff78 100644 --- a/node/chainSimulator/chainSimulator_test.go +++ b/node/chainSimulator/chainSimulator_test.go @@ -2,8 +2,6 @@ package chainSimulator import ( "encoding/base64" - "encoding/hex" - "fmt" "math/big" "testing" "time" @@ -24,6 +22,10 @@ const ( ) func TestNewChainSimulator(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + startTime := time.Now().Unix() roundDurationInMillis := uint64(6000) chainSimulator, err := NewChainSimulator(ArgsChainSimulator{ @@ -43,11 +45,14 @@ func TestNewChainSimulator(t *testing.T) { time.Sleep(time.Second) - err = chainSimulator.Close() - assert.Nil(t, err) + chainSimulator.Close() } func TestChainSimulator_GenerateBlocksShouldWork(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + startTime := time.Now().Unix() roundDurationInMillis := uint64(6000) chainSimulator, err := NewChainSimulator(ArgsChainSimulator{ @@ -57,22 +62,26 @@ func TestChainSimulator_GenerateBlocksShouldWork(t *testing.T) { NumOfShards: 3, GenesisTimestamp: startTime, RoundDurationInMillis: roundDurationInMillis, - RoundsPerEpoch: core.OptionalUint64{}, - ApiInterface: api.NewNoApiInterface(), - MinNodesPerShard: 1, - MetaChainMinNodes: 1, - InitialRound: 200000000, + RoundsPerEpoch: core.OptionalUint64{ + HasValue: true, + Value: 20, + }, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 1, + MetaChainMinNodes: 1, + InitialRound: 200000000, + InitialEpoch: 100, + InitialNonce: 100, }) require.Nil(t, err) require.NotNil(t, chainSimulator) + defer chainSimulator.Close() + time.Sleep(time.Second) - err = chainSimulator.GenerateBlocks(30) + err = chainSimulator.GenerateBlocks(50) require.Nil(t, err) - - err = chainSimulator.Close() - assert.Nil(t, err) } func TestChainSimulator_GenerateBlocksAndEpochChangeShouldWork(t *testing.T) { @@ -101,31 +110,42 @@ func TestChainSimulator_GenerateBlocksAndEpochChangeShouldWork(t *testing.T) { require.Nil(t, err) require.NotNil(t, chainSimulator) + defer chainSimulator.Close() + facade, err := NewChainSimulatorFacade(chainSimulator) require.Nil(t, err) - genesisAddressWithStake := chainSimulator.initialWalletKeys.InitialWalletWithStake.Address - initialAccount, err := facade.GetExistingAccountFromBech32AddressString(genesisAddressWithStake) - require.Nil(t, err) + genesisBalances := make(map[string]*big.Int) + for _, stakeWallet := range chainSimulator.initialWalletKeys.StakeWallets { + initialAccount, errGet := facade.GetExistingAccountFromBech32AddressString(stakeWallet.Address.Bech32) + require.Nil(t, errGet) + + genesisBalances[stakeWallet.Address.Bech32] = initialAccount.GetBalance() + } time.Sleep(time.Second) err = chainSimulator.GenerateBlocks(80) require.Nil(t, err) - accountAfterRewards, err := facade.GetExistingAccountFromBech32AddressString(genesisAddressWithStake) - require.Nil(t, err) - - assert.True(t, accountAfterRewards.GetBalance().Cmp(initialAccount.GetBalance()) > 0, - 
fmt.Sprintf("initial balance %s, balance after rewards %s", initialAccount.GetBalance().String(), accountAfterRewards.GetBalance().String())) + numAccountsWithIncreasedBalances := 0 + for _, stakeWallet := range chainSimulator.initialWalletKeys.StakeWallets { + account, errGet := facade.GetExistingAccountFromBech32AddressString(stakeWallet.Address.Bech32) + require.Nil(t, errGet) - fmt.Println(chainSimulator.GetRestAPIInterfaces()) + if account.GetBalance().Cmp(genesisBalances[stakeWallet.Address.Bech32]) > 0 { + numAccountsWithIncreasedBalances++ + } + } - err = chainSimulator.Close() - assert.Nil(t, err) + assert.True(t, numAccountsWithIncreasedBalances > 0) } func TestChainSimulator_SetState(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + startTime := time.Now().Unix() roundDurationInMillis := uint64(6000) roundsPerEpoch := core.OptionalUint64{ @@ -147,6 +167,8 @@ func TestChainSimulator_SetState(t *testing.T) { require.Nil(t, err) require.NotNil(t, chainSimulator) + defer chainSimulator.Close() + keyValueMap := map[string]string{ "01": "01", "02": "02", @@ -165,12 +187,11 @@ func TestChainSimulator_SetState(t *testing.T) { require.Equal(t, keyValueMap, keyValuePairs) } -// Test scenario -// 1. Add a new validator private key in the multi key handler -// 2. Do a stake transaction for the validator key -// 3. Do an unstake transaction (to make a place for the new validator) -// 4. Check if the new validator has generated rewards -func TestChainSimulator_AddValidatorKey(t *testing.T) { +func TestChainSimulator_SetEntireState(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + startTime := time.Now().Unix() roundDurationInMillis := uint64(6000) roundsPerEpoch := core.OptionalUint64{ @@ -186,141 +207,126 @@ func TestChainSimulator_AddValidatorKey(t *testing.T) { RoundDurationInMillis: roundDurationInMillis, RoundsPerEpoch: roundsPerEpoch, ApiInterface: api.NewNoApiInterface(), - MinNodesPerShard: 3, - MetaChainMinNodes: 3, + MinNodesPerShard: 1, + MetaChainMinNodes: 1, }) require.Nil(t, err) require.NotNil(t, chainSimulator) - err = chainSimulator.GenerateBlocks(30) - require.Nil(t, err) - - // Step 1 --- add a new validator key in the chain simulator - privateKeyBase64 := "NjRhYjk3NmJjYWVjZTBjNWQ4YmJhNGU1NjZkY2VmYWFiYjcxNDI1Y2JiZDcwYzc1ODA2MGUxNTE5MGM2ZjE1Zg==" - privateKeyHex, err := base64.StdEncoding.DecodeString(privateKeyBase64) - require.Nil(t, err) - privateKeyBytes, err := hex.DecodeString(string(privateKeyHex)) - require.Nil(t, err) - - err = chainSimulator.AddValidatorKeys([][]byte{privateKeyBytes}) - require.Nil(t, err) - - newValidatorOwner := "erd1l6xt0rqlyzw56a3k8xwwshq2dcjwy3q9cppucvqsmdyw8r98dz3sae0kxl" - newValidatorOwnerBytes, _ := chainSimulator.nodes[1].GetCoreComponents().AddressPubKeyConverter().Decode(newValidatorOwner) - rcv := "erd1qqqqqqqqqqqqqqqpqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqplllst77y4l" - rcvAddrBytes, _ := chainSimulator.nodes[1].GetCoreComponents().AddressPubKeyConverter().Decode(rcv) + defer chainSimulator.Close() - // Step 2 --- set an initial balance for the address that will initialize all the transactions - err = chainSimulator.SetStateMultiple([]*dtos.AddressState{ - { - Address: "erd1l6xt0rqlyzw56a3k8xwwshq2dcjwy3q9cppucvqsmdyw8r98dz3sae0kxl", - Balance: "10000000000000000000000", + balance := "431271308732096033771131" + contractAddress := "erd1qqqqqqqqqqqqqpgqmzzm05jeav6d5qvna0q2pmcllelkz8xddz3syjszx5" + accountState := &dtos.AddressState{ + Address: contractAddress, + Nonce: 
new(uint64), + Balance: balance, + Code: "0061736d010000000129086000006000017f60027f7f017f60027f7f0060017f0060037f7f7f017f60037f7f7f0060017f017f0290020b03656e7619626967496e74476574556e7369676e6564417267756d656e74000303656e760f6765744e756d417267756d656e7473000103656e760b7369676e616c4572726f72000303656e76126d42756666657253746f726167654c6f6164000203656e76176d427566666572546f426967496e74556e7369676e6564000203656e76196d42756666657246726f6d426967496e74556e7369676e6564000203656e76136d42756666657253746f7261676553746f7265000203656e760f6d4275666665725365744279746573000503656e760e636865636b4e6f5061796d656e74000003656e7614626967496e7446696e697368556e7369676e6564000403656e7609626967496e744164640006030b0a010104070301000000000503010003060f027f0041a080080b7f0041a080080b074607066d656d6f7279020004696e697400110667657453756d00120361646400130863616c6c4261636b00140a5f5f646174615f656e6403000b5f5f686561705f6261736503010aca010a0e01017f4100100c2200100020000b1901017f419c8008419c800828020041016b220036020020000b1400100120004604400f0b4180800841191002000b16002000100c220010031a2000100c220010041a20000b1401017f100c2202200110051a2000200210061a0b1301017f100c220041998008410310071a20000b1401017f10084101100d100b210010102000100f0b0e0010084100100d1010100e10090b2201037f10084101100d100b210110102202100e220020002001100a20022000100f0b0300010b0b2f0200418080080b1c77726f6e67206e756d626572206f6620617267756d656e747373756d00419c80080b049cffffff", + CodeHash: "n9EviPlHS6EV+3Xp0YqP28T0IUfeAFRFBIRC1Jw6pyU=", + RootHash: "76cr5Jhn6HmBcDUMIzikEpqFgZxIrOzgNkTHNatXzC4=", + CodeMetadata: "BQY=", + Owner: "erd1ss6u80ruas2phpmr82r42xnkd6rxy40g9jl69frppl4qez9w2jpsqj8x97", + DeveloperRewards: "5401004999998", + Keys: map[string]string{ + "73756d": "0a", }, - }) - require.Nil(t, err) - - blsKey := "9b7de1b2d2c90b7bea8f6855075c77d6c63b5dada29abb9b87c52cfae9d4112fcac13279e1a07d94672a5e62a83e3716555513014324d5c6bb4261b465f1b8549a7a338bc3ae8edc1e940958f9c2e296bd3c118a4466dec99dda0ceee3eb6a8c" - - // Step 3 --- generate and send a stake transaction with the BLS key of the validator key that was added at step 1 - stakeValue, _ := big.NewInt(0).SetString("2500000000000000000000", 10) - tx := &transaction.Transaction{ - Nonce: 0, - Value: stakeValue, - SndAddr: newValidatorOwnerBytes, - RcvAddr: rcvAddrBytes, - Data: []byte(fmt.Sprintf("stake@01@%s@010101", blsKey)), - GasLimit: 50_000_000, - GasPrice: 1000000000, - Signature: []byte("dummy"), - ChainID: []byte(configs.ChainID), - Version: 1, } - err = chainSimulator.nodes[1].GetFacadeHandler().ValidateTransaction(tx) + err = chainSimulator.SetStateMultiple([]*dtos.AddressState{accountState}) require.Nil(t, err) - _, err = chainSimulator.nodes[1].GetFacadeHandler().SendBulkTransactions([]*transaction.Transaction{tx}) + err = chainSimulator.GenerateBlocks(30) require.Nil(t, err) - time.Sleep(100 * time.Millisecond) - - // Step 4 --- generate 5 blocks so that the transaction from step 2 can be executed - err = chainSimulator.GenerateBlocks(5) + nodeHandler := chainSimulator.GetNodeHandler(1) + scAddress, _ := nodeHandler.GetCoreComponents().AddressPubKeyConverter().Decode(contractAddress) + res, _, err := nodeHandler.GetFacadeHandler().ExecuteSCQuery(&process.SCQuery{ + ScAddress: scAddress, + FuncName: "getSum", + CallerAddr: nil, + BlockNonce: core.OptionalUint64{}, + }) require.Nil(t, err) - txHash, err := computeTxHash(chainSimulator, tx) - require.Nil(t, err) - txFromMeta, err := chainSimulator.nodes[core.MetachainShardId].GetFacadeHandler().GetTransaction(txHash, true) - require.Nil(t, err) - 
require.NotNil(t, txFromMeta) - require.Equal(t, 2, len(txFromMeta.SmartContractResults)) + counterValue := big.NewInt(0).SetBytes(res.ReturnData[0]).Int64() + require.Equal(t, 10, int(counterValue)) - shardIDValidatorOwner := chainSimulator.nodes[0].GetShardCoordinator().ComputeId(newValidatorOwnerBytes) - accountValidatorOwner, _, err := chainSimulator.GetNodeHandler(shardIDValidatorOwner).GetFacadeHandler().GetAccount(newValidatorOwner, coreAPI.AccountQueryOptions{}) - require.Nil(t, err) - balanceBeforeActiveValidator := accountValidatorOwner.Balance + time.Sleep(time.Second) - // Step 5 --- create an unStake transaction with the bls key of an initial validator and execute the transaction to make place for the validator that was added at step 3 - firstValidatorKey, err := chainSimulator.GetValidatorPrivateKeys()[0].GeneratePublic().ToByteArray() + account, _, err := nodeHandler.GetFacadeHandler().GetAccount(contractAddress, coreAPI.AccountQueryOptions{}) require.Nil(t, err) + require.Equal(t, accountState.Balance, account.Balance) + require.Equal(t, accountState.DeveloperRewards, account.DeveloperReward) + require.Equal(t, accountState.Code, account.Code) + require.Equal(t, accountState.CodeHash, base64.StdEncoding.EncodeToString(account.CodeHash)) + require.Equal(t, accountState.CodeMetadata, base64.StdEncoding.EncodeToString(account.CodeMetadata)) + require.Equal(t, accountState.Owner, account.OwnerAddress) + require.Equal(t, accountState.RootHash, base64.StdEncoding.EncodeToString(account.RootHash)) +} - initialAddressWithValidators := chainSimulator.GetInitialWalletKeys().InitialWalletWithStake.Address - senderBytes, _ := chainSimulator.nodes[1].GetCoreComponents().AddressPubKeyConverter().Decode(initialAddressWithValidators) - shardID := chainSimulator.nodes[0].GetShardCoordinator().ComputeId(senderBytes) - initialAccount, _, err := chainSimulator.nodes[shardID].GetFacadeHandler().GetAccount(initialAddressWithValidators, coreAPI.AccountQueryOptions{}) - require.Nil(t, err) - tx = &transaction.Transaction{ - Nonce: initialAccount.Nonce, - Value: big.NewInt(0), - SndAddr: senderBytes, - RcvAddr: rcvAddrBytes, - Data: []byte(fmt.Sprintf("unStake@%s", hex.EncodeToString(firstValidatorKey))), - GasLimit: 50_000_000, - GasPrice: 1000000000, - Signature: []byte("dummy"), - ChainID: []byte(configs.ChainID), - Version: 1, +func TestChainSimulator_GetAccount(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") } - err = chainSimulator.nodes[shardID].GetFacadeHandler().ValidateTransaction(tx) - require.Nil(t, err) - _, err = chainSimulator.nodes[shardID].GetFacadeHandler().SendBulkTransactions([]*transaction.Transaction{tx}) + startTime := time.Now().Unix() + roundDurationInMillis := uint64(6000) + roundsPerEpoch := core.OptionalUint64{ + HasValue: true, + Value: 20, + } + chainSimulator, err := NewChainSimulator(ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: startTime, + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 1, + MetaChainMinNodes: 1, + }) require.Nil(t, err) + require.NotNil(t, chainSimulator) - time.Sleep(100 * time.Millisecond) - - // Step 6 --- generate 5 blocks so that the transaction from step 5 can be executed - err = chainSimulator.GenerateBlocks(5) - require.Nil(t, err) + // the facade's GetAccount method requires that at least one block was 
produced over the genesis block + _ = chainSimulator.GenerateBlocks(1) - txHash, err = computeTxHash(chainSimulator, tx) - require.Nil(t, err) - txFromMeta, err = chainSimulator.nodes[core.MetachainShardId].GetFacadeHandler().GetTransaction(txHash, true) - require.Nil(t, err) - require.NotNil(t, txFromMeta) - require.Equal(t, 2, len(txFromMeta.SmartContractResults)) + defer chainSimulator.Close() - // Step 6 --- generate 50 blocks to pass 2 epochs and the validator to generate rewards - err = chainSimulator.GenerateBlocks(50) - require.Nil(t, err) + address := dtos.WalletAddress{ + Bech32: "erd1qtc600lryvytxuy4h7vn7xmsy5tw6vuw3tskr75cwnmv4mnyjgsq6e5zgj", + } + address.Bytes, err = chainSimulator.GetNodeHandler(0).GetCoreComponents().AddressPubKeyConverter().Decode(address.Bech32) + assert.Nil(t, err) - accountValidatorOwner, _, err = chainSimulator.GetNodeHandler(shardIDValidatorOwner).GetFacadeHandler().GetAccount(newValidatorOwner, coreAPI.AccountQueryOptions{}) - require.Nil(t, err) - balanceAfterActiveValidator := accountValidatorOwner.Balance + account, err := chainSimulator.GetAccount(address) + assert.Nil(t, err) + assert.Equal(t, uint64(0), account.Nonce) + assert.Equal(t, "0", account.Balance) - log.Info("balance before validator", "value", balanceBeforeActiveValidator) - log.Info("balance after validator", "value", balanceAfterActiveValidator) + nonce := uint64(37) + err = chainSimulator.SetStateMultiple([]*dtos.AddressState{ + { + Address: address.Bech32, + Nonce: &nonce, + Balance: big.NewInt(38).String(), + }, + }) + assert.Nil(t, err) - balanceBeforeBig, _ := big.NewInt(0).SetString(balanceBeforeActiveValidator, 10) - balanceAfterBig, _ := big.NewInt(0).SetString(balanceAfterActiveValidator, 10) - diff := balanceAfterBig.Sub(balanceAfterBig, balanceBeforeBig) - log.Info("difference", "value", diff.String()) + // without this call the test will fail because the latest produced block points to a state roothash that tells that + // the account has the nonce 0 + _ = chainSimulator.GenerateBlocks(1) - // Step 7 --- check the balance of the validator owner has been increased - require.True(t, diff.Cmp(big.NewInt(0)) > 0) + account, err = chainSimulator.GetAccount(address) + assert.Nil(t, err) + assert.Equal(t, uint64(37), account.Nonce) + assert.Equal(t, "38", account.Balance) } -func TestChainSimulator_SetEntireState(t *testing.T) { +func TestSimulator_SendTransactions(t *testing.T) { if testing.Short() { t.Skip("this is not a short test") } @@ -346,61 +352,91 @@ func TestChainSimulator_SetEntireState(t *testing.T) { require.Nil(t, err) require.NotNil(t, chainSimulator) - balance := "431271308732096033771131" - contractAddress := "erd1qqqqqqqqqqqqqpgqmzzm05jeav6d5qvna0q2pmcllelkz8xddz3syjszx5" - accountState := &dtos.AddressState{ - Address: contractAddress, - Nonce: 0, - Balance: balance, - Code: 
"0061736d010000000129086000006000017f60027f7f017f60027f7f0060017f0060037f7f7f017f60037f7f7f0060017f017f0290020b03656e7619626967496e74476574556e7369676e6564417267756d656e74000303656e760f6765744e756d417267756d656e7473000103656e760b7369676e616c4572726f72000303656e76126d42756666657253746f726167654c6f6164000203656e76176d427566666572546f426967496e74556e7369676e6564000203656e76196d42756666657246726f6d426967496e74556e7369676e6564000203656e76136d42756666657253746f7261676553746f7265000203656e760f6d4275666665725365744279746573000503656e760e636865636b4e6f5061796d656e74000003656e7614626967496e7446696e697368556e7369676e6564000403656e7609626967496e744164640006030b0a010104070301000000000503010003060f027f0041a080080b7f0041a080080b074607066d656d6f7279020004696e697400110667657453756d00120361646400130863616c6c4261636b00140a5f5f646174615f656e6403000b5f5f686561705f6261736503010aca010a0e01017f4100100c2200100020000b1901017f419c8008419c800828020041016b220036020020000b1400100120004604400f0b4180800841191002000b16002000100c220010031a2000100c220010041a20000b1401017f100c2202200110051a2000200210061a0b1301017f100c220041998008410310071a20000b1401017f10084101100d100b210010102000100f0b0e0010084100100d1010100e10090b2201037f10084101100d100b210110102202100e220020002001100a20022000100f0b0300010b0b2f0200418080080b1c77726f6e67206e756d626572206f6620617267756d656e747373756d00419c80080b049cffffff", - CodeHash: "n9EviPlHS6EV+3Xp0YqP28T0IUfeAFRFBIRC1Jw6pyU=", - RootHash: "76cr5Jhn6HmBcDUMIzikEpqFgZxIrOzgNkTHNatXzC4=", - CodeMetadata: "BQY=", - Owner: "erd1ss6u80ruas2phpmr82r42xnkd6rxy40g9jl69frppl4qez9w2jpsqj8x97", - DeveloperRewards: "5401004999998", - Keys: map[string]string{ - "73756d": "0a", - }, - } + defer chainSimulator.Close() - err = chainSimulator.SetStateMultiple([]*dtos.AddressState{accountState}) - require.Nil(t, err) + oneEgld := big.NewInt(1000000000000000000) + initialMinting := big.NewInt(0).Mul(oneEgld, big.NewInt(100)) + transferValue := big.NewInt(0).Mul(oneEgld, big.NewInt(5)) - err = chainSimulator.GenerateBlocks(30) + wallet0, err := chainSimulator.GenerateAndMintWalletAddress(0, initialMinting) require.Nil(t, err) - nodeHandler := chainSimulator.GetNodeHandler(1) - scAddress, _ := nodeHandler.GetCoreComponents().AddressPubKeyConverter().Decode(contractAddress) - res, _, err := nodeHandler.GetFacadeHandler().ExecuteSCQuery(&process.SCQuery{ - ScAddress: scAddress, - FuncName: "getSum", - CallerAddr: nil, - BlockNonce: core.OptionalUint64{}, - }) + wallet1, err := chainSimulator.GenerateAndMintWalletAddress(1, initialMinting) require.Nil(t, err) - counterValue := big.NewInt(0).SetBytes(res.ReturnData[0]).Int64() - require.Equal(t, 10, int(counterValue)) + wallet2, err := chainSimulator.GenerateAndMintWalletAddress(2, initialMinting) + require.Nil(t, err) - time.Sleep(time.Second) + wallet3, err := chainSimulator.GenerateAndMintWalletAddress(2, initialMinting) + require.Nil(t, err) - account, _, err := nodeHandler.GetFacadeHandler().GetAccount(contractAddress, coreAPI.AccountQueryOptions{}) + wallet4, err := chainSimulator.GenerateAndMintWalletAddress(2, initialMinting) require.Nil(t, err) - require.Equal(t, accountState.Balance, account.Balance) - require.Equal(t, accountState.DeveloperRewards, account.DeveloperReward) - require.Equal(t, accountState.Code, account.Code) - require.Equal(t, accountState.CodeHash, base64.StdEncoding.EncodeToString(account.CodeHash)) - require.Equal(t, accountState.CodeMetadata, base64.StdEncoding.EncodeToString(account.CodeMetadata)) - require.Equal(t, accountState.Owner, 
account.OwnerAddress) - require.Equal(t, accountState.RootHash, base64.StdEncoding.EncodeToString(account.RootHash)) + + gasLimit := uint64(50000) + tx0 := generateTransaction(wallet0.Bytes, 0, wallet2.Bytes, transferValue, "", gasLimit) + tx1 := generateTransaction(wallet1.Bytes, 0, wallet2.Bytes, transferValue, "", gasLimit) + tx3 := generateTransaction(wallet3.Bytes, 0, wallet4.Bytes, transferValue, "", gasLimit) + + maxNumOfBlockToGenerateWhenExecutingTx := 15 + + t.Run("nil or empty slice of transactions should error", func(t *testing.T) { + sentTxs, errSend := chainSimulator.SendTxsAndGenerateBlocksTilAreExecuted(nil, 1) + assert.Equal(t, errEmptySliceOfTxs, errSend) + assert.Nil(t, sentTxs) + + sentTxs, errSend = chainSimulator.SendTxsAndGenerateBlocksTilAreExecuted(make([]*transaction.Transaction, 0), 1) + assert.Equal(t, errEmptySliceOfTxs, errSend) + assert.Nil(t, sentTxs) + }) + t.Run("invalid max number of blocks to generate should error", func(t *testing.T) { + sentTxs, errSend := chainSimulator.SendTxsAndGenerateBlocksTilAreExecuted([]*transaction.Transaction{tx0, tx1}, 0) + assert.Equal(t, errInvalidMaxNumOfBlocks, errSend) + assert.Nil(t, sentTxs) + }) + t.Run("nil transaction in slice should error", func(t *testing.T) { + sentTxs, errSend := chainSimulator.SendTxsAndGenerateBlocksTilAreExecuted([]*transaction.Transaction{nil}, 1) + assert.ErrorIs(t, errSend, errNilTransaction) + assert.Nil(t, sentTxs) + }) + t.Run("2 transactions from different shard should call send correctly", func(t *testing.T) { + sentTxs, errSend := chainSimulator.SendTxsAndGenerateBlocksTilAreExecuted([]*transaction.Transaction{tx0, tx1}, maxNumOfBlockToGenerateWhenExecutingTx) + assert.Equal(t, 2, len(sentTxs)) + assert.Nil(t, errSend) + + account, errGet := chainSimulator.GetAccount(wallet2) + assert.Nil(t, errGet) + expectedBalance := big.NewInt(0).Add(initialMinting, transferValue) + expectedBalance.Add(expectedBalance, transferValue) + assert.Equal(t, expectedBalance.String(), account.Balance) + }) + t.Run("1 transaction should be sent correctly", func(t *testing.T) { + _, errSend := chainSimulator.SendTxAndGenerateBlockTilTxIsExecuted(tx3, maxNumOfBlockToGenerateWhenExecutingTx) + assert.Nil(t, errSend) + + account, errGet := chainSimulator.GetAccount(wallet4) + assert.Nil(t, errGet) + expectedBalance := big.NewInt(0).Add(initialMinting, transferValue) + assert.Equal(t, expectedBalance.String(), account.Balance) + }) } -func computeTxHash(chainSimulator ChainSimulator, tx *transaction.Transaction) (string, error) { - txBytes, err := chainSimulator.GetNodeHandler(1).GetCoreComponents().InternalMarshalizer().Marshal(tx) - if err != nil { - return "", err +func generateTransaction(sender []byte, nonce uint64, receiver []byte, value *big.Int, data string, gasLimit uint64) *transaction.Transaction { + minGasPrice := uint64(1000000000) + txVersion := uint32(1) + mockTxSignature := "sig" + + transferValue := big.NewInt(0).Set(value) + return &transaction.Transaction{ + Nonce: nonce, + Value: transferValue, + SndAddr: sender, + RcvAddr: receiver, + Data: []byte(data), + GasLimit: gasLimit, + GasPrice: minGasPrice, + ChainID: []byte(configs.ChainID), + Version: txVersion, + Signature: []byte(mockTxSignature), } - - txHasBytes := chainSimulator.GetNodeHandler(1).GetCoreComponents().Hasher().Compute(string(txBytes)) - return hex.EncodeToString(txHasBytes), nil } diff --git a/node/chainSimulator/components/api/fixedAPIInterface_test.go b/node/chainSimulator/components/api/fixedAPIInterface_test.go new file 
mode 100644 index 00000000000..7348b717831 --- /dev/null +++ b/node/chainSimulator/components/api/fixedAPIInterface_test.go @@ -0,0 +1,20 @@ +package api + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/require" +) + +const apiInterface = "127.0.0.1:8080" + +func TestNewFixedPortAPIConfigurator(t *testing.T) { + t.Parallel() + + instance := NewFixedPortAPIConfigurator(apiInterface, map[uint32]int{0: 123}) + require.NotNil(t, instance) + + interf := instance.RestApiInterface(0) + require.Equal(t, fmt.Sprintf("%s:123", apiInterface), interf) +} diff --git a/node/chainSimulator/components/api/freeAPIInterface_test.go b/node/chainSimulator/components/api/freeAPIInterface_test.go new file mode 100644 index 00000000000..0b215aa0a57 --- /dev/null +++ b/node/chainSimulator/components/api/freeAPIInterface_test.go @@ -0,0 +1,19 @@ +package api + +import ( + "fmt" + "strings" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestNewFreePortAPIConfigurator(t *testing.T) { + t.Parallel() + + instance := NewFreePortAPIConfigurator(apiInterface) + require.NotNil(t, instance) + + interf := instance.RestApiInterface(0) + require.True(t, strings.Contains(interf, fmt.Sprintf("%s:", apiInterface))) +} diff --git a/node/chainSimulator/components/api/noApiInterface_test.go b/node/chainSimulator/components/api/noApiInterface_test.go new file mode 100644 index 00000000000..ee8efbc5783 --- /dev/null +++ b/node/chainSimulator/components/api/noApiInterface_test.go @@ -0,0 +1,18 @@ +package api + +import ( + "testing" + + "github.com/multiversx/mx-chain-go/facade" + "github.com/stretchr/testify/require" +) + +func TestNewNoApiInterface(t *testing.T) { + t.Parallel() + + instance := NewNoApiInterface() + require.NotNil(t, instance) + + interf := instance.RestApiInterface(0) + require.Equal(t, facade.DefaultRestPortOff, interf) +} diff --git a/node/chainSimulator/components/bootstrapComponents.go b/node/chainSimulator/components/bootstrapComponents.go index b40eeb0810d..7e0190ded2e 100644 --- a/node/chainSimulator/components/bootstrapComponents.go +++ b/node/chainSimulator/components/bootstrapComponents.go @@ -2,6 +2,7 @@ package components import ( "fmt" + "io" "github.com/multiversx/mx-chain-core-go/core" nodeFactory "github.com/multiversx/mx-chain-go/cmd/node/factory" @@ -10,6 +11,7 @@ import ( bootstrapComp "github.com/multiversx/mx-chain-go/factory/bootstrap" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/sharding" + "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" ) // ArgsBootstrapComponentsHolder will hold the components needed for the bootstrap components holders @@ -27,22 +29,21 @@ type ArgsBootstrapComponentsHolder struct { } type bootstrapComponentsHolder struct { - closeHandler *closeHandler - epochStartBootstrapper factory.EpochStartBootstrapper - epochBootstrapParams factory.BootstrapParamsHolder - nodeType core.NodeType - shardCoordinator sharding.Coordinator - versionedHeaderFactory nodeFactory.VersionedHeaderFactory - headerVersionHandler nodeFactory.HeaderVersionHandler - headerIntegrityVerifier nodeFactory.HeaderIntegrityVerifierHandler - guardedAccountHandler process.GuardedAccountHandler + epochStartBootstrapper factory.EpochStartBootstrapper + epochBootstrapParams factory.BootstrapParamsHolder + nodeType core.NodeType + shardCoordinator sharding.Coordinator + versionedHeaderFactory nodeFactory.VersionedHeaderFactory + headerVersionHandler nodeFactory.HeaderVersionHandler + headerIntegrityVerifier 
nodeFactory.HeaderIntegrityVerifierHandler + guardedAccountHandler process.GuardedAccountHandler + nodesCoordinatorRegistryFactory nodesCoordinator.NodesCoordinatorRegistryFactory + managedBootstrapComponentsCloser io.Closer } // CreateBootstrapComponents will create a new instance of bootstrap components holder -func CreateBootstrapComponents(args ArgsBootstrapComponentsHolder) (factory.BootstrapComponentsHandler, error) { - instance := &bootstrapComponentsHolder{ - closeHandler: NewCloseHandler(), - } +func CreateBootstrapComponents(args ArgsBootstrapComponentsHolder) (*bootstrapComponentsHolder, error) { + instance := &bootstrapComponentsHolder{} args.PrefsConfig.Preferences.DestinationShardAsObserver = args.ShardIDStr @@ -81,12 +82,17 @@ func CreateBootstrapComponents(args ArgsBootstrapComponentsHolder) (factory.Boot instance.headerVersionHandler = managedBootstrapComponents.HeaderVersionHandler() instance.headerIntegrityVerifier = managedBootstrapComponents.HeaderIntegrityVerifier() instance.guardedAccountHandler = managedBootstrapComponents.GuardedAccountHandler() - - instance.collectClosableComponents() + instance.nodesCoordinatorRegistryFactory = managedBootstrapComponents.NodesCoordinatorRegistryFactory() + instance.managedBootstrapComponentsCloser = managedBootstrapComponents return instance, nil } +// NodesCoordinatorRegistryFactory will return the nodes coordinator registry factory +func (b *bootstrapComponentsHolder) NodesCoordinatorRegistryFactory() nodesCoordinator.NodesCoordinatorRegistryFactory { + return b.nodesCoordinatorRegistryFactory +} + // EpochStartBootstrapper will return the epoch start bootstrapper func (b *bootstrapComponentsHolder) EpochStartBootstrapper() factory.EpochStartBootstrapper { return b.epochStartBootstrapper @@ -127,13 +133,9 @@ func (b *bootstrapComponentsHolder) GuardedAccountHandler() process.GuardedAccou return b.guardedAccountHandler } -func (b *bootstrapComponentsHolder) collectClosableComponents() { - b.closeHandler.AddComponent(b.epochStartBootstrapper) -} - // Close will call the Close methods on all inner components func (b *bootstrapComponentsHolder) Close() error { - return b.closeHandler.Close() + return b.managedBootstrapComponentsCloser.Close() } // IsInterfaceNil returns true if there is no value under the interface diff --git a/node/chainSimulator/components/bootstrapComponents_test.go b/node/chainSimulator/components/bootstrapComponents_test.go new file mode 100644 index 00000000000..7e4becdc52e --- /dev/null +++ b/node/chainSimulator/components/bootstrapComponents_test.go @@ -0,0 +1,200 @@ +package components + +import ( + "testing" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/data/typeConverters" + "github.com/multiversx/mx-chain-core-go/hashing" + "github.com/multiversx/mx-chain-core-go/marshal" + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/consensus" + "github.com/multiversx/mx-chain-go/integrationTests/mock" + "github.com/multiversx/mx-chain-go/process" + "github.com/multiversx/mx-chain-go/sharding" + "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" + "github.com/multiversx/mx-chain-go/storage" + "github.com/multiversx/mx-chain-go/testscommon" + "github.com/multiversx/mx-chain-go/testscommon/cryptoMocks" + "github.com/multiversx/mx-chain-go/testscommon/economicsmocks" + "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" + 
"github.com/multiversx/mx-chain-go/testscommon/epochNotifier" + "github.com/multiversx/mx-chain-go/testscommon/factory" + "github.com/multiversx/mx-chain-go/testscommon/genesisMocks" + "github.com/multiversx/mx-chain-go/testscommon/p2pmocks" + "github.com/multiversx/mx-chain-go/testscommon/shardingMocks" + "github.com/multiversx/mx-chain-go/testscommon/statusHandler" + "github.com/stretchr/testify/require" +) + +func createArgsBootstrapComponentsHolder() ArgsBootstrapComponentsHolder { + return ArgsBootstrapComponentsHolder{ + CoreComponents: &factory.CoreComponentsHolderStub{ + ChainIDCalled: func() string { + return "T" + }, + GenesisNodesSetupCalled: func() sharding.GenesisNodesSetupHandler { + return &genesisMocks.NodesSetupStub{} + }, + InternalMarshalizerCalled: func() marshal.Marshalizer { + return &testscommon.MarshallerStub{} + }, + EpochNotifierCalled: func() process.EpochNotifier { + return &epochNotifier.EpochNotifierStub{} + }, + EconomicsDataCalled: func() process.EconomicsDataHandler { + return &economicsmocks.EconomicsHandlerMock{} + }, + RaterCalled: func() sharding.PeerAccountListAndRatingHandler { + return &testscommon.RaterMock{} + }, + NodesShufflerCalled: func() nodesCoordinator.NodesShuffler { + return &shardingMocks.NodeShufflerMock{} + }, + RoundHandlerCalled: func() consensus.RoundHandler { + return &testscommon.RoundHandlerMock{} + }, + HasherCalled: func() hashing.Hasher { + return &testscommon.HasherStub{} + }, + PathHandlerCalled: func() storage.PathManagerHandler { + return &testscommon.PathManagerStub{} + }, + TxMarshalizerCalled: func() marshal.Marshalizer { + return &testscommon.MarshallerStub{} + }, + AddressPubKeyConverterCalled: func() core.PubkeyConverter { + return &testscommon.PubkeyConverterStub{} + }, + Uint64ByteSliceConverterCalled: func() typeConverters.Uint64ByteSliceConverter { + return &mock.Uint64ByteSliceConverterMock{} + }, + TxSignHasherCalled: func() hashing.Hasher { + return &testscommon.HasherStub{} + }, + EnableEpochsHandlerCalled: func() common.EnableEpochsHandler { + return &enableEpochsHandlerMock.EnableEpochsHandlerStub{} + }, + }, + CryptoComponents: &mock.CryptoComponentsStub{ + PubKey: &mock.PublicKeyMock{}, + BlockSig: &cryptoMocks.SingleSignerStub{}, + BlKeyGen: &cryptoMocks.KeyGenStub{}, + TxSig: &cryptoMocks.SingleSignerStub{}, + TxKeyGen: &cryptoMocks.KeyGenStub{}, + ManagedPeersHolderField: &testscommon.ManagedPeersHolderStub{}, + }, + NetworkComponents: &mock.NetworkComponentsStub{ + Messenger: &p2pmocks.MessengerStub{}, + FullArchiveNetworkMessengerField: &p2pmocks.MessengerStub{}, + }, + StatusCoreComponents: &factory.StatusCoreComponentsStub{ + TrieSyncStatisticsField: &testscommon.SizeSyncStatisticsHandlerStub{}, + AppStatusHandlerField: &statusHandler.AppStatusHandlerStub{}, + StateStatsHandlerField: &testscommon.StateStatisticsHandlerStub{}, + }, + WorkingDir: ".", + FlagsConfig: config.ContextFlagsConfig{}, + ImportDBConfig: config.ImportDbConfig{}, + PrefsConfig: config.Preferences{}, + Config: config.Config{ + EpochStartConfig: config.EpochStartConfig{ + MinNumConnectedPeersToStart: 1, + MinNumOfPeersToConsiderBlockValid: 1, + }, + TrieSync: config.TrieSyncConfig{ + MaxHardCapForMissingNodes: 1, + NumConcurrentTrieSyncers: 1, + }, + GeneralSettings: config.GeneralSettingsConfig{ + SetGuardianEpochsDelay: 1, + }, + Versions: config.VersionsConfig{ + Cache: config.CacheConfig{ + Type: "LRU", + Capacity: 123, + }, + DefaultVersion: "1", + VersionsByEpochs: []config.VersionByEpochs{ + { + StartEpoch: 0, + Version: 
"1", + }, + }, + }, + WhiteListPool: config.CacheConfig{ + Type: "LRU", + Capacity: 123, + }, + }, + ShardIDStr: "0", + } +} + +func TestCreateBootstrapComponents(t *testing.T) { + t.Parallel() + + t.Run("should work", func(t *testing.T) { + t.Parallel() + + comp, err := CreateBootstrapComponents(createArgsBootstrapComponentsHolder()) + require.NoError(t, err) + require.NotNil(t, comp) + + require.Nil(t, comp.Create()) + require.Nil(t, comp.Close()) + }) + t.Run("NewBootstrapComponentsFactory failure should error", func(t *testing.T) { + t.Parallel() + + args := createArgsBootstrapComponentsHolder() + args.StatusCoreComponents = &factory.StatusCoreComponentsStub{} + comp, err := CreateBootstrapComponents(args) + require.Error(t, err) + require.Nil(t, comp) + }) + t.Run("managedBootstrapCreate failure should error", func(t *testing.T) { + t.Parallel() + + args := createArgsBootstrapComponentsHolder() + args.StatusCoreComponents = &factory.StatusCoreComponentsStub{ + TrieSyncStatisticsField: &testscommon.SizeSyncStatisticsHandlerStub{}, + AppStatusHandlerField: &statusHandler.AppStatusHandlerStub{}, + } + comp, err := CreateBootstrapComponents(args) + require.Error(t, err) + require.Nil(t, comp) + }) +} + +func TestBootstrapComponentsHolder_IsInterfaceNil(t *testing.T) { + t.Parallel() + + var comp *bootstrapComponentsHolder + require.True(t, comp.IsInterfaceNil()) + + comp, _ = CreateBootstrapComponents(createArgsBootstrapComponentsHolder()) + require.False(t, comp.IsInterfaceNil()) + require.Nil(t, comp.Close()) +} + +func TestBootstrapComponentsHolder_Getters(t *testing.T) { + t.Parallel() + + comp, err := CreateBootstrapComponents(createArgsBootstrapComponentsHolder()) + require.NoError(t, err) + + require.NotNil(t, comp.EpochStartBootstrapper()) + require.NotNil(t, comp.EpochBootstrapParams()) + require.NotEmpty(t, comp.NodeType()) + require.NotNil(t, comp.ShardCoordinator()) + require.NotNil(t, comp.VersionedHeaderFactory()) + require.NotNil(t, comp.HeaderVersionHandler()) + require.NotNil(t, comp.HeaderIntegrityVerifier()) + require.NotNil(t, comp.GuardedAccountHandler()) + require.NotNil(t, comp.NodesCoordinatorRegistryFactory()) + require.Nil(t, comp.CheckSubcomponents()) + require.Empty(t, comp.String()) + require.Nil(t, comp.Close()) +} diff --git a/node/chainSimulator/components/closeHandler_test.go b/node/chainSimulator/components/closeHandler_test.go new file mode 100644 index 00000000000..f8a88576c3c --- /dev/null +++ b/node/chainSimulator/components/closeHandler_test.go @@ -0,0 +1,69 @@ +package components + +import ( + "strings" + "testing" + + "github.com/stretchr/testify/require" +) + +// localErrorlessCloser implements errorlessCloser interface +type localErrorlessCloser struct { + wasCalled bool +} + +// Close - +func (closer *localErrorlessCloser) Close() { + closer.wasCalled = true +} + +// localCloser implements io.Closer interface +type localCloser struct { + wasCalled bool + expectedError error +} + +// Close - +func (closer *localCloser) Close() error { + closer.wasCalled = true + return closer.expectedError +} + +// localCloseAllHandler implements allCloser interface +type localCloseAllHandler struct { + wasCalled bool + expectedError error +} + +// CloseAll - +func (closer *localCloseAllHandler) CloseAll() error { + closer.wasCalled = true + return closer.expectedError +} + +func TestCloseHandler(t *testing.T) { + t.Parallel() + + handler := NewCloseHandler() + require.NotNil(t, handler) + + handler.AddComponent(nil) // for coverage only + + lec := 
&localErrorlessCloser{} + handler.AddComponent(lec) + + lcNoError := &localCloser{} + handler.AddComponent(lcNoError) + + lcWithError := &localCloser{expectedError: expectedErr} + handler.AddComponent(lcWithError) + + lcahNoError := &localCloseAllHandler{} + handler.AddComponent(lcahNoError) + + lcahWithError := &localCloseAllHandler{expectedError: expectedErr} + handler.AddComponent(lcahWithError) + + err := handler.Close() + require.True(t, strings.Contains(err.Error(), expectedErr.Error())) +} diff --git a/node/chainSimulator/components/coreComponents.go b/node/chainSimulator/components/coreComponents.go index 373e34de033..08c7105e0ef 100644 --- a/node/chainSimulator/components/coreComponents.go +++ b/node/chainSimulator/components/coreComponents.go @@ -28,7 +28,7 @@ import ( "github.com/multiversx/mx-chain-go/ntp" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/process/economics" - "github.com/multiversx/mx-chain-go/process/smartContract" + "github.com/multiversx/mx-chain-go/process/rating" "github.com/multiversx/mx-chain-go/sharding" "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" "github.com/multiversx/mx-chain-go/statusHandler" @@ -81,6 +81,7 @@ type ArgsCoreComponentsHolder struct { EnableEpochsConfig config.EnableEpochs RoundsConfig config.RoundConfig EconomicsConfig config.EconomicsConfig + RatingConfig config.RatingsConfig ChanStopNodeProcess chan endProcess.ArgEndProcess InitialRound int64 NodesSetupPath string @@ -88,12 +89,13 @@ type ArgsCoreComponentsHolder struct { NumShards uint32 WorkingDir string - MinNodesPerShard uint32 - MinNodesMeta uint32 + MinNodesPerShard uint32 + MinNodesMeta uint32 + RoundDurationInMs uint64 } // CreateCoreComponents will create a new instance of factory.CoreComponentsHolder -func CreateCoreComponents(args ArgsCoreComponentsHolder) (factory.CoreComponentsHandler, error) { +func CreateCoreComponents(args ArgsCoreComponentsHolder) (*coreComponentsHolder, error) { var err error instance := &coreComponentsHolder{ closeHandler: NewCloseHandler(), @@ -159,38 +161,15 @@ func CreateCoreComponents(args ArgsCoreComponentsHolder) (factory.CoreComponents return nil, err } - argsGasSchedule := forking.ArgsNewGasScheduleNotifier{ - GasScheduleConfig: config.GasScheduleConfig{ - GasScheduleByEpochs: []config.GasScheduleByEpochs{ - { - StartEpoch: 0, - FileName: args.GasScheduleFilename, - }, - }, - }, - ConfigDir: "", - EpochNotifier: instance.epochNotifier, - WasmVMChangeLocker: instance.wasmVMChangeLocker, - } - gasScheduleNotifier, err := forking.NewGasScheduleNotifier(argsGasSchedule) - if err != nil { - return nil, err - } - - builtInCostHandler, err := economics.NewBuiltInFunctionsCost(&economics.ArgsBuiltInFunctionCost{ - ArgsParser: smartContract.NewArgumentParser(), - GasSchedule: gasScheduleNotifier, - }) if err != nil { return nil, err } argsEconomicsHandler := economics.ArgsNewEconomicsData{ - TxVersionChecker: instance.txVersionChecker, - BuiltInFunctionsCostHandler: builtInCostHandler, - Economics: &args.EconomicsConfig, - EpochNotifier: instance.epochNotifier, - EnableEpochsHandler: instance.enableEpochsHandler, + TxVersionChecker: instance.txVersionChecker, + Economics: &args.EconomicsConfig, + EpochNotifier: instance.epochNotifier, + EnableEpochsHandler: instance.enableEpochsHandler, } instance.economicsData, err = economics.NewEconomicsData(argsEconomicsHandler) @@ -199,9 +178,23 @@ func CreateCoreComponents(args ArgsCoreComponentsHolder) (factory.CoreComponents } instance.apiEconomicsData = 
instance.economicsData - // TODO check if we need this - instance.ratingsData = &testscommon.RatingsInfoMock{} - instance.rater = &testscommon.RaterMock{} + // TODO fix this min nodes per shard to be configurable + instance.ratingsData, err = rating.NewRatingsData(rating.RatingsDataArg{ + Config: args.RatingConfig, + ShardConsensusSize: 1, + MetaConsensusSize: 1, + ShardMinNodes: args.MinNodesPerShard, + MetaMinNodes: args.MinNodesMeta, + RoundDurationMiliseconds: args.RoundDurationInMs, + }) + if err != nil { + return nil, err + } + + instance.rater, err = rating.NewBlockSigningRater(instance.ratingsData) + if err != nil { + return nil, err + } instance.nodesShuffler, err = nodesCoordinator.NewHashValidatorsShuffler(&nodesCoordinator.NodesShufflerArgs{ NodesShard: args.MinNodesPerShard, @@ -211,6 +204,7 @@ func CreateCoreComponents(args ArgsCoreComponentsHolder) (factory.CoreComponents ShuffleBetweenShards: true, MaxNodesEnableConfig: args.EnableEpochsConfig.MaxNodesChangeEnableEpoch, EnableEpochsHandler: instance.enableEpochsHandler, + EnableEpochs: args.EnableEpochsConfig, }) if err != nil { return nil, err diff --git a/node/chainSimulator/components/coreComponents_test.go b/node/chainSimulator/components/coreComponents_test.go new file mode 100644 index 00000000000..619eb9d3a2e --- /dev/null +++ b/node/chainSimulator/components/coreComponents_test.go @@ -0,0 +1,303 @@ +package components + +import ( + "encoding/hex" + "testing" + + "github.com/multiversx/mx-chain-core-go/data/endProcess" + "github.com/multiversx/mx-chain-go/config" + "github.com/stretchr/testify/require" +) + +func createArgsCoreComponentsHolder() ArgsCoreComponentsHolder { + return ArgsCoreComponentsHolder{ + Config: config.Config{ + Marshalizer: config.MarshalizerConfig{ + Type: "json", + }, + TxSignMarshalizer: config.TypeConfig{ + Type: "json", + }, + VmMarshalizer: config.TypeConfig{ + Type: "json", + }, + Hasher: config.TypeConfig{ + Type: "blake2b", + }, + TxSignHasher: config.TypeConfig{ + Type: "blake2b", + }, + AddressPubkeyConverter: config.PubkeyConfig{ + Length: 32, + Type: "hex", + }, + ValidatorPubkeyConverter: config.PubkeyConfig{ + Length: 128, + Type: "hex", + }, + GeneralSettings: config.GeneralSettingsConfig{ + ChainID: "T", + MinTransactionVersion: 1, + }, + Hardfork: config.HardforkConfig{ + PublicKeyToListenFrom: "41378f754e2c7b2745208c3ed21b151d297acdc84c3aca00b9e292cf28ec2d444771070157ea7760ed83c26f4fed387d0077e00b563a95825dac2cbc349fc0025ccf774e37b0a98ad9724d30e90f8c29b4091ccb738ed9ffc0573df776ee9ea30b3c038b55e532760ea4a8f152f2a52848020e5cee1cc537f2c2323399723081", + }, + }, + EnableEpochsConfig: config.EnableEpochs{}, + RoundsConfig: config.RoundConfig{ + RoundActivations: map[string]config.ActivationRoundByName{ + "DisableAsyncCallV1": { + Round: "18446744073709551615", + }, + }, + }, + EconomicsConfig: config.EconomicsConfig{ + GlobalSettings: config.GlobalSettings{ + GenesisTotalSupply: "2000000000000000000000", + MinimumInflation: 0, + YearSettings: []*config.YearSetting{ + { + Year: 0, + MaximumInflation: 0.01, + }, + }, + }, + FeeSettings: config.FeeSettings{ + GasLimitSettings: []config.GasLimitSetting{ + { + MaxGasLimitPerBlock: "10000000000", + MaxGasLimitPerMiniBlock: "10000000000", + MaxGasLimitPerMetaBlock: "10000000000", + MaxGasLimitPerMetaMiniBlock: "10000000000", + MaxGasLimitPerTx: "10000000000", + MinGasLimit: "10", + ExtraGasLimitGuardedTx: "50000", + }, + }, + GasPriceModifier: 0.01, + MinGasPrice: "100", + GasPerDataByte: "1", + MaxGasPriceSetGuardian: "100", + }, + 
RewardsSettings: config.RewardsSettings{ + RewardsConfigByEpoch: []config.EpochRewardSettings{ + { + LeaderPercentage: 0.1, + DeveloperPercentage: 0.1, + ProtocolSustainabilityPercentage: 0.1, + ProtocolSustainabilityAddress: "erd1932eft30w753xyvme8d49qejgkjc09n5e49w4mwdjtm0neld797su0dlxp", + TopUpGradientPoint: "300000000000000000000", + TopUpFactor: 0.25, + EpochEnable: 0, + }, + }, + }, + }, + RatingConfig: config.RatingsConfig{ + General: config.General{ + StartRating: 4000, + MaxRating: 10000, + MinRating: 1, + SignedBlocksThreshold: 0.025, + SelectionChances: []*config.SelectionChance{ + {MaxThreshold: 0, ChancePercent: 1}, + {MaxThreshold: 1, ChancePercent: 2}, + {MaxThreshold: 10000, ChancePercent: 4}, + }, + }, + ShardChain: config.ShardChain{ + RatingSteps: config.RatingSteps{ + HoursToMaxRatingFromStartRating: 2, + ProposerValidatorImportance: 1, + ProposerDecreaseFactor: -4, + ValidatorDecreaseFactor: -4, + ConsecutiveMissedBlocksPenalty: 1.2, + }, + }, + MetaChain: config.MetaChain{ + RatingSteps: config.RatingSteps{ + HoursToMaxRatingFromStartRating: 2, + ProposerValidatorImportance: 1, + ProposerDecreaseFactor: -4, + ValidatorDecreaseFactor: -4, + ConsecutiveMissedBlocksPenalty: 1.3, + }, + }, + }, + ChanStopNodeProcess: make(chan endProcess.ArgEndProcess), + InitialRound: 0, + NodesSetupPath: "../../../sharding/mock/testdata/nodesSetupMock.json", + GasScheduleFilename: "../../../cmd/node/config/gasSchedules/gasScheduleV7.toml", + NumShards: 3, + WorkingDir: ".", + MinNodesPerShard: 1, + MinNodesMeta: 1, + RoundDurationInMs: 6000, + } +} + +func TestCreateCoreComponents(t *testing.T) { + t.Parallel() + + t.Run("should work", func(t *testing.T) { + t.Parallel() + + comp, err := CreateCoreComponents(createArgsCoreComponentsHolder()) + require.NoError(t, err) + require.NotNil(t, comp) + + require.Nil(t, comp.Create()) + require.Nil(t, comp.Close()) + }) + t.Run("internal NewMarshalizer failure should error", func(t *testing.T) { + t.Parallel() + + args := createArgsCoreComponentsHolder() + args.Config.Marshalizer.Type = "invalid" + comp, err := CreateCoreComponents(args) + require.Error(t, err) + require.Nil(t, comp) + }) + t.Run("tx NewMarshalizer failure should error", func(t *testing.T) { + t.Parallel() + + args := createArgsCoreComponentsHolder() + args.Config.TxSignMarshalizer.Type = "invalid" + comp, err := CreateCoreComponents(args) + require.Error(t, err) + require.Nil(t, comp) + }) + t.Run("vm NewMarshalizer failure should error", func(t *testing.T) { + t.Parallel() + + args := createArgsCoreComponentsHolder() + args.Config.VmMarshalizer.Type = "invalid" + comp, err := CreateCoreComponents(args) + require.Error(t, err) + require.Nil(t, comp) + }) + t.Run("main NewHasher failure should error", func(t *testing.T) { + t.Parallel() + + args := createArgsCoreComponentsHolder() + args.Config.Hasher.Type = "invalid" + comp, err := CreateCoreComponents(args) + require.Error(t, err) + require.Nil(t, comp) + }) + t.Run("tx NewHasher failure should error", func(t *testing.T) { + t.Parallel() + + args := createArgsCoreComponentsHolder() + args.Config.TxSignHasher.Type = "invalid" + comp, err := CreateCoreComponents(args) + require.Error(t, err) + require.Nil(t, comp) + }) + t.Run("address NewPubkeyConverter failure should error", func(t *testing.T) { + t.Parallel() + + args := createArgsCoreComponentsHolder() + args.Config.AddressPubkeyConverter.Type = "invalid" + comp, err := CreateCoreComponents(args) + require.Error(t, err) + require.Nil(t, comp) + }) + t.Run("validator 
NewPubkeyConverter failure should error", func(t *testing.T) { + t.Parallel() + + args := createArgsCoreComponentsHolder() + args.Config.ValidatorPubkeyConverter.Type = "invalid" + comp, err := CreateCoreComponents(args) + require.Error(t, err) + require.Nil(t, comp) + }) + t.Run("NewNodesSetup failure should error", func(t *testing.T) { + t.Parallel() + + args := createArgsCoreComponentsHolder() + args.NumShards = 0 + comp, err := CreateCoreComponents(args) + require.Error(t, err) + require.Nil(t, comp) + }) + t.Run("NewEconomicsData failure should error", func(t *testing.T) { + t.Parallel() + + args := createArgsCoreComponentsHolder() + args.EconomicsConfig.GlobalSettings.MinimumInflation = -1.0 + comp, err := CreateCoreComponents(args) + require.Error(t, err) + require.Nil(t, comp) + }) + t.Run("validatorPubKeyConverter.Decode failure should error", func(t *testing.T) { + t.Parallel() + + args := createArgsCoreComponentsHolder() + args.Config.Hardfork.PublicKeyToListenFrom = "invalid" + comp, err := CreateCoreComponents(args) + require.Error(t, err) + require.Nil(t, comp) + }) +} + +func TestCoreComponentsHolder_IsInterfaceNil(t *testing.T) { + t.Parallel() + + var comp *coreComponentsHolder + require.True(t, comp.IsInterfaceNil()) + + comp, _ = CreateCoreComponents(createArgsCoreComponentsHolder()) + require.False(t, comp.IsInterfaceNil()) + require.Nil(t, comp.Close()) +} + +func TestCoreComponents_GettersSetters(t *testing.T) { + t.Parallel() + + comp, err := CreateCoreComponents(createArgsCoreComponentsHolder()) + require.NoError(t, err) + + require.NotNil(t, comp.InternalMarshalizer()) + require.Nil(t, comp.SetInternalMarshalizer(nil)) + require.Nil(t, comp.InternalMarshalizer()) + + require.NotNil(t, comp.TxMarshalizer()) + require.NotNil(t, comp.VmMarshalizer()) + require.NotNil(t, comp.Hasher()) + require.NotNil(t, comp.TxSignHasher()) + require.NotNil(t, comp.Uint64ByteSliceConverter()) + require.NotNil(t, comp.AddressPubKeyConverter()) + require.NotNil(t, comp.ValidatorPubKeyConverter()) + require.NotNil(t, comp.PathHandler()) + require.NotNil(t, comp.Watchdog()) + require.NotNil(t, comp.AlarmScheduler()) + require.NotNil(t, comp.SyncTimer()) + require.NotNil(t, comp.RoundHandler()) + require.NotNil(t, comp.EconomicsData()) + require.NotNil(t, comp.APIEconomicsData()) + require.NotNil(t, comp.RatingsData()) + require.NotNil(t, comp.Rater()) + require.NotNil(t, comp.GenesisNodesSetup()) + require.NotNil(t, comp.NodesShuffler()) + require.NotNil(t, comp.EpochNotifier()) + require.NotNil(t, comp.EnableRoundsHandler()) + require.NotNil(t, comp.RoundNotifier()) + require.NotNil(t, comp.EpochStartNotifierWithConfirm()) + require.NotNil(t, comp.ChanStopNodeProcess()) + require.NotNil(t, comp.GenesisTime()) + require.Equal(t, "T", comp.ChainID()) + require.Equal(t, uint32(1), comp.MinTransactionVersion()) + require.NotNil(t, comp.TxVersionChecker()) + require.Equal(t, uint32(64), comp.EncodedAddressLen()) + hfPk, _ := hex.DecodeString("41378f754e2c7b2745208c3ed21b151d297acdc84c3aca00b9e292cf28ec2d444771070157ea7760ed83c26f4fed387d0077e00b563a95825dac2cbc349fc0025ccf774e37b0a98ad9724d30e90f8c29b4091ccb738ed9ffc0573df776ee9ea30b3c038b55e532760ea4a8f152f2a52848020e5cee1cc537f2c2323399723081") + require.Equal(t, hfPk, comp.HardforkTriggerPubKey()) + require.NotNil(t, comp.NodeTypeProvider()) + require.NotNil(t, comp.WasmVMChangeLocker()) + require.NotNil(t, comp.ProcessStatusHandler()) + require.NotNil(t, comp.ProcessStatusHandler()) + require.NotNil(t, comp.EnableEpochsHandler()) + 
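+	// Editorial note, not part of the change set: the uint32(64) expectation for EncodedAddressLen()
+	// a few assertions above follows from the test configuration, spelled out here for clarity.
+	// createArgsCoreComponentsHolder sets AddressPubkeyConverter to Type "hex" with Length 32, and
+	// hex encoding doubles the byte length, e.g. len(hex.EncodeToString(make([]byte, 32))) == 64.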
require.Nil(t, comp.CheckSubcomponents()) + require.Empty(t, comp.String()) + require.Nil(t, comp.Close()) +} diff --git a/node/chainSimulator/components/cryptoComponents.go b/node/chainSimulator/components/cryptoComponents.go index 42432636724..3fcd7e205b7 100644 --- a/node/chainSimulator/components/cryptoComponents.go +++ b/node/chainSimulator/components/cryptoComponents.go @@ -2,6 +2,7 @@ package components import ( "fmt" + "io" "github.com/multiversx/mx-chain-core-go/core" crypto "github.com/multiversx/mx-chain-crypto-go" @@ -26,28 +27,29 @@ type ArgsCryptoComponentsHolder struct { } type cryptoComponentsHolder struct { - publicKey crypto.PublicKey - privateKey crypto.PrivateKey - p2pPublicKey crypto.PublicKey - p2pPrivateKey crypto.PrivateKey - p2pSingleSigner crypto.SingleSigner - txSingleSigner crypto.SingleSigner - blockSigner crypto.SingleSigner - multiSignerContainer cryptoCommon.MultiSignerContainer - peerSignatureHandler crypto.PeerSignatureHandler - blockSignKeyGen crypto.KeyGenerator - txSignKeyGen crypto.KeyGenerator - p2pKeyGen crypto.KeyGenerator - messageSignVerifier vm.MessageSignVerifier - consensusSigningHandler consensus.SigningHandler - managedPeersHolder common.ManagedPeersHolder - keysHandler consensus.KeysHandler - publicKeyBytes []byte - publicKeyString string + publicKey crypto.PublicKey + privateKey crypto.PrivateKey + p2pPublicKey crypto.PublicKey + p2pPrivateKey crypto.PrivateKey + p2pSingleSigner crypto.SingleSigner + txSingleSigner crypto.SingleSigner + blockSigner crypto.SingleSigner + multiSignerContainer cryptoCommon.MultiSignerContainer + peerSignatureHandler crypto.PeerSignatureHandler + blockSignKeyGen crypto.KeyGenerator + txSignKeyGen crypto.KeyGenerator + p2pKeyGen crypto.KeyGenerator + messageSignVerifier vm.MessageSignVerifier + consensusSigningHandler consensus.SigningHandler + managedPeersHolder common.ManagedPeersHolder + keysHandler consensus.KeysHandler + publicKeyBytes []byte + publicKeyString string + managedCryptoComponentsCloser io.Closer } // CreateCryptoComponents will create a new instance of cryptoComponentsHolder -func CreateCryptoComponents(args ArgsCryptoComponentsHolder) (factory.CryptoComponentsHandler, error) { +func CreateCryptoComponents(args ArgsCryptoComponentsHolder) (*cryptoComponentsHolder, error) { instance := &cryptoComponentsHolder{} cryptoComponentsHandlerArgs := cryptoComp.CryptoComponentsFactoryArgs{ @@ -104,6 +106,7 @@ func CreateCryptoComponents(args ArgsCryptoComponentsHolder) (factory.CryptoComp instance.consensusSigningHandler = managedCryptoComponents.ConsensusSigningHandler() instance.managedPeersHolder = managedCryptoComponents.ManagedPeersHolder() instance.keysHandler = managedCryptoComponents.KeysHandler() + instance.managedCryptoComponentsCloser = managedCryptoComponents if args.BypassTxSignatureCheck { instance.txSingleSigner = &singlesig.DisabledSingleSig{} @@ -219,24 +222,25 @@ func (c *cryptoComponentsHolder) KeysHandler() consensus.KeysHandler { // Clone will clone the cryptoComponentsHolder func (c *cryptoComponentsHolder) Clone() interface{} { return &cryptoComponentsHolder{ - publicKey: c.PublicKey(), - privateKey: c.PrivateKey(), - p2pPublicKey: c.P2pPublicKey(), - p2pPrivateKey: c.P2pPrivateKey(), - p2pSingleSigner: c.P2pSingleSigner(), - txSingleSigner: c.TxSingleSigner(), - blockSigner: c.BlockSigner(), - multiSignerContainer: c.MultiSignerContainer(), - peerSignatureHandler: c.PeerSignatureHandler(), - blockSignKeyGen: c.BlockSignKeyGen(), - txSignKeyGen: c.TxSignKeyGen(), - p2pKeyGen: 
c.P2pKeyGen(), - messageSignVerifier: c.MessageSignVerifier(), - consensusSigningHandler: c.ConsensusSigningHandler(), - managedPeersHolder: c.ManagedPeersHolder(), - keysHandler: c.KeysHandler(), - publicKeyBytes: c.PublicKeyBytes(), - publicKeyString: c.PublicKeyString(), + publicKey: c.PublicKey(), + privateKey: c.PrivateKey(), + p2pPublicKey: c.P2pPublicKey(), + p2pPrivateKey: c.P2pPrivateKey(), + p2pSingleSigner: c.P2pSingleSigner(), + txSingleSigner: c.TxSingleSigner(), + blockSigner: c.BlockSigner(), + multiSignerContainer: c.MultiSignerContainer(), + peerSignatureHandler: c.PeerSignatureHandler(), + blockSignKeyGen: c.BlockSignKeyGen(), + txSignKeyGen: c.TxSignKeyGen(), + p2pKeyGen: c.P2pKeyGen(), + messageSignVerifier: c.MessageSignVerifier(), + consensusSigningHandler: c.ConsensusSigningHandler(), + managedPeersHolder: c.ManagedPeersHolder(), + keysHandler: c.KeysHandler(), + publicKeyBytes: c.PublicKeyBytes(), + publicKeyString: c.PublicKeyString(), + managedCryptoComponentsCloser: c.managedCryptoComponentsCloser, } } @@ -261,5 +265,5 @@ func (c *cryptoComponentsHolder) String() string { // Close will do nothing func (c *cryptoComponentsHolder) Close() error { - return nil + return c.managedCryptoComponentsCloser.Close() } diff --git a/node/chainSimulator/components/cryptoComponents_test.go b/node/chainSimulator/components/cryptoComponents_test.go new file mode 100644 index 00000000000..fc8087f5cd4 --- /dev/null +++ b/node/chainSimulator/components/cryptoComponents_test.go @@ -0,0 +1,168 @@ +package components + +import ( + "testing" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/testscommon" + "github.com/multiversx/mx-chain-go/testscommon/factory" + "github.com/stretchr/testify/require" +) + +func createArgsCryptoComponentsHolder() ArgsCryptoComponentsHolder { + return ArgsCryptoComponentsHolder{ + Config: config.Config{ + Consensus: config.ConsensusConfig{ + Type: "bls", + }, + MultisigHasher: config.TypeConfig{ + Type: "blake2b", + }, + PublicKeyPIDSignature: config.CacheConfig{ + Capacity: 1000, + Type: "LRU", + }, + }, + EnableEpochsConfig: config.EnableEpochs{ + BLSMultiSignerEnableEpoch: []config.MultiSignerConfig{ + { + EnableEpoch: 0, + Type: "no-KOSK", + }, + { + EnableEpoch: 10, + Type: "KOSK", + }, + }, + }, + Preferences: config.Preferences{}, + CoreComponentsHolder: &factory.CoreComponentsHolderStub{ + ValidatorPubKeyConverterCalled: func() core.PubkeyConverter { + return &testscommon.PubkeyConverterStub{ + EncodeCalled: func(pkBytes []byte) (string, error) { + return "public key", nil + }, + } + }, + }, + AllValidatorKeysPemFileName: "allValidatorKeys.pem", + BypassTxSignatureCheck: false, + } +} + +func TestCreateCryptoComponents(t *testing.T) { + t.Parallel() + + t.Run("should work", func(t *testing.T) { + t.Parallel() + + comp, err := CreateCryptoComponents(createArgsCryptoComponentsHolder()) + require.NoError(t, err) + require.NotNil(t, comp) + + require.Nil(t, comp.Create()) + require.Nil(t, comp.Close()) + }) + t.Run("should work with bypass tx sig check", func(t *testing.T) { + t.Parallel() + + args := createArgsCryptoComponentsHolder() + args.BypassTxSignatureCheck = true + comp, err := CreateCryptoComponents(args) + require.NoError(t, err) + require.NotNil(t, comp) + + require.Nil(t, comp.Create()) + require.Nil(t, comp.Close()) + }) + t.Run("NewCryptoComponentsFactory failure should error", func(t *testing.T) { + t.Parallel() + + args := 
createArgsCryptoComponentsHolder() + args.CoreComponentsHolder = &factory.CoreComponentsHolderStub{ + ValidatorPubKeyConverterCalled: func() core.PubkeyConverter { + return nil + }, + } + comp, err := CreateCryptoComponents(args) + require.Error(t, err) + require.Nil(t, comp) + }) + t.Run("managedCryptoComponents.Create failure should error", func(t *testing.T) { + t.Parallel() + + args := createArgsCryptoComponentsHolder() + args.CoreComponentsHolder = &factory.CoreComponentsHolderStub{ + ValidatorPubKeyConverterCalled: func() core.PubkeyConverter { + return &testscommon.PubkeyConverterStub{ + EncodeCalled: func(pkBytes []byte) (string, error) { + return "", expectedErr + }, + } + }, + } + comp, err := CreateCryptoComponents(args) + require.Error(t, err) + require.Nil(t, comp) + }) +} + +func TestCryptoComponentsHolder_IsInterfaceNil(t *testing.T) { + t.Parallel() + + var comp *cryptoComponentsHolder + require.True(t, comp.IsInterfaceNil()) + + comp, _ = CreateCryptoComponents(createArgsCryptoComponentsHolder()) + require.False(t, comp.IsInterfaceNil()) + require.Nil(t, comp.Close()) +} + +func TestCryptoComponentsHolder_GettersSetters(t *testing.T) { + t.Parallel() + + comp, err := CreateCryptoComponents(createArgsCryptoComponentsHolder()) + require.NoError(t, err) + + require.NotNil(t, comp.PublicKey()) + require.NotNil(t, comp.PrivateKey()) + require.NotEmpty(t, comp.PublicKeyString()) + require.NotEmpty(t, comp.PublicKeyBytes()) + require.NotNil(t, comp.P2pPublicKey()) + require.NotNil(t, comp.P2pPrivateKey()) + require.NotNil(t, comp.P2pSingleSigner()) + require.NotNil(t, comp.TxSingleSigner()) + require.NotNil(t, comp.BlockSigner()) + container := comp.MultiSignerContainer() + require.NotNil(t, container) + require.Nil(t, comp.SetMultiSignerContainer(nil)) + require.Nil(t, comp.MultiSignerContainer()) + require.Nil(t, comp.SetMultiSignerContainer(container)) + signer, err := comp.GetMultiSigner(0) + require.NoError(t, err) + require.NotNil(t, signer) + require.NotNil(t, comp.PeerSignatureHandler()) + require.NotNil(t, comp.BlockSignKeyGen()) + require.NotNil(t, comp.TxSignKeyGen()) + require.NotNil(t, comp.P2pKeyGen()) + require.NotNil(t, comp.MessageSignVerifier()) + require.NotNil(t, comp.ConsensusSigningHandler()) + require.NotNil(t, comp.ManagedPeersHolder()) + require.NotNil(t, comp.KeysHandler()) + require.Nil(t, comp.CheckSubcomponents()) + require.Empty(t, comp.String()) + require.Nil(t, comp.Close()) +} + +func TestCryptoComponentsHolder_Clone(t *testing.T) { + t.Parallel() + + comp, err := CreateCryptoComponents(createArgsCryptoComponentsHolder()) + require.NoError(t, err) + + compClone := comp.Clone() + require.Equal(t, comp, compClone) + require.False(t, comp == compClone) // pointer testing + require.Nil(t, comp.Close()) +} diff --git a/node/chainSimulator/components/dataComponents.go b/node/chainSimulator/components/dataComponents.go index 9eb8605af12..8f04c351509 100644 --- a/node/chainSimulator/components/dataComponents.go +++ b/node/chainSimulator/components/dataComponents.go @@ -25,7 +25,7 @@ type dataComponentsHolder struct { } // CreateDataComponents will create the data components holder -func CreateDataComponents(args ArgsDataComponentsHolder) (factory.DataComponentsHandler, error) { +func CreateDataComponents(args ArgsDataComponentsHolder) (*dataComponentsHolder, error) { miniBlockStorer, err := args.StorageService.GetStorer(dataRetriever.MiniBlockUnit) if err != nil { return nil, err @@ -89,6 +89,7 @@ func (d *dataComponentsHolder) Clone() interface{} { 
storageService: d.storageService, dataPool: d.dataPool, miniBlockProvider: d.miniBlockProvider, + closeHandler: d.closeHandler, } } diff --git a/node/chainSimulator/components/dataComponents_test.go b/node/chainSimulator/components/dataComponents_test.go new file mode 100644 index 00000000000..a74f0b751f6 --- /dev/null +++ b/node/chainSimulator/components/dataComponents_test.go @@ -0,0 +1,110 @@ +package components + +import ( + "testing" + + retriever "github.com/multiversx/mx-chain-go/dataRetriever" + chainStorage "github.com/multiversx/mx-chain-go/storage" + "github.com/multiversx/mx-chain-go/testscommon" + "github.com/multiversx/mx-chain-go/testscommon/dataRetriever" + "github.com/multiversx/mx-chain-go/testscommon/storage" + "github.com/stretchr/testify/require" +) + +func createArgsDataComponentsHolder() ArgsDataComponentsHolder { + return ArgsDataComponentsHolder{ + Chain: &testscommon.ChainHandlerStub{}, + StorageService: &storage.ChainStorerStub{ + GetStorerCalled: func(unitType retriever.UnitType) (chainStorage.Storer, error) { + return &storage.StorerStub{}, nil + }, + }, + DataPool: &dataRetriever.PoolsHolderStub{ + MiniBlocksCalled: func() chainStorage.Cacher { + return &testscommon.CacherStub{} + }, + }, + InternalMarshaller: &testscommon.MarshallerStub{}, + } +} + +func TestCreateDataComponents(t *testing.T) { + t.Parallel() + + t.Run("should work", func(t *testing.T) { + t.Parallel() + + comp, err := CreateDataComponents(createArgsDataComponentsHolder()) + require.NoError(t, err) + require.NotNil(t, comp) + + require.Nil(t, comp.Create()) + require.Nil(t, comp.Close()) + }) + t.Run("NewMiniBlockProvider failure should error", func(t *testing.T) { + t.Parallel() + + args := createArgsDataComponentsHolder() + args.DataPool = &dataRetriever.PoolsHolderStub{ + MiniBlocksCalled: func() chainStorage.Cacher { + return nil + }, + } + comp, err := CreateDataComponents(args) + require.Error(t, err) + require.Nil(t, comp) + }) + t.Run("GetStorer failure should error", func(t *testing.T) { + t.Parallel() + + args := createArgsDataComponentsHolder() + args.StorageService = &storage.ChainStorerStub{ + GetStorerCalled: func(unitType retriever.UnitType) (chainStorage.Storer, error) { + return nil, expectedErr + }, + } + comp, err := CreateDataComponents(args) + require.Equal(t, expectedErr, err) + require.Nil(t, comp) + }) +} + +func TestDataComponentsHolder_IsInterfaceNil(t *testing.T) { + t.Parallel() + + var comp *dataComponentsHolder + require.True(t, comp.IsInterfaceNil()) + + comp, _ = CreateDataComponents(createArgsDataComponentsHolder()) + require.False(t, comp.IsInterfaceNil()) + require.Nil(t, comp.Close()) +} + +func TestDataComponentsHolder_Getters(t *testing.T) { + t.Parallel() + + comp, err := CreateDataComponents(createArgsDataComponentsHolder()) + require.NoError(t, err) + + require.NotNil(t, comp.Blockchain()) + require.Nil(t, comp.SetBlockchain(nil)) + require.Nil(t, comp.Blockchain()) + require.NotNil(t, comp.StorageService()) + require.NotNil(t, comp.Datapool()) + require.NotNil(t, comp.MiniBlocksProvider()) + require.Nil(t, comp.CheckSubcomponents()) + require.Empty(t, comp.String()) + require.Nil(t, comp.Close()) +} + +func TestDataComponentsHolder_Clone(t *testing.T) { + t.Parallel() + + comp, err := CreateDataComponents(createArgsDataComponentsHolder()) + require.NoError(t, err) + + compClone := comp.Clone() + require.Equal(t, comp, compClone) + require.False(t, comp == compClone) // pointer testing + require.Nil(t, comp.Close()) +} diff --git 
a/node/chainSimulator/components/instantBroadcastMessenger_test.go b/node/chainSimulator/components/instantBroadcastMessenger_test.go new file mode 100644 index 00000000000..361caa03bbc --- /dev/null +++ b/node/chainSimulator/components/instantBroadcastMessenger_test.go @@ -0,0 +1,134 @@ +package components + +import ( + "testing" + + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/consensus/mock" + errorsMx "github.com/multiversx/mx-chain-go/errors" + "github.com/stretchr/testify/require" +) + +func TestNewInstantBroadcastMessenger(t *testing.T) { + t.Parallel() + + t.Run("nil broadcastMessenger should error", func(t *testing.T) { + t.Parallel() + + mes, err := NewInstantBroadcastMessenger(nil, nil) + require.Equal(t, errorsMx.ErrNilBroadcastMessenger, err) + require.Nil(t, mes) + }) + t.Run("nil shardCoordinator should error", func(t *testing.T) { + t.Parallel() + + mes, err := NewInstantBroadcastMessenger(&mock.BroadcastMessengerMock{}, nil) + require.Equal(t, errorsMx.ErrNilShardCoordinator, err) + require.Nil(t, mes) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + mes, err := NewInstantBroadcastMessenger(&mock.BroadcastMessengerMock{}, &mock.ShardCoordinatorMock{}) + require.NoError(t, err) + require.NotNil(t, mes) + }) +} + +func TestInstantBroadcastMessenger_IsInterfaceNil(t *testing.T) { + t.Parallel() + + var mes *instantBroadcastMessenger + require.True(t, mes.IsInterfaceNil()) + + mes, _ = NewInstantBroadcastMessenger(&mock.BroadcastMessengerMock{}, &mock.ShardCoordinatorMock{}) + require.False(t, mes.IsInterfaceNil()) +} + +func TestInstantBroadcastMessenger_BroadcastBlockDataLeader(t *testing.T) { + t.Parallel() + + t.Run("meta should work", func(t *testing.T) { + t.Parallel() + + providedMBs := map[uint32][]byte{ + 0: []byte("mb shard 0"), + 1: []byte("mb shard 1"), + common.MetachainShardId: []byte("mb shard meta"), + } + providedTxs := map[string][][]byte{ + "topic_0": {[]byte("txs topic 0")}, + "topic_1": {[]byte("txs topic 1")}, + } + mes, err := NewInstantBroadcastMessenger(&mock.BroadcastMessengerMock{ + BroadcastMiniBlocksCalled: func(mbs map[uint32][]byte, bytes []byte) error { + require.Equal(t, providedMBs, mbs) + return expectedErr // for coverage only + }, + BroadcastTransactionsCalled: func(txs map[string][][]byte, bytes []byte) error { + require.Equal(t, providedTxs, txs) + return expectedErr // for coverage only + }, + }, &mock.ShardCoordinatorMock{ + ShardID: common.MetachainShardId, + }) + require.NoError(t, err) + + err = mes.BroadcastBlockDataLeader(nil, providedMBs, providedTxs, []byte("pk")) + require.NoError(t, err) + }) + t.Run("shard should work", func(t *testing.T) { + t.Parallel() + + providedMBs := map[uint32][]byte{ + 0: []byte("mb shard 0"), // for coverage only + common.MetachainShardId: []byte("mb shard meta"), + } + expectedMBs := map[uint32][]byte{ + common.MetachainShardId: []byte("mb shard meta"), + } + providedTxs := map[string][][]byte{ + "topic_0": {[]byte("txs topic 1")}, // for coverage only + "topic_0_META": {[]byte("txs topic meta")}, + } + expectedTxs := map[string][][]byte{ + "topic_0_META": {[]byte("txs topic meta")}, + } + mes, err := NewInstantBroadcastMessenger(&mock.BroadcastMessengerMock{ + BroadcastMiniBlocksCalled: func(mbs map[uint32][]byte, bytes []byte) error { + require.Equal(t, expectedMBs, mbs) + return nil + }, + BroadcastTransactionsCalled: func(txs map[string][][]byte, bytes []byte) error { + require.Equal(t, expectedTxs, txs) + return nil + }, + }, 
&mock.ShardCoordinatorMock{ + ShardID: 0, + }) + require.NoError(t, err) + + err = mes.BroadcastBlockDataLeader(nil, providedMBs, providedTxs, []byte("pk")) + require.NoError(t, err) + }) + t.Run("shard, empty miniblocks should early exit", func(t *testing.T) { + t.Parallel() + + mes, err := NewInstantBroadcastMessenger(&mock.BroadcastMessengerMock{ + BroadcastMiniBlocksCalled: func(mbs map[uint32][]byte, bytes []byte) error { + require.Fail(t, "should have not been called") + return nil + }, + BroadcastTransactionsCalled: func(txs map[string][][]byte, bytes []byte) error { + require.Fail(t, "should have not been called") + return nil + }, + }, &mock.ShardCoordinatorMock{ + ShardID: 0, + }) + require.NoError(t, err) + + err = mes.BroadcastBlockDataLeader(nil, nil, nil, []byte("pk")) + require.NoError(t, err) + }) +} diff --git a/node/chainSimulator/components/manualRoundHandler.go b/node/chainSimulator/components/manualRoundHandler.go index 3639bf23752..479cf63a1f5 100644 --- a/node/chainSimulator/components/manualRoundHandler.go +++ b/node/chainSimulator/components/manualRoundHandler.go @@ -9,6 +9,7 @@ type manualRoundHandler struct { index int64 genesisTimeStamp int64 roundDuration time.Duration + initialRound int64 } // NewManualRoundHandler returns a manual round handler instance @@ -17,6 +18,7 @@ func NewManualRoundHandler(genesisTimeStamp int64, roundDuration time.Duration, genesisTimeStamp: genesisTimeStamp, roundDuration: roundDuration, index: initialRound, + initialRound: initialRound, } } @@ -44,7 +46,7 @@ func (handler *manualRoundHandler) TimeStamp() time.Time { rounds := atomic.LoadInt64(&handler.index) timeFromGenesis := handler.roundDuration * time.Duration(rounds) timestamp := time.Unix(handler.genesisTimeStamp, 0).Add(timeFromGenesis) - + timestamp = time.Unix(timestamp.Unix()-int64(handler.roundDuration.Seconds())*handler.initialRound, 0) return timestamp } diff --git a/node/chainSimulator/components/manualRoundHandler_test.go b/node/chainSimulator/components/manualRoundHandler_test.go new file mode 100644 index 00000000000..8a866d6ccec --- /dev/null +++ b/node/chainSimulator/components/manualRoundHandler_test.go @@ -0,0 +1,44 @@ +package components + +import ( + "testing" + "time" + + "github.com/stretchr/testify/require" +) + +func TestNewManualRoundHandler(t *testing.T) { + t.Parallel() + + handler := NewManualRoundHandler(100, time.Second, 0) + require.NotNil(t, handler) +} + +func TestManualRoundHandler_IsInterfaceNil(t *testing.T) { + t.Parallel() + + var handler *manualRoundHandler + require.True(t, handler.IsInterfaceNil()) + + handler = NewManualRoundHandler(100, time.Second, 0) + require.False(t, handler.IsInterfaceNil()) +} + +func TestManualRoundHandler_Operations(t *testing.T) { + t.Parallel() + + genesisTime := time.Now() + providedIndex := int64(0) + providedRoundDuration := time.Second + handler := NewManualRoundHandler(genesisTime.Unix(), providedRoundDuration, providedIndex) + require.Equal(t, providedIndex, handler.Index()) + handler.IncrementIndex() + require.Equal(t, providedIndex+1, handler.Index()) + expectedTimestamp := time.Unix(handler.genesisTimeStamp, 0).Add(providedRoundDuration) + require.Equal(t, expectedTimestamp, handler.TimeStamp()) + require.Equal(t, providedRoundDuration, handler.TimeDuration()) + providedMaxTime := time.Minute + require.Equal(t, providedMaxTime, handler.RemainingTime(time.Now(), providedMaxTime)) + require.False(t, handler.BeforeGenesis()) + handler.UpdateRound(time.Now(), time.Now()) // for coverage only +} diff --git 
a/node/chainSimulator/components/memoryComponents.go b/node/chainSimulator/components/memoryComponents.go index 92b562beb6f..3b12e720756 100644 --- a/node/chainSimulator/components/memoryComponents.go +++ b/node/chainSimulator/components/memoryComponents.go @@ -23,6 +23,13 @@ type trieStorage struct { storage.Storer } +// CreateMemUnitForTries returns a special type of storer used on tries instances +func CreateMemUnitForTries() storage.Storer { + return &trieStorage{ + Storer: CreateMemUnit(), + } +} + // SetEpochForPutOperation does nothing func (store *trieStorage) SetEpochForPutOperation(_ uint32) { } @@ -73,10 +80,3 @@ func (store *trieStorage) RemoveFromCurrentEpoch(key []byte) error { func (store *trieStorage) RemoveFromAllActiveEpochs(key []byte) error { return store.Remove(key) } - -// CreateMemUnitForTries returns a special type of storer used on tries instances -func CreateMemUnitForTries() storage.Storer { - return &trieStorage{ - Storer: CreateMemUnit(), - } -} diff --git a/node/chainSimulator/components/memoryComponents_test.go b/node/chainSimulator/components/memoryComponents_test.go new file mode 100644 index 00000000000..b393bca7d47 --- /dev/null +++ b/node/chainSimulator/components/memoryComponents_test.go @@ -0,0 +1,55 @@ +package components + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestCreateMemUnitForTries(t *testing.T) { + t.Parallel() + + memUnitStorer := CreateMemUnitForTries() + require.NotNil(t, memUnitStorer) + + memUnit, ok := memUnitStorer.(*trieStorage) + require.True(t, ok) + memUnit.SetEpochForPutOperation(0) // for coverage only + key := []byte("key") + data := []byte("data") + require.NoError(t, memUnit.Put(key, data)) + + require.NoError(t, memUnit.PutInEpoch(key, data, 0)) + require.NoError(t, memUnit.PutInEpochWithoutCache(key, data, 0)) + + value, _, err := memUnit.GetFromOldEpochsWithoutAddingToCache(key) + require.NoError(t, err) + require.Equal(t, data, value) + + latest, err := memUnit.GetLatestStorageEpoch() + require.NoError(t, err) + require.Zero(t, latest) + + value, err = memUnit.GetFromCurrentEpoch(key) + require.NoError(t, err) + require.Equal(t, data, value) + + value, err = memUnit.GetFromEpoch(key, 0) + require.NoError(t, err) + require.Equal(t, data, value) + + value, err = memUnit.GetFromLastEpoch(key) + require.NoError(t, err) + require.Equal(t, data, value) + + require.NoError(t, memUnit.RemoveFromCurrentEpoch(key)) + value, err = memUnit.GetFromCurrentEpoch(key) + require.Error(t, err) + require.Empty(t, value) + + require.NoError(t, memUnit.PutInEpoch(key, data, 0)) + require.NoError(t, memUnit.RemoveFromAllActiveEpochs(key)) + value, err = memUnit.GetFromCurrentEpoch(key) + require.Error(t, err) + require.Empty(t, value) +} diff --git a/node/chainSimulator/components/networkComponents.go b/node/chainSimulator/components/networkComponents.go index 6a6bf8d346b..6b791f6927b 100644 --- a/node/chainSimulator/components/networkComponents.go +++ b/node/chainSimulator/components/networkComponents.go @@ -27,7 +27,7 @@ type networkComponentsHolder struct { } // CreateNetworkComponents creates a new networkComponentsHolder instance -func CreateNetworkComponents(network SyncedBroadcastNetworkHandler) (factory.NetworkComponentsHandler, error) { +func CreateNetworkComponents(network SyncedBroadcastNetworkHandler) (*networkComponentsHolder, error) { messenger, err := NewSyncedMessenger(network) if err != nil { return nil, err diff --git a/node/chainSimulator/components/networkComponents_test.go 
b/node/chainSimulator/components/networkComponents_test.go new file mode 100644 index 00000000000..9c184d4d608 --- /dev/null +++ b/node/chainSimulator/components/networkComponents_test.go @@ -0,0 +1,62 @@ +package components + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestCreateNetworkComponents(t *testing.T) { + t.Parallel() + + t.Run("should work", func(t *testing.T) { + t.Parallel() + + comp, err := CreateNetworkComponents(NewSyncedBroadcastNetwork()) + require.NoError(t, err) + require.NotNil(t, comp) + + require.Nil(t, comp.Create()) + require.Nil(t, comp.Close()) + }) + t.Run("nil network should error", func(t *testing.T) { + t.Parallel() + + comp, err := CreateNetworkComponents(nil) + require.Error(t, err) + require.Nil(t, comp) + }) +} + +func TestNetworkComponentsHolder_IsInterfaceNil(t *testing.T) { + t.Parallel() + + var comp *networkComponentsHolder + require.True(t, comp.IsInterfaceNil()) + + comp, _ = CreateNetworkComponents(NewSyncedBroadcastNetwork()) + require.False(t, comp.IsInterfaceNil()) + require.Nil(t, comp.Close()) +} + +func TestNetworkComponentsHolder_Getters(t *testing.T) { + t.Parallel() + + comp, err := CreateNetworkComponents(NewSyncedBroadcastNetwork()) + require.NoError(t, err) + + require.NotNil(t, comp.NetworkMessenger()) + require.NotNil(t, comp.InputAntiFloodHandler()) + require.NotNil(t, comp.OutputAntiFloodHandler()) + require.NotNil(t, comp.PubKeyCacher()) + require.NotNil(t, comp.PeerBlackListHandler()) + require.NotNil(t, comp.PeerHonestyHandler()) + require.NotNil(t, comp.PreferredPeersHolderHandler()) + require.NotNil(t, comp.PeersRatingHandler()) + require.NotNil(t, comp.PeersRatingMonitor()) + require.NotNil(t, comp.FullArchiveNetworkMessenger()) + require.NotNil(t, comp.FullArchivePreferredPeersHolderHandler()) + require.Nil(t, comp.CheckSubcomponents()) + require.Empty(t, comp.String()) + require.Nil(t, comp.Close()) +} diff --git a/node/chainSimulator/components/processComponents.go b/node/chainSimulator/components/processComponents.go index 27b1e358614..efa7af79c10 100644 --- a/node/chainSimulator/components/processComponents.go +++ b/node/chainSimulator/components/processComponents.go @@ -2,6 +2,7 @@ package components import ( "fmt" + "io" "math/big" "path/filepath" "time" @@ -42,6 +43,7 @@ type ArgsProcessComponentsHolder struct { NodesCoordinator nodesCoordinator.NodesCoordinator EpochConfig config.EpochConfig + RoundConfig config.RoundConfig ConfigurationPathsHolder config.ConfigurationPathsHolder FlagsConfig config.ContextFlagsConfig ImportDBConfig config.ImportDbConfig @@ -49,10 +51,12 @@ type ArgsProcessComponentsHolder struct { Config config.Config EconomicsConfig config.EconomicsConfig SystemSCConfig config.SystemSmartContractsConfig + + GenesisNonce uint64 + GenesisRound uint64 } type processComponentsHolder struct { - closeHandler *closeHandler receiptsRepository factory.ReceiptsRepository nodesCoordinator nodesCoordinator.NodesCoordinator shardCoordinator sharding.Coordinator @@ -93,11 +97,12 @@ type processComponentsHolder struct { processedMiniBlocksTracker process.ProcessedMiniBlocksTracker esdtDataStorageHandlerForAPI vmcommon.ESDTNFTStorageHandler accountsParser genesis.AccountsParser - sendSignatureTracker process.SentSignaturesTracker + sentSignatureTracker process.SentSignaturesTracker + managedProcessComponentsCloser io.Closer } // CreateProcessComponents will create the process components holder -func CreateProcessComponents(args ArgsProcessComponentsHolder) 
(factory.ProcessComponentsHandler, error) { +func CreateProcessComponents(args ArgsProcessComponentsHolder) (*processComponentsHolder, error) { importStartHandler, err := trigger.NewImportStartHandler(filepath.Join(args.FlagsConfig.DbDir, common.DefaultDBPath), args.FlagsConfig.Version) if err != nil { return nil, err @@ -180,20 +185,14 @@ func CreateProcessComponents(args ArgsProcessComponentsHolder) (factory.ProcessC processArgs := processComp.ProcessComponentsFactoryArgs{ Config: args.Config, EpochConfig: args.EpochConfig, + RoundConfig: args.RoundConfig, PrefConfigs: args.PrefsConfig, ImportDBConfig: args.ImportDBConfig, + EconomicsConfig: args.EconomicsConfig, AccountsParser: accountsParser, SmartContractParser: smartContractParser, GasSchedule: gasScheduleNotifier, NodesCoordinator: args.NodesCoordinator, - Data: args.DataComponents, - CoreData: args.CoreComponents, - Crypto: args.CryptoComponents, - State: args.StateComponents, - Network: args.NetworkComponents, - BootstrapComponents: args.BootstrapComponents, - StatusComponents: args.StatusComponents, - StatusCoreComponents: args.StatusCoreComponents, RequestedItemsHandler: requestedItemsHandler, WhiteListHandler: whiteListRequest, WhiteListerVerifiedTxs: whiteListerVerifiedTxs, @@ -202,7 +201,17 @@ func CreateProcessComponents(args ArgsProcessComponentsHolder) (factory.ProcessC ImportStartHandler: importStartHandler, HistoryRepo: historyRepository, FlagsConfig: args.FlagsConfig, + Data: args.DataComponents, + CoreData: args.CoreComponents, + Crypto: args.CryptoComponents, + State: args.StateComponents, + Network: args.NetworkComponents, + BootstrapComponents: args.BootstrapComponents, + StatusComponents: args.StatusComponents, + StatusCoreComponents: args.StatusCoreComponents, TxExecutionOrderHandler: txExecutionOrderHandler, + GenesisNonce: args.GenesisNonce, + GenesisRound: args.GenesisRound, } processComponentsFactory, err := processComp.NewProcessComponentsFactory(processArgs) if err != nil { @@ -220,7 +229,6 @@ func CreateProcessComponents(args ArgsProcessComponentsHolder) (factory.ProcessC } instance := &processComponentsHolder{ - closeHandler: NewCloseHandler(), receiptsRepository: managedProcessComponents.ReceiptsRepository(), nodesCoordinator: managedProcessComponents.NodesCoordinator(), shardCoordinator: managedProcessComponents.ShardCoordinator(), @@ -261,17 +269,16 @@ func CreateProcessComponents(args ArgsProcessComponentsHolder) (factory.ProcessC processedMiniBlocksTracker: managedProcessComponents.ProcessedMiniBlocksTracker(), esdtDataStorageHandlerForAPI: managedProcessComponents.ESDTDataStorageHandlerForAPI(), accountsParser: managedProcessComponents.AccountsParser(), - sendSignatureTracker: managedProcessComponents.SentSignaturesTracker(), + sentSignatureTracker: managedProcessComponents.SentSignaturesTracker(), + managedProcessComponentsCloser: managedProcessComponents, } - instance.collectClosableComponents() - return instance, nil } -// SentSignaturesTracker will return the send signature tracker +// SentSignaturesTracker will return the sent signature tracker func (p *processComponentsHolder) SentSignaturesTracker() process.SentSignaturesTracker { - return p.sendSignatureTracker + return p.sentSignatureTracker } // NodesCoordinator will return the nodes coordinator @@ -474,19 +481,9 @@ func (p *processComponentsHolder) ReceiptsRepository() factory.ReceiptsRepositor return p.receiptsRepository } -func (p *processComponentsHolder) collectClosableComponents() { - 
p.closeHandler.AddComponent(p.interceptorsContainer) - p.closeHandler.AddComponent(p.fullArchiveInterceptorsContainer) - p.closeHandler.AddComponent(p.resolversContainer) - p.closeHandler.AddComponent(p.epochStartTrigger) - p.closeHandler.AddComponent(p.blockProcessor) - p.closeHandler.AddComponent(p.validatorsProvider) - p.closeHandler.AddComponent(p.txsSenderHandler) -} - // Close will call the Close methods on all inner components func (p *processComponentsHolder) Close() error { - return p.closeHandler.Close() + return p.managedProcessComponentsCloser.Close() } // IsInterfaceNil returns true if there is no value under the interface diff --git a/node/chainSimulator/components/processComponents_test.go b/node/chainSimulator/components/processComponents_test.go new file mode 100644 index 00000000000..4628bbc4f66 --- /dev/null +++ b/node/chainSimulator/components/processComponents_test.go @@ -0,0 +1,414 @@ +package components + +import ( + "math/big" + "sync" + "testing" + + coreData "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-core-go/data/endProcess" + "github.com/multiversx/mx-chain-core-go/hashing/blake2b" + "github.com/multiversx/mx-chain-core-go/hashing/keccak" + "github.com/multiversx/mx-chain-core-go/marshal" + commonFactory "github.com/multiversx/mx-chain-go/common/factory" + disabledStatistics "github.com/multiversx/mx-chain-go/common/statistics/disabled" + "github.com/multiversx/mx-chain-go/config" + retriever "github.com/multiversx/mx-chain-go/dataRetriever" + mockFactory "github.com/multiversx/mx-chain-go/factory/mock" + "github.com/multiversx/mx-chain-go/integrationTests/mock" + "github.com/multiversx/mx-chain-go/sharding" + chainStorage "github.com/multiversx/mx-chain-go/storage" + "github.com/multiversx/mx-chain-go/testscommon" + "github.com/multiversx/mx-chain-go/testscommon/bootstrapMocks" + "github.com/multiversx/mx-chain-go/testscommon/components" + "github.com/multiversx/mx-chain-go/testscommon/cryptoMocks" + "github.com/multiversx/mx-chain-go/testscommon/dataRetriever" + "github.com/multiversx/mx-chain-go/testscommon/economicsmocks" + "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" + "github.com/multiversx/mx-chain-go/testscommon/epochNotifier" + "github.com/multiversx/mx-chain-go/testscommon/factory" + "github.com/multiversx/mx-chain-go/testscommon/genericMocks" + "github.com/multiversx/mx-chain-go/testscommon/guardianMocks" + "github.com/multiversx/mx-chain-go/testscommon/mainFactoryMocks" + "github.com/multiversx/mx-chain-go/testscommon/nodeTypeProviderMock" + "github.com/multiversx/mx-chain-go/testscommon/outport" + "github.com/multiversx/mx-chain-go/testscommon/p2pmocks" + "github.com/multiversx/mx-chain-go/testscommon/shardingMocks" + "github.com/multiversx/mx-chain-go/testscommon/statusHandler" + "github.com/multiversx/mx-chain-go/testscommon/storage" + updateMocks "github.com/multiversx/mx-chain-go/update/mock" + "github.com/stretchr/testify/require" +) + +const testingProtocolSustainabilityAddress = "erd1932eft30w753xyvme8d49qejgkjc09n5e49w4mwdjtm0neld797su0dlxp" + +var ( + addrPubKeyConv, _ = commonFactory.NewPubkeyConverter(config.PubkeyConfig{ + Length: 32, + Type: "bech32", + SignatureLength: 0, + Hrp: "erd", + }) + valPubKeyConv, _ = commonFactory.NewPubkeyConverter(config.PubkeyConfig{ + Length: 96, + Type: "hex", + SignatureLength: 48, + }) +) + +func createArgsProcessComponentsHolder() ArgsProcessComponentsHolder { + nodesSetup, _ := 
sharding.NewNodesSetup("../../../integrationTests/factory/testdata/nodesSetup.json", addrPubKeyConv, valPubKeyConv, 3) + + args := ArgsProcessComponentsHolder{ + Config: testscommon.GetGeneralConfig(), + EpochConfig: config.EpochConfig{ + GasSchedule: config.GasScheduleConfig{ + GasScheduleByEpochs: []config.GasScheduleByEpochs{ + { + StartEpoch: 0, + FileName: "../../../cmd/node/config/gasSchedules/gasScheduleV7.toml", + }, + }, + }, + }, + RoundConfig: testscommon.GetDefaultRoundsConfig(), + PrefsConfig: config.Preferences{}, + ImportDBConfig: config.ImportDbConfig{}, + FlagsConfig: config.ContextFlagsConfig{ + Version: "v1.0.0", + }, + NodesCoordinator: &shardingMocks.NodesCoordinatorStub{}, + SystemSCConfig: config.SystemSmartContractsConfig{ + ESDTSystemSCConfig: config.ESDTSystemSCConfig{ + BaseIssuingCost: "1000", + OwnerAddress: "erd1fpkcgel4gcmh8zqqdt043yfcn5tyx8373kg6q2qmkxzu4dqamc0swts65c", + }, + GovernanceSystemSCConfig: config.GovernanceSystemSCConfig{ + V1: config.GovernanceSystemSCConfigV1{ + ProposalCost: "500", + NumNodes: 100, + MinQuorum: 50, + MinPassThreshold: 50, + MinVetoThreshold: 50, + }, + Active: config.GovernanceSystemSCConfigActive{ + ProposalCost: "500", + MinQuorum: 0.5, + MinPassThreshold: 0.5, + MinVetoThreshold: 0.5, + }, + OwnerAddress: "erd1vxy22x0fj4zv6hktmydg8vpfh6euv02cz4yg0aaws6rrad5a5awqgqky80", + }, + StakingSystemSCConfig: config.StakingSystemSCConfig{ + GenesisNodePrice: "2500000000000000000000", + MinStakeValue: "1", + UnJailValue: "1", + MinStepValue: "1", + UnBondPeriod: 0, + NumRoundsWithoutBleed: 0, + MaximumPercentageToBleed: 0, + BleedPercentagePerRound: 0, + MaxNumberOfNodesForStake: 10, + ActivateBLSPubKeyMessageVerification: false, + MinUnstakeTokensValue: "1", + NodeLimitPercentage: 0.1, + StakeLimitPercentage: 1, + UnBondPeriodInEpochs: 10, + }, + DelegationManagerSystemSCConfig: config.DelegationManagerSystemSCConfig{ + MinCreationDeposit: "100", + MinStakeAmount: "100", + ConfigChangeAddress: "erd1vxy22x0fj4zv6hktmydg8vpfh6euv02cz4yg0aaws6rrad5a5awqgqky80", + }, + DelegationSystemSCConfig: config.DelegationSystemSCConfig{ + MinServiceFee: 0, + MaxServiceFee: 100, + }, + }, + DataComponents: &mock.DataComponentsStub{ + DataPool: dataRetriever.NewPoolsHolderMock(), + BlockChain: &testscommon.ChainHandlerStub{ + GetGenesisHeaderHashCalled: func() []byte { + return []byte("genesis hash") + }, + GetGenesisHeaderCalled: func() coreData.HeaderHandler { + return &testscommon.HeaderHandlerStub{} + }, + }, + MbProvider: &mock.MiniBlocksProviderStub{}, + Store: genericMocks.NewChainStorerMock(0), + }, + CoreComponents: &mockFactory.CoreComponentsMock{ + IntMarsh: &marshal.GogoProtoMarshalizer{}, + TxMarsh: &marshal.JsonMarshalizer{}, + UInt64ByteSliceConv: &mock.Uint64ByteSliceConverterMock{}, + AddrPubKeyConv: addrPubKeyConv, + ValPubKeyConv: valPubKeyConv, + NodesConfig: nodesSetup, + EpochChangeNotifier: &epochNotifier.EpochNotifierStub{}, + EconomicsHandler: &economicsmocks.EconomicsHandlerStub{ + ProtocolSustainabilityAddressCalled: func() string { + return testingProtocolSustainabilityAddress + }, + GenesisTotalSupplyCalled: func() *big.Int { + return big.NewInt(0).Mul(big.NewInt(1000000000000000000), big.NewInt(20000000)) + }, + }, + Hash: blake2b.NewBlake2b(), + TxVersionCheckHandler: &testscommon.TxVersionCheckerStub{}, + RatingHandler: &testscommon.RaterMock{}, + EnableEpochsHandlerField: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + EnableRoundsHandlerField: &testscommon.EnableRoundsHandlerStub{}, + 
EpochNotifierWithConfirm: &updateMocks.EpochStartNotifierStub{}, + RoundHandlerField: &testscommon.RoundHandlerMock{}, + RoundChangeNotifier: &epochNotifier.RoundNotifierStub{}, + ChanStopProcess: make(chan endProcess.ArgEndProcess, 1), + TxSignHasherField: keccak.NewKeccak(), + HardforkTriggerPubKeyField: []byte("hardfork pub key"), + WasmVMChangeLockerInternal: &sync.RWMutex{}, + NodeTypeProviderField: &nodeTypeProviderMock.NodeTypeProviderStub{}, + RatingsConfig: &testscommon.RatingsInfoMock{}, + PathHdl: &testscommon.PathManagerStub{}, + ProcessStatusHandlerInternal: &testscommon.ProcessStatusHandlerStub{}, + }, + CryptoComponents: &mock.CryptoComponentsStub{ + BlKeyGen: &cryptoMocks.KeyGenStub{}, + BlockSig: &cryptoMocks.SingleSignerStub{}, + MultiSigContainer: &cryptoMocks.MultiSignerContainerMock{ + MultiSigner: &cryptoMocks.MultisignerMock{}, + }, + PrivKey: &cryptoMocks.PrivateKeyStub{}, + PubKey: &cryptoMocks.PublicKeyStub{}, + PubKeyString: "pub key string", + PubKeyBytes: []byte("pub key bytes"), + TxKeyGen: &cryptoMocks.KeyGenStub{}, + TxSig: &cryptoMocks.SingleSignerStub{}, + PeerSignHandler: &cryptoMocks.PeerSignatureHandlerStub{}, + MsgSigVerifier: &testscommon.MessageSignVerifierMock{}, + ManagedPeersHolderField: &testscommon.ManagedPeersHolderStub{}, + KeysHandlerField: &testscommon.KeysHandlerStub{}, + }, + NetworkComponents: &mock.NetworkComponentsStub{ + Messenger: &p2pmocks.MessengerStub{}, + FullArchiveNetworkMessengerField: &p2pmocks.MessengerStub{}, + InputAntiFlood: &mock.P2PAntifloodHandlerStub{}, + OutputAntiFlood: &mock.P2PAntifloodHandlerStub{}, + PreferredPeersHolder: &p2pmocks.PeersHolderStub{}, + PeersRatingHandlerField: &p2pmocks.PeersRatingHandlerStub{}, + FullArchivePreferredPeersHolder: &p2pmocks.PeersHolderStub{}, + }, + BootstrapComponents: &mainFactoryMocks.BootstrapComponentsStub{ + ShCoordinator: mock.NewMultiShardsCoordinatorMock(2), + BootstrapParams: &bootstrapMocks.BootstrapParamsHandlerMock{}, + HdrIntegrityVerifier: &mock.HeaderIntegrityVerifierStub{}, + GuardedAccountHandlerField: &guardianMocks.GuardedAccountHandlerStub{}, + VersionedHdrFactory: &testscommon.VersionedHeaderFactoryStub{}, + }, + StatusComponents: &mock.StatusComponentsStub{ + Outport: &outport.OutportStub{}, + }, + StatusCoreComponents: &factory.StatusCoreComponentsStub{ + AppStatusHandlerField: &statusHandler.AppStatusHandlerStub{}, + StateStatsHandlerField: disabledStatistics.NewStateStatistics(), + }, + EconomicsConfig: config.EconomicsConfig{ + GlobalSettings: config.GlobalSettings{ + GenesisTotalSupply: "20000000000000000000000000", + MinimumInflation: 0, + GenesisMintingSenderAddress: "erd17rc0pu8s7rc0pu8s7rc0pu8s7rc0pu8s7rc0pu8s7rc0pu8s7rcqqkhty3", + YearSettings: []*config.YearSetting{ + { + Year: 0, + MaximumInflation: 0.01, + }, + }, + }, + }, + ConfigurationPathsHolder: config.ConfigurationPathsHolder{ + Genesis: "../../../integrationTests/factory/testdata/genesis.json", + SmartContracts: "../../../integrationTests/factory/testdata/genesisSmartContracts.json", + Nodes: "../../../integrationTests/factory/testdata/genesis.json", + }, + } + + args.StateComponents = components.GetStateComponents(args.CoreComponents, args.StatusCoreComponents) + return args +} + +func TestCreateProcessComponents(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + t.Run("should work", func(t *testing.T) { + comp, err := CreateProcessComponents(createArgsProcessComponentsHolder()) + require.NoError(t, err) + require.NotNil(t, comp) + + require.Nil(t, 
comp.Create()) + require.Nil(t, comp.Close()) + }) + t.Run("NewImportStartHandler failure should error", func(t *testing.T) { + t.Parallel() + + args := createArgsProcessComponentsHolder() + args.FlagsConfig.Version = "" + comp, err := CreateProcessComponents(args) + require.Error(t, err) + require.Nil(t, comp) + }) + t.Run("total supply conversion failure should error", func(t *testing.T) { + t.Parallel() + + args := createArgsProcessComponentsHolder() + args.EconomicsConfig.GlobalSettings.GenesisTotalSupply = "invalid number" + comp, err := CreateProcessComponents(args) + require.Error(t, err) + require.Nil(t, comp) + }) + t.Run("NewAccountsParser failure should error", func(t *testing.T) { + t.Parallel() + + args := createArgsProcessComponentsHolder() + args.ConfigurationPathsHolder.Genesis = "" + comp, err := CreateProcessComponents(args) + require.Error(t, err) + require.Nil(t, comp) + }) + t.Run("NewSmartContractsParser failure should error", func(t *testing.T) { + t.Parallel() + + args := createArgsProcessComponentsHolder() + args.ConfigurationPathsHolder.SmartContracts = "" + comp, err := CreateProcessComponents(args) + require.Error(t, err) + require.Nil(t, comp) + }) + t.Run("NewHistoryRepositoryFactory failure should error", func(t *testing.T) { + t.Parallel() + + args := createArgsProcessComponentsHolder() + dataMock, ok := args.DataComponents.(*mock.DataComponentsStub) + require.True(t, ok) + dataMock.Store = nil + comp, err := CreateProcessComponents(args) + require.Error(t, err) + require.Nil(t, comp) + }) + t.Run("historyRepositoryFactory.Create failure should error", func(t *testing.T) { + t.Parallel() + + args := createArgsProcessComponentsHolder() + args.Config.DbLookupExtensions.Enabled = true + dataMock, ok := args.DataComponents.(*mock.DataComponentsStub) + require.True(t, ok) + dataMock.Store = &storage.ChainStorerStub{ + GetStorerCalled: func(unitType retriever.UnitType) (chainStorage.Storer, error) { + if unitType == retriever.ESDTSuppliesUnit { + return nil, expectedErr + } + return &storage.StorerStub{}, nil + }, + } + comp, err := CreateProcessComponents(args) + require.Error(t, err) + require.Nil(t, comp) + }) + t.Run("NewGasScheduleNotifier failure should error", func(t *testing.T) { + t.Parallel() + + args := createArgsProcessComponentsHolder() + args.EpochConfig.GasSchedule = config.GasScheduleConfig{} + comp, err := CreateProcessComponents(args) + require.Error(t, err) + require.Nil(t, comp) + }) + t.Run("NewProcessComponentsFactory failure should error", func(t *testing.T) { + t.Parallel() + + args := createArgsProcessComponentsHolder() + dataMock, ok := args.DataComponents.(*mock.DataComponentsStub) + require.True(t, ok) + dataMock.BlockChain = nil + comp, err := CreateProcessComponents(args) + require.Error(t, err) + require.Nil(t, comp) + }) + t.Run("managedProcessComponents.Create failure should error", func(t *testing.T) { + t.Parallel() + + args := createArgsProcessComponentsHolder() + args.NodesCoordinator = nil + comp, err := CreateProcessComponents(args) + require.Error(t, err) + require.Nil(t, comp) + }) +} + +func TestProcessComponentsHolder_IsInterfaceNil(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + var comp *processComponentsHolder + require.True(t, comp.IsInterfaceNil()) + + comp, _ = CreateProcessComponents(createArgsProcessComponentsHolder()) + require.False(t, comp.IsInterfaceNil()) + require.Nil(t, comp.Close()) +} + +func TestProcessComponentsHolder_Getters(t *testing.T) { + if testing.Short() { + 
t.Skip("this is not a short test") + } + + comp, err := CreateProcessComponents(createArgsProcessComponentsHolder()) + require.NoError(t, err) + + require.NotNil(t, comp.SentSignaturesTracker()) + require.NotNil(t, comp.NodesCoordinator()) + require.NotNil(t, comp.ShardCoordinator()) + require.NotNil(t, comp.InterceptorsContainer()) + require.NotNil(t, comp.FullArchiveInterceptorsContainer()) + require.NotNil(t, comp.ResolversContainer()) + require.NotNil(t, comp.RequestersFinder()) + require.NotNil(t, comp.RoundHandler()) + require.NotNil(t, comp.EpochStartTrigger()) + require.NotNil(t, comp.EpochStartNotifier()) + require.NotNil(t, comp.ForkDetector()) + require.NotNil(t, comp.BlockProcessor()) + require.NotNil(t, comp.BlackListHandler()) + require.NotNil(t, comp.BootStorer()) + require.NotNil(t, comp.HeaderSigVerifier()) + require.NotNil(t, comp.HeaderIntegrityVerifier()) + require.NotNil(t, comp.ValidatorsStatistics()) + require.NotNil(t, comp.ValidatorsProvider()) + require.NotNil(t, comp.BlockTracker()) + require.NotNil(t, comp.PendingMiniBlocksHandler()) + require.NotNil(t, comp.RequestHandler()) + require.NotNil(t, comp.TxLogsProcessor()) + require.NotNil(t, comp.HeaderConstructionValidator()) + require.NotNil(t, comp.PeerShardMapper()) + require.NotNil(t, comp.FullArchivePeerShardMapper()) + require.NotNil(t, comp.FallbackHeaderValidator()) + require.NotNil(t, comp.APITransactionEvaluator()) + require.NotNil(t, comp.WhiteListHandler()) + require.NotNil(t, comp.WhiteListerVerifiedTxs()) + require.NotNil(t, comp.HistoryRepository()) + require.NotNil(t, comp.ImportStartHandler()) + require.NotNil(t, comp.RequestedItemsHandler()) + require.NotNil(t, comp.NodeRedundancyHandler()) + require.NotNil(t, comp.CurrentEpochProvider()) + require.NotNil(t, comp.ScheduledTxsExecutionHandler()) + require.NotNil(t, comp.TxsSenderHandler()) + require.NotNil(t, comp.HardforkTrigger()) + require.NotNil(t, comp.ProcessedMiniBlocksTracker()) + require.NotNil(t, comp.ESDTDataStorageHandlerForAPI()) + require.NotNil(t, comp.AccountsParser()) + require.NotNil(t, comp.ReceiptsRepository()) + require.Nil(t, comp.CheckSubcomponents()) + require.Empty(t, comp.String()) + + require.Nil(t, comp.Close()) +} diff --git a/node/chainSimulator/components/stateComponents.go b/node/chainSimulator/components/stateComponents.go index 65a1a064fe7..b3fddf55f40 100644 --- a/node/chainSimulator/components/stateComponents.go +++ b/node/chainSimulator/components/stateComponents.go @@ -1,6 +1,8 @@ package components import ( + "io" + chainData "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/config" @@ -27,11 +29,11 @@ type stateComponentsHolder struct { triesContainer common.TriesHolder triesStorageManager map[string]common.StorageManager missingTrieNodesNotifier common.MissingTrieNodesNotifier - closeFunc func() error + stateComponentsCloser io.Closer } // CreateStateComponents will create the state components holder -func CreateStateComponents(args ArgsStateComponents) (factory.StateComponentsHandler, error) { +func CreateStateComponents(args ArgsStateComponents) (*stateComponentsHolder, error) { stateComponentsFactory, err := factoryState.NewStateComponentsFactory(factoryState.StateComponentsFactoryArgs{ Config: args.Config, Core: args.CoreComponents, @@ -68,7 +70,7 @@ func CreateStateComponents(args ArgsStateComponents) (factory.StateComponentsHan triesContainer: stateComp.TriesContainer(), triesStorageManager: stateComp.TrieStorageManagers(), 
missingTrieNodesNotifier: stateComp.MissingTrieNodesNotifier(), - closeFunc: stateComp.Close, + stateComponentsCloser: stateComp, }, nil } @@ -109,7 +111,7 @@ func (s *stateComponentsHolder) MissingTrieNodesNotifier() common.MissingTrieNod // Close will close the state components func (s *stateComponentsHolder) Close() error { - return s.closeFunc() + return s.stateComponentsCloser.Close() } // IsInterfaceNil returns true if there is no value under the interface diff --git a/node/chainSimulator/components/stateComponents_test.go b/node/chainSimulator/components/stateComponents_test.go new file mode 100644 index 00000000000..5422d2ea352 --- /dev/null +++ b/node/chainSimulator/components/stateComponents_test.go @@ -0,0 +1,99 @@ +package components + +import ( + "testing" + + disabledStatistics "github.com/multiversx/mx-chain-go/common/statistics/disabled" + mockFactory "github.com/multiversx/mx-chain-go/factory/mock" + "github.com/multiversx/mx-chain-go/testscommon" + "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" + "github.com/multiversx/mx-chain-go/testscommon/factory" + "github.com/multiversx/mx-chain-go/testscommon/genericMocks" + "github.com/multiversx/mx-chain-go/testscommon/statusHandler" + "github.com/stretchr/testify/require" +) + +func createArgsStateComponents() ArgsStateComponents { + return ArgsStateComponents{ + Config: testscommon.GetGeneralConfig(), + CoreComponents: &mockFactory.CoreComponentsMock{ + IntMarsh: &testscommon.MarshallerStub{}, + Hash: &testscommon.HasherStub{}, + PathHdl: &testscommon.PathManagerStub{}, + ProcessStatusHandlerInternal: &testscommon.ProcessStatusHandlerStub{}, + EnableEpochsHandlerField: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + AddrPubKeyConv: &testscommon.PubkeyConverterStub{}, + }, + StatusCore: &factory.StatusCoreComponentsStub{ + AppStatusHandlerField: &statusHandler.AppStatusHandlerStub{}, + StateStatsHandlerField: disabledStatistics.NewStateStatistics(), + }, + StoreService: genericMocks.NewChainStorerMock(0), + ChainHandler: &testscommon.ChainHandlerStub{}, + } +} + +func TestCreateStateComponents(t *testing.T) { + t.Parallel() + + t.Run("should work", func(t *testing.T) { + t.Parallel() + + comp, err := CreateStateComponents(createArgsStateComponents()) + require.NoError(t, err) + require.NotNil(t, comp) + + require.Nil(t, comp.Create()) + require.Nil(t, comp.Close()) + }) + t.Run("NewStateComponentsFactory failure should error", func(t *testing.T) { + t.Parallel() + + args := createArgsStateComponents() + args.CoreComponents = nil + comp, err := CreateStateComponents(args) + require.Error(t, err) + require.Nil(t, comp) + }) + t.Run("stateComp.Create failure should error", func(t *testing.T) { + t.Parallel() + + args := createArgsStateComponents() + coreMock, ok := args.CoreComponents.(*mockFactory.CoreComponentsMock) + require.True(t, ok) + coreMock.EnableEpochsHandlerField = nil + comp, err := CreateStateComponents(args) + require.Error(t, err) + require.Nil(t, comp) + }) +} + +func TestStateComponentsHolder_IsInterfaceNil(t *testing.T) { + t.Parallel() + + var comp *stateComponentsHolder + require.True(t, comp.IsInterfaceNil()) + + comp, _ = CreateStateComponents(createArgsStateComponents()) + require.False(t, comp.IsInterfaceNil()) + require.Nil(t, comp.Close()) +} + +func TestStateComponentsHolder_Getters(t *testing.T) { + t.Parallel() + + comp, err := CreateStateComponents(createArgsStateComponents()) + require.NoError(t, err) + + require.NotNil(t, comp.PeerAccounts()) + require.NotNil(t, 
comp.AccountsAdapter()) + require.NotNil(t, comp.AccountsAdapterAPI()) + require.NotNil(t, comp.AccountsRepository()) + require.NotNil(t, comp.TriesContainer()) + require.NotNil(t, comp.TrieStorageManagers()) + require.NotNil(t, comp.MissingTrieNodesNotifier()) + require.Nil(t, comp.CheckSubcomponents()) + require.Empty(t, comp.String()) + + require.Nil(t, comp.Close()) +} diff --git a/node/chainSimulator/components/statusComponents.go b/node/chainSimulator/components/statusComponents.go index 9aef2ea484b..65f9dbb7667 100644 --- a/node/chainSimulator/components/statusComponents.go +++ b/node/chainSimulator/components/statusComponents.go @@ -13,7 +13,6 @@ import ( "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/common/statistics" "github.com/multiversx/mx-chain-go/errors" - "github.com/multiversx/mx-chain-go/factory" "github.com/multiversx/mx-chain-go/integrationTests/mock" "github.com/multiversx/mx-chain-go/outport" "github.com/multiversx/mx-chain-go/process" @@ -33,7 +32,7 @@ type statusComponentsHolder struct { } // CreateStatusComponents will create a new instance of status components holder -func CreateStatusComponents(shardID uint32, appStatusHandler core.AppStatusHandler, statusPollingIntervalSec int) (factory.StatusComponentsHandler, error) { +func CreateStatusComponents(shardID uint32, appStatusHandler core.AppStatusHandler, statusPollingIntervalSec int) (*statusComponentsHolder, error) { if check.IfNil(appStatusHandler) { return nil, core.ErrNilAppStatusHandler } diff --git a/node/chainSimulator/components/statusComponents_test.go b/node/chainSimulator/components/statusComponents_test.go new file mode 100644 index 00000000000..0e83e435003 --- /dev/null +++ b/node/chainSimulator/components/statusComponents_test.go @@ -0,0 +1,135 @@ +package components + +import ( + "testing" + "time" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/core/atomic" + "github.com/multiversx/mx-chain-go/common" + mxErrors "github.com/multiversx/mx-chain-go/errors" + "github.com/multiversx/mx-chain-go/integrationTests/mock" + "github.com/multiversx/mx-chain-go/process" + "github.com/multiversx/mx-chain-go/testscommon/statusHandler" + "github.com/stretchr/testify/require" +) + +func TestCreateStatusComponents(t *testing.T) { + t.Parallel() + + t.Run("should work", func(t *testing.T) { + t.Parallel() + + comp, err := CreateStatusComponents(0, &statusHandler.AppStatusHandlerStub{}, 5) + require.NoError(t, err) + require.NotNil(t, comp) + + require.Nil(t, comp.Create()) + require.Nil(t, comp.Close()) + }) + t.Run("nil app status handler should error", func(t *testing.T) { + t.Parallel() + + comp, err := CreateStatusComponents(0, nil, 5) + require.Equal(t, core.ErrNilAppStatusHandler, err) + require.Nil(t, comp) + }) +} + +func TestStatusComponentsHolder_IsInterfaceNil(t *testing.T) { + t.Parallel() + + var comp *statusComponentsHolder + require.True(t, comp.IsInterfaceNil()) + + comp, _ = CreateStatusComponents(0, &statusHandler.AppStatusHandlerStub{}, 5) + require.False(t, comp.IsInterfaceNil()) + require.Nil(t, comp.Close()) +} + +func TestStatusComponentsHolder_Getters(t *testing.T) { + t.Parallel() + + comp, err := CreateStatusComponents(0, &statusHandler.AppStatusHandlerStub{}, 5) + require.NoError(t, err) + + require.NotNil(t, comp.OutportHandler()) + require.NotNil(t, comp.SoftwareVersionChecker()) + require.NotNil(t, comp.ManagedPeersMonitor()) + require.Nil(t, comp.CheckSubcomponents()) + require.Empty(t, comp.String()) + + 
require.Nil(t, comp.Close()) +} +func TestStatusComponentsHolder_SetForkDetector(t *testing.T) { + t.Parallel() + + comp, err := CreateStatusComponents(0, &statusHandler.AppStatusHandlerStub{}, 5) + require.NoError(t, err) + + err = comp.SetForkDetector(nil) + require.Equal(t, process.ErrNilForkDetector, err) + + err = comp.SetForkDetector(&mock.ForkDetectorStub{}) + require.NoError(t, err) + + require.Nil(t, comp.Close()) +} + +func TestStatusComponentsHolder_StartPolling(t *testing.T) { + t.Parallel() + + t.Run("nil fork detector should error", func(t *testing.T) { + t.Parallel() + + comp, err := CreateStatusComponents(0, &statusHandler.AppStatusHandlerStub{}, 5) + require.NoError(t, err) + + err = comp.StartPolling() + require.Equal(t, process.ErrNilForkDetector, err) + }) + t.Run("NewAppStatusPolling failure should error", func(t *testing.T) { + t.Parallel() + + comp, err := CreateStatusComponents(0, &statusHandler.AppStatusHandlerStub{}, 0) + require.NoError(t, err) + + err = comp.SetForkDetector(&mock.ForkDetectorStub{}) + require.NoError(t, err) + + err = comp.StartPolling() + require.Equal(t, mxErrors.ErrStatusPollingInit, err) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + providedHighestNonce := uint64(123) + providedStatusPollingIntervalSec := 1 + wasSetUInt64ValueCalled := atomic.Flag{} + appStatusHandler := &statusHandler.AppStatusHandlerStub{ + SetUInt64ValueHandler: func(key string, value uint64) { + require.Equal(t, common.MetricProbableHighestNonce, key) + require.Equal(t, providedHighestNonce, value) + wasSetUInt64ValueCalled.SetValue(true) + }, + } + comp, err := CreateStatusComponents(0, appStatusHandler, providedStatusPollingIntervalSec) + require.NoError(t, err) + + forkDetector := &mock.ForkDetectorStub{ + ProbableHighestNonceCalled: func() uint64 { + return providedHighestNonce + }, + } + err = comp.SetForkDetector(forkDetector) + require.NoError(t, err) + + err = comp.StartPolling() + require.NoError(t, err) + + time.Sleep(time.Duration(providedStatusPollingIntervalSec+1) * time.Second) + require.True(t, wasSetUInt64ValueCalled.IsSet()) + + require.Nil(t, comp.Close()) + }) +} diff --git a/node/chainSimulator/components/statusCoreComponents.go b/node/chainSimulator/components/statusCoreComponents.go index 47428f14a95..7ac3b9045fa 100644 --- a/node/chainSimulator/components/statusCoreComponents.go +++ b/node/chainSimulator/components/statusCoreComponents.go @@ -1,6 +1,8 @@ package components import ( + "io" + "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/config" @@ -10,18 +12,18 @@ import ( ) type statusCoreComponentsHolder struct { - closeHandler *closeHandler - resourceMonitor factory.ResourceMonitor - networkStatisticsProvider factory.NetworkStatisticsProvider - trieSyncStatisticsProvider factory.TrieSyncStatisticsProvider - statusHandler core.AppStatusHandler - statusMetrics external.StatusMetricsHandler - persistentStatusHandler factory.PersistentStatusHandler - stateStatisticsHandler common.StateStatisticsHandler + resourceMonitor factory.ResourceMonitor + networkStatisticsProvider factory.NetworkStatisticsProvider + trieSyncStatisticsProvider factory.TrieSyncStatisticsProvider + statusHandler core.AppStatusHandler + statusMetrics external.StatusMetricsHandler + persistentStatusHandler factory.PersistentStatusHandler + stateStatisticsHandler common.StateStatisticsHandler + managedStatusCoreComponentsCloser io.Closer } // CreateStatusCoreComponents will create a 
new instance of factory.StatusCoreComponentsHandler -func CreateStatusCoreComponents(configs config.Configs, coreComponents factory.CoreComponentsHolder) (factory.StatusCoreComponentsHandler, error) { +func CreateStatusCoreComponents(configs config.Configs, coreComponents factory.CoreComponentsHolder) (*statusCoreComponentsHolder, error) { var err error statusCoreComponentsFactory, err := statusCore.NewStatusCoreComponentsFactory(statusCore.StatusCoreComponentsFactoryArgs{ @@ -50,18 +52,16 @@ func CreateStatusCoreComponents(configs config.Configs, coreComponents factory.C _ = managedStatusCoreComponents.ResourceMonitor().Close() instance := &statusCoreComponentsHolder{ - closeHandler: NewCloseHandler(), - resourceMonitor: managedStatusCoreComponents.ResourceMonitor(), - networkStatisticsProvider: managedStatusCoreComponents.NetworkStatistics(), - trieSyncStatisticsProvider: managedStatusCoreComponents.TrieSyncStatistics(), - statusHandler: managedStatusCoreComponents.AppStatusHandler(), - statusMetrics: managedStatusCoreComponents.StatusMetrics(), - persistentStatusHandler: managedStatusCoreComponents.PersistentStatusHandler(), - stateStatisticsHandler: managedStatusCoreComponents.StateStatsHandler(), + resourceMonitor: managedStatusCoreComponents.ResourceMonitor(), + networkStatisticsProvider: managedStatusCoreComponents.NetworkStatistics(), + trieSyncStatisticsProvider: managedStatusCoreComponents.TrieSyncStatistics(), + statusHandler: managedStatusCoreComponents.AppStatusHandler(), + statusMetrics: managedStatusCoreComponents.StatusMetrics(), + persistentStatusHandler: managedStatusCoreComponents.PersistentStatusHandler(), + stateStatisticsHandler: managedStatusCoreComponents.StateStatsHandler(), + managedStatusCoreComponentsCloser: managedStatusCoreComponents, } - instance.collectClosableComponents() - return instance, nil } @@ -100,16 +100,9 @@ func (s *statusCoreComponentsHolder) PersistentStatusHandler() factory.Persisten return s.persistentStatusHandler } -func (s *statusCoreComponentsHolder) collectClosableComponents() { - s.closeHandler.AddComponent(s.resourceMonitor) - s.closeHandler.AddComponent(s.networkStatisticsProvider) - s.closeHandler.AddComponent(s.statusHandler) - s.closeHandler.AddComponent(s.persistentStatusHandler) -} - // Close will call the Close methods on all inner components func (s *statusCoreComponentsHolder) Close() error { - return s.closeHandler.Close() + return s.managedStatusCoreComponentsCloser.Close() } // IsInterfaceNil returns true if there is no value under the interface diff --git a/node/chainSimulator/components/statusCoreComponents_test.go b/node/chainSimulator/components/statusCoreComponents_test.go new file mode 100644 index 00000000000..a616890644f --- /dev/null +++ b/node/chainSimulator/components/statusCoreComponents_test.go @@ -0,0 +1,113 @@ +package components + +import ( + "testing" + + "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/factory" + "github.com/multiversx/mx-chain-go/factory/mock" + mockTests "github.com/multiversx/mx-chain-go/integrationTests/mock" + "github.com/multiversx/mx-chain-go/testscommon" + "github.com/multiversx/mx-chain-go/testscommon/components" + "github.com/multiversx/mx-chain-go/testscommon/economicsmocks" + "github.com/multiversx/mx-chain-go/testscommon/genesisMocks" + "github.com/stretchr/testify/require" +) + +func createArgs() (config.Configs, factory.CoreComponentsHolder) { + generalCfg := testscommon.GetGeneralConfig() + ratingsCfg := components.CreateDummyRatingsConfig() + 
economicsCfg := components.CreateDummyEconomicsConfig() + cfg := config.Configs{ + GeneralConfig: &generalCfg, + EpochConfig: &config.EpochConfig{ + GasSchedule: config.GasScheduleConfig{ + GasScheduleByEpochs: []config.GasScheduleByEpochs{ + { + StartEpoch: 0, + FileName: "gasScheduleV1.toml", + }, + }, + }, + }, + RoundConfig: &config.RoundConfig{ + RoundActivations: map[string]config.ActivationRoundByName{ + "Example": { + Round: "18446744073709551615", + }, + }, + }, + RatingsConfig: &ratingsCfg, + EconomicsConfig: &economicsCfg, + } + + return cfg, &mock.CoreComponentsMock{ + EconomicsHandler: &economicsmocks.EconomicsHandlerStub{}, + IntMarsh: &testscommon.MarshallerStub{}, + UInt64ByteSliceConv: &mockTests.Uint64ByteSliceConverterMock{}, + NodesConfig: &genesisMocks.NodesSetupStub{}, + } +} + +func TestCreateStatusCoreComponents(t *testing.T) { + t.Parallel() + + t.Run("should work", func(t *testing.T) { + t.Parallel() + + cfg, coreComp := createArgs() + comp, err := CreateStatusCoreComponents(cfg, coreComp) + require.NoError(t, err) + require.NotNil(t, comp) + + require.Nil(t, comp.Create()) + require.Nil(t, comp.Close()) + }) + t.Run("NewStatusCoreComponentsFactory failure should error", func(t *testing.T) { + t.Parallel() + + cfg, _ := createArgs() + comp, err := CreateStatusCoreComponents(cfg, nil) + require.Error(t, err) + require.Nil(t, comp) + }) + t.Run("managedStatusCoreComponents.Create failure should error", func(t *testing.T) { + t.Parallel() + + cfg, coreComp := createArgs() + cfg.GeneralConfig.ResourceStats.RefreshIntervalInSec = 0 + comp, err := CreateStatusCoreComponents(cfg, coreComp) + require.Error(t, err) + require.Nil(t, comp) + }) +} + +func TestStatusCoreComponentsHolder_IsInterfaceNil(t *testing.T) { + t.Parallel() + + var comp *statusCoreComponentsHolder + require.True(t, comp.IsInterfaceNil()) + + cfg, coreComp := createArgs() + comp, _ = CreateStatusCoreComponents(cfg, coreComp) + require.False(t, comp.IsInterfaceNil()) + require.Nil(t, comp.Close()) +} + +func TestStatusCoreComponentsHolder_Getters(t *testing.T) { + t.Parallel() + + cfg, coreComp := createArgs() + comp, err := CreateStatusCoreComponents(cfg, coreComp) + require.NoError(t, err) + + require.NotNil(t, comp.ResourceMonitor()) + require.NotNil(t, comp.NetworkStatistics()) + require.NotNil(t, comp.TrieSyncStatistics()) + require.NotNil(t, comp.AppStatusHandler()) + require.NotNil(t, comp.StatusMetrics()) + require.NotNil(t, comp.PersistentStatusHandler()) + require.NotNil(t, comp.StateStatsHandler()) + require.Nil(t, comp.CheckSubcomponents()) + require.Empty(t, comp.String()) +} diff --git a/node/chainSimulator/components/storageService_test.go b/node/chainSimulator/components/storageService_test.go new file mode 100644 index 00000000000..3be398b53e6 --- /dev/null +++ b/node/chainSimulator/components/storageService_test.go @@ -0,0 +1,51 @@ +package components + +import ( + "testing" + + "github.com/multiversx/mx-chain-go/dataRetriever" + "github.com/stretchr/testify/require" +) + +func TestCreateStore(t *testing.T) { + t.Parallel() + + store := CreateStore(2) + require.NotNil(t, store) + + expectedUnits := []dataRetriever.UnitType{ + dataRetriever.TransactionUnit, + dataRetriever.MiniBlockUnit, + dataRetriever.MetaBlockUnit, + dataRetriever.PeerChangesUnit, + dataRetriever.BlockHeaderUnit, + dataRetriever.UnsignedTransactionUnit, + dataRetriever.RewardTransactionUnit, + dataRetriever.MetaHdrNonceHashDataUnit, + dataRetriever.BootstrapUnit, + dataRetriever.StatusMetricsUnit, + 
dataRetriever.ReceiptsUnit, + dataRetriever.ScheduledSCRsUnit, + dataRetriever.TxLogsUnit, + dataRetriever.UserAccountsUnit, + dataRetriever.PeerAccountsUnit, + dataRetriever.ESDTSuppliesUnit, + dataRetriever.RoundHdrHashDataUnit, + dataRetriever.MiniblocksMetadataUnit, + dataRetriever.MiniblockHashByTxHashUnit, + dataRetriever.EpochByHashUnit, + dataRetriever.ResultsHashesByTxHashUnit, + dataRetriever.TrieEpochRootHashUnit, + dataRetriever.ShardHdrNonceHashDataUnit, + dataRetriever.UnitType(101), // shard 2 + } + + all := store.GetAllStorers() + require.Equal(t, len(expectedUnits), len(all)) + + for i := 0; i < len(expectedUnits); i++ { + unit, err := store.GetStorer(expectedUnits[i]) + require.NoError(t, err) + require.NotNil(t, unit) + } +} diff --git a/node/chainSimulator/components/syncedBroadcastNetwork.go b/node/chainSimulator/components/syncedBroadcastNetwork.go index 572689b0c0a..99e8168c45e 100644 --- a/node/chainSimulator/components/syncedBroadcastNetwork.go +++ b/node/chainSimulator/components/syncedBroadcastNetwork.go @@ -62,6 +62,7 @@ func (network *syncedBroadcastNetwork) Broadcast(pid core.PeerID, topic string, DataField: buff, TopicField: topic, BroadcastMethodField: p2p.Broadcast, + PeerField: pid, } handler.receive(pid, message) @@ -84,6 +85,7 @@ func (network *syncedBroadcastNetwork) SendDirectly(from core.PeerID, topic stri DataField: buff, TopicField: topic, BroadcastMethodField: p2p.Direct, + PeerField: from, } handler.receive(from, message) diff --git a/node/chainSimulator/components/syncedBroadcastNetwork_test.go b/node/chainSimulator/components/syncedBroadcastNetwork_test.go index 1067e1155be..74e061a819a 100644 --- a/node/chainSimulator/components/syncedBroadcastNetwork_test.go +++ b/node/chainSimulator/components/syncedBroadcastNetwork_test.go @@ -23,7 +23,7 @@ func TestSyncedBroadcastNetwork_BroadcastShouldWorkOn3Peers(t *testing.T) { peer1, err := NewSyncedMessenger(network) assert.Nil(t, err) - processor1 := createMessageProcessor(messages, peer1.ID()) + processor1 := createMessageProcessor(t, messages, peer1.ID()) _ = peer1.CreateTopic(globalTopic, true) _ = peer1.RegisterMessageProcessor(globalTopic, "", processor1) _ = peer1.CreateTopic(oneTwoTopic, true) @@ -33,7 +33,7 @@ func TestSyncedBroadcastNetwork_BroadcastShouldWorkOn3Peers(t *testing.T) { peer2, err := NewSyncedMessenger(network) assert.Nil(t, err) - processor2 := createMessageProcessor(messages, peer2.ID()) + processor2 := createMessageProcessor(t, messages, peer2.ID()) _ = peer2.CreateTopic(globalTopic, true) _ = peer2.RegisterMessageProcessor(globalTopic, "", processor2) _ = peer2.CreateTopic(oneTwoTopic, true) @@ -43,7 +43,7 @@ func TestSyncedBroadcastNetwork_BroadcastShouldWorkOn3Peers(t *testing.T) { peer3, err := NewSyncedMessenger(network) assert.Nil(t, err) - processor3 := createMessageProcessor(messages, peer3.ID()) + processor3 := createMessageProcessor(t, messages, peer3.ID()) _ = peer3.CreateTopic(globalTopic, true) _ = peer3.RegisterMessageProcessor(globalTopic, "", processor3) _ = peer3.CreateTopic(oneThreeTopic, true) @@ -88,13 +88,13 @@ func TestSyncedBroadcastNetwork_BroadcastOnAnUnjoinedTopicShouldDiscardMessage(t peer1, err := NewSyncedMessenger(network) assert.Nil(t, err) - processor1 := createMessageProcessor(messages, peer1.ID()) + processor1 := createMessageProcessor(t, messages, peer1.ID()) _ = peer1.CreateTopic(globalTopic, true) _ = peer1.RegisterMessageProcessor(globalTopic, "", processor1) peer2, err := NewSyncedMessenger(network) assert.Nil(t, err) - processor2 := 
createMessageProcessor(messages, peer2.ID()) + processor2 := createMessageProcessor(t, messages, peer2.ID()) _ = peer2.CreateTopic(globalTopic, true) _ = peer2.RegisterMessageProcessor(globalTopic, "", processor2) _ = peer2.CreateTopic(twoThreeTopic, true) @@ -102,7 +102,7 @@ func TestSyncedBroadcastNetwork_BroadcastOnAnUnjoinedTopicShouldDiscardMessage(t peer3, err := NewSyncedMessenger(network) assert.Nil(t, err) - processor3 := createMessageProcessor(messages, peer3.ID()) + processor3 := createMessageProcessor(t, messages, peer3.ID()) _ = peer3.CreateTopic(globalTopic, true) _ = peer3.RegisterMessageProcessor(globalTopic, "", processor3) _ = peer3.CreateTopic(twoThreeTopic, true) @@ -128,13 +128,13 @@ func TestSyncedBroadcastNetwork_SendDirectlyShouldWorkBetween2peers(t *testing.T peer1, err := NewSyncedMessenger(network) assert.Nil(t, err) - processor1 := createMessageProcessor(messages, peer1.ID()) + processor1 := createMessageProcessor(t, messages, peer1.ID()) _ = peer1.CreateTopic(topic, true) _ = peer1.RegisterMessageProcessor(topic, "", processor1) peer2, err := NewSyncedMessenger(network) assert.Nil(t, err) - processor2 := createMessageProcessor(messages, peer2.ID()) + processor2 := createMessageProcessor(t, messages, peer2.ID()) _ = peer2.CreateTopic(topic, true) _ = peer2.RegisterMessageProcessor(topic, "", processor2) @@ -156,13 +156,13 @@ func TestSyncedBroadcastNetwork_SendDirectlyToSelfShouldWork(t *testing.T) { peer1, err := NewSyncedMessenger(network) assert.Nil(t, err) - processor1 := createMessageProcessor(messages, peer1.ID()) + processor1 := createMessageProcessor(t, messages, peer1.ID()) _ = peer1.CreateTopic(topic, true) _ = peer1.RegisterMessageProcessor(topic, "", processor1) peer2, err := NewSyncedMessenger(network) assert.Nil(t, err) - processor2 := createMessageProcessor(messages, peer2.ID()) + processor2 := createMessageProcessor(t, messages, peer2.ID()) _ = peer2.CreateTopic(topic, true) _ = peer2.RegisterMessageProcessor(topic, "", processor2) @@ -184,7 +184,7 @@ func TestSyncedBroadcastNetwork_SendDirectlyShouldNotDeadlock(t *testing.T) { peer1, err := NewSyncedMessenger(network) assert.Nil(t, err) - processor1 := createMessageProcessor(messages, peer1.ID()) + processor1 := createMessageProcessor(t, messages, peer1.ID()) _ = peer1.CreateTopic(topic, true) _ = peer1.RegisterMessageProcessor(topic, "", processor1) @@ -283,7 +283,7 @@ func TestSyncedBroadcastNetwork_GetConnectedPeersOnTopic(t *testing.T) { assert.Equal(t, 3, len(peersInfo.UnknownPeers)) } -func createMessageProcessor(dataMap map[core.PeerID]map[string][]byte, pid core.PeerID) p2p.MessageProcessor { +func createMessageProcessor(t *testing.T, dataMap map[core.PeerID]map[string][]byte, pid core.PeerID) p2p.MessageProcessor { return &p2pmocks.MessageProcessorStub{ ProcessReceivedMessageCalled: func(message p2p.MessageP2P, fromConnectedPeer core.PeerID, source p2p.MessageHandler) error { m, found := dataMap[pid] @@ -292,6 +292,9 @@ func createMessageProcessor(dataMap map[core.PeerID]map[string][]byte, pid core. 
dataMap[pid] = m } + // some interceptors/resolvers require the peer field to match the sender of the message + assert.Equal(t, message.Peer().Bytes(), message.From()) + assert.Equal(t, message.Peer(), fromConnectedPeer) m[message.Topic()] = message.Data() return nil diff --git a/node/chainSimulator/components/syncedMessenger.go b/node/chainSimulator/components/syncedMessenger.go index f69f572191c..d30ac85b409 100644 --- a/node/chainSimulator/components/syncedMessenger.go +++ b/node/chainSimulator/components/syncedMessenger.go @@ -27,9 +27,12 @@ var ( errTopicNotCreated = errors.New("topic not created") errTopicHasProcessor = errors.New("there is already a message processor for provided topic and identifier") errInvalidSignature = errors.New("invalid signature") + errMessengerIsClosed = errors.New("messenger is closed") ) type syncedMessenger struct { + mutIsClosed sync.RWMutex + isClosed bool mutOperation sync.RWMutex topics map[string]map[string]p2p.MessageProcessor network SyncedBroadcastNetworkHandler @@ -66,6 +69,9 @@ func (messenger *syncedMessenger) HasCompatibleProtocolID(_ string) bool { } func (messenger *syncedMessenger) receive(fromConnectedPeer core.PeerID, message p2p.MessageP2P) { + if messenger.closed() { + return + } if check.IfNil(message) { return } @@ -90,6 +96,10 @@ func (messenger *syncedMessenger) ProcessReceivedMessage(_ p2p.MessageP2P, _ cor // CreateTopic will create a topic for receiving data func (messenger *syncedMessenger) CreateTopic(name string, _ bool) error { + if messenger.closed() { + return errMessengerIsClosed + } + messenger.mutOperation.Lock() defer messenger.mutOperation.Unlock() @@ -115,6 +125,9 @@ func (messenger *syncedMessenger) HasTopic(name string) bool { // RegisterMessageProcessor will try to register a message processor on the provided topic & identifier func (messenger *syncedMessenger) RegisterMessageProcessor(topic string, identifier string, handler p2p.MessageProcessor) error { + if messenger.closed() { + return errMessengerIsClosed + } if check.IfNil(handler) { return fmt.Errorf("programming error in syncedMessenger.RegisterMessageProcessor, "+ "%w for topic %s and identifier %s", errNilMessageProcessor, topic, identifier) @@ -170,6 +183,9 @@ func (messenger *syncedMessenger) UnregisterMessageProcessor(topic string, ident // Broadcast will broadcast the provided buffer on the topic in a synchronous manner func (messenger *syncedMessenger) Broadcast(topic string, buff []byte) { + if messenger.closed() { + return + } if !messenger.HasTopic(topic) { return } @@ -194,6 +210,10 @@ func (messenger *syncedMessenger) BroadcastOnChannelUsingPrivateKey(_ string, to // SendToConnectedPeer will send the message to the peer func (messenger *syncedMessenger) SendToConnectedPeer(topic string, buff []byte, peerID core.PeerID) error { + if messenger.closed() { + return errMessengerIsClosed + } + if !messenger.HasTopic(topic) { return nil } @@ -356,9 +376,20 @@ func (messenger *syncedMessenger) SetDebugger(_ p2p.Debugger) error { -// Close does nothing and returns nil +// Close will mark the messenger as closed func (messenger *syncedMessenger) Close() error { + messenger.mutIsClosed.Lock() + messenger.isClosed = true + messenger.mutIsClosed.Unlock() + return nil } +func (messenger *syncedMessenger) closed() bool { + messenger.mutIsClosed.RLock() + defer messenger.mutIsClosed.RUnlock() + + return messenger.isClosed +} + // IsInterfaceNil returns true if there is no value under the interface func (messenger *syncedMessenger) IsInterfaceNil() bool { return messenger == nil diff --git 
a/node/chainSimulator/components/testOnlyProcessingNode.go b/node/chainSimulator/components/testOnlyProcessingNode.go index 14ec26cba86..07c8561c73f 100644 --- a/node/chainSimulator/components/testOnlyProcessingNode.go +++ b/node/chainSimulator/components/testOnlyProcessingNode.go @@ -23,7 +23,6 @@ import ( "github.com/multiversx/mx-chain-go/node/chainSimulator/dtos" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/process/block/postprocess" - "github.com/multiversx/mx-chain-go/process/economics" "github.com/multiversx/mx-chain-go/process/smartContract" "github.com/multiversx/mx-chain-go/sharding" "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" @@ -39,12 +38,14 @@ type ArgsTestOnlyProcessingNode struct { SyncedBroadcastNetwork SyncedBroadcastNetworkHandler InitialRound int64 + InitialNonce uint64 GasScheduleFilename string NumShards uint32 ShardIDStr string BypassTxSignatureCheck bool MinNodesPerShard uint32 MinNodesMeta uint32 + RoundDurationInMillis uint64 } type testOnlyProcessingNode struct { @@ -59,14 +60,13 @@ type testOnlyProcessingNode struct { ProcessComponentsHolder factory.ProcessComponentsHandler DataComponentsHolder factory.DataComponentsHandler - NodesCoordinator nodesCoordinator.NodesCoordinator - ChainHandler chainData.ChainHandler - ArgumentsParser process.ArgumentsParser - TransactionFeeHandler process.TransactionFeeHandler - StoreService dataRetriever.StorageService - BuiltinFunctionsCostHandler economics.BuiltInFunctionsCostHandler - DataPool dataRetriever.PoolsHolder - broadcastMessenger consensus.BroadcastMessenger + NodesCoordinator nodesCoordinator.NodesCoordinator + ChainHandler chainData.ChainHandler + ArgumentsParser process.ArgumentsParser + TransactionFeeHandler process.TransactionFeeHandler + StoreService dataRetriever.StorageService + DataPool dataRetriever.PoolsHolder + broadcastMessenger consensus.BroadcastMessenger httpServer shared.UpgradeableHttpServerHandler facadeHandler shared.FacadeHandler @@ -81,10 +81,7 @@ func NewTestOnlyProcessingNode(args ArgsTestOnlyProcessingNode) (*testOnlyProces } var err error - instance.TransactionFeeHandler, err = postprocess.NewFeeAccumulator() - if err != nil { - return nil, err - } + instance.TransactionFeeHandler = postprocess.NewFeeAccumulator() instance.CoreComponentsHolder, err = CreateCoreComponents(ArgsCoreComponentsHolder{ Config: *args.Configs.GeneralConfig, @@ -99,6 +96,8 @@ func NewTestOnlyProcessingNode(args ArgsTestOnlyProcessingNode) (*testOnlyProces InitialRound: args.InitialRound, MinNodesPerShard: args.MinNodesPerShard, MinNodesMeta: args.MinNodesMeta, + RoundDurationInMs: args.RoundDurationInMillis, + RatingConfig: *args.Configs.RatingsConfig, }) if err != nil { return nil, err @@ -202,9 +201,12 @@ func NewTestOnlyProcessingNode(args ArgsTestOnlyProcessingNode) (*testOnlyProces EconomicsConfig: *args.Configs.EconomicsConfig, SystemSCConfig: *args.Configs.SystemSCConfig, EpochConfig: *args.Configs.EpochConfig, + RoundConfig: *args.Configs.RoundConfig, ConfigurationPathsHolder: *args.Configs.ConfigurationPathsHolder, NodesCoordinator: instance.NodesCoordinator, DataComponents: instance.DataComponentsHolder, + GenesisNonce: args.InitialNonce, + GenesisRound: uint64(args.InitialRound), }) if err != nil { return nil, err @@ -220,7 +222,7 @@ func NewTestOnlyProcessingNode(args ArgsTestOnlyProcessingNode) (*testOnlyProces return nil, err } - err = instance.createBroadcastMessanger() + err = instance.createBroadcastMessenger() if err != nil { return nil, err } @@ 
-300,6 +302,7 @@ func (node *testOnlyProcessingNode) createNodesCoordinator(pref config.Preferenc node.CoreComponentsHolder.NodeTypeProvider(), node.CoreComponentsHolder.EnableEpochsHandler(), node.DataPool.CurrentEpochValidatorInfo(), + node.BootstrapComponentsHolder.NodesCoordinatorRegistryFactory(), ) if err != nil { return err @@ -308,7 +311,7 @@ func (node *testOnlyProcessingNode) createNodesCoordinator(pref config.Preferenc return nil } -func (node *testOnlyProcessingNode) createBroadcastMessanger() error { +func (node *testOnlyProcessingNode) createBroadcastMessenger() error { broadcastMessenger, err := sposFactory.GetBroadcastMessenger( node.CoreComponentsHolder.InternalMarshalizer(), node.CoreComponentsHolder.Hasher(), @@ -441,16 +444,7 @@ func (node *testOnlyProcessingNode) SetStateForAddress(address []byte, addressSt return err } - // set nonce to zero - userAccount.IncreaseNonce(-userAccount.GetNonce()) - // set nonce with the provided value - userAccount.IncreaseNonce(addressState.Nonce) - - bigValue, ok := big.NewInt(0).SetString(addressState.Balance, 10) - if !ok { - return errors.New("cannot convert string balance to *big.Int") - } - err = userAccount.AddToBalance(bigValue) + err = setNonceAndBalanceForAccount(userAccount, addressState.Nonce, addressState.Balance) if err != nil { return err } @@ -469,7 +463,9 @@ func (node *testOnlyProcessingNode) SetStateForAddress(address []byte, addressSt if err != nil { return err } - userAccount.SetRootHash(rootHash) + if len(rootHash) != 0 { + userAccount.SetRootHash(rootHash) + } accountsAdapter := node.StateComponentsHolder.AccountsAdapter() err = accountsAdapter.SaveAccount(userAccount) @@ -481,40 +477,77 @@ func (node *testOnlyProcessingNode) SetStateForAddress(address []byte, addressSt return err } -func (node *testOnlyProcessingNode) setScDataIfNeeded(address []byte, userAccount state.UserAccountHandler, addressState *dtos.AddressState) error { - if !core.IsSmartContractAddress(address) { +func setNonceAndBalanceForAccount(userAccount state.UserAccountHandler, nonce *uint64, balance string) error { + if nonce != nil { + // set nonce to zero + userAccount.IncreaseNonce(-userAccount.GetNonce()) + // set nonce with the provided value + userAccount.IncreaseNonce(*nonce) + } + + if balance == "" { return nil } - decodedCode, err := hex.DecodeString(addressState.Code) - if err != nil { - return err + providedBalance, ok := big.NewInt(0).SetString(balance, 10) + if !ok { + return errors.New("cannot convert string balance to *big.Int") } - userAccount.SetCode(decodedCode) - codeHash, err := base64.StdEncoding.DecodeString(addressState.CodeHash) + // set balance to zero + userBalance := userAccount.GetBalance() + err := userAccount.AddToBalance(userBalance.Neg(userBalance)) if err != nil { return err } - userAccount.SetCodeHash(codeHash) + // set provided balance + return userAccount.AddToBalance(providedBalance) +} - decodedCodeMetadata, err := base64.StdEncoding.DecodeString(addressState.CodeMetadata) - if err != nil { - return err +func (node *testOnlyProcessingNode) setScDataIfNeeded(address []byte, userAccount state.UserAccountHandler, addressState *dtos.AddressState) error { + if !core.IsSmartContractAddress(address) { + return nil } - userAccount.SetCodeMetadata(decodedCodeMetadata) - ownerAddress, err := node.CoreComponentsHolder.AddressPubKeyConverter().Decode(addressState.Owner) - if err != nil { - return err + if addressState.Code != "" { + decodedCode, err := hex.DecodeString(addressState.Code) + if err != nil { + return err 
+ } + userAccount.SetCode(decodedCode) } - userAccount.SetOwnerAddress(ownerAddress) - developerRewards, ok := big.NewInt(0).SetString(addressState.DeveloperRewards, 10) - if !ok { - return errors.New("cannot convert string developer rewards to *big.Int") + if addressState.CodeHash != "" { + codeHash, errD := base64.StdEncoding.DecodeString(addressState.CodeHash) + if errD != nil { + return errD + } + userAccount.SetCodeHash(codeHash) + } + + if addressState.CodeMetadata != "" { + decodedCodeMetadata, errD := base64.StdEncoding.DecodeString(addressState.CodeMetadata) + if errD != nil { + return errD + } + userAccount.SetCodeMetadata(decodedCodeMetadata) + } + + if addressState.Owner != "" { + ownerAddress, errD := node.CoreComponentsHolder.AddressPubKeyConverter().Decode(addressState.Owner) + if errD != nil { + return errD + } + userAccount.SetOwnerAddress(ownerAddress) + } + + if addressState.DeveloperRewards != "" { + developerRewards, ok := big.NewInt(0).SetString(addressState.DeveloperRewards, 10) + if !ok { + return errors.New("cannot convert string developer rewards to *big.Int") + } + userAccount.AddToDeveloperReward(developerRewards) } - userAccount.AddToDeveloperReward(developerRewards) return nil } diff --git a/node/chainSimulator/components/testOnlyProcessingNode_test.go b/node/chainSimulator/components/testOnlyProcessingNode_test.go index 64dbf32b8e3..5924663217b 100644 --- a/node/chainSimulator/components/testOnlyProcessingNode_test.go +++ b/node/chainSimulator/components/testOnlyProcessingNode_test.go @@ -1,17 +1,26 @@ package components import ( + "errors" + "math/big" "strings" "testing" "time" "github.com/multiversx/mx-chain-core-go/data/endProcess" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/node/chainSimulator/components/api" "github.com/multiversx/mx-chain-go/node/chainSimulator/configs" + "github.com/multiversx/mx-chain-go/node/chainSimulator/dtos" + "github.com/multiversx/mx-chain-go/testscommon/factory" + "github.com/multiversx/mx-chain-go/testscommon/state" + vmcommon "github.com/multiversx/mx-chain-vm-common-go" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) +var expectedErr = errors.New("expected error") + func createMockArgsTestOnlyProcessingNode(t *testing.T) ArgsTestOnlyProcessingNode { outputConfigs, err := configs.CreateChainSimulatorConfigs(configs.ArgsChainSimulatorConfigs{ NumOfShards: 3, @@ -33,28 +42,24 @@ func createMockArgsTestOnlyProcessingNode(t *testing.T) ArgsTestOnlyProcessingNo ChanStopNodeProcess: make(chan endProcess.ArgEndProcess), APIInterface: api.NewNoApiInterface(), ShardIDStr: "0", + RoundDurationInMillis: 6000, + MinNodesMeta: 1, + MinNodesPerShard: 1, } } func TestNewTestOnlyProcessingNode(t *testing.T) { - t.Parallel() + if testing.Short() { + t.Skip("this is not a short test") + } t.Run("should work", func(t *testing.T) { - if testing.Short() { - t.Skip("cannot run with -race -short; requires Wasm VM fix") - } - args := createMockArgsTestOnlyProcessingNode(t) node, err := NewTestOnlyProcessingNode(args) assert.Nil(t, err) assert.NotNil(t, node) }) - t.Run("try commit a block", func(t *testing.T) { - if testing.Short() { - t.Skip("cannot run with -race -short; requires Wasm VM fix") - } - args := createMockArgsTestOnlyProcessingNode(t) node, err := NewTestOnlyProcessingNode(args) assert.Nil(t, err) @@ -81,27 +86,383 @@ func TestNewTestOnlyProcessingNode(t *testing.T) { err = node.ProcessComponentsHolder.BlockProcessor().CommitBlock(header, block) assert.Nil(t, err) }) + 
t.Run("CreateCoreComponents failure should error", func(t *testing.T) { + args := createMockArgsTestOnlyProcessingNode(t) + args.Configs.GeneralConfig.Marshalizer.Type = "invalid type" + node, err := NewTestOnlyProcessingNode(args) + require.Error(t, err) + require.Nil(t, node) + }) + t.Run("CreateCryptoComponents failure should error", func(t *testing.T) { + args := createMockArgsTestOnlyProcessingNode(t) + args.Configs.GeneralConfig.PublicKeyPIDSignature.Type = "invalid type" + node, err := NewTestOnlyProcessingNode(args) + require.Error(t, err) + require.Nil(t, node) + }) + t.Run("CreateNetworkComponents failure should error", func(t *testing.T) { + args := createMockArgsTestOnlyProcessingNode(t) + args.SyncedBroadcastNetwork = nil + node, err := NewTestOnlyProcessingNode(args) + require.Error(t, err) + require.Nil(t, node) + }) + t.Run("CreateBootstrapComponents failure should error", func(t *testing.T) { + args := createMockArgsTestOnlyProcessingNode(t) + args.Configs.FlagsConfig.WorkingDir = "" + node, err := NewTestOnlyProcessingNode(args) + require.Error(t, err) + require.Nil(t, node) + }) + t.Run("CreateStateComponents failure should error", func(t *testing.T) { + args := createMockArgsTestOnlyProcessingNode(t) + args.ShardIDStr = common.MetachainShardName // coverage only + args.Configs.GeneralConfig.StateTriesConfig.MaxStateTrieLevelInMemory = 0 + node, err := NewTestOnlyProcessingNode(args) + require.Error(t, err) + require.Nil(t, node) + }) + t.Run("CreateProcessComponents failure should error", func(t *testing.T) { + args := createMockArgsTestOnlyProcessingNode(t) + args.Configs.FlagsConfig.Version = "" + node, err := NewTestOnlyProcessingNode(args) + require.Error(t, err) + require.Nil(t, node) + }) + t.Run("createFacade failure should error", func(t *testing.T) { + args := createMockArgsTestOnlyProcessingNode(t) + args.Configs.EpochConfig.GasSchedule.GasScheduleByEpochs = nil + node, err := NewTestOnlyProcessingNode(args) + require.Error(t, err) + require.Nil(t, node) + }) } -func TestOnlyProcessingNodeSetStateShouldError(t *testing.T) { - args := createMockArgsTestOnlyProcessingNode(t) - node, err := NewTestOnlyProcessingNode(args) - require.Nil(t, err) +func TestTestOnlyProcessingNode_SetKeyValueForAddress(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + goodKeyValueMap := map[string]string{ + "01": "02", + } + node, err := NewTestOnlyProcessingNode(createMockArgsTestOnlyProcessingNode(t)) + require.NoError(t, err) + + address := "erd1qtc600lryvytxuy4h7vn7xmsy5tw6vuw3tskr75cwnmv4mnyjgsq6e5zgj" + addressBytes, _ := node.CoreComponentsHolder.AddressPubKeyConverter().Decode(address) + + t.Run("should work", func(t *testing.T) { + _, err = node.StateComponentsHolder.AccountsAdapter().GetExistingAccount(addressBytes) + require.Error(t, err) + require.True(t, strings.Contains(err.Error(), "account was not found")) + + err = node.SetKeyValueForAddress(addressBytes, goodKeyValueMap) + require.NoError(t, err) + + _, err = node.StateComponentsHolder.AccountsAdapter().GetExistingAccount(addressBytes) + require.NoError(t, err) + }) + t.Run("decode key failure should error", func(t *testing.T) { + keyValueMap := map[string]string{ + "nonHex": "01", + } + err = node.SetKeyValueForAddress(addressBytes, keyValueMap) + require.NotNil(t, err) + require.True(t, strings.Contains(err.Error(), "cannot decode key")) + }) + t.Run("decode value failure should error", func(t *testing.T) { + keyValueMap := map[string]string{ + "01": "nonHex", + } + err = 
node.SetKeyValueForAddress(addressBytes, keyValueMap) + require.NotNil(t, err) + require.True(t, strings.Contains(err.Error(), "cannot decode value")) + }) + t.Run("LoadAccount failure should error", func(t *testing.T) { + argsLocal := createMockArgsTestOnlyProcessingNode(t) + nodeLocal, errLocal := NewTestOnlyProcessingNode(argsLocal) + require.NoError(t, errLocal) + + nodeLocal.StateComponentsHolder = &factory.StateComponentsMock{ + Accounts: &state.AccountsStub{ + LoadAccountCalled: func(container []byte) (vmcommon.AccountHandler, error) { + return nil, expectedErr + }, + }, + } + + errLocal = nodeLocal.SetKeyValueForAddress(addressBytes, nil) + require.Equal(t, expectedErr, errLocal) + }) + t.Run("account un-castable to UserAccountHandler should error", func(t *testing.T) { + argsLocal := createMockArgsTestOnlyProcessingNode(t) + nodeLocal, errLocal := NewTestOnlyProcessingNode(argsLocal) + require.NoError(t, errLocal) + + nodeLocal.StateComponentsHolder = &factory.StateComponentsMock{ + Accounts: &state.AccountsStub{ + LoadAccountCalled: func(container []byte) (vmcommon.AccountHandler, error) { + return &state.PeerAccountHandlerMock{}, nil + }, + }, + } + + errLocal = nodeLocal.SetKeyValueForAddress(addressBytes, nil) + require.Error(t, errLocal) + require.Equal(t, "cannot cast AccountHandler to UserAccountHandler", errLocal.Error()) + }) + t.Run("SaveKeyValue failure should error", func(t *testing.T) { + nodeLocal, errLocal := NewTestOnlyProcessingNode(createMockArgsTestOnlyProcessingNode(t)) + require.NoError(t, errLocal) + + nodeLocal.StateComponentsHolder = &factory.StateComponentsMock{ + Accounts: &state.AccountsStub{ + LoadAccountCalled: func(container []byte) (vmcommon.AccountHandler, error) { + return &state.UserAccountStub{ + SaveKeyValueCalled: func(key []byte, value []byte) error { + return expectedErr + }, + }, nil + }, + }, + } + + errLocal = nodeLocal.SetKeyValueForAddress(addressBytes, goodKeyValueMap) + require.Equal(t, expectedErr, errLocal) + }) + t.Run("SaveAccount failure should error", func(t *testing.T) { + argsLocal := createMockArgsTestOnlyProcessingNode(t) + nodeLocal, errLocal := NewTestOnlyProcessingNode(argsLocal) + require.NoError(t, errLocal) + + nodeLocal.StateComponentsHolder = &factory.StateComponentsMock{ + Accounts: &state.AccountsStub{ + SaveAccountCalled: func(account vmcommon.AccountHandler) error { + return expectedErr + }, + }, + } + + errLocal = nodeLocal.SetKeyValueForAddress(addressBytes, goodKeyValueMap) + require.Equal(t, expectedErr, errLocal) + }) +} + +func TestTestOnlyProcessingNode_SetStateForAddress(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + node, err := NewTestOnlyProcessingNode(createMockArgsTestOnlyProcessingNode(t)) + require.NoError(t, err) + nonce := uint64(100) address := "erd1qtc600lryvytxuy4h7vn7xmsy5tw6vuw3tskr75cwnmv4mnyjgsq6e5zgj" + scAddress := "erd1qqqqqqqqqqqqqpgqrchxzx5uu8sv3ceg8nx8cxc0gesezure5awqn46gtd" addressBytes, _ := node.CoreComponentsHolder.AddressPubKeyConverter().Decode(address) + scAddressBytes, _ := node.CoreComponentsHolder.AddressPubKeyConverter().Decode(scAddress) + addressState := &dtos.AddressState{ + Address: "erd1qtc600lryvytxuy4h7vn7xmsy5tw6vuw3tskr75cwnmv4mnyjgsq6e5zgj", + Nonce: &nonce, + Balance: "1000000000000000000", + Keys: map[string]string{ + "01": "02", + }, + } + + t.Run("should work", func(t *testing.T) { + _, err = node.StateComponentsHolder.AccountsAdapter().GetExistingAccount(addressBytes) + require.Error(t, err) + require.True(t, 
strings.Contains(err.Error(), "account was not found")) + + err = node.SetStateForAddress(addressBytes, addressState) + require.NoError(t, err) + + account, err := node.StateComponentsHolder.AccountsAdapter().GetExistingAccount(addressBytes) + require.NoError(t, err) + require.Equal(t, *addressState.Nonce, account.GetNonce()) + }) + t.Run("LoadAccount failure should error", func(t *testing.T) { + nodeLocal, errLocal := NewTestOnlyProcessingNode(createMockArgsTestOnlyProcessingNode(t)) + require.NoError(t, errLocal) + + nodeLocal.StateComponentsHolder = &factory.StateComponentsMock{ + Accounts: &state.AccountsStub{ + LoadAccountCalled: func(container []byte) (vmcommon.AccountHandler, error) { + return nil, expectedErr + }, + }, + } + + errLocal = nodeLocal.SetStateForAddress([]byte("address"), nil) + require.Equal(t, expectedErr, errLocal) + }) + t.Run("state balance invalid should error", func(t *testing.T) { + addressStateCopy := *addressState + addressStateCopy.Balance = "invalid balance" + err = node.SetStateForAddress(addressBytes, &addressStateCopy) + require.Error(t, err) + require.Equal(t, "cannot convert string balance to *big.Int", err.Error()) + }) + t.Run("AddToBalance failure should error", func(t *testing.T) { + nodeLocal, errLocal := NewTestOnlyProcessingNode(createMockArgsTestOnlyProcessingNode(t)) + require.NoError(t, errLocal) + + nodeLocal.StateComponentsHolder = &factory.StateComponentsMock{ + Accounts: &state.AccountsStub{ + LoadAccountCalled: func(container []byte) (vmcommon.AccountHandler, error) { + return &state.UserAccountStub{ + AddToBalanceCalled: func(value *big.Int) error { + return expectedErr + }, + Balance: big.NewInt(0), + }, nil + }, + }, + } + + errLocal = nodeLocal.SetStateForAddress([]byte("address"), addressState) + require.Equal(t, expectedErr, errLocal) + }) + t.Run("SaveKeyValue failure should error", func(t *testing.T) { + argsLocal := createMockArgsTestOnlyProcessingNode(t) + nodeLocal, errLocal := NewTestOnlyProcessingNode(argsLocal) + require.NoError(t, errLocal) + + nodeLocal.StateComponentsHolder = &factory.StateComponentsMock{ + Accounts: &state.AccountsStub{ + LoadAccountCalled: func(container []byte) (vmcommon.AccountHandler, error) { + return &state.UserAccountStub{ + SaveKeyValueCalled: func(key []byte, value []byte) error { + return expectedErr + }, + Balance: big.NewInt(0), + }, nil + }, + }, + } + + errLocal = nodeLocal.SetStateForAddress(addressBytes, addressState) + require.Equal(t, expectedErr, errLocal) + }) + t.Run("invalid sc code should error", func(t *testing.T) { + addressStateCopy := *addressState + addressStateCopy.Address = scAddress + addressStateCopy.Code = "invalid code" + + err = node.SetStateForAddress(scAddressBytes, &addressStateCopy) + require.Error(t, err) + }) + t.Run("invalid sc code hash should error", func(t *testing.T) { + addressStateCopy := *addressState + addressStateCopy.Address = scAddress + addressStateCopy.CodeHash = "invalid code hash" + + err = node.SetStateForAddress(scAddressBytes, &addressStateCopy) + require.Error(t, err) + }) + t.Run("invalid sc code metadata should error", func(t *testing.T) { + addressStateCopy := *addressState + addressStateCopy.Address = scAddress + addressStateCopy.CodeMetadata = "invalid code metadata" + + err = node.SetStateForAddress(scAddressBytes, &addressStateCopy) + require.Error(t, err) + }) + t.Run("invalid sc owner should error", func(t *testing.T) { + addressStateCopy := *addressState + addressStateCopy.Address = scAddress + addressStateCopy.Owner = "invalid 
owner" + + err = node.SetStateForAddress(scAddressBytes, &addressStateCopy) + require.Error(t, err) + }) + t.Run("invalid sc dev rewards should error", func(t *testing.T) { + addressStateCopy := *addressState + addressStateCopy.Owner = address + addressStateCopy.Address = scAddress + addressStateCopy.DeveloperRewards = "invalid dev rewards" + + err = node.SetStateForAddress(scAddressBytes, &addressStateCopy) + require.Error(t, err) + }) + t.Run("invalid root hash should error", func(t *testing.T) { + addressStateCopy := *addressState + addressStateCopy.Owner = address + addressStateCopy.Address = scAddress // coverage + addressStateCopy.DeveloperRewards = "1000000" + addressStateCopy.RootHash = "invalid root hash" + + err = node.SetStateForAddress(scAddressBytes, &addressStateCopy) + require.Error(t, err) + }) + t.Run("SaveAccount failure should error", func(t *testing.T) { + argsLocal := createMockArgsTestOnlyProcessingNode(t) + nodeLocal, errLocal := NewTestOnlyProcessingNode(argsLocal) + require.NoError(t, errLocal) + + nodeLocal.StateComponentsHolder = &factory.StateComponentsMock{ + Accounts: &state.AccountsStub{ + SaveAccountCalled: func(account vmcommon.AccountHandler) error { + return expectedErr + }, + }, + } + + errLocal = nodeLocal.SetStateForAddress(addressBytes, addressState) + require.Equal(t, expectedErr, errLocal) + }) +} + +func TestTestOnlyProcessingNode_IsInterfaceNil(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + var node *testOnlyProcessingNode + require.True(t, node.IsInterfaceNil()) + + node, _ = NewTestOnlyProcessingNode(createMockArgsTestOnlyProcessingNode(t)) + require.False(t, node.IsInterfaceNil()) +} - keyValueMap := map[string]string{ - "nonHex": "01", +func TestTestOnlyProcessingNode_Close(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") } - err = node.SetKeyValueForAddress(addressBytes, keyValueMap) - require.NotNil(t, err) - require.True(t, strings.Contains(err.Error(), "cannot decode key")) - keyValueMap = map[string]string{ - "01": "nonHex", + node, err := NewTestOnlyProcessingNode(createMockArgsTestOnlyProcessingNode(t)) + require.NoError(t, err) + + require.NoError(t, node.Close()) +} + +func TestTestOnlyProcessingNode_Getters(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") } - err = node.SetKeyValueForAddress(addressBytes, keyValueMap) - require.NotNil(t, err) - require.True(t, strings.Contains(err.Error(), "cannot decode value")) + + node := &testOnlyProcessingNode{} + require.Nil(t, node.GetProcessComponents()) + require.Nil(t, node.GetChainHandler()) + require.Nil(t, node.GetBroadcastMessenger()) + require.Nil(t, node.GetCryptoComponents()) + require.Nil(t, node.GetCoreComponents()) + require.Nil(t, node.GetStateComponents()) + require.Nil(t, node.GetFacadeHandler()) + require.Nil(t, node.GetStatusCoreComponents()) + + node, err := NewTestOnlyProcessingNode(createMockArgsTestOnlyProcessingNode(t)) + require.Nil(t, err) + + require.NotNil(t, node.GetProcessComponents()) + require.NotNil(t, node.GetChainHandler()) + require.NotNil(t, node.GetBroadcastMessenger()) + require.NotNil(t, node.GetShardCoordinator()) + require.NotNil(t, node.GetCryptoComponents()) + require.NotNil(t, node.GetCoreComponents()) + require.NotNil(t, node.GetStateComponents()) + require.NotNil(t, node.GetFacadeHandler()) + require.NotNil(t, node.GetStatusCoreComponents()) } diff --git a/node/chainSimulator/configs/configs.go b/node/chainSimulator/configs/configs.go index 
63aa3adc48b..731f8078eef 100644 --- a/node/chainSimulator/configs/configs.go +++ b/node/chainSimulator/configs/configs.go @@ -21,6 +21,7 @@ import ( "github.com/multiversx/mx-chain-go/common/factory" "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/genesis/data" + "github.com/multiversx/mx-chain-go/node" "github.com/multiversx/mx-chain-go/node/chainSimulator/dtos" "github.com/multiversx/mx-chain-go/sharding" "github.com/multiversx/mx-chain-go/storage/storageunit" @@ -34,20 +35,23 @@ const ( // ChainID contains the chain id ChainID = "chain" - shardIDWalletWithStake = 0 allValidatorsPemFileName = "allValidatorsKeys.pem" ) // ArgsChainSimulatorConfigs holds all the components needed to create the chain simulator configs type ArgsChainSimulatorConfigs struct { - NumOfShards uint32 - OriginalConfigsPath string - GenesisTimeStamp int64 - RoundDurationInMillis uint64 - TempDir string - MinNodesPerShard uint32 - MetaChainMinNodes uint32 - RoundsPerEpoch core.OptionalUint64 + NumOfShards uint32 + OriginalConfigsPath string + GenesisTimeStamp int64 + RoundDurationInMillis uint64 + TempDir string + MinNodesPerShard uint32 + MetaChainMinNodes uint32 + InitialEpoch uint32 + RoundsPerEpoch core.OptionalUint64 + NumNodesWaitingListShard uint32 + NumNodesWaitingListMeta uint32 + AlterConfigsFunction func(cfg *config.Configs) } // ArgsConfigsSimulator holds the configs for the chain simulator @@ -82,33 +86,27 @@ func CreateChainSimulatorConfigs(args ArgsChainSimulatorConfigs) (*ArgsConfigsSi // generate validators key and nodesSetup.json privateKeys, publicKeys, err := generateValidatorsKeyAndUpdateFiles( configs, - initialWallets.InitialWalletWithStake.Address, + initialWallets.StakeWallets, args, ) if err != nil { return nil, err } - configs.ConfigurationPathsHolder.AllValidatorKeys = path.Join(args.OriginalConfigsPath, allValidatorsPemFileName) + configs.ConfigurationPathsHolder.AllValidatorKeys = path.Join(args.TempDir, allValidatorsPemFileName) err = generateValidatorsPem(configs.ConfigurationPathsHolder.AllValidatorKeys, publicKeys, privateKeys) if err != nil { return nil, err } - gasScheduleName, err := GetLatestGasScheduleFilename(configs.ConfigurationPathsHolder.GasScheduleDirectoryName) - if err != nil { - return nil, err - } - configs.GeneralConfig.SmartContractsStorage.DB.Type = string(storageunit.MemoryDB) configs.GeneralConfig.SmartContractsStorageForSCQuery.DB.Type = string(storageunit.MemoryDB) configs.GeneralConfig.SmartContractsStorageSimulate.DB.Type = string(storageunit.MemoryDB) - maxNumNodes := uint64(args.MinNodesPerShard*args.NumOfShards+args.MetaChainMinNodes) + uint64(args.NumOfShards+1) - configs.SystemSCConfig.StakingSystemSCConfig.MaxNumberOfNodesForStake = maxNumNodes - for idx := 0; idx < len(configs.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch); idx++ { - configs.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[idx].MaxNumNodes = uint32(maxNumNodes) - } + maxNumNodes := uint64((args.MinNodesPerShard+args.NumNodesWaitingListShard)*args.NumOfShards) + + uint64(args.MetaChainMinNodes+args.NumNodesWaitingListMeta) + + SetMaxNumberOfNodesInConfigs(configs, maxNumNodes, args.NumOfShards) // set compatible trie configs configs.GeneralConfig.StateTriesConfig.SnapshotsEnabled = false @@ -117,11 +115,23 @@ func CreateChainSimulatorConfigs(args ArgsChainSimulatorConfigs) (*ArgsConfigsSi configs.GeneralConfig.DbLookupExtensions.Enabled = true configs.GeneralConfig.EpochStartConfig.ExtraDelayForRequestBlockInfoInMilliseconds = 1 + 
configs.GeneralConfig.EpochStartConfig.GenesisEpoch = args.InitialEpoch if args.RoundsPerEpoch.HasValue { configs.GeneralConfig.EpochStartConfig.RoundsPerEpoch = int64(args.RoundsPerEpoch.Value) } + gasScheduleName, err := GetLatestGasScheduleFilename(configs.ConfigurationPathsHolder.GasScheduleDirectoryName) + if err != nil { + return nil, err + } + + node.ApplyArchCustomConfigs(configs) + + if args.AlterConfigsFunction != nil { + args.AlterConfigsFunction(configs) + } + return &ArgsConfigsSimulator{ Configs: *configs, ValidatorsPrivateKeys: privateKeys, @@ -130,6 +140,44 @@ func CreateChainSimulatorConfigs(args ArgsChainSimulatorConfigs) (*ArgsConfigsSi }, nil } +// SetMaxNumberOfNodesInConfigs will correctly set the max number of nodes in configs +func SetMaxNumberOfNodesInConfigs(cfg *config.Configs, maxNumNodes uint64, numOfShards uint32) { + cfg.SystemSCConfig.StakingSystemSCConfig.MaxNumberOfNodesForStake = maxNumNodes + numMaxNumNodesEnableEpochs := len(cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch) + for idx := 0; idx < numMaxNumNodesEnableEpochs-1; idx++ { + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[idx].MaxNumNodes = uint32(maxNumNodes) + } + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[numMaxNumNodesEnableEpochs-1].EpochEnable = cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch + prevEntry := cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[numMaxNumNodesEnableEpochs-2] + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[numMaxNumNodesEnableEpochs-1].NodesToShufflePerShard = prevEntry.NodesToShufflePerShard + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[numMaxNumNodesEnableEpochs-1].MaxNumNodes = prevEntry.MaxNumNodes - (numOfShards+1)*prevEntry.NodesToShufflePerShard +} + +// SetQuickJailRatingConfig will set the rating config in a way that leads to rapid jailing of a node +func SetQuickJailRatingConfig(cfg *config.Configs) { + cfg.RatingsConfig.ShardChain.RatingSteps.ConsecutiveMissedBlocksPenalty = 100 + cfg.RatingsConfig.ShardChain.RatingSteps.HoursToMaxRatingFromStartRating = 1 + cfg.RatingsConfig.MetaChain.RatingSteps.ConsecutiveMissedBlocksPenalty = 100 + cfg.RatingsConfig.MetaChain.RatingSteps.HoursToMaxRatingFromStartRating = 1 +} + +// SetStakingV4ActivationEpochs configures activation epochs for Staking V4. 
+// It takes an initial epoch and sets three consecutive steps for enabling Staking V4 features: +// - Step 1 activation epoch +// - Step 2 activation epoch +// - Step 3 activation epoch +func SetStakingV4ActivationEpochs(cfg *config.Configs, initialEpoch uint32) { + cfg.EpochConfig.EnableEpochs.StakeLimitsEnableEpoch = initialEpoch + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = initialEpoch + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = initialEpoch + 1 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = initialEpoch + 2 + + // Set the MaxNodesChange enable epoch for index 2 + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = initialEpoch + 2 + cfg.SystemSCConfig.StakingSystemSCConfig.NodeLimitPercentage = 1 +} + func generateGenesisFile(args ArgsChainSimulatorConfigs, configs *config.Configs) (*dtos.InitialWalletKeys, error) { addressConverter, err := factory.NewPubkeyConverter(configs.GeneralConfig.AddressPubkeyConverter) if err != nil { @@ -137,29 +185,33 @@ func generateGenesisFile(args ArgsChainSimulatorConfigs, configs *config.Configs } initialWalletKeys := &dtos.InitialWalletKeys{ - ShardWallets: make(map[uint32]*dtos.WalletKey), + BalanceWallets: make(map[uint32]*dtos.WalletKey), + StakeWallets: make([]*dtos.WalletKey, 0), } - initialAddressWithStake, err := generateWalletKeyForShard(shardIDWalletWithStake, args.NumOfShards, addressConverter) - if err != nil { - return nil, err - } + addresses := make([]data.InitialAccount, 0) + numOfNodes := int((args.NumNodesWaitingListShard+args.MinNodesPerShard)*args.NumOfShards + args.NumNodesWaitingListMeta + args.MetaChainMinNodes) + for i := 0; i < numOfNodes; i++ { + wallet, errGenerate := generateWalletKey(addressConverter) + if errGenerate != nil { + return nil, errGenerate + } - initialWalletKeys.InitialWalletWithStake = initialAddressWithStake + stakedValue := big.NewInt(0).Set(initialStakedEgldPerNode) + addresses = append(addresses, data.InitialAccount{ + Address: wallet.Address.Bech32, + StakingValue: stakedValue, + Supply: stakedValue, + }) - addresses := make([]data.InitialAccount, 0) - stakedValue := big.NewInt(0).Set(initialStakedEgldPerNode) - numOfNodes := args.MinNodesPerShard*args.NumOfShards + args.MetaChainMinNodes - stakedValue = stakedValue.Mul(stakedValue, big.NewInt(int64(numOfNodes))) // 2500 EGLD * number of nodes - addresses = append(addresses, data.InitialAccount{ - Address: initialAddressWithStake.Address, - StakingValue: stakedValue, - Supply: stakedValue, - }) + initialWalletKeys.StakeWallets = append(initialWalletKeys.StakeWallets, wallet) + } // generate an address for every shard initialBalance := big.NewInt(0).Set(initialSupply) - initialBalance = initialBalance.Sub(initialBalance, stakedValue) + totalStakedValue := big.NewInt(int64(numOfNodes)) + totalStakedValue = totalStakedValue.Mul(totalStakedValue, big.NewInt(0).Set(initialStakedEgldPerNode)) + initialBalance = initialBalance.Sub(initialBalance, totalStakedValue) walletBalance := big.NewInt(0).Set(initialBalance) walletBalance.Div(walletBalance, big.NewInt(int64(args.NumOfShards))) @@ -175,16 +227,16 @@ func generateGenesisFile(args ArgsChainSimulatorConfigs, configs *config.Configs } addresses = append(addresses, data.InitialAccount{ - Address: walletKey.Address, + Address: walletKey.Address.Bech32, Balance: big.NewInt(0).Set(walletBalance), Supply: big.NewInt(0).Set(walletBalance), }) - initialWalletKeys.ShardWallets[shardID] = walletKey + initialWalletKeys.BalanceWallets[shardID] = walletKey } - 
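A minimal sketch of the epoch layout that SetStakingV4ActivationEpochs produces: the three staking v4 steps land in consecutive epochs starting from the provided initial epoch, and the last MaxNodesChange entry (index 2) is aligned with step 3. Only the arithmetic comes from the function above; the helper is illustrative.

package main

import "fmt"

// stakingV4Epochs returns the activation epochs set by SetStakingV4ActivationEpochs
// for a given initial epoch, plus the epoch at which the MaxNodesChange entry
// with index 2 becomes active (aligned with step 3).
func stakingV4Epochs(initialEpoch uint32) (step1, step2, step3, maxNodesChangeEpoch uint32) {
	step1 = initialEpoch
	step2 = initialEpoch + 1
	step3 = initialEpoch + 2
	maxNodesChangeEpoch = step3
	return
}

func main() {
	fmt.Println(stakingV4Epochs(1)) // 1 2 3 3
}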
addresses[1].Balance.Add(walletBalance, remainder) - addresses[1].Supply.Add(walletBalance, remainder) + addresses[len(addresses)-1].Balance.Add(walletBalance, remainder) + addresses[len(addresses)-1].Supply.Add(walletBalance, remainder) addressesBytes, errM := json.Marshal(addresses) if errM != nil { @@ -201,7 +253,7 @@ func generateGenesisFile(args ArgsChainSimulatorConfigs, configs *config.Configs func generateValidatorsKeyAndUpdateFiles( configs *config.Configs, - address string, + stakeWallets []*dtos.WalletKey, args ArgsChainSimulatorConfigs, ) ([]crypto.PrivateKey, []crypto.PublicKey, error) { blockSigningGenerator := signing.NewKeyGenerator(mcl.NewSuiteBLS12()) @@ -216,6 +268,7 @@ func generateValidatorsKeyAndUpdateFiles( nodes.RoundDuration = args.RoundDurationInMillis nodes.StartTime = args.GenesisTimeStamp + // TODO fix this to can be configurable nodes.ConsensusGroupSize = 1 nodes.MetaChainConsensusGroupSize = 1 nodes.Hysteresis = 0 @@ -226,8 +279,9 @@ func generateValidatorsKeyAndUpdateFiles( nodes.InitialNodes = make([]*sharding.InitialNode, 0) privateKeys := make([]crypto.PrivateKey, 0) publicKeys := make([]crypto.PublicKey, 0) + walletIndex := 0 // generate meta keys - for idx := uint32(0); idx < args.MetaChainMinNodes; idx++ { + for idx := uint32(0); idx < args.NumNodesWaitingListMeta+args.MetaChainMinNodes; idx++ { sk, pk := blockSigningGenerator.GeneratePair() privateKeys = append(privateKeys, sk) publicKeys = append(publicKeys, pk) @@ -239,13 +293,15 @@ func generateValidatorsKeyAndUpdateFiles( nodes.InitialNodes = append(nodes.InitialNodes, &sharding.InitialNode{ PubKey: hex.EncodeToString(pkBytes), - Address: address, + Address: stakeWallets[walletIndex].Address.Bech32, }) + + walletIndex++ } // generate shard keys for idx1 := uint32(0); idx1 < args.NumOfShards; idx1++ { - for idx2 := uint32(0); idx2 < args.MinNodesPerShard; idx2++ { + for idx2 := uint32(0); idx2 < args.NumNodesWaitingListShard+args.MinNodesPerShard; idx2++ { sk, pk := blockSigningGenerator.GeneratePair() privateKeys = append(privateKeys, sk) publicKeys = append(publicKeys, pk) @@ -257,8 +313,9 @@ func generateValidatorsKeyAndUpdateFiles( nodes.InitialNodes = append(nodes.InitialNodes, &sharding.InitialNode{ PubKey: hex.EncodeToString(pkBytes), - Address: address, + Address: stakeWallets[walletIndex].Address.Bech32, }) + walletIndex++ } } @@ -351,35 +408,46 @@ func GetLatestGasScheduleFilename(directory string) (string, error) { } func generateWalletKeyForShard(shardID, numOfShards uint32, converter core.PubkeyConverter) (*dtos.WalletKey, error) { - walletSuite := ed25519.NewEd25519() - walletKeyGenerator := signing.NewKeyGenerator(walletSuite) - for { - sk, pk := walletKeyGenerator.GeneratePair() - - pubKeyBytes, err := pk.ToByteArray() + walletKey, err := generateWalletKey(converter) if err != nil { return nil, err } - addressShardID := shardingCore.ComputeShardID(pubKeyBytes, numOfShards) + addressShardID := shardingCore.ComputeShardID(walletKey.Address.Bytes, numOfShards) if addressShardID != shardID { continue } - privateKeyBytes, err := sk.ToByteArray() - if err != nil { - return nil, err - } + return walletKey, nil + } +} - address, err := converter.Encode(pubKeyBytes) - if err != nil { - return nil, err - } +func generateWalletKey(converter core.PubkeyConverter) (*dtos.WalletKey, error) { + walletSuite := ed25519.NewEd25519() + walletKeyGenerator := signing.NewKeyGenerator(walletSuite) + + sk, pk := walletKeyGenerator.GeneratePair() + pubKeyBytes, err := pk.ToByteArray() + if err != nil { + 
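The genesis arithmetic in generateGenesisFile above can be summarised in isolation: one staked wallet per node, the rest of the supply split evenly across one balance wallet per shard, and the division remainder added to the last generated address. The two constants below are stand-ins; the real initialSupply and initialStakedEgldPerNode live elsewhere in the chain simulator package (the removed code hints at 2500 EGLD staked per node).

package main

import (
	"fmt"
	"math/big"
)

var (
	initialSupply            = big.NewInt(20_000_000) // assumed total supply for the example, in EGLD
	initialStakedEgldPerNode = big.NewInt(2_500)      // assumed per-node stake for the example, in EGLD
)

// splitGenesisBalances stakes initialStakedEgldPerNode for every node and splits
// whatever remains of the supply evenly between one balance wallet per shard;
// the remainder goes to the last generated address, as in the diff above.
func splitGenesisBalances(numOfNodes, numOfShards int64) (totalStaked, perShard, remainder *big.Int) {
	totalStaked = new(big.Int).Mul(big.NewInt(numOfNodes), initialStakedEgldPerNode)
	remaining := new(big.Int).Sub(initialSupply, totalStaked)
	perShard = new(big.Int).Div(remaining, big.NewInt(numOfShards))
	remainder = new(big.Int).Mod(remaining, big.NewInt(numOfShards))
	return totalStaked, perShard, remainder
}

func main() {
	staked, perShard, rest := splitGenesisBalances(12, 3)
	fmt.Println(staked, perShard, rest) // 30000 6656666 2 with the assumed constants
}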
return nil, err + } + + privateKeyBytes, err := sk.ToByteArray() + if err != nil { + return nil, err + } - return &dtos.WalletKey{ - Address: address, - PrivateKeyHex: hex.EncodeToString(privateKeyBytes), - }, nil + bech32Address, err := converter.Encode(pubKeyBytes) + if err != nil { + return nil, err } + + return &dtos.WalletKey{ + Address: dtos.WalletAddress{ + Bech32: bech32Address, + Bytes: pubKeyBytes, + }, + PrivateKeyHex: hex.EncodeToString(privateKeyBytes), + }, nil } diff --git a/node/chainSimulator/dtos/keys.go b/node/chainSimulator/dtos/keys.go new file mode 100644 index 00000000000..7f4c0e613e9 --- /dev/null +++ b/node/chainSimulator/dtos/keys.go @@ -0,0 +1,25 @@ +package dtos + +// WalletKey holds the public and the private key of a wallet +type WalletKey struct { + Address WalletAddress `json:"address"` + PrivateKeyHex string `json:"privateKeyHex"` +} + +// InitialWalletKeys holds the initial wallet keys +type InitialWalletKeys struct { + StakeWallets []*WalletKey `json:"stakeWallets"` + BalanceWallets map[uint32]*WalletKey `json:"balanceWallets"` +} + +// WalletAddress holds the address in multiple formats +type WalletAddress struct { + Bech32 string `json:"bech32"` + Bytes []byte `json:"bytes"` +} + +// BLSKey holds the BLS key in multiple formats +type BLSKey struct { + Hex string + Bytes []byte +} diff --git a/node/chainSimulator/dtos/state.go b/node/chainSimulator/dtos/state.go index 2d2d59f7763..a8edb7e212d 100644 --- a/node/chainSimulator/dtos/state.go +++ b/node/chainSimulator/dtos/state.go @@ -3,7 +3,7 @@ package dtos // AddressState will hold the address state type AddressState struct { Address string `json:"address"` - Nonce uint64 `json:"nonce,omitempty"` + Nonce *uint64 `json:"nonce,omitempty"` Balance string `json:"balance,omitempty"` Code string `json:"code,omitempty"` RootHash string `json:"rootHash,omitempty"` diff --git a/node/chainSimulator/dtos/wallet.go b/node/chainSimulator/dtos/wallet.go deleted file mode 100644 index a007bc8b735..00000000000 --- a/node/chainSimulator/dtos/wallet.go +++ /dev/null @@ -1,13 +0,0 @@ -package dtos - -// WalletKey holds the public and the private key of a wallet bey -type WalletKey struct { - Address string `json:"address"` - PrivateKeyHex string `json:"privateKeyHex"` -} - -// InitialWalletKeys holds the initial wallet keys -type InitialWalletKeys struct { - InitialWalletWithStake *WalletKey `json:"initialWalletWithStake"` - ShardWallets map[uint32]*WalletKey `json:"shardWallets"` -} diff --git a/node/chainSimulator/errors.go b/node/chainSimulator/errors.go index 57f0db0c457..5e2dec0c16a 100644 --- a/node/chainSimulator/errors.go +++ b/node/chainSimulator/errors.go @@ -3,7 +3,10 @@ package chainSimulator import "errors" var ( - errNilChainSimulator = errors.New("nil chain simulator") - errNilMetachainNode = errors.New("nil metachain node") - errShardSetupError = errors.New("shard setup error") + errNilChainSimulator = errors.New("nil chain simulator") + errNilMetachainNode = errors.New("nil metachain node") + errShardSetupError = errors.New("shard setup error") + errEmptySliceOfTxs = errors.New("empty slice of transactions to send") + errNilTransaction = errors.New("nil transaction") + errInvalidMaxNumOfBlocks = errors.New("invalid max number of blocks to generate") ) diff --git a/node/chainSimulator/facade_test.go b/node/chainSimulator/facade_test.go new file mode 100644 index 00000000000..908704c05a0 --- /dev/null +++ b/node/chainSimulator/facade_test.go @@ -0,0 +1,193 @@ +package chainSimulator + +import ( + "errors" + 
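The switch of AddressState.Nonce from uint64 to *uint64 matters because of the `omitempty` tag: a plain zero nonce would be dropped from the JSON and become indistinguishable from "nonce not provided". A minimal sketch with a trimmed local copy of the struct (not the real dtos type) shows the difference.

package main

import (
	"encoding/json"
	"fmt"
)

// addressState keeps only the fields needed to illustrate the pointer nonce.
type addressState struct {
	Address string  `json:"address"`
	Nonce   *uint64 `json:"nonce,omitempty"`
}

func main() {
	zero := uint64(0)

	withoutNonce, _ := json.Marshal(addressState{Address: "erd1..."})
	withZeroNonce, _ := json.Marshal(addressState{Address: "erd1...", Nonce: &zero})

	fmt.Println(string(withoutNonce))  // {"address":"erd1..."}
	fmt.Println(string(withZeroNonce)) // {"address":"erd1...","nonce":0}
}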
"testing" + + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/factory" + factoryMock "github.com/multiversx/mx-chain-go/factory/mock" + "github.com/multiversx/mx-chain-go/integrationTests/mock" + "github.com/multiversx/mx-chain-go/node/chainSimulator/process" + "github.com/multiversx/mx-chain-go/sharding" + "github.com/multiversx/mx-chain-go/state" + "github.com/multiversx/mx-chain-go/testscommon" + "github.com/multiversx/mx-chain-go/testscommon/chainSimulator" + stateMock "github.com/multiversx/mx-chain-go/testscommon/state" + "github.com/multiversx/mx-chain-go/testscommon/vmcommonMocks" + vmcommon "github.com/multiversx/mx-chain-vm-common-go" + "github.com/stretchr/testify/require" +) + +var expectedErr = errors.New("expected error") + +func TestNewChainSimulatorFacade(t *testing.T) { + t.Parallel() + + t.Run("should work", func(t *testing.T) { + t.Parallel() + + facade, err := NewChainSimulatorFacade(&chainSimulator.ChainSimulatorMock{ + GetNodeHandlerCalled: func(shardID uint32) process.NodeHandler { + return &chainSimulator.NodeHandlerMock{} + }, + }) + require.NoError(t, err) + require.NotNil(t, facade) + }) + t.Run("nil chain simulator should error", func(t *testing.T) { + t.Parallel() + + facade, err := NewChainSimulatorFacade(nil) + require.Equal(t, errNilChainSimulator, err) + require.Nil(t, facade) + }) + t.Run("nil node handler returned by chain simulator should error", func(t *testing.T) { + t.Parallel() + + facade, err := NewChainSimulatorFacade(&chainSimulator.ChainSimulatorMock{ + GetNodeHandlerCalled: func(shardID uint32) process.NodeHandler { + return nil + }, + }) + require.Equal(t, errNilMetachainNode, err) + require.Nil(t, facade) + }) +} + +func TestChainSimulatorFacade_GetExistingAccountFromBech32AddressString(t *testing.T) { + t.Parallel() + + t.Run("address decode failure should error", func(t *testing.T) { + t.Parallel() + + facade, err := NewChainSimulatorFacade(&chainSimulator.ChainSimulatorMock{ + GetNodeHandlerCalled: func(shardID uint32) process.NodeHandler { + return &chainSimulator.NodeHandlerMock{ + GetCoreComponentsCalled: func() factory.CoreComponentsHolder { + return &mock.CoreComponentsStub{ + AddressPubKeyConverterField: &testscommon.PubkeyConverterStub{ + DecodeCalled: func(humanReadable string) ([]byte, error) { + return nil, expectedErr + }, + }, + } + }, + } + }, + }) + require.NoError(t, err) + + handler, err := facade.GetExistingAccountFromBech32AddressString("address") + require.Equal(t, expectedErr, err) + require.Nil(t, handler) + }) + t.Run("nil shard node should error", func(t *testing.T) { + t.Parallel() + + facade, err := NewChainSimulatorFacade(&chainSimulator.ChainSimulatorMock{ + GetNodeHandlerCalled: func(shardID uint32) process.NodeHandler { + if shardID != common.MetachainShardId { + return nil + } + + return &chainSimulator.NodeHandlerMock{ + GetCoreComponentsCalled: func() factory.CoreComponentsHolder { + return &mock.CoreComponentsStub{ + AddressPubKeyConverterField: &testscommon.PubkeyConverterStub{}, + } + }, + GetShardCoordinatorCalled: func() sharding.Coordinator { + return &testscommon.ShardsCoordinatorMock{ + ComputeIdCalled: func(address []byte) uint32 { + return 0 + }, + } + }, + } + }, + }) + require.NoError(t, err) + + handler, err := facade.GetExistingAccountFromBech32AddressString("address") + require.True(t, errors.Is(err, errShardSetupError)) + require.Nil(t, handler) + }) + t.Run("shard node GetExistingAccount should error", func(t *testing.T) { + t.Parallel() + + facade, err := 
NewChainSimulatorFacade(&chainSimulator.ChainSimulatorMock{ + GetNodeHandlerCalled: func(shardID uint32) process.NodeHandler { + return &chainSimulator.NodeHandlerMock{ + GetCoreComponentsCalled: func() factory.CoreComponentsHolder { + return &mock.CoreComponentsStub{ + AddressPubKeyConverterField: &testscommon.PubkeyConverterStub{}, + } + }, + GetShardCoordinatorCalled: func() sharding.Coordinator { + return &testscommon.ShardsCoordinatorMock{ + ComputeIdCalled: func(address []byte) uint32 { + return 0 + }, + } + }, + GetStateComponentsCalled: func() factory.StateComponentsHolder { + return &factoryMock.StateComponentsHolderStub{ + AccountsAdapterCalled: func() state.AccountsAdapter { + return &stateMock.AccountsStub{ + GetExistingAccountCalled: func(addressContainer []byte) (vmcommon.AccountHandler, error) { + return nil, expectedErr + }, + } + }, + } + }, + } + }, + }) + require.NoError(t, err) + + handler, err := facade.GetExistingAccountFromBech32AddressString("address") + require.Equal(t, expectedErr, err) + require.Nil(t, handler) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + providedAccount := &vmcommonMocks.UserAccountStub{} + facade, err := NewChainSimulatorFacade(&chainSimulator.ChainSimulatorMock{ + GetNodeHandlerCalled: func(shardID uint32) process.NodeHandler { + return &chainSimulator.NodeHandlerMock{ + GetCoreComponentsCalled: func() factory.CoreComponentsHolder { + return &mock.CoreComponentsStub{ + AddressPubKeyConverterField: &testscommon.PubkeyConverterStub{}, + } + }, + GetShardCoordinatorCalled: func() sharding.Coordinator { + return &testscommon.ShardsCoordinatorMock{ + ComputeIdCalled: func(address []byte) uint32 { + return 0 + }, + } + }, + GetStateComponentsCalled: func() factory.StateComponentsHolder { + return &factoryMock.StateComponentsHolderStub{ + AccountsAdapterCalled: func() state.AccountsAdapter { + return &stateMock.AccountsStub{ + GetExistingAccountCalled: func(addressContainer []byte) (vmcommon.AccountHandler, error) { + return providedAccount, nil + }, + } + }, + } + }, + } + }, + }) + require.NoError(t, err) + + handler, err := facade.GetExistingAccountFromBech32AddressString("address") + require.NoError(t, err) + require.True(t, handler == providedAccount) // pointer testing + }) +} diff --git a/node/chainSimulator/interface.go b/node/chainSimulator/interface.go index b1540611302..0b2f51ca457 100644 --- a/node/chainSimulator/interface.go +++ b/node/chainSimulator/interface.go @@ -11,6 +11,7 @@ type ChainHandler interface { // ChainSimulator defines what a chain simulator should be able to do type ChainSimulator interface { + GenerateBlocks(numOfBlocks int) error GetNodeHandler(shardID uint32) process.NodeHandler IsInterfaceNil() bool } diff --git a/node/chainSimulator/process/errors.go b/node/chainSimulator/process/errors.go new file mode 100644 index 00000000000..eb1a69656e7 --- /dev/null +++ b/node/chainSimulator/process/errors.go @@ -0,0 +1,6 @@ +package process + +import "errors" + +// ErrNilNodeHandler signals that a nil node handler has been provided +var ErrNilNodeHandler = errors.New("nil node handler") diff --git a/node/chainSimulator/process/processor.go b/node/chainSimulator/process/processor.go index e47ccb92b50..d8f225bfde8 100644 --- a/node/chainSimulator/process/processor.go +++ b/node/chainSimulator/process/processor.go @@ -1,6 +1,7 @@ package process import ( + "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-go/common" 
"github.com/multiversx/mx-chain-go/consensus/spos" @@ -20,6 +21,10 @@ type blocksCreator struct { // NewBlocksCreator will create a new instance of blocksCreator func NewBlocksCreator(nodeHandler NodeHandler) (*blocksCreator, error) { + if check.IfNil(nodeHandler) { + return nil, ErrNilNodeHandler + } + return &blocksCreator{ nodeHandler: nodeHandler, }, nil @@ -38,8 +43,9 @@ func (creator *blocksCreator) IncrementRound() { func (creator *blocksCreator) CreateNewBlock() error { bp := creator.nodeHandler.GetProcessComponents().BlockProcessor() - nonce, round, prevHash, prevRandSeed, epoch := creator.getPreviousHeaderData() - newHeader, err := bp.CreateNewHeader(round+1, nonce+1) + nonce, _, prevHash, prevRandSeed, epoch := creator.getPreviousHeaderData() + round := creator.nodeHandler.GetCoreComponents().RoundHandler().Index() + newHeader, err := bp.CreateNewHeader(uint64(round), nonce+1) if err != nil { return err } @@ -70,7 +76,7 @@ func (creator *blocksCreator) CreateNewBlock() error { return err } - headerCreationTime := creator.nodeHandler.GetProcessComponents().RoundHandler().TimeStamp() + headerCreationTime := creator.nodeHandler.GetCoreComponents().RoundHandler().TimeStamp() err = newHeader.SetTimeStamp(uint64(headerCreationTime.Unix())) if err != nil { return err @@ -127,7 +133,12 @@ func (creator *blocksCreator) CreateNewBlock() error { return err } - return creator.nodeHandler.GetBroadcastMessenger().BroadcastBlockDataLeader(header, miniBlocks, transactions, blsKey.PubKey()) + err = creator.nodeHandler.GetBroadcastMessenger().BroadcastMiniBlocks(miniBlocks, blsKey.PubKey()) + if err != nil { + return err + } + + return creator.nodeHandler.GetBroadcastMessenger().BroadcastTransactions(transactions, blsKey.PubKey()) } func (creator *blocksCreator) getPreviousHeaderData() (nonce, round uint64, prevHash, prevRandSeed []byte, epoch uint32) { @@ -144,6 +155,8 @@ func (creator *blocksCreator) getPreviousHeaderData() (nonce, round uint64, prev prevHash = creator.nodeHandler.GetChainHandler().GetGenesisHeaderHash() prevRandSeed = creator.nodeHandler.GetChainHandler().GetGenesisHeader().GetRandSeed() round = uint64(creator.nodeHandler.GetCoreComponents().RoundHandler().Index()) - 1 + epoch = creator.nodeHandler.GetChainHandler().GetGenesisHeader().GetEpoch() + nonce = creator.nodeHandler.GetChainHandler().GetGenesisHeader().GetNonce() return } diff --git a/node/chainSimulator/process/processor_test.go b/node/chainSimulator/process/processor_test.go new file mode 100644 index 00000000000..80ffd568134 --- /dev/null +++ b/node/chainSimulator/process/processor_test.go @@ -0,0 +1,631 @@ +package process_test + +import ( + "errors" + "testing" + "time" + + "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-core-go/data/block" + "github.com/multiversx/mx-chain-core-go/hashing" + "github.com/multiversx/mx-chain-core-go/marshal" + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/consensus" + mockConsensus "github.com/multiversx/mx-chain-go/consensus/mock" + "github.com/multiversx/mx-chain-go/factory" + "github.com/multiversx/mx-chain-go/integrationTests/mock" + chainSimulatorProcess "github.com/multiversx/mx-chain-go/node/chainSimulator/process" + "github.com/multiversx/mx-chain-go/process" + "github.com/multiversx/mx-chain-go/sharding" + "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" + "github.com/multiversx/mx-chain-go/testscommon" + "github.com/multiversx/mx-chain-go/testscommon/chainSimulator" + testsConsensus 
"github.com/multiversx/mx-chain-go/testscommon/consensus" + testsFactory "github.com/multiversx/mx-chain-go/testscommon/factory" + "github.com/multiversx/mx-chain-go/testscommon/shardingMocks" + "github.com/multiversx/mx-chain-go/testscommon/statusHandler" + "github.com/stretchr/testify/require" +) + +var expectedErr = errors.New("expected error") + +func TestNewBlocksCreator(t *testing.T) { + t.Parallel() + + t.Run("nil node handler should error", func(t *testing.T) { + t.Parallel() + + creator, err := chainSimulatorProcess.NewBlocksCreator(nil) + require.Equal(t, chainSimulatorProcess.ErrNilNodeHandler, err) + require.Nil(t, creator) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + creator, err := chainSimulatorProcess.NewBlocksCreator(&chainSimulator.NodeHandlerMock{}) + require.NoError(t, err) + require.NotNil(t, creator) + }) +} + +func TestBlocksCreator_IsInterfaceNil(t *testing.T) { + t.Parallel() + + creator, _ := chainSimulatorProcess.NewBlocksCreator(nil) + require.True(t, creator.IsInterfaceNil()) + + creator, _ = chainSimulatorProcess.NewBlocksCreator(&chainSimulator.NodeHandlerMock{}) + require.False(t, creator.IsInterfaceNil()) +} + +func TestBlocksCreator_IncrementRound(t *testing.T) { + t.Parallel() + + wasIncrementIndexCalled := false + wasSetUInt64ValueCalled := false + nodeHandler := &chainSimulator.NodeHandlerMock{ + GetCoreComponentsCalled: func() factory.CoreComponentsHolder { + return &testsFactory.CoreComponentsHolderStub{ + RoundHandlerCalled: func() consensus.RoundHandler { + return &testscommon.RoundHandlerMock{ + IncrementIndexCalled: func() { + wasIncrementIndexCalled = true + }, + } + }, + } + }, + GetStatusCoreComponentsCalled: func() factory.StatusCoreComponentsHolder { + return &testsFactory.StatusCoreComponentsStub{ + AppStatusHandlerField: &statusHandler.AppStatusHandlerStub{ + SetUInt64ValueHandler: func(key string, value uint64) { + wasSetUInt64ValueCalled = true + require.Equal(t, common.MetricCurrentRound, key) + }, + }, + } + }, + } + creator, err := chainSimulatorProcess.NewBlocksCreator(nodeHandler) + require.NoError(t, err) + + creator.IncrementRound() + require.True(t, wasIncrementIndexCalled) + require.True(t, wasSetUInt64ValueCalled) +} + +func TestBlocksCreator_CreateNewBlock(t *testing.T) { + t.Parallel() + + t.Run("CreateNewHeader failure should error", func(t *testing.T) { + t.Parallel() + + blockProcess := &testscommon.BlockProcessorStub{ + CreateNewHeaderCalled: func(round uint64, nonce uint64) (data.HeaderHandler, error) { + return nil, expectedErr + }, + } + nodeHandler := getNodeHandler() + nodeHandler.GetProcessComponentsCalled = func() factory.ProcessComponentsHolder { + return &mock.ProcessComponentsStub{ + BlockProcess: blockProcess, + } + } + nodeHandler.GetChainHandlerCalled = func() data.ChainHandler { + return &testscommon.ChainHandlerStub{ + GetCurrentBlockHeaderCalled: func() data.HeaderHandler { + return &block.HeaderV2{} // coverage for getPreviousHeaderData + }, + } + } + + creator, err := chainSimulatorProcess.NewBlocksCreator(nodeHandler) + require.NoError(t, err) + + err = creator.CreateNewBlock() + require.Equal(t, expectedErr, err) + }) + t.Run("SetShardID failure should error", func(t *testing.T) { + t.Parallel() + + blockProcess := &testscommon.BlockProcessorStub{ + CreateNewHeaderCalled: func(round uint64, nonce uint64) (data.HeaderHandler, error) { + return &testscommon.HeaderHandlerStub{ + SetShardIDCalled: func(shardId uint32) error { + return expectedErr + }, + }, nil + }, + } + 
testCreateNewBlock(t, blockProcess, expectedErr) + }) + t.Run("SetPrevHash failure should error", func(t *testing.T) { + t.Parallel() + + blockProcess := &testscommon.BlockProcessorStub{ + CreateNewHeaderCalled: func(round uint64, nonce uint64) (data.HeaderHandler, error) { + return &testscommon.HeaderHandlerStub{ + SetPrevHashCalled: func(hash []byte) error { + return expectedErr + }, + }, nil + }, + } + testCreateNewBlock(t, blockProcess, expectedErr) + }) + t.Run("SetPrevRandSeed failure should error", func(t *testing.T) { + t.Parallel() + + blockProcess := &testscommon.BlockProcessorStub{ + CreateNewHeaderCalled: func(round uint64, nonce uint64) (data.HeaderHandler, error) { + return &testscommon.HeaderHandlerStub{ + SetPrevRandSeedCalled: func(seed []byte) error { + return expectedErr + }, + }, nil + }, + } + testCreateNewBlock(t, blockProcess, expectedErr) + }) + t.Run("SetPubKeysBitmap failure should error", func(t *testing.T) { + t.Parallel() + + blockProcess := &testscommon.BlockProcessorStub{ + CreateNewHeaderCalled: func(round uint64, nonce uint64) (data.HeaderHandler, error) { + return &testscommon.HeaderHandlerStub{ + SetPubKeysBitmapCalled: func(bitmap []byte) error { + return expectedErr + }, + }, nil + }, + } + testCreateNewBlock(t, blockProcess, expectedErr) + }) + t.Run("SetChainID failure should error", func(t *testing.T) { + t.Parallel() + + blockProcess := &testscommon.BlockProcessorStub{ + CreateNewHeaderCalled: func(round uint64, nonce uint64) (data.HeaderHandler, error) { + return &testscommon.HeaderHandlerStub{ + SetChainIDCalled: func(chainID []byte) error { + return expectedErr + }, + }, nil + }, + } + testCreateNewBlock(t, blockProcess, expectedErr) + }) + t.Run("SetTimeStamp failure should error", func(t *testing.T) { + t.Parallel() + + blockProcess := &testscommon.BlockProcessorStub{ + CreateNewHeaderCalled: func(round uint64, nonce uint64) (data.HeaderHandler, error) { + return &testscommon.HeaderHandlerStub{ + SetTimeStampCalled: func(timestamp uint64) error { + return expectedErr + }, + }, nil + }, + } + testCreateNewBlock(t, blockProcess, expectedErr) + }) + t.Run("ComputeConsensusGroup failure should error", func(t *testing.T) { + t.Parallel() + + nodeHandler := getNodeHandler() + nodeHandler.GetProcessComponentsCalled = func() factory.ProcessComponentsHolder { + return &mock.ProcessComponentsStub{ + BlockProcess: &testscommon.BlockProcessorStub{ + CreateNewHeaderCalled: func(round uint64, nonce uint64) (data.HeaderHandler, error) { + return &testscommon.HeaderHandlerStub{}, nil + }, + }, + NodesCoord: &shardingMocks.NodesCoordinatorStub{ + ComputeConsensusGroupCalled: func(randomness []byte, round uint64, shardId uint32, epoch uint32) (validatorsGroup []nodesCoordinator.Validator, err error) { + return nil, expectedErr + }, + }, + } + } + creator, err := chainSimulatorProcess.NewBlocksCreator(nodeHandler) + require.NoError(t, err) + + err = creator.CreateNewBlock() + require.Equal(t, expectedErr, err) + }) + t.Run("key not managed by the current node should return nil", func(t *testing.T) { + t.Parallel() + + nodeHandler := getNodeHandler() + nodeHandler.GetCryptoComponentsCalled = func() factory.CryptoComponentsHolder { + return &mock.CryptoComponentsStub{ + KeysHandlerField: &testscommon.KeysHandlerStub{ + IsKeyManagedByCurrentNodeCalled: func(pkBytes []byte) bool { + return false + }, + }, + } + } + creator, err := chainSimulatorProcess.NewBlocksCreator(nodeHandler) + require.NoError(t, err) + + err = creator.CreateNewBlock() + require.NoError(t, err) 
+ }) + t.Run("CreateSignatureForPublicKey failure should error", func(t *testing.T) { + t.Parallel() + + nodeHandler := getNodeHandler() + kh := nodeHandler.GetCryptoComponents().KeysHandler() + nodeHandler.GetCryptoComponentsCalled = func() factory.CryptoComponentsHolder { + return &mock.CryptoComponentsStub{ + KeysHandlerField: kh, + SigHandler: &testsConsensus.SigningHandlerStub{ + CreateSignatureForPublicKeyCalled: func(message []byte, publicKeyBytes []byte) ([]byte, error) { + return nil, expectedErr + }, + }, + } + } + creator, err := chainSimulatorProcess.NewBlocksCreator(nodeHandler) + require.NoError(t, err) + + err = creator.CreateNewBlock() + require.Equal(t, expectedErr, err) + }) + t.Run("SetRandSeed failure should error", func(t *testing.T) { + t.Parallel() + + blockProcess := &testscommon.BlockProcessorStub{ + CreateNewHeaderCalled: func(round uint64, nonce uint64) (data.HeaderHandler, error) { + return &testscommon.HeaderHandlerStub{ + SetRandSeedCalled: func(seed []byte) error { + return expectedErr + }, + }, nil + }, + } + testCreateNewBlock(t, blockProcess, expectedErr) + }) + t.Run("CreateBlock failure should error", func(t *testing.T) { + t.Parallel() + + blockProcess := &testscommon.BlockProcessorStub{ + CreateNewHeaderCalled: func(round uint64, nonce uint64) (data.HeaderHandler, error) { + return &testscommon.HeaderHandlerStub{}, nil + }, + CreateBlockCalled: func(initialHdrData data.HeaderHandler, haveTime func() bool) (data.HeaderHandler, data.BodyHandler, error) { + return nil, nil, expectedErr + }, + } + testCreateNewBlock(t, blockProcess, expectedErr) + }) + t.Run("setHeaderSignatures.Marshal failure should error", func(t *testing.T) { + t.Parallel() + + nodeHandler := getNodeHandler() + rh := nodeHandler.GetCoreComponents().RoundHandler() + nodeHandler.GetCoreComponentsCalled = func() factory.CoreComponentsHolder { + return &testsFactory.CoreComponentsHolderStub{ + RoundHandlerCalled: func() consensus.RoundHandler { + return rh + }, + InternalMarshalizerCalled: func() marshal.Marshalizer { + return &testscommon.MarshallerStub{ + MarshalCalled: func(obj interface{}) ([]byte, error) { + return nil, expectedErr + }, + } + }, + } + } + creator, err := chainSimulatorProcess.NewBlocksCreator(nodeHandler) + require.NoError(t, err) + + err = creator.CreateNewBlock() + require.Equal(t, expectedErr, err) + }) + t.Run("setHeaderSignatures.Reset failure should error", func(t *testing.T) { + t.Parallel() + + nodeHandler := getNodeHandler() + kh := nodeHandler.GetCryptoComponents().KeysHandler() + nodeHandler.GetCryptoComponentsCalled = func() factory.CryptoComponentsHolder { + return &mock.CryptoComponentsStub{ + KeysHandlerField: kh, + SigHandler: &testsConsensus.SigningHandlerStub{ + ResetCalled: func(pubKeys []string) error { + return expectedErr + }, + }, + } + } + creator, err := chainSimulatorProcess.NewBlocksCreator(nodeHandler) + require.NoError(t, err) + + err = creator.CreateNewBlock() + require.Equal(t, expectedErr, err) + }) + t.Run("setHeaderSignatures.CreateSignatureShareForPublicKey failure should error", func(t *testing.T) { + t.Parallel() + + nodeHandler := getNodeHandler() + kh := nodeHandler.GetCryptoComponents().KeysHandler() + nodeHandler.GetCryptoComponentsCalled = func() factory.CryptoComponentsHolder { + return &mock.CryptoComponentsStub{ + KeysHandlerField: kh, + SigHandler: &testsConsensus.SigningHandlerStub{ + CreateSignatureShareForPublicKeyCalled: func(message []byte, index uint16, epoch uint32, publicKeyBytes []byte) ([]byte, error) { + return 
nil, expectedErr + }, + }, + } + } + creator, err := chainSimulatorProcess.NewBlocksCreator(nodeHandler) + require.NoError(t, err) + + err = creator.CreateNewBlock() + require.Equal(t, expectedErr, err) + }) + t.Run("setHeaderSignatures.AggregateSigs failure should error", func(t *testing.T) { + t.Parallel() + + nodeHandler := getNodeHandler() + kh := nodeHandler.GetCryptoComponents().KeysHandler() + nodeHandler.GetCryptoComponentsCalled = func() factory.CryptoComponentsHolder { + return &mock.CryptoComponentsStub{ + KeysHandlerField: kh, + SigHandler: &testsConsensus.SigningHandlerStub{ + AggregateSigsCalled: func(bitmap []byte, epoch uint32) ([]byte, error) { + return nil, expectedErr + }, + }, + } + } + creator, err := chainSimulatorProcess.NewBlocksCreator(nodeHandler) + require.NoError(t, err) + + err = creator.CreateNewBlock() + require.Equal(t, expectedErr, err) + }) + t.Run("setHeaderSignatures.SetSignature failure should error", func(t *testing.T) { + t.Parallel() + + blockProcess := &testscommon.BlockProcessorStub{ + CreateNewHeaderCalled: func(round uint64, nonce uint64) (data.HeaderHandler, error) { + return &testscommon.HeaderHandlerStub{}, nil + }, + CreateBlockCalled: func(initialHdrData data.HeaderHandler, haveTime func() bool) (data.HeaderHandler, data.BodyHandler, error) { + return &testscommon.HeaderHandlerStub{ + CloneCalled: func() data.HeaderHandler { + return &testscommon.HeaderHandlerStub{} + }, + SetSignatureCalled: func(signature []byte) error { + return expectedErr + }, + }, &block.Body{}, nil + }, + } + testCreateNewBlock(t, blockProcess, expectedErr) + }) + t.Run("createLeaderSignature.SetLeaderSignature failure should error", func(t *testing.T) { + t.Parallel() + + blockProcess := &testscommon.BlockProcessorStub{ + CreateNewHeaderCalled: func(round uint64, nonce uint64) (data.HeaderHandler, error) { + return &testscommon.HeaderHandlerStub{}, nil + }, + CreateBlockCalled: func(initialHdrData data.HeaderHandler, haveTime func() bool) (data.HeaderHandler, data.BodyHandler, error) { + return &testscommon.HeaderHandlerStub{ + CloneCalled: func() data.HeaderHandler { + return &testscommon.HeaderHandlerStub{ + SetLeaderSignatureCalled: func(signature []byte) error { + return expectedErr + }, + } + }, + }, &block.Body{}, nil + }, + } + testCreateNewBlock(t, blockProcess, expectedErr) + }) + t.Run("createLeaderSignature.SetLeaderSignature failure should error", func(t *testing.T) { + t.Parallel() + + blockProcess := &testscommon.BlockProcessorStub{ + CreateNewHeaderCalled: func(round uint64, nonce uint64) (data.HeaderHandler, error) { + return &testscommon.HeaderHandlerStub{}, nil + }, + CreateBlockCalled: func(initialHdrData data.HeaderHandler, haveTime func() bool) (data.HeaderHandler, data.BodyHandler, error) { + return &testscommon.HeaderHandlerStub{ + CloneCalled: func() data.HeaderHandler { + return &testscommon.HeaderHandlerStub{ + SetLeaderSignatureCalled: func(signature []byte) error { + return expectedErr + }, + } + }, + }, &block.Body{}, nil + }, + } + testCreateNewBlock(t, blockProcess, expectedErr) + }) + t.Run("setHeaderSignatures.SetLeaderSignature failure should error", func(t *testing.T) { + t.Parallel() + + blockProcess := &testscommon.BlockProcessorStub{ + CreateNewHeaderCalled: func(round uint64, nonce uint64) (data.HeaderHandler, error) { + return &testscommon.HeaderHandlerStub{}, nil + }, + CreateBlockCalled: func(initialHdrData data.HeaderHandler, haveTime func() bool) (data.HeaderHandler, data.BodyHandler, error) { + return 
&testscommon.HeaderHandlerStub{ + CloneCalled: func() data.HeaderHandler { + return &testscommon.HeaderHandlerStub{} + }, + SetLeaderSignatureCalled: func(signature []byte) error { + return expectedErr + }, + }, &block.Body{}, nil + }, + } + testCreateNewBlock(t, blockProcess, expectedErr) + }) + t.Run("CommitBlock failure should error", func(t *testing.T) { + t.Parallel() + + blockProcess := &testscommon.BlockProcessorStub{ + CreateNewHeaderCalled: func(round uint64, nonce uint64) (data.HeaderHandler, error) { + return &testscommon.HeaderHandlerStub{}, nil + }, + CreateBlockCalled: func(initialHdrData data.HeaderHandler, haveTime func() bool) (data.HeaderHandler, data.BodyHandler, error) { + return &testscommon.HeaderHandlerStub{ + CloneCalled: func() data.HeaderHandler { + return &testscommon.HeaderHandlerStub{} + }, + }, &block.Body{}, nil + }, + CommitBlockCalled: func(header data.HeaderHandler, body data.BodyHandler) error { + return expectedErr + }, + } + testCreateNewBlock(t, blockProcess, expectedErr) + }) + t.Run("MarshalizedDataToBroadcast failure should error", func(t *testing.T) { + t.Parallel() + + blockProcess := &testscommon.BlockProcessorStub{ + CreateNewHeaderCalled: func(round uint64, nonce uint64) (data.HeaderHandler, error) { + return &testscommon.HeaderHandlerStub{}, nil + }, + CreateBlockCalled: func(initialHdrData data.HeaderHandler, haveTime func() bool) (data.HeaderHandler, data.BodyHandler, error) { + return &testscommon.HeaderHandlerStub{ + CloneCalled: func() data.HeaderHandler { + return &testscommon.HeaderHandlerStub{} + }, + }, &block.Body{}, nil + }, + MarshalizedDataToBroadcastCalled: func(header data.HeaderHandler, body data.BodyHandler) (map[uint32][]byte, map[string][][]byte, error) { + return nil, nil, expectedErr + }, + } + testCreateNewBlock(t, blockProcess, expectedErr) + }) + t.Run("BroadcastHeader failure should error", func(t *testing.T) { + t.Parallel() + + nodeHandler := getNodeHandler() + nodeHandler.GetBroadcastMessengerCalled = func() consensus.BroadcastMessenger { + return &mockConsensus.BroadcastMessengerMock{ + BroadcastHeaderCalled: func(handler data.HeaderHandler, bytes []byte) error { + return expectedErr + }, + } + } + creator, err := chainSimulatorProcess.NewBlocksCreator(nodeHandler) + require.NoError(t, err) + + err = creator.CreateNewBlock() + require.Equal(t, expectedErr, err) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + creator, err := chainSimulatorProcess.NewBlocksCreator(getNodeHandler()) + require.NoError(t, err) + + err = creator.CreateNewBlock() + require.NoError(t, err) + }) +} + +func testCreateNewBlock(t *testing.T, blockProcess process.BlockProcessor, expectedErr error) { + nodeHandler := getNodeHandler() + nc := nodeHandler.GetProcessComponents().NodesCoordinator() + nodeHandler.GetProcessComponentsCalled = func() factory.ProcessComponentsHolder { + return &mock.ProcessComponentsStub{ + BlockProcess: blockProcess, + NodesCoord: nc, + } + } + creator, err := chainSimulatorProcess.NewBlocksCreator(nodeHandler) + require.NoError(t, err) + + err = creator.CreateNewBlock() + require.Equal(t, expectedErr, err) +} + +func getNodeHandler() *chainSimulator.NodeHandlerMock { + return &chainSimulator.NodeHandlerMock{ + GetCoreComponentsCalled: func() factory.CoreComponentsHolder { + return &testsFactory.CoreComponentsHolderStub{ + RoundHandlerCalled: func() consensus.RoundHandler { + return &testscommon.RoundHandlerMock{ + TimeStampCalled: func() time.Time { + return time.Now() + }, + } + }, + 
InternalMarshalizerCalled: func() marshal.Marshalizer { + return &testscommon.MarshallerStub{} + }, + HasherCalled: func() hashing.Hasher { + return &testscommon.HasherStub{ + ComputeCalled: func(s string) []byte { + return []byte("hash") + }, + } + }, + } + }, + GetProcessComponentsCalled: func() factory.ProcessComponentsHolder { + return &mock.ProcessComponentsStub{ + BlockProcess: &testscommon.BlockProcessorStub{ + CreateNewHeaderCalled: func(round uint64, nonce uint64) (data.HeaderHandler, error) { + return &testscommon.HeaderHandlerStub{}, nil + }, + CreateBlockCalled: func(initialHdrData data.HeaderHandler, haveTime func() bool) (data.HeaderHandler, data.BodyHandler, error) { + haveTime() // coverage only + return &testscommon.HeaderHandlerStub{ + CloneCalled: func() data.HeaderHandler { + return &testscommon.HeaderHandlerStub{} + }, + }, &block.Body{}, nil + }, + MarshalizedDataToBroadcastCalled: func(header data.HeaderHandler, body data.BodyHandler) (map[uint32][]byte, map[string][][]byte, error) { + return make(map[uint32][]byte), make(map[string][][]byte), nil + }, + }, + NodesCoord: &shardingMocks.NodesCoordinatorStub{ + ComputeConsensusGroupCalled: func(randomness []byte, round uint64, shardId uint32, epoch uint32) (validatorsGroup []nodesCoordinator.Validator, err error) { + return []nodesCoordinator.Validator{ + shardingMocks.NewValidatorMock([]byte("A"), 1, 1), + }, nil + }, + }, + } + }, + GetChainHandlerCalled: func() data.ChainHandler { + return &testscommon.ChainHandlerStub{ + GetGenesisHeaderCalled: func() data.HeaderHandler { + return &block.HeaderV2{} + }, + } + }, + GetShardCoordinatorCalled: func() sharding.Coordinator { + return &testscommon.ShardsCoordinatorMock{} + }, + GetCryptoComponentsCalled: func() factory.CryptoComponentsHolder { + return &mock.CryptoComponentsStub{ + KeysHandlerField: &testscommon.KeysHandlerStub{ + IsKeyManagedByCurrentNodeCalled: func(pkBytes []byte) bool { + return true + }, + }, + SigHandler: &testsConsensus.SigningHandlerStub{}, + } + }, + GetBroadcastMessengerCalled: func() consensus.BroadcastMessenger { + return &mockConsensus.BroadcastMessengerMock{} + }, + } +} diff --git a/node/customConfigsArm64.go b/node/customConfigsArm64.go new file mode 100644 index 00000000000..ce62a5fa604 --- /dev/null +++ b/node/customConfigsArm64.go @@ -0,0 +1,29 @@ +//go:build arm64 + +package node + +import ( + "runtime" + + "github.com/multiversx/mx-chain-go/config" +) + +// ApplyArchCustomConfigs will apply configuration tweaks based on the architecture the node is running on +func ApplyArchCustomConfigs(configs *config.Configs) { + log.Debug("ApplyArchCustomConfigs", "architecture", runtime.GOARCH) + + firstSupportedWasmer2VMVersion := "v1.5" + log.Debug("ApplyArchCustomConfigs - hardcoding the initial VM to " + firstSupportedWasmer2VMVersion) + configs.GeneralConfig.VirtualMachine.Execution.WasmVMVersions = []config.WasmVMVersionByEpoch{ + { + StartEpoch: 0, + Version: firstSupportedWasmer2VMVersion, + }, + } + configs.GeneralConfig.VirtualMachine.Querying.WasmVMVersions = []config.WasmVMVersionByEpoch{ + { + StartEpoch: 0, + Version: firstSupportedWasmer2VMVersion, + }, + } +} diff --git a/node/customConfigsArm64_test.go b/node/customConfigsArm64_test.go new file mode 100644 index 00000000000..925774a3318 --- /dev/null +++ b/node/customConfigsArm64_test.go @@ -0,0 +1,91 @@ +//go:build arm64 + +package node + +import ( + "testing" + + "github.com/multiversx/mx-chain-go/config" + "github.com/stretchr/testify/assert" +) + +func 
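A small usage sketch for ApplyArchCustomConfigs, assuming the imports shown in the diff: on arm64 builds it rewrites both the execution and querying VM version tables to a single wasmer2 entry ("v1.5" from epoch 0), while on other architectures it is a no-op, so the printed slice is whatever the config already contained. The starting VM versions below are illustrative.

package main

import (
	"fmt"

	"github.com/multiversx/mx-chain-go/config"
	"github.com/multiversx/mx-chain-go/node"
)

func main() {
	cfgs := &config.Configs{
		GeneralConfig: &config.Config{
			VirtualMachine: config.VirtualMachineServicesConfig{
				Execution: config.VirtualMachineConfig{
					WasmVMVersions: []config.WasmVMVersionByEpoch{
						{StartEpoch: 0, Version: "v1.4"},
						{StartEpoch: 1, Version: "v1.5"},
					},
				},
			},
		},
	}

	// On arm64 this collapses the table to a single "v1.5" entry from epoch 0.
	node.ApplyArchCustomConfigs(cfgs)
	fmt.Println(cfgs.GeneralConfig.VirtualMachine.Execution.WasmVMVersions)
}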
TestApplyArchCustomConfigs(t *testing.T) { + t.Parallel() + + executionVMConfig := config.VirtualMachineConfig{ + WasmVMVersions: []config.WasmVMVersionByEpoch{ + { + StartEpoch: 0, + Version: "v1.2", + }, + { + StartEpoch: 1, + Version: "v1.3", + }, + { + StartEpoch: 2, + Version: "v1.4", + }, + { + StartEpoch: 3, + Version: "v1.5", + }, + }, + TimeOutForSCExecutionInMilliseconds: 1, + WasmerSIGSEGVPassthrough: true, + } + + queryVMConfig := config.QueryVirtualMachineConfig{ + VirtualMachineConfig: executionVMConfig, + NumConcurrentVMs: 15, + } + + expectedVMWasmVersionsConfig := []config.WasmVMVersionByEpoch{ + { + StartEpoch: 0, + Version: "v1.5", + }, + } + + t.Run("providing a configuration should alter it", func(t *testing.T) { + t.Parallel() + + providedConfigs := &config.Configs{ + GeneralConfig: &config.Config{ + VirtualMachine: config.VirtualMachineServicesConfig{ + Execution: executionVMConfig, + Querying: queryVMConfig, + }, + }, + } + + expectedVMConfig := providedConfigs.GeneralConfig.VirtualMachine + expectedVMConfig.Execution.WasmVMVersions = expectedVMWasmVersionsConfig + expectedVMConfig.Querying.WasmVMVersions = expectedVMWasmVersionsConfig + + ApplyArchCustomConfigs(providedConfigs) + + assert.Equal(t, expectedVMConfig, providedConfigs.GeneralConfig.VirtualMachine) + }) + t.Run("empty config should return an altered config", func(t *testing.T) { + t.Parallel() + + providedConfigs := &config.Configs{ + GeneralConfig: &config.Config{}, + } + + expectedVMConfig := providedConfigs.GeneralConfig.VirtualMachine + expectedVMConfig.Execution.WasmVMVersions = expectedVMWasmVersionsConfig + expectedVMConfig.Querying.WasmVMVersions = expectedVMWasmVersionsConfig + + ApplyArchCustomConfigs(providedConfigs) + + expectedConfig := &config.Configs{ + GeneralConfig: &config.Config{ + VirtualMachine: expectedVMConfig, + }, + } + + assert.Equal(t, expectedConfig, providedConfigs) + }) +} diff --git a/node/customConfigsDefault.go b/node/customConfigsDefault.go new file mode 100644 index 00000000000..b762871db10 --- /dev/null +++ b/node/customConfigsDefault.go @@ -0,0 +1,14 @@ +//go:build !arm64 + +package node + +import ( + "runtime" + + "github.com/multiversx/mx-chain-go/config" +) + +// ApplyArchCustomConfigs will apply configuration tweaks based on the architecture the node is running on +func ApplyArchCustomConfigs(_ *config.Configs) { + log.Debug("ApplyArchCustomConfigs - nothing to do", "architecture", runtime.GOARCH) +} diff --git a/node/customConfigsDefault_test.go b/node/customConfigsDefault_test.go new file mode 100644 index 00000000000..8f9e8eb6521 --- /dev/null +++ b/node/customConfigsDefault_test.go @@ -0,0 +1,74 @@ +//go:build !arm64 + +package node + +import ( + "testing" + + "github.com/multiversx/mx-chain-go/config" + "github.com/stretchr/testify/assert" +) + +func TestApplyArchCustomConfigs(t *testing.T) { + t.Parallel() + + executionVMConfig := config.VirtualMachineConfig{ + WasmVMVersions: []config.WasmVMVersionByEpoch{ + { + StartEpoch: 0, + Version: "v1.2", + }, + { + StartEpoch: 1, + Version: "v1.3", + }, + { + StartEpoch: 2, + Version: "v1.4", + }, + { + StartEpoch: 3, + Version: "v1.5", + }, + }, + TimeOutForSCExecutionInMilliseconds: 1, + WasmerSIGSEGVPassthrough: true, + } + + queryVMConfig := config.QueryVirtualMachineConfig{ + VirtualMachineConfig: executionVMConfig, + NumConcurrentVMs: 15, + } + + t.Run("providing a configuration should not alter it", func(t *testing.T) { + t.Parallel() + + providedConfigs := &config.Configs{ + GeneralConfig: &config.Config{ + 
VirtualMachine: config.VirtualMachineServicesConfig{ + Execution: executionVMConfig, + Querying: queryVMConfig, + }, + }, + } + + ApplyArchCustomConfigs(providedConfigs) + + assert.Equal(t, executionVMConfig, providedConfigs.GeneralConfig.VirtualMachine.Execution) + assert.Equal(t, queryVMConfig, providedConfigs.GeneralConfig.VirtualMachine.Querying) + }) + t.Run("empty config should return an empty config", func(t *testing.T) { + t.Parallel() + + // this test will prevent adding new config changes without handling them in this test + providedConfigs := &config.Configs{ + GeneralConfig: &config.Config{}, + } + emptyConfigs := &config.Configs{ + GeneralConfig: &config.Config{}, + } + ApplyArchCustomConfigs(providedConfigs) + + assert.Equal(t, emptyConfigs, providedConfigs) + }) +} diff --git a/node/external/nodeApiResolver.go b/node/external/nodeApiResolver.go index d980e9ad91f..0ae0356f4f7 100644 --- a/node/external/nodeApiResolver.go +++ b/node/external/nodeApiResolver.go @@ -40,7 +40,9 @@ type ArgNodeApiResolver struct { AccountsParser genesis.AccountsParser GasScheduleNotifier common.GasScheduleNotifierAPI ManagedPeersMonitor common.ManagedPeersMonitor + PublicKey string NodesCoordinator nodesCoordinator.NodesCoordinator + StorageManagers []common.StorageManager } // nodeApiResolver can resolve API requests @@ -59,7 +61,9 @@ type nodeApiResolver struct { accountsParser genesis.AccountsParser gasScheduleNotifier common.GasScheduleNotifierAPI managedPeersMonitor common.ManagedPeersMonitor + publicKey string nodesCoordinator nodesCoordinator.NodesCoordinator + storageManagers []common.StorageManager } // NewNodeApiResolver creates a new nodeApiResolver instance @@ -125,7 +129,9 @@ func NewNodeApiResolver(arg ArgNodeApiResolver) (*nodeApiResolver, error) { accountsParser: arg.AccountsParser, gasScheduleNotifier: arg.GasScheduleNotifier, managedPeersMonitor: arg.ManagedPeersMonitor, + publicKey: arg.PublicKey, nodesCoordinator: arg.NodesCoordinator, + storageManagers: arg.StorageManagers, }, nil } @@ -151,6 +157,15 @@ func (nar *nodeApiResolver) SimulateTransactionExecution(tx *transaction.Transac // Close closes all underlying components func (nar *nodeApiResolver) Close() error { + for _, sm := range nar.storageManagers { + if check.IfNil(sm) { + continue + } + + err := sm.Close() + log.LogIfError(err) + } + return nar.scQueryService.Close() } @@ -345,12 +360,23 @@ func (nar *nodeApiResolver) GetManagedKeysCount() int { return nar.managedPeersMonitor.GetManagedKeysCount() } -// GetManagedKeys returns all keys managed by the current node when running in multikey mode +// GetManagedKeys returns all keys that should act as validator(main or backup that took over) and will be managed by this node func (nar *nodeApiResolver) GetManagedKeys() []string { managedKeys := nar.managedPeersMonitor.GetManagedKeys() return nar.parseKeys(managedKeys) } +// GetLoadedKeys returns all keys that were loaded by this node +func (nar *nodeApiResolver) GetLoadedKeys() []string { + loadedKeys := nar.managedPeersMonitor.GetLoadedKeys() + if len(loadedKeys) > 0 { + return nar.parseKeys(loadedKeys) + } + + // node is in single key mode, returning the main public key + return []string{nar.publicKey} +} + // GetEligibleManagedKeys returns the eligible managed keys when node is running in multikey mode func (nar *nodeApiResolver) GetEligibleManagedKeys() ([]string, error) { eligibleKeys, err := nar.managedPeersMonitor.GetEligibleManagedKeys() diff --git a/node/external/nodeApiResolver_test.go 
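The GetLoadedKeys fallback added above can be summarised in isolation: in multikey mode the managed-peers monitor reports the loaded keys, while a single-key node reports its own public key instead of an empty list. The helper below is a local illustration of that decision, not the resolver itself.

package main

import "fmt"

// loadedKeys mirrors nodeApiResolver.GetLoadedKeys: prefer the keys reported
// by the managed-peers monitor, fall back to the node's own public key.
func loadedKeys(monitorKeys []string, ownPublicKey string) []string {
	if len(monitorKeys) > 0 {
		return monitorKeys
	}
	return []string{ownPublicKey}
}

func main() {
	fmt.Println(loadedKeys(nil, "pk-of-single-key-node"))   // single key mode
	fmt.Println(loadedKeys([]string{"pk1", "pk2"}, "unused")) // multikey mode
}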
b/node/external/nodeApiResolver_test.go index 9e1d0ee516d..5a1cec19787 100644 --- a/node/external/nodeApiResolver_test.go +++ b/node/external/nodeApiResolver_test.go @@ -39,7 +39,7 @@ func createMockArgs() external.ArgNodeApiResolver { APIBlockHandler: &mock.BlockAPIHandlerStub{}, APITransactionHandler: &mock.TransactionAPIHandlerStub{}, APIInternalBlockHandler: &mock.InternalBlockApiHandlerStub{}, - GenesisNodesSetupHandler: &testscommon.NodesSetupStub{}, + GenesisNodesSetupHandler: &genesisMocks.NodesSetupStub{}, ValidatorPubKeyConverter: &testscommon.PubkeyConverterMock{}, AccountsParser: &genesisMocks.AccountsParserStub{}, GasScheduleNotifier: &testscommon.GasScheduleNotifierMock{}, @@ -594,7 +594,7 @@ func TestNodeApiResolver_GetGenesisNodesPubKeys(t *testing.T) { } arg := createMockArgs() - arg.GenesisNodesSetupHandler = &testscommon.NodesSetupStub{ + arg.GenesisNodesSetupHandler = &genesisMocks.NodesSetupStub{ InitialNodesInfoCalled: func() (map[uint32][]nodesCoordinator.GenesisNodeInfoHandler, map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) { return eligible, waiting }, @@ -739,6 +739,59 @@ func TestNodeApiResolver_GetManagedKeys(t *testing.T) { require.Equal(t, expectedKeys, keys) } +func TestNodeApiResolver_GetLoadedKeys(t *testing.T) { + t.Parallel() + + t.Run("multikey should work", func(t *testing.T) { + t.Parallel() + + providedKeys := [][]byte{ + []byte("pk1"), + []byte("pk2"), + } + expectedKeys := []string{ + "pk1", + "pk2", + } + args := createMockArgs() + args.ManagedPeersMonitor = &testscommon.ManagedPeersMonitorStub{ + GetLoadedKeysCalled: func() [][]byte { + return providedKeys + }, + } + args.ValidatorPubKeyConverter = &testscommon.PubkeyConverterStub{ + SilentEncodeCalled: func(pkBytes []byte, log core.Logger) string { + return string(pkBytes) + }, + } + nar, err := external.NewNodeApiResolver(args) + require.NoError(t, err) + + keys := nar.GetLoadedKeys() + require.Equal(t, expectedKeys, keys) + }) + t.Run("single key should work", func(t *testing.T) { + t.Parallel() + + providedKey := "pk1" + expectedKeys := []string{ + "pk1", + } + args := createMockArgs() + args.PublicKey = providedKey + args.ManagedPeersMonitor = &testscommon.ManagedPeersMonitorStub{ + GetLoadedKeysCalled: func() [][]byte { + return [][]byte{} + }, + } + nar, err := external.NewNodeApiResolver(args) + require.NoError(t, err) + + keys := nar.GetLoadedKeys() + require.Equal(t, expectedKeys, keys) + }) +} + func TestNodeApiResolver_GetEligibleManagedKeys(t *testing.T) { t.Parallel() diff --git a/node/external/timemachine/fee/feeComputer_test.go b/node/external/timemachine/fee/feeComputer_test.go index faf1996940e..46e2904d6d2 100644 --- a/node/external/timemachine/fee/feeComputer_test.go +++ b/node/external/timemachine/fee/feeComputer_test.go @@ -21,8 +21,7 @@ import ( func createEconomicsData() process.EconomicsDataHandler { economicsConfig := testscommon.GetEconomicsConfig() economicsData, _ := economics.NewEconomicsData(economics.ArgsNewEconomicsData{ - BuiltInFunctionsCostHandler: &testscommon.BuiltInCostHandlerStub{}, - Economics: &economicsConfig, + Economics: &economicsConfig, EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{ IsFlagEnabledInEpochCalled: func(flag core.EnableEpochFlag, epoch uint32) bool { if flag == common.PenalizedTooMuchGasFlag { diff --git a/node/external/timemachine/fee/memoryFootprint/memory_test.go b/node/external/timemachine/fee/memoryFootprint/memory_test.go index 2f32427e4de..a854a286ddd 100644 --- 
a/node/external/timemachine/fee/memoryFootprint/memory_test.go +++ b/node/external/timemachine/fee/memoryFootprint/memory_test.go @@ -30,8 +30,7 @@ func TestFeeComputer_MemoryFootprint(t *testing.T) { economicsConfig := testscommon.GetEconomicsConfig() economicsData, _ := economics.NewEconomicsData(economics.ArgsNewEconomicsData{ - BuiltInFunctionsCostHandler: &testscommon.BuiltInCostHandlerStub{}, - Economics: &economicsConfig, + Economics: &economicsConfig, EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{ IsFlagEnabledInEpochCalled: func(flag core.EnableEpochFlag, epoch uint32) bool { if flag == common.PenalizedTooMuchGasFlag { diff --git a/node/external/transactionAPI/apiTransactionProcessor.go b/node/external/transactionAPI/apiTransactionProcessor.go index 404cc8eba8d..b12aa9ac86f 100644 --- a/node/external/transactionAPI/apiTransactionProcessor.go +++ b/node/external/transactionAPI/apiTransactionProcessor.go @@ -8,6 +8,7 @@ import ( "time" "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-core-go/data/block" rewardTxData "github.com/multiversx/mx-chain-core-go/data/rewardTx" "github.com/multiversx/mx-chain-core-go/data/smartContractResult" @@ -308,41 +309,43 @@ func (atp *apiTransactionProcessor) getUnsignedTransactionsFromPool(requestedFie } func (atp *apiTransactionProcessor) extractRequestedTxInfo(wrappedTx *txcache.WrappedTransaction, requestedFieldsHandler fieldsHandler) common.Transaction { + fieldGetters := atp.getFieldGettersForTx(wrappedTx) tx := common.Transaction{ TxFields: make(map[string]interface{}), } - tx.TxFields[hashField] = hex.EncodeToString(wrappedTx.TxHash) - - if requestedFieldsHandler.HasNonce { - tx.TxFields[nonceField] = wrappedTx.Tx.GetNonce() + for field, value := range fieldGetters { + if requestedFieldsHandler.IsFieldSet(field) { + tx.TxFields[field] = value + } } - if requestedFieldsHandler.HasSender { - tx.TxFields[senderField], _ = atp.addressPubKeyConverter.Encode(wrappedTx.Tx.GetSndAddr()) - } + return tx +} - if requestedFieldsHandler.HasReceiver { - tx.TxFields[receiverField], _ = atp.addressPubKeyConverter.Encode(wrappedTx.Tx.GetRcvAddr()) +func (atp *apiTransactionProcessor) getFieldGettersForTx(wrappedTx *txcache.WrappedTransaction) map[string]interface{} { + var fieldGetters = map[string]interface{}{ + hashField: hex.EncodeToString(wrappedTx.TxHash), + nonceField: wrappedTx.Tx.GetNonce(), + senderField: atp.addressPubKeyConverter.SilentEncode(wrappedTx.Tx.GetSndAddr(), log), + receiverField: atp.addressPubKeyConverter.SilentEncode(wrappedTx.Tx.GetRcvAddr(), log), + gasLimitField: wrappedTx.Tx.GetGasLimit(), + gasPriceField: wrappedTx.Tx.GetGasPrice(), + rcvUsernameField: wrappedTx.Tx.GetRcvUserName(), + dataField: wrappedTx.Tx.GetData(), + valueField: getTxValue(wrappedTx), + senderShardID: wrappedTx.SenderShardID, + receiverShardID: wrappedTx.ReceiverShardID, } - if requestedFieldsHandler.HasGasLimit { - tx.TxFields[gasLimitField] = wrappedTx.Tx.GetGasLimit() - } - if requestedFieldsHandler.HasGasPrice { - tx.TxFields[gasPriceField] = wrappedTx.Tx.GetGasPrice() - } - if requestedFieldsHandler.HasRcvUsername { - tx.TxFields[rcvUsernameField] = wrappedTx.Tx.GetRcvUserName() - } - if requestedFieldsHandler.HasData { - tx.TxFields[dataField] = wrappedTx.Tx.GetData() - } - if requestedFieldsHandler.HasValue { - tx.TxFields[valueField] = getTxValue(wrappedTx) + guardedTx, isGuardedTx := wrappedTx.Tx.(data.GuardedTransactionHandler) + if isGuardedTx { + 
fieldGetters[signatureField] = hex.EncodeToString(guardedTx.GetSignature()) + fieldGetters[guardianField] = atp.addressPubKeyConverter.SilentEncode(guardedTx.GetGuardianAddr(), log) + fieldGetters[guardianSignatureField] = hex.EncodeToString(guardedTx.GetGuardianSignature()) } - return tx + return fieldGetters } func (atp *apiTransactionProcessor) fetchTxsForSender(sender string, senderShard uint32) []*txcache.WrappedTransaction { diff --git a/node/external/transactionAPI/apiTransactionProcessor_test.go b/node/external/transactionAPI/apiTransactionProcessor_test.go index f7d90c8f15b..7d86a1610c5 100644 --- a/node/external/transactionAPI/apiTransactionProcessor_test.go +++ b/node/external/transactionAPI/apiTransactionProcessor_test.go @@ -825,7 +825,7 @@ func TestApiTransactionProcessor_GetTransactionsPoolForSender(t *testing.T) { require.NoError(t, err) require.NotNil(t, atp) - res, err := atp.GetTransactionsPoolForSender(sender, "sender,value") + res, err := atp.GetTransactionsPoolForSender(sender, "*") require.NoError(t, err) expectedHashes := []string{hex.EncodeToString(txHash0), hex.EncodeToString(txHash1), hex.EncodeToString(txHash2), hex.EncodeToString(txHash3), hex.EncodeToString(txHash4)} expectedValues := []string{"100001", "100002", "100003", "100004", "100005"} diff --git a/node/external/transactionAPI/fieldsHandler.go b/node/external/transactionAPI/fieldsHandler.go index 43ea27d473a..4f837968cb7 100644 --- a/node/external/transactionAPI/fieldsHandler.go +++ b/node/external/transactionAPI/fieldsHandler.go @@ -5,39 +5,60 @@ import ( ) const ( - hashField = "hash" - nonceField = "nonce" - senderField = "sender" - receiverField = "receiver" - gasLimitField = "gaslimit" - gasPriceField = "gasprice" - rcvUsernameField = "receiverusername" - dataField = "data" - valueField = "value" + hashField = "hash" + nonceField = "nonce" + senderField = "sender" + receiverField = "receiver" + gasLimitField = "gaslimit" + gasPriceField = "gasprice" + rcvUsernameField = "receiverusername" + dataField = "data" + valueField = "value" + signatureField = "signature" + guardianField = "guardian" + guardianSignatureField = "guardiansignature" + senderShardID = "sendershard" + receiverShardID = "receivershard" + wildCard = "*" + + separator = "," ) type fieldsHandler struct { - HasNonce bool - HasSender bool - HasReceiver bool - HasGasLimit bool - HasGasPrice bool - HasRcvUsername bool - HasData bool - HasValue bool + fieldsMap map[string]struct{} } func newFieldsHandler(parameters string) fieldsHandler { + if len(parameters) == 0 { + return fieldsHandler{ + fieldsMap: map[string]struct{}{ + hashField: {}, // hash should always be returned + }, + } + } + parameters = strings.ToLower(parameters) - ph := fieldsHandler{ - HasNonce: strings.Contains(parameters, nonceField), - HasSender: strings.Contains(parameters, senderField), - HasReceiver: strings.Contains(parameters, receiverField), - HasGasLimit: strings.Contains(parameters, gasLimitField), - HasGasPrice: strings.Contains(parameters, gasPriceField), - HasRcvUsername: strings.Contains(parameters, rcvUsernameField), - HasData: strings.Contains(parameters, dataField), - HasValue: strings.Contains(parameters, valueField), + return fieldsHandler{ + fieldsMap: sliceToMap(strings.Split(parameters, separator)), + } +} + +// IsFieldSet returns true if the provided field is set +func (handler *fieldsHandler) IsFieldSet(field string) bool { + _, hasWildCard := handler.fieldsMap[wildCard] + if hasWildCard { + return true } - return ph + + _, has := 
handler.fieldsMap[strings.ToLower(field)] + return has +} + +func sliceToMap(providedSlice []string) map[string]struct{} { + result := make(map[string]struct{}, len(providedSlice)) + for _, entry := range providedSlice { + result[entry] = struct{}{} + } + + return result } diff --git a/node/external/transactionAPI/fieldsHandler_test.go b/node/external/transactionAPI/fieldsHandler_test.go index 0948483fd11..fab3b3a41d9 100644 --- a/node/external/transactionAPI/fieldsHandler_test.go +++ b/node/external/transactionAPI/fieldsHandler_test.go @@ -1,6 +1,8 @@ package transactionAPI import ( + "fmt" + "strings" "testing" "github.com/stretchr/testify/require" @@ -10,18 +12,17 @@ func Test_newFieldsHandler(t *testing.T) { t.Parallel() fh := newFieldsHandler("") - require.Equal(t, fieldsHandler{}, fh) + require.Equal(t, fieldsHandler{map[string]struct{}{hashField: {}}}, fh) - fh = newFieldsHandler("nOnCe,sender,receiver,gasLimit,GASprice,receiverusername,data,value") - expectedPH := fieldsHandler{ - HasNonce: true, - HasSender: true, - HasReceiver: true, - HasGasLimit: true, - HasGasPrice: true, - HasRcvUsername: true, - HasData: true, - HasValue: true, + providedFields := "nOnCe,sender,receiver,gasLimit,GASprice,receiverusername,data,value,signature,guardian,guardiansignature,sendershard,receivershard" + splitFields := strings.Split(providedFields, separator) + fh = newFieldsHandler(providedFields) + for _, field := range splitFields { + require.True(t, fh.IsFieldSet(field), fmt.Sprintf("field %s is not set", field)) + } + + fh = newFieldsHandler("*") + for _, field := range splitFields { + require.True(t, fh.IsFieldSet(field)) } - require.Equal(t, expectedPH, fh) } diff --git a/node/external/transactionAPI/gasUsedAndFeeProcessor.go b/node/external/transactionAPI/gasUsedAndFeeProcessor.go index a22b689d6a4..f0036bc136b 100644 --- a/node/external/transactionAPI/gasUsedAndFeeProcessor.go +++ b/node/external/transactionAPI/gasUsedAndFeeProcessor.go @@ -5,6 +5,7 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/data/transaction" + datafield "github.com/multiversx/mx-chain-vm-common-go/parsers/dataField" ) type gasUsedAndFeeProcessor struct { @@ -52,7 +53,7 @@ func (gfp *gasUsedAndFeeProcessor) prepareTxWithResultsBasedOnLogs( tx *transaction.ApiTransactionResult, hasRefund bool, ) { - if tx.Logs == nil { + if tx.Logs == nil || (tx.Function == "" && tx.Operation == datafield.OperationTransfer) { return } diff --git a/node/external/transactionAPI/gasUsedAndFeeProcessor_test.go b/node/external/transactionAPI/gasUsedAndFeeProcessor_test.go index 5c0ba4d4c05..99541bfef5d 100644 --- a/node/external/transactionAPI/gasUsedAndFeeProcessor_test.go +++ b/node/external/transactionAPI/gasUsedAndFeeProcessor_test.go @@ -20,11 +20,10 @@ import ( func createEconomicsData(enableEpochsHandler common.EnableEpochsHandler) process.EconomicsDataHandler { economicsConfig := testscommon.GetEconomicsConfig() economicsData, _ := economics.NewEconomicsData(economics.ArgsNewEconomicsData{ - BuiltInFunctionsCostHandler: &testscommon.BuiltInCostHandlerStub{}, - Economics: &economicsConfig, - EnableEpochsHandler: enableEpochsHandler, - TxVersionChecker: &testscommon.TxVersionCheckerStub{}, - EpochNotifier: &epochNotifier.EpochNotifierStub{}, + Economics: &economicsConfig, + EnableEpochsHandler: enableEpochsHandler, + TxVersionChecker: &testscommon.TxVersionCheckerStub{}, + EpochNotifier: &epochNotifier.EpochNotifierStub{}, }) return economicsData diff --git a/node/metrics/metrics.go 
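The refactor above replaces the hard-coded per-field booleans with a set-based selector: the requested `fields` string is split on `,`, `*` selects every field, an empty request still returns the hash, and the per-transaction values come from a single getters map, so exposing a new field only needs one new map entry and no new flag. A minimal, self-contained sketch of the same pattern (hypothetical names, not the repository code, so treat it only as an illustration):

```go
package main

import (
	"fmt"
	"strings"
)

type fieldSet map[string]struct{}

// newFieldSet mirrors the idea of newFieldsHandler: an empty request keeps only
// the hash; otherwise the comma-separated, case-insensitive names are stored in a set.
func newFieldSet(parameters string) fieldSet {
	if parameters == "" {
		return fieldSet{"hash": {}}
	}
	set := fieldSet{}
	for _, f := range strings.Split(strings.ToLower(parameters), ",") {
		set[f] = struct{}{}
	}
	return set
}

// isSet mirrors IsFieldSet: the wildcard "*" matches everything.
func (s fieldSet) isSet(field string) bool {
	if _, all := s["*"]; all {
		return true
	}
	_, ok := s[strings.ToLower(field)]
	return ok
}

func main() {
	// one getters map per transaction instead of one "if" per requested field
	getters := map[string]interface{}{
		"hash":   "aa11",
		"nonce":  uint64(7),
		"sender": "erd1...",
		"value":  "1000000000000000000",
	}

	requested := newFieldSet("sender,value")
	out := make(map[string]interface{})
	for field, value := range getters {
		if requested.isSet(field) {
			out[field] = value
		}
	}
	fmt.Println(out) // only sender and value are returned
}
```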
b/node/metrics/metrics.go index 3b1151f61af..ca2cd4e910a 100644 --- a/node/metrics/metrics.go +++ b/node/metrics/metrics.go @@ -53,6 +53,8 @@ func InitBaseMetrics(appStatusHandler core.AppStatusHandler) error { appStatusHandler.SetUInt64Value(common.MetricTrieSyncNumReceivedBytes, initUint) appStatusHandler.SetUInt64Value(common.MetricAccountsSnapshotInProgress, initUint) appStatusHandler.SetUInt64Value(common.MetricPeersSnapshotInProgress, initUint) + appStatusHandler.SetUInt64Value(common.MetricNonceAtEpochStart, initUint) + appStatusHandler.SetUInt64Value(common.MetricRoundAtEpochStart, initUint) appStatusHandler.SetInt64Value(common.MetricLastAccountsSnapshotDurationSec, initInt) appStatusHandler.SetInt64Value(common.MetricLastPeersSnapshotDurationSec, initInt) @@ -125,9 +127,7 @@ func InitConfigMetrics( appStatusHandler.SetUInt64Value(common.MetricESDTMultiTransferEnableEpoch, uint64(enableEpochs.ESDTMultiTransferEnableEpoch)) appStatusHandler.SetUInt64Value(common.MetricGlobalMintBurnDisableEpoch, uint64(enableEpochs.GlobalMintBurnDisableEpoch)) appStatusHandler.SetUInt64Value(common.MetricESDTTransferRoleEnableEpoch, uint64(enableEpochs.ESDTTransferRoleEnableEpoch)) - appStatusHandler.SetUInt64Value(common.MetricBuiltInFunctionOnMetaEnableEpoch, uint64(enableEpochs.BuiltInFunctionOnMetaEnableEpoch)) appStatusHandler.SetStringValue(common.MetricTotalSupply, economicsConfig.GlobalSettings.GenesisTotalSupply) - appStatusHandler.SetUInt64Value(common.MetricWaitingListFixEnableEpoch, uint64(enableEpochs.WaitingListFixEnableEpoch)) appStatusHandler.SetUInt64Value(common.MetricSetGuardianEnableEpoch, uint64(enableEpochs.SetGuardianEnableEpoch)) appStatusHandler.SetUInt64Value(common.MetricSetScToScLogEventEnableEpoch, uint64(enableEpochs.ScToScLogEventEnableEpoch)) diff --git a/node/metrics/metrics_test.go b/node/metrics/metrics_test.go index 0a0e3e57cc7..7da1a582626 100644 --- a/node/metrics/metrics_test.go +++ b/node/metrics/metrics_test.go @@ -10,6 +10,7 @@ import ( "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" "github.com/multiversx/mx-chain-go/testscommon" + "github.com/multiversx/mx-chain-go/testscommon/genesisMocks" "github.com/multiversx/mx-chain-go/testscommon/shardingMocks" "github.com/multiversx/mx-chain-go/testscommon/statusHandler" "github.com/stretchr/testify/assert" @@ -63,6 +64,8 @@ func TestInitBaseMetrics(t *testing.T) { common.MetricAccountsSnapshotNumNodes, common.MetricTrieSyncNumProcessedNodes, common.MetricTrieSyncNumReceivedBytes, + common.MetricRoundAtEpochStart, + common.MetricNonceAtEpochStart, } keys := make(map[string]struct{}) @@ -134,10 +137,8 @@ func TestInitConfigMetrics(t *testing.T) { ESDTMultiTransferEnableEpoch: 31, GlobalMintBurnDisableEpoch: 32, ESDTTransferRoleEnableEpoch: 33, - BuiltInFunctionOnMetaEnableEpoch: 34, - WaitingListFixEnableEpoch: 35, - SetGuardianEnableEpoch: 36, - ScToScLogEventEnableEpoch: 37, + SetGuardianEnableEpoch: 34, + ScToScLogEventEnableEpoch: 35, MaxNodesChangeEnableEpoch: []config.MaxNodesChangeConfig{ { EpochEnable: 0, @@ -186,8 +187,6 @@ func TestInitConfigMetrics(t *testing.T) { "erd_esdt_multi_transfer_enable_epoch": uint32(31), "erd_global_mint_burn_disable_epoch": uint32(32), "erd_esdt_transfer_role_enable_epoch": uint32(33), - "erd_builtin_function_on_meta_enable_epoch": uint32(34), - "erd_waiting_list_fix_enable_epoch": uint32(35), "erd_max_nodes_change_enable_epoch": nil, "erd_total_supply": "12345", "erd_hysteresis": "0.100000", @@ -195,8 +194,8 @@ func 
TestInitConfigMetrics(t *testing.T) { "erd_max_nodes_change_enable_epoch0_epoch_enable": uint32(0), "erd_max_nodes_change_enable_epoch0_max_num_nodes": uint32(1), "erd_max_nodes_change_enable_epoch0_nodes_to_shuffle_per_shard": uint32(2), - "erd_set_guardian_feature_enable_epoch": uint32(36), - "erd_set_sc_to_sc_log_event_enable_epoch": uint32(37), + "erd_set_guardian_feature_enable_epoch": uint32(34), + "erd_set_sc_to_sc_log_event_enable_epoch": uint32(35), common.MetricGatewayMetricsEndpoint: "http://localhost:8080", } @@ -206,7 +205,7 @@ func TestInitConfigMetrics(t *testing.T) { }, } - genesisNodesConfig := &testscommon.NodesSetupStub{ + genesisNodesConfig := &genesisMocks.NodesSetupStub{ GetAdaptivityCalled: func() bool { return true }, @@ -237,7 +236,7 @@ func TestInitConfigMetrics(t *testing.T) { assert.Equal(t, v, keys[k]) } - genesisNodesConfig = &testscommon.NodesSetupStub{ + genesisNodesConfig = &genesisMocks.NodesSetupStub{ GetAdaptivityCalled: func() bool { return false }, @@ -363,7 +362,7 @@ func TestInitMetrics(t *testing.T) { return 0 }, } - nodesSetup := &testscommon.NodesSetupStub{ + nodesSetup := &genesisMocks.NodesSetupStub{ GetShardConsensusGroupSizeCalled: func() uint32 { return 63 }, diff --git a/node/mock/peerProcessorMock.go b/node/mock/peerProcessorMock.go deleted file mode 100644 index 7ae112df225..00000000000 --- a/node/mock/peerProcessorMock.go +++ /dev/null @@ -1,133 +0,0 @@ -package mock - -import ( - "github.com/multiversx/mx-chain-core-go/data" - "github.com/multiversx/mx-chain-go/state" -) - -// ValidatorStatisticsProcessorMock - -type ValidatorStatisticsProcessorMock struct { - UpdatePeerStateCalled func(header data.MetaHeaderHandler) ([]byte, error) - RevertPeerStateCalled func(header data.MetaHeaderHandler) error - IsInterfaceNilCalled func() bool - - GetPeerAccountCalled func(address []byte) (state.PeerAccountHandler, error) - RootHashCalled func() ([]byte, error) - ResetValidatorStatisticsAtNewEpochCalled func(vInfos map[uint32][]*state.ValidatorInfo) error - GetValidatorInfoForRootHashCalled func(rootHash []byte) (map[uint32][]*state.ValidatorInfo, error) - ProcessCalled func(validatorInfo data.ShardValidatorInfoHandler) error - CommitCalled func() ([]byte, error) - ProcessRatingsEndOfEpochCalled func(validatorInfos map[uint32][]*state.ValidatorInfo, epoch uint32) error - PeerAccountToValidatorInfoCalled func(peerAccount state.PeerAccountHandler) *state.ValidatorInfo - SaveNodesCoordinatorUpdatesCalled func(epoch uint32) (bool, error) -} - -// SaveNodesCoordinatorUpdates - -func (vsp *ValidatorStatisticsProcessorMock) SaveNodesCoordinatorUpdates(epoch uint32) (bool, error) { - if vsp.SaveNodesCoordinatorUpdatesCalled != nil { - return vsp.SaveNodesCoordinatorUpdatesCalled(epoch) - } - return false, nil -} - -// PeerAccountToValidatorInfo - -func (vsp *ValidatorStatisticsProcessorMock) PeerAccountToValidatorInfo(peerAccount state.PeerAccountHandler) *state.ValidatorInfo { - if vsp.PeerAccountToValidatorInfoCalled != nil { - return vsp.PeerAccountToValidatorInfoCalled(peerAccount) - } - return nil -} - -// UpdatePeerState - -func (vsp *ValidatorStatisticsProcessorMock) UpdatePeerState(header data.MetaHeaderHandler, _ map[string]data.HeaderHandler) ([]byte, error) { - if vsp.UpdatePeerStateCalled != nil { - return vsp.UpdatePeerStateCalled(header) - } - return nil, nil -} - -// Process - -func (vsp *ValidatorStatisticsProcessorMock) Process(validatorInfo data.ShardValidatorInfoHandler) error { - if vsp.ProcessCalled != nil { - return 
vsp.ProcessCalled(validatorInfo) - } - - return nil -} - -// Commit - -func (vsp *ValidatorStatisticsProcessorMock) Commit() ([]byte, error) { - if vsp.CommitCalled != nil { - return vsp.CommitCalled() - } - - return nil, nil -} - -// ProcessRatingsEndOfEpoch - -func (vsp *ValidatorStatisticsProcessorMock) ProcessRatingsEndOfEpoch(validatorInfos map[uint32][]*state.ValidatorInfo, epoch uint32) error { - if vsp.ProcessRatingsEndOfEpochCalled != nil { - return vsp.ProcessRatingsEndOfEpochCalled(validatorInfos, epoch) - } - - return nil -} - -// ResetValidatorStatisticsAtNewEpoch - -func (vsp *ValidatorStatisticsProcessorMock) ResetValidatorStatisticsAtNewEpoch(vInfos map[uint32][]*state.ValidatorInfo) error { - if vsp.ResetValidatorStatisticsAtNewEpochCalled != nil { - return vsp.ResetValidatorStatisticsAtNewEpochCalled(vInfos) - } - return nil -} - -// GetValidatorInfoForRootHash - -func (vsp *ValidatorStatisticsProcessorMock) GetValidatorInfoForRootHash(rootHash []byte) (map[uint32][]*state.ValidatorInfo, error) { - if vsp.GetValidatorInfoForRootHashCalled != nil { - return vsp.GetValidatorInfoForRootHashCalled(rootHash) - } - return nil, nil -} - -// RevertPeerState - -func (vsp *ValidatorStatisticsProcessorMock) RevertPeerState(header data.MetaHeaderHandler) error { - if vsp.RevertPeerStateCalled != nil { - return vsp.RevertPeerStateCalled(header) - } - return nil -} - -// RootHash - -func (vsp *ValidatorStatisticsProcessorMock) RootHash() ([]byte, error) { - if vsp.RootHashCalled != nil { - return vsp.RootHashCalled() - } - return nil, nil -} - -// GetExistingPeerAccount - -func (vsp *ValidatorStatisticsProcessorMock) GetExistingPeerAccount(address []byte) (state.PeerAccountHandler, error) { - if vsp.GetPeerAccountCalled != nil { - return vsp.GetPeerAccountCalled(address) - } - - return nil, nil -} - -// DisplayRatings - -func (vsp *ValidatorStatisticsProcessorMock) DisplayRatings(_ uint32) { -} - -// SetLastFinalizedRootHash - -func (vsp *ValidatorStatisticsProcessorMock) SetLastFinalizedRootHash(_ []byte) { -} - -// LastFinalizedRootHash - -func (vsp *ValidatorStatisticsProcessorMock) LastFinalizedRootHash() []byte { - return nil -} - -// IsInterfaceNil - -func (vsp *ValidatorStatisticsProcessorMock) IsInterfaceNil() bool { - return false -} diff --git a/node/mock/validatorStatisticsProcessorStub.go b/node/mock/validatorStatisticsProcessorStub.go deleted file mode 100644 index 2233bc84f03..00000000000 --- a/node/mock/validatorStatisticsProcessorStub.go +++ /dev/null @@ -1,130 +0,0 @@ -package mock - -import ( - "github.com/multiversx/mx-chain-core-go/data" - "github.com/multiversx/mx-chain-go/state" -) - -// ValidatorStatisticsProcessorStub - -type ValidatorStatisticsProcessorStub struct { - UpdatePeerStateCalled func(header data.MetaHeaderHandler) ([]byte, error) - RevertPeerStateCalled func(header data.MetaHeaderHandler) error - GetPeerAccountCalled func(address []byte) (state.PeerAccountHandler, error) - RootHashCalled func() ([]byte, error) - ResetValidatorStatisticsAtNewEpochCalled func(vInfos map[uint32][]*state.ValidatorInfo) error - GetValidatorInfoForRootHashCalled func(rootHash []byte) (map[uint32][]*state.ValidatorInfo, error) - ProcessRatingsEndOfEpochCalled func(validatorInfos map[uint32][]*state.ValidatorInfo, epoch uint32) error - ProcessCalled func(validatorInfo data.ShardValidatorInfoHandler) error - CommitCalled func() ([]byte, error) - PeerAccountToValidatorInfoCalled func(peerAccount state.PeerAccountHandler) *state.ValidatorInfo - SaveNodesCoordinatorUpdatesCalled 
func(epoch uint32) (bool, error) -} - -// SaveNodesCoordinatorUpdates - -func (vsp *ValidatorStatisticsProcessorStub) SaveNodesCoordinatorUpdates(epoch uint32) (bool, error) { - if vsp.SaveNodesCoordinatorUpdatesCalled != nil { - return vsp.SaveNodesCoordinatorUpdatesCalled(epoch) - } - return false, nil -} - -// PeerAccountToValidatorInfo - -func (vsp *ValidatorStatisticsProcessorStub) PeerAccountToValidatorInfo(peerAccount state.PeerAccountHandler) *state.ValidatorInfo { - if vsp.PeerAccountToValidatorInfoCalled != nil { - return vsp.PeerAccountToValidatorInfoCalled(peerAccount) - } - return nil -} - -// Process - -func (vsp *ValidatorStatisticsProcessorStub) Process(validatorInfo data.ShardValidatorInfoHandler) error { - if vsp.ProcessCalled != nil { - return vsp.ProcessCalled(validatorInfo) - } - - return nil -} - -// Commit - -func (vsp *ValidatorStatisticsProcessorStub) Commit() ([]byte, error) { - if vsp.CommitCalled != nil { - return vsp.CommitCalled() - } - - return nil, nil -} - -// ResetValidatorStatisticsAtNewEpoch - -func (vsp *ValidatorStatisticsProcessorStub) ResetValidatorStatisticsAtNewEpoch(vInfos map[uint32][]*state.ValidatorInfo) error { - if vsp.ResetValidatorStatisticsAtNewEpochCalled != nil { - return vsp.ResetValidatorStatisticsAtNewEpochCalled(vInfos) - } - return nil -} - -// GetValidatorInfoForRootHash - -func (vsp *ValidatorStatisticsProcessorStub) GetValidatorInfoForRootHash(rootHash []byte) (map[uint32][]*state.ValidatorInfo, error) { - if vsp.GetValidatorInfoForRootHashCalled != nil { - return vsp.GetValidatorInfoForRootHashCalled(rootHash) - } - return nil, nil -} - -// UpdatePeerState - -func (vsp *ValidatorStatisticsProcessorStub) UpdatePeerState(header data.MetaHeaderHandler, _ map[string]data.HeaderHandler) ([]byte, error) { - if vsp.UpdatePeerStateCalled != nil { - return vsp.UpdatePeerStateCalled(header) - } - return nil, nil -} - -// ProcessRatingsEndOfEpoch - -func (vsp *ValidatorStatisticsProcessorStub) ProcessRatingsEndOfEpoch(validatorInfos map[uint32][]*state.ValidatorInfo, epoch uint32) error { - if vsp.ProcessRatingsEndOfEpochCalled != nil { - return vsp.ProcessRatingsEndOfEpochCalled(validatorInfos, epoch) - } - return nil -} - -// RevertPeerState - -func (vsp *ValidatorStatisticsProcessorStub) RevertPeerState(header data.MetaHeaderHandler) error { - if vsp.RevertPeerStateCalled != nil { - return vsp.RevertPeerStateCalled(header) - } - return nil -} - -// RootHash - -func (vsp *ValidatorStatisticsProcessorStub) RootHash() ([]byte, error) { - if vsp.RootHashCalled != nil { - return vsp.RootHashCalled() - } - return nil, nil -} - -// SetLastFinalizedRootHash - -func (vsp *ValidatorStatisticsProcessorStub) SetLastFinalizedRootHash(_ []byte) { -} - -// LastFinalizedRootHash - -func (vsp *ValidatorStatisticsProcessorStub) LastFinalizedRootHash() []byte { - return nil -} - -// GetExistingPeerAccount - -func (vsp *ValidatorStatisticsProcessorStub) GetExistingPeerAccount(address []byte) (state.PeerAccountHandler, error) { - if vsp.GetPeerAccountCalled != nil { - return vsp.GetPeerAccountCalled(address) - } - - return nil, nil -} - -// DisplayRatings - -func (vsp *ValidatorStatisticsProcessorStub) DisplayRatings(_ uint32) { -} - -// IsInterfaceNil - -func (vsp *ValidatorStatisticsProcessorStub) IsInterfaceNil() bool { - return false -} diff --git a/node/node.go b/node/node.go index 001bbd23f30..978fd45dc99 100644 --- a/node/node.go +++ b/node/node.go @@ -1008,6 +1008,11 @@ func (n *Node) ValidatorStatisticsApi() (map[string]*validator.ValidatorStatisti 
return n.processComponents.ValidatorsProvider().GetLatestValidators(), nil } +// AuctionListApi will return the auction list config along with qualified nodes +func (n *Node) AuctionListApi() ([]*common.AuctionListValidatorAPIResponse, error) { + return n.processComponents.ValidatorsProvider().GetAuctionList() +} + // DirectTrigger will start the hardfork trigger func (n *Node) DirectTrigger(epoch uint32, withEarlyEndOfEpoch bool) error { return n.processComponents.HardforkTrigger().Trigger(epoch, withEarlyEndOfEpoch) diff --git a/node/nodeRunner.go b/node/nodeRunner.go index 10021772c39..54ffe84b4e3 100644 --- a/node/nodeRunner.go +++ b/node/nodeRunner.go @@ -167,12 +167,10 @@ func printEnableEpochs(configs *config.Configs) { log.Debug(readEpochFor("save jailed always"), "epoch", enableEpochs.SaveJailedAlwaysEnableEpoch) log.Debug(readEpochFor("validator to delegation"), "epoch", enableEpochs.ValidatorToDelegationEnableEpoch) log.Debug(readEpochFor("re-delegate below minimum check"), "epoch", enableEpochs.ReDelegateBelowMinCheckEnableEpoch) - log.Debug(readEpochFor("waiting waiting list"), "epoch", enableEpochs.WaitingListFixEnableEpoch) log.Debug(readEpochFor("increment SCR nonce in multi transfer"), "epoch", enableEpochs.IncrementSCRNonceInMultiTransferEnableEpoch) log.Debug(readEpochFor("esdt and NFT multi transfer"), "epoch", enableEpochs.ESDTMultiTransferEnableEpoch) log.Debug(readEpochFor("contract global mint and burn"), "epoch", enableEpochs.GlobalMintBurnDisableEpoch) log.Debug(readEpochFor("contract transfer role"), "epoch", enableEpochs.ESDTTransferRoleEnableEpoch) - log.Debug(readEpochFor("built in functions on metachain"), "epoch", enableEpochs.BuiltInFunctionOnMetaEnableEpoch) log.Debug(readEpochFor("compute rewards checkpoint on delegation"), "epoch", enableEpochs.ComputeRewardCheckpointEnableEpoch) log.Debug(readEpochFor("esdt NFT create on multiple shards"), "epoch", enableEpochs.ESDTNFTCreateOnMultiShardEnableEpoch) log.Debug(readEpochFor("SCR size invariant check"), "epoch", enableEpochs.SCRSizeInvariantCheckEnableEpoch) @@ -208,6 +206,11 @@ func printEnableEpochs(configs *config.Configs) { log.Debug(readEpochFor("refactor peers mini blocks"), "epoch", enableEpochs.RefactorPeersMiniBlocksEnableEpoch) log.Debug(readEpochFor("runtime memstore limit"), "epoch", enableEpochs.RuntimeMemStoreLimitEnableEpoch) log.Debug(readEpochFor("max blockchainhook counters"), "epoch", enableEpochs.MaxBlockchainHookCountersEnableEpoch) + log.Debug(readEpochFor("limit validators"), "epoch", enableEpochs.StakeLimitsEnableEpoch) + log.Debug(readEpochFor("staking v4 step 1"), "epoch", enableEpochs.StakingV4Step1EnableEpoch) + log.Debug(readEpochFor("staking v4 step 2"), "epoch", enableEpochs.StakingV4Step2EnableEpoch) + log.Debug(readEpochFor("staking v4 step 3"), "epoch", enableEpochs.StakingV4Step3EnableEpoch) + gasSchedule := configs.EpochConfig.GasSchedule log.Debug(readEpochFor("gas schedule directories paths"), "epoch", gasSchedule.GasScheduleByEpochs) @@ -269,6 +272,10 @@ func (nr *nodeRunner) executeOneComponentCreationCycle( chanStopNodeProcess chan endProcess.ArgEndProcess, ) (bool, error) { goRoutinesNumberStart := runtime.NumGoroutine() + + log.Debug("applying custom configs based on the current architecture") + ApplyArchCustomConfigs(nr.configs) + configs := nr.configs flagsConfig := configs.FlagsConfig configurationPaths := configs.ConfigurationPathsHolder @@ -284,6 +291,11 @@ func (nr *nodeRunner) executeOneComponentCreationCycle( return true, err } + err = 
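`ApplyArchCustomConfigs` itself is not part of this excerpt, so the sketch below is only a guess at the general shape of an architecture-based override applied before any component is created; the `hypotheticalConfigs` type and its field are invented for illustration and are not the node's real configuration fields.

```go
package main

import (
	"fmt"
	"runtime"
)

// hypotheticalConfigs stands in for the node's *config.Configs; the field name is invented.
type hypotheticalConfigs struct {
	UseArmCompatibleVM bool
}

// applyArchCustomConfigsSketch illustrates the idea logged above: inspect the
// current architecture and adjust the configuration before components are built.
func applyArchCustomConfigsSketch(cfg *hypotheticalConfigs) {
	if runtime.GOARCH == "arm64" {
		cfg.UseArmCompatibleVM = true
	}
}

func main() {
	cfg := &hypotheticalConfigs{}
	applyArchCustomConfigsSketch(cfg)
	fmt.Printf("GOARCH=%s, UseArmCompatibleVM=%v\n", runtime.GOARCH, cfg.UseArmCompatibleVM)
}
```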
config.SanityCheckNodesConfig(managedCoreComponents.GenesisNodesSetup(), configs.EpochConfig.EnableEpochs) + if err != nil { + return true, err + } + log.Debug("creating status core components") managedStatusCoreComponents, err := nr.CreateManagedStatusCoreComponents(managedCoreComponents) if err != nil { @@ -375,6 +387,7 @@ func (nr *nodeRunner) executeOneComponentCreationCycle( managedCoreComponents.NodeTypeProvider(), managedCoreComponents.EnableEpochsHandler(), managedDataComponents.Datapool().CurrentEpochValidatorInfo(), + managedBootstrapComponents.NodesCoordinatorRegistryFactory(), ) if err != nil { return true, err @@ -430,7 +443,6 @@ func (nr *nodeRunner) executeOneComponentCreationCycle( managedStateComponents, managedBootstrapComponents, managedProcessComponents, - managedStatusCoreComponents, ) if err != nil { return true, err @@ -559,7 +571,6 @@ func addSyncersToAccountsDB( stateComponents mainFactory.StateComponentsHolder, bootstrapComponents mainFactory.BootstrapComponentsHolder, processComponents mainFactory.ProcessComponentsHolder, - statusCoreComponents mainFactory.StatusCoreComponentsHolder, ) error { selfId := bootstrapComponents.ShardCoordinator().SelfId() if selfId == core.MetachainShardId { @@ -569,7 +580,6 @@ func addSyncersToAccountsDB( dataComponents, stateComponents, processComponents, - statusCoreComponents, ) if err != nil { return err @@ -593,7 +603,6 @@ func addSyncersToAccountsDB( stateComponents, bootstrapComponents, processComponents, - statusCoreComponents, ) if err != nil { return err @@ -613,7 +622,6 @@ func getUserAccountSyncer( stateComponents mainFactory.StateComponentsHolder, bootstrapComponents mainFactory.BootstrapComponentsHolder, processComponents mainFactory.ProcessComponentsHolder, - statusCoreComponents mainFactory.StatusCoreComponentsHolder, ) (process.AccountsDBSyncer, error) { maxTrieLevelInMemory := config.StateTriesConfig.MaxStateTrieLevelInMemory userTrie := stateComponents.TriesContainer().Get([]byte(dataRetriever.UserAccountsUnit.String())) @@ -631,7 +639,6 @@ func getUserAccountSyncer( dataComponents, processComponents, storageManager, - statusCoreComponents, maxTrieLevelInMemory, ), ShardId: bootstrapComponents.ShardCoordinator().SelfId(), @@ -648,7 +655,6 @@ func getValidatorAccountSyncer( dataComponents mainFactory.DataComponentsHolder, stateComponents mainFactory.StateComponentsHolder, processComponents mainFactory.ProcessComponentsHolder, - statusCoreComponents mainFactory.StatusCoreComponentsHolder, ) (process.AccountsDBSyncer, error) { maxTrieLevelInMemory := config.StateTriesConfig.MaxPeerTrieLevelInMemory peerTrie := stateComponents.TriesContainer().Get([]byte(dataRetriever.PeerAccountsUnit.String())) @@ -661,7 +667,6 @@ func getValidatorAccountSyncer( dataComponents, processComponents, storageManager, - statusCoreComponents, maxTrieLevelInMemory, ), } @@ -675,7 +680,6 @@ func getBaseAccountSyncerArgs( dataComponents mainFactory.DataComponentsHolder, processComponents mainFactory.ProcessComponentsHolder, storageManager common.StorageManager, - statusCoreComponents mainFactory.StatusCoreComponentsHolder, maxTrieLevelInMemory uint, ) syncer.ArgsNewBaseAccountsSyncer { return syncer.ArgsNewBaseAccountsSyncer{ @@ -1026,7 +1030,7 @@ func (nr *nodeRunner) logInformation( log.Info("Bootstrap", "epoch", bootstrapComponents.EpochBootstrapParams().Epoch()) if bootstrapComponents.EpochBootstrapParams().NodesConfig() != nil { log.Info("the epoch from nodesConfig is", - "epoch", 
bootstrapComponents.EpochBootstrapParams().NodesConfig().CurrentEpoch) + "epoch", bootstrapComponents.EpochBootstrapParams().NodesConfig().GetCurrentEpoch()) } var shardIdString = core.GetShardIDString(bootstrapComponents.ShardCoordinator().SelfId()) @@ -1234,8 +1238,10 @@ func (nr *nodeRunner) CreateManagedProcessComponents( processArgs := processComp.ProcessComponentsFactoryArgs{ Config: *configs.GeneralConfig, EpochConfig: *configs.EpochConfig, + RoundConfig: *configs.RoundConfig, PrefConfigs: *configs.PreferencesConfig, ImportDBConfig: *configs.ImportDbConfig, + EconomicsConfig: *configs.EconomicsConfig, AccountsParser: accountsParser, SmartContractParser: smartContractParser, GasSchedule: gasScheduleNotifier, diff --git a/node/nodeRunner_test.go b/node/nodeRunner_test.go index bb20b16fc47..5d0e9a7666c 100644 --- a/node/nodeRunner_test.go +++ b/node/nodeRunner_test.go @@ -1,5 +1,3 @@ -//go:build !race - package node import ( @@ -22,7 +20,9 @@ import ( const originalConfigsPath = "../cmd/node/config" func TestNewNodeRunner(t *testing.T) { - t.Parallel() + if testing.Short() { + t.Skip("this is not a short test") + } t.Run("nil configs should error", func(t *testing.T) { t.Parallel() @@ -45,7 +45,9 @@ func TestNewNodeRunner(t *testing.T) { } func TestNodeRunner_StartAndCloseNodeUsingSIGINT(t *testing.T) { - t.Parallel() + if testing.Short() { + t.Skip("this is not a short test") + } configs, err := testscommon.CreateTestConfigs(t.TempDir(), originalConfigsPath) require.Nil(t, err) @@ -76,7 +78,9 @@ func TestNodeRunner_StartAndCloseNodeUsingSIGINT(t *testing.T) { } func TestCopyDirectory(t *testing.T) { - t.Parallel() + if testing.Short() { + t.Skip("this is not a short test") + } file1Name := "file1.toml" file1Contents := []byte("file1") @@ -134,7 +138,9 @@ func TestCopyDirectory(t *testing.T) { } func TestWaitForSignal(t *testing.T) { - t.Parallel() + if testing.Short() { + t.Skip("this is not a short test") + } closedCalled := make(map[string]struct{}) healthServiceClosableComponent := &mock.CloserStub{ diff --git a/node/nodeTesting.go b/node/nodeTesting.go index 29683432508..bcd15052e21 100644 --- a/node/nodeTesting.go +++ b/node/nodeTesting.go @@ -264,7 +264,7 @@ func (n *Node) generateAndSignTxBuffArray( return tx, signedMarshalizedTx, nil } -//GenerateTransaction generates a new transaction with sender, receiver, amount and code +// GenerateTransaction generates a new transaction with sender, receiver, amount and code func (n *Node) GenerateTransaction(senderHex string, receiverHex string, value *big.Int, transactionData string, privateKey crypto.PrivateKey, chainID []byte, minTxVersion uint32) (*transaction.Transaction, error) { if check.IfNil(n.coreComponents.AddressPubKeyConverter()) { return nil, ErrNilPubkeyConverter diff --git a/node/node_test.go b/node/node_test.go index 152cf98bdd7..2cde11d08a0 100644 --- a/node/node_test.go +++ b/node/node_test.go @@ -56,11 +56,13 @@ import ( "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" "github.com/multiversx/mx-chain-go/testscommon/epochNotifier" factoryTests "github.com/multiversx/mx-chain-go/testscommon/factory" + "github.com/multiversx/mx-chain-go/testscommon/genesisMocks" "github.com/multiversx/mx-chain-go/testscommon/guardianMocks" "github.com/multiversx/mx-chain-go/testscommon/mainFactoryMocks" "github.com/multiversx/mx-chain-go/testscommon/marshallerMock" "github.com/multiversx/mx-chain-go/testscommon/p2pmocks" "github.com/multiversx/mx-chain-go/testscommon/shardingMocks" + 
"github.com/multiversx/mx-chain-go/testscommon/stakingcommon" stateMock "github.com/multiversx/mx-chain-go/testscommon/state" statusHandlerMock "github.com/multiversx/mx-chain-go/testscommon/statusHandler" mockStorage "github.com/multiversx/mx-chain-go/testscommon/storage" @@ -3203,12 +3205,11 @@ func TestNode_ValidatorStatisticsApi(t *testing.T) { initialPubKeys[1] = keys[1] initialPubKeys[2] = keys[2] - validatorsInfo := make(map[uint32][]*state.ValidatorInfo) + validatorsInfo := state.NewShardValidatorsInfoMap() for shardId, pubkeysPerShard := range initialPubKeys { - validatorsInfo[shardId] = make([]*state.ValidatorInfo, 0) for _, pubKey := range pubkeysPerShard { - validatorsInfo[shardId] = append(validatorsInfo[shardId], &state.ValidatorInfo{ + _ = validatorsInfo.Add(&state.ValidatorInfo{ PublicKey: []byte(pubKey), ShardId: shardId, List: "", @@ -3230,26 +3231,25 @@ func TestNode_ValidatorStatisticsApi(t *testing.T) { } } - vsp := &mock.ValidatorStatisticsProcessorStub{ + vsp := &testscommon.ValidatorStatisticsProcessorStub{ RootHashCalled: func() (i []byte, err error) { return []byte("hash"), nil }, - GetValidatorInfoForRootHashCalled: func(rootHash []byte) (m map[uint32][]*state.ValidatorInfo, err error) { + GetValidatorInfoForRootHashCalled: func(rootHash []byte) (m state.ShardValidatorsInfoMapHandler, err error) { return validatorsInfo, nil }, } - validatorProvider := &mock.ValidatorsProviderStub{GetLatestValidatorsCalled: func() map[string]*validator.ValidatorStatistics { - apiResponses := make(map[string]*validator.ValidatorStatistics) + validatorProvider := &stakingcommon.ValidatorsProviderStub{ + GetLatestValidatorsCalled: func() map[string]*validator.ValidatorStatistics { + apiResponses := make(map[string]*validator.ValidatorStatistics) - for _, vis := range validatorsInfo { - for _, vi := range vis { + for _, vi := range validatorsInfo.GetAllValidatorsInfo() { apiResponses[hex.EncodeToString(vi.GetPublicKey())] = &validator.ValidatorStatistics{} } - } - return apiResponses - }, + return apiResponses + }, } processComponents := getDefaultProcessComponents() @@ -5100,7 +5100,7 @@ func getDefaultCoreComponents() *nodeMockFactory.CoreComponentsMock { APIEconomicsHandler: &economicsmocks.EconomicsHandlerMock{}, RatingsConfig: &testscommon.RatingsInfoMock{}, RatingHandler: &testscommon.RaterMock{}, - NodesConfig: &testscommon.NodesSetupStub{}, + NodesConfig: &genesisMocks.NodesSetupStub{}, StartTime: time.Time{}, EpochChangeNotifier: &epochNotifier.EpochNotifierStub{}, TxVersionCheckHandler: versioning.NewTxVersionChecker(0), @@ -5125,8 +5125,8 @@ func getDefaultProcessComponents() *factoryMock.ProcessComponentsMock { BootSore: &mock.BootstrapStorerMock{}, HeaderSigVerif: &mock.HeaderSigVerifierStub{}, HeaderIntegrVerif: &mock.HeaderIntegrityVerifierStub{}, - ValidatorStatistics: &mock.ValidatorStatisticsProcessorMock{}, - ValidatorProvider: &mock.ValidatorsProviderStub{}, + ValidatorStatistics: &testscommon.ValidatorStatisticsProcessorStub{}, + ValidatorProvider: &stakingcommon.ValidatorsProviderStub{}, BlockTrack: &mock.BlockTrackerStub{}, PendingMiniBlocksHdl: &mock.PendingMiniBlocksHandlerStub{}, ReqHandler: &testscommon.RequestHandlerStub{}, diff --git a/outport/process/interface.go b/outport/process/interface.go index abcbbe10fec..5fcb19020f3 100644 --- a/outport/process/interface.go +++ b/outport/process/interface.go @@ -34,6 +34,7 @@ type GasConsumedProvider interface { type EconomicsDataHandler interface { ComputeGasUsedAndFeeBasedOnRefundValue(tx 
data.TransactionWithFeeHandler, refundValue *big.Int) (uint64, *big.Int) ComputeTxFeeBasedOnGasUsed(tx data.TransactionWithFeeHandler, gasUsed uint64) *big.Int + ComputeTxFee(tx data.TransactionWithFeeHandler) *big.Int ComputeGasLimit(tx data.TransactionWithFeeHandler) uint64 IsInterfaceNil() bool MaxGasLimitPerBlock(shardID uint32) uint64 diff --git a/outport/process/transactionsfee/interface.go b/outport/process/transactionsfee/interface.go index fa09f18076a..53042467442 100644 --- a/outport/process/transactionsfee/interface.go +++ b/outport/process/transactionsfee/interface.go @@ -12,6 +12,7 @@ import ( type FeesProcessorHandler interface { ComputeGasUsedAndFeeBasedOnRefundValue(tx data.TransactionWithFeeHandler, refundValue *big.Int) (uint64, *big.Int) ComputeTxFeeBasedOnGasUsed(tx data.TransactionWithFeeHandler, gasUsed uint64) *big.Int + ComputeTxFee(tx data.TransactionWithFeeHandler) *big.Int ComputeGasLimit(tx data.TransactionWithFeeHandler) uint64 IsInterfaceNil() bool } diff --git a/outport/process/transactionsfee/transactionsFeeProcessor.go b/outport/process/transactionsfee/transactionsFeeProcessor.go index 593a5d6b83b..c77956f5365 100644 --- a/outport/process/transactionsfee/transactionsFeeProcessor.go +++ b/outport/process/transactionsfee/transactionsFeeProcessor.go @@ -90,7 +90,7 @@ func (tep *transactionsFeeProcessor) PutFeeAndGasUsed(pool *outportcore.Transact func (tep *transactionsFeeProcessor) prepareInvalidTxs(pool *outportcore.TransactionPool) { for _, invalidTx := range pool.InvalidTxs { - fee := tep.txFeeCalculator.ComputeTxFeeBasedOnGasUsed(invalidTx.Transaction, invalidTx.Transaction.GasLimit) + fee := tep.txFeeCalculator.ComputeTxFee(invalidTx.Transaction) invalidTx.FeeInfo.SetGasUsed(invalidTx.Transaction.GetGasLimit()) invalidTx.FeeInfo.SetFee(fee) invalidTx.FeeInfo.SetInitialPaidFee(fee) @@ -103,7 +103,7 @@ func (tep *transactionsFeeProcessor) prepareNormalTxs(transactionsAndScrs *trans gasUsed := tep.txFeeCalculator.ComputeGasLimit(txHandler) fee := tep.txFeeCalculator.ComputeTxFeeBasedOnGasUsed(txHandler, gasUsed) - initialPaidFee := tep.txFeeCalculator.ComputeTxFeeBasedOnGasUsed(txHandler, txHandler.GetGasLimit()) + initialPaidFee := tep.txFeeCalculator.ComputeTxFee(txHandler) feeInfo := txWithResult.GetFeeInfo() feeInfo.SetGasUsed(gasUsed) @@ -137,15 +137,23 @@ func (tep *transactionsFeeProcessor) prepareTxWithResults(txHashHex string, txWi } } - tep.prepareTxWithResultsBasedOnLogs(txWithResults, hasRefund) + tep.prepareTxWithResultsBasedOnLogs(txHashHex, txWithResults, hasRefund) } func (tep *transactionsFeeProcessor) prepareTxWithResultsBasedOnLogs( + txHashHex string, txWithResults *transactionWithResults, hasRefund bool, ) { - if check.IfNilReflect(txWithResults.log) { + tx := txWithResults.GetTxHandler() + if check.IfNil(tx) { + tep.log.Warn("tep.prepareTxWithResultsBasedOnLogs nil transaction handler", "txHash", txHashHex) + return + } + + res := tep.dataFieldParser.Parse(tx.GetData(), tx.GetSndAddr(), tx.GetRcvAddr(), tep.shardCoordinator.NumberOfShards()) + if check.IfNilReflect(txWithResults.log) || (res.Function == "" && res.Operation == datafield.OperationTransfer) { return } diff --git a/outport/process/transactionsfee/transactionsFeeProcessor_test.go b/outport/process/transactionsfee/transactionsFeeProcessor_test.go index e0efbab8ada..8ff4cf14501 100644 --- a/outport/process/transactionsfee/transactionsFeeProcessor_test.go +++ b/outport/process/transactionsfee/transactionsFeeProcessor_test.go @@ -212,11 +212,15 @@ func 
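Two related changes land in the outport fee processor above: invalid transactions and the initial paid fee of normal transactions now go through the newly exposed `ComputeTxFee(tx)`, which derives the fee from the transaction itself instead of from a caller-supplied gas amount, and `prepareTxWithResultsBasedOnLogs` now parses the data field and returns early for plain value transfers (`Function == "" && Operation == transfer`), mirroring the node/external change earlier in this diff. The toy classifier below only shows what that guard condition means; the real classification is delegated to the vm-common dataField parser and also takes the receiver into account, so the names and rules here are simplifications, not the actual parser.

```go
package main

import (
	"fmt"
	"strings"
)

// classify is a toy stand-in for the dataField parser: it only distinguishes a
// plain transfer (no function) from something that looks like a contract call.
func classify(data []byte) (operation, function string) {
	if len(data) == 0 {
		return "transfer", ""
	}
	parts := strings.Split(string(data), "@")
	if parts[0] == "" {
		return "transfer", ""
	}
	return "scCall", parts[0]
}

func main() {
	for _, payload := range [][]byte{nil, []byte("claimRewards@01")} {
		op, fn := classify(payload)
		skipLogAdjustment := fn == "" && op == "transfer"
		fmt.Printf("data=%q operation=%s function=%q skipLogAdjustment=%v\n", payload, op, fn, skipLogAdjustment)
	}
}
```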
TestPutFeeAndGasUsedInvalidTxs(t *testing.T) { func TestPutFeeAndGasUsedLogWithErrorAndInformative(t *testing.T) { t.Parallel() + receiver, _ := hex.DecodeString("00000000000000000500d3b28828d62052124f07dcd50ed31b0825f60eee1526") tx1Hash := "h1" tx1 := &outportcore.TxInfo{ Transaction: &transaction.Transaction{ GasLimit: 30000000, GasPrice: 1000000000, + SndAddr: []byte("erd1dglncxk6sl9a3xumj78n6z2xux4ghp5c92cstv5zsn56tjgtdwpsk46qrs"), + RcvAddr: receiver, + Data: []byte("here"), }, FeeInfo: &outportcore.FeeInfo{Fee: big.NewInt(0)}, } @@ -226,6 +230,9 @@ func TestPutFeeAndGasUsedLogWithErrorAndInformative(t *testing.T) { Transaction: &transaction.Transaction{ GasLimit: 50000000, GasPrice: 1000000000, + SndAddr: []byte("erd1dglncxk6sl9a3xumj78n6z2xux4ghp5c92cstv5zsn56tjgtdwpsk46qrs"), + RcvAddr: receiver, + Data: []byte("here"), }, FeeInfo: &outportcore.FeeInfo{Fee: big.NewInt(0)}, } @@ -520,3 +527,59 @@ func TestPutFeeAndGasUsedScrWithRefund(t *testing.T) { require.Equal(t, big.NewInt(552865000000000), initialTx.GetFeeInfo().GetFee()) require.Equal(t, uint64(50_336_500), initialTx.GetFeeInfo().GetGasUsed()) } + +func TestMoveBalanceWithSignalError(t *testing.T) { + txHash := []byte("e3cdb8b4936fdbee2d3b1244b4c49959df5f90ada683d650019d244e5a64afaf") + initialTx := &outportcore.TxInfo{Transaction: &transaction.Transaction{ + Nonce: 1004, + GasLimit: 12_175_500, + GasPrice: 1000000000, + SndAddr: []byte("erd1s8jr8e8hsvv7c9ehmshcjlpzf9ua5l50qeswa8feshrp6xlz9c7quacmtx"), + RcvAddr: []byte("erd1qqqqqqqqqqqqqqqpqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqzllls8a5w6u"), + Data: []byte("start@5465737420526166666c65203120f09f9a80@10000000000000000@0100000002@01000000006082a400@0100000001@01000000023232@"), + }, FeeInfo: &outportcore.FeeInfo{Fee: big.NewInt(0)}} + + scrHash := []byte("scrHash") + scr := &outportcore.SCRInfo{ + SmartContractResult: &smartContractResult.SmartContractResult{ + Nonce: 1005, + SndAddr: []byte("erd1qqqqqqqqqqqqqqqpqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqzllls8a5w6u"), + RcvAddr: []byte("erd1s8jr8e8hsvv7c9ehmshcjlpzf9ua5l50qeswa8feshrp6xlz9c7quacmtx"), + PrevTxHash: txHash, + OriginalTxHash: txHash, + Value: big.NewInt(0), + Data: []byte("@sending value to non payable contract"), + }, + FeeInfo: &outportcore.FeeInfo{Fee: big.NewInt(0)}, + } + + pool := &outportcore.TransactionPool{ + SmartContractResults: map[string]*outportcore.SCRInfo{ + hex.EncodeToString(scrHash): scr, + }, + Transactions: map[string]*outportcore.TxInfo{ + hex.EncodeToString(txHash): initialTx, + }, + Logs: []*outportcore.LogData{ + { + Log: &transaction.Log{ + Events: []*transaction.Event{ + { + Identifier: []byte(core.SignalErrorOperation), + }, + }, + }, + TxHash: hex.EncodeToString(txHash), + }, + }, + } + + arg := prepareMockArg() + txsFeeProc, err := NewTransactionsFeeProcessor(arg) + require.NotNil(t, txsFeeProc) + require.Nil(t, err) + + err = txsFeeProc.PutFeeAndGasUsed(pool) + require.Nil(t, err) + require.Equal(t, uint64(225_500), initialTx.GetFeeInfo().GetGasUsed()) +} diff --git a/process/block/baseProcess.go b/process/block/baseProcess.go index fcd77d0c75d..b12aa6b2783 100644 --- a/process/block/baseProcess.go +++ b/process/block/baseProcess.go @@ -121,6 +121,7 @@ type baseProcessor struct { mutNonceOfFirstCommittedBlock sync.RWMutex nonceOfFirstCommittedBlock core.OptionalUint64 + extraDelayRequestBlockInfo time.Duration } type bootStorerDataArgs struct { @@ -1685,7 +1686,7 @@ func (bp *baseProcessor) requestMiniBlocksIfNeeded(headerHandler data.HeaderHand return } - waitTime := 
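The 225_500 gas used expected by `TestMoveBalanceWithSignalError` above is the transaction's move-balance gas (what `ComputeGasLimit` returns), not the 12_175_500 gas limit. Assuming the test's economics config uses the commonly seen MinGasLimit of 50,000 and GasPerDataByte of 1,500 (an assumption, since those values come from the economics config and are not shown here), the number falls out of the 117-byte data payload:

```go
package main

import "fmt"

func main() {
	const (
		minGasLimit    = 50_000 // assumed economics value
		gasPerDataByte = 1_500  // assumed economics value
	)
	data := "start@5465737420526166666c65203120f09f9a80@10000000000000000@0100000002@01000000006082a400@0100000001@01000000023232@"

	moveBalanceGas := minGasLimit + gasPerDataByte*len(data)
	fmt.Println(len(data), moveBalanceGas) // 117 225500
}
```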
common.ExtraDelayForRequestBlockInfo + waitTime := bp.extraDelayRequestBlockInfo roundDifferences := bp.roundHandler.Index() - int64(headerHandler.GetRound()) if roundDifferences > 1 { waitTime = 0 diff --git a/process/block/export_test.go b/process/block/export_test.go index 5cd147dc794..2332115613c 100644 --- a/process/block/export_test.go +++ b/process/block/export_test.go @@ -11,6 +11,7 @@ import ( "github.com/multiversx/mx-chain-core-go/data/scheduled" "github.com/multiversx/mx-chain-core-go/hashing" "github.com/multiversx/mx-chain-core-go/marshal" + "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/process/block/bootstrapStorage" @@ -182,6 +183,10 @@ func (mp *metaProcessor) ReceivedShardHeader(header data.HeaderHandler, shardHea mp.receivedShardHeader(header, shardHeaderHash) } +func (mp *metaProcessor) GetDataPool() dataRetriever.PoolsHolder { + return mp.dataPool +} + func (mp *metaProcessor) AddHdrHashToRequestedList(hdr data.HeaderHandler, hdrHash []byte) { mp.hdrsForCurrBlock.mutHdrsForBlock.Lock() defer mp.hdrsForCurrBlock.mutHdrsForBlock.Unlock() @@ -565,3 +570,139 @@ func (bp *baseProcessor) SetNonceOfFirstCommittedBlock(nonce uint64) { func (bp *baseProcessor) CheckSentSignaturesAtCommitTime(header data.HeaderHandler) error { return bp.checkSentSignaturesAtCommitTime(header) } + +// GetHdrForBlock - +func (mp *metaProcessor) GetHdrForBlock() *hdrForBlock { + return mp.hdrsForCurrBlock +} + +// ChannelReceiveAllHeaders - +func (mp *metaProcessor) ChannelReceiveAllHeaders() chan bool { + return mp.chRcvAllHdrs +} + +// ComputeExistingAndRequestMissingShardHeaders - +func (mp *metaProcessor) ComputeExistingAndRequestMissingShardHeaders(metaBlock *block.MetaBlock) (uint32, uint32) { + return mp.computeExistingAndRequestMissingShardHeaders(metaBlock) +} + +// ComputeExistingAndRequestMissingMetaHeaders - +func (sp *shardProcessor) ComputeExistingAndRequestMissingMetaHeaders(header data.ShardHeaderHandler) (uint32, uint32) { + return sp.computeExistingAndRequestMissingMetaHeaders(header) +} + +// GetHdrForBlock - +func (sp *shardProcessor) GetHdrForBlock() *hdrForBlock { + return sp.hdrsForCurrBlock +} + +// ChannelReceiveAllHeaders - +func (sp *shardProcessor) ChannelReceiveAllHeaders() chan bool { + return sp.chRcvAllMetaHdrs +} + +// InitMaps - +func (hfb *hdrForBlock) InitMaps() { + hfb.initMaps() + hfb.resetMissingHdrs() +} + +// Clone - +func (hfb *hdrForBlock) Clone() *hdrForBlock { + return hfb +} + +// SetNumMissingHdrs - +func (hfb *hdrForBlock) SetNumMissingHdrs(num uint32) { + hfb.mutHdrsForBlock.Lock() + hfb.missingHdrs = num + hfb.mutHdrsForBlock.Unlock() +} + +// SetNumMissingFinalityAttestingHdrs - +func (hfb *hdrForBlock) SetNumMissingFinalityAttestingHdrs(num uint32) { + hfb.mutHdrsForBlock.Lock() + hfb.missingFinalityAttestingHdrs = num + hfb.mutHdrsForBlock.Unlock() +} + +// SetHighestHdrNonce - +func (hfb *hdrForBlock) SetHighestHdrNonce(shardId uint32, nonce uint64) { + hfb.mutHdrsForBlock.Lock() + hfb.highestHdrNonce[shardId] = nonce + hfb.mutHdrsForBlock.Unlock() +} + +// HdrInfo - +type HdrInfo struct { + UsedInBlock bool + Hdr data.HeaderHandler +} + +// SetHdrHashAndInfo - +func (hfb *hdrForBlock) SetHdrHashAndInfo(hash string, info *HdrInfo) { + hfb.mutHdrsForBlock.Lock() + hfb.hdrHashAndInfo[hash] = &hdrInfo{ + hdr: info.Hdr, + usedInBlock: info.UsedInBlock, + } + hfb.mutHdrsForBlock.Unlock() +} + +// GetHdrHashMap - +func (hfb *hdrForBlock) GetHdrHashMap() 
map[string]data.HeaderHandler { + m := make(map[string]data.HeaderHandler) + + hfb.mutHdrsForBlock.RLock() + for hash, hi := range hfb.hdrHashAndInfo { + m[hash] = hi.hdr + } + hfb.mutHdrsForBlock.RUnlock() + + return m +} + +// GetHighestHdrNonce - +func (hfb *hdrForBlock) GetHighestHdrNonce() map[uint32]uint64 { + m := make(map[uint32]uint64) + + hfb.mutHdrsForBlock.RLock() + for shardId, nonce := range hfb.highestHdrNonce { + m[shardId] = nonce + } + hfb.mutHdrsForBlock.RUnlock() + + return m +} + +// GetMissingHdrs - +func (hfb *hdrForBlock) GetMissingHdrs() uint32 { + hfb.mutHdrsForBlock.RLock() + defer hfb.mutHdrsForBlock.RUnlock() + + return hfb.missingHdrs +} + +// GetMissingFinalityAttestingHdrs - +func (hfb *hdrForBlock) GetMissingFinalityAttestingHdrs() uint32 { + hfb.mutHdrsForBlock.RLock() + defer hfb.mutHdrsForBlock.RUnlock() + + return hfb.missingFinalityAttestingHdrs +} + +// GetHdrHashAndInfo - +func (hfb *hdrForBlock) GetHdrHashAndInfo() map[string]*HdrInfo { + hfb.mutHdrsForBlock.RLock() + defer hfb.mutHdrsForBlock.RUnlock() + + m := make(map[string]*HdrInfo) + for hash, hi := range hfb.hdrHashAndInfo { + m[hash] = &HdrInfo{ + UsedInBlock: hi.usedInBlock, + Hdr: hi.hdr, + } + } + + return m +} diff --git a/process/block/metablock.go b/process/block/metablock.go index 1ddf90723d8..390e1cebf25 100644 --- a/process/block/metablock.go +++ b/process/block/metablock.go @@ -137,6 +137,7 @@ func NewMetaProcessor(arguments ArgMetaProcessor) (*metaProcessor, error) { blockProcessingCutoffHandler: arguments.BlockProcessingCutoffHandler, managedPeersHolder: arguments.ManagedPeersHolder, sentSignaturesTracker: arguments.SentSignaturesTracker, + extraDelayRequestBlockInfo: time.Duration(arguments.Config.EpochStartConfig.ExtraDelayForRequestBlockInfoInMilliseconds) * time.Millisecond, } mp := metaProcessor{ @@ -438,7 +439,7 @@ func (mp *metaProcessor) processEpochStartMetaBlock( } if mp.isRewardsV2Enabled(header) { - err = mp.epochSystemSCProcessor.ProcessSystemSmartContract(allValidatorsInfo, header.Nonce, header.Epoch) + err = mp.epochSystemSCProcessor.ProcessSystemSmartContract(allValidatorsInfo, header) if err != nil { return err } @@ -453,7 +454,7 @@ func (mp *metaProcessor) processEpochStartMetaBlock( return err } - err = mp.epochSystemSCProcessor.ProcessSystemSmartContract(allValidatorsInfo, header.Nonce, header.Epoch) + err = mp.epochSystemSCProcessor.ProcessSystemSmartContract(allValidatorsInfo, header) if err != nil { return err } @@ -870,7 +871,7 @@ func (mp *metaProcessor) createEpochStartBody(metaBlock *block.MetaBlock) (data. var rewardMiniBlocks block.MiniBlockSlice if mp.isRewardsV2Enabled(metaBlock) { - err = mp.epochSystemSCProcessor.ProcessSystemSmartContract(allValidatorsInfo, metaBlock.Nonce, metaBlock.Epoch) + err = mp.epochSystemSCProcessor.ProcessSystemSmartContract(allValidatorsInfo, metaBlock) if err != nil { return nil, err } @@ -885,7 +886,7 @@ func (mp *metaProcessor) createEpochStartBody(metaBlock *block.MetaBlock) (data. 
return nil, err } - err = mp.epochSystemSCProcessor.ProcessSystemSmartContract(allValidatorsInfo, metaBlock.Nonce, metaBlock.Epoch) + err = mp.epochSystemSCProcessor.ProcessSystemSmartContract(allValidatorsInfo, metaBlock) if err != nil { return nil, err } diff --git a/process/block/metablockRequest_test.go b/process/block/metablockRequest_test.go new file mode 100644 index 00000000000..0718830a43c --- /dev/null +++ b/process/block/metablockRequest_test.go @@ -0,0 +1,653 @@ +package block_test + +import ( + "bytes" + "errors" + "fmt" + "sync" + "sync/atomic" + "testing" + "time" + + "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-core-go/data/block" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/multiversx/mx-chain-go/dataRetriever" + blockProcess "github.com/multiversx/mx-chain-go/process/block" + "github.com/multiversx/mx-chain-go/process/mock" + "github.com/multiversx/mx-chain-go/state" + "github.com/multiversx/mx-chain-go/testscommon" + dataRetrieverMock "github.com/multiversx/mx-chain-go/testscommon/dataRetriever" + "github.com/multiversx/mx-chain-go/testscommon/hashingMocks" + "github.com/multiversx/mx-chain-go/testscommon/pool" + stateMock "github.com/multiversx/mx-chain-go/testscommon/state" +) + +func TestMetaProcessor_computeExistingAndRequestMissingShardHeaders(t *testing.T) { + t.Parallel() + + noOfShards := uint32(2) + td := createTestData() + + t.Run("all referenced shard headers missing", func(t *testing.T) { + t.Parallel() + referencedHeaders := []*shardHeaderData{td[0].referencedHeaderData, td[1].referencedHeaderData} + shardInfo := createShardInfo(referencedHeaders) + metaBlock := &block.MetaBlock{ + ShardInfo: shardInfo, + } + + numCallsMissingAttestation := atomic.Uint32{} + numCallsMissingHeaders := atomic.Uint32{} + arguments := createMetaProcessorArguments(t, noOfShards) + updateRequestsHandlerForCountingRequests(t, arguments, td, metaBlock, &numCallsMissingHeaders, &numCallsMissingAttestation) + + mp, err := blockProcess.NewMetaProcessor(*arguments) + require.Nil(t, err) + require.NotNil(t, mp) + + headersForBlock := mp.GetHdrForBlock() + numMissing, numAttestationMissing := mp.ComputeExistingAndRequestMissingShardHeaders(metaBlock) + time.Sleep(100 * time.Millisecond) + require.Equal(t, uint32(2), numMissing) + require.Equal(t, uint32(2), headersForBlock.GetMissingHdrs()) + // before receiving all missing headers referenced in metaBlock, the number of missing attestations is not updated + require.Equal(t, uint32(0), numAttestationMissing) + require.Equal(t, uint32(0), headersForBlock.GetMissingFinalityAttestingHdrs()) + require.Len(t, headersForBlock.GetHdrHashAndInfo(), 2) + require.Equal(t, uint32(0), numCallsMissingAttestation.Load()) + require.Equal(t, uint32(2), numCallsMissingHeaders.Load()) + }) + t.Run("one referenced shard header present and one missing", func(t *testing.T) { + t.Parallel() + referencedHeaders := []*shardHeaderData{td[0].referencedHeaderData, td[1].referencedHeaderData} + shardInfo := createShardInfo(referencedHeaders) + metaBlock := &block.MetaBlock{ + ShardInfo: shardInfo, + } + + numCallsMissingAttestation := atomic.Uint32{} + numCallsMissingHeaders := atomic.Uint32{} + arguments := createMetaProcessorArguments(t, noOfShards) + poolsHolder, ok := arguments.DataComponents.Datapool().(*dataRetrieverMock.PoolsHolderMock) + require.True(t, ok) + + headersPoolStub := createPoolsHolderForHeaderRequests() + poolsHolder.SetHeadersPool(headersPoolStub) + 
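`updateRequestsHandlerForCountingRequests` and the other helpers used in these subtests are defined outside this excerpt. From the way the two counters are asserted, it presumably rewires the request-handler stub so that requests for missing referenced shard headers and requests for attestation headers (by nonce) increment separate counters. A self-contained sketch of that idea, using a hypothetical stub type rather than the repository's RequestHandlerStub:

```go
package main

import (
	"fmt"
	"sync/atomic"
)

// requestHandlerStub is a hypothetical stand-in for the test stub whose callbacks the helper overrides.
type requestHandlerStub struct {
	RequestShardHeaderCalled        func(shardID uint32, hash []byte)
	RequestShardHeaderByNonceCalled func(shardID uint32, nonce uint64)
}

// wireRequestCounters mirrors the presumed role of updateRequestsHandlerForCountingRequests:
// header-by-hash requests count as missing referenced headers, header-by-nonce
// requests count as missing attestation headers.
func wireRequestCounters(stub *requestHandlerStub, missingHeaders, missingAttestations *atomic.Uint32) {
	stub.RequestShardHeaderCalled = func(shardID uint32, hash []byte) {
		missingHeaders.Add(1)
	}
	stub.RequestShardHeaderByNonceCalled = func(shardID uint32, nonce uint64) {
		missingAttestations.Add(1)
	}
}

func main() {
	var headers, attestations atomic.Uint32
	stub := &requestHandlerStub{}
	wireRequestCounters(stub, &headers, &attestations)

	stub.RequestShardHeaderCalled(0, []byte("hash"))
	stub.RequestShardHeaderByNonceCalled(1, 101)
	fmt.Println(headers.Load(), attestations.Load()) // 1 1
}
```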
updateRequestsHandlerForCountingRequests(t, arguments, td, metaBlock, &numCallsMissingHeaders, &numCallsMissingAttestation) + + mp, err := blockProcess.NewMetaProcessor(*arguments) + require.Nil(t, err) + require.NotNil(t, mp) + + headersPool := mp.GetDataPool().Headers() + // adding the existing header + headersPool.AddHeader(td[0].referencedHeaderData.headerHash, td[0].referencedHeaderData.header) + numMissing, numAttestationMissing := mp.ComputeExistingAndRequestMissingShardHeaders(metaBlock) + time.Sleep(100 * time.Millisecond) + headersForBlock := mp.GetHdrForBlock() + require.Equal(t, uint32(1), numMissing) + require.Equal(t, uint32(1), headersForBlock.GetMissingHdrs()) + // before receiving all missing headers referenced in metaBlock, the number of missing attestations is not updated + require.Equal(t, uint32(0), numAttestationMissing) + require.Equal(t, uint32(0), headersForBlock.GetMissingFinalityAttestingHdrs()) + require.Len(t, headersForBlock.GetHdrHashAndInfo(), 2) + require.Equal(t, uint32(0), numCallsMissingAttestation.Load()) + require.Equal(t, uint32(1), numCallsMissingHeaders.Load()) + }) + t.Run("all referenced shard headers present, all attestation headers missing", func(t *testing.T) { + t.Parallel() + referencedHeaders := []*shardHeaderData{td[0].referencedHeaderData, td[1].referencedHeaderData} + shardInfo := createShardInfo(referencedHeaders) + metaBlock := &block.MetaBlock{ + ShardInfo: shardInfo, + } + + numCallsMissingAttestation := atomic.Uint32{} + numCallsMissingHeaders := atomic.Uint32{} + arguments := createMetaProcessorArguments(t, noOfShards) + poolsHolder, ok := arguments.DataComponents.Datapool().(*dataRetrieverMock.PoolsHolderMock) + require.True(t, ok) + + headersPoolStub := createPoolsHolderForHeaderRequests() + poolsHolder.SetHeadersPool(headersPoolStub) + updateRequestsHandlerForCountingRequests(t, arguments, td, metaBlock, &numCallsMissingHeaders, &numCallsMissingAttestation) + + mp, err := blockProcess.NewMetaProcessor(*arguments) + require.Nil(t, err) + require.NotNil(t, mp) + + headersPool := mp.GetDataPool().Headers() + // adding the existing headers + headersPool.AddHeader(td[0].referencedHeaderData.headerHash, td[0].referencedHeaderData.header) + headersPool.AddHeader(td[1].referencedHeaderData.headerHash, td[1].referencedHeaderData.header) + numMissing, numAttestationMissing := mp.ComputeExistingAndRequestMissingShardHeaders(metaBlock) + time.Sleep(100 * time.Millisecond) + headersForBlock := mp.GetHdrForBlock() + require.Equal(t, uint32(0), numMissing) + require.Equal(t, uint32(0), headersForBlock.GetMissingHdrs()) + require.Equal(t, uint32(2), numAttestationMissing) + require.Equal(t, uint32(2), headersForBlock.GetMissingFinalityAttestingHdrs()) + require.Len(t, headersForBlock.GetHdrHashAndInfo(), 2) + require.Equal(t, uint32(2), numCallsMissingAttestation.Load()) + require.Equal(t, uint32(0), numCallsMissingHeaders.Load()) + }) + t.Run("all referenced shard headers present, one attestation header missing", func(t *testing.T) { + t.Parallel() + referencedHeaders := []*shardHeaderData{td[0].referencedHeaderData, td[1].referencedHeaderData} + shardInfo := createShardInfo(referencedHeaders) + metaBlock := &block.MetaBlock{ + ShardInfo: shardInfo, + } + + numCallsMissingAttestation := atomic.Uint32{} + numCallsMissingHeaders := atomic.Uint32{} + arguments := createMetaProcessorArguments(t, noOfShards) + poolsHolder, ok := arguments.DataComponents.Datapool().(*dataRetrieverMock.PoolsHolderMock) + require.True(t, ok) + + headersPoolStub := 
createPoolsHolderForHeaderRequests() + poolsHolder.SetHeadersPool(headersPoolStub) + updateRequestsHandlerForCountingRequests(t, arguments, td, metaBlock, &numCallsMissingHeaders, &numCallsMissingAttestation) + + mp, err := blockProcess.NewMetaProcessor(*arguments) + require.Nil(t, err) + require.NotNil(t, mp) + + headersPool := mp.GetDataPool().Headers() + // adding the existing headers + headersPool.AddHeader(td[0].referencedHeaderData.headerHash, td[0].referencedHeaderData.header) + headersPool.AddHeader(td[1].referencedHeaderData.headerHash, td[1].referencedHeaderData.header) + headersPool.AddHeader(td[0].attestationHeaderData.headerHash, td[0].attestationHeaderData.header) + numMissing, numAttestationMissing := mp.ComputeExistingAndRequestMissingShardHeaders(metaBlock) + time.Sleep(100 * time.Millisecond) + headersForBlock := mp.GetHdrForBlock() + require.Equal(t, uint32(0), numMissing) + require.Equal(t, uint32(0), headersForBlock.GetMissingHdrs()) + require.Equal(t, uint32(1), numAttestationMissing) + require.Equal(t, uint32(1), headersForBlock.GetMissingFinalityAttestingHdrs()) + require.Len(t, headersForBlock.GetHdrHashAndInfo(), 3) + require.Equal(t, uint32(1), numCallsMissingAttestation.Load()) + require.Equal(t, uint32(0), numCallsMissingHeaders.Load()) + }) + t.Run("all referenced shard headers present, all attestation headers present", func(t *testing.T) { + t.Parallel() + referencedHeaders := []*shardHeaderData{td[0].referencedHeaderData, td[1].referencedHeaderData} + shardInfo := createShardInfo(referencedHeaders) + metaBlock := &block.MetaBlock{ + ShardInfo: shardInfo, + } + + numCallsMissingAttestation := atomic.Uint32{} + numCallsMissingHeaders := atomic.Uint32{} + arguments := createMetaProcessorArguments(t, noOfShards) + poolsHolder, ok := arguments.DataComponents.Datapool().(*dataRetrieverMock.PoolsHolderMock) + require.True(t, ok) + + headersPoolStub := createPoolsHolderForHeaderRequests() + poolsHolder.SetHeadersPool(headersPoolStub) + updateRequestsHandlerForCountingRequests(t, arguments, td, metaBlock, &numCallsMissingHeaders, &numCallsMissingAttestation) + + mp, err := blockProcess.NewMetaProcessor(*arguments) + require.Nil(t, err) + require.NotNil(t, mp) + + headersPool := mp.GetDataPool().Headers() + // adding the existing headers + headersPool.AddHeader(td[0].referencedHeaderData.headerHash, td[0].referencedHeaderData.header) + headersPool.AddHeader(td[1].referencedHeaderData.headerHash, td[1].referencedHeaderData.header) + headersPool.AddHeader(td[0].attestationHeaderData.headerHash, td[0].attestationHeaderData.header) + headersPool.AddHeader(td[1].attestationHeaderData.headerHash, td[1].attestationHeaderData.header) + numMissing, numAttestationMissing := mp.ComputeExistingAndRequestMissingShardHeaders(metaBlock) + time.Sleep(100 * time.Millisecond) + headersForBlock := mp.GetHdrForBlock() + require.Equal(t, uint32(0), numMissing) + require.Equal(t, uint32(0), headersForBlock.GetMissingHdrs()) + require.Equal(t, uint32(0), numAttestationMissing) + require.Equal(t, uint32(0), headersForBlock.GetMissingFinalityAttestingHdrs()) + require.Len(t, headersForBlock.GetHdrHashAndInfo(), 4) + require.Equal(t, uint32(0), numCallsMissingAttestation.Load()) + require.Equal(t, uint32(0), numCallsMissingHeaders.Load()) + }) +} + +func TestMetaProcessor_receivedShardHeader(t *testing.T) { + t.Parallel() + noOfShards := uint32(2) + td := createTestData() + + t.Run("receiving the last used in block shard header", func(t *testing.T) { + t.Parallel() + + numCalls := 
atomic.Uint32{} + arguments := createMetaProcessorArguments(t, noOfShards) + requestHandler, ok := arguments.ArgBaseProcessor.RequestHandler.(*testscommon.RequestHandlerStub) + require.True(t, ok) + + requestHandler.RequestShardHeaderByNonceCalled = func(shardID uint32, nonce uint64) { + attestationNonce := td[shardID].attestationHeaderData.header.GetNonce() + if nonce != attestationNonce { + require.Fail(t, fmt.Sprintf("nonce should have been %d", attestationNonce)) + } + numCalls.Add(1) + } + + mp, err := blockProcess.NewMetaProcessor(*arguments) + require.Nil(t, err) + require.NotNil(t, mp) + + hdrsForBlock := mp.GetHdrForBlock() + hdrsForBlock.SetNumMissingHdrs(1) + hdrsForBlock.SetNumMissingFinalityAttestingHdrs(0) + hdrsForBlock.SetHighestHdrNonce(0, td[0].referencedHeaderData.header.GetNonce()-1) + hdrsForBlock.SetHdrHashAndInfo(string(td[0].referencedHeaderData.headerHash), &blockProcess.HdrInfo{ + UsedInBlock: true, + Hdr: nil, + }) + + mp.ReceivedShardHeader(td[0].referencedHeaderData.header, td[0].referencedHeaderData.headerHash) + + time.Sleep(100 * time.Millisecond) + require.Nil(t, err) + require.NotNil(t, mp) + require.Equal(t, uint32(1), numCalls.Load()) + require.Equal(t, uint32(1), hdrsForBlock.GetMissingFinalityAttestingHdrs()) + }) + + t.Run("shard header used in block received, not latest", func(t *testing.T) { + t.Parallel() + + numCalls := atomic.Uint32{} + arguments := createMetaProcessorArguments(t, noOfShards) + requestHandler, ok := arguments.ArgBaseProcessor.RequestHandler.(*testscommon.RequestHandlerStub) + require.True(t, ok) + + // for requesting attestation header + requestHandler.RequestShardHeaderByNonceCalled = func(shardID uint32, nonce uint64) { + attestationNonce := td[shardID].attestationHeaderData.header.GetNonce() + require.Equal(t, nonce, attestationNonce, fmt.Sprintf("nonce should have been %d", attestationNonce)) + numCalls.Add(1) + } + + mp, err := blockProcess.NewMetaProcessor(*arguments) + require.Nil(t, err) + require.NotNil(t, mp) + + hdrsForBlock := mp.GetHdrForBlock() + hdrsForBlock.SetNumMissingHdrs(2) + hdrsForBlock.SetNumMissingFinalityAttestingHdrs(0) + referencedHeaderData := td[1].referencedHeaderData + hdrsForBlock.SetHighestHdrNonce(0, referencedHeaderData.header.GetNonce()-1) + hdrsForBlock.SetHdrHashAndInfo(string(referencedHeaderData.headerHash), &blockProcess.HdrInfo{ + UsedInBlock: true, + Hdr: nil, + }) + + mp.ReceivedShardHeader(referencedHeaderData.header, referencedHeaderData.headerHash) + + time.Sleep(100 * time.Millisecond) + require.Nil(t, err) + require.NotNil(t, mp) + // not yet requested attestation blocks as still missing one header + require.Equal(t, uint32(0), numCalls.Load()) + // not yet computed + require.Equal(t, uint32(0), hdrsForBlock.GetMissingFinalityAttestingHdrs()) + }) + t.Run("all needed shard attestation headers received", func(t *testing.T) { + t.Parallel() + + numCalls := atomic.Uint32{} + arguments := createMetaProcessorArguments(t, noOfShards) + + poolsHolder, ok := arguments.DataComponents.Datapool().(*dataRetrieverMock.PoolsHolderMock) + require.True(t, ok) + + headersPoolStub := createPoolsHolderForHeaderRequests() + poolsHolder.SetHeadersPool(headersPoolStub) + requestHandler, ok := arguments.ArgBaseProcessor.RequestHandler.(*testscommon.RequestHandlerStub) + require.True(t, ok) + + // for requesting attestation header + requestHandler.RequestShardHeaderByNonceCalled = func(shardID uint32, nonce uint64) { + attestationNonce := td[shardID].attestationHeaderData.header.GetNonce() + if nonce != 
attestationNonce { + require.Fail(t, fmt.Sprintf("nonce should have been %d", attestationNonce)) + } + numCalls.Add(1) + } + + mp, err := blockProcess.NewMetaProcessor(*arguments) + require.Nil(t, err) + require.NotNil(t, mp) + + hdrsForBlock := mp.GetHdrForBlock() + hdrsForBlock.SetNumMissingHdrs(1) + hdrsForBlock.SetNumMissingFinalityAttestingHdrs(0) + referencedHeaderData := td[0].referencedHeaderData + hdrsForBlock.SetHighestHdrNonce(0, referencedHeaderData.header.GetNonce()-1) + hdrsForBlock.SetHdrHashAndInfo(string(referencedHeaderData.headerHash), &blockProcess.HdrInfo{ + UsedInBlock: true, + Hdr: nil, + }) + + // receive the missing header + headersPool := mp.GetDataPool().Headers() + headersPool.AddHeader(referencedHeaderData.headerHash, referencedHeaderData.header) + mp.ReceivedShardHeader(td[0].referencedHeaderData.header, referencedHeaderData.headerHash) + + time.Sleep(100 * time.Millisecond) + require.Nil(t, err) + require.NotNil(t, mp) + require.Equal(t, uint32(1), numCalls.Load()) + require.Equal(t, uint32(1), hdrsForBlock.GetMissingFinalityAttestingHdrs()) + + // needs to be done before receiving the last header, otherwise it will + // be blocked waiting on writing to the channel + wg := startWaitingForAllHeadersReceivedSignal(t, mp) + + // receive also the attestation header + attestationHeaderData := td[0].attestationHeaderData + headersPool.AddHeader(attestationHeaderData.headerHash, attestationHeaderData.header) + mp.ReceivedShardHeader(attestationHeaderData.header, attestationHeaderData.headerHash) + wg.Wait() + + require.Equal(t, uint32(1), numCalls.Load()) + require.Equal(t, uint32(0), hdrsForBlock.GetMissingFinalityAttestingHdrs()) + }) + t.Run("all needed shard attestation headers received, when multiple shards headers missing", func(t *testing.T) { + t.Parallel() + + numCalls := atomic.Uint32{} + arguments := createMetaProcessorArguments(t, noOfShards) + + poolsHolder, ok := arguments.DataComponents.Datapool().(*dataRetrieverMock.PoolsHolderMock) + require.True(t, ok) + + headersPoolStub := createPoolsHolderForHeaderRequests() + poolsHolder.SetHeadersPool(headersPoolStub) + requestHandler, ok := arguments.ArgBaseProcessor.RequestHandler.(*testscommon.RequestHandlerStub) + require.True(t, ok) + + // for requesting attestation header + requestHandler.RequestShardHeaderByNonceCalled = func(shardID uint32, nonce uint64) { + attestationNonce := td[shardID].attestationHeaderData.header.GetNonce() + if nonce != attestationNonce { + require.Fail(t, fmt.Sprintf("requested nonce for shard %d should have been %d", shardID, attestationNonce)) + } + numCalls.Add(1) + } + + mp, err := blockProcess.NewMetaProcessor(*arguments) + require.Nil(t, err) + require.NotNil(t, mp) + + hdrsForBlock := mp.GetHdrForBlock() + hdrsForBlock.SetNumMissingHdrs(2) + hdrsForBlock.SetNumMissingFinalityAttestingHdrs(0) + hdrsForBlock.SetHighestHdrNonce(0, 99) + hdrsForBlock.SetHighestHdrNonce(1, 97) + hdrsForBlock.SetHdrHashAndInfo(string(td[0].referencedHeaderData.headerHash), &blockProcess.HdrInfo{ + UsedInBlock: true, + Hdr: nil, + }) + hdrsForBlock.SetHdrHashAndInfo(string(td[1].referencedHeaderData.headerHash), &blockProcess.HdrInfo{ + UsedInBlock: true, + Hdr: nil, + }) + + // receive the missing header for shard 0 + headersPool := mp.GetDataPool().Headers() + headersPool.AddHeader(td[0].referencedHeaderData.headerHash, td[0].referencedHeaderData.header) + mp.ReceivedShardHeader(td[0].referencedHeaderData.header, td[0].referencedHeaderData.headerHash) + + time.Sleep(100 * 
time.Millisecond) + require.Nil(t, err) + require.NotNil(t, mp) + // the attestation header for shard 0 is not requested as the attestation header for shard 1 is missing + // TODO: refactor request logic to request missing attestation headers as soon as possible + require.Equal(t, uint32(0), numCalls.Load()) + require.Equal(t, uint32(0), hdrsForBlock.GetMissingFinalityAttestingHdrs()) + + // receive the missing header for shard 1 + headersPool.AddHeader(td[1].referencedHeaderData.headerHash, td[1].referencedHeaderData.header) + mp.ReceivedShardHeader(td[1].referencedHeaderData.header, td[1].referencedHeaderData.headerHash) + + time.Sleep(100 * time.Millisecond) + require.Nil(t, err) + require.NotNil(t, mp) + require.Equal(t, uint32(2), numCalls.Load()) + require.Equal(t, uint32(2), hdrsForBlock.GetMissingFinalityAttestingHdrs()) + + // needs to be done before receiving the last header otherwise it will + // be blocked writing to a channel no one is reading from + wg := startWaitingForAllHeadersReceivedSignal(t, mp) + + // receive also the attestation header + headersPool.AddHeader(td[0].attestationHeaderData.headerHash, td[0].attestationHeaderData.header) + mp.ReceivedShardHeader(td[0].attestationHeaderData.header, td[0].attestationHeaderData.headerHash) + + headersPool.AddHeader(td[1].attestationHeaderData.headerHash, td[1].attestationHeaderData.header) + mp.ReceivedShardHeader(td[1].attestationHeaderData.header, td[1].attestationHeaderData.headerHash) + wg.Wait() + + time.Sleep(100 * time.Millisecond) + // the receive of an attestation header, if not the last one, will trigger a new request of missing attestation headers + // TODO: refactor request logic to not request recently already requested headers + require.Equal(t, uint32(3), numCalls.Load()) + require.Equal(t, uint32(0), hdrsForBlock.GetMissingFinalityAttestingHdrs()) + }) +} + +type receivedAllHeadersSignaler interface { + ChannelReceiveAllHeaders() chan bool +} + +func startWaitingForAllHeadersReceivedSignal(t *testing.T, mp receivedAllHeadersSignaler) *sync.WaitGroup { + wg := &sync.WaitGroup{} + wg.Add(1) + go func(w *sync.WaitGroup) { + receivedAllHeaders := checkReceivedAllHeaders(mp.ChannelReceiveAllHeaders()) + require.True(t, receivedAllHeaders) + wg.Done() + }(wg) + return wg +} + +func checkReceivedAllHeaders(channelReceiveAllHeaders chan bool) bool { + select { + case <-time.After(100 * time.Millisecond): + return false + case <-channelReceiveAllHeaders: + return true + } +} + +func createPoolsHolderForHeaderRequests() dataRetriever.HeadersPool { + headersInPool := make(map[string]data.HeaderHandler) + mutHeadersInPool := sync.RWMutex{} + errNotFound := errors.New("header not found") + + return &pool.HeadersPoolStub{ + AddCalled: func(headerHash []byte, header data.HeaderHandler) { + mutHeadersInPool.Lock() + headersInPool[string(headerHash)] = header + mutHeadersInPool.Unlock() + }, + GetHeaderByHashCalled: func(hash []byte) (data.HeaderHandler, error) { + mutHeadersInPool.RLock() + defer mutHeadersInPool.RUnlock() + if h, ok := headersInPool[string(hash)]; ok { + return h, nil + } + return nil, errNotFound + }, + GetHeaderByNonceAndShardIdCalled: func(hdrNonce uint64, shardId uint32) ([]data.HeaderHandler, [][]byte, error) { + mutHeadersInPool.RLock() + defer mutHeadersInPool.RUnlock() + for hash, h := range headersInPool { + if h.GetNonce() == hdrNonce && h.GetShardID() == shardId { + return []data.HeaderHandler{h}, [][]byte{[]byte(hash)}, nil + } + } + return nil, nil, errNotFound + }, + } +} + +func 
createMetaProcessorArguments(t *testing.T, noOfShards uint32) *blockProcess.ArgMetaProcessor { + poolMock := dataRetrieverMock.NewPoolsHolderMock() + poolMock.Headers() + coreComponents, dataComponents, bootstrapComponents, statusComponents := createMockComponentHolders() + coreComponents.Hash = &hashingMocks.HasherMock{} + dataComponents.DataPool = poolMock + dataComponents.Storage = initStore() + bootstrapComponents.Coordinator = mock.NewMultiShardsCoordinatorMock(noOfShards) + arguments := createMockMetaArguments(coreComponents, dataComponents, bootstrapComponents, statusComponents) + arguments.AccountsDB[state.UserAccountsState] = &stateMock.AccountsStub{ + RevertToSnapshotCalled: func(snapshot int) error { + assert.Fail(t, "revert should have not been called") + return nil + }, + JournalLenCalled: func() int { + return 0 + }, + } + + startHeaders := createGenesisBlocks(bootstrapComponents.ShardCoordinator()) + arguments.BlockTracker = mock.NewBlockTrackerMock(bootstrapComponents.ShardCoordinator(), startHeaders) + arguments.ArgBaseProcessor.RequestHandler = &testscommon.RequestHandlerStub{ + RequestShardHeaderByNonceCalled: func(shardID uint32, nonce uint64) { + require.Fail(t, "should not have been called") + }, + RequestMetaHeaderByNonceCalled: func(nonce uint64) { + require.Fail(t, "should not have been called") + }, + + RequestShardHeaderCalled: func(shardID uint32, hash []byte) { + require.Fail(t, "should not have been called") + }, + RequestMetaHeaderCalled: func(hash []byte) { + require.Fail(t, "should not have been called") + }, + } + + return &arguments +} + +type shardHeaderData struct { + header *block.HeaderV2 + headerHash []byte +} + +type shardTestData struct { + referencedHeaderData *shardHeaderData + attestationHeaderData *shardHeaderData +} + +func createTestData() map[uint32]*shardTestData { + shard0Header1Hash := []byte("sh0TestHash1") + shard0header2Hash := []byte("sh0TestHash2") + shard1Header1Hash := []byte("sh1TestHash1") + shard1header2Hash := []byte("sh1TestHash2") + shard0ReferencedNonce := uint64(100) + shard1ReferencedNonce := uint64(98) + shard0AttestationNonce := shard0ReferencedNonce + 1 + shard1AttestationNonce := shard1ReferencedNonce + 1 + + shardsTestData := map[uint32]*shardTestData{ + 0: { + referencedHeaderData: &shardHeaderData{ + header: &block.HeaderV2{ + Header: &block.Header{ + ShardID: 0, + Round: 100, + Nonce: shard0ReferencedNonce, + }, + }, + headerHash: shard0Header1Hash, + }, + attestationHeaderData: &shardHeaderData{ + header: &block.HeaderV2{ + Header: &block.Header{ + ShardID: 0, + Round: 101, + Nonce: shard0AttestationNonce, + PrevHash: shard0Header1Hash, + }, + }, + headerHash: shard0header2Hash, + }, + }, + 1: { + referencedHeaderData: &shardHeaderData{ + header: &block.HeaderV2{ + Header: &block.Header{ + ShardID: 1, + Round: 100, + Nonce: shard1ReferencedNonce, + }, + }, + headerHash: shard1Header1Hash, + }, + attestationHeaderData: &shardHeaderData{ + header: &block.HeaderV2{ + Header: &block.Header{ + ShardID: 1, + Round: 101, + Nonce: shard1AttestationNonce, + PrevHash: shard1Header1Hash, + }, + }, + headerHash: shard1header2Hash, + }, + }, + } + + return shardsTestData +} + +func createShardInfo(referencedHeaders []*shardHeaderData) []block.ShardData { + shardData := make([]block.ShardData, len(referencedHeaders)) + for i, h := range referencedHeaders { + shardData[i] = block.ShardData{ + HeaderHash: h.headerHash, + Round: h.header.GetRound(), + PrevHash: h.header.GetPrevHash(), + Nonce: h.header.GetNonce(), + ShardID: 
h.header.GetShardID(), + } + } + + return shardData +} + +func updateRequestsHandlerForCountingRequests( + t *testing.T, + arguments *blockProcess.ArgMetaProcessor, + td map[uint32]*shardTestData, + metaBlock *block.MetaBlock, + numCallsMissingHeaders, numCallsMissingAttestation *atomic.Uint32, +) { + requestHandler, ok := arguments.ArgBaseProcessor.RequestHandler.(*testscommon.RequestHandlerStub) + require.True(t, ok) + + requestHandler.RequestShardHeaderByNonceCalled = func(shardID uint32, nonce uint64) { + attestationNonce := td[shardID].attestationHeaderData.header.GetNonce() + if nonce != attestationNonce { + require.Fail(t, fmt.Sprintf("nonce should have been %d", attestationNonce)) + } + numCallsMissingAttestation.Add(1) + } + requestHandler.RequestShardHeaderCalled = func(shardID uint32, hash []byte) { + for _, sh := range metaBlock.ShardInfo { + if bytes.Equal(sh.HeaderHash, hash) && sh.ShardID == shardID { + numCallsMissingHeaders.Add(1) + return + } + } + + require.Fail(t, fmt.Sprintf("header hash %s not found in meta block", hash)) + } +} diff --git a/process/block/metablock_test.go b/process/block/metablock_test.go index 331beae01ae..173e14ffb90 100644 --- a/process/block/metablock_test.go +++ b/process/block/metablock_test.go @@ -156,10 +156,10 @@ func createMockMetaArguments( PendingMiniBlocksHandler: &mock.PendingMiniBlocksHandlerStub{}, EpochStartDataCreator: &mock.EpochStartDataCreatorStub{}, EpochEconomics: &mock.EpochEconomicsStub{}, - EpochRewardsCreator: &mock.EpochRewardsCreatorStub{}, - EpochValidatorInfoCreator: &mock.EpochValidatorInfoCreatorStub{}, - ValidatorStatisticsProcessor: &mock.ValidatorStatisticsProcessorStub{}, - EpochSystemSCProcessor: &mock.EpochStartSystemSCStub{}, + EpochRewardsCreator: &testscommon.RewardsCreatorStub{}, + EpochValidatorInfoCreator: &testscommon.EpochValidatorInfoCreatorStub{}, + ValidatorStatisticsProcessor: &testscommon.ValidatorStatisticsProcessorStub{}, + EpochSystemSCProcessor: &testscommon.EpochStartSystemSCStub{}, } return arguments } @@ -1208,7 +1208,7 @@ func TestMetaProcessor_RevertStateRevertPeerStateFailsShouldErr(t *testing.T) { return nil }, } - arguments.ValidatorStatisticsProcessor = &mock.ValidatorStatisticsProcessorStub{ + arguments.ValidatorStatisticsProcessor = &testscommon.ValidatorStatisticsProcessorStub{ RevertPeerStateCalled: func(header data.MetaHeaderHandler) error { return expectedErr }, @@ -1237,7 +1237,7 @@ func TestMetaProcessor_RevertStateShouldWork(t *testing.T) { return nil }, } - arguments.ValidatorStatisticsProcessor = &mock.ValidatorStatisticsProcessorStub{ + arguments.ValidatorStatisticsProcessor = &testscommon.ValidatorStatisticsProcessorStub{ RevertPeerStateCalled: func(header data.MetaHeaderHandler) error { revertePeerStateWasCalled = true return nil @@ -3011,7 +3011,7 @@ func TestMetaProcessor_CreateAndProcessBlockCallsProcessAfterFirstEpoch(t *testi dataComponents.DataPool = dPool dataComponents.BlockChain = blkc calledSaveNodesCoordinator := false - arguments.ValidatorStatisticsProcessor = &mock.ValidatorStatisticsProcessorStub{ + arguments.ValidatorStatisticsProcessor = &testscommon.ValidatorStatisticsProcessorStub{ SaveNodesCoordinatorUpdatesCalled: func(epoch uint32) (bool, error) { calledSaveNodesCoordinator = true return true, nil @@ -3019,7 +3019,7 @@ func TestMetaProcessor_CreateAndProcessBlockCallsProcessAfterFirstEpoch(t *testi } toggleCalled := false - arguments.EpochSystemSCProcessor = &mock.EpochStartSystemSCStub{ + arguments.EpochSystemSCProcessor = 
&testscommon.EpochStartSystemSCStub{ ToggleUnStakeUnBondCalled: func(value bool) error { toggleCalled = true assert.Equal(t, value, true) @@ -3155,7 +3155,7 @@ func TestMetaProcessor_CreateNewHeaderValsOK(t *testing.T) { func TestMetaProcessor_ProcessEpochStartMetaBlock(t *testing.T) { t.Parallel() - header := &block.MetaBlock{ + headerMeta := &block.MetaBlock{ Nonce: 1, Round: 1, PrevHash: []byte("hash1"), @@ -3174,19 +3174,18 @@ func TestMetaProcessor_ProcessEpochStartMetaBlock(t *testing.T) { arguments := createMockMetaArguments(coreC, dataC, bootstrapC, statusC) wasCalled := false - arguments.EpochRewardsCreator = &mock.EpochRewardsCreatorStub{ + arguments.EpochRewardsCreator = &testscommon.RewardsCreatorStub{ VerifyRewardsMiniBlocksCalled: func( - metaBlock data.MetaHeaderHandler, validatorsInfo map[uint32][]*state.ValidatorInfo, computedEconomics *block.Economics, + metaBlock data.MetaHeaderHandler, validatorsInfo state.ShardValidatorsInfoMapHandler, computedEconomics *block.Economics, ) error { assert.True(t, wasCalled) return nil }, } - arguments.EpochSystemSCProcessor = &mock.EpochStartSystemSCStub{ - ProcessSystemSmartContractCalled: func(validatorInfos map[uint32][]*state.ValidatorInfo, nonce uint64, epoch uint32) error { - assert.Equal(t, header.GetEpoch(), epoch) - assert.Equal(t, header.GetNonce(), nonce) + arguments.EpochSystemSCProcessor = &testscommon.EpochStartSystemSCStub{ + ProcessSystemSmartContractCalled: func(validatorsInfo state.ShardValidatorsInfoMapHandler, header data.HeaderHandler) error { + assert.Equal(t, headerMeta, header) wasCalled = true return nil }, @@ -3194,7 +3193,7 @@ func TestMetaProcessor_ProcessEpochStartMetaBlock(t *testing.T) { mp, _ := blproc.NewMetaProcessor(arguments) - err := mp.ProcessEpochStartMetaBlock(header, &block.Body{}) + err := mp.ProcessEpochStartMetaBlock(headerMeta, &block.Body{}) assert.Nil(t, err) }) @@ -3211,23 +3210,21 @@ func TestMetaProcessor_ProcessEpochStartMetaBlock(t *testing.T) { }, } arguments := createMockMetaArguments(coreComponents, dataComponents, bootstrapComponents, statusComponents) - - arguments.ValidatorStatisticsProcessor = &mock.ValidatorStatisticsProcessorStub{} + arguments.ValidatorStatisticsProcessor = &testscommon.ValidatorStatisticsProcessorStub{} wasCalled := false - arguments.EpochRewardsCreator = &mock.EpochRewardsCreatorStub{ + arguments.EpochRewardsCreator = &testscommon.RewardsCreatorStub{ VerifyRewardsMiniBlocksCalled: func( - metaBlock data.MetaHeaderHandler, validatorsInfo map[uint32][]*state.ValidatorInfo, computedEconomics *block.Economics, + metaBlock data.MetaHeaderHandler, validatorsInfo state.ShardValidatorsInfoMapHandler, computedEconomics *block.Economics, ) error { wasCalled = true return nil }, } - arguments.EpochSystemSCProcessor = &mock.EpochStartSystemSCStub{ - ProcessSystemSmartContractCalled: func(validatorInfos map[uint32][]*state.ValidatorInfo, nonce uint64, epoch uint32) error { - assert.Equal(t, header.GetEpoch(), epoch) - assert.Equal(t, header.GetNonce(), nonce) + arguments.EpochSystemSCProcessor = &testscommon.EpochStartSystemSCStub{ + ProcessSystemSmartContractCalled: func(validatorInfos state.ShardValidatorsInfoMapHandler, header data.HeaderHandler) error { + assert.Equal(t, headerMeta, header) assert.True(t, wasCalled) return nil }, @@ -3235,7 +3232,7 @@ func TestMetaProcessor_ProcessEpochStartMetaBlock(t *testing.T) { mp, _ := blproc.NewMetaProcessor(arguments) - err := mp.ProcessEpochStartMetaBlock(header, &block.Body{}) + err := 
mp.ProcessEpochStartMetaBlock(headerMeta, &block.Body{}) assert.Nil(t, err) }) } @@ -3324,7 +3321,7 @@ func TestMetaProcessor_CreateEpochStartBodyShouldFail(t *testing.T) { arguments := createMockMetaArguments(coreComponents, dataComponents, bootstrapComponents, statusComponents) expectedErr := errors.New("expected error") - arguments.ValidatorStatisticsProcessor = &mock.ValidatorStatisticsProcessorStub{ + arguments.ValidatorStatisticsProcessor = &testscommon.ValidatorStatisticsProcessorStub{ RootHashCalled: func() ([]byte, error) { return nil, expectedErr }, @@ -3342,8 +3339,8 @@ func TestMetaProcessor_CreateEpochStartBodyShouldFail(t *testing.T) { arguments := createMockMetaArguments(coreComponents, dataComponents, bootstrapComponents, statusComponents) expectedErr := errors.New("expected error") - arguments.ValidatorStatisticsProcessor = &mock.ValidatorStatisticsProcessorStub{ - GetValidatorInfoForRootHashCalled: func(rootHash []byte) (map[uint32][]*state.ValidatorInfo, error) { + arguments.ValidatorStatisticsProcessor = &testscommon.ValidatorStatisticsProcessorStub{ + GetValidatorInfoForRootHashCalled: func(rootHash []byte) (state.ShardValidatorsInfoMapHandler, error) { return nil, expectedErr }, } @@ -3360,8 +3357,8 @@ func TestMetaProcessor_CreateEpochStartBodyShouldFail(t *testing.T) { arguments := createMockMetaArguments(coreComponents, dataComponents, bootstrapComponents, statusComponents) expectedErr := errors.New("expected error") - arguments.ValidatorStatisticsProcessor = &mock.ValidatorStatisticsProcessorStub{ - ProcessRatingsEndOfEpochCalled: func(validatorsInfo map[uint32][]*state.ValidatorInfo, epoch uint32) error { + arguments.ValidatorStatisticsProcessor = &testscommon.ValidatorStatisticsProcessorStub{ + ProcessRatingsEndOfEpochCalled: func(validatorsInfo state.ShardValidatorsInfoMapHandler, epoch uint32) error { return expectedErr }, } @@ -3377,15 +3374,13 @@ func TestMetaProcessor_CreateEpochStartBodyShouldFail(t *testing.T) { func TestMetaProcessor_CreateEpochStartBodyShouldWork(t *testing.T) { t.Parallel() - expectedValidatorsInfo := map[uint32][]*state.ValidatorInfo{ - 0: { - &state.ValidatorInfo{ - ShardId: 1, - RewardAddress: []byte("rewardAddr1"), - AccumulatedFees: big.NewInt(10), - }, - }, - } + expectedValidatorsInfo := state.NewShardValidatorsInfoMap() + _ = expectedValidatorsInfo.Add( + &state.ValidatorInfo{ + ShardId: 1, + RewardAddress: []byte("rewardAddr1"), + AccumulatedFees: big.NewInt(10), + }) rewardMiniBlocks := block.MiniBlockSlice{ &block.MiniBlock{ @@ -3426,11 +3421,11 @@ func TestMetaProcessor_CreateEpochStartBodyShouldWork(t *testing.T) { } expectedRootHash := []byte("root hash") - arguments.ValidatorStatisticsProcessor = &mock.ValidatorStatisticsProcessorStub{ + arguments.ValidatorStatisticsProcessor = &testscommon.ValidatorStatisticsProcessorStub{ RootHashCalled: func() ([]byte, error) { return expectedRootHash, nil }, - GetValidatorInfoForRootHashCalled: func(rootHash []byte) (map[uint32][]*state.ValidatorInfo, error) { + GetValidatorInfoForRootHashCalled: func(rootHash []byte) (state.ShardValidatorsInfoMapHandler, error) { assert.Equal(t, expectedRootHash, rootHash) return expectedValidatorsInfo, nil @@ -3438,32 +3433,31 @@ func TestMetaProcessor_CreateEpochStartBodyShouldWork(t *testing.T) { } wasCalled := false - arguments.EpochSystemSCProcessor = &mock.EpochStartSystemSCStub{ - ProcessSystemSmartContractCalled: func(validatorsInfo map[uint32][]*state.ValidatorInfo, nonce uint64, epoch uint32) error { + arguments.EpochSystemSCProcessor = 
&testscommon.EpochStartSystemSCStub{ + ProcessSystemSmartContractCalled: func(validatorsInfo state.ShardValidatorsInfoMapHandler, header data.HeaderHandler) error { wasCalled = true - assert.Equal(t, mb.GetNonce(), nonce) - assert.Equal(t, mb.GetEpoch(), epoch) + assert.Equal(t, mb, header) return nil }, } expectedRewardsForProtocolSustain := big.NewInt(11) - arguments.EpochRewardsCreator = &mock.EpochRewardsCreatorStub{ + arguments.EpochRewardsCreator = &testscommon.RewardsCreatorStub{ CreateRewardsMiniBlocksCalled: func( - metaBlock data.MetaHeaderHandler, validatorsInfo map[uint32][]*state.ValidatorInfo, computedEconomics *block.Economics, + metaBlock data.MetaHeaderHandler, validatorsInfo state.ShardValidatorsInfoMapHandler, computedEconomics *block.Economics, ) (block.MiniBlockSlice, error) { assert.Equal(t, expectedValidatorsInfo, validatorsInfo) assert.Equal(t, mb, metaBlock) assert.True(t, wasCalled) return rewardMiniBlocks, nil }, - GetProtocolSustainCalled: func() *big.Int { + GetProtocolSustainabilityRewardsCalled: func() *big.Int { return expectedRewardsForProtocolSustain }, } - arguments.EpochValidatorInfoCreator = &mock.EpochValidatorInfoCreatorStub{ - CreateValidatorInfoMiniBlocksCalled: func(validatorsInfo map[uint32][]*state.ValidatorInfo) (block.MiniBlockSlice, error) { + arguments.EpochValidatorInfoCreator = &testscommon.EpochValidatorInfoCreatorStub{ + CreateValidatorInfoMiniBlocksCalled: func(validatorsInfo state.ShardValidatorsInfoMapHandler) (block.MiniBlockSlice, error) { assert.Equal(t, expectedValidatorsInfo, validatorsInfo) return validatorInfoMiniBlocks, nil }, @@ -3506,11 +3500,11 @@ func TestMetaProcessor_CreateEpochStartBodyShouldWork(t *testing.T) { } expectedRootHash := []byte("root hash") - arguments.ValidatorStatisticsProcessor = &mock.ValidatorStatisticsProcessorStub{ + arguments.ValidatorStatisticsProcessor = &testscommon.ValidatorStatisticsProcessorStub{ RootHashCalled: func() ([]byte, error) { return expectedRootHash, nil }, - GetValidatorInfoForRootHashCalled: func(rootHash []byte) (map[uint32][]*state.ValidatorInfo, error) { + GetValidatorInfoForRootHashCalled: func(rootHash []byte) (state.ShardValidatorsInfoMapHandler, error) { assert.Equal(t, expectedRootHash, rootHash) return expectedValidatorsInfo, nil }, @@ -3518,32 +3512,31 @@ func TestMetaProcessor_CreateEpochStartBodyShouldWork(t *testing.T) { wasCalled := false expectedRewardsForProtocolSustain := big.NewInt(11) - arguments.EpochRewardsCreator = &mock.EpochRewardsCreatorStub{ + arguments.EpochRewardsCreator = &testscommon.RewardsCreatorStub{ CreateRewardsMiniBlocksCalled: func( - metaBlock data.MetaHeaderHandler, validatorsInfo map[uint32][]*state.ValidatorInfo, computedEconomics *block.Economics, + metaBlock data.MetaHeaderHandler, validatorsInfo state.ShardValidatorsInfoMapHandler, computedEconomics *block.Economics, ) (block.MiniBlockSlice, error) { wasCalled = true assert.Equal(t, expectedValidatorsInfo, validatorsInfo) assert.Equal(t, mb, metaBlock) return rewardMiniBlocks, nil }, - GetProtocolSustainCalled: func() *big.Int { + GetProtocolSustainabilityRewardsCalled: func() *big.Int { return expectedRewardsForProtocolSustain }, } - arguments.EpochValidatorInfoCreator = &mock.EpochValidatorInfoCreatorStub{ - CreateValidatorInfoMiniBlocksCalled: func(validatorsInfo map[uint32][]*state.ValidatorInfo) (block.MiniBlockSlice, error) { + arguments.EpochValidatorInfoCreator = &testscommon.EpochValidatorInfoCreatorStub{ + CreateValidatorInfoMiniBlocksCalled: func(validatorsInfo 
state.ShardValidatorsInfoMapHandler) (block.MiniBlockSlice, error) { assert.Equal(t, expectedValidatorsInfo, validatorsInfo) return validatorInfoMiniBlocks, nil }, } - arguments.EpochSystemSCProcessor = &mock.EpochStartSystemSCStub{ - ProcessSystemSmartContractCalled: func(validatorsInfo map[uint32][]*state.ValidatorInfo, nonce uint64, epoch uint32) error { + arguments.EpochSystemSCProcessor = &testscommon.EpochStartSystemSCStub{ + ProcessSystemSmartContractCalled: func(validatorsInfo state.ShardValidatorsInfoMapHandler, header data.HeaderHandler) error { assert.True(t, wasCalled) - assert.Equal(t, mb.GetNonce(), nonce) - assert.Equal(t, mb.GetEpoch(), epoch) + assert.Equal(t, mb, header) return nil }, } @@ -3618,8 +3611,7 @@ func TestMetaProcessor_getAllMarshalledTxs(t *testing.T) { t.Parallel() arguments := createMockMetaArguments(createMockComponentHolders()) - - arguments.EpochRewardsCreator = &mock.EpochRewardsCreatorStub{ + arguments.EpochRewardsCreator = &testscommon.RewardsCreatorStub{ CreateMarshalledDataCalled: func(body *block.Body) map[string][][]byte { marshalledData := make(map[string][][]byte) for _, miniBlock := range body.MiniBlocks { @@ -3632,7 +3624,7 @@ func TestMetaProcessor_getAllMarshalledTxs(t *testing.T) { }, } - arguments.EpochValidatorInfoCreator = &mock.EpochValidatorInfoCreatorStub{ + arguments.EpochValidatorInfoCreator = &testscommon.EpochValidatorInfoCreatorStub{ CreateMarshalledDataCalled: func(body *block.Body) map[string][][]byte { marshalledData := make(map[string][][]byte) for _, miniBlock := range body.MiniBlocks { diff --git a/process/block/metrics.go b/process/block/metrics.go index f9c3e0075b3..ce29ddb23f8 100644 --- a/process/block/metrics.go +++ b/process/block/metrics.go @@ -225,12 +225,12 @@ func indexValidatorsRating( return } - for shardID, validatorInfosInShard := range validators { + for shardID, validatorInfosInShard := range validators.GetShardValidatorsInfoMap() { validatorsInfos := make([]*outportcore.ValidatorRatingInfo, 0) for _, validatorInfo := range validatorInfosInShard { validatorsInfos = append(validatorsInfos, &outportcore.ValidatorRatingInfo{ - PublicKey: hex.EncodeToString(validatorInfo.PublicKey), - Rating: float32(validatorInfo.Rating) * 100 / 10000000, + PublicKey: hex.EncodeToString(validatorInfo.GetPublicKey()), + Rating: float32(validatorInfo.GetRating()) * 100 / 10000000, }) } diff --git a/process/block/postprocess/feeHandler.go b/process/block/postprocess/feeHandler.go index d4248154ef9..5cfc7996ab6 100644 --- a/process/block/postprocess/feeHandler.go +++ b/process/block/postprocess/feeHandler.go @@ -26,13 +26,14 @@ type feeHandler struct { } // NewFeeAccumulator constructor for the fee accumulator -func NewFeeAccumulator() (*feeHandler, error) { - f := &feeHandler{} - f.accumulatedFees = big.NewInt(0) - f.developerFees = big.NewInt(0) - f.mapHashFee = make(map[string]*feeData) - f.mapDependentHashes = make(map[string][]byte) - return f, nil +func NewFeeAccumulator() *feeHandler { + return &feeHandler{ + mut: sync.RWMutex{}, + mapHashFee: make(map[string]*feeData), + accumulatedFees: big.NewInt(0), + developerFees: big.NewInt(0), + mapDependentHashes: make(map[string][]byte), + } } // CreateBlockStarted does the cleanup before creating a new block diff --git a/process/block/postprocess/feeHandler_test.go b/process/block/postprocess/feeHandler_test.go index b74dbab4e0e..060276ba2fb 100644 --- a/process/block/postprocess/feeHandler_test.go +++ b/process/block/postprocess/feeHandler_test.go @@ -13,15 +13,14 @@ import ( 
func TestNewFeeAccumulator(t *testing.T) { t.Parallel() - feeHandler, err := postprocess.NewFeeAccumulator() - require.Nil(t, err) + feeHandler := postprocess.NewFeeAccumulator() require.NotNil(t, feeHandler) } func TestFeeHandler_CreateBlockStarted(t *testing.T) { t.Parallel() - feeHandler, _ := postprocess.NewFeeAccumulator() + feeHandler := postprocess.NewFeeAccumulator() feeHandler.ProcessTransactionFee(big.NewInt(100), big.NewInt(50), []byte("txhash")) zeroGasAndFees := process.GetZeroGasAndFees() @@ -37,7 +36,7 @@ func TestFeeHandler_CreateBlockStarted(t *testing.T) { func TestFeeHandler_GetAccumulatedFees(t *testing.T) { t.Parallel() - feeHandler, _ := postprocess.NewFeeAccumulator() + feeHandler := postprocess.NewFeeAccumulator() feeHandler.ProcessTransactionFee(big.NewInt(100), big.NewInt(50), []byte("txhash")) accumulatedFees := feeHandler.GetAccumulatedFees() @@ -47,7 +46,7 @@ func TestFeeHandler_GetAccumulatedFees(t *testing.T) { func TestFeeHandler_GetDeveloperFees(t *testing.T) { t.Parallel() - feeHandler, _ := postprocess.NewFeeAccumulator() + feeHandler := postprocess.NewFeeAccumulator() feeHandler.ProcessTransactionFee(big.NewInt(100), big.NewInt(50), []byte("txhash")) devFees := feeHandler.GetDeveloperFees() @@ -57,7 +56,7 @@ func TestFeeHandler_GetDeveloperFees(t *testing.T) { func TestFeeHandler_ProcessTransactionFee(t *testing.T) { t.Parallel() - feeHandler, _ := postprocess.NewFeeAccumulator() + feeHandler := postprocess.NewFeeAccumulator() feeHandler.ProcessTransactionFee(big.NewInt(1000), big.NewInt(100), []byte("txhash1")) feeHandler.ProcessTransactionFee(big.NewInt(100), big.NewInt(10), []byte("txhash2")) @@ -72,7 +71,7 @@ func TestFeeHandler_ProcessTransactionFee(t *testing.T) { func TestFeeHandler_RevertFees(t *testing.T) { t.Parallel() - feeHandler, _ := postprocess.NewFeeAccumulator() + feeHandler := postprocess.NewFeeAccumulator() feeHandler.ProcessTransactionFee(big.NewInt(1000), big.NewInt(100), []byte("txhash1")) feeHandler.ProcessTransactionFee(big.NewInt(100), big.NewInt(10), []byte("txhash2")) @@ -89,7 +88,7 @@ func TestFeeHandler_RevertFees(t *testing.T) { func TestFeeHandler_CompleteRevertFeesUserTxs(t *testing.T) { t.Parallel() - feeHandler, _ := postprocess.NewFeeAccumulator() + feeHandler := postprocess.NewFeeAccumulator() userTxHashes := [][]byte{[]byte("txHash1"), []byte("txHash2"), []byte("txHash3")} originalTxHashes := [][]byte{[]byte("origTxHash1"), []byte("origTxHash2"), []byte("origTxHash3")} @@ -111,7 +110,7 @@ func TestFeeHandler_PartialRevertFeesUserTxs(t *testing.T) { originalTxHashes := [][]byte{[]byte("origTxHash1"), []byte("origTxHash2"), []byte("origTxHash3"), []byte("userTxHash4")} t.Run("revert partial originalTxs", func(t *testing.T) { - feeHandler, _ := postprocess.NewFeeAccumulator() + feeHandler := postprocess.NewFeeAccumulator() feeHandler.ProcessTransactionFeeRelayedUserTx(big.NewInt(1000), big.NewInt(100), userTxHashes[0], originalTxHashes[0]) feeHandler.ProcessTransactionFeeRelayedUserTx(big.NewInt(100), big.NewInt(10), userTxHashes[1], originalTxHashes[1]) feeHandler.ProcessTransactionFeeRelayedUserTx(big.NewInt(10), big.NewInt(1), userTxHashes[2], originalTxHashes[2]) @@ -125,7 +124,7 @@ func TestFeeHandler_PartialRevertFeesUserTxs(t *testing.T) { require.Equal(t, big.NewInt(200), devFees) }) t.Run("revert all userTxs", func(t *testing.T) { - feeHandler, _ := postprocess.NewFeeAccumulator() + feeHandler := postprocess.NewFeeAccumulator() feeHandler.ProcessTransactionFeeRelayedUserTx(big.NewInt(1000), big.NewInt(100), 
userTxHashes[0], originalTxHashes[0]) feeHandler.ProcessTransactionFeeRelayedUserTx(big.NewInt(100), big.NewInt(10), userTxHashes[1], originalTxHashes[1]) feeHandler.ProcessTransactionFeeRelayedUserTx(big.NewInt(10), big.NewInt(1), userTxHashes[2], originalTxHashes[2]) @@ -139,7 +138,7 @@ func TestFeeHandler_PartialRevertFeesUserTxs(t *testing.T) { require.Equal(t, big.NewInt(200), devFees) }) t.Run("revert partial userTxs", func(t *testing.T) { - feeHandler, _ := postprocess.NewFeeAccumulator() + feeHandler := postprocess.NewFeeAccumulator() feeHandler.ProcessTransactionFeeRelayedUserTx(big.NewInt(1000), big.NewInt(100), userTxHashes[0], originalTxHashes[0]) feeHandler.ProcessTransactionFeeRelayedUserTx(big.NewInt(100), big.NewInt(10), userTxHashes[1], originalTxHashes[1]) feeHandler.ProcessTransactionFeeRelayedUserTx(big.NewInt(10), big.NewInt(1), userTxHashes[2], originalTxHashes[2]) @@ -157,6 +156,6 @@ func TestFeeHandler_PartialRevertFeesUserTxs(t *testing.T) { func TestFeeHandler_IsInterfaceNil(t *testing.T) { t.Parallel() - fee, _ := postprocess.NewFeeAccumulator() + fee := postprocess.NewFeeAccumulator() require.False(t, check.IfNil(fee)) } diff --git a/process/block/postprocess/intermediateResults_test.go b/process/block/postprocess/intermediateResults_test.go index d659730575a..b9a0a8e8f83 100644 --- a/process/block/postprocess/intermediateResults_test.go +++ b/process/block/postprocess/intermediateResults_test.go @@ -35,15 +35,15 @@ func createMockPubkeyConverter() *testscommon.PubkeyConverterMock { func createMockArgsNewIntermediateResultsProcessor() ArgsNewIntermediateResultsProcessor { args := ArgsNewIntermediateResultsProcessor{ - Hasher: &hashingMocks.HasherMock{}, - Marshalizer: &mock.MarshalizerMock{}, - Coordinator: mock.NewMultiShardsCoordinatorMock(5), - PubkeyConv: createMockPubkeyConverter(), - Store: &storage.ChainStorerStub{}, - BlockType: block.SmartContractResultBlock, - CurrTxs: &mock.TxForCurrentBlockStub{}, - EconomicsFee: &economicsmocks.EconomicsHandlerStub{}, - EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.KeepExecOrderOnCreatedSCRsFlag), + Hasher: &hashingMocks.HasherMock{}, + Marshalizer: &mock.MarshalizerMock{}, + Coordinator: mock.NewMultiShardsCoordinatorMock(5), + PubkeyConv: createMockPubkeyConverter(), + Store: &storage.ChainStorerStub{}, + BlockType: block.SmartContractResultBlock, + CurrTxs: &mock.TxForCurrentBlockStub{}, + EconomicsFee: &economicsmocks.EconomicsHandlerStub{}, + EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.KeepExecOrderOnCreatedSCRsFlag), TxExecutionOrderHandler: &txExecOrderStub.TxExecutionOrderHandlerStub{}, } diff --git a/process/block/preprocess/transactionsV2_test.go b/process/block/preprocess/transactionsV2_test.go index 50203a1a5ae..9d4fb1cf686 100644 --- a/process/block/preprocess/transactionsV2_test.go +++ b/process/block/preprocess/transactionsV2_test.go @@ -15,9 +15,9 @@ import ( "github.com/multiversx/mx-chain-go/process/mock" "github.com/multiversx/mx-chain-go/storage/txcache" "github.com/multiversx/mx-chain-go/testscommon" + commonMocks "github.com/multiversx/mx-chain-go/testscommon/common" "github.com/multiversx/mx-chain-go/testscommon/economicsmocks" "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" - commonMocks "github.com/multiversx/mx-chain-go/testscommon/common" "github.com/multiversx/mx-chain-go/testscommon/hashingMocks" stateMock "github.com/multiversx/mx-chain-go/testscommon/state" storageStubs 
"github.com/multiversx/mx-chain-go/testscommon/storage" diff --git a/process/block/shardblock.go b/process/block/shardblock.go index 9743abc0bb4..11e62f63ff9 100644 --- a/process/block/shardblock.go +++ b/process/block/shardblock.go @@ -122,6 +122,7 @@ func NewShardProcessor(arguments ArgShardProcessor) (*shardProcessor, error) { blockProcessingCutoffHandler: arguments.BlockProcessingCutoffHandler, managedPeersHolder: arguments.ManagedPeersHolder, sentSignaturesTracker: arguments.SentSignaturesTracker, + extraDelayRequestBlockInfo: time.Duration(arguments.Config.EpochStartConfig.ExtraDelayForRequestBlockInfoInMilliseconds) * time.Millisecond, } sp := shardProcessor{ diff --git a/process/block/shardblockRequest_test.go b/process/block/shardblockRequest_test.go new file mode 100644 index 00000000000..2440c6ecba5 --- /dev/null +++ b/process/block/shardblockRequest_test.go @@ -0,0 +1,584 @@ +package block_test + +import ( + "bytes" + "fmt" + "sync/atomic" + "testing" + "time" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-core-go/data/block" + "github.com/stretchr/testify/require" + + blproc "github.com/multiversx/mx-chain-go/process/block" + "github.com/multiversx/mx-chain-go/testscommon" + dataRetrieverMock "github.com/multiversx/mx-chain-go/testscommon/dataRetriever" +) + +type headerData struct { + hash []byte + header data.HeaderHandler +} + +type shardBlockTestData struct { + headerData []*headerData +} + +func TestShardProcessor_RequestMissingFinalityAttestingHeaders(t *testing.T) { + t.Parallel() + + t.Run("missing attesting meta header", func(t *testing.T) { + t.Parallel() + + arguments, requestHandler := shardBlockRequestTestInit(t) + testData := createShardProcessorTestData() + metaChainData := testData[core.MetachainShardId] + numCalls := atomic.Uint32{} + requestHandler.RequestShardHeaderByNonceCalled = func(shardID uint32, nonce uint64) { + require.Fail(t, fmt.Sprintf("should not request shard header by nonce, shardID: %d, nonce: %d", shardID, nonce)) + } + requestHandler.RequestMetaHeaderByNonceCalled = func(nonce uint64) { + attestationNonce := metaChainData.headerData[1].header.GetNonce() + require.Equal(t, attestationNonce, nonce, fmt.Sprintf("nonce should have been %d", attestationNonce)) + numCalls.Add(1) + } + sp, _ := blproc.NewShardProcessor(arguments) + + metaBlockData := metaChainData.headerData[0] + // not adding the confirmation metaBlock to the headers pool means it will be missing and requested + sp.SetHighestHdrNonceForCurrentBlock(core.MetachainShardId, metaBlockData.header.GetNonce()) + res := sp.RequestMissingFinalityAttestingHeaders() + time.Sleep(100 * time.Millisecond) + + require.Equal(t, uint32(1), res) + require.Equal(t, uint32(1), numCalls.Load()) + }) + t.Run("no missing attesting meta header", func(t *testing.T) { + t.Parallel() + + arguments, requestHandler := shardBlockRequestTestInit(t) + testData := createShardProcessorTestData() + metaChainData := testData[core.MetachainShardId] + requestHandler.RequestShardHeaderByNonceCalled = func(shardID uint32, nonce uint64) { + require.Fail(t, fmt.Sprintf("should not request shard header by nonce, shardID: %d, nonce: %d", shardID, nonce)) + } + requestHandler.RequestMetaHeaderByNonceCalled = func(nonce uint64) { + require.Fail(t, "should not request meta header by nonce") + } + sp, _ := blproc.NewShardProcessor(arguments) + + headersDataPool := arguments.DataComponents.Datapool().Headers() + require.NotNil(t, 
headersDataPool) + metaBlockData := metaChainData.headerData[0] + confirmationMetaBlockData := metaChainData.headerData[1] + headersDataPool.AddHeader(confirmationMetaBlockData.hash, confirmationMetaBlockData.header) + sp.SetHighestHdrNonceForCurrentBlock(core.MetachainShardId, metaBlockData.header.GetNonce()) + res := sp.RequestMissingFinalityAttestingHeaders() + time.Sleep(100 * time.Millisecond) + + require.Equal(t, uint32(0), res) + }) +} + +func TestShardProcessor_computeExistingAndRequestMissingMetaHeaders(t *testing.T) { + t.Parallel() + + shard1ID := uint32(1) + t.Run("one referenced metaBlock missing will be requested", func(t *testing.T) { + t.Parallel() + + arguments, requestHandler := shardBlockRequestTestInit(t) + testData := createShardProcessorTestData() + metaChainData := testData[core.MetachainShardId] + shard1Data := testData[shard1ID] + numCalls := atomic.Uint32{} + requestHandler.RequestShardHeaderByNonceCalled = func(shardID uint32, nonce uint64) { + require.Fail(t, fmt.Sprintf("should not request shard header by nonce, shardID: %d, nonce: %d", shardID, nonce)) + } + requestHandler.RequestMetaHeaderByNonceCalled = func(nonce uint64) { + // should only be called when requesting attestation meta header block + require.Fail(t, "should not request meta header by nonce") + } + requestHandler.RequestMetaHeaderCalled = func(hash []byte) { + require.Equal(t, metaChainData.headerData[1].hash, hash) + numCalls.Add(1) + } + sp, _ := blproc.NewShardProcessor(arguments) + + metaBlockData := metaChainData.headerData[0] + sp.SetHighestHdrNonceForCurrentBlock(core.MetachainShardId, metaBlockData.header.GetNonce()) + // not adding the referenced metaBlock to the headers pool means it will be missing and requested + // first of the 2 referenced headers is added, the other will be missing + headersDataPool := arguments.DataComponents.Datapool().Headers() + headersDataPool.AddHeader(metaBlockData.hash, metaBlockData.header) + + blockBeingProcessed := shard1Data.headerData[1].header + shardBlockBeingProcessed := blockBeingProcessed.(*block.Header) + missingHeaders, missingFinalityAttestingHeaders := sp.ComputeExistingAndRequestMissingMetaHeaders(shardBlockBeingProcessed) + time.Sleep(100 * time.Millisecond) + + require.Equal(t, uint32(1), missingHeaders) + require.Equal(t, uint32(0), missingFinalityAttestingHeaders) + require.Equal(t, uint32(1), numCalls.Load()) + }) + t.Run("multiple referenced metaBlocks missing will be requested", func(t *testing.T) { + t.Parallel() + + arguments, requestHandler := shardBlockRequestTestInit(t) + testData := createShardProcessorTestData() + numCalls := atomic.Uint32{} + metaChainData := testData[core.MetachainShardId] + shard1Data := testData[shard1ID] + requestHandler.RequestShardHeaderByNonceCalled = func(shardID uint32, nonce uint64) { + require.Fail(t, fmt.Sprintf("should not request shard header by nonce, shardID: %d, nonce: %d", shardID, nonce)) + } + requestHandler.RequestMetaHeaderByNonceCalled = func(nonce uint64) { + // not yet requesting the attestation metaBlock + require.Fail(t, "should not request meta header by nonce") + } + requestHandler.RequestMetaHeaderCalled = func(hash []byte) { + if !(bytes.Equal(hash, metaChainData.headerData[0].hash) || bytes.Equal(hash, metaChainData.headerData[1].hash)) { + require.Fail(t, "other requests than the expected 2 metaBlocks are not expected") + } + + numCalls.Add(1) + } + sp, _ := blproc.NewShardProcessor(arguments) + metaBlockData := testData[core.MetachainShardId].headerData[0] + // not adding the 
referenced metaBlocks to the headers pool means they will be missing and requested + sp.SetHighestHdrNonceForCurrentBlock(core.MetachainShardId, metaBlockData.header.GetNonce()) + + blockBeingProcessed := shard1Data.headerData[1].header + shardBlockBeingProcessed := blockBeingProcessed.(*block.Header) + missingHeaders, missingFinalityAttestingHeaders := sp.ComputeExistingAndRequestMissingMetaHeaders(shardBlockBeingProcessed) + time.Sleep(100 * time.Millisecond) + + require.Equal(t, uint32(2), missingHeaders) + require.Equal(t, uint32(0), missingFinalityAttestingHeaders) + require.Equal(t, uint32(2), numCalls.Load()) + }) + t.Run("all referenced metaBlocks existing with missing attestation, will request the attestation metaBlock", func(t *testing.T) { + t.Parallel() + + arguments, requestHandler := shardBlockRequestTestInit(t) + testData := createShardProcessorTestData() + numCallsMissing := atomic.Uint32{} + numCallsAttestation := atomic.Uint32{} + metaChainData := testData[core.MetachainShardId] + shard1Data := testData[shard1ID] + requestHandler.RequestShardHeaderByNonceCalled = func(shardID uint32, nonce uint64) { + require.Fail(t, fmt.Sprintf("should not request shard header by nonce, shardID: %d, nonce: %d", shardID, nonce)) + } + requestHandler.RequestMetaHeaderByNonceCalled = func(nonce uint64) { + // this is the expected request for the attestation metaBlock, by nonce + require.Equal(t, metaChainData.headerData[1].header.GetNonce()+1, nonce) + numCallsAttestation.Add(1) + } + requestHandler.RequestMetaHeaderCalled = func(hash []byte) { + if !(bytes.Equal(hash, metaChainData.headerData[0].hash) || bytes.Equal(hash, metaChainData.headerData[1].hash)) { + require.Fail(t, "other requests than the expected 2 metaBlocks are not expected") + } + + numCallsMissing.Add(1) + } + sp, _ := blproc.NewShardProcessor(arguments) + // adding both referenced metaBlocks to the headers pool; only the attestation metaBlock is missing and should be requested + headersDataPool := arguments.DataComponents.Datapool().Headers() + headersDataPool.AddHeader(metaChainData.headerData[0].hash, metaChainData.headerData[0].header) + headersDataPool.AddHeader(metaChainData.headerData[1].hash, metaChainData.headerData[1].header) + + blockBeingProcessed := shard1Data.headerData[1].header + shardBlockBeingProcessed := blockBeingProcessed.(*block.Header) + missingHeaders, missingFinalityAttestingHeaders := sp.ComputeExistingAndRequestMissingMetaHeaders(shardBlockBeingProcessed) + time.Sleep(100 * time.Millisecond) + + require.Equal(t, uint32(0), missingHeaders) + require.Equal(t, uint32(1), missingFinalityAttestingHeaders) + require.Equal(t, uint32(0), numCallsMissing.Load()) + require.Equal(t, uint32(1), numCallsAttestation.Load()) + }) + t.Run("all referenced metaBlocks existing and existing attestation metaBlock will not request", func(t *testing.T) { + t.Parallel() + + arguments, requestHandler := shardBlockRequestTestInit(t) + testData := createShardProcessorTestData() + numCallsMissing := atomic.Uint32{} + numCallsAttestation := atomic.Uint32{} + shard1Data := testData[shard1ID] + metaChainData := testData[core.MetachainShardId] + requestHandler.RequestShardHeaderByNonceCalled = func(shardID uint32, nonce uint64) { + require.Fail(t, fmt.Sprintf("should not request shard header by nonce, shardID: %d, nonce: %d", shardID, nonce)) + } + requestHandler.RequestMetaHeaderByNonceCalled = func(nonce uint64) { + numCallsAttestation.Add(1) + } + requestHandler.RequestMetaHeaderCalled = func(hash []byte) { + numCallsMissing.Add(1) + } + sp, _ := 
blproc.NewShardProcessor(arguments) + // adding both referenced metaBlocks and the attestation metaBlock to the headers pool, so nothing should be missing or requested + headersDataPool := arguments.DataComponents.Datapool().Headers() + headersDataPool.AddHeader(metaChainData.headerData[0].hash, metaChainData.headerData[0].header) + headersDataPool.AddHeader(metaChainData.headerData[1].hash, metaChainData.headerData[1].header) + attestationMetaBlock := &block.MetaBlock{ + Nonce: 102, + Round: 102, + PrevHash: metaChainData.headerData[1].hash, + ShardInfo: []block.ShardData{}, + } + attestationMetaBlockHash := []byte("attestationHash") + + headersDataPool.AddHeader(attestationMetaBlockHash, attestationMetaBlock) + + blockBeingProcessed := shard1Data.headerData[1].header + shardBlockBeingProcessed := blockBeingProcessed.(*block.Header) + missingHeaders, missingFinalityAttestingHeaders := sp.ComputeExistingAndRequestMissingMetaHeaders(shardBlockBeingProcessed) + time.Sleep(100 * time.Millisecond) + + require.Equal(t, uint32(0), missingHeaders) + require.Equal(t, uint32(0), missingFinalityAttestingHeaders) + require.Equal(t, uint32(0), numCallsMissing.Load()) + require.Equal(t, uint32(0), numCallsAttestation.Load()) + }) +} + +func TestShardProcessor_receivedMetaBlock(t *testing.T) { + t.Parallel() + + t.Run("received non referenced metaBlock, while still having missing referenced metaBlocks", func(t *testing.T) { + t.Parallel() + + arguments, requestHandler := shardBlockRequestTestInit(t) + testData := createShardProcessorTestData() + sp, _ := blproc.NewShardProcessor(arguments) + hdrsForBlock := sp.GetHdrForBlock() + + firstMissingMetaBlockData := testData[core.MetachainShardId].headerData[0] + secondMissingMetaBlockData := testData[core.MetachainShardId].headerData[1] + + requestHandler.RequestMetaHeaderCalled = func(hash []byte) { + require.Fail(t, "no requests expected") + } + requestHandler.RequestMetaHeaderByNonceCalled = func(nonce uint64) { + require.Fail(t, "no requests expected") + } + + highestHeaderNonce := firstMissingMetaBlockData.header.GetNonce() - 1 + hdrsForBlock.SetNumMissingHdrs(2) + hdrsForBlock.SetNumMissingFinalityAttestingHdrs(0) + hdrsForBlock.SetHighestHdrNonce(core.MetachainShardId, highestHeaderNonce) + hdrsForBlock.SetHdrHashAndInfo(string(firstMissingMetaBlockData.hash), + &blproc.HdrInfo{ + UsedInBlock: true, + Hdr: nil, + }) + hdrsForBlock.SetHdrHashAndInfo(string(secondMissingMetaBlockData.hash), + &blproc.HdrInfo{ + UsedInBlock: true, + Hdr: nil, + }) + otherMetaBlock := &block.MetaBlock{ + Nonce: 102, + Round: 102, + PrevHash: []byte("other meta block prev hash"), + } + + otherMetaBlockHash := []byte("other meta block hash") + sp.ReceivedMetaBlock(otherMetaBlock, otherMetaBlockHash) + time.Sleep(100 * time.Millisecond) + + require.Equal(t, uint32(2), hdrsForBlock.GetMissingHdrs()) + require.Equal(t, uint32(0), hdrsForBlock.GetMissingFinalityAttestingHdrs()) + highestHeaderNonces := hdrsForBlock.GetHighestHdrNonce() + require.Equal(t, highestHeaderNonce, highestHeaderNonces[core.MetachainShardId]) + }) + t.Run("received missing referenced metaBlock, other referenced metaBlock still missing", func(t *testing.T) { + t.Parallel() + + arguments, requestHandler := shardBlockRequestTestInit(t) + testData := createShardProcessorTestData() + sp, _ := blproc.NewShardProcessor(arguments) + hdrsForBlock := sp.GetHdrForBlock() + + firstMissingMetaBlockData := testData[core.MetachainShardId].headerData[0] + secondMissingMetaBlockData := testData[core.MetachainShardId].headerData[1] + + 
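// no requests at all are expected in this scenario: receiving one of the two missing referenced metaBlocks still leaves the other one missing, so attestation headers are not yet requested + 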
requestHandler.RequestMetaHeaderCalled = func(hash []byte) { + require.Fail(t, "no requests expected") + } + requestHandler.RequestMetaHeaderByNonceCalled = func(nonce uint64) { + require.Fail(t, "no requests expected") + } + + highestHeaderNonce := firstMissingMetaBlockData.header.GetNonce() - 1 + hdrsForBlock.SetNumMissingHdrs(2) + hdrsForBlock.SetNumMissingFinalityAttestingHdrs(0) + hdrsForBlock.SetHighestHdrNonce(core.MetachainShardId, highestHeaderNonce) + hdrsForBlock.SetHdrHashAndInfo(string(firstMissingMetaBlockData.hash), + &blproc.HdrInfo{ + UsedInBlock: true, + Hdr: nil, + }) + hdrsForBlock.SetHdrHashAndInfo(string(secondMissingMetaBlockData.hash), + &blproc.HdrInfo{ + UsedInBlock: true, + Hdr: nil, + }) + + sp.ReceivedMetaBlock(firstMissingMetaBlockData.header, firstMissingMetaBlockData.hash) + time.Sleep(100 * time.Millisecond) + + require.Equal(t, uint32(1), hdrsForBlock.GetMissingHdrs()) + require.Equal(t, uint32(0), hdrsForBlock.GetMissingFinalityAttestingHdrs()) + highestHeaderNonces := hdrsForBlock.GetHighestHdrNonce() + require.Equal(t, firstMissingMetaBlockData.header.GetNonce(), highestHeaderNonces[core.MetachainShardId]) + }) + t.Run("received non missing referenced metaBlock", func(t *testing.T) { + t.Parallel() + + arguments, requestHandler := shardBlockRequestTestInit(t) + testData := createShardProcessorTestData() + sp, _ := blproc.NewShardProcessor(arguments) + hdrsForBlock := sp.GetHdrForBlock() + + notMissingReferencedMetaBlockData := testData[core.MetachainShardId].headerData[0] + missingMetaBlockData := testData[core.MetachainShardId].headerData[1] + + requestHandler.RequestMetaHeaderCalled = func(hash []byte) { + require.Fail(t, "no requests expected") + } + requestHandler.RequestMetaHeaderByNonceCalled = func(nonce uint64) { + require.Fail(t, "no requests expected") + } + + highestHeaderNonce := notMissingReferencedMetaBlockData.header.GetNonce() - 1 + hdrsForBlock.SetNumMissingHdrs(1) + hdrsForBlock.SetNumMissingFinalityAttestingHdrs(0) + hdrsForBlock.SetHighestHdrNonce(core.MetachainShardId, highestHeaderNonce) + hdrsForBlock.SetHdrHashAndInfo(string(notMissingReferencedMetaBlockData.hash), + &blproc.HdrInfo{ + UsedInBlock: true, + Hdr: notMissingReferencedMetaBlockData.header, + }) + hdrsForBlock.SetHdrHashAndInfo(string(missingMetaBlockData.hash), + &blproc.HdrInfo{ + UsedInBlock: true, + Hdr: nil, + }) + + headersDataPool := arguments.DataComponents.Datapool().Headers() + require.NotNil(t, headersDataPool) + headersDataPool.AddHeader(notMissingReferencedMetaBlockData.hash, notMissingReferencedMetaBlockData.header) + + sp.ReceivedMetaBlock(notMissingReferencedMetaBlockData.header, notMissingReferencedMetaBlockData.hash) + time.Sleep(100 * time.Millisecond) + + require.Equal(t, uint32(1), hdrsForBlock.GetMissingHdrs()) + require.Equal(t, uint32(0), hdrsForBlock.GetMissingFinalityAttestingHdrs()) + hdrsForBlockHighestNonces := hdrsForBlock.GetHighestHdrNonce() + require.Equal(t, highestHeaderNonce, hdrsForBlockHighestNonces[core.MetachainShardId]) + }) + t.Run("received missing attestation metaBlock", func(t *testing.T) { + t.Parallel() + + arguments, requestHandler := shardBlockRequestTestInit(t) + testData := createShardProcessorTestData() + sp, _ := blproc.NewShardProcessor(arguments) + hdrsForBlock := sp.GetHdrForBlock() + + referencedMetaBlock := testData[core.MetachainShardId].headerData[0] + lastReferencedMetaBlock := testData[core.MetachainShardId].headerData[1] + attestationMetaBlockHash := []byte("attestation meta block hash") + 
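// the attestation metaBlock built below directly follows the last referenced metaBlock: next nonce and round, linked through PrevHash + 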
attestationMetaBlock := &block.MetaBlock{ + Nonce: lastReferencedMetaBlock.header.GetNonce() + 1, + Round: lastReferencedMetaBlock.header.GetRound() + 1, + PrevHash: lastReferencedMetaBlock.hash, + } + + requestHandler.RequestMetaHeaderCalled = func(hash []byte) { + require.Fail(t, "no requests expected") + } + requestHandler.RequestMetaHeaderByNonceCalled = func(nonce uint64) { + require.Fail(t, "no requests expected") + } + + hdrsForBlock.SetNumMissingHdrs(0) + hdrsForBlock.SetNumMissingFinalityAttestingHdrs(1) + hdrsForBlock.SetHighestHdrNonce(core.MetachainShardId, lastReferencedMetaBlock.header.GetNonce()) + hdrsForBlock.SetHdrHashAndInfo(string(referencedMetaBlock.hash), + &blproc.HdrInfo{ + UsedInBlock: true, + Hdr: referencedMetaBlock.header, + }) + hdrsForBlock.SetHdrHashAndInfo(string(lastReferencedMetaBlock.hash), + &blproc.HdrInfo{ + UsedInBlock: true, + Hdr: lastReferencedMetaBlock.header, + }) + + headersDataPool := arguments.DataComponents.Datapool().Headers() + require.NotNil(t, headersDataPool) + headersDataPool.AddHeader(referencedMetaBlock.hash, referencedMetaBlock.header) + headersDataPool.AddHeader(lastReferencedMetaBlock.hash, lastReferencedMetaBlock.header) + headersDataPool.AddHeader(attestationMetaBlockHash, attestationMetaBlock) + wg := startWaitingForAllHeadersReceivedSignal(t, sp) + + sp.ReceivedMetaBlock(attestationMetaBlock, attestationMetaBlockHash) + wg.Wait() + + require.Equal(t, uint32(0), hdrsForBlock.GetMissingHdrs()) + require.Equal(t, uint32(0), hdrsForBlock.GetMissingFinalityAttestingHdrs()) + hdrsForBlockHighestNonces := hdrsForBlock.GetHighestHdrNonce() + require.Equal(t, lastReferencedMetaBlock.header.GetNonce(), hdrsForBlockHighestNonces[core.MetachainShardId]) + }) +} + +func shardBlockRequestTestInit(t *testing.T) (blproc.ArgShardProcessor, *testscommon.RequestHandlerStub) { + coreComponents, dataComponents, bootstrapComponents, statusComponents := createComponentHolderMocks() + poolMock := dataRetrieverMock.NewPoolsHolderMock() + dataComponents.DataPool = poolMock + arguments := CreateMockArguments(coreComponents, dataComponents, bootstrapComponents, statusComponents) + poolsHolderAsInterface := arguments.DataComponents.Datapool() + poolsHolder, ok := poolsHolderAsInterface.(*dataRetrieverMock.PoolsHolderMock) + require.True(t, ok) + + headersPoolStub := createPoolsHolderForHeaderRequests() + poolsHolder.SetHeadersPool(headersPoolStub) + + requestHandler, ok := arguments.ArgBaseProcessor.RequestHandler.(*testscommon.RequestHandlerStub) + require.True(t, ok) + return arguments, requestHandler +} + +func createShardProcessorTestData() map[uint32]*shardBlockTestData { + // shard 0 miniblocks + mbHash1 := []byte("mb hash 1") + mbHash2 := []byte("mb hash 2") + mbHash3 := []byte("mb hash 3") + + // shard 1 miniblocks + mbHash4 := []byte("mb hash 4") + mbHash5 := []byte("mb hash 5") + mbHash6 := []byte("mb hash 6") + + prevMetaBlockHash := []byte("prev meta block hash") + metaBlockHash := []byte("meta block hash") + metaConfirmationHash := []byte("confirmation meta block hash") + + shard0Block0Hash := []byte("shard 0 block 0 hash") + shard0Block1Hash := []byte("shard 0 block 1 hash") + shard0Block2Hash := []byte("shard 0 block 2 hash") + + shard1Block0Hash := []byte("shard 1 block 0 hash") + shard1Block1Hash := []byte("shard 1 block 1 hash") + shard1Block2Hash := []byte("shard 1 block 2 hash") + + metaBlock := &block.MetaBlock{ + Nonce: 100, + Round: 100, + PrevHash: prevMetaBlockHash, + ShardInfo: []block.ShardData{ + { + ShardID: 0, + HeaderHash: 
shard0Block1Hash, + PrevHash: shard0Block0Hash, + ShardMiniBlockHeaders: []block.MiniBlockHeader{ + {Hash: mbHash1, SenderShardID: 0, ReceiverShardID: 1}, + {Hash: mbHash2, SenderShardID: 0, ReceiverShardID: 1}, + {Hash: mbHash3, SenderShardID: 0, ReceiverShardID: 1}, + }, + }, + }, + } + metaConfirmationBlock := &block.MetaBlock{ + Nonce: 101, + Round: 101, + PrevHash: metaBlockHash, + ShardInfo: []block.ShardData{}, + } + + shard0Block1 := &block.Header{ + ShardID: 0, + PrevHash: shard0Block0Hash, + Nonce: 98, + Round: 98, + MiniBlockHeaders: []block.MiniBlockHeader{ + {Hash: mbHash1, SenderShardID: 0, ReceiverShardID: 1}, + {Hash: mbHash2, SenderShardID: 0, ReceiverShardID: 1}, + {Hash: mbHash3, SenderShardID: 0, ReceiverShardID: 1}, + }, + } + + shard0Block2 := &block.Header{ + ShardID: 0, + PrevHash: shard0Block1Hash, + Nonce: 99, + Round: 99, + MiniBlockHeaders: []block.MiniBlockHeader{}, + } + + shard1Block1 := &block.Header{ + ShardID: 1, + PrevHash: shard1Block0Hash, + MetaBlockHashes: [][]byte{prevMetaBlockHash}, + Nonce: 102, + Round: 102, + MiniBlockHeaders: []block.MiniBlockHeader{ + {Hash: mbHash4, SenderShardID: 0, ReceiverShardID: 1}, + {Hash: mbHash5, SenderShardID: 0, ReceiverShardID: 1}, + {Hash: mbHash6, SenderShardID: 0, ReceiverShardID: 1}, + }, + } + + shard1Block2 := &block.Header{ + ShardID: 1, + PrevHash: shard1Block1Hash, + MetaBlockHashes: [][]byte{metaBlockHash, metaConfirmationHash}, + Nonce: 103, + Round: 103, + MiniBlockHeaders: []block.MiniBlockHeader{}, + } + + sbd := map[uint32]*shardBlockTestData{ + 0: { + headerData: []*headerData{ + { + hash: shard0Block1Hash, + header: shard0Block1, + }, + { + hash: shard0Block2Hash, + header: shard0Block2, + }, + }, + }, + 1: { + headerData: []*headerData{ + { + hash: shard1Block1Hash, + header: shard1Block1, + }, + { + hash: shard1Block2Hash, + header: shard1Block2, + }, + }, + }, + core.MetachainShardId: { + headerData: []*headerData{ + { + hash: metaBlockHash, + header: metaBlock, + }, + { + hash: metaConfirmationHash, + header: metaConfirmationBlock, + }, + }, + }, + } + + return sbd +} diff --git a/process/block/shardblock_test.go b/process/block/shardblock_test.go index 35cd41ce982..39797f8db0c 100644 --- a/process/block/shardblock_test.go +++ b/process/block/shardblock_test.go @@ -22,6 +22,10 @@ import ( "github.com/multiversx/mx-chain-core-go/data/transaction" "github.com/multiversx/mx-chain-core-go/hashing" "github.com/multiversx/mx-chain-core-go/marshal" + vmcommon "github.com/multiversx/mx-chain-vm-common-go" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/dataRetriever/blockchain" @@ -45,9 +49,6 @@ import ( stateMock "github.com/multiversx/mx-chain-go/testscommon/state" statusHandlerMock "github.com/multiversx/mx-chain-go/testscommon/statusHandler" storageStubs "github.com/multiversx/mx-chain-go/testscommon/storage" - vmcommon "github.com/multiversx/mx-chain-vm-common-go" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) const MaxGasLimitPerBlock = uint64(100000) @@ -1677,21 +1678,6 @@ func TestShardProcessor_CheckAndRequestIfMetaHeadersMissingShouldErr(t *testing. 
assert.Equal(t, err, process.ErrTimeIsOut) } -// -------- requestMissingFinalityAttestingHeaders -func TestShardProcessor_RequestMissingFinalityAttestingHeaders(t *testing.T) { - t.Parallel() - - tdp := dataRetrieverMock.NewPoolsHolderMock() - coreComponents, dataComponents, bootstrapComponents, statusComponents := createComponentHolderMocks() - dataComponents.DataPool = tdp - arguments := CreateMockArguments(coreComponents, dataComponents, bootstrapComponents, statusComponents) - sp, _ := blproc.NewShardProcessor(arguments) - - sp.SetHighestHdrNonceForCurrentBlock(core.MetachainShardId, 1) - res := sp.RequestMissingFinalityAttestingHeaders() - assert.Equal(t, res > 0, true) -} - // --------- verifyIncludedMetaBlocksFinality func TestShardProcessor_CheckMetaHeadersValidityAndFinalityShouldPass(t *testing.T) { t.Parallel() diff --git a/process/coordinator/process_test.go b/process/coordinator/process_test.go index 0508620283e..e23c8f8f1ec 100644 --- a/process/coordinator/process_test.go +++ b/process/coordinator/process_test.go @@ -566,14 +566,14 @@ func createPreProcessorContainer() process.PreProcessorsContainer { func createInterimProcessorContainer() process.IntermediateProcessorContainer { argsFactory := shard.ArgsNewIntermediateProcessorsContainerFactory{ - ShardCoordinator: mock.NewMultiShardsCoordinatorMock(5), - Marshalizer: &mock.MarshalizerMock{}, - Hasher: &hashingMocks.HasherMock{}, - PubkeyConverter: createMockPubkeyConverter(), - Store: initStore(), - PoolsHolder: initDataPool([]byte("test_hash1")), - EconomicsFee: &economicsmocks.EconomicsHandlerStub{}, - EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.KeepExecOrderOnCreatedSCRsFlag), + ShardCoordinator: mock.NewMultiShardsCoordinatorMock(5), + Marshalizer: &mock.MarshalizerMock{}, + Hasher: &hashingMocks.HasherMock{}, + PubkeyConverter: createMockPubkeyConverter(), + Store: initStore(), + PoolsHolder: initDataPool([]byte("test_hash1")), + EconomicsFee: &economicsmocks.EconomicsHandlerStub{}, + EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.KeepExecOrderOnCreatedSCRsFlag), TxExecutionOrderHandler: &commonMock.TxExecutionOrderHandlerStub{}, } preFactory, _ := shard.NewIntermediateProcessorsContainerFactory(argsFactory) @@ -2210,14 +2210,14 @@ func TestTransactionCoordinator_VerifyCreatedBlockTransactionsNilOrMiss(t *testi tdp := initDataPool(txHash) shardCoordinator := mock.NewMultiShardsCoordinatorMock(5) argsFactory := shard.ArgsNewIntermediateProcessorsContainerFactory{ - ShardCoordinator: shardCoordinator, - Marshalizer: &mock.MarshalizerMock{}, - Hasher: &hashingMocks.HasherMock{}, - PubkeyConverter: createMockPubkeyConverter(), - Store: &storageStubs.ChainStorerStub{}, - PoolsHolder: tdp, - EconomicsFee: &economicsmocks.EconomicsHandlerStub{}, - EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.KeepExecOrderOnCreatedSCRsFlag), + ShardCoordinator: shardCoordinator, + Marshalizer: &mock.MarshalizerMock{}, + Hasher: &hashingMocks.HasherMock{}, + PubkeyConverter: createMockPubkeyConverter(), + Store: &storageStubs.ChainStorerStub{}, + PoolsHolder: tdp, + EconomicsFee: &economicsmocks.EconomicsHandlerStub{}, + EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.KeepExecOrderOnCreatedSCRsFlag), TxExecutionOrderHandler: &commonMock.TxExecutionOrderHandlerStub{}, } preFactory, _ := shard.NewIntermediateProcessorsContainerFactory(argsFactory) @@ -2278,7 +2278,7 @@ func 
TestTransactionCoordinator_VerifyCreatedBlockTransactionsOk(t *testing.T) { return MaxGasLimitPerBlock }, }, - EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.KeepExecOrderOnCreatedSCRsFlag), + EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.KeepExecOrderOnCreatedSCRsFlag), TxExecutionOrderHandler: &commonMock.TxExecutionOrderHandlerStub{}, } interFactory, _ := shard.NewIntermediateProcessorsContainerFactory(argsFactory) diff --git a/process/economics/builtInFunctionsCost.go b/process/economics/builtInFunctionsCost.go deleted file mode 100644 index f784b5f2332..00000000000 --- a/process/economics/builtInFunctionsCost.go +++ /dev/null @@ -1,177 +0,0 @@ -package economics - -import ( - "github.com/mitchellh/mapstructure" - "github.com/multiversx/mx-chain-core-go/core" - "github.com/multiversx/mx-chain-core-go/core/check" - "github.com/multiversx/mx-chain-core-go/data" - "github.com/multiversx/mx-chain-go/common" - "github.com/multiversx/mx-chain-go/process" -) - -// ArgsBuiltInFunctionCost holds all components that are needed to create a new instance of builtInFunctionsCost -type ArgsBuiltInFunctionCost struct { - GasSchedule core.GasScheduleNotifier - ArgsParser process.ArgumentsParser -} - -type builtInFunctionsCost struct { - gasConfig *process.GasCost - specialBuiltInFunctions map[string]struct{} - argsParser process.ArgumentsParser -} - -// NewBuiltInFunctionsCost will create a new instance of builtInFunctionsCost -func NewBuiltInFunctionsCost(args *ArgsBuiltInFunctionCost) (*builtInFunctionsCost, error) { - if args == nil { - return nil, process.ErrNilArgsBuiltInFunctionsConstHandler - } - if check.IfNil(args.ArgsParser) { - return nil, process.ErrNilArgumentParser - } - if check.IfNil(args.GasSchedule) { - return nil, process.ErrNilGasSchedule - } - - bs := &builtInFunctionsCost{ - argsParser: args.ArgsParser, - } - - bs.initSpecialBuiltInFunctionCostMap() - - var err error - bs.gasConfig, err = createGasConfig(args.GasSchedule.LatestGasSchedule()) - if err != nil { - return nil, err - } - - args.GasSchedule.RegisterNotifyHandler(bs) - - return bs, nil -} - -func (bc *builtInFunctionsCost) initSpecialBuiltInFunctionCostMap() { - bc.specialBuiltInFunctions = map[string]struct{}{ - core.BuiltInFunctionClaimDeveloperRewards: {}, - core.BuiltInFunctionChangeOwnerAddress: {}, - core.BuiltInFunctionSetUserName: {}, - core.BuiltInFunctionSaveKeyValue: {}, - core.BuiltInFunctionESDTTransfer: {}, - core.BuiltInFunctionESDTBurn: {}, - core.BuiltInFunctionESDTLocalBurn: {}, - core.BuiltInFunctionESDTLocalMint: {}, - core.BuiltInFunctionESDTNFTAddQuantity: {}, - core.BuiltInFunctionESDTNFTBurn: {}, - core.BuiltInFunctionESDTNFTCreate: {}, - } -} - -// GasScheduleChange is called when gas schedule is changed, thus all contracts must be updated -func (bc *builtInFunctionsCost) GasScheduleChange(gasSchedule map[string]map[string]uint64) { - newGasConfig, err := createGasConfig(gasSchedule) - if err != nil { - return - } - - bc.gasConfig = newGasConfig -} - -// ComputeBuiltInCost will compute built-in function cost -func (bc *builtInFunctionsCost) ComputeBuiltInCost(tx data.TransactionWithFeeHandler) uint64 { - function, arguments, err := bc.argsParser.ParseCallData(string(tx.GetData())) - if err != nil { - return 0 - } - - switch function { - case core.BuiltInFunctionClaimDeveloperRewards: - return bc.gasConfig.BuiltInCost.ClaimDeveloperRewards - case core.BuiltInFunctionChangeOwnerAddress: - return 
bc.gasConfig.BuiltInCost.ChangeOwnerAddress - case core.BuiltInFunctionSetUserName: - return bc.gasConfig.BuiltInCost.SaveUserName - case core.BuiltInFunctionSaveKeyValue: - return bc.gasConfig.BuiltInCost.SaveKeyValue - case core.BuiltInFunctionESDTTransfer: - return bc.gasConfig.BuiltInCost.ESDTTransfer - case core.BuiltInFunctionESDTBurn: - return bc.gasConfig.BuiltInCost.ESDTBurn - case core.BuiltInFunctionESDTLocalBurn: - return bc.gasConfig.BuiltInCost.ESDTLocalBurn - case core.BuiltInFunctionESDTLocalMint: - return bc.gasConfig.BuiltInCost.ESDTLocalMint - case core.BuiltInFunctionESDTNFTAddQuantity: - return bc.gasConfig.BuiltInCost.ESDTNFTAddQuantity - case core.BuiltInFunctionESDTNFTBurn: - return bc.gasConfig.BuiltInCost.ESDTNFTBurn - case core.BuiltInFunctionESDTNFTCreate: - costStorage := calculateLenOfArguments(arguments) * bc.gasConfig.BaseOperationCost.StorePerByte - return bc.gasConfig.BuiltInCost.ESDTNFTCreate + costStorage - case core.BuiltInFunctionSetGuardian: - return bc.gasConfig.BuiltInCost.SetGuardian - case core.BuiltInFunctionGuardAccount: - return bc.gasConfig.BuiltInCost.GuardAccount - case core.BuiltInFunctionUnGuardAccount: - return bc.gasConfig.BuiltInCost.UnGuardAccount - default: - return 0 - } -} - -func calculateLenOfArguments(arguments [][]byte) uint64 { - totalLen := uint64(0) - for _, arg := range arguments { - totalLen += uint64(len(arg)) - } - - return totalLen -} - -// IsBuiltInFuncCall will check is the provided transaction is a build in function call -func (bc *builtInFunctionsCost) IsBuiltInFuncCall(tx data.TransactionWithFeeHandler) bool { - function, arguments, err := bc.argsParser.ParseCallData(string(tx.GetData())) - if err != nil { - return false - } - - _, isSpecialBuiltIn := bc.specialBuiltInFunctions[function] - isSCCallAfter := core.IsSmartContractAddress(tx.GetRcvAddr()) && len(arguments) > core.MinLenArgumentsESDTTransfer - - return isSpecialBuiltIn && !isSCCallAfter -} - -// IsInterfaceNil returns true if underlying object is nil -func (bc *builtInFunctionsCost) IsInterfaceNil() bool { - return bc == nil -} - -func createGasConfig(gasMap map[string]map[string]uint64) (*process.GasCost, error) { - baseOps := &process.BaseOperationCost{} - err := mapstructure.Decode(gasMap[common.BaseOperationCost], baseOps) - if err != nil { - return nil, err - } - - err = check.ForZeroUintFields(*baseOps) - if err != nil { - return nil, err - } - - builtInOps := &process.BuiltInCost{} - err = mapstructure.Decode(gasMap[common.BuiltInCost], builtInOps) - if err != nil { - return nil, err - } - - err = check.ForZeroUintFields(*builtInOps) - if err != nil { - return nil, err - } - - gasCost := process.GasCost{ - BaseOperationCost: *baseOps, - BuiltInCost: *builtInOps, - } - - return &gasCost, nil -} diff --git a/process/economics/builtInFunctionsCost_test.go b/process/economics/builtInFunctionsCost_test.go deleted file mode 100644 index befcca25912..00000000000 --- a/process/economics/builtInFunctionsCost_test.go +++ /dev/null @@ -1,80 +0,0 @@ -package economics_test - -import ( - "testing" - - "github.com/multiversx/mx-chain-core-go/core/check" - "github.com/multiversx/mx-chain-go/process" - "github.com/multiversx/mx-chain-go/process/economics" - "github.com/multiversx/mx-chain-go/process/mock" - "github.com/multiversx/mx-chain-go/testscommon" - "github.com/multiversx/mx-chain-go/vm/systemSmartContracts/defaults" - "github.com/stretchr/testify/require" -) - -func TestNewBuiltInFunctionsCost(t *testing.T) { - t.Parallel() - - tests := []struct { - name 
string - args func() *economics.ArgsBuiltInFunctionCost - exErr error - }{ - { - name: "NilArguments", - args: func() *economics.ArgsBuiltInFunctionCost { - return nil - }, - exErr: process.ErrNilArgsBuiltInFunctionsConstHandler, - }, - { - name: "NilArgumentsParser", - args: func() *economics.ArgsBuiltInFunctionCost { - return &economics.ArgsBuiltInFunctionCost{ - ArgsParser: nil, - GasSchedule: testscommon.NewGasScheduleNotifierMock(nil), - } - }, - exErr: process.ErrNilArgumentParser, - }, - { - name: "NilGasScheduleHandler", - args: func() *economics.ArgsBuiltInFunctionCost { - return &economics.ArgsBuiltInFunctionCost{ - ArgsParser: &mock.ArgumentParserMock{}, - GasSchedule: nil, - } - }, - exErr: process.ErrNilGasSchedule, - }, - { - name: "ShouldWork", - args: func() *economics.ArgsBuiltInFunctionCost { - return &economics.ArgsBuiltInFunctionCost{ - ArgsParser: &mock.ArgumentParserMock{}, - GasSchedule: testscommon.NewGasScheduleNotifierMock(defaults.FillGasMapInternal(map[string]map[string]uint64{}, 1)), - } - }, - exErr: nil, - }, - } - - for _, test := range tests { - _, err := economics.NewBuiltInFunctionsCost(test.args()) - require.Equal(t, test.exErr, err) - } -} - -func TestNewBuiltInFunctionsCost_GasConfig(t *testing.T) { - t.Parallel() - - args := &economics.ArgsBuiltInFunctionCost{ - ArgsParser: &mock.ArgumentParserMock{}, - GasSchedule: testscommon.NewGasScheduleNotifierMock(defaults.FillGasMapInternal(map[string]map[string]uint64{}, 0)), - } - - builtInCostHandler, err := economics.NewBuiltInFunctionsCost(args) - require.NotNil(t, err) - require.Nil(t, builtInCostHandler) - require.True(t, check.IfNil(builtInCostHandler)) -} diff --git a/process/economics/economicsData.go b/process/economics/economicsData.go index 60658b19bf2..5b7ce045237 100644 --- a/process/economics/economicsData.go +++ b/process/economics/economicsData.go @@ -27,31 +27,26 @@ var log = logger.GetOrCreate("process/economics") type economicsData struct { *gasConfigHandler *rewardsConfigHandler - gasPriceModifier float64 - minInflation float64 - yearSettings map[uint32]*config.YearSetting - mutYearSettings sync.RWMutex - statusHandler core.AppStatusHandler - builtInFunctionsCostHandler BuiltInFunctionsCostHandler - enableEpochsHandler common.EnableEpochsHandler - txVersionHandler process.TxVersionCheckerHandler - mut sync.RWMutex + gasPriceModifier float64 + minInflation float64 + yearSettings map[uint32]*config.YearSetting + mutYearSettings sync.RWMutex + statusHandler core.AppStatusHandler + enableEpochsHandler common.EnableEpochsHandler + txVersionHandler process.TxVersionCheckerHandler + mut sync.RWMutex } // ArgsNewEconomicsData defines the arguments needed for new economics economicsData type ArgsNewEconomicsData struct { - TxVersionChecker process.TxVersionCheckerHandler - BuiltInFunctionsCostHandler BuiltInFunctionsCostHandler - Economics *config.EconomicsConfig - EpochNotifier process.EpochNotifier - EnableEpochsHandler common.EnableEpochsHandler + TxVersionChecker process.TxVersionCheckerHandler + Economics *config.EconomicsConfig + EpochNotifier process.EpochNotifier + EnableEpochsHandler common.EnableEpochsHandler } // NewEconomicsData will create an object with information about economics parameters func NewEconomicsData(args ArgsNewEconomicsData) (*economicsData, error) { - if check.IfNil(args.BuiltInFunctionsCostHandler) { - return nil, process.ErrNilBuiltInFunctionsCostHandler - } if check.IfNil(args.TxVersionChecker) { return nil, process.ErrNilTransactionVersionChecker } @@ -75,12 
+70,11 @@ func NewEconomicsData(args ArgsNewEconomicsData) (*economicsData, error) { } ed := &economicsData{ - minInflation: args.Economics.GlobalSettings.MinimumInflation, - gasPriceModifier: args.Economics.FeeSettings.GasPriceModifier, - statusHandler: statusHandler.NewNilStatusHandler(), - builtInFunctionsCostHandler: args.BuiltInFunctionsCostHandler, - enableEpochsHandler: args.EnableEpochsHandler, - txVersionHandler: args.TxVersionChecker, + minInflation: args.Economics.GlobalSettings.MinimumInflation, + gasPriceModifier: args.Economics.FeeSettings.GasPriceModifier, + statusHandler: statusHandler.NewNilStatusHandler(), + enableEpochsHandler: args.EnableEpochsHandler, + txVersionHandler: args.TxVersionChecker, } ed.yearSettings = make(map[uint32]*config.YearSetting) @@ -517,23 +511,8 @@ func (ed *economicsData) ComputeGasUsedAndFeeBasedOnRefundValue(tx data.Transact // ComputeGasUsedAndFeeBasedOnRefundValueInEpoch will compute gas used value and transaction fee using refund value from a SCR in a specific epoch func (ed *economicsData) ComputeGasUsedAndFeeBasedOnRefundValueInEpoch(tx data.TransactionWithFeeHandler, refundValue *big.Int, epoch uint32) (uint64, *big.Int) { if refundValue.Cmp(big.NewInt(0)) == 0 { - if ed.builtInFunctionsCostHandler.IsBuiltInFuncCall(tx) { - builtInCost := ed.builtInFunctionsCostHandler.ComputeBuiltInCost(tx) - computedGasLimit := ed.ComputeGasLimitInEpoch(tx, epoch) - - gasLimitWithBuiltInCost := builtInCost + computedGasLimit - txFee := ed.ComputeTxFeeBasedOnGasUsedInEpoch(tx, gasLimitWithBuiltInCost, epoch) - - gasLimitWithoutMoveBalance := tx.GetGasLimit() - computedGasLimit - // transaction will consume all the gas if sender provided too much gas - if isTooMuchGasProvided(gasLimitWithoutMoveBalance, gasLimitWithoutMoveBalance-builtInCost) { - return tx.GetGasLimit(), ed.ComputeTxFeeInEpoch(tx, epoch) - } - - return gasLimitWithBuiltInCost, txFee - } - txFee := ed.ComputeTxFeeInEpoch(tx, epoch) + return tx.GetGasLimit(), txFee } @@ -560,15 +539,6 @@ func (ed *economicsData) ComputeGasUsedAndFeeBasedOnRefundValueInEpoch(tx data.T return gasUsed, txFee } -func isTooMuchGasProvided(gasProvided uint64, gasRemained uint64) bool { - if gasProvided <= gasRemained { - return false - } - - gasUsed := gasProvided - gasRemained - return gasProvided > gasUsed*process.MaxGasFeeHigherFactorAccepted -} - // ComputeTxFeeBasedOnGasUsed will compute transaction fee func (ed *economicsData) ComputeTxFeeBasedOnGasUsed(tx data.TransactionWithFeeHandler, gasUsed uint64) *big.Int { currenEpoch := ed.enableEpochsHandler.GetCurrentEpoch() diff --git a/process/economics/economicsData_test.go b/process/economics/economicsData_test.go index 417ef1b7826..1f2c913a826 100644 --- a/process/economics/economicsData_test.go +++ b/process/economics/economicsData_test.go @@ -16,13 +16,10 @@ import ( "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/process/economics" - "github.com/multiversx/mx-chain-go/process/mock" - "github.com/multiversx/mx-chain-go/process/smartContract" "github.com/multiversx/mx-chain-go/testscommon" "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" "github.com/multiversx/mx-chain-go/testscommon/epochNotifier" "github.com/multiversx/mx-chain-go/testscommon/statusHandler" - "github.com/multiversx/mx-chain-go/vm/systemSmartContracts/defaults" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -106,13 +103,12 @@ func 
createArgsForEconomicsData(gasModifier float64) economics.ArgsNewEconomicsD return flag == common.GasPriceModifierFlag }, }, - BuiltInFunctionsCostHandler: &mock.BuiltInCostHandlerStub{}, - TxVersionChecker: &testscommon.TxVersionCheckerStub{}, + TxVersionChecker: &testscommon.TxVersionCheckerStub{}, } return args } -func createArgsForEconomicsDataRealFees(handler economics.BuiltInFunctionsCostHandler) economics.ArgsNewEconomicsData { +func createArgsForEconomicsDataRealFees() economics.ArgsNewEconomicsData { feeSettings := feeSettingsReal() args := economics.ArgsNewEconomicsData{ Economics: createDummyEconomicsConfig(feeSettings), @@ -122,8 +118,7 @@ func createArgsForEconomicsDataRealFees(handler economics.BuiltInFunctionsCostHa return flag == common.GasPriceModifierFlag }, }, - BuiltInFunctionsCostHandler: handler, - TxVersionChecker: &testscommon.TxVersionCheckerStub{}, + TxVersionChecker: &testscommon.TxVersionCheckerStub{}, } return args } @@ -525,16 +520,6 @@ func TestNewEconomicsData_InvalidTopUpGradientPointShouldErr(t *testing.T) { assert.True(t, errors.Is(err, process.ErrInvalidRewardsTopUpGradientPoint)) } -func TestNewEconomicsData_NilBuiltInFunctionsCostHandlerShouldErr(t *testing.T) { - t.Parallel() - - args := createArgsForEconomicsData(1) - args.BuiltInFunctionsCostHandler = nil - - _, err := economics.NewEconomicsData(args) - assert.Equal(t, process.ErrNilBuiltInFunctionsCostHandler, err) -} - func TestNewEconomicsData_NilTxVersionCheckerShouldErr(t *testing.T) { t.Parallel() @@ -1141,7 +1126,7 @@ func TestEconomicsData_ComputeGasUsedAndFeeBasedOnRefundValueZero(t *testing.T) func TestEconomicsData_ComputeGasUsedAndFeeBasedOnRefundValueCheckGasUsedValue(t *testing.T) { t.Parallel() - economicData, _ := economics.NewEconomicsData(createArgsForEconomicsDataRealFees(&mock.BuiltInCostHandlerStub{})) + economicData, _ := economics.NewEconomicsData(createArgsForEconomicsDataRealFees()) txData := []byte("0061736d0100000001150460037f7f7e017f60027f7f017e60017e0060000002420303656e7611696e74363473746f7261676553746f7265000003656e7610696e74363473746f726167654c6f6164000103656e760b696e74363466696e6973680002030504030303030405017001010105030100020608017f01419088040b072f05066d656d6f7279020004696e6974000309696e6372656d656e7400040964656372656d656e7400050367657400060a8a01041300418088808000410742011080808080001a0b2e01017e4180888080004107418088808000410710818080800042017c22001080808080001a20001082808080000b2e01017e41808880800041074180888080004107108180808000427f7c22001080808080001a20001082808080000b160041808880800041071081808080001082808080000b0b0f01004180080b08434f554e54455200@0500@0100") tx1 := &transaction.Transaction{ GasPrice: 1000000000, @@ -1194,7 +1179,7 @@ func TestEconomicsData_ComputeGasUsedAndFeeBasedOnRefundValueCheckGasUsedValue(t func TestEconomicsData_ComputeGasUsedAndFeeBasedOnRefundValueCheck(t *testing.T) { t.Parallel() - economicData, _ := economics.NewEconomicsData(createArgsForEconomicsDataRealFees(&mock.BuiltInCostHandlerStub{})) + economicData, _ := economics.NewEconomicsData(createArgsForEconomicsDataRealFees()) txData := 
[]byte("0061736d0100000001150460037f7f7e017f60027f7f017e60017e0060000002420303656e7611696e74363473746f7261676553746f7265000003656e7610696e74363473746f726167654c6f6164000103656e760b696e74363466696e6973680002030504030303030405017001010105030100020608017f01419088040b072f05066d656d6f7279020004696e6974000309696e6372656d656e7400040964656372656d656e7400050367657400060a8a01041300418088808000410742011080808080001a0b2e01017e4180888080004107418088808000410710818080800042017c22001080808080001a20001082808080000b2e01017e41808880800041074180888080004107108180808000427f7c22001080808080001a20001082808080000b160041808880800041071081808080001082808080000b0b0f01004180080b08434f554e54455200@0500@0100") tx := &transaction.Transaction{ GasPrice: 1000000000, @@ -1214,11 +1199,7 @@ func TestEconomicsData_ComputeGasUsedAndFeeBasedOnRefundValueCheck(t *testing.T) func TestEconomicsData_ComputeGasUsedAndFeeBasedOnRefundValueSpecialBuiltIn_ToMuchGasProvided(t *testing.T) { t.Parallel() - builtInCostHandler, _ := economics.NewBuiltInFunctionsCost(&economics.ArgsBuiltInFunctionCost{ - GasSchedule: testscommon.NewGasScheduleNotifierMock(defaults.FillGasMapInternal(map[string]map[string]uint64{}, 1)), - ArgsParser: smartContract.NewArgumentParser(), - }) - economicData, _ := economics.NewEconomicsData(createArgsForEconomicsDataRealFees(builtInCostHandler)) + economicData, _ := economics.NewEconomicsData(createArgsForEconomicsDataRealFees()) tx := &transaction.Transaction{ GasPrice: 1000000000, @@ -1236,11 +1217,6 @@ func TestEconomicsData_ComputeGasUsedAndFeeBasedOnRefundValueSpecialBuiltIn_ToMu } func TestEconomicsData_ComputeGasUsedAndFeeBasedOnRefundValueStakeTx(t *testing.T) { - builtInCostHandler, _ := economics.NewBuiltInFunctionsCost(&economics.ArgsBuiltInFunctionCost{ - GasSchedule: testscommon.NewGasScheduleNotifierMock(defaults.FillGasMapInternal(map[string]map[string]uint64{}, 1)), - ArgsParser: smartContract.NewArgumentParser(), - }) - txStake := &transaction.Transaction{ GasPrice: 1000000000, GasLimit: 250000000, @@ -1250,7 +1226,7 @@ func TestEconomicsData_ComputeGasUsedAndFeeBasedOnRefundValueStakeTx(t *testing. expectedGasUsed := uint64(39378847) expectedFee, _ := big.NewInt(0).SetString("39378847000000000", 10) - args := createArgsForEconomicsDataRealFees(builtInCostHandler) + args := createArgsForEconomicsDataRealFees() args.EpochNotifier = forking.NewGenericEpochNotifier() args.EnableEpochsHandler, _ = enablers.NewEnableEpochsHandler(config.EnableEpochs{ PenalizedTooMuchGasEnableEpoch: 1000, @@ -1267,11 +1243,7 @@ func TestEconomicsData_ComputeGasUsedAndFeeBasedOnRefundValueStakeTx(t *testing. 
func TestEconomicsData_ComputeGasUsedAndFeeBasedOnRefundValueSpecialBuiltIn(t *testing.T) { t.Parallel() - builtInCostHandler, _ := economics.NewBuiltInFunctionsCost(&economics.ArgsBuiltInFunctionCost{ - GasSchedule: testscommon.NewGasScheduleNotifierMock(defaults.FillGasMapInternal(map[string]map[string]uint64{}, 1)), - ArgsParser: smartContract.NewArgumentParser(), - }) - economicData, _ := economics.NewEconomicsData(createArgsForEconomicsDataRealFees(builtInCostHandler)) + economicData, _ := economics.NewEconomicsData(createArgsForEconomicsDataRealFees()) tx := &transaction.Transaction{ GasPrice: 1000000000, @@ -1279,8 +1251,8 @@ func TestEconomicsData_ComputeGasUsedAndFeeBasedOnRefundValueSpecialBuiltIn(t *t Data: []byte("ESDTTransfer@54474e2d383862383366@0a"), } - expectedGasUsed := uint64(104001) - expectedFee, _ := big.NewInt(0).SetString("104000010000000", 10) + expectedGasUsed := uint64(104009) + expectedFee, _ := big.NewInt(0).SetString("104000090000000", 10) refundValue, _ := big.NewInt(0).SetString("0", 10) gasUsed, fee := economicData.ComputeGasUsedAndFeeBasedOnRefundValue(tx, refundValue) @@ -1291,11 +1263,7 @@ func TestEconomicsData_ComputeGasUsedAndFeeBasedOnRefundValueSpecialBuiltIn(t *t func TestEconomicsData_ComputeGasUsedAndFeeBasedOnRefundValueSpecialBuiltInTooMuchGas(t *testing.T) { t.Parallel() - builtInCostHandler, _ := economics.NewBuiltInFunctionsCost(&economics.ArgsBuiltInFunctionCost{ - GasSchedule: testscommon.NewGasScheduleNotifierMock(defaults.FillGasMapInternal(map[string]map[string]uint64{}, 1)), - ArgsParser: smartContract.NewArgumentParser(), - }) - economicData, _ := economics.NewEconomicsData(createArgsForEconomicsDataRealFees(builtInCostHandler)) + economicData, _ := economics.NewEconomicsData(createArgsForEconomicsDataRealFees()) tx := &transaction.Transaction{ GasPrice: 1000000000, @@ -1315,7 +1283,7 @@ func TestEconomicsData_ComputeGasUsedAndFeeBasedOnRefundValueSpecialBuiltInTooMu func TestEconomicsData_ComputeGasLimitBasedOnBalance(t *testing.T) { t.Parallel() - args := createArgsForEconomicsDataRealFees(&mock.BuiltInCostHandlerStub{}) + args := createArgsForEconomicsDataRealFees() args.EpochNotifier = forking.NewGenericEpochNotifier() args.EnableEpochsHandler, _ = enablers.NewEnableEpochsHandler(config.EnableEpochs{ GasPriceModifierEnableEpoch: 1, @@ -1353,7 +1321,7 @@ func TestEconomicsData_ComputeGasLimitBasedOnBalance(t *testing.T) { func TestEconomicsData_MaxGasPriceSetGuardian(t *testing.T) { t.Parallel() - args := createArgsForEconomicsDataRealFees(&mock.BuiltInCostHandlerStub{}) + args := createArgsForEconomicsDataRealFees() maxGasPriceSetGuardianString := "2000000" expectedMaxGasPriceSetGuardian, err := strconv.ParseUint(maxGasPriceSetGuardianString, 10, 64) require.Nil(t, err) @@ -1369,7 +1337,7 @@ func TestEconomicsData_SetStatusHandler(t *testing.T) { t.Run("nil status handler should error", func(t *testing.T) { t.Parallel() - args := createArgsForEconomicsDataRealFees(&mock.BuiltInCostHandlerStub{}) + args := createArgsForEconomicsDataRealFees() economicData, _ := economics.NewEconomicsData(args) err := economicData.SetStatusHandler(nil) @@ -1378,7 +1346,7 @@ func TestEconomicsData_SetStatusHandler(t *testing.T) { t.Run("should work", func(t *testing.T) { t.Parallel() - args := createArgsForEconomicsDataRealFees(&mock.BuiltInCostHandlerStub{}) + args := createArgsForEconomicsDataRealFees() economicData, _ := economics.NewEconomicsData(args) err := economicData.SetStatusHandler(&statusHandler.AppStatusHandlerStub{}) diff --git 
a/process/economics/interface.go b/process/economics/interface.go index 766ba7563e3..41332c30eef 100644 --- a/process/economics/interface.go +++ b/process/economics/interface.go @@ -1,17 +1,9 @@ package economics import ( - "github.com/multiversx/mx-chain-core-go/data" vmcommon "github.com/multiversx/mx-chain-vm-common-go" ) -// BuiltInFunctionsCostHandler is able to calculate the cost of a built-in function call -type BuiltInFunctionsCostHandler interface { - ComputeBuiltInCost(tx data.TransactionWithFeeHandler) uint64 - IsBuiltInFuncCall(tx data.TransactionWithFeeHandler) bool - IsInterfaceNil() bool -} - // EpochNotifier raises epoch change events type EpochNotifier interface { RegisterNotifyHandler(handler vmcommon.EpochSubscriberHandler) diff --git a/process/errors.go b/process/errors.go index 52fcfd95a18..207184f3cb7 100644 --- a/process/errors.go +++ b/process/errors.go @@ -194,6 +194,9 @@ var ErrNilShardCoordinator = errors.New("nil shard coordinator") // ErrNilNodesCoordinator signals that an operation has been attempted to or with a nil nodes coordinator var ErrNilNodesCoordinator = errors.New("nil nodes coordinator") +// ErrNilStakingDataProvider signals that a nil staking data provider was used +var ErrNilStakingDataProvider = errors.New("nil staking data provider") + // ErrNilKeyGen signals that an operation has been attempted to or with a nil single sign key generator var ErrNilKeyGen = errors.New("nil key generator") @@ -981,12 +984,6 @@ var ErrMaxAccumulatedFeesExceeded = errors.New("max accumulated fees has been ex // ErrMaxDeveloperFeesExceeded signals that max developer fees has been exceeded var ErrMaxDeveloperFeesExceeded = errors.New("max developer fees has been exceeded") -// ErrNilBuiltInFunctionsCostHandler signals that a nil built-in functions cost handler has been provided -var ErrNilBuiltInFunctionsCostHandler = errors.New("nil built in functions cost handler") - -// ErrNilArgsBuiltInFunctionsConstHandler signals that a nil arguments struct for built-in functions cost handler has been provided -var ErrNilArgsBuiltInFunctionsConstHandler = errors.New("nil arguments for built in functions cost handler") - // ErrInvalidEpochStartMetaBlockConsensusPercentage signals that a small epoch start meta block consensus percentage has been provided var ErrInvalidEpochStartMetaBlockConsensusPercentage = errors.New("invalid epoch start meta block consensus percentage") diff --git a/process/factory/metachain/intermediateProcessorsContainerFactory_test.go b/process/factory/metachain/intermediateProcessorsContainerFactory_test.go index 79861ced4bd..f58b8e41f72 100644 --- a/process/factory/metachain/intermediateProcessorsContainerFactory_test.go +++ b/process/factory/metachain/intermediateProcessorsContainerFactory_test.go @@ -23,14 +23,14 @@ func createMockPubkeyConverter() *testscommon.PubkeyConverterMock { func createMockArgsNewIntermediateProcessorsFactory() metachain.ArgsNewIntermediateProcessorsContainerFactory { args := metachain.ArgsNewIntermediateProcessorsContainerFactory{ - Hasher: &hashingMocks.HasherMock{}, - Marshalizer: &mock.MarshalizerMock{}, - ShardCoordinator: mock.NewMultiShardsCoordinatorMock(5), - PubkeyConverter: createMockPubkeyConverter(), - Store: &storageStubs.ChainStorerStub{}, - PoolsHolder: dataRetrieverMock.NewPoolsHolderMock(), - EconomicsFee: &economicsmocks.EconomicsHandlerStub{}, - EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.KeepExecOrderOnCreatedSCRsFlag), + Hasher: &hashingMocks.HasherMock{}, + Marshalizer: 
&mock.MarshalizerMock{}, + ShardCoordinator: mock.NewMultiShardsCoordinatorMock(5), + PubkeyConverter: createMockPubkeyConverter(), + Store: &storageStubs.ChainStorerStub{}, + PoolsHolder: dataRetrieverMock.NewPoolsHolderMock(), + EconomicsFee: &economicsmocks.EconomicsHandlerStub{}, + EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.KeepExecOrderOnCreatedSCRsFlag), TxExecutionOrderHandler: &txExecOrderStub.TxExecutionOrderHandlerStub{}, } return args diff --git a/process/factory/metachain/vmContainerFactory.go b/process/factory/metachain/vmContainerFactory.go index c3dbb17e4e6..8f8fd90bbc9 100644 --- a/process/factory/metachain/vmContainerFactory.go +++ b/process/factory/metachain/vmContainerFactory.go @@ -44,10 +44,12 @@ type vmContainerFactory struct { scFactory vm.SystemSCContainerFactory shardCoordinator sharding.Coordinator enableEpochsHandler common.EnableEpochsHandler + nodesCoordinator vm.NodesCoordinator } // ArgsNewVMContainerFactory defines the arguments needed to create a new VM container factory type ArgsNewVMContainerFactory struct { + ArgBlockChainHook hooks.ArgBlockChainHook Economics process.EconomicsDataHandler MessageSignVerifier vm.MessageSignVerifier GasSchedule core.GasScheduleNotifier @@ -62,6 +64,7 @@ type ArgsNewVMContainerFactory struct { PubkeyConv core.PubkeyConverter BlockChainHook process.BlockChainHookWithAccountsAdapter EnableEpochsHandler common.EnableEpochsHandler + NodesCoordinator vm.NodesCoordinator } // NewVMContainerFactory is responsible for creating a new virtual machine factory object @@ -108,6 +111,9 @@ func NewVMContainerFactory(args ArgsNewVMContainerFactory) (*vmContainerFactory, if check.IfNil(args.EnableEpochsHandler) { return nil, vm.ErrNilEnableEpochsHandler } + if check.IfNil(args.NodesCoordinator) { + return nil, fmt.Errorf("%w in NewVMContainerFactory", process.ErrNilNodesCoordinator) + } cryptoHook := hooks.NewVMCryptoHook() @@ -127,6 +133,7 @@ func NewVMContainerFactory(args ArgsNewVMContainerFactory) (*vmContainerFactory, addressPubKeyConverter: args.PubkeyConv, shardCoordinator: args.ShardCoordinator, enableEpochsHandler: args.EnableEpochsHandler, + nodesCoordinator: args.NodesCoordinator, }, nil } @@ -200,6 +207,7 @@ func (vmf *vmContainerFactory) createSystemVMFactoryAndEEI() (vm.SystemSCContain AddressPubKeyConverter: vmf.addressPubKeyConverter, ShardCoordinator: vmf.shardCoordinator, EnableEpochsHandler: vmf.enableEpochsHandler, + NodesCoordinator: vmf.nodesCoordinator, } scFactory, err := systemVMFactory.NewSystemSCFactory(argsNewSystemScFactory) if err != nil { diff --git a/process/factory/metachain/vmContainerFactory_test.go b/process/factory/metachain/vmContainerFactory_test.go index 41212156305..ff542213ef4 100644 --- a/process/factory/metachain/vmContainerFactory_test.go +++ b/process/factory/metachain/vmContainerFactory_test.go @@ -17,6 +17,7 @@ import ( "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" "github.com/multiversx/mx-chain-go/testscommon/epochNotifier" "github.com/multiversx/mx-chain-go/testscommon/hashingMocks" + "github.com/multiversx/mx-chain-go/testscommon/shardingMocks" stateMock "github.com/multiversx/mx-chain-go/testscommon/state" "github.com/multiversx/mx-chain-go/vm" wasmConfig "github.com/multiversx/mx-chain-vm-go/config" @@ -62,6 +63,14 @@ func createVmContainerMockArgument(gasSchedule core.GasScheduleNotifier) ArgsNew BleedPercentagePerRound: 1, MaxNumberOfNodesForStake: 1, ActivateBLSPubKeyMessageVerification: false, + StakeLimitPercentage: 
100.0, + NodeLimitPercentage: 100.0, + }, + SoftAuctionConfig: config.SoftAuctionConfig{ + TopUpStep: "10", + MinTopUp: "1", + MaxTopUp: "32000000", + MaxNumberOfIterations: 100000, }, }, ValidatorAccountsDB: &stateMock.AccountsStub{}, @@ -69,6 +78,9 @@ func createVmContainerMockArgument(gasSchedule core.GasScheduleNotifier) ArgsNew ChanceComputer: &mock.RaterMock{}, ShardCoordinator: &mock.ShardCoordinatorStub{}, EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.StakeFlag), + NodesCoordinator: &shardingMocks.NodesCoordinatorMock{GetNumTotalEligibleCalled: func() uint64 { + return 1000 + }}, } } @@ -228,6 +240,18 @@ func TestNewVMContainerFactory_NilShardCoordinator(t *testing.T) { assert.True(t, errors.Is(err, vm.ErrNilShardCoordinator)) } +func TestNewVMContainerFactory_NilNodesCoordinatorFails(t *testing.T) { + t.Parallel() + + gasSchedule := makeGasSchedule() + argsNewVmContainerFactory := createVmContainerMockArgument(gasSchedule) + argsNewVmContainerFactory.NodesCoordinator = nil + vmf, err := NewVMContainerFactory(argsNewVmContainerFactory) + + assert.True(t, check.IfNil(vmf)) + assert.True(t, errors.Is(err, process.ErrNilNodesCoordinator)) +} + func TestNewVMContainerFactory_NilEnableEpochsHandler(t *testing.T) { t.Parallel() @@ -296,10 +320,9 @@ func TestVmContainerFactory_Create(t *testing.T) { MaxGasPriceSetGuardian: "100000", }, }, - EpochNotifier: &epochNotifier.EpochNotifierStub{}, - EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), - BuiltInFunctionsCostHandler: &mock.BuiltInCostHandlerStub{}, - TxVersionChecker: &testscommon.TxVersionCheckerStub{}, + EpochNotifier: &epochNotifier.EpochNotifierStub{}, + EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), + TxVersionChecker: &testscommon.TxVersionCheckerStub{}, } economicsData, _ := economics.NewEconomicsData(argsNewEconomicsData) @@ -342,6 +365,8 @@ func TestVmContainerFactory_Create(t *testing.T) { MaxNumberOfNodesForStake: 100, ActivateBLSPubKeyMessageVerification: false, MinUnstakeTokensValue: "1", + StakeLimitPercentage: 100.0, + NodeLimitPercentage: 100.0, }, DelegationManagerSystemSCConfig: config.DelegationManagerSystemSCConfig{ MinCreationDeposit: "100", @@ -352,12 +377,21 @@ func TestVmContainerFactory_Create(t *testing.T) { MinServiceFee: 0, MaxServiceFee: 100, }, + SoftAuctionConfig: config.SoftAuctionConfig{ + TopUpStep: "10", + MinTopUp: "1", + MaxTopUp: "32000000", + MaxNumberOfIterations: 100000, + }, }, ValidatorAccountsDB: &stateMock.AccountsStub{}, UserAccountsDB: &stateMock.AccountsStub{}, ChanceComputer: &mock.RaterMock{}, ShardCoordinator: mock.NewMultiShardsCoordinatorMock(1), EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), + NodesCoordinator: &shardingMocks.NodesCoordinatorMock{GetNumTotalEligibleCalled: func() uint64 { + return 1000 + }}, } vmf, err := NewVMContainerFactory(argsNewVMContainerFactory) assert.NotNil(t, vmf) diff --git a/process/factory/shard/intermediateProcessorsContainerFactory_test.go b/process/factory/shard/intermediateProcessorsContainerFactory_test.go index 2f2cc7a9c52..5835a7361ac 100644 --- a/process/factory/shard/intermediateProcessorsContainerFactory_test.go +++ b/process/factory/shard/intermediateProcessorsContainerFactory_test.go @@ -57,14 +57,14 @@ func createMockPubkeyConverter() *testscommon.PubkeyConverterMock { func createMockArgsNewIntermediateProcessorsFactory() shard.ArgsNewIntermediateProcessorsContainerFactory { args := 
shard.ArgsNewIntermediateProcessorsContainerFactory{ - Hasher: &hashingMocks.HasherMock{}, - Marshalizer: &mock.MarshalizerMock{}, - ShardCoordinator: mock.NewMultiShardsCoordinatorMock(5), - PubkeyConverter: createMockPubkeyConverter(), - Store: &storageStubs.ChainStorerStub{}, - PoolsHolder: createDataPools(), - EconomicsFee: &economicsmocks.EconomicsHandlerStub{}, - EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.KeepExecOrderOnCreatedSCRsFlag), + Hasher: &hashingMocks.HasherMock{}, + Marshalizer: &mock.MarshalizerMock{}, + ShardCoordinator: mock.NewMultiShardsCoordinatorMock(5), + PubkeyConverter: createMockPubkeyConverter(), + Store: &storageStubs.ChainStorerStub{}, + PoolsHolder: createDataPools(), + EconomicsFee: &economicsmocks.EconomicsHandlerStub{}, + EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.KeepExecOrderOnCreatedSCRsFlag), TxExecutionOrderHandler: &txExecOrderStub.TxExecutionOrderHandlerStub{}, } return args diff --git a/process/factory/shard/vmContainerFactory_test.go b/process/factory/shard/vmContainerFactory_test.go index df3ffab673e..a6d7184bd77 100644 --- a/process/factory/shard/vmContainerFactory_test.go +++ b/process/factory/shard/vmContainerFactory_test.go @@ -1,6 +1,7 @@ package shard import ( + "runtime" "sync" "testing" @@ -128,8 +129,6 @@ func TestNewVMContainerFactory_NilBlockChainHookShouldErr(t *testing.T) { } func TestNewVMContainerFactory_NilHasherShouldErr(t *testing.T) { - t.Parallel() - args := createMockVMAccountsArguments() args.Hasher = nil vmf, err := NewVMContainerFactory(args) @@ -139,7 +138,9 @@ func TestNewVMContainerFactory_NilHasherShouldErr(t *testing.T) { } func TestNewVMContainerFactory_OkValues(t *testing.T) { - t.Parallel() + if runtime.GOARCH == "arm64" { + t.Skip("skipping test on arm64") + } args := createMockVMAccountsArguments() vmf, err := NewVMContainerFactory(args) @@ -150,7 +151,9 @@ func TestNewVMContainerFactory_OkValues(t *testing.T) { } func TestVmContainerFactory_Create(t *testing.T) { - t.Parallel() + if runtime.GOARCH == "arm64" { + t.Skip("skipping test on arm64") + } args := createMockVMAccountsArguments() vmf, _ := NewVMContainerFactory(args) @@ -175,6 +178,10 @@ func TestVmContainerFactory_Create(t *testing.T) { } func TestVmContainerFactory_ResolveWasmVMVersion(t *testing.T) { + if runtime.GOARCH == "arm64" { + t.Skip("skipping test on arm64") + } + epochNotifierInstance := forking.NewGenericEpochNotifier() numCalled := 0 diff --git a/process/interface.go b/process/interface.go index d796bcd95c6..69b1b139e89 100644 --- a/process/interface.go +++ b/process/interface.go @@ -287,9 +287,9 @@ type ValidatorStatisticsProcessor interface { Process(shardValidatorInfo data.ShardValidatorInfoHandler) error IsInterfaceNil() bool RootHash() ([]byte, error) - ResetValidatorStatisticsAtNewEpoch(vInfos map[uint32][]*state.ValidatorInfo) error - GetValidatorInfoForRootHash(rootHash []byte) (map[uint32][]*state.ValidatorInfo, error) - ProcessRatingsEndOfEpoch(validatorInfos map[uint32][]*state.ValidatorInfo, epoch uint32) error + ResetValidatorStatisticsAtNewEpoch(vInfos state.ShardValidatorsInfoMapHandler) error + GetValidatorInfoForRootHash(rootHash []byte) (state.ShardValidatorsInfoMapHandler, error) + ProcessRatingsEndOfEpoch(validatorInfos state.ShardValidatorsInfoMapHandler, epoch uint32) error Commit() ([]byte, error) DisplayRatings(epoch uint32) SetLastFinalizedRootHash([]byte) @@ -318,6 +318,8 @@ type TransactionLogProcessorDatabase interface { // 
ValidatorsProvider is the main interface for validators' provider type ValidatorsProvider interface { GetLatestValidators() map[string]*validator.ValidatorStatistics + GetAuctionList() ([]*common.AuctionListValidatorAPIResponse, error) + ForceUpdate() error IsInterfaceNil() bool Close() error } @@ -945,10 +947,10 @@ type EpochStartDataCreator interface { // RewardsCreator defines the functionality for the metachain to create rewards at end of epoch type RewardsCreator interface { CreateRewardsMiniBlocks( - metaBlock data.MetaHeaderHandler, validatorsInfo map[uint32][]*state.ValidatorInfo, computedEconomics *block.Economics, + metaBlock data.MetaHeaderHandler, validatorsInfo state.ShardValidatorsInfoMapHandler, computedEconomics *block.Economics, ) (block.MiniBlockSlice, error) VerifyRewardsMiniBlocks( - metaBlock data.MetaHeaderHandler, validatorsInfo map[uint32][]*state.ValidatorInfo, computedEconomics *block.Economics, + metaBlock data.MetaHeaderHandler, validatorsInfo state.ShardValidatorsInfoMapHandler, computedEconomics *block.Economics, ) error GetProtocolSustainabilityRewards() *big.Int GetLocalTxCache() epochStart.TransactionCacher @@ -962,8 +964,8 @@ type RewardsCreator interface { // EpochStartValidatorInfoCreator defines the functionality for the metachain to create validator statistics at end of epoch type EpochStartValidatorInfoCreator interface { - CreateValidatorInfoMiniBlocks(validatorInfo map[uint32][]*state.ValidatorInfo) (block.MiniBlockSlice, error) - VerifyValidatorInfoMiniBlocks(miniBlocks []*block.MiniBlock, validatorsInfo map[uint32][]*state.ValidatorInfo) error + CreateValidatorInfoMiniBlocks(validatorInfo state.ShardValidatorsInfoMapHandler) (block.MiniBlockSlice, error) + VerifyValidatorInfoMiniBlocks(miniBlocks []*block.MiniBlock, validatorsInfo state.ShardValidatorsInfoMapHandler) error GetLocalValidatorInfoCache() epochStart.ValidatorInfoCacher CreateMarshalledData(body *block.Body) map[string][][]byte GetValidatorInfoTxs(body *block.Body) map[string]*state.ShardValidatorInfo @@ -975,7 +977,10 @@ type EpochStartValidatorInfoCreator interface { // EpochStartSystemSCProcessor defines the functionality for the metachain to process system smart contract and end of epoch type EpochStartSystemSCProcessor interface { - ProcessSystemSmartContract(validatorInfos map[uint32][]*state.ValidatorInfo, nonce uint64, epoch uint32) error + ProcessSystemSmartContract( + validatorsInfoMap state.ShardValidatorsInfoMapHandler, + header data.HeaderHandler, + ) error ProcessDelegationRewards( miniBlocks block.MiniBlockSlice, rewardTxs epochStart.TransactionCacher, diff --git a/process/mock/builtInCostHandlerStub.go b/process/mock/builtInCostHandlerStub.go deleted file mode 100644 index 4ee3b23b062..00000000000 --- a/process/mock/builtInCostHandlerStub.go +++ /dev/null @@ -1,24 +0,0 @@ -package mock - -import ( - "github.com/multiversx/mx-chain-core-go/data" -) - -// BuiltInCostHandlerStub - -type BuiltInCostHandlerStub struct { -} - -// ComputeBuiltInCost - -func (b *BuiltInCostHandlerStub) ComputeBuiltInCost(_ data.TransactionWithFeeHandler) uint64 { - return 1 -} - -// IsBuiltInFuncCall - -func (b *BuiltInCostHandlerStub) IsBuiltInFuncCall(_ data.TransactionWithFeeHandler) bool { - return false -} - -// IsInterfaceNil - -func (b *BuiltInCostHandlerStub) IsInterfaceNil() bool { - return b == nil -} diff --git a/process/mock/epochEconomicsStub.go b/process/mock/epochEconomicsStub.go index 99e8b0dd359..7a65f7c3fcf 100644 --- a/process/mock/epochEconomicsStub.go +++ 
b/process/mock/epochEconomicsStub.go @@ -19,7 +19,9 @@ func (e *EpochEconomicsStub) ComputeEndOfEpochEconomics(metaBlock *block.MetaBlo if e.ComputeEndOfEpochEconomicsCalled != nil { return e.ComputeEndOfEpochEconomicsCalled(metaBlock) } - return &block.Economics{}, nil + return &block.Economics{ + RewardsForProtocolSustainability: big.NewInt(0), + }, nil } // VerifyRewardsPerBlock - diff --git a/process/mock/epochRewardsCreatorStub.go b/process/mock/epochRewardsCreatorStub.go deleted file mode 100644 index ce17c1e636a..00000000000 --- a/process/mock/epochRewardsCreatorStub.go +++ /dev/null @@ -1,109 +0,0 @@ -package mock - -import ( - "math/big" - - "github.com/multiversx/mx-chain-core-go/data" - "github.com/multiversx/mx-chain-core-go/data/block" - "github.com/multiversx/mx-chain-go/epochStart" - "github.com/multiversx/mx-chain-go/state" -) - -// EpochRewardsCreatorStub - -type EpochRewardsCreatorStub struct { - CreateRewardsMiniBlocksCalled func( - metaBlock data.MetaHeaderHandler, validatorsInfo map[uint32][]*state.ValidatorInfo, computedEconomics *block.Economics, - ) (block.MiniBlockSlice, error) - VerifyRewardsMiniBlocksCalled func( - metaBlock data.MetaHeaderHandler, validatorsInfo map[uint32][]*state.ValidatorInfo, computedEconomics *block.Economics, - ) error - CreateMarshalledDataCalled func(body *block.Body) map[string][][]byte - SaveBlockDataToStorageCalled func(metaBlock data.MetaHeaderHandler, body *block.Body) - DeleteBlockDataFromStorageCalled func(metaBlock data.MetaHeaderHandler, body *block.Body) - RemoveBlockDataFromPoolsCalled func(metaBlock data.MetaHeaderHandler, body *block.Body) - GetRewardsTxsCalled func(body *block.Body) map[string]data.TransactionHandler - GetProtocolSustainCalled func() *big.Int - GetLocalTxCacheCalled func() epochStart.TransactionCacher -} - -// GetProtocolSustainabilityRewards - -func (e *EpochRewardsCreatorStub) GetProtocolSustainabilityRewards() *big.Int { - if e.GetProtocolSustainCalled != nil { - return e.GetProtocolSustainCalled() - } - return big.NewInt(0) -} - -// GetLocalTxCache - -func (e *EpochRewardsCreatorStub) GetLocalTxCache() epochStart.TransactionCacher { - if e.GetLocalTxCacheCalled != nil { - return e.GetLocalTxCacheCalled() - } - return &TxForCurrentBlockStub{} -} - -// CreateRewardsMiniBlocks - -func (e *EpochRewardsCreatorStub) CreateRewardsMiniBlocks( - metaBlock data.MetaHeaderHandler, - validatorsInfo map[uint32][]*state.ValidatorInfo, - computedEconomics *block.Economics, -) (block.MiniBlockSlice, error) { - if e.CreateRewardsMiniBlocksCalled != nil { - return e.CreateRewardsMiniBlocksCalled(metaBlock, validatorsInfo, computedEconomics) - } - return nil, nil -} - -// VerifyRewardsMiniBlocks - -func (e *EpochRewardsCreatorStub) VerifyRewardsMiniBlocks( - metaBlock data.MetaHeaderHandler, - validatorsInfo map[uint32][]*state.ValidatorInfo, - computedEconomics *block.Economics, -) error { - if e.VerifyRewardsMiniBlocksCalled != nil { - return e.VerifyRewardsMiniBlocksCalled(metaBlock, validatorsInfo, computedEconomics) - } - return nil -} - -// CreateMarshalledData - -func (e *EpochRewardsCreatorStub) CreateMarshalledData(body *block.Body) map[string][][]byte { - if e.CreateMarshalledDataCalled != nil { - return e.CreateMarshalledDataCalled(body) - } - return nil -} - -// GetRewardsTxs - -func (e *EpochRewardsCreatorStub) GetRewardsTxs(body *block.Body) map[string]data.TransactionHandler { - if e.GetRewardsTxsCalled != nil { - return e.GetRewardsTxsCalled(body) - } - return nil -} - -// SaveBlockDataToStorage - -func (e 
*EpochRewardsCreatorStub) SaveBlockDataToStorage(metaBlock data.MetaHeaderHandler, body *block.Body) { - if e.SaveBlockDataToStorageCalled != nil { - e.SaveBlockDataToStorageCalled(metaBlock, body) - } -} - -// DeleteBlockDataFromStorage - -func (e *EpochRewardsCreatorStub) DeleteBlockDataFromStorage(metaBlock data.MetaHeaderHandler, body *block.Body) { - if e.DeleteBlockDataFromStorageCalled != nil { - e.DeleteBlockDataFromStorageCalled(metaBlock, body) - } -} - -// IsInterfaceNil - -func (e *EpochRewardsCreatorStub) IsInterfaceNil() bool { - return e == nil -} - -// RemoveBlockDataFromPools - -func (e *EpochRewardsCreatorStub) RemoveBlockDataFromPools(metaBlock data.MetaHeaderHandler, body *block.Body) { - if e.RemoveBlockDataFromPoolsCalled != nil { - e.RemoveBlockDataFromPoolsCalled(metaBlock, body) - } -} diff --git a/process/mock/epochStartDataCreatorStub.go b/process/mock/epochStartDataCreatorStub.go index 1cbfccaec5b..dd38c5a1198 100644 --- a/process/mock/epochStartDataCreatorStub.go +++ b/process/mock/epochStartDataCreatorStub.go @@ -1,6 +1,9 @@ package mock -import "github.com/multiversx/mx-chain-core-go/data/block" +import ( + "github.com/multiversx/mx-chain-core-go/data/block" + "math/big" +) // EpochStartDataCreatorStub - type EpochStartDataCreatorStub struct { @@ -13,7 +16,11 @@ func (e *EpochStartDataCreatorStub) CreateEpochStartData() (*block.EpochStart, e if e.CreateEpochStartDataCalled != nil { return e.CreateEpochStartDataCalled() } - return &block.EpochStart{}, nil + return &block.EpochStart{ + LastFinalizedHeaders: []block.EpochStartShardData{{}}, + Economics: block.Economics{ + RewardsForProtocolSustainability: big.NewInt(0)}, + }, nil } // VerifyEpochStartDataForMetablock - diff --git a/process/mock/epochStartSystemSCStub.go b/process/mock/epochStartSystemSCStub.go deleted file mode 100644 index fd2c92553cf..00000000000 --- a/process/mock/epochStartSystemSCStub.go +++ /dev/null @@ -1,46 +0,0 @@ -package mock - -import ( - "github.com/multiversx/mx-chain-core-go/data/block" - "github.com/multiversx/mx-chain-go/epochStart" - "github.com/multiversx/mx-chain-go/state" -) - -// EpochStartSystemSCStub - -type EpochStartSystemSCStub struct { - ProcessSystemSmartContractCalled func(validatorInfos map[uint32][]*state.ValidatorInfo, nonce uint64, epoch uint32) error - ProcessDelegationRewardsCalled func(miniBlocks block.MiniBlockSlice, txCache epochStart.TransactionCacher) error - ToggleUnStakeUnBondCalled func(value bool) error -} - -// ToggleUnStakeUnBond - -func (e *EpochStartSystemSCStub) ToggleUnStakeUnBond(value bool) error { - if e.ToggleUnStakeUnBondCalled != nil { - return e.ToggleUnStakeUnBondCalled(value) - } - return nil -} - -// ProcessSystemSmartContract - -func (e *EpochStartSystemSCStub) ProcessSystemSmartContract(validatorInfos map[uint32][]*state.ValidatorInfo, nonce uint64, epoch uint32) error { - if e.ProcessSystemSmartContractCalled != nil { - return e.ProcessSystemSmartContractCalled(validatorInfos, nonce, epoch) - } - return nil -} - -// ProcessDelegationRewards - -func (e *EpochStartSystemSCStub) ProcessDelegationRewards( - miniBlocks block.MiniBlockSlice, - txCache epochStart.TransactionCacher, -) error { - if e.ProcessDelegationRewardsCalled != nil { - return e.ProcessDelegationRewardsCalled(miniBlocks, txCache) - } - return nil -} - -// IsInterfaceNil - -func (e *EpochStartSystemSCStub) IsInterfaceNil() bool { - return e == nil -} diff --git a/process/mock/nodesSetupStub.go b/process/mock/nodesSetupStub.go deleted file mode 100644 index 
b8e21ce5fcb..00000000000 --- a/process/mock/nodesSetupStub.go +++ /dev/null @@ -1,188 +0,0 @@ -package mock - -import "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" - -// NodesSetupStub - -type NodesSetupStub struct { - InitialNodesInfoForShardCalled func(shardId uint32) ([]nodesCoordinator.GenesisNodeInfoHandler, []nodesCoordinator.GenesisNodeInfoHandler, error) - InitialNodesInfoCalled func() (map[uint32][]nodesCoordinator.GenesisNodeInfoHandler, map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) - GetStartTimeCalled func() int64 - GetRoundDurationCalled func() uint64 - GetShardConsensusGroupSizeCalled func() uint32 - GetMetaConsensusGroupSizeCalled func() uint32 - NumberOfShardsCalled func() uint32 - MinNumberOfNodesCalled func() uint32 - AllInitialNodesCalled func() []nodesCoordinator.GenesisNodeInfoHandler - GetAdaptivityCalled func() bool - GetHysteresisCalled func() float32 - GetShardIDForPubKeyCalled func(pubkey []byte) (uint32, error) - InitialEligibleNodesPubKeysForShardCalled func(shardId uint32) ([]string, error) - InitialNodesPubKeysCalled func() map[uint32][]string - MinNumberOfMetaNodesCalled func() uint32 - MinNumberOfShardNodesCalled func() uint32 - MinNumberOfNodesWithHysteresisCalled func() uint32 - MinShardHysteresisNodesCalled func() uint32 - MinMetaHysteresisNodesCalled func() uint32 -} - -// MinNumberOfNodes - -func (n *NodesSetupStub) MinNumberOfNodes() uint32 { - if n.MinNumberOfNodesCalled != nil { - return n.MinNumberOfNodesCalled() - } - return 1 -} - -// GetStartTime - -func (n *NodesSetupStub) GetStartTime() int64 { - if n.GetStartTimeCalled != nil { - return n.GetStartTimeCalled() - } - return 0 -} - -// GetRoundDuration - -func (n *NodesSetupStub) GetRoundDuration() uint64 { - if n.GetRoundDurationCalled != nil { - return n.GetRoundDurationCalled() - } - return 0 -} - -// GetMetaConsensusGroupSize - -func (n *NodesSetupStub) GetMetaConsensusGroupSize() uint32 { - if n.GetMetaConsensusGroupSizeCalled != nil { - return n.GetMetaConsensusGroupSizeCalled() - } - return 0 -} - -func (n *NodesSetupStub) GetShardConsensusGroupSize() uint32 { - if n.GetMetaConsensusGroupSizeCalled != nil { - return n.GetShardConsensusGroupSizeCalled() - } - return 0 -} - -// NumberOfShards - -func (n *NodesSetupStub) NumberOfShards() uint32 { - if n.NumberOfShardsCalled != nil { - return n.NumberOfShardsCalled() - } - return 0 -} - -// InitialNodesInfoForShard - -func (n *NodesSetupStub) InitialNodesInfoForShard(shardId uint32) ([]nodesCoordinator.GenesisNodeInfoHandler, []nodesCoordinator.GenesisNodeInfoHandler, error) { - if n.InitialNodesInfoForShardCalled != nil { - return n.InitialNodesInfoForShardCalled(shardId) - } - return nil, nil, nil -} - -// InitialNodesInfo - -func (n *NodesSetupStub) InitialNodesInfo() (map[uint32][]nodesCoordinator.GenesisNodeInfoHandler, map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) { - if n.InitialNodesInfoCalled != nil { - return n.InitialNodesInfoCalled() - } - return nil, nil -} - -// AllInitialNodes - -func (n *NodesSetupStub) AllInitialNodes() []nodesCoordinator.GenesisNodeInfoHandler { - if n.AllInitialNodesCalled != nil { - return n.AllInitialNodesCalled() - } - return nil -} - -// GetAdaptivity - -func (n *NodesSetupStub) GetAdaptivity() bool { - if n.GetAdaptivityCalled != nil { - return n.GetAdaptivityCalled() - } - - return false -} - -// GetHysteresis - -func (n *NodesSetupStub) GetHysteresis() float32 { - if n.GetHysteresisCalled != nil { - return n.GetHysteresisCalled() - } - - return 0 -} - -// 
GetShardIDForPubKey - -func (n *NodesSetupStub) GetShardIDForPubKey(pubkey []byte) (uint32, error) { - if n.GetShardIDForPubKeyCalled != nil { - return n.GetShardIDForPubKeyCalled(pubkey) - } - return 0, nil -} - -// InitialEligibleNodesPubKeysForShard - -func (n *NodesSetupStub) InitialEligibleNodesPubKeysForShard(shardId uint32) ([]string, error) { - if n.InitialEligibleNodesPubKeysForShardCalled != nil { - return n.InitialEligibleNodesPubKeysForShardCalled(shardId) - } - - return []string{"val1", "val2"}, nil -} - -// InitialNodesPubKeys - -func (n *NodesSetupStub) InitialNodesPubKeys() map[uint32][]string { - if n.InitialNodesPubKeysCalled != nil { - return n.InitialNodesPubKeysCalled() - } - - return map[uint32][]string{0: {"val1", "val2"}} -} - -// MinNumberOfMetaNodes - -func (n *NodesSetupStub) MinNumberOfMetaNodes() uint32 { - if n.MinNumberOfMetaNodesCalled != nil { - return n.MinNumberOfMetaNodesCalled() - } - - return 1 -} - -// MinNumberOfShardNodes - -func (n *NodesSetupStub) MinNumberOfShardNodes() uint32 { - if n.MinNumberOfShardNodesCalled != nil { - return n.MinNumberOfShardNodesCalled() - } - - return 1 -} - -// MinNumberOfNodesWithHysteresis - -func (n *NodesSetupStub) MinNumberOfNodesWithHysteresis() uint32 { - if n.MinNumberOfNodesWithHysteresisCalled != nil { - return n.MinNumberOfNodesWithHysteresisCalled() - } - return n.MinNumberOfNodes() -} - -// MinShardHysteresisNodes - -func (n *NodesSetupStub) MinShardHysteresisNodes() uint32 { - if n.MinShardHysteresisNodesCalled != nil { - return n.MinShardHysteresisNodesCalled() - } - return 1 -} - -// MinMetaHysteresisNodes - -func (n *NodesSetupStub) MinMetaHysteresisNodes() uint32 { - if n.MinMetaHysteresisNodesCalled != nil { - return n.MinMetaHysteresisNodesCalled() - } - return 1 -} - -// IsInterfaceNil - -func (n *NodesSetupStub) IsInterfaceNil() bool { - return n == nil -} diff --git a/process/mock/transactionSimulatorStub.go b/process/mock/transactionSimulatorStub.go index 70363230936..971cda66d04 100644 --- a/process/mock/transactionSimulatorStub.go +++ b/process/mock/transactionSimulatorStub.go @@ -1,19 +1,20 @@ package mock import ( + "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-core-go/data/transaction" txSimData "github.com/multiversx/mx-chain-go/process/transactionEvaluator/data" ) // TransactionSimulatorStub - type TransactionSimulatorStub struct { - ProcessTxCalled func(tx *transaction.Transaction) (*txSimData.SimulationResultsWithVMOutput, error) + ProcessTxCalled func(tx *transaction.Transaction, currentHeader data.HeaderHandler) (*txSimData.SimulationResultsWithVMOutput, error) } // ProcessTx - -func (tss *TransactionSimulatorStub) ProcessTx(tx *transaction.Transaction) (*txSimData.SimulationResultsWithVMOutput, error) { +func (tss *TransactionSimulatorStub) ProcessTx(tx *transaction.Transaction, currentHeader data.HeaderHandler) (*txSimData.SimulationResultsWithVMOutput, error) { if tss.ProcessTxCalled != nil { - return tss.ProcessTxCalled(tx) + return tss.ProcessTxCalled(tx, currentHeader) } return nil, nil diff --git a/process/mock/validatorsProviderStub.go b/process/mock/validatorsProviderStub.go deleted file mode 100644 index 98ea652340b..00000000000 --- a/process/mock/validatorsProviderStub.go +++ /dev/null @@ -1,28 +0,0 @@ -package mock - -import ( - "github.com/multiversx/mx-chain-core-go/data/validator" -) - -// ValidatorsProviderStub - -type ValidatorsProviderStub struct { - GetLatestValidatorsCalled func() map[string]*validator.ValidatorStatistics -} - -// 
GetLatestValidators - -func (vp *ValidatorsProviderStub) GetLatestValidators() map[string]*validator.ValidatorStatistics { - if vp.GetLatestValidatorsCalled != nil { - return vp.GetLatestValidatorsCalled() - } - return nil -} - -// Close - -func (vp *ValidatorsProviderStub) Close() error { - return nil -} - -// IsInterfaceNil - -func (vp *ValidatorsProviderStub) IsInterfaceNil() bool { - return vp == nil -} diff --git a/process/peer/interface.go b/process/peer/interface.go index 94377bfdd53..2a8a447e694 100644 --- a/process/peer/interface.go +++ b/process/peer/interface.go @@ -2,6 +2,8 @@ package peer import ( "github.com/multiversx/mx-chain-go/dataRetriever" + "github.com/multiversx/mx-chain-go/epochStart" + "github.com/multiversx/mx-chain-go/state" ) // DataPool indicates the main functionality needed in order to fetch the required blocks from the pool @@ -9,3 +11,12 @@ type DataPool interface { Headers() dataRetriever.HeadersPool IsInterfaceNil() bool } + +// StakingDataProviderAPI is able to provide staking data from the system smart contracts +type StakingDataProviderAPI interface { + ComputeUnQualifiedNodes(validatorInfos state.ShardValidatorsInfoMapHandler) ([][]byte, map[string][][]byte, error) + FillValidatorInfo(validator state.ValidatorInfoHandler) error + GetOwnersData() map[string]*epochStart.OwnerData + Clean() + IsInterfaceNil() bool +} diff --git a/process/peer/process.go b/process/peer/process.go index 5c3364fe5f7..4c04de6a25d 100644 --- a/process/peer/process.go +++ b/process/peer/process.go @@ -196,6 +196,18 @@ func (vs *validatorStatistics) saveNodesCoordinatorUpdates(epoch uint32) (bool, } nodeForcedToRemain = nodeForcedToRemain || tmpNodeForcedToRemain + if vs.enableEpochsHandler.IsFlagEnabled(common.StakingV4Step2Flag) { + nodesMap, err = vs.nodesCoordinator.GetAllShuffledOutValidatorsPublicKeys(epoch) + if err != nil { + return false, err + } + + _, err = vs.saveUpdatesForNodesMap(nodesMap, common.AuctionList) + if err != nil { + return false, err + } + } + return nodeForcedToRemain, nil } @@ -238,12 +250,16 @@ func (vs *validatorStatistics) saveUpdatesForList( isNodeLeaving := (peerType == common.WaitingList || peerType == common.EligibleList) && peerAcc.GetList() == string(common.LeavingList) isNodeWithLowRating := vs.isValidatorWithLowRating(peerAcc) isNodeJailed := vs.enableEpochsHandler.IsFlagEnabled(common.SwitchJailWaitingFlag) && peerType == common.InactiveList && isNodeWithLowRating + isStakingV4Started := vs.enableEpochsHandler.IsFlagEnabled(common.StakingV4StartedFlag) if isNodeJailed { - peerAcc.SetListAndIndex(shardID, string(common.JailedList), uint32(index)) + peerAcc.SetListAndIndex(shardID, string(common.JailedList), uint32(index), isStakingV4Started) } else if isNodeLeaving { - peerAcc.SetListAndIndex(shardID, string(common.LeavingList), uint32(index)) + peerAcc.SetListAndIndex(shardID, string(common.LeavingList), uint32(index), isStakingV4Started) + if isStakingV4Started { + peerAcc.SetPreviousList(string(peerType)) + } } else { - peerAcc.SetListAndIndex(shardID, string(peerType), uint32(index)) + peerAcc.SetListAndIndex(shardID, string(peerType), uint32(index), isStakingV4Started) } err = vs.peerAdapter.SaveAccount(peerAcc) @@ -444,23 +460,19 @@ func (vs *validatorStatistics) RootHash() ([]byte, error) { func (vs *validatorStatistics) getValidatorDataFromLeaves( leavesChannels *common.TrieIteratorChannels, -) (map[uint32][]*state.ValidatorInfo, error) { - - validators := make(map[uint32][]*state.ValidatorInfo, 
vs.shardCoordinator.NumberOfShards()+1) - for i := uint32(0); i < vs.shardCoordinator.NumberOfShards(); i++ { - validators[i] = make([]*state.ValidatorInfo, 0) - } - validators[core.MetachainShardId] = make([]*state.ValidatorInfo, 0) - +) (state.ShardValidatorsInfoMapHandler, error) { + validators := state.NewShardValidatorsInfoMap() for pa := range leavesChannels.LeavesChan { peerAccount, err := vs.unmarshalPeer(pa) if err != nil { return nil, err } - currentShardId := peerAccount.GetShardId() validatorInfoData := vs.PeerAccountToValidatorInfo(peerAccount) - validators[currentShardId] = append(validators[currentShardId], validatorInfoData) + err = validators.Add(validatorInfoData) + if err != nil { + return nil, err + } } err := leavesChannels.ErrChan.ReadFromChanNonBlocking() @@ -503,7 +515,9 @@ func (vs *validatorStatistics) PeerAccountToValidatorInfo(peerAccount state.Peer PublicKey: peerAccount.AddressBytes(), ShardId: peerAccount.GetShardId(), List: list, + PreviousList: peerAccount.GetPreviousList(), Index: peerAccount.GetIndexInList(), + PreviousIndex: peerAccount.GetPreviousIndexInList(), TempRating: peerAccount.GetTempRating(), Rating: peerAccount.GetRating(), RatingModifier: ratingModifier, @@ -555,7 +569,7 @@ func (vs *validatorStatistics) jailValidatorIfBadRatingAndInactive(validatorAcco return } - validatorAccount.SetListAndIndex(validatorAccount.GetShardId(), string(common.JailedList), validatorAccount.GetIndexInList()) + validatorAccount.SetListAndIndex(validatorAccount.GetShardId(), string(common.JailedList), validatorAccount.GetIndexInList(), vs.enableEpochsHandler.IsFlagEnabled(common.StakingV4StartedFlag)) } func (vs *validatorStatistics) unmarshalPeer(peerAccountData core.KeyValueHolder) (state.PeerAccountHandler, error) { @@ -571,7 +585,7 @@ func (vs *validatorStatistics) unmarshalPeer(peerAccountData core.KeyValueHolder } // GetValidatorInfoForRootHash returns all the peer accounts from the trie with the given rootHash -func (vs *validatorStatistics) GetValidatorInfoForRootHash(rootHash []byte) (map[uint32][]*state.ValidatorInfo, error) { +func (vs *validatorStatistics) GetValidatorInfoForRootHash(rootHash []byte) (state.ShardValidatorsInfoMapHandler, error) { sw := core.NewStopWatch() sw.Start("GetValidatorInfoForRootHash") defer func() { @@ -598,10 +612,10 @@ func (vs *validatorStatistics) GetValidatorInfoForRootHash(rootHash []byte) (map // ProcessRatingsEndOfEpoch makes end of epoch process on the rating func (vs *validatorStatistics) ProcessRatingsEndOfEpoch( - validatorInfos map[uint32][]*state.ValidatorInfo, + validatorInfos state.ShardValidatorsInfoMapHandler, epoch uint32, ) error { - if len(validatorInfos) == 0 { + if validatorInfos == nil || len(validatorInfos.GetAllValidatorsInfo()) == 0 { return process.ErrNilValidatorInfos } @@ -610,14 +624,14 @@ func (vs *validatorStatistics) ProcessRatingsEndOfEpoch( } signedThreshold := vs.rater.GetSignedBlocksThreshold() - for shardId, validators := range validatorInfos { + for shardId, validators := range validatorInfos.GetShardValidatorsInfoMap() { for _, validator := range validators { if !vs.enableEpochsHandler.IsFlagEnabled(common.StakingV2FlagAfterEpoch) { - if validator.List != string(common.EligibleList) { + if validator.GetList() != string(common.EligibleList) { continue } } else { - if validator.List != string(common.EligibleList) && !validatorInfo.WasLeavingEligibleInCurrentEpoch(validator) { + if validator.GetList() != string(common.EligibleList) && 
!validatorInfo.WasLeavingEligibleInCurrentEpoch(validator) { continue } } @@ -633,7 +647,7 @@ func (vs *validatorStatistics) ProcessRatingsEndOfEpoch( } func (vs *validatorStatistics) verifySignaturesBelowSignedThreshold( - validator *state.ValidatorInfo, + validator state.ValidatorInfoHandler, signedThreshold float32, shardId uint32, epoch uint32, @@ -642,19 +656,19 @@ func (vs *validatorStatistics) verifySignaturesBelowSignedThreshold( return nil } - validatorOccurrences := core.MaxUint32(1, validator.ValidatorSuccess+validator.ValidatorFailure+validator.ValidatorIgnoredSignatures) - computedThreshold := float32(validator.ValidatorSuccess) / float32(validatorOccurrences) + validatorOccurrences := core.MaxUint32(1, validator.GetValidatorSuccess()+validator.GetValidatorFailure()+validator.GetValidatorIgnoredSignatures()) + computedThreshold := float32(validator.GetValidatorSuccess()) / float32(validatorOccurrences) if computedThreshold <= signedThreshold { increasedRatingTimes := uint32(0) if !vs.enableEpochsHandler.IsFlagEnabled(common.BelowSignedThresholdFlag) { - increasedRatingTimes = validator.ValidatorFailure + increasedRatingTimes = validator.GetValidatorFailure() } else { - increasedRatingTimes = validator.ValidatorSuccess + validator.ValidatorIgnoredSignatures + increasedRatingTimes = validator.GetValidatorSuccess() + validator.GetValidatorIgnoredSignatures() } - newTempRating := vs.rater.RevertIncreaseValidator(shardId, validator.TempRating, increasedRatingTimes) - pa, err := vs.loadPeerAccount(validator.PublicKey) + newTempRating := vs.rater.RevertIncreaseValidator(shardId, validator.GetTempRating(), increasedRatingTimes) + pa, err := vs.loadPeerAccount(validator.GetPublicKey()) if err != nil { return err } @@ -667,23 +681,23 @@ func (vs *validatorStatistics) verifySignaturesBelowSignedThreshold( } log.Debug("below signed blocks threshold", - "pk", validator.PublicKey, + "pk", validator.GetPublicKey(), "signed %", computedThreshold, - "validatorSuccess", validator.ValidatorSuccess, - "validatorFailure", validator.ValidatorFailure, - "validatorIgnored", validator.ValidatorIgnoredSignatures, + "validatorSuccess", validator.GetValidatorSuccess(), + "validatorFailure", validator.GetValidatorFailure(), + "validatorIgnored", validator.GetValidatorIgnoredSignatures(), "new tempRating", newTempRating, - "old tempRating", validator.TempRating, + "old tempRating", validator.GetTempRating(), ) - validator.TempRating = newTempRating + validator.SetTempRating(newTempRating) } return nil } // ResetValidatorStatisticsAtNewEpoch resets the validator info at the start of a new epoch -func (vs *validatorStatistics) ResetValidatorStatisticsAtNewEpoch(vInfos map[uint32][]*state.ValidatorInfo) error { +func (vs *validatorStatistics) ResetValidatorStatisticsAtNewEpoch(vInfos state.ShardValidatorsInfoMapHandler) error { sw := core.NewStopWatch() sw.Start("ResetValidatorStatisticsAtNewEpoch") defer func() { @@ -691,24 +705,22 @@ func (vs *validatorStatistics) ResetValidatorStatisticsAtNewEpoch(vInfos map[uin log.Debug("ResetValidatorStatisticsAtNewEpoch", sw.GetMeasurements()...) 
}() - for _, validators := range vInfos { - for _, validator := range validators { - account, err := vs.peerAdapter.LoadAccount(validator.GetPublicKey()) - if err != nil { - return err - } + for _, validator := range vInfos.GetAllValidatorsInfo() { + account, err := vs.peerAdapter.LoadAccount(validator.GetPublicKey()) + if err != nil { + return err + } - peerAccount, ok := account.(state.PeerAccountHandler) - if !ok { - return process.ErrWrongTypeAssertion - } - peerAccount.ResetAtNewEpoch() - vs.setToJailedIfNeeded(peerAccount, validator) + peerAccount, ok := account.(state.PeerAccountHandler) + if !ok { + return process.ErrWrongTypeAssertion + } + peerAccount.ResetAtNewEpoch() + vs.setToJailedIfNeeded(peerAccount, validator) - err = vs.peerAdapter.SaveAccount(peerAccount) - if err != nil { - return err - } + err = vs.peerAdapter.SaveAccount(peerAccount) + if err != nil { + return err } } @@ -717,23 +729,23 @@ func (vs *validatorStatistics) ResetValidatorStatisticsAtNewEpoch(vInfos map[uin func (vs *validatorStatistics) setToJailedIfNeeded( peerAccount state.PeerAccountHandler, - validator *state.ValidatorInfo, + validator state.ValidatorInfoHandler, ) { if !vs.enableEpochsHandler.IsFlagEnabled(common.SwitchJailWaitingFlag) { return } - if validator.List == string(common.WaitingList) || validator.List == string(common.EligibleList) { + if validator.GetList() == string(common.WaitingList) || validator.GetList() == string(common.EligibleList) { return } - if validator.List == string(common.JailedList) && peerAccount.GetList() != string(common.JailedList) { - peerAccount.SetListAndIndex(validator.ShardId, string(common.JailedList), validator.Index) + if validator.GetList() == string(common.JailedList) && peerAccount.GetList() != string(common.JailedList) { + peerAccount.SetListAndIndex(validator.GetShardId(), string(common.JailedList), validator.GetIndex(), vs.enableEpochsHandler.IsFlagEnabled(common.StakingV4StartedFlag)) return } if vs.isValidatorWithLowRating(peerAccount) { - peerAccount.SetListAndIndex(validator.ShardId, string(common.JailedList), validator.Index) + peerAccount.SetListAndIndex(validator.GetShardId(), string(common.JailedList), validator.GetIndex(), vs.enableEpochsHandler.IsFlagEnabled(common.StakingV4StartedFlag)) } } @@ -994,7 +1006,7 @@ func (vs *validatorStatistics) savePeerAccountData( peerAccount.SetRating(startRating) peerAccount.SetTempRating(startRating) - peerAccount.SetListAndIndex(shardID, string(peerType), index) + peerAccount.SetListAndIndex(shardID, string(peerType), index, vs.enableEpochsHandler.IsFlagEnabled(common.StakingV4StartedFlag)) return vs.peerAdapter.SaveAccount(peerAccount) } diff --git a/process/peer/process_test.go b/process/peer/process_test.go index daa885cff3a..69adb3e936a 100644 --- a/process/peer/process_test.go +++ b/process/peer/process_test.go @@ -10,6 +10,7 @@ import ( "testing" "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/core/atomic" "github.com/multiversx/mx-chain-core-go/core/keyValStorage" "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-core-go/data/block" @@ -28,6 +29,7 @@ import ( dataRetrieverMock "github.com/multiversx/mx-chain-go/testscommon/dataRetriever" "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" "github.com/multiversx/mx-chain-go/testscommon/epochNotifier" + "github.com/multiversx/mx-chain-go/testscommon/genesisMocks" "github.com/multiversx/mx-chain-go/testscommon/shardingMocks" stateMock 
"github.com/multiversx/mx-chain-go/testscommon/state" storageStubs "github.com/multiversx/mx-chain-go/testscommon/storage" @@ -99,10 +101,9 @@ func createMockArguments() peer.ArgValidatorStatisticsProcessor { MaxGasPriceSetGuardian: "100000", }, }, - EpochNotifier: &epochNotifier.EpochNotifierStub{}, - EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), - BuiltInFunctionsCostHandler: &mock.BuiltInCostHandlerStub{}, - TxVersionChecker: &testscommon.TxVersionCheckerStub{}, + EpochNotifier: &epochNotifier.EpochNotifierStub{}, + EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), + TxVersionChecker: &testscommon.TxVersionCheckerStub{}, } economicsData, _ := economics.NewEconomicsData(argsNewEconomicsData) @@ -122,7 +123,7 @@ func createMockArguments() peer.ArgValidatorStatisticsProcessor { RewardsHandler: economicsData, MaxComputableRounds: 1000, MaxConsecutiveRoundsOfRatingDecrease: 2000, - NodesSetup: &mock.NodesSetupStub{}, + NodesSetup: &genesisMocks.NodesSetupStub{}, EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.SwitchJailWaitingFlag, common.BelowSignedThresholdFlag), } return arguments @@ -312,7 +313,7 @@ func TestValidatorStatisticsProcessor_SaveInitialStateErrOnGetAccountFail(t *tes arguments := createMockArguments() arguments.PeerAdapter = peerAdapters - arguments.NodesSetup = &mock.NodesSetupStub{InitialNodesInfoCalled: func() (m map[uint32][]nodesCoordinator.GenesisNodeInfoHandler, m2 map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) { + arguments.NodesSetup = &genesisMocks.NodesSetupStub{InitialNodesInfoCalled: func() (m map[uint32][]nodesCoordinator.GenesisNodeInfoHandler, m2 map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) { oneMap := make(map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) oneMap[0] = append(oneMap[0], mock.NewNodeInfo([]byte("aaaa"), []byte("aaaa"), 0, 50)) return oneMap, oneMap @@ -334,7 +335,7 @@ func TestValidatorStatisticsProcessor_SaveInitialStateGetAccountReturnsInvalid(t arguments := createMockArguments() arguments.PeerAdapter = peerAdapter - arguments.NodesSetup = &mock.NodesSetupStub{InitialNodesInfoCalled: func() (m map[uint32][]nodesCoordinator.GenesisNodeInfoHandler, m2 map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) { + arguments.NodesSetup = &genesisMocks.NodesSetupStub{InitialNodesInfoCalled: func() (m map[uint32][]nodesCoordinator.GenesisNodeInfoHandler, m2 map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) { oneMap := make(map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) oneMap[0] = append(oneMap[0], mock.NewNodeInfo([]byte("aaaa"), []byte("aaaa"), 0, 50)) return oneMap, oneMap @@ -359,7 +360,7 @@ func TestValidatorStatisticsProcessor_SaveInitialStateSetAddressErrors(t *testin } arguments := createMockArguments() - arguments.NodesSetup = &mock.NodesSetupStub{InitialNodesInfoCalled: func() (m map[uint32][]nodesCoordinator.GenesisNodeInfoHandler, m2 map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) { + arguments.NodesSetup = &genesisMocks.NodesSetupStub{InitialNodesInfoCalled: func() (m map[uint32][]nodesCoordinator.GenesisNodeInfoHandler, m2 map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) { oneMap := make(map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) oneMap[0] = append(oneMap[0], mock.NewNodeInfo([]byte("aaaa"), []byte("aaaa"), 0, 50)) return oneMap, oneMap @@ -2073,9 +2074,9 @@ func TestValidatorStatistics_Process(t *testing.T) { validatorStatistics, _ := peer.NewValidatorStatisticsProcessor(arguments) validatorInfos, _ := 
validatorStatistics.GetValidatorInfoForRootHash(hash) - vi0 := validatorInfos[0][0] + vi0 := validatorInfos.GetShardValidatorsInfoMap()[0][0] newTempRating := uint32(25) - vi0.TempRating = newTempRating + vi0.SetTempRating(newTempRating) assert.NotEqual(t, newTempRating, pa0.GetRating()) @@ -2145,10 +2146,10 @@ func TestValidatorStatistics_GetValidatorInfoForRootHash(t *testing.T) { validatorInfos, err := validatorStatistics.GetValidatorInfoForRootHash(hash) assert.NotNil(t, validatorInfos) assert.Nil(t, err) - assert.Equal(t, uint32(0), validatorInfos[0][0].ShardId) - compare(t, pa0, validatorInfos[0][0]) - assert.Equal(t, core.MetachainShardId, validatorInfos[core.MetachainShardId][0].ShardId) - compare(t, paMeta, validatorInfos[core.MetachainShardId][0]) + assert.Equal(t, uint32(0), validatorInfos.GetShardValidatorsInfoMap()[0][0].GetShardId()) + compare(t, pa0, validatorInfos.GetShardValidatorsInfoMap()[0][0]) + assert.Equal(t, core.MetachainShardId, validatorInfos.GetShardValidatorsInfoMap()[core.MetachainShardId][0].GetShardId()) + compare(t, paMeta, validatorInfos.GetShardValidatorsInfoMap()[core.MetachainShardId][0]) }) } @@ -2161,7 +2162,7 @@ func TestValidatorStatistics_ProcessValidatorInfosEndOfEpochWithNilMapShouldErr( err := validatorStatistics.ProcessRatingsEndOfEpoch(nil, 1) assert.Equal(t, process.ErrNilValidatorInfos, err) - vi := make(map[uint32][]*state.ValidatorInfo) + vi := state.NewShardValidatorsInfoMap() err = validatorStatistics.ProcessRatingsEndOfEpoch(vi, 1) assert.Equal(t, process.ErrNilValidatorInfos, err) } @@ -2179,9 +2180,8 @@ func TestValidatorStatistics_ProcessValidatorInfosEndOfEpochWithNoValidatorFailu tempRating1 := uint32(75) tempRating2 := uint32(80) - vi := make(map[uint32][]*state.ValidatorInfo) - vi[core.MetachainShardId] = make([]*state.ValidatorInfo, 1) - vi[core.MetachainShardId][0] = &state.ValidatorInfo{ + vi := state.NewShardValidatorsInfoMap() + _ = vi.Add(&state.ValidatorInfo{ PublicKey: nil, ShardId: core.MetachainShardId, List: "", @@ -2195,12 +2195,10 @@ func TestValidatorStatistics_ProcessValidatorInfosEndOfEpochWithNoValidatorFailu ValidatorFailure: 0, NumSelectedInSuccessBlocks: 20, AccumulatedFees: nil, - } - - vi[0] = make([]*state.ValidatorInfo, 1) - vi[0][0] = &state.ValidatorInfo{ + }) + _ = vi.Add(&state.ValidatorInfo{ PublicKey: nil, - ShardId: core.MetachainShardId, + ShardId: 0, List: "", Index: 0, TempRating: tempRating2, @@ -2212,12 +2210,12 @@ func TestValidatorStatistics_ProcessValidatorInfosEndOfEpochWithNoValidatorFailu ValidatorFailure: 0, NumSelectedInSuccessBlocks: 20, AccumulatedFees: nil, - } + }) err := validatorStatistics.ProcessRatingsEndOfEpoch(vi, 1) assert.Nil(t, err) - assert.Equal(t, tempRating1, vi[core.MetachainShardId][0].TempRating) - assert.Equal(t, tempRating2, vi[0][0].TempRating) + assert.Equal(t, tempRating1, vi.GetShardValidatorsInfoMap()[core.MetachainShardId][0].GetTempRating()) + assert.Equal(t, tempRating2, vi.GetShardValidatorsInfoMap()[0][0].GetTempRating()) } func TestValidatorStatistics_ProcessValidatorInfosEndOfEpochWithSmallValidatorFailureShouldWork(t *testing.T) { @@ -2246,18 +2244,16 @@ func TestValidatorStatistics_ProcessValidatorInfosEndOfEpochWithSmallValidatorFa validatorIgnored2 := uint32(90) validatorFailure2 := uint32(9) - vi := make(map[uint32][]*state.ValidatorInfo) - vi[core.MetachainShardId] = make([]*state.ValidatorInfo, 1) - vi[core.MetachainShardId][0] = createMockValidatorInfo(core.MetachainShardId, tempRating1, validatorSuccess1, validatorIgnored1, validatorFailure1) - 
vi[0] = make([]*state.ValidatorInfo, 1) - vi[0][0] = createMockValidatorInfo(0, tempRating2, validatorSuccess2, validatorIgnored2, validatorFailure2) + vi := state.NewShardValidatorsInfoMap() + _ = vi.Add(createMockValidatorInfo(core.MetachainShardId, tempRating1, validatorSuccess1, validatorIgnored1, validatorFailure1)) + _ = vi.Add(createMockValidatorInfo(0, tempRating2, validatorSuccess2, validatorIgnored2, validatorFailure2)) err := validatorStatistics.ProcessRatingsEndOfEpoch(vi, 1) assert.Nil(t, err) expectedTempRating1 := tempRating1 - uint32(rater.MetaIncreaseValidator)*(validatorSuccess1+validatorIgnored1) - assert.Equal(t, expectedTempRating1, vi[core.MetachainShardId][0].TempRating) + assert.Equal(t, expectedTempRating1, vi.GetShardValidatorsInfoMap()[core.MetachainShardId][0].GetTempRating()) expectedTempRating2 := tempRating2 - uint32(rater.IncreaseValidator)*(validatorSuccess2+validatorIgnored2) - assert.Equal(t, expectedTempRating2, vi[0][0].TempRating) + assert.Equal(t, expectedTempRating2, vi.GetShardValidatorsInfoMap()[0][0].GetTempRating()) } func TestValidatorStatistics_ProcessValidatorInfosEndOfEpochComputesJustEligible(t *testing.T) { @@ -2287,20 +2283,19 @@ func TestValidatorStatistics_ProcessValidatorInfosEndOfEpochComputesJustEligible validatorIgnored2 := uint32(90) validatorFailure2 := uint32(9) - vi := make(map[uint32][]*state.ValidatorInfo) - vi[core.MetachainShardId] = make([]*state.ValidatorInfo, 1) - vi[core.MetachainShardId][0] = createMockValidatorInfo(core.MetachainShardId, tempRating1, validatorSuccess1, validatorIgnored1, validatorFailure1) + vi := state.NewShardValidatorsInfoMap() + _ = vi.Add(createMockValidatorInfo(core.MetachainShardId, tempRating1, validatorSuccess1, validatorIgnored1, validatorFailure1)) - vi[0] = make([]*state.ValidatorInfo, 1) - vi[0][0] = createMockValidatorInfo(0, tempRating2, validatorSuccess2, validatorIgnored2, validatorFailure2) - vi[0][0].List = string(common.WaitingList) + validatorWaiting := createMockValidatorInfo(0, tempRating2, validatorSuccess2, validatorIgnored2, validatorFailure2) + validatorWaiting.SetList(string(common.WaitingList)) + _ = vi.Add(validatorWaiting) err := validatorStatistics.ProcessRatingsEndOfEpoch(vi, 1) assert.Nil(t, err) expectedTempRating1 := tempRating1 - uint32(rater.MetaIncreaseValidator)*(validatorSuccess1+validatorIgnored1) - assert.Equal(t, expectedTempRating1, vi[core.MetachainShardId][0].TempRating) + assert.Equal(t, expectedTempRating1, vi.GetShardValidatorsInfoMap()[core.MetachainShardId][0].GetTempRating()) - assert.Equal(t, tempRating2, vi[0][0].TempRating) + assert.Equal(t, tempRating2, vi.GetShardValidatorsInfoMap()[0][0].GetTempRating()) } func TestValidatorStatistics_ProcessValidatorInfosEndOfEpochV2ComputesEligibleLeaving(t *testing.T) { @@ -2332,21 +2327,21 @@ func TestValidatorStatistics_ProcessValidatorInfosEndOfEpochV2ComputesEligibleLe validatorIgnored2 := uint32(90) validatorFailure2 := uint32(9) - vi := make(map[uint32][]*state.ValidatorInfo) - vi[core.MetachainShardId] = make([]*state.ValidatorInfo, 1) - vi[core.MetachainShardId][0] = createMockValidatorInfo(core.MetachainShardId, tempRating1, validatorSuccess1, validatorIgnored1, validatorFailure1) - vi[core.MetachainShardId][0].List = string(common.LeavingList) + vi := state.NewShardValidatorsInfoMap() + validatorLeaving := createMockValidatorInfo(core.MetachainShardId, tempRating1, validatorSuccess1, validatorIgnored1, validatorFailure1) + validatorLeaving.SetList(string(common.LeavingList)) + _ = 
vi.Add(validatorLeaving) - vi[0] = make([]*state.ValidatorInfo, 1) - vi[0][0] = createMockValidatorInfo(0, tempRating2, validatorSuccess2, validatorIgnored2, validatorFailure2) - vi[0][0].List = string(common.WaitingList) + validatorWaiting := createMockValidatorInfo(0, tempRating2, validatorSuccess2, validatorIgnored2, validatorFailure2) + validatorWaiting.SetList(string(common.WaitingList)) + _ = vi.Add(validatorWaiting) err := validatorStatistics.ProcessRatingsEndOfEpoch(vi, 1) assert.Nil(t, err) expectedTempRating1 := tempRating1 - uint32(rater.MetaIncreaseValidator)*(validatorSuccess1+validatorIgnored1) - assert.Equal(t, expectedTempRating1, vi[core.MetachainShardId][0].TempRating) + assert.Equal(t, expectedTempRating1, vi.GetShardValidatorsInfoMap()[core.MetachainShardId][0].GetTempRating()) - assert.Equal(t, tempRating2, vi[0][0].TempRating) + assert.Equal(t, tempRating2, vi.GetShardValidatorsInfoMap()[0][0].GetTempRating()) } func TestValidatorStatistics_ProcessValidatorInfosEndOfEpochWithLargeValidatorFailureBelowMinRatingShouldWork(t *testing.T) { @@ -2374,18 +2369,16 @@ func TestValidatorStatistics_ProcessValidatorInfosEndOfEpochWithLargeValidatorFa validatorIgnored2 := uint32(90) validatorFailure2 := uint32(9) - vi := make(map[uint32][]*state.ValidatorInfo) - vi[core.MetachainShardId] = make([]*state.ValidatorInfo, 1) - vi[core.MetachainShardId][0] = createMockValidatorInfo(core.MetachainShardId, tempRating1, validatorSuccess1, validatorIgnored1, validatorFailure1) - vi[0] = make([]*state.ValidatorInfo, 1) - vi[0][0] = createMockValidatorInfo(0, tempRating2, validatorSuccess2, validatorIgnored2, validatorFailure2) + vi := state.NewShardValidatorsInfoMap() + _ = vi.Add(createMockValidatorInfo(core.MetachainShardId, tempRating1, validatorSuccess1, validatorIgnored1, validatorFailure1)) + _ = vi.Add(createMockValidatorInfo(0, tempRating2, validatorSuccess2, validatorIgnored2, validatorFailure2)) validatorStatistics, _ := peer.NewValidatorStatisticsProcessor(arguments) err := validatorStatistics.ProcessRatingsEndOfEpoch(vi, 1) assert.Nil(t, err) - assert.Equal(t, rater.MinRating, vi[core.MetachainShardId][0].TempRating) - assert.Equal(t, rater.MinRating, vi[0][0].TempRating) + assert.Equal(t, rater.MinRating, vi.GetShardValidatorsInfoMap()[core.MetachainShardId][0].GetTempRating()) + assert.Equal(t, rater.MinRating, vi.GetShardValidatorsInfoMap()[0][0].GetTempRating()) } func TestValidatorsProvider_PeerAccoutToValidatorInfo(t *testing.T) { @@ -2485,26 +2478,26 @@ func createMockValidatorInfo(shardId uint32, tempRating uint32, validatorSuccess } } -func compare(t *testing.T, peerAccount state.PeerAccountHandler, validatorInfo *state.ValidatorInfo) { - assert.Equal(t, peerAccount.GetShardId(), validatorInfo.ShardId) - assert.Equal(t, peerAccount.GetRating(), validatorInfo.Rating) - assert.Equal(t, peerAccount.GetTempRating(), validatorInfo.TempRating) - assert.Equal(t, peerAccount.AddressBytes(), validatorInfo.PublicKey) - assert.Equal(t, peerAccount.GetValidatorSuccessRate().GetNumFailure(), validatorInfo.ValidatorFailure) - assert.Equal(t, peerAccount.GetValidatorSuccessRate().GetNumSuccess(), validatorInfo.ValidatorSuccess) - assert.Equal(t, peerAccount.GetValidatorIgnoredSignaturesRate(), validatorInfo.ValidatorIgnoredSignatures) - assert.Equal(t, peerAccount.GetLeaderSuccessRate().GetNumFailure(), validatorInfo.LeaderFailure) - assert.Equal(t, peerAccount.GetLeaderSuccessRate().GetNumSuccess(), validatorInfo.LeaderSuccess) - assert.Equal(t, 
peerAccount.GetTotalValidatorSuccessRate().GetNumFailure(), validatorInfo.TotalValidatorFailure) - assert.Equal(t, peerAccount.GetTotalValidatorSuccessRate().GetNumSuccess(), validatorInfo.TotalValidatorSuccess) - assert.Equal(t, peerAccount.GetTotalValidatorIgnoredSignaturesRate(), validatorInfo.TotalValidatorIgnoredSignatures) - assert.Equal(t, peerAccount.GetTotalLeaderSuccessRate().GetNumFailure(), validatorInfo.TotalLeaderFailure) - assert.Equal(t, peerAccount.GetTotalLeaderSuccessRate().GetNumSuccess(), validatorInfo.TotalLeaderSuccess) - assert.Equal(t, peerAccount.GetList(), validatorInfo.List) - assert.Equal(t, peerAccount.GetIndexInList(), validatorInfo.Index) - assert.Equal(t, peerAccount.GetRewardAddress(), validatorInfo.RewardAddress) - assert.Equal(t, peerAccount.GetAccumulatedFees(), validatorInfo.AccumulatedFees) - assert.Equal(t, peerAccount.GetNumSelectedInSuccessBlocks(), validatorInfo.NumSelectedInSuccessBlocks) +func compare(t *testing.T, peerAccount state.PeerAccountHandler, validatorInfo state.ValidatorInfoHandler) { + assert.Equal(t, peerAccount.GetShardId(), validatorInfo.GetShardId()) + assert.Equal(t, peerAccount.GetRating(), validatorInfo.GetRating()) + assert.Equal(t, peerAccount.GetTempRating(), validatorInfo.GetTempRating()) + assert.Equal(t, peerAccount.AddressBytes(), validatorInfo.GetPublicKey()) + assert.Equal(t, peerAccount.GetValidatorSuccessRate().GetNumFailure(), validatorInfo.GetValidatorFailure()) + assert.Equal(t, peerAccount.GetValidatorSuccessRate().GetNumSuccess(), validatorInfo.GetValidatorSuccess()) + assert.Equal(t, peerAccount.GetValidatorIgnoredSignaturesRate(), validatorInfo.GetValidatorIgnoredSignatures()) + assert.Equal(t, peerAccount.GetLeaderSuccessRate().GetNumFailure(), validatorInfo.GetLeaderFailure()) + assert.Equal(t, peerAccount.GetLeaderSuccessRate().GetNumSuccess(), validatorInfo.GetLeaderSuccess()) + assert.Equal(t, peerAccount.GetTotalValidatorSuccessRate().GetNumFailure(), validatorInfo.GetTotalValidatorFailure()) + assert.Equal(t, peerAccount.GetTotalValidatorSuccessRate().GetNumSuccess(), validatorInfo.GetTotalValidatorSuccess()) + assert.Equal(t, peerAccount.GetTotalValidatorIgnoredSignaturesRate(), validatorInfo.GetTotalValidatorIgnoredSignatures()) + assert.Equal(t, peerAccount.GetTotalLeaderSuccessRate().GetNumFailure(), validatorInfo.GetTotalLeaderFailure()) + assert.Equal(t, peerAccount.GetTotalLeaderSuccessRate().GetNumSuccess(), validatorInfo.GetTotalLeaderSuccess()) + assert.Equal(t, peerAccount.GetList(), validatorInfo.GetList()) + assert.Equal(t, peerAccount.GetIndexInList(), validatorInfo.GetIndex()) + assert.Equal(t, peerAccount.GetRewardAddress(), validatorInfo.GetRewardAddress()) + assert.Equal(t, peerAccount.GetAccumulatedFees(), validatorInfo.GetAccumulatedFees()) + assert.Equal(t, peerAccount.GetNumSelectedInSuccessBlocks(), validatorInfo.GetNumSelectedInSuccessBlocks()) } func createPeerAccounts(addrBytes0 []byte, addrBytesMeta []byte) (state.PeerAccountHandler, state.PeerAccountHandler) { @@ -2655,6 +2648,114 @@ func TestValidatorStatisticsProcessor_SaveNodesCoordinatorUpdates(t *testing.T) assert.False(t, nodeForcedToRemain) } +func TestValidatorStatisticsProcessor_SaveNodesCoordinatorUpdatesWithStakingV4(t *testing.T) { + t.Parallel() + + peerAdapter := getAccountsMock() + arguments := createMockArguments() + arguments.PeerAdapter = peerAdapter + + pk0 := []byte("pk0") + pk1 := []byte("pk1") + pk2 := []byte("pk2") + + account0, _ := accounts.NewPeerAccount(pk0) + account1, _ := 
accounts.NewPeerAccount(pk1) + account2, _ := accounts.NewPeerAccount(pk2) + + ctLoadAccount := &atomic.Counter{} + ctSaveAccount := &atomic.Counter{} + + peerAdapter.LoadAccountCalled = func(address []byte) (vmcommon.AccountHandler, error) { + ctLoadAccount.Increment() + + switch string(address) { + case string(pk0): + return account0, nil + case string(pk1): + return account1, nil + case string(pk2): + return account2, nil + default: + require.Fail(t, "should not have called this for other address") + return nil, nil + } + } + peerAdapter.SaveAccountCalled = func(account vmcommon.AccountHandler) error { + ctSaveAccount.Increment() + peerAccount := account.(state.PeerAccountHandler) + require.Equal(t, uint32(0), peerAccount.GetIndexInList()) + + switch string(account.AddressBytes()) { + case string(pk0): + require.Equal(t, string(common.EligibleList), peerAccount.GetList()) + require.Equal(t, uint32(0), peerAccount.GetShardId()) + return nil + case string(pk1): + require.Equal(t, string(common.AuctionList), peerAccount.GetList()) + require.Equal(t, uint32(0), peerAccount.GetShardId()) + return nil + case string(pk2): + require.Equal(t, string(common.AuctionList), peerAccount.GetList()) + require.Equal(t, core.MetachainShardId, peerAccount.GetShardId()) + return nil + default: + require.Fail(t, "should not have called this for other account") + return nil + } + } + + arguments.NodesCoordinator = &shardingMocks.NodesCoordinatorMock{ + GetAllEligibleValidatorsPublicKeysCalled: func(epoch uint32) (map[uint32][][]byte, error) { + mapNodes := map[uint32][][]byte{ + 0: {pk0}, + } + return mapNodes, nil + }, + GetAllShuffledOutValidatorsPublicKeysCalled: func(epoch uint32) (map[uint32][][]byte, error) { + mapNodes := map[uint32][][]byte{ + 0: {pk1}, + core.MetachainShardId: {pk2}, + } + return mapNodes, nil + }, + } + stakingV4Step2EnableEpochCalledCt := 0 + arguments.EnableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ + IsFlagEnabledCalled: func(flag core.EnableEpochFlag) bool { + if flag == common.StakingV4Step2Flag { + stakingV4Step2EnableEpochCalledCt++ + switch stakingV4Step2EnableEpochCalledCt { + case 1: + return false + case 2: + return true + default: + require.Fail(t, "should only call this twice") + } + } + + return false + }, + } + + validatorStatistics, _ := peer.NewValidatorStatisticsProcessor(arguments) + nodeForcedToRemain, err := validatorStatistics.SaveNodesCoordinatorUpdates(0) + require.Nil(t, err) + require.False(t, nodeForcedToRemain) + require.Equal(t, int64(1), ctSaveAccount.Get()) + require.Equal(t, int64(1), ctLoadAccount.Get()) + + ctSaveAccount.Reset() + ctLoadAccount.Reset() + + nodeForcedToRemain, err = validatorStatistics.SaveNodesCoordinatorUpdates(0) + require.Nil(t, err) + require.False(t, nodeForcedToRemain) + require.Equal(t, int64(3), ctSaveAccount.Get()) + require.Equal(t, int64(3), ctLoadAccount.Get()) +} + func TestValidatorStatisticsProcessor_getActualList(t *testing.T) { t.Parallel() diff --git a/process/peer/ratingReader.go b/process/peer/ratingReader.go index 4a8c8f1c5be..83f236b3869 100644 --- a/process/peer/ratingReader.go +++ b/process/peer/ratingReader.go @@ -5,13 +5,13 @@ type RatingReader struct { getRating func(string) uint32 } -//GetRating returns the Rating for the specified public key +// GetRating returns the Rating for the specified public key func (bsr *RatingReader) GetRating(pk string) uint32 { rating := bsr.getRating(pk) return rating } -//IsInterfaceNil checks if the underlying object is nil +// IsInterfaceNil checks 
if the underlying object is nil func (bsr *RatingReader) IsInterfaceNil() bool { return bsr == nil } diff --git a/process/peer/validatorsProvider.go b/process/peer/validatorsProvider.go index 6ab8d0ac49b..7c3b8505310 100644 --- a/process/peer/validatorsProvider.go +++ b/process/peer/validatorsProvider.go @@ -11,6 +11,7 @@ import ( "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-core-go/data/validator" "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/epochStart" "github.com/multiversx/mx-chain-go/epochStart/notifier" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" @@ -24,14 +25,22 @@ type validatorsProvider struct { nodesCoordinator process.NodesCoordinator validatorStatistics process.ValidatorStatisticsProcessor cache map[string]*validator.ValidatorStatistics + cachedAuctionValidators []*common.AuctionListValidatorAPIResponse + cachedRandomness []byte cacheRefreshIntervalDuration time.Duration refreshCache chan uint32 lastCacheUpdate time.Time + lastAuctionCacheUpdate time.Time lock sync.RWMutex + auctionMutex sync.RWMutex cancelFunc func() - pubkeyConverter core.PubkeyConverter - maxRating uint32 - currentEpoch uint32 + validatorPubKeyConverter core.PubkeyConverter + addressPubKeyConverter core.PubkeyConverter + stakingDataProvider StakingDataProviderAPI + auctionListSelector epochStart.AuctionListSelector + + maxRating uint32 + currentEpoch uint32 } // ArgValidatorsProvider contains all parameters needed for creating a validatorsProvider @@ -40,7 +49,10 @@ type ArgValidatorsProvider struct { EpochStartEventNotifier process.EpochStartEventNotifier CacheRefreshIntervalDurationInSec time.Duration ValidatorStatistics process.ValidatorStatisticsProcessor - PubKeyConverter core.PubkeyConverter + ValidatorPubKeyConverter core.PubkeyConverter + AddressPubKeyConverter core.PubkeyConverter + StakingDataProvider StakingDataProviderAPI + AuctionListSelector epochStart.AuctionListSelector StartEpoch uint32 MaxRating uint32 } @@ -53,8 +65,11 @@ func NewValidatorsProvider( if check.IfNil(args.ValidatorStatistics) { return nil, process.ErrNilValidatorStatistics } - if check.IfNil(args.PubKeyConverter) { - return nil, process.ErrNilPubkeyConverter + if check.IfNil(args.ValidatorPubKeyConverter) { + return nil, fmt.Errorf("%w for validators", process.ErrNilPubkeyConverter) + } + if check.IfNil(args.AddressPubKeyConverter) { + return nil, fmt.Errorf("%w for addresses", process.ErrNilPubkeyConverter) } if check.IfNil(args.NodesCoordinator) { return nil, process.ErrNilNodesCoordinator @@ -62,6 +77,12 @@ func NewValidatorsProvider( if check.IfNil(args.EpochStartEventNotifier) { return nil, process.ErrNilEpochStartNotifier } + if check.IfNil(args.StakingDataProvider) { + return nil, process.ErrNilStakingDataProvider + } + if check.IfNil(args.AuctionListSelector) { + return nil, epochStart.ErrNilAuctionListSelector + } if args.MaxRating == 0 { return nil, process.ErrMaxRatingZero } @@ -74,14 +95,20 @@ func NewValidatorsProvider( valProvider := &validatorsProvider{ nodesCoordinator: args.NodesCoordinator, validatorStatistics: args.ValidatorStatistics, + stakingDataProvider: args.StakingDataProvider, cache: make(map[string]*validator.ValidatorStatistics), + cachedAuctionValidators: make([]*common.AuctionListValidatorAPIResponse, 0), + cachedRandomness: make([]byte, 0), cacheRefreshIntervalDuration: args.CacheRefreshIntervalDurationInSec, refreshCache: make(chan uint32), lock: sync.RWMutex{}, + 
auctionMutex: sync.RWMutex{}, cancelFunc: cancelfunc, maxRating: args.MaxRating, - pubkeyConverter: args.PubKeyConverter, + validatorPubKeyConverter: args.ValidatorPubKeyConverter, + addressPubKeyConverter: args.AddressPubKeyConverter, currentEpoch: args.StartEpoch, + auctionListSelector: args.AuctionListSelector, } go valProvider.startRefreshProcess(currentContext) @@ -92,19 +119,23 @@ func NewValidatorsProvider( // GetLatestValidators gets the latest configuration of validators from the peerAccountsTrie func (vp *validatorsProvider) GetLatestValidators() map[string]*validator.ValidatorStatistics { + vp.updateCacheIfNeeded() + vp.lock.RLock() - shouldUpdate := time.Since(vp.lastCacheUpdate) > vp.cacheRefreshIntervalDuration + clonedMap := cloneMap(vp.cache) vp.lock.RUnlock() - if shouldUpdate { - vp.updateCache() - } + return clonedMap +} +func (vp *validatorsProvider) updateCacheIfNeeded() { vp.lock.RLock() - clonedMap := cloneMap(vp.cache) + shouldUpdate := time.Since(vp.lastCacheUpdate) > vp.cacheRefreshIntervalDuration vp.lock.RUnlock() - return clonedMap + if shouldUpdate { + vp.updateCache() + } } func cloneMap(cache map[string]*validator.ValidatorStatistics) map[string]*validator.ValidatorStatistics { @@ -182,6 +213,7 @@ func (vp *validatorsProvider) updateCache() { } allNodes, err := vp.validatorStatistics.GetValidatorInfoForRootHash(lastFinalizedRootHash) if err != nil { + allNodes = state.NewShardValidatorsInfoMap() log.Trace("validatorsProvider - GetLatestValidatorInfos failed", "error", err) } @@ -199,48 +231,46 @@ func (vp *validatorsProvider) updateCache() { func (vp *validatorsProvider) createNewCache( epoch uint32, - allNodes map[uint32][]*state.ValidatorInfo, + allNodes state.ShardValidatorsInfoMapHandler, ) map[string]*validator.ValidatorStatistics { newCache := vp.createValidatorApiResponseMapFromValidatorInfoMap(allNodes) nodesMapEligible, err := vp.nodesCoordinator.GetAllEligibleValidatorsPublicKeys(epoch) if err != nil { - log.Debug("validatorsProvider - GetAllEligibleValidatorsPublicKeys failed", "epoch", epoch) + log.Debug("validatorsProvider - GetAllEligibleValidatorsPublicKeys failed", "epoch", epoch, "error", err) } vp.aggregateLists(newCache, nodesMapEligible, common.EligibleList) nodesMapWaiting, err := vp.nodesCoordinator.GetAllWaitingValidatorsPublicKeys(epoch) if err != nil { - log.Debug("validatorsProvider - GetAllWaitingValidatorsPublicKeys failed", "epoch", epoch) + log.Debug("validatorsProvider - GetAllWaitingValidatorsPublicKeys failed", "epoch", epoch, "error", err) } vp.aggregateLists(newCache, nodesMapWaiting, common.WaitingList) return newCache } -func (vp *validatorsProvider) createValidatorApiResponseMapFromValidatorInfoMap(allNodes map[uint32][]*state.ValidatorInfo) map[string]*validator.ValidatorStatistics { +func (vp *validatorsProvider) createValidatorApiResponseMapFromValidatorInfoMap(allNodes state.ShardValidatorsInfoMapHandler) map[string]*validator.ValidatorStatistics { newCache := make(map[string]*validator.ValidatorStatistics) - for _, validatorInfosInShard := range allNodes { - for _, validatorInfo := range validatorInfosInShard { - strKey := vp.pubkeyConverter.SilentEncode(validatorInfo.PublicKey, log) - - newCache[strKey] = &validator.ValidatorStatistics{ - NumLeaderSuccess: validatorInfo.LeaderSuccess, - NumLeaderFailure: validatorInfo.LeaderFailure, - NumValidatorSuccess: validatorInfo.ValidatorSuccess, - NumValidatorFailure: validatorInfo.ValidatorFailure, - NumValidatorIgnoredSignatures: validatorInfo.ValidatorIgnoredSignatures, 
- TotalNumLeaderSuccess: validatorInfo.TotalLeaderSuccess, - TotalNumLeaderFailure: validatorInfo.TotalLeaderFailure, - TotalNumValidatorSuccess: validatorInfo.TotalValidatorSuccess, - TotalNumValidatorFailure: validatorInfo.TotalValidatorFailure, - TotalNumValidatorIgnoredSignatures: validatorInfo.TotalValidatorIgnoredSignatures, - RatingModifier: validatorInfo.RatingModifier, - Rating: float32(validatorInfo.Rating) * 100 / float32(vp.maxRating), - TempRating: float32(validatorInfo.TempRating) * 100 / float32(vp.maxRating), - ShardId: validatorInfo.ShardId, - ValidatorStatus: validatorInfo.List, - } + + for _, validatorInfo := range allNodes.GetAllValidatorsInfo() { + strKey := vp.validatorPubKeyConverter.SilentEncode(validatorInfo.GetPublicKey(), log) + newCache[strKey] = &validator.ValidatorStatistics{ + NumLeaderSuccess: validatorInfo.GetLeaderSuccess(), + NumLeaderFailure: validatorInfo.GetLeaderFailure(), + NumValidatorSuccess: validatorInfo.GetValidatorSuccess(), + NumValidatorFailure: validatorInfo.GetValidatorFailure(), + NumValidatorIgnoredSignatures: validatorInfo.GetValidatorIgnoredSignatures(), + TotalNumLeaderSuccess: validatorInfo.GetTotalLeaderSuccess(), + TotalNumLeaderFailure: validatorInfo.GetTotalLeaderFailure(), + TotalNumValidatorSuccess: validatorInfo.GetTotalValidatorSuccess(), + TotalNumValidatorFailure: validatorInfo.GetTotalValidatorFailure(), + TotalNumValidatorIgnoredSignatures: validatorInfo.GetTotalValidatorIgnoredSignatures(), + RatingModifier: validatorInfo.GetRatingModifier(), + Rating: float32(validatorInfo.GetRating()) * 100 / float32(vp.maxRating), + TempRating: float32(validatorInfo.GetTempRating()) * 100 / float32(vp.maxRating), + ShardId: validatorInfo.GetShardId(), + ValidatorStatus: validatorInfo.GetList(), } } @@ -254,8 +284,7 @@ func (vp *validatorsProvider) aggregateLists( ) { for shardID, shardValidators := range validatorsMap { for _, val := range shardValidators { - encodedKey := vp.pubkeyConverter.SilentEncode(val, log) - + encodedKey := vp.validatorPubKeyConverter.SilentEncode(val, log) foundInTrieValidator, ok := newCache[encodedKey] peerType := string(currentList) @@ -288,6 +317,12 @@ func shouldCombine(triePeerType common.PeerType, currentPeerType common.PeerType return isLeaving && isEligibleOrWaiting } +// ForceUpdate will trigger the update process of all caches +func (vp *validatorsProvider) ForceUpdate() error { + vp.updateCache() + return vp.updateAuctionListCache() +} + // IsInterfaceNil returns true if there is no value under the interface func (vp *validatorsProvider) IsInterfaceNil() bool { return vp == nil diff --git a/process/peer/validatorsProviderAuction.go b/process/peer/validatorsProviderAuction.go new file mode 100644 index 00000000000..144ace850fb --- /dev/null +++ b/process/peer/validatorsProviderAuction.go @@ -0,0 +1,220 @@ +package peer + +import ( + "bytes" + "math/big" + "sort" + "time" + + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/epochStart" + "github.com/multiversx/mx-chain-go/state" +) + +// GetAuctionList returns an array containing the validators that are currently in the auction list +func (vp *validatorsProvider) GetAuctionList() ([]*common.AuctionListValidatorAPIResponse, error) { + err := vp.updateAuctionListCacheIfNeeded() + if err != nil { + return nil, err + } + + vp.auctionMutex.RLock() + ret := make([]*common.AuctionListValidatorAPIResponse, len(vp.cachedAuctionValidators)) + copy(ret, vp.cachedAuctionValidators) + vp.auctionMutex.RUnlock() + + return ret, nil +} 
+ +func (vp *validatorsProvider) updateAuctionListCacheIfNeeded() error { + vp.auctionMutex.RLock() + shouldUpdate := time.Since(vp.lastAuctionCacheUpdate) > vp.cacheRefreshIntervalDuration + vp.auctionMutex.RUnlock() + + if shouldUpdate { + return vp.updateAuctionListCache() + } + + return nil +} + +func (vp *validatorsProvider) updateAuctionListCache() error { + rootHash := vp.validatorStatistics.LastFinalizedRootHash() + if len(rootHash) == 0 { + return state.ErrNilRootHash + } + + validatorsMap, err := vp.validatorStatistics.GetValidatorInfoForRootHash(rootHash) + if err != nil { + return err + } + + vp.auctionMutex.Lock() + vp.cachedRandomness = rootHash + vp.auctionMutex.Unlock() + + newCache, err := vp.createValidatorsAuctionCache(validatorsMap) + if err != nil { + return err + } + + vp.auctionMutex.Lock() + vp.lastAuctionCacheUpdate = time.Now() + vp.cachedAuctionValidators = newCache + vp.auctionMutex.Unlock() + + return nil +} + +func (vp *validatorsProvider) createValidatorsAuctionCache(validatorsMap state.ShardValidatorsInfoMapHandler) ([]*common.AuctionListValidatorAPIResponse, error) { + defer vp.stakingDataProvider.Clean() + + err := vp.fillAllValidatorsInfo(validatorsMap) + if err != nil { + return nil, err + } + + selectedNodes, err := vp.getSelectedNodesFromAuction(validatorsMap) + if err != nil { + return nil, err + } + + auctionListValidators, qualifiedOwners := vp.getAuctionListValidatorsAPIResponse(selectedNodes) + sortList(auctionListValidators, qualifiedOwners) + return auctionListValidators, nil +} + +func (vp *validatorsProvider) fillAllValidatorsInfo(validatorsMap state.ShardValidatorsInfoMapHandler) error { + for _, validator := range validatorsMap.GetAllValidatorsInfo() { + err := vp.stakingDataProvider.FillValidatorInfo(validator) + if err != nil { + return err + } + } + + _, _, err := vp.stakingDataProvider.ComputeUnQualifiedNodes(validatorsMap) + return err +} + +func (vp *validatorsProvider) getSelectedNodesFromAuction(validatorsMap state.ShardValidatorsInfoMapHandler) ([]state.ValidatorInfoHandler, error) { + vp.auctionMutex.RLock() + randomness := vp.cachedRandomness + vp.auctionMutex.RUnlock() + + err := vp.auctionListSelector.SelectNodesFromAuctionList(validatorsMap, randomness) + if err != nil { + return nil, err + } + + selectedNodes := make([]state.ValidatorInfoHandler, 0) + for _, validator := range validatorsMap.GetAllValidatorsInfo() { + if validator.GetList() == string(common.SelectedFromAuctionList) { + selectedNodes = append(selectedNodes, validator.ShallowClone()) + } + } + + return selectedNodes, nil +} + +func sortList(list []*common.AuctionListValidatorAPIResponse, qualifiedOwners map[string]bool) { + sort.SliceStable(list, func(i, j int) bool { + qualifiedTopUpValidator1, _ := big.NewInt(0).SetString(list[i].QualifiedTopUp, 10) + qualifiedTopUpValidator2, _ := big.NewInt(0).SetString(list[j].QualifiedTopUp, 10) + if qualifiedTopUpValidator1.Cmp(qualifiedTopUpValidator2) == 0 { + return compareByNumQualified(list[i], list[j], qualifiedOwners) + } + + return qualifiedTopUpValidator1.Cmp(qualifiedTopUpValidator2) > 0 + }) +} + +func compareByNumQualified(owner1Nodes, owner2Nodes *common.AuctionListValidatorAPIResponse, qualifiedOwners map[string]bool) bool { + owner1Qualified := qualifiedOwners[owner1Nodes.Owner] + owner2Qualified := qualifiedOwners[owner2Nodes.Owner] + + bothQualified := owner1Qualified && owner2Qualified + if !bothQualified { + return owner1Qualified + } + + owner1NumQualified := getNumQualified(owner1Nodes.Nodes) + 
owner2NumQualified := getNumQualified(owner2Nodes.Nodes) + + return owner1NumQualified > owner2NumQualified +} + +func getNumQualified(nodes []*common.AuctionNode) uint32 { + numQualified := uint32(0) + for _, node := range nodes { + if node.Qualified { + numQualified++ + } + } + + return numQualified +} + +func (vp *validatorsProvider) getAuctionListValidatorsAPIResponse( + selectedNodes []state.ValidatorInfoHandler, +) ([]*common.AuctionListValidatorAPIResponse, map[string]bool) { + auctionListValidators := make([]*common.AuctionListValidatorAPIResponse, 0) + qualifiedOwners := make(map[string]bool) + + for ownerPubKey, ownerData := range vp.stakingDataProvider.GetOwnersData() { + numAuctionNodes := len(ownerData.AuctionList) + if numAuctionNodes > 0 { + ownerEncodedPubKey := vp.addressPubKeyConverter.SilentEncode([]byte(ownerPubKey), log) + auctionValidator := &common.AuctionListValidatorAPIResponse{ + Owner: ownerEncodedPubKey, + NumStakedNodes: ownerData.NumStakedNodes, + TotalTopUp: ownerData.TotalTopUp.String(), + TopUpPerNode: ownerData.TopUpPerNode.String(), + QualifiedTopUp: ownerData.TopUpPerNode.String(), + Nodes: make([]*common.AuctionNode, 0, numAuctionNodes), + } + vp.fillAuctionQualifiedValidatorAPIData(selectedNodes, ownerData, auctionValidator) + auctionListValidators = append(auctionListValidators, auctionValidator) + + qualifiedOwners[ownerEncodedPubKey] = ownerData.Qualified + } + } + + return auctionListValidators, qualifiedOwners +} + +func (vp *validatorsProvider) fillAuctionQualifiedValidatorAPIData( + selectedNodes []state.ValidatorInfoHandler, + ownerData *epochStart.OwnerData, + auctionValidatorAPI *common.AuctionListValidatorAPIResponse, +) { + auctionValidatorAPI.Nodes = make([]*common.AuctionNode, 0, len(ownerData.AuctionList)) + numOwnerQualifiedNodes := int64(0) + for _, nodeInAuction := range ownerData.AuctionList { + auctionNode := &common.AuctionNode{ + BlsKey: vp.validatorPubKeyConverter.SilentEncode(nodeInAuction.GetPublicKey(), log), + Qualified: false, + } + if ownerData.Qualified && contains(selectedNodes, nodeInAuction) { + auctionNode.Qualified = true + numOwnerQualifiedNodes++ + } + + auctionValidatorAPI.Nodes = append(auctionValidatorAPI.Nodes, auctionNode) + } + + if numOwnerQualifiedNodes > 0 { + activeNodes := big.NewInt(ownerData.NumActiveNodes) + qualifiedNodes := big.NewInt(numOwnerQualifiedNodes) + ownerRemainingNodes := big.NewInt(0).Add(activeNodes, qualifiedNodes) + auctionValidatorAPI.QualifiedTopUp = big.NewInt(0).Div(ownerData.TotalTopUp, ownerRemainingNodes).String() + } +} + +func contains(list []state.ValidatorInfoHandler, validator state.ValidatorInfoHandler) bool { + for _, val := range list { + if bytes.Equal(val.GetPublicKey(), validator.GetPublicKey()) { + return true + } + } + return false +} diff --git a/process/peer/validatorsProvider_test.go b/process/peer/validatorsProvider_test.go index cd718e0c78b..931567a2435 100644 --- a/process/peer/validatorsProvider_test.go +++ b/process/peer/validatorsProvider_test.go @@ -6,12 +6,14 @@ import ( "encoding/hex" "fmt" "math/big" + "strings" "sync" "sync/atomic" "testing" "time" "github.com/multiversx/mx-chain-core-go/core" + coreAtomic "github.com/multiversx/mx-chain-core-go/core/atomic" "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-core-go/data/block" "github.com/multiversx/mx-chain-core-go/data/validator" @@ -22,8 +24,10 @@ import ( "github.com/multiversx/mx-chain-go/state" "github.com/multiversx/mx-chain-go/testscommon" 
"github.com/multiversx/mx-chain-go/testscommon/shardingMocks" + "github.com/multiversx/mx-chain-go/testscommon/stakingcommon" "github.com/pkg/errors" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestNewValidatorsProvider_WithNilValidatorStatisticsShouldErr(t *testing.T) { @@ -42,16 +46,36 @@ func TestNewValidatorsProvider_WithMaxRatingZeroShouldErr(t *testing.T) { assert.Nil(t, vp) } -func TestNewValidatorsProvider_WithNilValidatorPubkeyConverterShouldErr(t *testing.T) { +func TestNewValidatorsProvider_WithNilValidatorPubKeyConverterShouldErr(t *testing.T) { arg := createDefaultValidatorsProviderArg() - arg.PubKeyConverter = nil + arg.ValidatorPubKeyConverter = nil vp, err := NewValidatorsProvider(arg) - assert.Equal(t, process.ErrNilPubkeyConverter, err) + assert.True(t, errors.Is(err, process.ErrNilPubkeyConverter)) + assert.True(t, strings.Contains(err.Error(), "validator")) assert.True(t, check.IfNil(vp)) } -func TestNewValidatorsProvider_WithNilNodesCoordinatorrShouldErr(t *testing.T) { +func TestNewValidatorsProvider_WithNilAddressPubkeyConverterShouldErr(t *testing.T) { + arg := createDefaultValidatorsProviderArg() + arg.AddressPubKeyConverter = nil + vp, err := NewValidatorsProvider(arg) + + assert.True(t, errors.Is(err, process.ErrNilPubkeyConverter)) + assert.True(t, strings.Contains(err.Error(), "address")) + assert.True(t, check.IfNil(vp)) +} + +func TestNewValidatorsProvider_WithNilStakingDataProviderShouldErr(t *testing.T) { + arg := createDefaultValidatorsProviderArg() + arg.StakingDataProvider = nil + vp, err := NewValidatorsProvider(arg) + + assert.Equal(t, process.ErrNilStakingDataProvider, err) + assert.True(t, check.IfNil(vp)) +} + +func TestNewValidatorsProvider_WithNilNodesCoordinatorShouldErr(t *testing.T) { arg := createDefaultValidatorsProviderArg() arg.NodesCoordinator = nil vp, err := NewValidatorsProvider(arg) @@ -69,7 +93,7 @@ func TestNewValidatorsProvider_WithNilStartOfEpochTriggerShouldErr(t *testing.T) assert.True(t, check.IfNil(vp)) } -func TestNewValidatorsProvider_WithNilRefresCacheIntervalInSecShouldErr(t *testing.T) { +func TestNewValidatorsProvider_WithZeroRefreshCacheIntervalInSecShouldErr(t *testing.T) { arg := createDefaultValidatorsProviderArg() arg.CacheRefreshIntervalDurationInSec = 0 vp, err := NewValidatorsProvider(arg) @@ -78,25 +102,33 @@ func TestNewValidatorsProvider_WithNilRefresCacheIntervalInSecShouldErr(t *testi assert.True(t, check.IfNil(vp)) } +func TestNewValidatorsProvider_WithNilAuctionListSelectorShouldErr(t *testing.T) { + arg := createDefaultValidatorsProviderArg() + arg.AuctionListSelector = nil + vp, err := NewValidatorsProvider(arg) + + require.Nil(t, vp) + require.Equal(t, epochStart.ErrNilAuctionListSelector, err) +} + func TestValidatorsProvider_GetLatestValidatorsSecondHashDoesNotExist(t *testing.T) { mut := sync.Mutex{} root := []byte("rootHash") e := errors.Errorf("not ok") initialInfo := createMockValidatorInfo() - validatorInfos := map[uint32][]*state.ValidatorInfo{ - 0: {initialInfo}, - } + validatorInfos := state.NewShardValidatorsInfoMap() + _ = validatorInfos.Add(initialInfo) gotOk := false gotNil := false - vs := &mock.ValidatorStatisticsProcessorStub{ + vs := &testscommon.ValidatorStatisticsProcessorStub{ LastFinalizedRootHashCalled: func() (bytes []byte) { mut.Lock() defer mut.Unlock() return root }, - GetValidatorInfoForRootHashCalled: func(rootHash []byte) (m map[uint32][]*state.ValidatorInfo, err error) { + GetValidatorInfoForRootHashCalled: func(rootHash []byte) (m 
state.ShardValidatorsInfoMapHandler, err error) { mut.Lock() defer mut.Unlock() if bytes.Equal([]byte("rootHash"), rootHash) { @@ -167,10 +199,10 @@ func TestValidatorsProvider_CallsPopulateAndRegister(t *testing.T) { }, } - arg.ValidatorStatistics = &mock.ValidatorStatisticsProcessorStub{ - GetValidatorInfoForRootHashCalled: func(rootHash []byte) (map[uint32][]*state.ValidatorInfo, error) { + arg.ValidatorStatistics = &testscommon.ValidatorStatisticsProcessorStub{ + GetValidatorInfoForRootHashCalled: func(rootHash []byte) (state.ShardValidatorsInfoMapHandler, error) { atomic.AddInt32(&numPopulateCacheCalled, 1) - return nil, nil + return state.NewShardValidatorsInfoMap(), nil }, LastFinalizedRootHashCalled: func() []byte { return []byte("rootHash") @@ -189,12 +221,12 @@ func TestValidatorsProvider_UpdateCache_WithError(t *testing.T) { expectedErr := errors.New("expectedError") arg := createDefaultValidatorsProviderArg() - validatorProc := &mock.ValidatorStatisticsProcessorStub{ + validatorProc := &testscommon.ValidatorStatisticsProcessorStub{ LastFinalizedRootHashCalled: func() []byte { return []byte("rootHash") }, } - validatorProc.GetValidatorInfoForRootHashCalled = func(rootHash []byte) (map[uint32][]*state.ValidatorInfo, error) { + validatorProc.GetValidatorInfoForRootHashCalled = func(rootHash []byte) (state.ShardValidatorsInfoMapHandler, error) { return nil, expectedErr } @@ -213,7 +245,7 @@ func TestValidatorsProvider_UpdateCache_WithError(t *testing.T) { cacheRefreshIntervalDuration: arg.CacheRefreshIntervalDurationInSec, refreshCache: nil, lock: sync.RWMutex{}, - pubkeyConverter: testscommon.NewPubkeyConverterMock(32), + validatorPubKeyConverter: testscommon.NewPubkeyConverterMock(32), } vsp.updateCache() @@ -233,6 +265,8 @@ func TestValidatorsProvider_Cancel_startRefreshProcess(t *testing.T) { cacheRefreshIntervalDuration: arg.CacheRefreshIntervalDurationInSec, refreshCache: make(chan uint32), lock: sync.RWMutex{}, + stakingDataProvider: &stakingcommon.StakingDataProviderStub{}, + auctionListSelector: &stakingcommon.AuctionListSelectorStub{}, } ctx, cancelFunc := context.WithCancel(context.Background()) @@ -264,21 +298,20 @@ func TestValidatorsProvider_UpdateCache(t *testing.T) { pk := []byte("pk1") initialShardId := uint32(1) initialList := string(common.EligibleList) - validatorsMap := make(map[uint32][]*state.ValidatorInfo) - validatorsMap[initialShardId] = []*state.ValidatorInfo{ - { - PublicKey: pk, - List: initialList, - ShardId: initialShardId, - }, - } + validatorsMap := state.NewShardValidatorsInfoMap() + _ = validatorsMap.Add(&state.ValidatorInfo{ + PublicKey: pk, + List: initialList, + ShardId: initialShardId, + }) + arg := createDefaultValidatorsProviderArg() - validatorProc := &mock.ValidatorStatisticsProcessorStub{ + validatorProc := &testscommon.ValidatorStatisticsProcessorStub{ LastFinalizedRootHashCalled: func() []byte { return []byte("rootHash") }, } - validatorProc.GetValidatorInfoForRootHashCalled = func(rootHash []byte) (map[uint32][]*state.ValidatorInfo, error) { + validatorProc.GetValidatorInfoForRootHashCalled = func(rootHash []byte) (state.ShardValidatorsInfoMapHandler, error) { return validatorsMap, nil } @@ -288,16 +321,15 @@ func TestValidatorsProvider_UpdateCache(t *testing.T) { cache: nil, cacheRefreshIntervalDuration: arg.CacheRefreshIntervalDurationInSec, refreshCache: nil, - pubkeyConverter: testscommon.NewPubkeyConverterMock(32), + validatorPubKeyConverter: testscommon.NewPubkeyConverterMock(32), lock: sync.RWMutex{}, } vsp.updateCache() 
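These hunks migrate the tests from the raw map[uint32][]*state.ValidatorInfo to the state.ShardValidatorsInfoMapHandler built with state.NewShardValidatorsInfoMap(). A short sketch of the handler pattern as exercised in this file; it assumes the same state and common packages already imported here, with method names taken only from calls visible in this diff:

validatorsMap := state.NewShardValidatorsInfoMap()
_ = validatorsMap.Add(&state.ValidatorInfo{
	PublicKey: []byte("pk1"),
	ShardId:   1,
	List:      string(common.EligibleList),
})

// Entries stay grouped per shard when read back.
nodesInShard1 := validatorsMap.GetShardValidatorsInfoMap()[1] // one entry

// Replace swaps an entry in place, the way the AuctionListSelector stub later
// marks nodes as selected from the auction list.
selected := nodesInShard1[0].ShallowClone()
selected.SetList(string(common.SelectedFromAuctionList))
_ = validatorsMap.Replace(nodesInShard1[0], selected)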
assert.NotNil(t, vsp.cache) - assert.Equal(t, len(validatorsMap[initialShardId]), len(vsp.cache)) - encodedKey, err := arg.PubKeyConverter.Encode(pk) - assert.Nil(t, err) + assert.Equal(t, len(validatorsMap.GetShardValidatorsInfoMap()[initialShardId]), len(vsp.cache)) + encodedKey, _ := arg.ValidatorPubKeyConverter.Encode(pk) assert.NotNil(t, vsp.cache[encodedKey]) assert.Equal(t, initialList, vsp.cache[encodedKey].ValidatorStatus) assert.Equal(t, initialShardId, vsp.cache[encodedKey].ShardId) @@ -315,12 +347,9 @@ func TestValidatorsProvider_aggregatePType_equal(t *testing.T) { trieLeavingShardId := uint32(2) leavingList := string(common.LeavingList) - encodedEligible, err := pubKeyConverter.Encode(pkEligible) - assert.Nil(t, err) - encondedInactive, err := pubKeyConverter.Encode(pkInactive) - assert.Nil(t, err) - encodedLeaving, err := pubKeyConverter.Encode(pkLeaving) - assert.Nil(t, err) + encodedEligible, _ := pubKeyConverter.Encode(pkEligible) + encondedInactive, _ := pubKeyConverter.Encode(pkInactive) + encodedLeaving, _ := pubKeyConverter.Encode(pkLeaving) cache := make(map[string]*validator.ValidatorStatistics) cache[encondedInactive] = &validator.ValidatorStatistics{ValidatorStatus: inactiveList, ShardId: trieInctiveShardId} cache[encodedEligible] = &validator.ValidatorStatistics{ValidatorStatus: eligibleList, ShardId: trieEligibleShardId} @@ -335,7 +364,7 @@ func TestValidatorsProvider_aggregatePType_equal(t *testing.T) { } vp := validatorsProvider{ - pubkeyConverter: pubKeyConverter, + validatorPubKeyConverter: pubKeyConverter, } vp.aggregateLists(cache, validatorsMap, common.EligibleList) @@ -363,47 +392,41 @@ func TestValidatorsProvider_createCache(t *testing.T) { pkNew := []byte("pk5") newList := string(common.NewList) - validatorsMap := make(map[uint32][]*state.ValidatorInfo) + validatorsMap := state.NewShardValidatorsInfoMap() eligibleShardId := uint32(0) waitingShardId := uint32(1) leavingShardId := uint32(2) inactiveShardId := uint32(3) newShardId := core.MetachainShardId - validatorsMap[eligibleShardId] = []*state.ValidatorInfo{ - { - PublicKey: pkEligible, - ShardId: eligibleShardId, - List: eligibleList, - }, - } - validatorsMap[waitingShardId] = []*state.ValidatorInfo{ - { - PublicKey: pkWaiting, - ShardId: waitingShardId, - List: waitingList, - }, - } - validatorsMap[leavingShardId] = []*state.ValidatorInfo{ - { - PublicKey: pkLeaving, - ShardId: leavingShardId, - List: leavingList, - }, - } - validatorsMap[inactiveShardId] = []*state.ValidatorInfo{ - { - PublicKey: pkInactive, - ShardId: inactiveShardId, - List: inactiveList, - }, - } - validatorsMap[newShardId] = []*state.ValidatorInfo{ - { - PublicKey: pkNew, - ShardId: newShardId, - List: newList, - }, - } + _ = validatorsMap.Add(&state.ValidatorInfo{ + PublicKey: pkEligible, + ShardId: eligibleShardId, + List: eligibleList, + }) + _ = validatorsMap.Add(&state.ValidatorInfo{ + + PublicKey: pkWaiting, + ShardId: waitingShardId, + List: waitingList, + }) + _ = validatorsMap.Add(&state.ValidatorInfo{ + + PublicKey: pkLeaving, + ShardId: leavingShardId, + List: leavingList, + }) + _ = validatorsMap.Add(&state.ValidatorInfo{ + + PublicKey: pkInactive, + ShardId: inactiveShardId, + List: inactiveList, + }) + _ = validatorsMap.Add(&state.ValidatorInfo{ + + PublicKey: pkNew, + ShardId: newShardId, + List: newList, + }) arg := createDefaultValidatorsProviderArg() pubKeyConverter := testscommon.NewPubkeyConverterMock(32) vsp := validatorsProvider{ @@ -411,7 +434,7 @@ func TestValidatorsProvider_createCache(t *testing.T) { 
validatorStatistics: arg.ValidatorStatistics, cache: nil, cacheRefreshIntervalDuration: arg.CacheRefreshIntervalDurationInSec, - pubkeyConverter: pubKeyConverter, + validatorPubKeyConverter: pubKeyConverter, lock: sync.RWMutex{}, } @@ -419,26 +442,22 @@ func TestValidatorsProvider_createCache(t *testing.T) { assert.NotNil(t, cache) - encodedPkEligible, err := pubKeyConverter.Encode(pkEligible) - assert.Nil(t, err) + encodedPkEligible, _ := pubKeyConverter.Encode(pkEligible) assert.NotNil(t, cache[encodedPkEligible]) assert.Equal(t, eligibleList, cache[encodedPkEligible].ValidatorStatus) assert.Equal(t, eligibleShardId, cache[encodedPkEligible].ShardId) - encodedPkWaiting, err := pubKeyConverter.Encode(pkWaiting) - assert.Nil(t, err) + encodedPkWaiting, _ := pubKeyConverter.Encode(pkWaiting) assert.NotNil(t, cache[encodedPkWaiting]) assert.Equal(t, waitingList, cache[encodedPkWaiting].ValidatorStatus) assert.Equal(t, waitingShardId, cache[encodedPkWaiting].ShardId) - encodedPkLeaving, err := pubKeyConverter.Encode(pkLeaving) - assert.Nil(t, err) + encodedPkLeaving, _ := pubKeyConverter.Encode(pkLeaving) assert.NotNil(t, cache[encodedPkLeaving]) assert.Equal(t, leavingList, cache[encodedPkLeaving].ValidatorStatus) assert.Equal(t, leavingShardId, cache[encodedPkLeaving].ShardId) - encodedPkNew, err := pubKeyConverter.Encode(pkNew) - assert.Nil(t, err) + encodedPkNew, _ := pubKeyConverter.Encode(pkNew) assert.NotNil(t, cache[encodedPkNew]) assert.Equal(t, newList, cache[encodedPkNew].ValidatorStatus) assert.Equal(t, newShardId, cache[encodedPkNew].ShardId) @@ -452,31 +471,25 @@ func TestValidatorsProvider_createCache_combined(t *testing.T) { pkLeavingInTrie := []byte("pk3") leavingList := string(common.LeavingList) - validatorsMap := make(map[uint32][]*state.ValidatorInfo) + validatorsMap := state.NewShardValidatorsInfoMap() eligibleShardId := uint32(0) inactiveShardId := uint32(1) leavingShardId := uint32(2) - validatorsMap[eligibleShardId] = []*state.ValidatorInfo{ - { - PublicKey: pkEligibleInTrie, - ShardId: eligibleShardId, - List: eligibleList, - }, - } - validatorsMap[inactiveShardId] = []*state.ValidatorInfo{ - { - PublicKey: pkInactive, - ShardId: inactiveShardId, - List: inactiveList, - }, - } - validatorsMap[leavingShardId] = []*state.ValidatorInfo{ - { - PublicKey: pkLeavingInTrie, - ShardId: leavingShardId, - List: leavingList, - }, - } + _ = validatorsMap.Add(&state.ValidatorInfo{ + PublicKey: pkEligibleInTrie, + ShardId: eligibleShardId, + List: eligibleList, + }) + _ = validatorsMap.Add(&state.ValidatorInfo{ + PublicKey: pkInactive, + ShardId: inactiveShardId, + List: inactiveList, + }) + _ = validatorsMap.Add(&state.ValidatorInfo{ + PublicKey: pkLeavingInTrie, + ShardId: leavingShardId, + List: leavingList, + }) arg := createDefaultValidatorsProviderArg() nodesCoordinator := shardingMocks.NewNodesCoordinatorMock() nodesCoordinatorEligibleShardId := uint32(5) @@ -491,7 +504,7 @@ func TestValidatorsProvider_createCache_combined(t *testing.T) { vsp := validatorsProvider{ nodesCoordinator: nodesCoordinator, validatorStatistics: arg.ValidatorStatistics, - pubkeyConverter: arg.PubKeyConverter, + validatorPubKeyConverter: arg.ValidatorPubKeyConverter, cache: nil, cacheRefreshIntervalDuration: arg.CacheRefreshIntervalDurationInSec, lock: sync.RWMutex{}, @@ -499,14 +512,12 @@ func TestValidatorsProvider_createCache_combined(t *testing.T) { cache := vsp.createNewCache(0, validatorsMap) - encodedPkEligible, err := arg.PubKeyConverter.Encode(pkEligibleInTrie) - assert.Nil(t, err) + 
encodedPkEligible, _ := arg.ValidatorPubKeyConverter.Encode(pkEligibleInTrie) assert.NotNil(t, cache[encodedPkEligible]) assert.Equal(t, eligibleList, cache[encodedPkEligible].ValidatorStatus) assert.Equal(t, nodesCoordinatorEligibleShardId, cache[encodedPkEligible].ShardId) - encodedPkLeavingInTrie, err := arg.PubKeyConverter.Encode(pkLeavingInTrie) - assert.Nil(t, err) + encodedPkLeavingInTrie, _ := arg.ValidatorPubKeyConverter.Encode(pkLeavingInTrie) computedPeerType := fmt.Sprintf(common.CombinedPeerType, common.EligibleList, common.LeavingList) assert.NotNil(t, cache[encodedPkLeavingInTrie]) assert.Equal(t, computedPeerType, cache[encodedPkLeavingInTrie].ValidatorStatus) @@ -519,14 +530,14 @@ func TestValidatorsProvider_CallsPopulateOnlyAfterTimeout(t *testing.T) { arg := createDefaultValidatorsProviderArg() arg.CacheRefreshIntervalDurationInSec = time.Millisecond * 10 - validatorStatisticsProcessor := &mock.ValidatorStatisticsProcessorStub{ + validatorStatisticsProcessor := &testscommon.ValidatorStatisticsProcessorStub{ LastFinalizedRootHashCalled: func() []byte { return []byte("rootHash") }, } - validatorStatisticsProcessor.GetValidatorInfoForRootHashCalled = func(rootHash []byte) (map[uint32][]*state.ValidatorInfo, error) { + validatorStatisticsProcessor.GetValidatorInfoForRootHashCalled = func(rootHash []byte) (state.ShardValidatorsInfoMapHandler, error) { atomic.AddInt32(populateCacheCalled, 1) - return nil, nil + return state.NewShardValidatorsInfoMap(), nil } arg.ValidatorStatistics = validatorStatisticsProcessor @@ -560,31 +571,29 @@ func TestValidatorsProvider_CallsUpdateCacheOnEpochChange(t *testing.T) { arg.CacheRefreshIntervalDurationInSec = 5 * time.Millisecond pkEligibleInTrie := []byte("pk1") - validatorStatisticsProcessor := &mock.ValidatorStatisticsProcessorStub{ + validatorStatisticsProcessor := &testscommon.ValidatorStatisticsProcessorStub{ LastFinalizedRootHashCalled: func() []byte { return []byte("rootHash") }, } - validatorStatisticsProcessor.GetValidatorInfoForRootHashCalled = func(rootHash []byte) (map[uint32][]*state.ValidatorInfo, error) { + validatorStatisticsProcessor.GetValidatorInfoForRootHashCalled = func(rootHash []byte) (state.ShardValidatorsInfoMapHandler, error) { callNumber++ // first call comes from the constructor if callNumber == 1 { - return nil, nil + return state.NewShardValidatorsInfoMap(), nil } - return map[uint32][]*state.ValidatorInfo{ - 0: { - { - PublicKey: pkEligibleInTrie, - List: string(common.EligibleList), - }, - }, - }, nil + validatorsMap := state.NewShardValidatorsInfoMap() + _ = validatorsMap.Add(&state.ValidatorInfo{ + ShardId: 0, + PublicKey: pkEligibleInTrie, + List: string(common.EligibleList), + }) + return validatorsMap, nil } arg.ValidatorStatistics = validatorStatisticsProcessor vsp, _ := NewValidatorsProvider(arg) - encodedEligible, err := arg.PubKeyConverter.Encode(pkEligibleInTrie) - assert.Nil(t, err) + encodedEligible, _ := arg.ValidatorPubKeyConverter.Encode(pkEligibleInTrie) assert.Equal(t, 0, len(vsp.GetCache())) // nothing in cache epochStartNotifier.NotifyAll(&block.Header{Nonce: 1, ShardID: 2, Round: 3}) time.Sleep(arg.CacheRefreshIntervalDurationInSec) @@ -600,31 +609,29 @@ func TestValidatorsProvider_DoesntCallUpdateUpdateCacheWithoutRequests(t *testin arg.CacheRefreshIntervalDurationInSec = 5 * time.Millisecond pkEligibleInTrie := []byte("pk1") - validatorStatisticsProcessor := &mock.ValidatorStatisticsProcessorStub{ + validatorStatisticsProcessor := &testscommon.ValidatorStatisticsProcessorStub{ 
LastFinalizedRootHashCalled: func() []byte { return []byte("rootHash") }, } - validatorStatisticsProcessor.GetValidatorInfoForRootHashCalled = func(rootHash []byte) (map[uint32][]*state.ValidatorInfo, error) { + validatorStatisticsProcessor.GetValidatorInfoForRootHashCalled = func(rootHash []byte) (state.ShardValidatorsInfoMapHandler, error) { callNumber++ // first call comes from the constructor if callNumber == 1 { - return nil, nil + return state.NewShardValidatorsInfoMap(), nil } - return map[uint32][]*state.ValidatorInfo{ - 0: { - { - PublicKey: pkEligibleInTrie, - List: string(common.EligibleList), - }, - }, - }, nil + validatorsMap := state.NewShardValidatorsInfoMap() + _ = validatorsMap.Add(&state.ValidatorInfo{ + ShardId: 0, + PublicKey: pkEligibleInTrie, + List: string(common.EligibleList), + }) + return validatorsMap, nil } arg.ValidatorStatistics = validatorStatisticsProcessor vsp, _ := NewValidatorsProvider(arg) - encodedEligible, err := arg.PubKeyConverter.Encode(pkEligibleInTrie) - assert.Nil(t, err) + encodedEligible, _ := arg.ValidatorPubKeyConverter.Encode(pkEligibleInTrie) assert.Equal(t, 0, len(vsp.GetCache())) // nothing in cache time.Sleep(arg.CacheRefreshIntervalDurationInSec) assert.Equal(t, 0, len(vsp.GetCache())) // nothing in cache @@ -636,6 +643,409 @@ func TestValidatorsProvider_DoesntCallUpdateUpdateCacheWithoutRequests(t *testin assert.Equal(t, 1, len(resp)) assert.NotNil(t, vsp.GetCache()[encodedEligible]) } + +func TestValidatorsProvider_GetAuctionList(t *testing.T) { + t.Parallel() + + t.Run("error getting root hash", func(t *testing.T) { + t.Parallel() + args := createDefaultValidatorsProviderArg() + args.ValidatorStatistics = &testscommon.ValidatorStatisticsProcessorStub{ + LastFinalizedRootHashCalled: func() []byte { + return nil + }, + } + vp, _ := NewValidatorsProvider(args) + time.Sleep(args.CacheRefreshIntervalDurationInSec) + + list, err := vp.GetAuctionList() + require.Nil(t, list) + require.Equal(t, state.ErrNilRootHash, err) + }) + + t.Run("error getting validators info for root hash", func(t *testing.T) { + t.Parallel() + args := createDefaultValidatorsProviderArg() + expectedErr := errors.New("local error") + expectedRootHash := []byte("root hash") + args.ValidatorStatistics = &testscommon.ValidatorStatisticsProcessorStub{ + LastFinalizedRootHashCalled: func() []byte { + return expectedRootHash + }, + GetValidatorInfoForRootHashCalled: func(rootHash []byte) (state.ShardValidatorsInfoMapHandler, error) { + require.Equal(t, expectedRootHash, rootHash) + return nil, expectedErr + }, + } + vp, _ := NewValidatorsProvider(args) + time.Sleep(args.CacheRefreshIntervalDurationInSec) + + list, err := vp.GetAuctionList() + require.Nil(t, list) + require.Equal(t, expectedErr, err) + }) + + t.Run("error filling validator info, staking data provider cache should be cleaned", func(t *testing.T) { + t.Parallel() + args := createDefaultValidatorsProviderArg() + + cleanCalled := &coreAtomic.Flag{} + expectedValidator := &state.ValidatorInfo{PublicKey: []byte("pubKey"), List: string(common.AuctionList)} + expectedErr := errors.New("local error") + expectedRootHash := []byte("root hash") + args.ValidatorStatistics = &testscommon.ValidatorStatisticsProcessorStub{ + LastFinalizedRootHashCalled: func() []byte { + return expectedRootHash + }, + GetValidatorInfoForRootHashCalled: func(rootHash []byte) (state.ShardValidatorsInfoMapHandler, error) { + require.Equal(t, expectedRootHash, rootHash) + validatorsMap := state.NewShardValidatorsInfoMap() + _ = 
validatorsMap.Add(expectedValidator) + return validatorsMap, nil + }, + } + args.StakingDataProvider = &stakingcommon.StakingDataProviderStub{ + FillValidatorInfoCalled: func(validator state.ValidatorInfoHandler) error { + require.Equal(t, expectedValidator, validator) + return expectedErr + }, + CleanCalled: func() { + cleanCalled.SetValue(true) + }, + } + vp, _ := NewValidatorsProvider(args) + time.Sleep(args.CacheRefreshIntervalDurationInSec) + + list, err := vp.GetAuctionList() + require.Nil(t, list) + require.Equal(t, expectedErr, err) + require.True(t, cleanCalled.IsSet()) + }) + + t.Run("error selecting nodes from auction, staking data provider cache should be cleaned", func(t *testing.T) { + t.Parallel() + args := createDefaultValidatorsProviderArg() + + cleanCalled := &coreAtomic.Flag{} + expectedErr := errors.New("local error") + args.AuctionListSelector = &stakingcommon.AuctionListSelectorStub{ + SelectNodesFromAuctionListCalled: func(validatorsInfoMap state.ShardValidatorsInfoMapHandler, randomness []byte) error { + return expectedErr + }, + } + args.StakingDataProvider = &stakingcommon.StakingDataProviderStub{ + CleanCalled: func() { + cleanCalled.SetValue(true) + }, + } + vp, _ := NewValidatorsProvider(args) + time.Sleep(args.CacheRefreshIntervalDurationInSec) + + list, err := vp.GetAuctionList() + require.Nil(t, list) + require.Equal(t, expectedErr, err) + require.True(t, cleanCalled.IsSet()) + }) + + t.Run("empty list, check normal flow is executed", func(t *testing.T) { + t.Parallel() + args := createDefaultValidatorsProviderArg() + + expectedRootHash := []byte("root hash") + ctRootHashCalled := uint32(0) + ctGetValidatorsInfoForRootHash := uint32(0) + ctSelectNodesFromAuctionList := uint32(0) + ctFillValidatorInfoCalled := uint32(0) + ctGetOwnersDataCalled := uint32(0) + ctComputeUnqualifiedNodes := uint32(0) + + args.ValidatorStatistics = &testscommon.ValidatorStatisticsProcessorStub{ + LastFinalizedRootHashCalled: func() []byte { + atomic.AddUint32(&ctRootHashCalled, 1) + return expectedRootHash + }, + GetValidatorInfoForRootHashCalled: func(rootHash []byte) (state.ShardValidatorsInfoMapHandler, error) { + atomic.AddUint32(&ctGetValidatorsInfoForRootHash, 1) + require.Equal(t, expectedRootHash, rootHash) + return state.NewShardValidatorsInfoMap(), nil + }, + } + args.AuctionListSelector = &stakingcommon.AuctionListSelectorStub{ + SelectNodesFromAuctionListCalled: func(validatorsInfoMap state.ShardValidatorsInfoMapHandler, randomness []byte) error { + atomic.AddUint32(&ctSelectNodesFromAuctionList, 1) + require.Equal(t, expectedRootHash, randomness) + return nil + }, + } + args.StakingDataProvider = &stakingcommon.StakingDataProviderStub{ + FillValidatorInfoCalled: func(validator state.ValidatorInfoHandler) error { + atomic.AddUint32(&ctFillValidatorInfoCalled, 1) + return nil + }, + GetOwnersDataCalled: func() map[string]*epochStart.OwnerData { + atomic.AddUint32(&ctGetOwnersDataCalled, 1) + return nil + }, + ComputeUnQualifiedNodesCalled: func(validatorInfos state.ShardValidatorsInfoMapHandler) ([][]byte, map[string][][]byte, error) { + atomic.AddUint32(&ctComputeUnqualifiedNodes, 1) + return nil, nil, nil + }, + } + vp, _ := NewValidatorsProvider(args) + time.Sleep(args.CacheRefreshIntervalDurationInSec) + + list, err := vp.GetAuctionList() + require.Nil(t, err) + require.Empty(t, list) + require.Equal(t, ctRootHashCalled, uint32(2)) // another call is from constructor in startRefreshProcess.updateCache + require.Equal(t, ctGetValidatorsInfoForRootHash, uint32(2)) // 
another call is from constructor in startRefreshProcess.updateCache + require.Equal(t, ctFillValidatorInfoCalled, uint32(0)) + require.Equal(t, ctGetOwnersDataCalled, uint32(1)) + require.Equal(t, ctComputeUnqualifiedNodes, uint32(1)) + require.Equal(t, expectedRootHash, vp.cachedRandomness) + }) + + t.Run("normal flow, check data is correctly computed", func(t *testing.T) { + t.Parallel() + args := createDefaultValidatorsProviderArg() + + v1 := &state.ValidatorInfo{PublicKey: []byte("pk1"), List: string(common.AuctionList)} + v2 := &state.ValidatorInfo{PublicKey: []byte("pk2"), List: string(common.AuctionList)} + v3 := &state.ValidatorInfo{PublicKey: []byte("pk3"), List: string(common.AuctionList)} + v4 := &state.ValidatorInfo{PublicKey: []byte("pk4"), List: string(common.AuctionList)} + v5 := &state.ValidatorInfo{PublicKey: []byte("pk5"), List: string(common.AuctionList)} + v6 := &state.ValidatorInfo{PublicKey: []byte("pk6"), List: string(common.AuctionList)} + v7 := &state.ValidatorInfo{PublicKey: []byte("pk7"), List: string(common.EligibleList)} + v8 := &state.ValidatorInfo{PublicKey: []byte("pk8"), List: string(common.WaitingList)} + v9 := &state.ValidatorInfo{PublicKey: []byte("pk9"), List: string(common.LeavingList)} + v10 := &state.ValidatorInfo{PublicKey: []byte("pk10"), List: string(common.JailedList)} + v11 := &state.ValidatorInfo{PublicKey: []byte("pk11"), List: string(common.AuctionList)} + v12 := &state.ValidatorInfo{PublicKey: []byte("pk12"), List: string(common.AuctionList)} + + owner1 := "owner1" + owner2 := "owner2" + owner3 := "owner3" + owner4 := "owner4" + owner5 := "owner5" + owner6 := "owner6" + owner7 := "owner7" + ownersData := map[string]*epochStart.OwnerData{ + owner1: { + NumStakedNodes: 3, + NumActiveNodes: 1, + TotalTopUp: big.NewInt(7500), + TopUpPerNode: big.NewInt(2500), + AuctionList: []state.ValidatorInfoHandler{v1, v2}, // owner1 will have v1 & v2 selected + Qualified: true, // with qualifiedTopUp = 2500 + }, + owner2: { + NumStakedNodes: 3, + NumActiveNodes: 1, + TotalTopUp: big.NewInt(3000), + TopUpPerNode: big.NewInt(1000), + AuctionList: []state.ValidatorInfoHandler{v3, v4}, // owner2 will have v3 selected + Qualified: true, // with qualifiedTopUp = 1500 + }, + owner3: { + NumStakedNodes: 2, + NumActiveNodes: 0, + TotalTopUp: big.NewInt(4000), + TopUpPerNode: big.NewInt(2000), + AuctionList: []state.ValidatorInfoHandler{v5, v6}, // owner3 will have v5 selected + Qualified: true, // with qualifiedTopUp = 4000 + }, + owner4: { + NumStakedNodes: 3, + NumActiveNodes: 2, + TotalTopUp: big.NewInt(0), + TopUpPerNode: big.NewInt(0), + AuctionList: []state.ValidatorInfoHandler{v7}, // owner4 has one node in auction, but is not qualified + Qualified: false, // should be sent at the bottom of the list + }, + owner5: { + NumStakedNodes: 5, + NumActiveNodes: 5, + TotalTopUp: big.NewInt(5000), + TopUpPerNode: big.NewInt(1000), + AuctionList: []state.ValidatorInfoHandler{}, // owner5 has no nodes in auction, will not appear in API list + Qualified: true, + }, + // owner6 has same stats as owner7. 
After selection, owner7 will have its node selected => should be listed above owner 6 + owner6: { + NumStakedNodes: 1, + NumActiveNodes: 0, + TotalTopUp: big.NewInt(0), + TopUpPerNode: big.NewInt(0), + AuctionList: []state.ValidatorInfoHandler{v11}, + Qualified: true, // should be added + }, + owner7: { + NumStakedNodes: 1, + NumActiveNodes: 0, + TotalTopUp: big.NewInt(0), + TopUpPerNode: big.NewInt(0), + AuctionList: []state.ValidatorInfoHandler{v12}, + Qualified: true, + }, + } + + validatorsMap := state.NewShardValidatorsInfoMap() + _ = validatorsMap.Add(v1) + _ = validatorsMap.Add(v2) + _ = validatorsMap.Add(v3) + _ = validatorsMap.Add(v4) + _ = validatorsMap.Add(v5) + _ = validatorsMap.Add(v6) + _ = validatorsMap.Add(v7) + _ = validatorsMap.Add(v8) + _ = validatorsMap.Add(v9) + _ = validatorsMap.Add(v10) + _ = validatorsMap.Add(v11) + _ = validatorsMap.Add(v12) + + rootHash := []byte("root hash") + args.ValidatorStatistics = &testscommon.ValidatorStatisticsProcessorStub{ + LastFinalizedRootHashCalled: func() []byte { + return rootHash + }, + GetValidatorInfoForRootHashCalled: func(rootHash []byte) (state.ShardValidatorsInfoMapHandler, error) { + return validatorsMap, nil + }, + } + args.AuctionListSelector = &stakingcommon.AuctionListSelectorStub{ + SelectNodesFromAuctionListCalled: func(validatorsInfoMap state.ShardValidatorsInfoMapHandler, randomness []byte) error { + selectedV1 := v1.ShallowClone() + selectedV1.SetList(string(common.SelectedFromAuctionList)) + _ = validatorsInfoMap.Replace(v1, selectedV1) + + selectedV2 := v2.ShallowClone() + selectedV2.SetList(string(common.SelectedFromAuctionList)) + _ = validatorsInfoMap.Replace(v2, selectedV2) + + selectedV3 := v3.ShallowClone() + selectedV3.SetList(string(common.SelectedFromAuctionList)) + _ = validatorsInfoMap.Replace(v3, selectedV3) + + selectedV5 := v5.ShallowClone() + selectedV5.SetList(string(common.SelectedFromAuctionList)) + _ = validatorsInfoMap.Replace(v5, selectedV5) + + selectedV12 := v12.ShallowClone() + selectedV12.SetList(string(common.SelectedFromAuctionList)) + _ = validatorsInfoMap.Replace(v12, selectedV12) + + return nil + }, + } + args.StakingDataProvider = &stakingcommon.StakingDataProviderStub{ + GetOwnersDataCalled: func() map[string]*epochStart.OwnerData { + return ownersData + }, + } + + vp, _ := NewValidatorsProvider(args) + time.Sleep(args.CacheRefreshIntervalDurationInSec) + + expectedList := []*common.AuctionListValidatorAPIResponse{ + { + Owner: args.AddressPubKeyConverter.SilentEncode([]byte(owner3), log), + NumStakedNodes: 2, + TotalTopUp: "4000", + TopUpPerNode: "2000", + QualifiedTopUp: "4000", + Nodes: []*common.AuctionNode{ + { + BlsKey: args.ValidatorPubKeyConverter.SilentEncode(v5.PublicKey, log), + Qualified: true, + }, + { + BlsKey: args.ValidatorPubKeyConverter.SilentEncode(v6.PublicKey, log), + Qualified: false, + }, + }, + }, + { + Owner: args.AddressPubKeyConverter.SilentEncode([]byte(owner1), log), + NumStakedNodes: 3, + TotalTopUp: "7500", + TopUpPerNode: "2500", + QualifiedTopUp: "2500", + Nodes: []*common.AuctionNode{ + { + BlsKey: args.ValidatorPubKeyConverter.SilentEncode(v1.PublicKey, log), + Qualified: true, + }, + { + BlsKey: args.ValidatorPubKeyConverter.SilentEncode(v2.PublicKey, log), + Qualified: true, + }, + }, + }, + { + Owner: args.AddressPubKeyConverter.SilentEncode([]byte(owner2), log), + NumStakedNodes: 3, + TotalTopUp: "3000", + TopUpPerNode: "1000", + QualifiedTopUp: "1500", + Nodes: []*common.AuctionNode{ + { + BlsKey: 
args.ValidatorPubKeyConverter.SilentEncode(v3.PublicKey, log), + Qualified: true, + }, + { + BlsKey: args.ValidatorPubKeyConverter.SilentEncode(v4.PublicKey, log), + Qualified: false, + }, + }, + }, + { + Owner: args.AddressPubKeyConverter.SilentEncode([]byte(owner7), log), + NumStakedNodes: 1, + TotalTopUp: "0", + TopUpPerNode: "0", + QualifiedTopUp: "0", + Nodes: []*common.AuctionNode{ + { + BlsKey: args.ValidatorPubKeyConverter.SilentEncode(v12.PublicKey, log), + Qualified: true, + }, + }, + }, + { + Owner: args.AddressPubKeyConverter.SilentEncode([]byte(owner6), log), + NumStakedNodes: 1, + TotalTopUp: "0", + TopUpPerNode: "0", + QualifiedTopUp: "0", + Nodes: []*common.AuctionNode{ + { + BlsKey: args.ValidatorPubKeyConverter.SilentEncode(v11.PublicKey, log), + Qualified: false, + }, + }, + }, + { + Owner: args.AddressPubKeyConverter.SilentEncode([]byte(owner4), log), + NumStakedNodes: 3, + TotalTopUp: "0", + TopUpPerNode: "0", + QualifiedTopUp: "0", + Nodes: []*common.AuctionNode{ + { + BlsKey: args.ValidatorPubKeyConverter.SilentEncode(v7.PublicKey, log), + Qualified: false, + }, + }, + }, + } + + list, err := vp.GetAuctionList() + require.Nil(t, err) + require.Equal(t, expectedList, list) + }) + +} + func createMockValidatorInfo() *state.ValidatorInfo { initialInfo := &state.ValidatorInfo{ PublicKey: []byte("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"), @@ -675,13 +1085,16 @@ func createDefaultValidatorsProviderArg() ArgValidatorsProvider { NodesCoordinator: &shardingMocks.NodesCoordinatorMock{}, StartEpoch: 1, EpochStartEventNotifier: &mock.EpochStartNotifierStub{}, + StakingDataProvider: &stakingcommon.StakingDataProviderStub{}, CacheRefreshIntervalDurationInSec: 1 * time.Millisecond, - ValidatorStatistics: &mock.ValidatorStatisticsProcessorStub{ + ValidatorStatistics: &testscommon.ValidatorStatisticsProcessorStub{ LastFinalizedRootHashCalled: func() []byte { return []byte("rootHash") }, }, - MaxRating: 100, - PubKeyConverter: testscommon.NewPubkeyConverterMock(32), + MaxRating: 100, + ValidatorPubKeyConverter: testscommon.NewPubkeyConverterMock(32), + AddressPubKeyConverter: testscommon.NewPubkeyConverterMock(32), + AuctionListSelector: &stakingcommon.AuctionListSelectorStub{}, } } diff --git a/process/rating/chance.go b/process/rating/chance.go index 8ad3c092cec..71233ba3d3e 100644 --- a/process/rating/chance.go +++ b/process/rating/chance.go @@ -9,17 +9,17 @@ type selectionChance struct { chancePercentage uint32 } -//GetMaxThreshold returns the maxThreshold until this ChancePercentage holds +// GetMaxThreshold returns the maxThreshold until this ChancePercentage holds func (bsr *selectionChance) GetMaxThreshold() uint32 { return bsr.maxThreshold } -//GetChancePercentage returns the percentage for the RatingChance +// GetChancePercentage returns the percentage for the RatingChance func (bsr *selectionChance) GetChancePercentage() uint32 { return bsr.chancePercentage } -//IsInterfaceNil verifies if the interface is nil +// IsInterfaceNil verifies if the interface is nil func (bsr *selectionChance) IsInterfaceNil() bool { return bsr == nil } diff --git a/process/rating/disabledRatingReader.go b/process/rating/disabledRatingReader.go index 8b7ac6662c1..b57f06b2dca 100644 --- a/process/rating/disabledRatingReader.go +++ b/process/rating/disabledRatingReader.go @@ -10,17 +10,17 @@ func NewDisabledRatingReader(startRating uint32) *disabledRatingReader { return &disabledRatingReader{startRating: startRating} } -//GetRating gets the 
rating for the public key +// GetRating gets the rating for the public key func (rr *disabledRatingReader) GetRating(string) uint32 { return rr.startRating } -//UpdateRatingFromTempRating sets the new rating to the value of the tempRating +// UpdateRatingFromTempRating sets the new rating to the value of the tempRating func (rr *disabledRatingReader) UpdateRatingFromTempRating([]string) error { return nil } -//IsInterfaceNil verifies if the interface is nil +// IsInterfaceNil verifies if the interface is nil func (rr *disabledRatingReader) IsInterfaceNil() bool { return rr == nil } diff --git a/process/scToProtocol/stakingToPeer.go b/process/scToProtocol/stakingToPeer.go index af8bc00d688..e9b166b52ea 100644 --- a/process/scToProtocol/stakingToPeer.go +++ b/process/scToProtocol/stakingToPeer.go @@ -230,16 +230,17 @@ func (stp *stakingToPeer) updatePeerStateV1( isValidator := account.GetList() == string(common.EligibleList) || account.GetList() == string(common.WaitingList) isJailed := stakingData.JailedNonce >= stakingData.UnJailedNonce && stakingData.JailedNonce > 0 + isStakingV4Started := stp.enableEpochsHandler.IsFlagEnabled(common.StakingV4StartedFlag) if !isJailed { if stakingData.StakedNonce == nonce && !isValidator { - account.SetListAndIndex(account.GetShardId(), string(common.NewList), uint32(stakingData.RegisterNonce)) + account.SetListAndIndex(account.GetShardId(), string(common.NewList), uint32(stakingData.RegisterNonce), isStakingV4Started) account.SetTempRating(stp.startRating) account.SetUnStakedEpoch(common.DefaultUnstakedEpoch) } if stakingData.UnStakedNonce == nonce && account.GetList() != string(common.InactiveList) { - account.SetListAndIndex(account.GetShardId(), string(common.LeavingList), uint32(stakingData.UnStakedNonce)) + account.SetListAndIndex(account.GetShardId(), string(common.LeavingList), uint32(stakingData.UnStakedNonce), isStakingV4Started) account.SetUnStakedEpoch(stakingData.UnStakedEpoch) } } @@ -250,7 +251,7 @@ func (stp *stakingToPeer) updatePeerStateV1( } if !isValidator && account.GetUnStakedEpoch() == common.DefaultUnstakedEpoch { - account.SetListAndIndex(account.GetShardId(), string(common.NewList), uint32(stakingData.UnJailedNonce)) + account.SetListAndIndex(account.GetShardId(), string(common.NewList), uint32(stakingData.UnJailedNonce), isStakingV4Started) } } @@ -276,11 +277,13 @@ func (stp *stakingToPeer) updatePeerState( return err } + isStakingV4Started := stp.enableEpochsHandler.IsFlagEnabled(common.StakingV4StartedFlag) + isUnJailForInactive := !isNew && !stakingData.Staked && stakingData.UnJailedNonce == nonce && account.GetList() == string(common.JailedList) if isUnJailForInactive { log.Debug("unJail for inactive node changed status to inactive list", "blsKey", account.AddressBytes(), "unStakedEpoch", stakingData.UnStakedEpoch) - account.SetListAndIndex(account.GetShardId(), string(common.InactiveList), uint32(stakingData.UnJailedNonce)) + account.SetListAndIndex(account.GetShardId(), string(common.InactiveList), uint32(stakingData.UnJailedNonce), isStakingV4Started) if account.GetTempRating() < stp.unJailRating { account.SetTempRating(stp.unJailRating) } @@ -313,18 +316,23 @@ func (stp *stakingToPeer) updatePeerState( log.Debug("new node", "blsKey", blsPubKey) } + newNodesList := common.NewList + if isStakingV4Started { + newNodesList = common.AuctionList + } + isValidator := account.GetList() == string(common.EligibleList) || account.GetList() == string(common.WaitingList) if !stakingData.Jailed { if stakingData.StakedNonce == nonce 
&& !isValidator { - log.Debug("node is staked, changed status to new", "blsKey", blsPubKey) - account.SetListAndIndex(account.GetShardId(), string(common.NewList), uint32(stakingData.StakedNonce)) + log.Debug("node is staked, changed status to", "list", newNodesList, "blsKey", blsPubKey) + account.SetListAndIndex(account.GetShardId(), string(newNodesList), uint32(stakingData.StakedNonce), isStakingV4Started) account.SetTempRating(stp.startRating) account.SetUnStakedEpoch(common.DefaultUnstakedEpoch) } if stakingData.UnStakedNonce == nonce && account.GetList() != string(common.InactiveList) { log.Debug("node is unStaked, changed status to leaving list", "blsKey", blsPubKey) - account.SetListAndIndex(account.GetShardId(), string(common.LeavingList), uint32(stakingData.UnStakedNonce)) + account.SetListAndIndex(account.GetShardId(), string(common.LeavingList), uint32(stakingData.UnStakedNonce), isStakingV4Started) account.SetUnStakedEpoch(stakingData.UnStakedEpoch) } } @@ -337,20 +345,20 @@ func (stp *stakingToPeer) updatePeerState( isNewValidator := !isValidator && stakingData.Staked if isNewValidator { - log.Debug("node is unJailed and staked, changing status to new list", "blsKey", blsPubKey) - account.SetListAndIndex(account.GetShardId(), string(common.NewList), uint32(stakingData.UnJailedNonce)) + log.Debug("node is unJailed and staked, changing status to", "list", newNodesList, "blsKey", blsPubKey) + account.SetListAndIndex(account.GetShardId(), string(newNodesList), uint32(stakingData.UnJailedNonce), isStakingV4Started) } if account.GetList() == string(common.JailedList) { log.Debug("node is unJailed and not staked, changing status to inactive list", "blsKey", blsPubKey) - account.SetListAndIndex(account.GetShardId(), string(common.InactiveList), uint32(stakingData.UnJailedNonce)) + account.SetListAndIndex(account.GetShardId(), string(common.InactiveList), uint32(stakingData.UnJailedNonce), isStakingV4Started) account.SetUnStakedEpoch(stakingData.UnStakedEpoch) } } if stakingData.JailedNonce == nonce && account.GetList() != string(common.InactiveList) { log.Debug("node is jailed, setting status to leaving", "blsKey", blsPubKey) - account.SetListAndIndex(account.GetShardId(), string(common.LeavingList), uint32(stakingData.JailedNonce)) + account.SetListAndIndex(account.GetShardId(), string(common.LeavingList), uint32(stakingData.JailedNonce), isStakingV4Started) account.SetTempRating(stp.jailRating) } diff --git a/process/scToProtocol/stakingToPeer_test.go b/process/scToProtocol/stakingToPeer_test.go index 4ac4a2fa081..f53495e92c9 100644 --- a/process/scToProtocol/stakingToPeer_test.go +++ b/process/scToProtocol/stakingToPeer_test.go @@ -673,8 +673,10 @@ func TestStakingToPeer_UpdatePeerState(t *testing.T) { }, } + enableEpochsHandler := enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.StakeFlag, common.ValidatorToDelegationFlag) arguments := createMockArgumentsNewStakingToPeer() arguments.PeerState = peerAccountsDB + arguments.EnableEpochsHandler = enableEpochsHandler stp, _ := NewStakingToPeer(arguments) stakingData := systemSmartContracts.StakedDataV2_0{ @@ -703,11 +705,19 @@ func TestStakingToPeer_UpdatePeerState(t *testing.T) { assert.True(t, bytes.Equal(stakingData.RewardAddress, peerAccount.GetRewardAddress())) assert.Equal(t, string(common.NewList), peerAccount.GetList()) + enableEpochsHandler.AddActiveFlags(common.StakingV4StartedFlag) + err = stp.updatePeerState(stakingData, blsPubKey, nonce) + assert.NoError(t, err) + assert.True(t, bytes.Equal(blsPubKey, 
peerAccount.GetBLSPublicKey())) + assert.True(t, bytes.Equal(stakingData.RewardAddress, peerAccount.GetRewardAddress())) + assert.Equal(t, string(common.AuctionList), peerAccount.GetList()) + enableEpochsHandler.RemoveActiveFlags(common.StakingV4StartedFlag) + stakingData.UnStakedNonce = 11 _ = stp.updatePeerState(stakingData, blsPubKey, stakingData.UnStakedNonce) assert.Equal(t, string(common.LeavingList), peerAccount.GetList()) - peerAccount.SetListAndIndex(0, string(common.EligibleList), 5) + peerAccount.SetListAndIndex(0, string(common.EligibleList), 5, false) stakingData.JailedNonce = 12 _ = stp.updatePeerState(stakingData, blsPubKey, stakingData.JailedNonce) assert.Equal(t, string(common.LeavingList), peerAccount.GetList()) @@ -721,6 +731,12 @@ func TestStakingToPeer_UpdatePeerState(t *testing.T) { _ = stp.updatePeerState(stakingData, blsPubKey, stakingData.UnJailedNonce) assert.Equal(t, string(common.NewList), peerAccount.GetList()) + enableEpochsHandler.AddActiveFlags(common.StakingV4StartedFlag) + err = stp.updatePeerState(stakingData, blsPubKey, stakingData.UnJailedNonce) + assert.NoError(t, err) + assert.Equal(t, string(common.AuctionList), peerAccount.GetList()) + enableEpochsHandler.RemoveActiveFlags(common.StakingV4StartedFlag) + stakingData.UnStakedNonce = 15 _ = stp.updatePeerState(stakingData, blsPubKey, stakingData.UnStakedNonce) assert.Equal(t, string(common.LeavingList), peerAccount.GetList()) @@ -769,7 +785,7 @@ func TestStakingToPeer_UnJailFromInactive(t *testing.T) { _ = stp.updatePeerState(stakingData, blsPubKey, stakingData.UnStakedNonce) assert.Equal(t, string(common.LeavingList), peerAccount.GetList()) - peerAccount.SetListAndIndex(0, string(common.JailedList), 5) + peerAccount.SetListAndIndex(0, string(common.JailedList), 5, false) stakingData.UnJailedNonce = 14 _ = stp.updatePeerState(stakingData, blsPubKey, stakingData.UnJailedNonce) assert.Equal(t, string(common.InactiveList), peerAccount.GetList()) diff --git a/process/smartContract/hooks/blockChainHook.go b/process/smartContract/hooks/blockChainHook.go index 8a2df4dfad8..6e590484992 100644 --- a/process/smartContract/hooks/blockChainHook.go +++ b/process/smartContract/hooks/blockChainHook.go @@ -826,8 +826,7 @@ func (bh *BlockChainHookImpl) makeCompiledSCStorage() error { dbConfig := factory.GetDBFromConfig(bh.configSCStorage.DB) dbConfig.FilePath = path.Join(bh.workingDir, defaultCompiledSCPath, bh.configSCStorage.DB.FilePath) - dbConfigHandler := factory.NewDBConfigHandler(bh.configSCStorage.DB) - persisterFactory, err := factory.NewPersisterFactory(dbConfigHandler) + persisterFactory, err := factory.NewPersisterFactory(bh.configSCStorage.DB) if err != nil { return err } diff --git a/process/smartContract/process.go b/process/smartContract/process.go index 9d1c8bcd4f3..7bd0c9a2f52 100644 --- a/process/smartContract/process.go +++ b/process/smartContract/process.go @@ -180,7 +180,6 @@ func NewSmartContractProcessor(args scrCommon.ArgsNewSmartContractProcessor) (*s common.OptimizeGasUsedInCrossMiniBlocksFlag, common.OptimizeNFTStoreFlag, common.RemoveNonUpdatedStorageFlag, - common.BuiltInFunctionOnMetaFlag, common.BackwardCompSaveKeyValueFlag, common.ReturnDataToLastTransferFlagAfterEpoch, common.FixAsyncCallBackArgsListFlag, @@ -2823,7 +2822,7 @@ func (sc *scProcessor) ProcessSmartContractResult(scr *smartContractResult.Smart returnCode, err = sc.ExecuteSmartContractTransaction(scr, sndAcc, dstAcc) return returnCode, err case process.BuiltInFunctionCall: - if sc.shardCoordinator.SelfId() == 
core.MetachainShardId && !sc.enableEpochsHandler.IsFlagEnabled(common.BuiltInFunctionOnMetaFlag) { + if sc.shardCoordinator.SelfId() == core.MetachainShardId { returnCode, err = sc.ExecuteSmartContractTransaction(scr, sndAcc, dstAcc) return returnCode, err } @@ -2861,7 +2860,8 @@ func (sc *scProcessor) processSimpleSCR( if err != nil { return err } - if !isPayable && !bytes.Equal(scResult.RcvAddr, scResult.OriginalSender) { + isSenderMeta := sc.shardCoordinator.ComputeId(scResult.SndAddr) == core.MetachainShardId + if !isPayable && !bytes.Equal(scResult.RcvAddr, scResult.OriginalSender) && !isSenderMeta { return process.ErrAccountNotPayable } diff --git a/process/smartContract/processProxy/processProxy.go b/process/smartContract/processProxy/processProxy.go index d2408c36dfa..c64db4791a4 100644 --- a/process/smartContract/processProxy/processProxy.go +++ b/process/smartContract/processProxy/processProxy.go @@ -169,11 +169,11 @@ func (proxy *scProcessorProxy) IsInterfaceNil() bool { } // EpochConfirmed is called whenever a new epoch is confirmed -func (proxy *scProcessorProxy) EpochConfirmed(_ uint32, _ uint64) { +func (proxy *scProcessorProxy) EpochConfirmed(epoch uint32, _ uint64) { proxy.mutRc.Lock() defer proxy.mutRc.Unlock() - if proxy.args.EnableEpochsHandler.IsFlagEnabled(common.SCProcessorV2Flag) { + if proxy.args.EnableEpochsHandler.IsFlagEnabledInEpoch(common.SCProcessorV2Flag, epoch) { proxy.setActiveProcessorV2() return } diff --git a/process/smartContract/processProxy/processProxy_test.go b/process/smartContract/processProxy/processProxy_test.go index ba0a9c1c0b8..0b5695386a8 100644 --- a/process/smartContract/processProxy/processProxy_test.go +++ b/process/smartContract/processProxy/processProxy_test.go @@ -129,7 +129,11 @@ func TestNewSmartContractProcessorProxy(t *testing.T) { t.Parallel() args := createMockSmartContractProcessorArguments() - args.EnableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.SCProcessorV2Flag) + args.EnableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ + IsFlagEnabledInEpochCalled: func(flag core.EnableEpochFlag, epoch uint32) bool { + return flag == common.SCProcessorV2Flag + }, + } proxy, err := NewSmartContractProcessorProxy(args, &epochNotifierMock.EpochNotifierStub{}) assert.False(t, check.IfNil(proxy)) diff --git a/process/smartContract/processProxy/testProcessProxy.go b/process/smartContract/processProxy/testProcessProxy.go index 31c6514814b..5d5d96ee0d2 100644 --- a/process/smartContract/processProxy/testProcessProxy.go +++ b/process/smartContract/processProxy/testProcessProxy.go @@ -145,11 +145,11 @@ func (proxy *scProcessorTestProxy) IsInterfaceNil() bool { } // EpochConfirmed is called whenever a new epoch is confirmed -func (proxy *scProcessorTestProxy) EpochConfirmed(_ uint32, _ uint64) { +func (proxy *scProcessorTestProxy) EpochConfirmed(epoch uint32, _ uint64) { proxy.mutRc.Lock() defer proxy.mutRc.Unlock() - if proxy.args.EnableEpochsHandler.IsFlagEnabled(common.SCProcessorV2Flag) { + if proxy.args.EnableEpochsHandler.IsFlagEnabledInEpoch(common.SCProcessorV2Flag, epoch) { proxy.setActiveProcessorV2() return } diff --git a/process/smartContract/process_test.go b/process/smartContract/process_test.go index 014a1751495..c53c7ef83c9 100644 --- a/process/smartContract/process_test.go +++ b/process/smartContract/process_test.go @@ -3339,12 +3339,6 @@ func TestScProcessor_ProcessSmartContractResultExecuteSCIfMetaAndBuiltIn(t *test _, err = sc.ProcessSmartContractResult(&scr) require.Nil(t, err) 
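The processSimpleSCR change earlier in this diff adds an exemption: a non-payable receiver still rejects SCRs coming from a third party, unless the sender address resolves to the metachain. A standalone illustration of that decision follows; the shard-routing rule, addresses and error value below are made-up stand-ins, while the real check uses shardCoordinator.ComputeId, core.MetachainShardId and process.ErrAccountNotPayable:

package main

import (
	"bytes"
	"errors"
	"fmt"
)

const metachainShardId = uint32(0xFFFFFFFF) // stand-in for core.MetachainShardId

var errAccountNotPayable = errors.New("account is not payable")

// computeShardId is a made-up routing rule, standing in for shardCoordinator.ComputeId.
func computeShardId(addr []byte) uint32 {
	if bytes.HasPrefix(addr, []byte("meta")) {
		return metachainShardId
	}
	return 0
}

// checkPayable mirrors the updated condition in processSimpleSCR.
func checkPayable(isPayable bool, rcvAddr, originalSender, sndAddr []byte) error {
	isSenderMeta := computeShardId(sndAddr) == metachainShardId
	if !isPayable && !bytes.Equal(rcvAddr, originalSender) && !isSenderMeta {
		return errAccountNotPayable
	}
	return nil
}

func main() {
	fmt.Println(checkPayable(false, []byte("rcv"), []byte("other"), []byte("metaSystemSC"))) // <nil>
	fmt.Println(checkPayable(false, []byte("rcv"), []byte("other"), []byte("plainUser")))    // account is not payable
}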
require.True(t, executeCalled) - - executeCalled = false - enableEpochsHandlerStub.AddActiveFlags(common.BuiltInFunctionsFlag, common.BuiltInFunctionOnMetaFlag) - _, err = sc.ProcessSmartContractResult(&scr) - require.Nil(t, err) - require.False(t, executeCalled) } func TestScProcessor_ProcessRelayedSCRValueBackToRelayer(t *testing.T) { @@ -3749,7 +3743,7 @@ func TestSmartContractProcessor_computeTotalConsumedFeeAndDevRwdWithDifferentSCC feeHandler, err := economics.NewEconomicsData(*args) require.Nil(t, err) require.NotNil(t, feeHandler) - arguments.TxFeeHandler, _ = postprocess.NewFeeAccumulator() + arguments.TxFeeHandler = postprocess.NewFeeAccumulator() arguments.EconomicsFee = feeHandler arguments.ShardCoordinator = shardCoordinator @@ -3836,9 +3830,7 @@ func TestSmartContractProcessor_finishSCExecutionV2(t *testing.T) { arguments.EconomicsFee, err = economics.NewEconomicsData(*args) require.Nil(t, err) - arguments.TxFeeHandler, err = postprocess.NewFeeAccumulator() - require.Nil(t, err) - + arguments.TxFeeHandler = postprocess.NewFeeAccumulator() arguments.ShardCoordinator = shardCoordinator arguments.AccountsDB = &stateMock.AccountsStub{ RevertToSnapshotCalled: func(snapshot int) error { @@ -4253,8 +4245,7 @@ func createRealEconomicsDataArgs() *economics.ArgsNewEconomicsData { return flag == common.GasPriceModifierFlag }, }, - BuiltInFunctionsCostHandler: &mock.BuiltInCostHandlerStub{}, - TxVersionChecker: &testscommon.TxVersionCheckerStub{}, + TxVersionChecker: &testscommon.TxVersionCheckerStub{}, } } diff --git a/process/smartContract/processorV2/processV2.go b/process/smartContract/processorV2/processV2.go index 938bfe725c3..126433c6dee 100644 --- a/process/smartContract/processorV2/processV2.go +++ b/process/smartContract/processorV2/processV2.go @@ -163,9 +163,7 @@ func NewSmartContractProcessorV2(args scrCommon.ArgsNewSmartContractProcessor) ( if check.IfNil(args.EnableEpochsHandler) { return nil, process.ErrNilEnableEpochsHandler } - err := core.CheckHandlerCompatibility(args.EnableEpochsHandler, []core.EnableEpochFlag{ - common.BuiltInFunctionOnMetaFlag, - }) + err := core.CheckHandlerCompatibility(args.EnableEpochsHandler, []core.EnableEpochFlag{}) if err != nil { return nil, err } @@ -2735,7 +2733,7 @@ func (sc *scProcessor) ProcessSmartContractResult(scr *smartContractResult.Smart returnCode, err = sc.ExecuteSmartContractTransaction(scr, sndAcc, dstAcc) return returnCode, err case process.BuiltInFunctionCall: - if sc.shardCoordinator.SelfId() == core.MetachainShardId && !sc.enableEpochsHandler.IsFlagEnabled(common.BuiltInFunctionOnMetaFlag) { + if sc.shardCoordinator.SelfId() == core.MetachainShardId { returnCode, err = sc.ExecuteSmartContractTransaction(scr, sndAcc, dstAcc) return returnCode, err } diff --git a/process/smartContract/processorV2/process_test.go b/process/smartContract/processorV2/process_test.go index 01a623cbe26..eedea17f1ad 100644 --- a/process/smartContract/processorV2/process_test.go +++ b/process/smartContract/processorV2/process_test.go @@ -371,7 +371,6 @@ func TestNewSmartContractProcessorVerifyAllMembers(t *testing.T) { t.Parallel() arguments := createMockSmartContractProcessorArguments() - arguments.EnableEpochs.BuiltInFunctionOnMetaEnableEpoch = 10 sc, _ := NewSmartContractProcessorV2(arguments) assert.Equal(t, arguments.VmContainer, sc.vmContainer) @@ -3273,12 +3272,6 @@ func TestScProcessor_ProcessSmartContractResultExecuteSCIfMetaAndBuiltIn(t *test _, err = sc.ProcessSmartContractResult(&scr) require.Nil(t, err) require.True(t, executeCalled) 
- - executeCalled = false - enableEpochsHandlerStub.AddActiveFlags(common.BuiltInFunctionOnMetaFlag) - _, err = sc.ProcessSmartContractResult(&scr) - require.Nil(t, err) - require.False(t, executeCalled) } func TestScProcessor_ProcessRelayedSCRValueBackToRelayer(t *testing.T) { @@ -3704,7 +3697,7 @@ func TestSmartContractProcessor_computeTotalConsumedFeeAndDevRwdWithDifferentSCC feeHandler, err := economics.NewEconomicsData(*args) require.Nil(t, err) require.NotNil(t, feeHandler) - arguments.TxFeeHandler, _ = postprocess.NewFeeAccumulator() + arguments.TxFeeHandler = postprocess.NewFeeAccumulator() arguments.EconomicsFee = feeHandler arguments.ShardCoordinator = shardCoordinator @@ -3790,9 +3783,7 @@ func TestSmartContractProcessor_finishSCExecutionV2(t *testing.T) { arguments.EconomicsFee, err = economics.NewEconomicsData(*args) require.Nil(t, err) - arguments.TxFeeHandler, err = postprocess.NewFeeAccumulator() - require.Nil(t, err) - + arguments.TxFeeHandler = postprocess.NewFeeAccumulator() arguments.ShardCoordinator = shardCoordinator arguments.AccountsDB = &stateMock.AccountsStub{ RevertToSnapshotCalled: func(snapshot int) error { @@ -4191,8 +4182,7 @@ func createRealEconomicsDataArgs() *economics.ArgsNewEconomicsData { return flag == common.GasPriceModifierFlag }, }, - BuiltInFunctionsCostHandler: &mock.BuiltInCostHandlerStub{}, - TxVersionChecker: &testscommon.TxVersionCheckerStub{}, + TxVersionChecker: &testscommon.TxVersionCheckerStub{}, } } diff --git a/process/smartContract/scQueryService.go b/process/smartContract/scQueryService.go index eb3d9b95e4e..ec6ad67e87c 100644 --- a/process/smartContract/scQueryService.go +++ b/process/smartContract/scQueryService.go @@ -22,53 +22,57 @@ import ( "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/process/smartContract/scrCommon" "github.com/multiversx/mx-chain-go/sharding" + logger "github.com/multiversx/mx-chain-logger-go" vmcommon "github.com/multiversx/mx-chain-vm-common-go" "github.com/multiversx/mx-chain-vm-common-go/parsers" ) var _ process.SCQueryService = (*SCQueryService)(nil) +var logQueryService = logger.GetOrCreate("process/smartcontract.queryService") + // MaxGasLimitPerQuery - each unit is the equivalent of 1 nanosecond processing time const MaxGasLimitPerQuery = 300_000_000_000 // SCQueryService can execute Get functions over SC to fetch stored values type SCQueryService struct { - vmContainer process.VirtualMachinesContainer - economicsFee process.FeeHandler - mutRunSc sync.Mutex - blockChainHook process.BlockChainHookWithAccountsAdapter - mainBlockChain data.ChainHandler - apiBlockChain data.ChainHandler - numQueries int - gasForQuery uint64 - wasmVMChangeLocker common.Locker - bootstrapper process.Bootstrapper - allowExternalQueriesChan chan struct{} - historyRepository dblookupext.HistoryRepository - shardCoordinator sharding.Coordinator - storageService dataRetriever.StorageService - marshaller marshal.Marshalizer - hasher hashing.Hasher - uint64ByteSliceConverter typeConverters.Uint64ByteSliceConverter + vmContainer process.VirtualMachinesContainer + economicsFee process.FeeHandler + mutRunSc sync.Mutex + blockChainHook process.BlockChainHookWithAccountsAdapter + mainBlockChain data.ChainHandler + apiBlockChain data.ChainHandler + gasForQuery uint64 + wasmVMChangeLocker common.Locker + bootstrapper process.Bootstrapper + allowExternalQueriesChan chan struct{} + historyRepository dblookupext.HistoryRepository + shardCoordinator sharding.Coordinator + storageService 
dataRetriever.StorageService + marshaller marshal.Marshalizer + hasher hashing.Hasher + uint64ByteSliceConverter typeConverters.Uint64ByteSliceConverter + isInHistoricalBalancesMode bool } // ArgsNewSCQueryService defines the arguments needed for the sc query service type ArgsNewSCQueryService struct { - VmContainer process.VirtualMachinesContainer - EconomicsFee process.FeeHandler - BlockChainHook process.BlockChainHookWithAccountsAdapter - MainBlockChain data.ChainHandler - APIBlockChain data.ChainHandler - WasmVMChangeLocker common.Locker - Bootstrapper process.Bootstrapper - AllowExternalQueriesChan chan struct{} - MaxGasLimitPerQuery uint64 - HistoryRepository dblookupext.HistoryRepository - ShardCoordinator sharding.Coordinator - StorageService dataRetriever.StorageService - Marshaller marshal.Marshalizer - Hasher hashing.Hasher - Uint64ByteSliceConverter typeConverters.Uint64ByteSliceConverter + VmContainer process.VirtualMachinesContainer + EconomicsFee process.FeeHandler + BlockChainHook process.BlockChainHookWithAccountsAdapter + MainBlockChain data.ChainHandler + APIBlockChain data.ChainHandler + WasmVMChangeLocker common.Locker + Bootstrapper process.Bootstrapper + AllowExternalQueriesChan chan struct{} + MaxGasLimitPerQuery uint64 + HistoryRepository dblookupext.HistoryRepository + ShardCoordinator sharding.Coordinator + StorageService dataRetriever.StorageService + Marshaller marshal.Marshalizer + Hasher hashing.Hasher + Uint64ByteSliceConverter typeConverters.Uint64ByteSliceConverter + IsInHistoricalBalancesMode bool } // NewSCQueryService returns a new instance of SCQueryService @@ -85,21 +89,22 @@ func NewSCQueryService( gasForQuery = args.MaxGasLimitPerQuery } return &SCQueryService{ - vmContainer: args.VmContainer, - economicsFee: args.EconomicsFee, - mainBlockChain: args.MainBlockChain, - apiBlockChain: args.APIBlockChain, - blockChainHook: args.BlockChainHook, - wasmVMChangeLocker: args.WasmVMChangeLocker, - bootstrapper: args.Bootstrapper, - gasForQuery: gasForQuery, - allowExternalQueriesChan: args.AllowExternalQueriesChan, - historyRepository: args.HistoryRepository, - shardCoordinator: args.ShardCoordinator, - storageService: args.StorageService, - marshaller: args.Marshaller, - hasher: args.Hasher, - uint64ByteSliceConverter: args.Uint64ByteSliceConverter, + vmContainer: args.VmContainer, + economicsFee: args.EconomicsFee, + mainBlockChain: args.MainBlockChain, + apiBlockChain: args.APIBlockChain, + blockChainHook: args.BlockChainHook, + wasmVMChangeLocker: args.WasmVMChangeLocker, + bootstrapper: args.Bootstrapper, + gasForQuery: gasForQuery, + allowExternalQueriesChan: args.AllowExternalQueriesChan, + historyRepository: args.HistoryRepository, + shardCoordinator: args.ShardCoordinator, + storageService: args.StorageService, + marshaller: args.Marshaller, + hasher: args.Hasher, + uint64ByteSliceConverter: args.Uint64ByteSliceConverter, + isInHistoricalBalancesMode: args.IsInHistoricalBalancesMode, }, nil } @@ -179,8 +184,7 @@ func (service *SCQueryService) shouldAllowQueriesExecution() bool { } func (service *SCQueryService) executeScCall(query *process.SCQuery, gasPrice uint64) (*vmcommon.VMOutput, common.BlockInfo, error) { - log.Trace("executeScCall", "function", query.FuncName, "numQueries", service.numQueries) - service.numQueries++ + logQueryService.Trace("executeScCall", "address", query.ScAddress, "function", query.FuncName, "blockNonce", query.BlockNonce.Value, "blockHash", query.BlockHash) shouldEarlyExitBecauseOfSyncState := query.ShouldBeSynced && 
service.bootstrapper.GetNodeState() == common.NsNotSynchronized if shouldEarlyExitBecauseOfSyncState { @@ -198,11 +202,11 @@ func (service *SCQueryService) executeScCall(query *process.SCQuery, gasPrice ui return nil, nil, err } - accountsAdapter := service.blockChainHook.GetAccountsAdapter() - err = accountsAdapter.RecreateTrie(blockRootHash) + err = service.recreateTrie(blockRootHash, blockHeader) if err != nil { return nil, nil, err } + service.blockChainHook.SetCurrentHeader(blockHeader) } shouldCheckRootHashChanges := query.SameScState @@ -212,8 +216,6 @@ func (service *SCQueryService) executeScCall(query *process.SCQuery, gasPrice ui rootHashBeforeExecution = service.apiBlockChain.GetCurrentBlockRootHash() } - service.blockChainHook.SetCurrentHeader(service.mainBlockChain.GetCurrentBlockHeader()) - service.wasmVMChangeLocker.RLock() vm, _, err := scrCommon.FindVMByScAddress(service.vmContainer, query.ScAddress) if err != nil { @@ -229,15 +231,6 @@ func (service *SCQueryService) executeScCall(query *process.SCQuery, gasPrice ui return nil, nil, err } - if service.hasRetriableExecutionError(vmOutput) { - log.Error("Retriable execution error detected. Will retry (once) executeScCall()", "returnCode", vmOutput.ReturnCode, "returnMessage", vmOutput.ReturnMessage) - - vmOutput, err = vm.RunSmartContractCall(vmInput) - if err != nil { - return nil, nil, err - } - } - if query.SameScState { err = service.checkForRootHashChanges(rootHashBeforeExecution) if err != nil { @@ -258,9 +251,26 @@ func (service *SCQueryService) executeScCall(query *process.SCQuery, gasPrice ui return vmOutput, blockInfo, nil } +func (service *SCQueryService) recreateTrie(blockRootHash []byte, blockHeader data.HeaderHandler) error { + if check.IfNil(blockHeader) { + return process.ErrNilBlockHeader + } + + accountsAdapter := service.blockChainHook.GetAccountsAdapter() + + if service.isInHistoricalBalancesMode { + logQueryService.Trace("calling RecreateTrieFromEpoch", "block", blockHeader.GetNonce(), "rootHash", blockRootHash) + holder := holders.NewRootHashHolder(blockRootHash, core.OptionalUint32{Value: blockHeader.GetEpoch(), HasValue: true}) + + return accountsAdapter.RecreateTrieFromEpoch(holder) + } + + logQueryService.Trace("calling RecreateTrie", "block", blockHeader.GetNonce(), "rootHash", blockRootHash) + return accountsAdapter.RecreateTrie(blockRootHash) +} + // TODO: extract duplicated code with nodeBlocks.go func (service *SCQueryService) extractBlockHeaderAndRootHash(query *process.SCQuery) (data.HeaderHandler, []byte, error) { - if len(query.BlockHash) > 0 { currentHeader, err := service.getBlockHeaderByHash(query.BlockHash) if err != nil { @@ -417,10 +427,6 @@ func (service *SCQueryService) createVMCallInput(query *process.SCQuery, gasPric return vmContractCallInput } -func (service *SCQueryService) hasRetriableExecutionError(vmOutput *vmcommon.VMOutput) bool { - return vmOutput.ReturnMessage == "allocation error" -} - // ComputeScCallGasLimit will estimate how many gas a transaction will consume func (service *SCQueryService) ComputeScCallGasLimit(tx *transaction.Transaction) (uint64, error) { argParser := parsers.NewCallArgsParser() diff --git a/process/smartContract/scQueryServiceDispatcher.go b/process/smartContract/scQueryServiceDispatcher.go index 2c51b47d55d..981f71f3dd9 100644 --- a/process/smartContract/scQueryServiceDispatcher.go +++ b/process/smartContract/scQueryServiceDispatcher.go @@ -78,7 +78,7 @@ func (sqsd *scQueryServiceDispatcher) Close() error { for _, scQueryService := range 
sqsd.list { err := scQueryService.Close() if err != nil { - log.Error("error while closing inner SC query service in scQueryServiceDispatcher.Close", "error", err) + logQueryService.Error("error while closing inner SC query service in scQueryServiceDispatcher.Close", "error", err) errFound = err } } diff --git a/process/smartContract/scQueryService_test.go b/process/smartContract/scQueryService_test.go index 0b76f3a739e..d71542a8aaa 100644 --- a/process/smartContract/scQueryService_test.go +++ b/process/smartContract/scQueryService_test.go @@ -11,7 +11,6 @@ import ( "testing" "time" - "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-core-go/data/block" "github.com/multiversx/mx-chain-core-go/data/transaction" @@ -41,7 +40,7 @@ func createMockArgumentsForSCQuery() ArgsNewSCQueryService { BlockChainHook: &testscommon.BlockChainHookStub{ GetAccountsAdapterCalled: func() state.AccountsAdapter { return &stateMocks.AccountsStub{ - RecreateTrieCalled: func(rootHash []byte) error { + RecreateTrieFromEpochCalled: func(options common.RootHashHolder) error { return nil }, } @@ -59,9 +58,10 @@ func createMockArgumentsForSCQuery() ArgsNewSCQueryService { return &storageStubs.StorerStub{}, nil }, }, - Marshaller: &marshallerMock.MarshalizerStub{}, - Hasher: &testscommon.HasherStub{}, - Uint64ByteSliceConverter: &mock.Uint64ByteSliceConverterMock{}, + Marshaller: &marshallerMock.MarshalizerStub{}, + Hasher: &testscommon.HasherStub{}, + Uint64ByteSliceConverter: &mock.Uint64ByteSliceConverterMock{}, + IsInHistoricalBalancesMode: false, } } @@ -367,10 +367,11 @@ func TestExecuteQuery_ShouldReceiveQueryCorrectly(t *testing.T) { _, _, _ = target.ExecuteQuery(&query) assert.True(t, runWasCalled) }) - t.Run("block hash should work", func(t *testing.T) { + t.Run("block hash should work - in deep history mode", func(t *testing.T) { t.Parallel() runWasCalled := false + epoch := uint32(37) mockVM := &mock.VMExecutionHandlerStub{ RunSmartContractCallCalled: func(input *vmcommon.ContractCallInput) (output *vmcommon.VMOutput, e error) { @@ -396,6 +397,13 @@ func TestExecuteQuery_ShouldReceiveQueryCorrectly(t *testing.T) { return uint64(math.MaxUint64) }, } + argsNewSCQuery.MainBlockChain = &testscommon.ChainHandlerStub{ + GetCurrentBlockHeaderCalled: func() data.HeaderHandler { + return &block.Header{ + Epoch: epoch, + } + }, + } providedHash := []byte("provided hash") providedRootHash := []byte("provided root hash") argsNewSCQuery.Marshaller = &marshallerMock.MarshalizerMock{} @@ -422,14 +430,21 @@ func TestExecuteQuery_ShouldReceiveQueryCorrectly(t *testing.T) { return true }, GetEpochByHashCalled: func(hash []byte) (uint32, error) { - return 12, nil + return epoch, nil }, } - wasRecreateTrieCalled := false + + recreateTrieWasCalled := false + recreateTrieFromEpochWasCalled := false + providedAccountsAdapter := &stateMocks.AccountsStub{ RecreateTrieCalled: func(rootHash []byte) error { - wasRecreateTrieCalled = true - assert.Equal(t, providedRootHash, rootHash) + recreateTrieWasCalled = true + return nil + }, + RecreateTrieFromEpochCalled: func(options common.RootHashHolder) error { + recreateTrieFromEpochWasCalled = true + assert.Equal(t, providedRootHash, options.GetRootHash()) return nil }, } @@ -438,6 +453,7 @@ func TestExecuteQuery_ShouldReceiveQueryCorrectly(t *testing.T) { return providedAccountsAdapter }, } + argsNewSCQuery.IsInHistoricalBalancesMode = true target, _ := NewSCQueryService(argsNewSCQuery) @@ -452,13 +468,16 @@ func 
TestExecuteQuery_ShouldReceiveQueryCorrectly(t *testing.T) { BlockHash: providedHash, } - _, _, _ = target.ExecuteQuery(&query) + _, _, err := target.ExecuteQuery(&query) assert.True(t, runWasCalled) - assert.True(t, wasRecreateTrieCalled) + assert.True(t, recreateTrieFromEpochWasCalled) + assert.False(t, recreateTrieWasCalled) + assert.Nil(t, err) }) - t.Run("block nonce should work", func(t *testing.T) { + t.Run("block hash should work - in normal mode", func(t *testing.T) { t.Parallel() + epoch := uint32(12) runWasCalled := false mockVM := &mock.VMExecutionHandlerStub{ @@ -487,22 +506,14 @@ func TestExecuteQuery_ShouldReceiveQueryCorrectly(t *testing.T) { } providedHash := []byte("provided hash") providedRootHash := []byte("provided root hash") - providedNonce := uint64(123) argsNewSCQuery.Marshaller = &marshallerMock.MarshalizerMock{} - counter := 0 argsNewSCQuery.StorageService = &storageStubs.ChainStorerStub{ GetStorerCalled: func(unitType dataRetriever.UnitType) (storage.Storer, error) { return &storageStubs.StorerStub{ - GetCalled: func(key []byte) ([]byte, error) { - return providedHash, nil - }, GetFromEpochCalled: func(key []byte, epoch uint32) ([]byte, error) { - counter++ - if counter > 2 { - return nil, fmt.Errorf("no scheduled") - } hdr := &block.Header{ RootHash: providedRootHash, + Epoch: epoch, } buff, _ := argsNewSCQuery.Marshaller.Marshal(hdr) return buff, nil @@ -515,23 +526,30 @@ func TestExecuteQuery_ShouldReceiveQueryCorrectly(t *testing.T) { return true }, GetEpochByHashCalled: func(hash []byte) (uint32, error) { - require.Equal(t, providedHash, hash) - return 12, nil + return epoch, nil }, } - wasRecreateTrieCalled := false + + recreateTrieWasCalled := false + recreateTrieFromEpochWasCalled := false + providedAccountsAdapter := &stateMocks.AccountsStub{ RecreateTrieCalled: func(rootHash []byte) error { - wasRecreateTrieCalled = true + recreateTrieWasCalled = true assert.Equal(t, providedRootHash, rootHash) return nil }, + RecreateTrieFromEpochCalled: func(options common.RootHashHolder) error { + recreateTrieFromEpochWasCalled = true + return nil + }, } argsNewSCQuery.BlockChainHook = &testscommon.BlockChainHookStub{ GetAccountsAdapterCalled: func() state.AccountsAdapter { return providedAccountsAdapter }, } + argsNewSCQuery.IsInHistoricalBalancesMode = false target, _ := NewSCQueryService(argsNewSCQuery) @@ -543,15 +561,123 @@ func TestExecuteQuery_ShouldReceiveQueryCorrectly(t *testing.T) { ScAddress: scAddress, FuncName: funcName, Arguments: dataArgs, - BlockNonce: core.OptionalUint64{ - Value: providedNonce, - HasValue: true, - }, + BlockHash: providedHash, } - _, _, _ = target.ExecuteQuery(&query) + _, _, err := target.ExecuteQuery(&query) assert.True(t, runWasCalled) - assert.True(t, wasRecreateTrieCalled) + assert.True(t, recreateTrieWasCalled) + assert.False(t, recreateTrieFromEpochWasCalled) + assert.Nil(t, err) + }) +} + +func TestSCQueryService_RecreateTrie(t *testing.T) { + t.Parallel() + + testRootHash := []byte("test root hash") + t.Run("should not call RecreateTrie if block header is nil", func(t *testing.T) { + t.Parallel() + + argsNewSCQuery := createMockArgumentsForSCQuery() + argsNewSCQuery.BlockChainHook = &testscommon.BlockChainHookStub{ + GetAccountsAdapterCalled: func() state.AccountsAdapter { + return &stateMocks.AccountsStub{ + RecreateTrieCalled: func(rootHash []byte) error { + require.Fail(t, "should not be called") + return nil + }, + } + }, + } + + service, _ := NewSCQueryService(argsNewSCQuery) + err := 
service.recreateTrie(testRootHash, nil) + assert.ErrorIs(t, err, process.ErrNilBlockHeader) + }) + t.Run("should call RecreateTrieFromEpoch if in deep history mode", func(t *testing.T) { + t.Parallel() + + recreateTrieWasCalled := false + recreateTrieFromEpochWasCalled := false + + argsNewSCQuery := createMockArgumentsForSCQuery() + argsNewSCQuery.IsInHistoricalBalancesMode = true + argsNewSCQuery.MainBlockChain = &testscommon.ChainHandlerStub{ + GetCurrentBlockHeaderCalled: func() data.HeaderHandler { + return nil // right after genesis there is no current block header yet + }, + } + argsNewSCQuery.BlockChainHook = &testscommon.BlockChainHookStub{ + GetAccountsAdapterCalled: func() state.AccountsAdapter { + return &stateMocks.AccountsStub{ + RecreateTrieCalled: func(rootHash []byte) error { + recreateTrieWasCalled = true + recreateTrieFromEpochWasCalled = false + + assert.Equal(t, testRootHash, rootHash) + return nil + }, + RecreateTrieFromEpochCalled: func(options common.RootHashHolder) error { + recreateTrieWasCalled = false + recreateTrieFromEpochWasCalled = true + + assert.Equal(t, testRootHash, options.GetRootHash()) + return nil + }, + } + }, + } + + service, _ := NewSCQueryService(argsNewSCQuery) + + // For genesis block, RecreateTrieFromEpoch should be called + err := service.recreateTrie(testRootHash, &block.Header{}) + assert.Nil(t, err) + assert.True(t, recreateTrieFromEpochWasCalled) + assert.False(t, recreateTrieWasCalled) + }) + t.Run("should call RecreateTrie if not in deep history mode", func(t *testing.T) { + t.Parallel() + + recreateTrieWasCalled := false + recreateTrieFromEpochWasCalled := false + + argsNewSCQuery := createMockArgumentsForSCQuery() + argsNewSCQuery.IsInHistoricalBalancesMode = false + argsNewSCQuery.MainBlockChain = &testscommon.ChainHandlerStub{ + GetCurrentBlockHeaderCalled: func() data.HeaderHandler { + return nil // right after genesis there is no current block header yet + }, + } + argsNewSCQuery.BlockChainHook = &testscommon.BlockChainHookStub{ + GetAccountsAdapterCalled: func() state.AccountsAdapter { + return &stateMocks.AccountsStub{ + RecreateTrieCalled: func(rootHash []byte) error { + recreateTrieWasCalled = true + recreateTrieFromEpochWasCalled = false + + assert.Equal(t, testRootHash, rootHash) + return nil + }, + RecreateTrieFromEpochCalled: func(options common.RootHashHolder) error { + recreateTrieWasCalled = false + recreateTrieFromEpochWasCalled = true + + assert.Equal(t, testRootHash, options.GetRootHash()) + return nil + }, + } + }, + } + + service, _ := NewSCQueryService(argsNewSCQuery) + + // For genesis block, plain RecreateTrie (not RecreateTrieFromEpoch) should be called, since historical balances mode is off + err := service.recreateTrie(testRootHash, &block.Header{}) + assert.Nil(t, err) + assert.False(t, recreateTrieFromEpochWasCalled) + assert.True(t, recreateTrieWasCalled) }) } @@ -896,16 +1022,6 @@ func TestSCQueryService_ShouldFailIfStateChanged(t *testing.T) { t.Parallel() args := createMockArgumentsForSCQuery() - args.BlockChainHook = &testscommon.BlockChainHookStub{ - GetAccountsAdapterCalled: func() state.AccountsAdapter { - return &stateMocks.AccountsStub{ - RecreateTrieCalled: func(rootHash []byte) error { - return nil - }, - } - }, - } - rootHashCalledCounter := 0 args.APIBlockChain = &testscommon.ChainHandlerStub{ GetCurrentBlockRootHashCalled: func() []byte { @@ -927,7 +1043,7 @@ func TestSCQueryService_ShouldFailIfStateChanged(t *testing.T) { FuncName: "function", }) require.Nil(t, res) - require.True(t, errors.Is(err, process.ErrStateChangedWhileExecutingVmQuery)) +
require.ErrorIs(t, err, process.ErrStateChangedWhileExecutingVmQuery) } func TestSCQueryService_ShouldWorkIfStateDidntChange(t *testing.T) { diff --git a/process/transaction/metaProcess.go b/process/transaction/metaProcess.go index 83274dda551..d1b88a012d4 100644 --- a/process/transaction/metaProcess.go +++ b/process/transaction/metaProcess.go @@ -65,7 +65,6 @@ func NewMetaTxProcessor(args ArgsNewMetaTxProcessor) (*metaTxProcessor, error) { } err := core.CheckHandlerCompatibility(args.EnableEpochsHandler, []core.EnableEpochFlag{ common.PenalizedTooMuchGasFlag, - common.BuiltInFunctionOnMetaFlag, common.ESDTFlag, }) if err != nil { @@ -143,10 +142,6 @@ func (txProc *metaTxProcessor) ProcessTransaction(tx *transaction.Transaction) ( case process.SCInvoking: return txProc.processSCInvoking(tx, tx.SndAddr, tx.RcvAddr) case process.BuiltInFunctionCall: - if txProc.enableEpochsHandler.IsFlagEnabled(common.BuiltInFunctionOnMetaFlag) { - return txProc.processBuiltInFunctionCall(tx, tx.SndAddr, tx.RcvAddr) - } - if txProc.enableEpochsHandler.IsFlagEnabled(common.ESDTFlag) { return txProc.processSCInvoking(tx, tx.SndAddr, tx.RcvAddr) } @@ -189,18 +184,6 @@ func (txProc *metaTxProcessor) processSCInvoking( return txProc.scProcessor.ExecuteSmartContractTransaction(tx, acntSrc, acntDst) } -func (txProc *metaTxProcessor) processBuiltInFunctionCall( - tx *transaction.Transaction, - adrSrc, adrDst []byte, -) (vmcommon.ReturnCode, error) { - acntSrc, acntDst, err := txProc.getAccounts(adrSrc, adrDst) - if err != nil { - return 0, err - } - - return txProc.scProcessor.ExecuteBuiltInFunction(tx, acntSrc, acntDst) -} - // IsInterfaceNil returns true if there is no value under the interface func (txProc *metaTxProcessor) IsInterfaceNil() bool { return txProc == nil diff --git a/process/transaction/metaProcess_test.go b/process/transaction/metaProcess_test.go index ac536af4e30..eaaa1382d2e 100644 --- a/process/transaction/metaProcess_test.go +++ b/process/transaction/metaProcess_test.go @@ -451,19 +451,6 @@ func TestMetaTxProcessor_ProcessTransactionBuiltInCallTxShouldWork(t *testing.T) assert.Nil(t, err) assert.True(t, wasCalled) assert.Equal(t, 0, saveAccountCalled) - - builtInCalled := false - scProcessorMock.ExecuteBuiltInFunctionCalled = func(tx data.TransactionHandler, acntSrc, acntDst state.UserAccountHandler) (vmcommon.ReturnCode, error) { - builtInCalled = true - return 0, nil - } - - enableEpochsHandlerStub.AddActiveFlags(common.BuiltInFunctionOnMetaFlag) - - _, err = txProc.ProcessTransaction(&tx) - assert.Nil(t, err) - assert.True(t, builtInCalled) - assert.Equal(t, 0, saveAccountCalled) } func TestMetaTxProcessor_ProcessTransactionWithInvalidUsernameShouldNotError(t *testing.T) { diff --git a/process/transactionEvaluator/transactionEvaluator.go b/process/transactionEvaluator/transactionEvaluator.go index b9184ae3fad..9e61d138419 100644 --- a/process/transactionEvaluator/transactionEvaluator.go +++ b/process/transactionEvaluator/transactionEvaluator.go @@ -9,6 +9,7 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-core-go/data/transaction" "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/facade" @@ -32,6 +33,7 @@ type ArgsApiTransactionEvaluator struct { Accounts state.AccountsAdapterWithClean ShardCoordinator sharding.Coordinator EnableEpochsHandler common.EnableEpochsHandler + BlockChain data.ChainHandler } type 
apiTransactionEvaluator struct { @@ -41,6 +43,7 @@ type apiTransactionEvaluator struct { feeHandler process.FeeHandler txSimulator facade.TransactionSimulatorProcessor enableEpochsHandler common.EnableEpochsHandler + blockChain data.ChainHandler mutExecution sync.RWMutex } @@ -64,6 +67,9 @@ func NewAPITransactionEvaluator(args ArgsApiTransactionEvaluator) (*apiTransacti if check.IfNil(args.EnableEpochsHandler) { return nil, process.ErrNilEnableEpochsHandler } + if check.IfNil(args.BlockChain) { + return nil, process.ErrNilBlockChain + } err := core.CheckHandlerCompatibility(args.EnableEpochsHandler, []core.EnableEpochFlag{ common.CleanUpInformativeSCRsFlag, }) @@ -78,6 +84,7 @@ func NewAPITransactionEvaluator(args ArgsApiTransactionEvaluator) (*apiTransacti accounts: args.Accounts, shardCoordinator: args.ShardCoordinator, enableEpochsHandler: args.EnableEpochsHandler, + blockChain: args.BlockChain, } return tce, nil @@ -91,7 +98,9 @@ func (ate *apiTransactionEvaluator) SimulateTransactionExecution(tx *transaction ate.mutExecution.Unlock() }() - return ate.txSimulator.ProcessTx(tx) + currentHeader := ate.getCurrentBlockHeader() + + return ate.txSimulator.ProcessTx(tx, currentHeader) } // ComputeTransactionGasLimit will calculate how many gas units a transaction will consume @@ -140,8 +149,8 @@ func (ate *apiTransactionEvaluator) simulateTransactionCost(tx *transaction.Tran } costResponse := &transaction.CostResponse{} - - res, err := ate.txSimulator.ProcessTx(tx) + currentHeader := ate.getCurrentBlockHeader() + res, err := ate.txSimulator.ProcessTx(tx, currentHeader) if err != nil { costResponse.ReturnMessage = err.Error() return costResponse, nil @@ -228,6 +237,15 @@ func (ate *apiTransactionEvaluator) addMissingFieldsIfNeeded(tx *transaction.Tra return nil } +func (ate *apiTransactionEvaluator) getCurrentBlockHeader() data.HeaderHandler { + currentHeader := ate.blockChain.GetCurrentBlockHeader() + if check.IfNil(currentHeader) { + return ate.blockChain.GetGenesisHeader() + } + + return currentHeader +} + func (ate *apiTransactionEvaluator) getTxGasLimit(tx *transaction.Transaction) (uint64, error) { selfShardID := ate.shardCoordinator.SelfId() maxGasLimitPerBlock := ate.feeHandler.MaxGasLimitPerBlock(selfShardID) - 1 diff --git a/process/transactionEvaluator/transactionEvaluator_test.go b/process/transactionEvaluator/transactionEvaluator_test.go index 586072856ac..f36a5388777 100644 --- a/process/transactionEvaluator/transactionEvaluator_test.go +++ b/process/transactionEvaluator/transactionEvaluator_test.go @@ -10,6 +10,7 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-core-go/data/block" "github.com/multiversx/mx-chain-core-go/data/transaction" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/process/mock" @@ -30,6 +31,7 @@ func createArgs() ArgsApiTransactionEvaluator { Accounts: &stateMock.AccountsStub{}, ShardCoordinator: &mock.ShardCoordinatorStub{}, EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + BlockChain: &testscommon.ChainHandlerMock{}, } } @@ -43,6 +45,16 @@ func TestTransactionEvaluator_NilTxTypeHandler(t *testing.T) { require.Equal(t, process.ErrNilTxTypeHandler, err) } +func TestTransactionEvaluator_NilBlockChain(t *testing.T) { + t.Parallel() + args := createArgs() + args.BlockChain = nil + tce, err := NewAPITransactionEvaluator(args) + + require.Nil(t, tce) + require.Equal(t, 
process.ErrNilBlockChain, err) +} + func TestTransactionEvaluator_NilFeeHandlerShouldErr(t *testing.T) { t.Parallel() @@ -115,7 +127,7 @@ func TestComputeTransactionGasLimit_MoveBalance(t *testing.T) { }, } args.TxSimulator = &mock.TransactionSimulatorStub{ - ProcessTxCalled: func(tx *transaction.Transaction) (*txSimData.SimulationResultsWithVMOutput, error) { + ProcessTxCalled: func(tx *transaction.Transaction, currentHeader data.HeaderHandler) (*txSimData.SimulationResultsWithVMOutput, error) { return &txSimData.SimulationResultsWithVMOutput{}, nil }, } @@ -154,7 +166,7 @@ func TestComputeTransactionGasLimit_MoveBalanceInvalidNonceShouldStillComputeCos }, } args.TxSimulator = &mock.TransactionSimulatorStub{ - ProcessTxCalled: func(tx *transaction.Transaction) (*txSimData.SimulationResultsWithVMOutput, error) { + ProcessTxCalled: func(tx *transaction.Transaction, currentHeader data.HeaderHandler) (*txSimData.SimulationResultsWithVMOutput, error) { return nil, simulationErr }, } @@ -185,7 +197,7 @@ func TestComputeTransactionGasLimit_BuiltInFunction(t *testing.T) { }, } args.TxSimulator = &mock.TransactionSimulatorStub{ - ProcessTxCalled: func(tx *transaction.Transaction) (*txSimData.SimulationResultsWithVMOutput, error) { + ProcessTxCalled: func(tx *transaction.Transaction, currentHeader data.HeaderHandler) (*txSimData.SimulationResultsWithVMOutput, error) { return &txSimData.SimulationResultsWithVMOutput{ VMOutput: &vmcommon.VMOutput{ ReturnCode: vmcommon.Ok, @@ -221,7 +233,7 @@ func TestComputeTransactionGasLimit_BuiltInFunctionShouldErr(t *testing.T) { }, } args.TxSimulator = &mock.TransactionSimulatorStub{ - ProcessTxCalled: func(tx *transaction.Transaction) (*txSimData.SimulationResultsWithVMOutput, error) { + ProcessTxCalled: func(tx *transaction.Transaction, currentHeader data.HeaderHandler) (*txSimData.SimulationResultsWithVMOutput, error) { return nil, localErr }, } @@ -251,7 +263,7 @@ func TestComputeTransactionGasLimit_NilVMOutput(t *testing.T) { }, } args.TxSimulator = &mock.TransactionSimulatorStub{ - ProcessTxCalled: func(tx *transaction.Transaction) (*txSimData.SimulationResultsWithVMOutput, error) { + ProcessTxCalled: func(tx *transaction.Transaction, currentHeader data.HeaderHandler) (*txSimData.SimulationResultsWithVMOutput, error) { return &txSimData.SimulationResultsWithVMOutput{}, nil }, } @@ -260,7 +272,8 @@ func TestComputeTransactionGasLimit_NilVMOutput(t *testing.T) { return &stateMock.UserAccountStub{Balance: big.NewInt(100000)}, nil }, } - tce, _ := NewAPITransactionEvaluator(args) + tce, err := NewAPITransactionEvaluator(args) + require.Nil(t, err) tx := &transaction.Transaction{} cost, err := tce.ComputeTransactionGasLimit(tx) @@ -281,7 +294,7 @@ func TestComputeTransactionGasLimit_RetCodeNotOk(t *testing.T) { }, } args.TxSimulator = &mock.TransactionSimulatorStub{ - ProcessTxCalled: func(tx *transaction.Transaction) (*txSimData.SimulationResultsWithVMOutput, error) { + ProcessTxCalled: func(tx *transaction.Transaction, _ data.HeaderHandler) (*txSimData.SimulationResultsWithVMOutput, error) { return &txSimData.SimulationResultsWithVMOutput{ VMOutput: &vmcommon.VMOutput{ ReturnCode: vmcommon.UserError, @@ -335,3 +348,82 @@ func TestExtractGasUsedFromMessage(t *testing.T) { require.Equal(t, uint64(0), extractGasRemainedFromMessage("", gasRemainedSplitString)) require.Equal(t, uint64(0), extractGasRemainedFromMessage("too much gas provided, gas needed = 10000, gas used = wrong", gasUsedSlitString)) } + +func 
TestApiTransactionEvaluator_SimulateTransactionExecution(t *testing.T) { + t.Parallel() + + called := false + expectedNonce := uint64(1000) + args := createArgs() + args.BlockChain = &testscommon.ChainHandlerMock{} + _ = args.BlockChain.SetCurrentBlockHeaderAndRootHash(&block.Header{Nonce: expectedNonce}, []byte("test")) + + args.TxSimulator = &mock.TransactionSimulatorStub{ + ProcessTxCalled: func(_ *transaction.Transaction, currentHeader data.HeaderHandler) (*txSimData.SimulationResultsWithVMOutput, error) { + called = true + require.Equal(t, expectedNonce, currentHeader.GetNonce()) + return nil, nil + }, + } + + tce, err := NewAPITransactionEvaluator(args) + require.Nil(t, err) + + tx := &transaction.Transaction{} + + _, err = tce.SimulateTransactionExecution(tx) + require.Nil(t, err) + require.True(t, called) +} + +func TestApiTransactionEvaluator_ComputeTransactionGasLimit(t *testing.T) { + t.Parallel() + + called := false + expectedNonce := uint64(1000) + args := createArgs() + args.BlockChain = &testscommon.ChainHandlerMock{} + _ = args.BlockChain.SetCurrentBlockHeaderAndRootHash(&block.Header{Nonce: expectedNonce}, []byte("test")) + + args.TxTypeHandler = &testscommon.TxTypeHandlerMock{ + ComputeTransactionTypeCalled: func(tx data.TransactionHandler) (process.TransactionType, process.TransactionType) { + return process.SCInvoking, process.SCInvoking + }, + } + args.TxSimulator = &mock.TransactionSimulatorStub{ + ProcessTxCalled: func(_ *transaction.Transaction, currentHeader data.HeaderHandler) (*txSimData.SimulationResultsWithVMOutput, error) { + called = true + require.Equal(t, expectedNonce, currentHeader.GetNonce()) + return &txSimData.SimulationResultsWithVMOutput{}, nil + }, + } + + tce, err := NewAPITransactionEvaluator(args) + require.Nil(t, err) + + tx := &transaction.Transaction{} + + _, err = tce.ComputeTransactionGasLimit(tx) + require.Nil(t, err) + require.True(t, called) +} + +func TestApiTransactionEvaluator_GetCurrentHeader(t *testing.T) { + t.Parallel() + + args := createArgs() + args.BlockChain = &testscommon.ChainHandlerMock{} + _ = args.BlockChain.SetGenesisHeader(&block.Header{Nonce: 0}) + + tce, err := NewAPITransactionEvaluator(args) + require.Nil(t, err) + + currentHeader := tce.getCurrentBlockHeader() + require.Equal(t, uint64(0), currentHeader.GetNonce()) + + expectedNonce := uint64(100) + _ = args.BlockChain.SetCurrentBlockHeaderAndRootHash(&block.Header{Nonce: expectedNonce}, []byte("root")) + + currentHeader = tce.getCurrentBlockHeader() + require.Equal(t, expectedNonce, currentHeader.GetNonce()) +} diff --git a/process/transactionEvaluator/transactionSimulator.go b/process/transactionEvaluator/transactionSimulator.go index 8d1a405643d..c87e79b0472 100644 --- a/process/transactionEvaluator/transactionSimulator.go +++ b/process/transactionEvaluator/transactionSimulator.go @@ -6,6 +6,7 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-core-go/data/block" "github.com/multiversx/mx-chain-core-go/data/receipt" "github.com/multiversx/mx-chain-core-go/data/smartContractResult" @@ -33,6 +34,7 @@ type ArgsTxSimulator struct { Hasher hashing.Hasher Marshalizer marshal.Marshalizer DataFieldParser DataFieldParser + BlockChainHook process.BlockChainHookHandler } type refundHandler interface { @@ -50,6 +52,7 @@ type transactionSimulator struct { marshalizer marshal.Marshalizer refundDetector refundHandler dataFieldParser 
DataFieldParser + blockChainHook process.BlockChainHookHandler } // NewTransactionSimulator returns a new instance of a transactionSimulator @@ -78,6 +81,9 @@ func NewTransactionSimulator(args ArgsTxSimulator) (*transactionSimulator, error if check.IfNilReflect(args.DataFieldParser) { return nil, ErrNilDataFieldParser } + if check.IfNil(args.BlockChainHook) { + return nil, process.ErrNilBlockChainHook + } return &transactionSimulator{ txProcessor: args.TransactionProcessor, @@ -89,17 +95,20 @@ func NewTransactionSimulator(args ArgsTxSimulator) (*transactionSimulator, error hasher: args.Hasher, refundDetector: transactionAPI.NewRefundDetector(), dataFieldParser: args.DataFieldParser, + blockChainHook: args.BlockChainHook, }, nil } // ProcessTx will process the transaction in a special environment, where state-writing is not allowed -func (ts *transactionSimulator) ProcessTx(tx *transaction.Transaction) (*txSimData.SimulationResultsWithVMOutput, error) { +func (ts *transactionSimulator) ProcessTx(tx *transaction.Transaction, currentHeader data.HeaderHandler) (*txSimData.SimulationResultsWithVMOutput, error) { ts.mutOperation.Lock() defer ts.mutOperation.Unlock() txStatus := transaction.TxStatusPending failReason := "" + ts.blockChainHook.SetCurrentHeader(currentHeader) + retCode, err := ts.txProcessor.ProcessTransaction(tx) if err != nil { failReason = err.Error() diff --git a/process/transactionEvaluator/transactionSimulator_test.go b/process/transactionEvaluator/transactionSimulator_test.go index 727f158c7eb..94da76f4254 100644 --- a/process/transactionEvaluator/transactionSimulator_test.go +++ b/process/transactionEvaluator/transactionSimulator_test.go @@ -76,6 +76,15 @@ func TestNewTransactionSimulator(t *testing.T) { }, exError: ErrNilHasher, }, + { + name: "NilBlockChainHook", + argsFunc: func() ArgsTxSimulator { + args := getTxSimulatorArgs() + args.BlockChainHook = nil + return args + }, + exError: process.ErrNilBlockChainHook, + }, { name: "NilMarshalizer", argsFunc: func() ArgsTxSimulator { @@ -125,7 +134,7 @@ func TestTransactionSimulator_ProcessTxProcessingErrShouldSignal(t *testing.T) { } ts, _ := NewTransactionSimulator(args) - results, err := ts.ProcessTx(&transaction.Transaction{Nonce: 37}) + results, err := ts.ProcessTx(&transaction.Transaction{Nonce: 37}, &block.Header{}) require.NoError(t, err) require.Equal(t, expErr.Error(), results.FailReason) } @@ -207,7 +216,7 @@ func TestTransactionSimulator_ProcessTxShouldIncludeScrsAndReceipts(t *testing.T txHash, _ := core.CalculateHash(args.Marshalizer, args.Hasher, tx) args.VMOutputCacher.Put(txHash, &vmcommon.VMOutput{}, 0) - results, err := ts.ProcessTx(tx) + results, err := ts.ProcessTx(tx, &block.Header{}) require.NoError(t, err) require.Equal( t, @@ -236,6 +245,7 @@ func getTxSimulatorArgs() ArgsTxSimulator { Marshalizer: &mock.MarshalizerMock{}, Hasher: &hashingMocks.HasherMock{}, DataFieldParser: dataFieldParser, + BlockChainHook: &testscommon.BlockChainHookStub{}, } } @@ -261,7 +271,7 @@ func TestTransactionSimulator_ProcessTxConcurrentCalls(t *testing.T) { for i := 0; i < numCalls; i++ { go func(idx int) { time.Sleep(time.Millisecond * 10) - _, _ = txSimulator.ProcessTx(tx) + _, _ = txSimulator.ProcessTx(tx, &block.Header{}) wg.Done() }(i) } diff --git a/scripts/testnet/include/config.sh b/scripts/testnet/include/config.sh index 56d792dc7ed..8fa1d11b3db 100644 --- a/scripts/testnet/include/config.sh +++ b/scripts/testnet/include/config.sh @@ -3,6 +3,7 @@ generateConfig() { TMP_SHARD_OBSERVERCOUNT=$SHARD_OBSERVERCOUNT 
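The transactionSimulator/transactionEvaluator changes above move the choice of execution context to the caller: ProcessTx now receives the block header, the simulator pins it on the blockchain hook before processing, and the API evaluator resolves that header by falling back to the genesis header when no block has been produced yet. Below is a minimal, self-contained Go sketch of that hand-off; HeaderHandler, BlockChainHook and the stub types are simplified stand-ins for the real mx-chain-go interfaces, not repository code.

// Sketch only: simplified stand-ins for the evaluator -> simulator -> hook flow.
package main

import "fmt"

type HeaderHandler interface{ GetNonce() uint64 }

type header struct{ nonce uint64 }

func (h *header) GetNonce() uint64 { return h.nonce }

type BlockChainHook interface{ SetCurrentHeader(HeaderHandler) }

type hookStub struct{ current HeaderHandler }

func (h *hookStub) SetCurrentHeader(hdr HeaderHandler) { h.current = hdr }

type chainStub struct {
	current HeaderHandler
	genesis HeaderHandler
}

// getCurrentBlockHeader mirrors the evaluator's fallback: use the current header
// if one exists, otherwise fall back to the genesis header.
func (c *chainStub) getCurrentBlockHeader() HeaderHandler {
	if c.current == nil {
		return c.genesis
	}
	return c.current
}

type simulator struct{ hook BlockChainHook }

// ProcessTx mirrors the new contract: the caller supplies the header, and the
// simulator makes it visible to the VM through the blockchain hook.
func (s *simulator) ProcessTx(txData string, currentHeader HeaderHandler) {
	s.hook.SetCurrentHeader(currentHeader)
	fmt.Printf("simulating %q at nonce %d\n", txData, currentHeader.GetNonce())
}

func main() {
	chain := &chainStub{genesis: &header{nonce: 0}}
	sim := &simulator{hook: &hookStub{}}

	// Right after start-up there is no current block, so the genesis header is used.
	sim.ProcessTx("move-balance", chain.getCurrentBlockHeader())

	chain.current = &header{nonce: 1000}
	sim.ProcessTx("move-balance", chain.getCurrentBlockHeader())
}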
TMP_META_OBSERVERCOUNT=$META_OBSERVERCOUNT + # set num of observers to 0, they will start with generated keys if [[ $MULTI_KEY_NODES -eq 1 ]]; then TMP_SHARD_OBSERVERCOUNT=0 TMP_META_OBSERVERCOUNT=0 @@ -131,10 +132,53 @@ updateNodeConfig() { sed -i '/\[Antiflood\]/,/\[Logger\]/ s/true/false/' config_observer.toml fi + updateConfigsForStakingV4 + echo "Updated configuration for Nodes." popd } +updateConfigsForStakingV4() { + config=$(cat enableEpochs.toml) + + echo "Updating staking v4 configs" + + # Get the StakingV4Step3EnableEpoch value + staking_enable_epoch=$(echo "$config" | awk -F '=' '/ StakingV4Step3EnableEpoch/{gsub(/^[ \t]+|[ \t]+$/,"", $2); print $2; exit}') + # Count the number of entries in MaxNodesChangeEnableEpoch + entry_count=$(echo "$config" | awk '/MaxNodesChangeEnableEpoch/,/\]/{if ($0 ~ /\{/) {count++}} END {print count}') + + # Check if entry_count is less than 2 + if [ "$entry_count" -lt 2 ]; then + echo "Not enough entries found to update" + else + # Find all entries in MaxNodesChangeEnableEpoch + all_entries=$(awk '/MaxNodesChangeEnableEpoch/,/\]/{if ($0 ~ /^[[:space:]]*\{/) {p=1}; if (p) print; if ($0 ~ /\]/) p=0}' enableEpochs.toml | grep -vE '^\s*#' | sed '/^\s*$/d') + + # Get the index of the entry with EpochEnable equal to StakingV4Step3EnableEpoch + index=$(echo "$all_entries" | grep -n "EpochEnable = $staking_enable_epoch" | cut -d: -f1) + + if [[ -z "${index// }" ]]; then + echo -e "\033[1;33mWarning: MaxNodesChangeEnableEpoch does not contain an entry enable epoch for StakingV4Step3EnableEpoch, nodes might fail to start...\033[0m" + else + prev_entry=$(echo "$all_entries" | sed -n "$((index-1))p") + curr_entry=$(echo "$all_entries" | sed -n "$((index))p") + + # Extract the value of MaxNumNodes & NodesToShufflePerShard from prev_entry + max_nodes_from_prev_epoch=$(echo "$prev_entry" | awk -F 'MaxNumNodes = ' '{print $2}' | cut -d ',' -f1) + nodes_to_shuffle_per_shard=$(echo "$prev_entry" | awk -F 'NodesToShufflePerShard = ' '{gsub(/[^0-9]+/, "", $2); print $2}') + + # Calculate the new MaxNumNodes value based on the formula + new_max_nodes=$((max_nodes_from_prev_epoch - (SHARDCOUNT + 1) * nodes_to_shuffle_per_shard)) + curr_entry_updated=$(echo "$curr_entry" | awk -v new_max_nodes="$new_max_nodes" '{gsub(/MaxNumNodes = [0-9]+,/, "MaxNumNodes = " new_max_nodes ",")}1') + + echo "Updating entry in MaxNodesChangeEnableEpoch from $curr_entry to $curr_entry_updated" + + sed -i "/$staking_enable_epoch/,/$staking_enable_epoch/ s|.*$curr_entry.*|$curr_entry_updated|" enableEpochs.toml + fi + fi +} + copyProxyConfig() { pushd $TESTNETDIR diff --git a/scripts/testnet/include/observers.sh b/scripts/testnet/include/observers.sh index 6ba9ff9293a..50e7f5ade03 100644 --- a/scripts/testnet/include/observers.sh +++ b/scripts/testnet/include/observers.sh @@ -82,10 +82,18 @@ assembleCommand_startObserverNode() { let "KEY_INDEX=$TOTAL_NODECOUNT - $OBSERVER_INDEX - 1" WORKING_DIR=$TESTNETDIR/node_working_dirs/observer$OBSERVER_INDEX + KEYS_FLAGS="-validator-key-pem-file ./config/validatorKey.pem -sk-index $KEY_INDEX" + # if node is running in multi key mode, in order to avoid loading the common allValidatorKeys.pem file + # and force generating a new key for observers, simply provide an invalid path + if [[ $MULTI_KEY_NODES -eq 1 ]]; then + TMP_MISSING_PEM="missing-file.pem" + KEYS_FLAGS="-all-validator-keys-pem-file $TMP_MISSING_PEM -validator-key-pem-file $TMP_MISSING_PEM" + fi + local nodeCommand="./node \ -port $PORT --profile-mode -log-save -log-level $LOGLEVEL 
--log-logger-name --log-correlation --use-health-service -rest-api-interface localhost:$RESTAPIPORT \ -destination-shard-as-observer $SHARD \ - -sk-index $KEY_INDEX \ + $KEYS_FLAGS \ -working-directory $WORKING_DIR -config ./config/config_observer.toml $EXTRA_OBSERVERS_FLAGS" if [ -n "$NODE_NICENESS" ] diff --git a/scripts/testnet/variables.sh b/scripts/testnet/variables.sh index 1dc3c7cc65c..f3fb44c5866 100644 --- a/scripts/testnet/variables.sh +++ b/scripts/testnet/variables.sh @@ -170,10 +170,6 @@ export TOTAL_OBSERVERCOUNT=$total_observer_count # to enable the full archive feature on the observers, please use the --full-archive flag export EXTRA_OBSERVERS_FLAGS="-operation-mode db-lookup-extension" -if [[ $MULTI_KEY_NODES -eq 1 ]]; then - EXTRA_OBSERVERS_FLAGS="--no-key" -fi - # Leave unchanged. let "total_node_count = $SHARD_VALIDATORCOUNT * $SHARDCOUNT + $META_VALIDATORCOUNT + $TOTAL_OBSERVERCOUNT" export TOTAL_NODECOUNT=$total_node_count diff --git a/sharding/mock/enableEpochsHandlerMock.go b/sharding/mock/enableEpochsHandlerMock.go index e275c4ea165..32c6b4fa14c 100644 --- a/sharding/mock/enableEpochsHandlerMock.go +++ b/sharding/mock/enableEpochsHandlerMock.go @@ -7,7 +7,6 @@ import ( // EnableEpochsHandlerMock - type EnableEpochsHandlerMock struct { - WaitingListFixEnableEpochField uint32 RefactorPeersMiniBlocksEnableEpochField uint32 IsRefactorPeersMiniBlocksFlagEnabledField bool CurrentEpoch uint32 @@ -18,8 +17,6 @@ func (mock *EnableEpochsHandlerMock) GetActivationEpoch(flag core.EnableEpochFla switch flag { case common.RefactorPeersMiniBlocksFlag: return mock.RefactorPeersMiniBlocksEnableEpochField - case common.WaitingListFixFlag: - return mock.WaitingListFixEnableEpochField default: return 0 diff --git a/sharding/nodesCoordinator/common.go b/sharding/nodesCoordinator/common.go index c771e711740..1e376cd6b65 100644 --- a/sharding/nodesCoordinator/common.go +++ b/sharding/nodesCoordinator/common.go @@ -52,6 +52,7 @@ func displayNodesConfiguration( waiting map[uint32][]Validator, leaving map[uint32][]Validator, actualRemaining map[uint32][]Validator, + shuffledOut map[uint32][]Validator, nbShards uint32, ) { for shard := uint32(0); shard <= nbShards; shard++ { @@ -75,6 +76,10 @@ func displayNodesConfiguration( pk := v.PubKey() log.Debug("actually remaining", "pk", pk, "shardID", shardID) } + for _, v := range shuffledOut[shardID] { + pk := v.PubKey() + log.Debug("shuffled out", "pk", pk, "shardID", shardID) + } } } diff --git a/sharding/nodesCoordinator/consensusGroupProviderBench_test.go b/sharding/nodesCoordinator/consensusGroupProviderBench_test.go index c24f6f9549f..49731812213 100644 --- a/sharding/nodesCoordinator/consensusGroupProviderBench_test.go +++ b/sharding/nodesCoordinator/consensusGroupProviderBench_test.go @@ -1,11 +1,9 @@ package nodesCoordinator import ( - "math/rand" "testing" ) -const randSeed = 75 const numValidators = 63 const numValidatorsInEligibleList = 400 @@ -20,7 +18,6 @@ func getRandomness() []byte { func BenchmarkReslicingBasedProvider_Get(b *testing.B) { numVals := numValidators - rand.Seed(randSeed) expElList := getExpandedEligibleList(numValidatorsInEligibleList) randomness := getRandomness() @@ -32,7 +29,6 @@ func BenchmarkReslicingBasedProvider_Get(b *testing.B) { func BenchmarkSelectionBasedProvider_Get(b *testing.B) { numVals := numValidators - rand.Seed(randSeed) expElList := getExpandedEligibleList(numValidatorsInEligibleList) randomness := getRandomness() diff --git a/sharding/nodesCoordinator/dtos.go b/sharding/nodesCoordinator/dtos.go 
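One note on the consensusGroupProviderBench_test.go change above: the rand.Seed(randSeed) calls were dropped, presumably because the global math/rand source is seeded automatically (and rand.Seed is deprecated) since Go 1.20. If reproducible values were ever needed in those benchmarks again, the idiomatic substitute would be a locally seeded generator; this is a generic sketch, not code from the repository.

package main

import (
	"fmt"
	"math/rand"
)

func main() {
	// Local, explicitly seeded source: reproducible without touching the
	// (auto-seeded) global generator.
	rng := rand.New(rand.NewSource(75))
	fmt.Println(rng.Intn(100), rng.Intn(100))
}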
index 854dd931d8d..ab54bdeb4fa 100644 --- a/sharding/nodesCoordinator/dtos.go +++ b/sharding/nodesCoordinator/dtos.go @@ -7,6 +7,7 @@ type ArgsUpdateNodes struct { NewNodes []Validator UnStakeLeaving []Validator AdditionalLeaving []Validator + Auction []Validator Rand []byte NbShards uint32 Epoch uint32 @@ -16,6 +17,7 @@ type ArgsUpdateNodes struct { type ResUpdateNodes struct { Eligible map[uint32][]Validator Waiting map[uint32][]Validator + ShuffledOut map[uint32][]Validator Leaving []Validator StillRemaining []Validator } diff --git a/sharding/nodesCoordinator/errors.go b/sharding/nodesCoordinator/errors.go index def3944cc0d..3d063f4605e 100644 --- a/sharding/nodesCoordinator/errors.go +++ b/sharding/nodesCoordinator/errors.go @@ -114,3 +114,12 @@ var ErrNilGenesisNodesSetupHandler = errors.New("nil genesis nodes setup handler // ErrKeyNotFoundInWaitingList signals that the provided key has not been found in waiting list var ErrKeyNotFoundInWaitingList = errors.New("key not found in waiting list") + +// ErrNilNodesCoordinatorRegistryFactory signals that a nil nodes coordinator registry factory has been given +var ErrNilNodesCoordinatorRegistryFactory = errors.New("nil nodes coordinator registry factory has been given") + +// ErrReceivedAuctionValidatorsBeforeStakingV4 signals that auction nodes have been received from peer mini blocks before enabling staking v4 +var ErrReceivedAuctionValidatorsBeforeStakingV4 = errors.New("should not have received selected nodes from auction in peer mini blocks, since staking v4 is not enabled yet") + +// ErrNilEpochNotifier signals that a nil EpochNotifier has been provided +var ErrNilEpochNotifier = errors.New("nil epoch notifier provided") diff --git a/sharding/nodesCoordinator/hashValidatorShuffler.go b/sharding/nodesCoordinator/hashValidatorShuffler.go index a4a7e178ee1..ceecc9ca352 100644 --- a/sharding/nodesCoordinator/hashValidatorShuffler.go +++ b/sharding/nodesCoordinator/hashValidatorShuffler.go @@ -7,10 +7,12 @@ import ( "sync" "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/core/atomic" "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-core-go/hashing/sha256" "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/epochStart" ) var _ NodesShuffler = (*randHashShuffler)(nil) @@ -24,6 +26,7 @@ type NodesShufflerArgs struct { ShuffleBetweenShards bool MaxNodesEnableConfig []config.MaxNodesChangeConfig EnableEpochsHandler common.EnableEpochsHandler + EnableEpochs config.EnableEpochs } type shuffleNodesArg struct { @@ -32,14 +35,26 @@ type shuffleNodesArg struct { unstakeLeaving []Validator additionalLeaving []Validator newNodes []Validator + auction []Validator randomness []byte distributor ValidatorsDistributor nodesMeta uint32 nodesPerShard uint32 nbShards uint32 maxNodesToSwapPerShard uint32 + maxNumNodes uint32 flagBalanceWaitingLists bool - flagWaitingListFix bool + flagStakingV4Step2 bool + flagStakingV4Step3 bool +} + +type shuffledNodesConfig struct { + numShuffled uint32 + numNewEligible uint32 + numNewWaiting uint32 + numSelectedAuction uint32 + maxNumNodes uint32 + flagStakingV4Step2 bool } // TODO: Decide if transaction load statistics will be used for limiting the number of shards @@ -48,16 +63,20 @@ type randHashShuffler struct { // when reinitialization of node in new shard is implemented shuffleBetweenShards bool - adaptivity bool - nodesShard uint32 - nodesMeta uint32 - shardHysteresis 
uint32 - metaHysteresis uint32 - activeNodesConfig config.MaxNodesChangeConfig - availableNodesConfigs []config.MaxNodesChangeConfig - mutShufflerParams sync.RWMutex - validatorDistributor ValidatorsDistributor - enableEpochsHandler common.EnableEpochsHandler + adaptivity bool + nodesShard uint32 + nodesMeta uint32 + shardHysteresis uint32 + metaHysteresis uint32 + activeNodesConfig config.MaxNodesChangeConfig + availableNodesConfigs []config.MaxNodesChangeConfig + mutShufflerParams sync.RWMutex + validatorDistributor ValidatorsDistributor + enableEpochsHandler common.EnableEpochsHandler + stakingV4Step2EnableEpoch uint32 + flagStakingV4Step2 atomic.Flag + stakingV4Step3EnableEpoch uint32 + flagStakingV4Step3 atomic.Flag } // NewHashValidatorsShuffler creates a validator shuffler that uses a hash between validator key and a given @@ -71,7 +90,6 @@ func NewHashValidatorsShuffler(args *NodesShufflerArgs) (*randHashShuffler, erro } err := core.CheckHandlerCompatibility(args.EnableEpochsHandler, []core.EnableEpochFlag{ common.BalanceWaitingListsFlag, - common.WaitingListFixFlag, }) if err != nil { return nil, err @@ -80,6 +98,9 @@ func NewHashValidatorsShuffler(args *NodesShufflerArgs) (*randHashShuffler, erro var configs []config.MaxNodesChangeConfig log.Debug("hashValidatorShuffler: enable epoch for max nodes change", "epoch", args.MaxNodesEnableConfig) + log.Debug("hashValidatorShuffler: enable epoch for staking v4 step 2", "epoch", args.EnableEpochs.StakingV4Step2EnableEpoch) + log.Debug("hashValidatorShuffler: enable epoch for staking v4 step 3", "epoch", args.EnableEpochs.StakingV4Step3EnableEpoch) + if args.MaxNodesEnableConfig != nil { configs = make([]config.MaxNodesChangeConfig, len(args.MaxNodesEnableConfig)) copy(configs, args.MaxNodesEnableConfig) @@ -87,9 +108,11 @@ func NewHashValidatorsShuffler(args *NodesShufflerArgs) (*randHashShuffler, erro log.Debug("Shuffler created", "shuffleBetweenShards", args.ShuffleBetweenShards) rxs := &randHashShuffler{ - shuffleBetweenShards: args.ShuffleBetweenShards, - availableNodesConfigs: configs, - enableEpochsHandler: args.EnableEpochsHandler, + shuffleBetweenShards: args.ShuffleBetweenShards, + availableNodesConfigs: configs, + enableEpochsHandler: args.EnableEpochsHandler, + stakingV4Step2EnableEpoch: args.EnableEpochs.StakingV4Step2EnableEpoch, + stakingV4Step3EnableEpoch: args.EnableEpochs.StakingV4Step3EnableEpoch, } rxs.UpdateParams(args.NodesShard, args.NodesMeta, args.Hysteresis, args.Adaptivity) @@ -178,6 +201,7 @@ func (rhs *randHashShuffler) UpdateNodeLists(args ArgsUpdateNodes) (*ResUpdateNo unstakeLeaving: args.UnStakeLeaving, additionalLeaving: args.AdditionalLeaving, newNodes: args.NewNodes, + auction: args.Auction, randomness: args.Rand, nodesMeta: nodesMeta, nodesPerShard: nodesPerShard, @@ -185,7 +209,9 @@ func (rhs *randHashShuffler) UpdateNodeLists(args ArgsUpdateNodes) (*ResUpdateNo distributor: rhs.validatorDistributor, maxNodesToSwapPerShard: rhs.activeNodesConfig.NodesToShufflePerShard, flagBalanceWaitingLists: rhs.enableEpochsHandler.IsFlagEnabledInEpoch(common.BalanceWaitingListsFlag, args.Epoch), - flagWaitingListFix: rhs.enableEpochsHandler.IsFlagEnabledInEpoch(common.WaitingListFixFlag, args.Epoch), + flagStakingV4Step2: rhs.flagStakingV4Step2.IsSet(), + flagStakingV4Step3: rhs.flagStakingV4Step3.IsSet(), + maxNumNodes: rhs.activeNodesConfig.MaxNumNodes, }) } @@ -263,18 +289,12 @@ func shuffleNodes(arg shuffleNodesArg) (*ResUpdateNodes, error) { eligibleCopy, waitingCopy, numToRemove, - remainingUnstakeLeaving, - 
int(arg.nodesMeta), - int(arg.nodesPerShard), - arg.flagWaitingListFix) + remainingUnstakeLeaving) newEligible, newWaiting, stillRemainingAdditionalLeaving := removeLeavingNodesFromValidatorMaps( newEligible, newWaiting, numToRemove, - remainingAdditionalLeaving, - int(arg.nodesMeta), - int(arg.nodesPerShard), - arg.flagWaitingListFix) + remainingAdditionalLeaving) stillRemainingInLeaving := append(stillRemainingUnstakeLeaving, stillRemainingAdditionalLeaving...) @@ -282,17 +302,44 @@ func shuffleNodes(arg shuffleNodesArg) (*ResUpdateNodes, error) { err = moveMaxNumNodesToMap(newEligible, newWaiting, arg.nodesMeta, arg.nodesPerShard) if err != nil { - log.Warn("moveNodesToMap failed", "error", err) + return nil, fmt.Errorf("moveNodesToMap failed, error: %w", err) } - err = distributeValidators(newWaiting, arg.newNodes, arg.randomness, false) + err = checkAndDistributeNewNodes(newWaiting, arg.newNodes, arg.randomness, arg.flagStakingV4Step3) if err != nil { - log.Warn("distributeValidators newNodes failed", "error", err) + return nil, fmt.Errorf("distributeValidators newNodes failed, error: %w", err) } - err = arg.distributor.DistributeValidators(newWaiting, shuffledOutMap, arg.randomness, arg.flagBalanceWaitingLists) - if err != nil { - log.Warn("distributeValidators shuffledOut failed", "error", err) + shuffledNodesCfg := &shuffledNodesConfig{ + numShuffled: getNumPubKeys(shuffledOutMap), + numNewEligible: getNumPubKeys(newEligible), + numNewWaiting: getNumPubKeys(newWaiting), + numSelectedAuction: uint32(len(arg.auction)), + maxNumNodes: arg.maxNumNodes, + flagStakingV4Step2: arg.flagStakingV4Step2, + } + + lowWaitingList := shouldDistributeShuffledToWaitingInStakingV4(shuffledNodesCfg) + if arg.flagStakingV4Step3 || lowWaitingList { + log.Debug("distributing selected nodes from auction to waiting", + "num auction nodes", len(arg.auction), "num waiting nodes", shuffledNodesCfg.numNewWaiting) + + // Distribute selected validators from AUCTION -> WAITING + err = distributeValidators(newWaiting, arg.auction, arg.randomness, arg.flagBalanceWaitingLists) + if err != nil { + return nil, fmt.Errorf("distributeValidators auction list failed, error: %w", err) + } + } + + if !arg.flagStakingV4Step2 || lowWaitingList { + log.Debug("distributing shuffled out nodes to waiting", + "num shuffled nodes", shuffledNodesCfg.numShuffled, "num waiting nodes", shuffledNodesCfg.numNewWaiting) + + // Distribute validators from SHUFFLED OUT -> WAITING + err = arg.distributor.DistributeValidators(newWaiting, shuffledOutMap, arg.randomness, arg.flagBalanceWaitingLists) + if err != nil { + return nil, fmt.Errorf("distributeValidators shuffled out failed, error: %w", err) + } } actualLeaving, _ := removeValidatorsFromList(allLeaving, stillRemainingInLeaving, len(stillRemainingInLeaving)) @@ -300,6 +347,7 @@ func shuffleNodes(arg shuffleNodesArg) (*ResUpdateNodes, error) { return &ResUpdateNodes{ Eligible: newEligible, Waiting: newWaiting, + ShuffledOut: shuffledOutMap, Leaving: actualLeaving, StillRemaining: stillRemainingInLeaving, }, nil @@ -381,62 +429,16 @@ func removeLeavingNodesFromValidatorMaps( waiting map[uint32][]Validator, numToRemove map[uint32]int, leaving []Validator, - minNodesMeta int, - minNodesPerShard int, - waitingFixEnabled bool, ) (map[uint32][]Validator, map[uint32][]Validator, []Validator) { stillRemainingInLeaving := make([]Validator, len(leaving)) copy(stillRemainingInLeaving, leaving) - if !waitingFixEnabled { - newWaiting, stillRemainingInLeaving := removeNodesFromMap(waiting, 
stillRemainingInLeaving, numToRemove) - newEligible, stillRemainingInLeaving := removeNodesFromMap(eligible, stillRemainingInLeaving, numToRemove) - return newEligible, newWaiting, stillRemainingInLeaving - } - - return removeLeavingNodes(eligible, waiting, numToRemove, stillRemainingInLeaving, minNodesMeta, minNodesPerShard) -} - -func removeLeavingNodes( - eligible map[uint32][]Validator, - waiting map[uint32][]Validator, - numToRemove map[uint32]int, - stillRemainingInLeaving []Validator, - minNodesMeta int, - minNodesPerShard int, -) (map[uint32][]Validator, map[uint32][]Validator, []Validator) { - maxNumToRemoveFromWaiting := make(map[uint32]int) - for shardId := range eligible { - computedMinNumberOfNodes := computeMinNumberOfNodes(eligible, waiting, shardId, minNodesMeta, minNodesPerShard) - maxNumToRemoveFromWaiting[shardId] = computedMinNumberOfNodes - } - - newWaiting, stillRemainingInLeaving := removeNodesFromMap(waiting, stillRemainingInLeaving, maxNumToRemoveFromWaiting) - - for shardId, toRemove := range numToRemove { - computedMinNumberOfNodes := computeMinNumberOfNodes(eligible, waiting, shardId, minNodesMeta, minNodesPerShard) - if toRemove > computedMinNumberOfNodes { - numToRemove[shardId] = computedMinNumberOfNodes - } - } - + newWaiting, stillRemainingInLeaving := removeNodesFromMap(waiting, stillRemainingInLeaving, numToRemove) newEligible, stillRemainingInLeaving := removeNodesFromMap(eligible, stillRemainingInLeaving, numToRemove) return newEligible, newWaiting, stillRemainingInLeaving } -func computeMinNumberOfNodes(eligible map[uint32][]Validator, waiting map[uint32][]Validator, shardId uint32, minNodesMeta int, minNodesPerShard int) int { - minimumNumberOfNodes := minNodesPerShard - if shardId == core.MetachainShardId { - minimumNumberOfNodes = minNodesMeta - } - computedMinNumberOfNodes := len(eligible[shardId]) + len(waiting[shardId]) - minimumNumberOfNodes - if computedMinNumberOfNodes < 0 { - computedMinNumberOfNodes = 0 - } - return computedMinNumberOfNodes -} - // computeNewShards determines the new number of shards based on the number of nodes in the network func (rhs *randHashShuffler) computeNewShards( eligible map[uint32][]Validator, @@ -586,6 +588,51 @@ func removeValidatorFromList(validatorList []Validator, index int) []Validator { return validatorList[:len(validatorList)-1] } +func checkAndDistributeNewNodes( + waiting map[uint32][]Validator, + newNodes []Validator, + randomness []byte, + flagStakingV4Step3 bool, +) error { + if !flagStakingV4Step3 { + return distributeValidators(waiting, newNodes, randomness, false) + } + + if len(newNodes) > 0 { + return epochStart.ErrReceivedNewListNodeInStakingV4 + } + + return nil +} + +func shouldDistributeShuffledToWaitingInStakingV4(shuffledNodesCfg *shuffledNodesConfig) bool { + if !shuffledNodesCfg.flagStakingV4Step2 { + return false + } + + totalNewWaiting := shuffledNodesCfg.numNewWaiting + shuffledNodesCfg.numSelectedAuction + totalNodes := totalNewWaiting + shuffledNodesCfg.numNewEligible + shuffledNodesCfg.numShuffled + + log.Debug("checking if should distribute shuffled out nodes to waiting in staking v4", + "numShuffled", shuffledNodesCfg.numShuffled, + "numNewEligible", shuffledNodesCfg.numNewEligible, + "numSelectedAuction", shuffledNodesCfg.numSelectedAuction, + "totalNewWaiting", totalNewWaiting, + "totalNodes", totalNodes, + "maxNumNodes", shuffledNodesCfg.maxNumNodes, + ) + + distributeShuffledToWaitingInStakingV4 := false + if totalNodes <= shuffledNodesCfg.maxNumNodes { + log.Debug("num of 
total nodes in waiting is too low after shuffling; will distribute " + + "shuffled out nodes directly to waiting and skip sending them to auction") + + distributeShuffledToWaitingInStakingV4 = true + } + + return distributeShuffledToWaitingInStakingV4 +} + func removeValidatorFromListKeepOrder(validatorList []Validator, index int) []Validator { indexNotOK := index > len(validatorList)-1 || index < 0 if indexNotOK { @@ -646,6 +693,16 @@ func moveNodesToMap(destination map[uint32][]Validator, source map[uint32][]Vali return nil } +func getNumPubKeys(shardValidatorsMap map[uint32][]Validator) uint32 { + numPubKeys := uint32(0) + + for _, validatorsInShard := range shardValidatorsMap { + numPubKeys += uint32(len(validatorsInShard)) + } + + return numPubKeys +} + // moveMaxNumNodesToMap moves the validators in the source list to the corresponding destination list // but adding just enough nodes so that at most the number of nodes is kept in the destination list // The parameter maxNodesToMove is a limiting factor and should limit the number of nodes @@ -778,6 +835,12 @@ func (rhs *randHashShuffler) updateShufflerConfig(epoch uint32) { "epochEnable", rhs.activeNodesConfig.EpochEnable, "maxNodesToShufflePerShard", rhs.activeNodesConfig.NodesToShufflePerShard, ) + + rhs.flagStakingV4Step3.SetValue(epoch >= rhs.stakingV4Step3EnableEpoch) + log.Debug("staking v4 step3", "enabled", rhs.flagStakingV4Step3.IsSet()) + + rhs.flagStakingV4Step2.SetValue(epoch >= rhs.stakingV4Step2EnableEpoch) + log.Debug("staking v4 step2", "enabled", rhs.flagStakingV4Step2.IsSet()) } func (rhs *randHashShuffler) sortConfigs() { diff --git a/sharding/nodesCoordinator/hashValidatorShuffler_test.go b/sharding/nodesCoordinator/hashValidatorShuffler_test.go index 79a8ed1e7f8..788ec3f9b59 100644 --- a/sharding/nodesCoordinator/hashValidatorShuffler_test.go +++ b/sharding/nodesCoordinator/hashValidatorShuffler_test.go @@ -13,10 +13,9 @@ import ( "testing" "github.com/multiversx/mx-chain-core-go/core" - "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/epochStart" "github.com/multiversx/mx-chain-go/sharding/mock" - "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -194,8 +193,11 @@ func createHashShufflerInter() (*randHashShuffler, error) { Hysteresis: hysteresis, Adaptivity: adaptivity, ShuffleBetweenShards: true, - MaxNodesEnableConfig: nil, EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, + EnableEpochs: config.EnableEpochs{ + StakingV4Step2EnableEpoch: 443, + StakingV4Step3EnableEpoch: 444, + }, } shuffler, err := NewHashValidatorsShuffler(shufflerArgs) @@ -210,8 +212,11 @@ func createHashShufflerIntraShards() (*randHashShuffler, error) { Hysteresis: hysteresis, Adaptivity: adaptivity, ShuffleBetweenShards: shuffleBetweenShards, - MaxNodesEnableConfig: nil, - EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, + EnableEpochs: config.EnableEpochs{ + StakingV4Step2EnableEpoch: 443, + StakingV4Step3EnableEpoch: 444, + }, + EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, } shuffler, err := NewHashValidatorsShuffler(shufflerArgs) @@ -989,10 +994,7 @@ func Test_shuffleOutNodesWithLeaving(t *testing.T) { copyEligibleMap, copyWaitingMap, numToRemove, - leaving, - eligibleNodesPerShard, - eligibleNodesPerShard, - true) + leaving) shuffledOut, newEligible := shuffleOutNodes(newEligible, numToRemove, randomness) shuffleOutList := make([]Validator, 0) 
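The shouldDistributeShuffledToWaitingInStakingV4 helper above reduces to simple arithmetic over node counts: shuffled-out validators bypass the auction and go straight back to waiting whenever the projected total (new waiting + selected auction + new eligible + shuffled out) still fits under MaxNumNodes. The sketch below restates that check as a standalone function with made-up counts; the numbers are illustrative only, not a real network configuration.

// Standalone restatement of the shuffler's "low waiting list" decision.
package main

import "fmt"

type shuffledNodesConfig struct {
	numShuffled        uint32
	numNewEligible     uint32
	numNewWaiting      uint32
	numSelectedAuction uint32
	maxNumNodes        uint32
	flagStakingV4Step2 bool
}

func shouldDistributeShuffledToWaiting(cfg shuffledNodesConfig) bool {
	if !cfg.flagStakingV4Step2 {
		return false
	}

	totalNewWaiting := cfg.numNewWaiting + cfg.numSelectedAuction
	totalNodes := totalNewWaiting + cfg.numNewEligible + cfg.numShuffled

	// When even the shuffled-out nodes would not push the total above MaxNumNodes,
	// the waiting lists are considered too thin: shuffled-out validators are sent
	// straight back to waiting instead of going through the auction.
	return totalNodes <= cfg.maxNumNodes
}

func main() {
	cfg := shuffledNodesConfig{
		numShuffled:        240,  // shuffled out of eligible this epoch
		numNewEligible:     3200, // eligible after the shuffle
		numNewWaiting:      480,  // already waiting
		numSelectedAuction: 80,   // selected from the auction
		maxNumNodes:        4000,
		flagStakingV4Step2: true,
	}
	// 480 + 80 + 3200 + 240 = 4000 <= 4000, so shuffled-out nodes bypass the auction.
	fmt.Println(shouldDistributeShuffledToWaiting(cfg))
}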
for _, shuffledOutPerShard := range shuffledOut { @@ -1027,10 +1029,7 @@ func Test_shuffleOutNodesWithLeavingMoreThanWaiting(t *testing.T) { copyEligibleMap, copyWaitingMap, numToRemove, - leaving, - eligibleNodesPerShard, - eligibleNodesPerShard, - true) + leaving) shuffledOut, newEligible := shuffleOutNodes(newEligible, numToRemove, randomness) shuffleOutList := make([]Validator, 0) @@ -1048,52 +1047,30 @@ func Test_removeLeavingNodesFromValidatorMaps(t *testing.T) { waitingNodesPerShard := 40 nbShards := uint32(2) - tests := []struct { - waitingFixEnabled bool - remainingToRemove int - }{ - { - waitingFixEnabled: false, - remainingToRemove: 18, - }, - { - waitingFixEnabled: true, - remainingToRemove: 20, - }, + leaving := make([]Validator, 0) + + eligibleMap := generateValidatorMap(eligibleNodesPerShard, nbShards) + waitingMap := generateValidatorMap(waitingNodesPerShard, nbShards) + for _, waitingValidators := range waitingMap { + leaving = append(leaving, waitingValidators[:2]...) } - for _, tt := range tests { - t.Run("", func(t *testing.T) { - leaving := make([]Validator, 0) + numToRemove := make(map[uint32]int) - eligibleMap := generateValidatorMap(eligibleNodesPerShard, nbShards) - waitingMap := generateValidatorMap(waitingNodesPerShard, nbShards) - for _, waitingValidators := range waitingMap { - leaving = append(leaving, waitingValidators[:2]...) - } + for shardId := range waitingMap { + numToRemove[shardId] = maxShuffleOutNumber + } + copyEligibleMap := copyValidatorMap(eligibleMap) + copyWaitingMap := copyValidatorMap(waitingMap) - numToRemove := make(map[uint32]int) + _, _, _ = removeLeavingNodesFromValidatorMaps( + copyEligibleMap, + copyWaitingMap, + numToRemove, + leaving) - for shardId := range waitingMap { - numToRemove[shardId] = maxShuffleOutNumber - } - copyEligibleMap := copyValidatorMap(eligibleMap) - copyWaitingMap := copyValidatorMap(waitingMap) - - _, _, _ = removeLeavingNodesFromValidatorMaps( - copyEligibleMap, - copyWaitingMap, - numToRemove, - leaving, - eligibleNodesPerShard, - eligibleNodesPerShard, - tt.waitingFixEnabled, - ) - - for _, remainingToRemove := range numToRemove { - require.Equal(t, tt.remainingToRemove, remainingToRemove) - } - }) + for _, remainingToRemove := range numToRemove { + require.Equal(t, 18, remainingToRemove) } } @@ -1188,15 +1165,17 @@ func TestRandHashShuffler_UpdateParams(t *testing.T) { require.Nil(t, err) shuffler2 := &randHashShuffler{ - nodesShard: 200, - nodesMeta: 200, - shardHysteresis: 0, - metaHysteresis: 0, - adaptivity: true, - shuffleBetweenShards: true, - validatorDistributor: &CrossShardValidatorDistributor{}, - availableNodesConfigs: nil, - enableEpochsHandler: &mock.EnableEpochsHandlerMock{}, + nodesShard: 200, + nodesMeta: 200, + shardHysteresis: 0, + metaHysteresis: 0, + adaptivity: true, + shuffleBetweenShards: true, + validatorDistributor: &CrossShardValidatorDistributor{}, + availableNodesConfigs: nil, + stakingV4Step2EnableEpoch: 443, + stakingV4Step3EnableEpoch: 444, + enableEpochsHandler: &mock.EnableEpochsHandlerMock{}, } shuffler.UpdateParams( @@ -1300,12 +1279,6 @@ func TestRandHashShuffler_UpdateNodeListsWaitingListFixDisabled(t *testing.T) { testUpdateNodesAndCheckNumLeaving(t, true) } -func TestRandHashShuffler_UpdateNodeListsWithWaitingListFixEnabled(t *testing.T) { - t.Parallel() - - testUpdateNodesAndCheckNumLeaving(t, false) -} - func testUpdateNodesAndCheckNumLeaving(t *testing.T, beforeFix bool) { eligiblePerShard := 400 eligibleMeta := 10 @@ -1317,11 +1290,6 @@ func 
testUpdateNodesAndCheckNumLeaving(t *testing.T, beforeFix bool) { numNodesToShuffle := 80 - waitingListFixEnableEpoch := 0 - if beforeFix { - waitingListFixEnableEpoch = 9999 - } - shufflerArgs := &NodesShufflerArgs{ NodesShard: uint32(eligiblePerShard), NodesMeta: uint32(eligibleMeta), @@ -1335,14 +1303,7 @@ func testUpdateNodesAndCheckNumLeaving(t *testing.T, beforeFix bool) { NodesToShufflePerShard: uint32(numNodesToShuffle), }, }, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsFlagEnabledInEpochCalled: func(flag core.EnableEpochFlag, epoch uint32) bool { - if flag == common.WaitingListFixFlag { - return epoch >= uint32(waitingListFixEnableEpoch) - } - return false - }, - }, + EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, } shuffler, err := NewHashValidatorsShuffler(shufflerArgs) @@ -1371,34 +1332,15 @@ func testUpdateNodesAndCheckNumLeaving(t *testing.T, beforeFix bool) { } } -func TestRandHashShuffler_UpdateNodeListsWaitingListWithFixCheckWaitingDisabled(t *testing.T) { - t.Parallel() - - testUpdateNodeListsAndCheckWaitingList(t, true) -} - -func TestRandHashShuffler_UpdateNodeListsWaitingListWithFixCheckWaitingEnabled(t *testing.T) { - t.Parallel() - - testUpdateNodeListsAndCheckWaitingList(t, false) -} - -func testUpdateNodeListsAndCheckWaitingList(t *testing.T, beforeFix bool) { +func TestRandHashShuffler_UpdateNodeListsAndCheckWaitingList(t *testing.T) { eligiblePerShard := 400 eligibleMeta := 10 waitingPerShard := 400 nbShards := 1 - numLeaving := 2 - numNodesToShuffle := 80 - waitingListFixEnableEpoch := 0 - if beforeFix { - waitingListFixEnableEpoch = 9999 - } - shufflerArgs := &NodesShufflerArgs{ NodesShard: uint32(eligiblePerShard), NodesMeta: uint32(eligibleMeta), @@ -1412,14 +1354,7 @@ func testUpdateNodeListsAndCheckWaitingList(t *testing.T, beforeFix bool) { NodesToShufflePerShard: uint32(numNodesToShuffle), }, }, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsFlagEnabledInEpochCalled: func(flag core.EnableEpochFlag, epoch uint32) bool { - if flag == common.WaitingListFixFlag { - return epoch >= uint32(waitingListFixEnableEpoch) - } - return false - }, - }, + EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, } shuffler, err := NewHashValidatorsShuffler(shufflerArgs) @@ -1453,9 +1388,7 @@ func testUpdateNodeListsAndCheckWaitingList(t *testing.T, beforeFix bool) { } expectedNumWaitingMovedToEligible := numNodesToShuffle - if beforeFix { - expectedNumWaitingMovedToEligible -= numLeaving - } + expectedNumWaitingMovedToEligible -= numLeaving assert.Equal(t, expectedNumWaitingMovedToEligible, numWaitingListToEligible) } @@ -1763,10 +1696,7 @@ func TestRandHashShuffler_RemoveLeavingNodesFromValidatorMaps_FromEligible(t *te eligibleCopy, waitingCopy, numToRemove, - leavingValidators, - eligiblePerShard, - eligiblePerShard, - true) + leavingValidators) assert.Equal(t, eligiblePerShard-1, len(newEligible[core.MetachainShardId])) assert.Equal(t, waitingPerShard, len(newWaiting[core.MetachainShardId])) @@ -1804,10 +1734,7 @@ func TestRandHashShuffler_RemoveLeavingNodesFromValidatorMaps_FromWaiting(t *tes eligibleCopy, waitingCopy, numToRemove, - leavingValidators, - eligiblePerShard, - eligiblePerShard, - true) + leavingValidators) assert.Equal(t, eligiblePerShard, len(newEligible[core.MetachainShardId])) assert.Equal(t, waitingPerShard-1, len(newWaiting[core.MetachainShardId])) @@ -1843,10 +1770,7 @@ func TestRandHashShuffler_RemoveLeavingNodesFromValidatorMaps_NonExisting(t *tes eligibleCopy, waitingCopy, 
numToRemove, - leavingValidators, - eligiblePerShard, - eligiblePerShard, - true) + leavingValidators) assert.Equal(t, eligiblePerShard, len(newEligible[core.MetachainShardId])) assert.Equal(t, waitingPerShard, len(newWaiting[core.MetachainShardId])) @@ -1889,10 +1813,7 @@ func TestRandHashShuffler_RemoveLeavingNodesFromValidatorMaps_2Eligible2Waiting2 eligibleCopy, waitingCopy, numToRemove, - leavingValidators, - eligiblePerShard, - eligiblePerShard, - true) + leavingValidators) remainingInEligible := eligiblePerShard - 2 remainingInWaiting := waitingPerShard - 2 @@ -1949,10 +1870,7 @@ func TestRandHashShuffler_RemoveLeavingNodesFromValidatorMaps_2FromEligible2From eligibleCopy, waitingCopy, numToRemove, - leavingValidators, - eligiblePerShard, - eligiblePerShard, - true) + leavingValidators) // removed first 2 from waiting and just one from eligible remainingInEligible := eligiblePerShard - 1 @@ -2403,8 +2321,11 @@ func TestRandHashShuffler_UpdateNodeLists_All(t *testing.T) { Hysteresis: hysteresis, Adaptivity: adaptivity, ShuffleBetweenShards: shuffleBetweenShards, - MaxNodesEnableConfig: nil, - EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, + EnableEpochs: config.EnableEpochs{ + StakingV4Step2EnableEpoch: 443, + StakingV4Step3EnableEpoch: 444, + }, + EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, } shuffler, err := NewHashValidatorsShuffler(shufflerArgs) require.Nil(t, err) @@ -2509,6 +2430,7 @@ func TestRandHashShuffler_UpdateNodeLists_WithNewNodes_NoWaiting(t *testing.T) { ShuffleBetweenShards: shuffleBetweenShards, MaxNodesEnableConfig: nil, EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, + EnableEpochs: config.EnableEpochs{StakingV4Step3EnableEpoch: stakingV4Epoch}, } shuffler, err := NewHashValidatorsShuffler(shufflerArgs) @@ -2570,6 +2492,7 @@ func TestRandHashShuffler_UpdateNodeLists_WithNewNodes_NilOrEmptyWaiting(t *test ShuffleBetweenShards: shuffleBetweenShards, MaxNodesEnableConfig: nil, EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, + EnableEpochs: config.EnableEpochs{StakingV4Step3EnableEpoch: stakingV4Epoch}, } shuffler, err := NewHashValidatorsShuffler(shufflerArgs) require.Nil(t, err) @@ -2642,6 +2565,57 @@ func TestRandHashShuffler_UpdateNodeLists_WithNewNodes_WithWaiting(t *testing.T) assert.Equal(t, previousNumberOfNodes, currentNumberOfNodes) } +func TestRandHashShuffler_UpdateNodeLists_WithStakingV4(t *testing.T) { + t.Parallel() + + numEligiblePerShard := 100 + numWaitingPerShard := 30 + numAuction := 40 + nbShards := uint32(2) + + eligibleMap := generateValidatorMap(numEligiblePerShard, nbShards) + waitingMap := generateValidatorMap(numWaitingPerShard, nbShards) + auctionList := generateValidatorList(numAuction) + + args := ArgsUpdateNodes{ + Eligible: eligibleMap, + Waiting: waitingMap, + UnStakeLeaving: make([]Validator, 0), + AdditionalLeaving: make([]Validator, 0), + Rand: generateRandomByteArray(32), + Auction: auctionList, + NbShards: nbShards, + Epoch: stakingV4Epoch, + } + + shuffler, _ := createHashShufflerIntraShards() + resUpdateNodeList, err := shuffler.UpdateNodeLists(args) + require.Nil(t, err) + + for _, auctionNode := range args.Auction { + found, _ := searchInMap(resUpdateNodeList.Waiting, auctionNode.PubKey()) + assert.True(t, found) + } + + allShuffledOut := getValidatorsInMap(resUpdateNodeList.ShuffledOut) + for _, shuffledOut := range allShuffledOut { + found, _ := searchInMap(args.Eligible, shuffledOut.PubKey()) + assert.True(t, found) + } + + allNewEligible := getValidatorsInMap(resUpdateNodeList.Eligible) + 
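// Sanity check on the staking v4 shuffle: the node count must be conserved — every validator
// supplied via the auction list or the input eligible/waiting maps has to reappear in the
// resulting eligible, waiting or shuffled-out sets, which the assertions below verify.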
allNewWaiting := getValidatorsInMap(resUpdateNodeList.Waiting) + + previousNumberOfNodes := (numEligiblePerShard+numWaitingPerShard)*(int(nbShards)+1) + numAuction + currentNumberOfNodes := len(allNewEligible) + len(allNewWaiting) + len(allShuffledOut) + assert.Equal(t, previousNumberOfNodes, currentNumberOfNodes) + + args.NewNodes = generateValidatorList(100 * (int(nbShards) + 1)) + resUpdateNodeList, err = shuffler.UpdateNodeLists(args) + require.ErrorIs(t, err, epochStart.ErrReceivedNewListNodeInStakingV4) + require.Nil(t, resUpdateNodeList) +} + func TestRandHashShuffler_UpdateNodeLists_WithNewNodes_WithWaiting_WithLeaving(t *testing.T) { t.Parallel() @@ -2699,8 +2673,11 @@ func TestRandHashShuffler_UpdateNodeLists_WithNewNodes_WithWaiting_WithLeaving(t Hysteresis: hysteresis, Adaptivity: adaptivity, ShuffleBetweenShards: shuffleBetweenShards, - MaxNodesEnableConfig: nil, - EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, + EnableEpochs: config.EnableEpochs{ + StakingV4Step2EnableEpoch: 443, + StakingV4Step3EnableEpoch: 444, + }, + EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, } shuffler, err := NewHashValidatorsShuffler(shufflerArgs) require.Nil(t, err) diff --git a/sharding/nodesCoordinator/indexHashedNodesCoordinator.go b/sharding/nodesCoordinator/indexHashedNodesCoordinator.go index b591e94e3e2..f70bce06b04 100644 --- a/sharding/nodesCoordinator/indexHashedNodesCoordinator.go +++ b/sharding/nodesCoordinator/indexHashedNodesCoordinator.go @@ -58,45 +58,49 @@ func (v validatorList) Less(i, j int) bool { // TODO: add a parameter for shardID when acting as observer type epochNodesConfig struct { - nbShards uint32 - shardID uint32 - eligibleMap map[uint32][]Validator - waitingMap map[uint32][]Validator - selectors map[uint32]RandomSelector - leavingMap map[uint32][]Validator - newList []Validator - mutNodesMaps sync.RWMutex + nbShards uint32 + shardID uint32 + eligibleMap map[uint32][]Validator + waitingMap map[uint32][]Validator + selectors map[uint32]RandomSelector + leavingMap map[uint32][]Validator + shuffledOutMap map[uint32][]Validator + newList []Validator + auctionList []Validator + mutNodesMaps sync.RWMutex } type indexHashedNodesCoordinator struct { - shardIDAsObserver uint32 - currentEpoch uint32 - shardConsensusGroupSize int - metaConsensusGroupSize int - numTotalEligible uint64 - selfPubKey []byte - savedStateKey []byte - marshalizer marshal.Marshalizer - hasher hashing.Hasher - shuffler NodesShuffler - epochStartRegistrationHandler EpochStartEventNotifier - bootStorer storage.Storer - nodesConfig map[uint32]*epochNodesConfig - mutNodesConfig sync.RWMutex - mutSavedStateKey sync.RWMutex - nodesCoordinatorHelper NodesCoordinatorHelper - consensusGroupCacher Cacher - loadingFromDisk atomic.Value - shuffledOutHandler ShuffledOutHandler - startEpoch uint32 - publicKeyToValidatorMap map[string]*validatorWithShardID - isFullArchive bool - chanStopNode chan endProcess.ArgEndProcess - flagWaitingListFix atomicFlags.Flag - nodeTypeProvider NodeTypeProviderHandler - enableEpochsHandler common.EnableEpochsHandler - validatorInfoCacher epochStart.ValidatorInfoCacher - genesisNodesSetupHandler GenesisNodesSetupHandler + shardIDAsObserver uint32 + currentEpoch uint32 + shardConsensusGroupSize int + metaConsensusGroupSize int + numTotalEligible uint64 + selfPubKey []byte + savedStateKey []byte + marshalizer marshal.Marshalizer + hasher hashing.Hasher + shuffler NodesShuffler + epochStartRegistrationHandler EpochStartEventNotifier + bootStorer storage.Storer + nodesConfig 
map[uint32]*epochNodesConfig + mutNodesConfig sync.RWMutex + mutSavedStateKey sync.RWMutex + nodesCoordinatorHelper NodesCoordinatorHelper + consensusGroupCacher Cacher + loadingFromDisk atomic.Value + shuffledOutHandler ShuffledOutHandler + startEpoch uint32 + publicKeyToValidatorMap map[string]*validatorWithShardID + isFullArchive bool + chanStopNode chan endProcess.ArgEndProcess + nodeTypeProvider NodeTypeProviderHandler + enableEpochsHandler common.EnableEpochsHandler + validatorInfoCacher epochStart.ValidatorInfoCacher + genesisNodesSetupHandler GenesisNodesSetupHandler + flagStakingV4Step2 atomicFlags.Flag + nodesCoordinatorRegistryFactory NodesCoordinatorRegistryFactory + flagStakingV4Started atomicFlags.Flag } // NewIndexHashedNodesCoordinator creates a new index hashed group selector @@ -109,52 +113,56 @@ func NewIndexHashedNodesCoordinator(arguments ArgNodesCoordinator) (*indexHashed nodesConfig := make(map[uint32]*epochNodesConfig, nodesCoordinatorStoredEpochs) nodesConfig[arguments.Epoch] = &epochNodesConfig{ - nbShards: arguments.NbShards, - shardID: arguments.ShardIDAsObserver, - eligibleMap: make(map[uint32][]Validator), - waitingMap: make(map[uint32][]Validator), - selectors: make(map[uint32]RandomSelector), - leavingMap: make(map[uint32][]Validator), - newList: make([]Validator, 0), - } - + nbShards: arguments.NbShards, + shardID: arguments.ShardIDAsObserver, + eligibleMap: make(map[uint32][]Validator), + waitingMap: make(map[uint32][]Validator), + selectors: make(map[uint32]RandomSelector), + leavingMap: make(map[uint32][]Validator), + shuffledOutMap: make(map[uint32][]Validator), + newList: make([]Validator, 0), + auctionList: make([]Validator, 0), + } + + // todo: if not genesis, use previous randomness from start of epoch meta block savedKey := arguments.Hasher.Compute(string(arguments.SelfPublicKey)) ihnc := &indexHashedNodesCoordinator{ - marshalizer: arguments.Marshalizer, - hasher: arguments.Hasher, - shuffler: arguments.Shuffler, - epochStartRegistrationHandler: arguments.EpochStartNotifier, - bootStorer: arguments.BootStorer, - selfPubKey: arguments.SelfPublicKey, - nodesConfig: nodesConfig, - currentEpoch: arguments.Epoch, - savedStateKey: savedKey, - shardConsensusGroupSize: arguments.ShardConsensusGroupSize, - metaConsensusGroupSize: arguments.MetaConsensusGroupSize, - consensusGroupCacher: arguments.ConsensusGroupCache, - shardIDAsObserver: arguments.ShardIDAsObserver, - shuffledOutHandler: arguments.ShuffledOutHandler, - startEpoch: arguments.StartEpoch, - publicKeyToValidatorMap: make(map[string]*validatorWithShardID), - chanStopNode: arguments.ChanStopNode, - nodeTypeProvider: arguments.NodeTypeProvider, - isFullArchive: arguments.IsFullArchive, - enableEpochsHandler: arguments.EnableEpochsHandler, - validatorInfoCacher: arguments.ValidatorInfoCacher, - genesisNodesSetupHandler: arguments.GenesisNodesSetupHandler, + marshalizer: arguments.Marshalizer, + hasher: arguments.Hasher, + shuffler: arguments.Shuffler, + epochStartRegistrationHandler: arguments.EpochStartNotifier, + bootStorer: arguments.BootStorer, + selfPubKey: arguments.SelfPublicKey, + nodesConfig: nodesConfig, + currentEpoch: arguments.Epoch, + savedStateKey: savedKey, + shardConsensusGroupSize: arguments.ShardConsensusGroupSize, + metaConsensusGroupSize: arguments.MetaConsensusGroupSize, + consensusGroupCacher: arguments.ConsensusGroupCache, + shardIDAsObserver: arguments.ShardIDAsObserver, + shuffledOutHandler: arguments.ShuffledOutHandler, + startEpoch: arguments.StartEpoch, + 
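// Note (editorial): the constructor now also wires arguments.NodesCoordinatorRegistryFactory
// (set further down in this literal); saveState/baseLoadState use it to marshal and parse the
// registry in either the legacy or the auction-aware format, and setNodesPerShards gains an
// extra shuffledOut argument, as shown in the hunks that follow.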
publicKeyToValidatorMap: make(map[string]*validatorWithShardID), + chanStopNode: arguments.ChanStopNode, + nodeTypeProvider: arguments.NodeTypeProvider, + isFullArchive: arguments.IsFullArchive, + enableEpochsHandler: arguments.EnableEpochsHandler, + validatorInfoCacher: arguments.ValidatorInfoCacher, + genesisNodesSetupHandler: arguments.GenesisNodesSetupHandler, + nodesCoordinatorRegistryFactory: arguments.NodesCoordinatorRegistryFactory, } ihnc.loadingFromDisk.Store(false) ihnc.nodesCoordinatorHelper = ihnc - err = ihnc.setNodesPerShards(arguments.EligibleNodes, arguments.WaitingNodes, nil, arguments.Epoch) + err = ihnc.setNodesPerShards(arguments.EligibleNodes, arguments.WaitingNodes, nil, nil, arguments.Epoch) if err != nil { return nil, err } ihnc.fillPublicKeyToValidatorMap() - err = ihnc.saveState(ihnc.savedStateKey) + err = ihnc.saveState(ihnc.savedStateKey, arguments.Epoch) if err != nil { log.Error("saving initial nodes coordinator config failed", "error", err.Error()) @@ -175,6 +183,7 @@ func NewIndexHashedNodesCoordinator(arguments ArgNodesCoordinator) (*indexHashed currentConfig.waitingMap, currentConfig.leavingMap, make(map[uint32][]Validator), + currentConfig.shuffledOutMap, currentConfig.nbShards) ihnc.epochStartRegistrationHandler.RegisterHandler(ihnc) @@ -216,6 +225,9 @@ func checkArguments(arguments ArgNodesCoordinator) error { if check.IfNil(arguments.NodeTypeProvider) { return ErrNilNodeTypeProvider } + if check.IfNil(arguments.NodesCoordinatorRegistryFactory) { + return ErrNilNodesCoordinatorRegistryFactory + } if nil == arguments.ChanStopNode { return ErrNilNodeStopChannel } @@ -224,7 +236,6 @@ func checkArguments(arguments ArgNodesCoordinator) error { } err := core.CheckHandlerCompatibility(arguments.EnableEpochsHandler, []core.EnableEpochFlag{ common.RefactorPeersMiniBlocksFlag, - common.WaitingListFixFlag, }) if err != nil { return err @@ -244,6 +255,7 @@ func (ihnc *indexHashedNodesCoordinator) setNodesPerShards( eligible map[uint32][]Validator, waiting map[uint32][]Validator, leaving map[uint32][]Validator, + shuffledOut map[uint32][]Validator, epoch uint32, ) error { ihnc.mutNodesConfig.Lock() @@ -283,6 +295,7 @@ func (ihnc *indexHashedNodesCoordinator) setNodesPerShards( nodesConfig.eligibleMap = eligible nodesConfig.waitingMap = waiting nodesConfig.leavingMap = leaving + nodesConfig.shuffledOutMap = shuffledOut nodesConfig.shardID, isCurrentNodeValidator = ihnc.computeShardForSelfPublicKey(nodesConfig) nodesConfig.selectors, err = ihnc.createSelectors(nodesConfig) if err != nil { @@ -509,6 +522,30 @@ func (ihnc *indexHashedNodesCoordinator) GetAllLeavingValidatorsPublicKeys(epoch return validatorsPubKeys, nil } +// GetAllShuffledOutValidatorsPublicKeys will return all shuffled out validator public keys from all shards +func (ihnc *indexHashedNodesCoordinator) GetAllShuffledOutValidatorsPublicKeys(epoch uint32) (map[uint32][][]byte, error) { + validatorsPubKeys := make(map[uint32][][]byte) + + ihnc.mutNodesConfig.RLock() + nodesConfig, ok := ihnc.nodesConfig[epoch] + ihnc.mutNodesConfig.RUnlock() + + if !ok { + return nil, fmt.Errorf("%w epoch=%v", ErrEpochNodesConfigDoesNotExist, epoch) + } + + nodesConfig.mutNodesMaps.RLock() + defer nodesConfig.mutNodesMaps.RUnlock() + + for shardID, shuffledOutList := range nodesConfig.shuffledOutMap { + for _, shuffledOutValidator := range shuffledOutList { + validatorsPubKeys[shardID] = append(validatorsPubKeys[shardID], shuffledOutValidator.PubKey()) + } + } + + return validatorsPubKeys, nil +} + // 
GetValidatorsIndexes will return validators indexes for a block func (ihnc *indexHashedNodesCoordinator) GetValidatorsIndexes( publicKeys []string, @@ -563,7 +600,8 @@ func (ihnc *indexHashedNodesCoordinator) EpochStartPrepare(metaHdr data.HeaderHa return } - if _, ok := metaHdr.(*block.MetaBlock); !ok { + _, castOk := metaHdr.(*block.MetaBlock) + if !castOk { log.Error("could not process EpochStartPrepare on nodesCoordinator - not metaBlock") return } @@ -584,37 +622,13 @@ func (ihnc *indexHashedNodesCoordinator) EpochStartPrepare(metaHdr data.HeaderHa return } - ihnc.mutNodesConfig.RLock() - previousConfig := ihnc.nodesConfig[ihnc.currentEpoch] - if previousConfig == nil { - log.Error("previous nodes config is nil") - ihnc.mutNodesConfig.RUnlock() - return - } - - // TODO: remove the copy if no changes are done to the maps - copiedPrevious := &epochNodesConfig{} - copiedPrevious.eligibleMap = copyValidatorMap(previousConfig.eligibleMap) - copiedPrevious.waitingMap = copyValidatorMap(previousConfig.waitingMap) - copiedPrevious.nbShards = previousConfig.nbShards - - ihnc.mutNodesConfig.RUnlock() - // TODO: compare with previous nodesConfig if exists - newNodesConfig, err := ihnc.computeNodesConfigFromList(copiedPrevious, allValidatorInfo) + newNodesConfig, err := ihnc.computeNodesConfigFromList(allValidatorInfo) if err != nil { log.Error("could not compute nodes config from list - do nothing on nodesCoordinator epochStartPrepare") return } - if copiedPrevious.nbShards != newNodesConfig.nbShards { - log.Warn("number of shards does not match", - "previous epoch", ihnc.currentEpoch, - "previous number of shards", copiedPrevious.nbShards, - "new epoch", newEpoch, - "new number of shards", newNodesConfig.nbShards) - } - additionalLeavingMap, err := ihnc.nodesCoordinatorHelper.ComputeAdditionalLeaving(allValidatorInfo) if err != nil { log.Error("could not compute additionalLeaving Nodes - do nothing on nodesCoordinator epochStartPrepare") @@ -628,6 +642,7 @@ func (ihnc *indexHashedNodesCoordinator) EpochStartPrepare(metaHdr data.HeaderHa Eligible: newNodesConfig.eligibleMap, Waiting: newNodesConfig.waitingMap, NewNodes: newNodesConfig.newList, + Auction: newNodesConfig.auctionList, UnStakeLeaving: unStakeLeavingList, AdditionalLeaving: additionalLeavingList, Rand: randomness, @@ -647,13 +662,13 @@ func (ihnc *indexHashedNodesCoordinator) EpochStartPrepare(metaHdr data.HeaderHa resUpdateNodes.Leaving, ) - err = ihnc.setNodesPerShards(resUpdateNodes.Eligible, resUpdateNodes.Waiting, leavingNodesMap, newEpoch) + err = ihnc.setNodesPerShards(resUpdateNodes.Eligible, resUpdateNodes.Waiting, leavingNodesMap, resUpdateNodes.ShuffledOut, newEpoch) if err != nil { log.Error("set nodes per shard failed", "error", err.Error()) } ihnc.fillPublicKeyToValidatorMap() - err = ihnc.saveState(randomness) + err = ihnc.saveState(randomness, newEpoch) ihnc.handleErrorLog(err, "saving nodes coordinator config failed") displayNodesConfiguration( @@ -661,6 +676,7 @@ func (ihnc *indexHashedNodesCoordinator) EpochStartPrepare(metaHdr data.HeaderHa resUpdateNodes.Waiting, leavingNodesMap, stillRemainingNodesMap, + resUpdateNodes.ShuffledOut, newNodesConfig.nbShards) ihnc.mutSavedStateKey.Lock() @@ -714,18 +730,13 @@ func (ihnc *indexHashedNodesCoordinator) GetChance(_ uint32) uint32 { } func (ihnc *indexHashedNodesCoordinator) computeNodesConfigFromList( - previousEpochConfig *epochNodesConfig, validatorInfos []*state.ShardValidatorInfo, ) (*epochNodesConfig, error) { eligibleMap := make(map[uint32][]Validator) waitingMap 
:= make(map[uint32][]Validator) leavingMap := make(map[uint32][]Validator) newNodesList := make([]Validator, 0) - - if ihnc.flagWaitingListFix.IsSet() && previousEpochConfig == nil { - return nil, ErrNilPreviousEpochConfig - } - + auctionList := make([]Validator, 0) if len(validatorInfos) == 0 { log.Warn("computeNodesConfigFromList - validatorInfos len is 0") } @@ -743,25 +754,41 @@ func (ihnc *indexHashedNodesCoordinator) computeNodesConfigFromList( case string(common.EligibleList): eligibleMap[validatorInfo.ShardId] = append(eligibleMap[validatorInfo.ShardId], currentValidator) case string(common.LeavingList): - log.Debug("leaving node validatorInfo", "pk", validatorInfo.PublicKey) + log.Debug("leaving node validatorInfo", + "pk", validatorInfo.PublicKey, + "previous list", validatorInfo.PreviousList, + "current index", validatorInfo.Index, + "previous index", validatorInfo.PreviousIndex, + "shardId", validatorInfo.ShardId) leavingMap[validatorInfo.ShardId] = append(leavingMap[validatorInfo.ShardId], currentValidator) ihnc.addValidatorToPreviousMap( - previousEpochConfig, eligibleMap, waitingMap, currentValidator, - validatorInfo.ShardId) + validatorInfo, + ) case string(common.NewList): + if ihnc.flagStakingV4Step2.IsSet() { + return nil, epochStart.ErrReceivedNewListNodeInStakingV4 + } log.Debug("new node registered", "pk", validatorInfo.PublicKey) newNodesList = append(newNodesList, currentValidator) case string(common.InactiveList): log.Debug("inactive validator", "pk", validatorInfo.PublicKey) case string(common.JailedList): log.Debug("jailed validator", "pk", validatorInfo.PublicKey) + case string(common.SelectedFromAuctionList): + log.Debug("selected node from auction", "pk", validatorInfo.PublicKey) + if ihnc.flagStakingV4Step2.IsSet() { + auctionList = append(auctionList, currentValidator) + } else { + return nil, ErrReceivedAuctionValidatorsBeforeStakingV4 + } } } sort.Sort(validatorList(newNodesList)) + sort.Sort(validatorList(auctionList)) for _, eligibleList := range eligibleMap { sort.Sort(validatorList(eligibleList)) } @@ -783,6 +810,7 @@ func (ihnc *indexHashedNodesCoordinator) computeNodesConfigFromList( waitingMap: waitingMap, leavingMap: leavingMap, newList: newNodesList, + auctionList: auctionList, nbShards: uint32(nbShards), } @@ -790,30 +818,49 @@ func (ihnc *indexHashedNodesCoordinator) computeNodesConfigFromList( } func (ihnc *indexHashedNodesCoordinator) addValidatorToPreviousMap( - previousEpochConfig *epochNodesConfig, eligibleMap map[uint32][]Validator, waitingMap map[uint32][]Validator, currentValidator *validator, - currentValidatorShardId uint32) { - - if !ihnc.flagWaitingListFix.IsSet() { - eligibleMap[currentValidatorShardId] = append(eligibleMap[currentValidatorShardId], currentValidator) + validatorInfo *state.ShardValidatorInfo, +) { + shardId := validatorInfo.ShardId + previousList := validatorInfo.PreviousList + + log.Debug("checking leaving node", + "current list", validatorInfo.List, + "previous list", previousList, + "current index", validatorInfo.Index, + "previous index", validatorInfo.PreviousIndex, + "pk", currentValidator.PubKey(), + "shardId", shardId) + + if !ihnc.flagStakingV4Started.IsSet() || len(previousList) == 0 { + log.Debug("leaving node before staking v4 or with not previous list set node found in", + "list", "eligible", "shardId", shardId, "previous list", previousList) + eligibleMap[shardId] = append(eligibleMap[shardId], currentValidator) return } - found, shardId := searchInMap(previousEpochConfig.eligibleMap, 
currentValidator.PubKey()) - if found { + if previousList == string(common.EligibleList) { log.Debug("leaving node found in", "list", "eligible", "shardId", shardId) - eligibleMap[shardId] = append(eligibleMap[currentValidatorShardId], currentValidator) + currentValidator.index = validatorInfo.PreviousIndex + eligibleMap[shardId] = append(eligibleMap[shardId], currentValidator) return } - found, shardId = searchInMap(previousEpochConfig.waitingMap, currentValidator.PubKey()) - if found { + if previousList == string(common.WaitingList) { log.Debug("leaving node found in", "list", "waiting", "shardId", shardId) - waitingMap[shardId] = append(waitingMap[currentValidatorShardId], currentValidator) + currentValidator.index = validatorInfo.PreviousIndex + waitingMap[shardId] = append(waitingMap[shardId], currentValidator) return } + + log.Debug("leaving node not found in eligible or waiting", + "previous list", previousList, + "current index", validatorInfo.Index, + "previous index", validatorInfo.PreviousIndex, + "pk", currentValidator.PubKey(), + "shardId", shardId) } func (ihnc *indexHashedNodesCoordinator) handleErrorLog(err error, message string) { @@ -837,7 +884,7 @@ func (ihnc *indexHashedNodesCoordinator) EpochStartAction(hdr data.HeaderHandler needToRemove := epochToRemove >= 0 ihnc.currentEpoch = newEpoch - err := ihnc.saveState(ihnc.savedStateKey) + err := ihnc.saveState(ihnc.savedStateKey, newEpoch) ihnc.handleErrorLog(err, "saving nodes coordinator config failed") ihnc.mutNodesConfig.Lock() @@ -1044,6 +1091,18 @@ func (ihnc *indexHashedNodesCoordinator) computeShardForSelfPublicKey(nodesConfi return shardId, true } + if ihnc.flagStakingV4Step2.IsSet() { + found, shardId = searchInMap(nodesConfig.shuffledOutMap, pubKey) + if found { + log.Trace("computeShardForSelfPublicKey found validator in shuffled out", + "epoch", ihnc.currentEpoch, + "shard", shardId, + "validator PK", pubKey, + ) + return shardId, true + } + } + log.Trace("computeShardForSelfPublicKey returned default", "shard", selfShard, ) @@ -1241,8 +1300,11 @@ func (ihnc *indexHashedNodesCoordinator) getShardValidatorInfoData(txHash []byte } func (ihnc *indexHashedNodesCoordinator) updateEpochFlags(epoch uint32) { - ihnc.flagWaitingListFix.SetValue(epoch >= ihnc.enableEpochsHandler.GetActivationEpoch(common.WaitingListFixFlag)) - log.Debug("indexHashedNodesCoordinator: waiting list fix", "enabled", ihnc.flagWaitingListFix.IsSet()) + ihnc.flagStakingV4Started.SetValue(epoch >= ihnc.enableEpochsHandler.GetActivationEpoch(common.StakingV4Step1Flag)) + log.Debug("indexHashedNodesCoordinator: flagStakingV4Started", "enabled", ihnc.flagStakingV4Started.IsSet()) + + ihnc.flagStakingV4Step2.SetValue(epoch >= ihnc.enableEpochsHandler.GetActivationEpoch(common.StakingV4Step2Flag)) + log.Debug("indexHashedNodesCoordinator: flagStakingV4Step2", "enabled", ihnc.flagStakingV4Step2.IsSet()) } // GetWaitingEpochsLeftForPublicKey returns the number of epochs left for the public key until it becomes eligible diff --git a/sharding/nodesCoordinator/indexHashedNodesCoordinatorLite.go b/sharding/nodesCoordinator/indexHashedNodesCoordinatorLite.go index bb96c6ec15a..3b80e8bdd23 100644 --- a/sharding/nodesCoordinator/indexHashedNodesCoordinatorLite.go +++ b/sharding/nodesCoordinator/indexHashedNodesCoordinatorLite.go @@ -6,7 +6,7 @@ import ( // SetNodesConfigFromValidatorsInfo sets epoch config based on validators list configuration func (ihnc *indexHashedNodesCoordinator) SetNodesConfigFromValidatorsInfo(epoch uint32, randomness []byte, 
validatorsInfo []*state.ShardValidatorInfo) error { - newNodesConfig, err := ihnc.computeNodesConfigFromList(&epochNodesConfig{}, validatorsInfo) + newNodesConfig, err := ihnc.computeNodesConfigFromList(validatorsInfo) if err != nil { return err } @@ -41,7 +41,7 @@ func (ihnc *indexHashedNodesCoordinator) SetNodesConfigFromValidatorsInfo(epoch resUpdateNodes.Leaving, ) - err = ihnc.setNodesPerShards(resUpdateNodes.Eligible, resUpdateNodes.Waiting, leavingNodesMap, epoch) + err = ihnc.setNodesPerShards(resUpdateNodes.Eligible, resUpdateNodes.Waiting, leavingNodesMap, resUpdateNodes.ShuffledOut, epoch) if err != nil { return err } diff --git a/sharding/nodesCoordinator/indexHashedNodesCoordinatorRegistry.go b/sharding/nodesCoordinator/indexHashedNodesCoordinatorRegistry.go index 40f9995febe..813929bac90 100644 --- a/sharding/nodesCoordinator/indexHashedNodesCoordinatorRegistry.go +++ b/sharding/nodesCoordinator/indexHashedNodesCoordinatorRegistry.go @@ -1,35 +1,12 @@ package nodesCoordinator import ( - "encoding/json" "fmt" "strconv" "github.com/multiversx/mx-chain-go/common" ) -// SerializableValidator holds the minimal data required for marshalling and un-marshalling a validator -type SerializableValidator struct { - PubKey []byte `json:"pubKey"` - Chances uint32 `json:"chances"` - Index uint32 `json:"index"` -} - -// EpochValidators holds one epoch configuration for a nodes coordinator -type EpochValidators struct { - EligibleValidators map[string][]*SerializableValidator `json:"eligibleValidators"` - WaitingValidators map[string][]*SerializableValidator `json:"waitingValidators"` - LeavingValidators map[string][]*SerializableValidator `json:"leavingValidators"` -} - -// NodesCoordinatorRegistry holds the data that can be used to initialize a nodes coordinator -type NodesCoordinatorRegistry struct { - EpochsConfig map[string]*EpochValidators `json:"epochConfigs"` - CurrentEpoch uint32 `json:"currentEpoch"` -} - -// TODO: add proto marshalizer for these package - replace all json marshalizers - // LoadState loads the nodes coordinator state from the used boot storage func (ihnc *indexHashedNodesCoordinator) LoadState(key []byte) error { return ihnc.baseLoadState(key) @@ -48,8 +25,7 @@ func (ihnc *indexHashedNodesCoordinator) baseLoadState(key []byte) error { return err } - config := &NodesCoordinatorRegistry{} - err = json.Unmarshal(data, config) + config, err := ihnc.nodesCoordinatorRegistryFactory.CreateNodesCoordinatorRegistry(data) if err != nil { return err } @@ -58,8 +34,8 @@ func (ihnc *indexHashedNodesCoordinator) baseLoadState(key []byte) error { ihnc.savedStateKey = key ihnc.mutSavedStateKey.Unlock() - ihnc.currentEpoch = config.CurrentEpoch - log.Debug("loaded nodes config", "current epoch", config.CurrentEpoch) + ihnc.currentEpoch = config.GetCurrentEpoch() + log.Debug("loaded nodes config", "current epoch", config.GetCurrentEpoch()) nodesConfig, err := ihnc.registryToNodesCoordinator(config) if err != nil { @@ -83,22 +59,31 @@ func displayNodesConfigInfo(config map[uint32]*epochNodesConfig) { } } -func (ihnc *indexHashedNodesCoordinator) saveState(key []byte) error { - registry := ihnc.NodesCoordinatorToRegistry() - data, err := json.Marshal(registry) +func (ihnc *indexHashedNodesCoordinator) saveState(key []byte, epoch uint32) error { + registry := ihnc.NodesCoordinatorToRegistry(epoch) + data, err := ihnc.nodesCoordinatorRegistryFactory.GetRegistryData(registry, ihnc.currentEpoch) if err != nil { return err } - ncInternalkey := 
append([]byte(common.NodesCoordinatorRegistryKeyPrefix), key...) - - log.Debug("saving nodes coordinator config", "key", ncInternalkey) + ncInternalKey := append([]byte(common.NodesCoordinatorRegistryKeyPrefix), key...) + log.Debug("saving nodes coordinator config", "key", ncInternalKey, "epoch", epoch) - return ihnc.bootStorer.Put(ncInternalkey, data) + return ihnc.bootStorer.Put(ncInternalKey, data) } // NodesCoordinatorToRegistry will export the nodesCoordinator data to the registry -func (ihnc *indexHashedNodesCoordinator) NodesCoordinatorToRegistry() *NodesCoordinatorRegistry { +func (ihnc *indexHashedNodesCoordinator) NodesCoordinatorToRegistry(epoch uint32) NodesCoordinatorRegistryHandler { + if epoch >= ihnc.enableEpochsHandler.GetActivationEpoch(common.StakingV4Step2Flag) { + log.Debug("indexHashedNodesCoordinator.NodesCoordinatorToRegistry called with auction registry", "epoch", epoch) + return ihnc.nodesCoordinatorToRegistryWithAuction() + } + + log.Debug("indexHashedNodesCoordinator.NodesCoordinatorToRegistry called with old registry", "epoch", epoch) + return ihnc.nodesCoordinatorToOldRegistry() +} + +func (ihnc *indexHashedNodesCoordinator) nodesCoordinatorToOldRegistry() NodesCoordinatorRegistryHandler { ihnc.mutNodesConfig.RLock() defer ihnc.mutNodesConfig.RUnlock() @@ -107,13 +92,8 @@ func (ihnc *indexHashedNodesCoordinator) NodesCoordinatorToRegistry() *NodesCoor EpochsConfig: make(map[string]*EpochValidators), } - minEpoch := 0 - lastEpoch := ihnc.getLastEpochConfig() - if lastEpoch >= nodesCoordinatorStoredEpochs { - minEpoch = int(lastEpoch) - nodesCoordinatorStoredEpochs + 1 - } - - for epoch := uint32(minEpoch); epoch <= lastEpoch; epoch++ { + minEpoch, lastEpoch := ihnc.getMinAndLastEpoch() + for epoch := minEpoch; epoch <= lastEpoch; epoch++ { epochNodesData, ok := ihnc.nodesConfig[epoch] if !ok { continue @@ -125,6 +105,16 @@ func (ihnc *indexHashedNodesCoordinator) NodesCoordinatorToRegistry() *NodesCoor return registry } +func (ihnc *indexHashedNodesCoordinator) getMinAndLastEpoch() (uint32, uint32) { + minEpoch := 0 + lastEpoch := ihnc.getLastEpochConfig() + if lastEpoch >= nodesCoordinatorStoredEpochs { + minEpoch = int(lastEpoch) - nodesCoordinatorStoredEpochs + 1 + } + + return uint32(minEpoch), lastEpoch +} + func (ihnc *indexHashedNodesCoordinator) getLastEpochConfig() uint32 { lastEpoch := uint32(0) for epoch := range ihnc.nodesConfig { @@ -137,13 +127,13 @@ func (ihnc *indexHashedNodesCoordinator) getLastEpochConfig() uint32 { } func (ihnc *indexHashedNodesCoordinator) registryToNodesCoordinator( - config *NodesCoordinatorRegistry, + config NodesCoordinatorRegistryHandler, ) (map[uint32]*epochNodesConfig, error) { var err error var epoch int64 result := make(map[uint32]*epochNodesConfig) - for epochStr, epochValidators := range config.EpochsConfig { + for epochStr, epochValidators := range config.GetEpochsConfig() { epoch, err = strconv.ParseInt(epochStr, 10, 64) if err != nil { return nil, err @@ -197,25 +187,33 @@ func epochNodesConfigToEpochValidators(config *epochNodesConfig) *EpochValidator return result } -func epochValidatorsToEpochNodesConfig(config *EpochValidators) (*epochNodesConfig, error) { +func epochValidatorsToEpochNodesConfig(config EpochValidatorsHandler) (*epochNodesConfig, error) { result := &epochNodesConfig{} var err error - result.eligibleMap, err = serializableValidatorsMapToValidatorsMap(config.EligibleValidators) + result.eligibleMap, err = serializableValidatorsMapToValidatorsMap(config.GetEligibleValidators()) if err != nil { 
return nil, err } - result.waitingMap, err = serializableValidatorsMapToValidatorsMap(config.WaitingValidators) + result.waitingMap, err = serializableValidatorsMapToValidatorsMap(config.GetWaitingValidators()) if err != nil { return nil, err } - result.leavingMap, err = serializableValidatorsMapToValidatorsMap(config.LeavingValidators) + result.leavingMap, err = serializableValidatorsMapToValidatorsMap(config.GetLeavingValidators()) if err != nil { return nil, err } + configWithAuction, castOk := config.(EpochValidatorsHandlerWithAuction) + if castOk { + result.shuffledOutMap, err = serializableValidatorsMapToValidatorsMap(configWithAuction.GetShuffledOutValidators()) + if err != nil { + return nil, err + } + } + return result, nil } diff --git a/sharding/nodesCoordinator/indexHashedNodesCoordinatorRegistryWithAuction.go b/sharding/nodesCoordinator/indexHashedNodesCoordinatorRegistryWithAuction.go new file mode 100644 index 00000000000..261aa60aefc --- /dev/null +++ b/sharding/nodesCoordinator/indexHashedNodesCoordinatorRegistryWithAuction.go @@ -0,0 +1,55 @@ +package nodesCoordinator + +import ( + "fmt" +) + +// nodesCoordinatorToRegistryWithAuction will export the nodesCoordinator data to the registry which contains auction list +func (ihnc *indexHashedNodesCoordinator) nodesCoordinatorToRegistryWithAuction() *NodesCoordinatorRegistryWithAuction { + ihnc.mutNodesConfig.RLock() + defer ihnc.mutNodesConfig.RUnlock() + + registry := &NodesCoordinatorRegistryWithAuction{ + CurrentEpoch: ihnc.currentEpoch, + EpochsConfigWithAuction: make(map[string]*EpochValidatorsWithAuction), + } + + minEpoch, lastEpoch := ihnc.getMinAndLastEpoch() + for epoch := minEpoch; epoch <= lastEpoch; epoch++ { + epochNodesData, ok := ihnc.nodesConfig[epoch] + if !ok { + continue + } + + registry.EpochsConfigWithAuction[fmt.Sprint(epoch)] = epochNodesConfigToEpochValidatorsWithAuction(epochNodesData) + } + + return registry +} + +func epochNodesConfigToEpochValidatorsWithAuction(config *epochNodesConfig) *EpochValidatorsWithAuction { + result := &EpochValidatorsWithAuction{ + Eligible: make(map[string]Validators, len(config.eligibleMap)), + Waiting: make(map[string]Validators, len(config.waitingMap)), + Leaving: make(map[string]Validators, len(config.leavingMap)), + ShuffledOut: make(map[string]Validators, len(config.shuffledOutMap)), + } + + for k, v := range config.eligibleMap { + result.Eligible[fmt.Sprint(k)] = Validators{Data: ValidatorArrayToSerializableValidatorArray(v)} + } + + for k, v := range config.waitingMap { + result.Waiting[fmt.Sprint(k)] = Validators{Data: ValidatorArrayToSerializableValidatorArray(v)} + } + + for k, v := range config.leavingMap { + result.Leaving[fmt.Sprint(k)] = Validators{Data: ValidatorArrayToSerializableValidatorArray(v)} + } + + for k, v := range config.shuffledOutMap { + result.ShuffledOut[fmt.Sprint(k)] = Validators{Data: ValidatorArrayToSerializableValidatorArray(v)} + } + + return result +} diff --git a/sharding/nodesCoordinator/indexHashedNodesCoordinatorRegistry_test.go b/sharding/nodesCoordinator/indexHashedNodesCoordinatorRegistry_test.go index 348c7a74280..b2b99e6e87b 100644 --- a/sharding/nodesCoordinator/indexHashedNodesCoordinatorRegistry_test.go +++ b/sharding/nodesCoordinator/indexHashedNodesCoordinatorRegistry_test.go @@ -6,6 +6,9 @@ import ( "strconv" "testing" + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" 
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -73,13 +76,23 @@ func validatorsEqualSerializableValidators(validators []Validator, sValidators [ } func TestIndexHashedNodesCoordinator_LoadStateAfterSave(t *testing.T) { + t.Parallel() + args := createArguments() + args.EnableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ + GetActivationEpochCalled: func(flag core.EnableEpochFlag) uint32 { + if flag == common.StakingV4Step2Flag { + return stakingV4Epoch + } + return 0 + }, + } nodesCoordinator, _ := NewIndexHashedNodesCoordinator(args) expectedConfig := nodesCoordinator.nodesConfig[0] key := []byte("config") - err := nodesCoordinator.saveState(key) + err := nodesCoordinator.saveState(key, 0) assert.Nil(t, err) delete(nodesCoordinator.nodesConfig, 0) @@ -94,26 +107,77 @@ func TestIndexHashedNodesCoordinator_LoadStateAfterSave(t *testing.T) { assert.True(t, sameValidatorsMaps(expectedConfig.waitingMap, actualConfig.waitingMap)) } -func TestIndexHashedNodesCooridinator_nodesCoordinatorToRegistry(t *testing.T) { +func TestIndexHashedNodesCoordinator_LoadStateAfterSaveWithStakingV4(t *testing.T) { + t.Parallel() + + args := createArguments() + args.Epoch = stakingV4Epoch + nodesCoordinator, _ := NewIndexHashedNodesCoordinator(args) + + nodesCoordinator.nodesConfig[stakingV4Epoch].leavingMap = createDummyNodesMap(3, 0, string(common.LeavingList)) + nodesCoordinator.nodesConfig[stakingV4Epoch].shuffledOutMap = createDummyNodesMap(3, 0, string(common.SelectedFromAuctionList)) + expectedConfig := nodesCoordinator.nodesConfig[stakingV4Epoch] + + key := []byte("config") + err := nodesCoordinator.saveState(key, stakingV4Epoch) + assert.Nil(t, err) + + delete(nodesCoordinator.nodesConfig, 0) + err = nodesCoordinator.LoadState(key) + assert.Nil(t, err) + + actualConfig := nodesCoordinator.nodesConfig[stakingV4Epoch] + assert.Equal(t, expectedConfig.shardID, actualConfig.shardID) + assert.Equal(t, expectedConfig.nbShards, actualConfig.nbShards) + assert.True(t, sameValidatorsMaps(expectedConfig.eligibleMap, actualConfig.eligibleMap)) + assert.True(t, sameValidatorsMaps(expectedConfig.waitingMap, actualConfig.waitingMap)) + assert.True(t, sameValidatorsMaps(expectedConfig.shuffledOutMap, actualConfig.shuffledOutMap)) + assert.True(t, sameValidatorsMaps(expectedConfig.leavingMap, actualConfig.leavingMap)) +} + +func TestIndexHashedNodesCoordinator_nodesCoordinatorToRegistryWithStakingV4(t *testing.T) { + args := createArguments() + args.Epoch = stakingV4Epoch + nodesCoordinator, _ := NewIndexHashedNodesCoordinator(args) + + nodesCoordinator.nodesConfig[stakingV4Epoch].leavingMap = createDummyNodesMap(3, 0, string(common.LeavingList)) + nodesCoordinator.nodesConfig[stakingV4Epoch].shuffledOutMap = createDummyNodesMap(3, 0, string(common.SelectedFromAuctionList)) + + ncr := nodesCoordinator.NodesCoordinatorToRegistry(stakingV4Epoch) + nc := nodesCoordinator.nodesConfig + + assert.Equal(t, nodesCoordinator.currentEpoch, ncr.GetCurrentEpoch()) + assert.Equal(t, len(nodesCoordinator.nodesConfig), len(ncr.GetEpochsConfig())) + + for epoch, config := range nc { + ncrWithAuction := ncr.GetEpochsConfig()[fmt.Sprint(epoch)].(EpochValidatorsHandlerWithAuction) + assert.True(t, sameValidatorsDifferentMapTypes(config.waitingMap, ncrWithAuction.GetWaitingValidators())) + assert.True(t, sameValidatorsDifferentMapTypes(config.leavingMap, ncrWithAuction.GetLeavingValidators())) + assert.True(t, sameValidatorsDifferentMapTypes(config.eligibleMap, 
ncrWithAuction.GetEligibleValidators())) + assert.True(t, sameValidatorsDifferentMapTypes(config.shuffledOutMap, ncrWithAuction.GetShuffledOutValidators())) + } +} + +func TestIndexHashedNodesCoordinator_nodesCoordinatorToRegistry(t *testing.T) { args := createArguments() nodesCoordinator, _ := NewIndexHashedNodesCoordinator(args) - ncr := nodesCoordinator.NodesCoordinatorToRegistry() + ncr := nodesCoordinator.NodesCoordinatorToRegistry(args.Epoch) nc := nodesCoordinator.nodesConfig - assert.Equal(t, nodesCoordinator.currentEpoch, ncr.CurrentEpoch) - assert.Equal(t, len(nodesCoordinator.nodesConfig), len(ncr.EpochsConfig)) + assert.Equal(t, nodesCoordinator.currentEpoch, ncr.GetCurrentEpoch()) + assert.Equal(t, len(nodesCoordinator.nodesConfig), len(ncr.GetEpochsConfig())) for epoch, config := range nc { - assert.True(t, sameValidatorsDifferentMapTypes(config.eligibleMap, ncr.EpochsConfig[fmt.Sprint(epoch)].EligibleValidators)) - assert.True(t, sameValidatorsDifferentMapTypes(config.waitingMap, ncr.EpochsConfig[fmt.Sprint(epoch)].WaitingValidators)) + assert.True(t, sameValidatorsDifferentMapTypes(config.eligibleMap, ncr.GetEpochsConfig()[fmt.Sprint(epoch)].GetEligibleValidators())) + assert.True(t, sameValidatorsDifferentMapTypes(config.waitingMap, ncr.GetEpochsConfig()[fmt.Sprint(epoch)].GetWaitingValidators())) } } func TestIndexHashedNodesCoordinator_registryToNodesCoordinator(t *testing.T) { args := createArguments() nodesCoordinator1, _ := NewIndexHashedNodesCoordinator(args) - ncr := nodesCoordinator1.NodesCoordinatorToRegistry() + ncr := nodesCoordinator1.NodesCoordinatorToRegistry(args.Epoch) args = createArguments() nodesCoordinator2, _ := NewIndexHashedNodesCoordinator(args) @@ -147,17 +211,17 @@ func TestIndexHashedNodesCooridinator_nodesCoordinatorToRegistryLimitNumEpochsIn } } - ncr := nodesCoordinator.NodesCoordinatorToRegistry() + ncr := nodesCoordinator.NodesCoordinatorToRegistry(args.Epoch) nc := nodesCoordinator.nodesConfig - require.Equal(t, nodesCoordinator.currentEpoch, ncr.CurrentEpoch) - require.Equal(t, nodesCoordinatorStoredEpochs, len(ncr.EpochsConfig)) + require.Equal(t, nodesCoordinator.currentEpoch, ncr.GetCurrentEpoch()) + require.Equal(t, nodesCoordinatorStoredEpochs, len(ncr.GetEpochsConfig())) - for epochStr := range ncr.EpochsConfig { + for epochStr := range ncr.GetEpochsConfig() { epoch, err := strconv.Atoi(epochStr) require.Nil(t, err) - require.True(t, sameValidatorsDifferentMapTypes(nc[uint32(epoch)].eligibleMap, ncr.EpochsConfig[epochStr].EligibleValidators)) - require.True(t, sameValidatorsDifferentMapTypes(nc[uint32(epoch)].waitingMap, ncr.EpochsConfig[epochStr].WaitingValidators)) + require.True(t, sameValidatorsDifferentMapTypes(nc[uint32(epoch)].eligibleMap, ncr.GetEpochsConfig()[epochStr].GetEligibleValidators())) + require.True(t, sameValidatorsDifferentMapTypes(nc[uint32(epoch)].waitingMap, ncr.GetEpochsConfig()[epochStr].GetWaitingValidators())) } } diff --git a/sharding/nodesCoordinator/indexHashedNodesCoordinatorWithRater.go b/sharding/nodesCoordinator/indexHashedNodesCoordinatorWithRater.go index c9e4779e73f..689fe95d341 100644 --- a/sharding/nodesCoordinator/indexHashedNodesCoordinatorWithRater.go +++ b/sharding/nodesCoordinator/indexHashedNodesCoordinatorWithRater.go @@ -76,7 +76,7 @@ func (ihnc *indexHashedNodesCoordinatorWithRater) ComputeAdditionalLeaving(allVa return extraLeavingNodesMap, nil } -//IsInterfaceNil verifies that the underlying value is nil +// IsInterfaceNil verifies that the underlying value is nil func (ihnc 
*indexHashedNodesCoordinatorWithRater) IsInterfaceNil() bool { return ihnc == nil } diff --git a/sharding/nodesCoordinator/indexHashedNodesCoordinatorWithRater_test.go b/sharding/nodesCoordinator/indexHashedNodesCoordinatorWithRater_test.go index dfd1bbbe2ad..40286a0c135 100644 --- a/sharding/nodesCoordinator/indexHashedNodesCoordinatorWithRater_test.go +++ b/sharding/nodesCoordinator/indexHashedNodesCoordinatorWithRater_test.go @@ -55,7 +55,7 @@ func TestIndexHashedGroupSelectorWithRater_SetNilEligibleMapShouldErr(t *testing waiting := createDummyNodesMap(2, 1, "waiting") nc, _ := NewIndexHashedNodesCoordinator(createArguments()) ihnc, _ := NewIndexHashedNodesCoordinatorWithRater(nc, &mock.RaterMock{}) - assert.Equal(t, ErrNilInputNodesMap, ihnc.setNodesPerShards(nil, waiting, nil, 0)) + assert.Equal(t, ErrNilInputNodesMap, ihnc.setNodesPerShards(nil, waiting, nil, nil, 0)) } func TestIndexHashedGroupSelectorWithRater_OkValShouldWork(t *testing.T) { @@ -79,25 +79,26 @@ func TestIndexHashedGroupSelectorWithRater_OkValShouldWork(t *testing.T) { bootStorer := genericMocks.NewStorerMock() arguments := ArgNodesCoordinator{ - ShardConsensusGroupSize: 2, - MetaConsensusGroupSize: 1, - Marshalizer: &mock.MarshalizerMock{}, - Hasher: &hashingMocks.HasherMock{}, - Shuffler: nodeShuffler, - EpochStartNotifier: epochStartSubscriber, - BootStorer: bootStorer, - NbShards: 1, - EligibleNodes: eligibleMap, - WaitingNodes: waitingMap, - SelfPublicKey: []byte("test"), - ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, - ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, - ChanStopNode: make(chan endProcess.ArgEndProcess), - NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, - IsFullArchive: false, - EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, - ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, - GenesisNodesSetupHandler: &mock.NodesSetupMock{}, + ShardConsensusGroupSize: 2, + MetaConsensusGroupSize: 1, + Marshalizer: &mock.MarshalizerMock{}, + Hasher: &hashingMocks.HasherMock{}, + Shuffler: nodeShuffler, + EpochStartNotifier: epochStartSubscriber, + BootStorer: bootStorer, + NbShards: 1, + EligibleNodes: eligibleMap, + WaitingNodes: waitingMap, + SelfPublicKey: []byte("test"), + ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, + ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, + ChanStopNode: make(chan endProcess.ArgEndProcess), + NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, + IsFullArchive: false, + EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, + ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + GenesisNodesSetupHandler: &mock.NodesSetupMock{}, + NodesCoordinatorRegistryFactory: createNodesCoordinatorRegistryFactory(), } nc, err := NewIndexHashedNodesCoordinator(arguments) assert.Nil(t, err) @@ -328,25 +329,26 @@ func TestIndexHashedGroupSelectorWithRater_GetValidatorWithPublicKeyShouldReturn bootStorer := genericMocks.NewStorerMock() arguments := ArgNodesCoordinator{ - ShardConsensusGroupSize: 1, - MetaConsensusGroupSize: 1, - Marshalizer: &mock.MarshalizerMock{}, - Hasher: &hashingMocks.HasherMock{}, - Shuffler: nodeShuffler, - EpochStartNotifier: epochStartSubscriber, - BootStorer: bootStorer, - NbShards: 1, - EligibleNodes: eligibleMap, - WaitingNodes: waitingMap, - SelfPublicKey: []byte("key"), - ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, - ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, - ChanStopNode: make(chan endProcess.ArgEndProcess), - NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, - 
IsFullArchive: false, - EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, - ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, - GenesisNodesSetupHandler: &mock.NodesSetupMock{}, + ShardConsensusGroupSize: 1, + MetaConsensusGroupSize: 1, + Marshalizer: &mock.MarshalizerMock{}, + Hasher: &hashingMocks.HasherMock{}, + Shuffler: nodeShuffler, + EpochStartNotifier: epochStartSubscriber, + BootStorer: bootStorer, + NbShards: 1, + EligibleNodes: eligibleMap, + WaitingNodes: waitingMap, + SelfPublicKey: []byte("key"), + ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, + ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, + ChanStopNode: make(chan endProcess.ArgEndProcess), + NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, + IsFullArchive: false, + EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, + ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + GenesisNodesSetupHandler: &mock.NodesSetupMock{}, + NodesCoordinatorRegistryFactory: createNodesCoordinatorRegistryFactory(), } nc, _ := NewIndexHashedNodesCoordinator(arguments) ihnc, _ := NewIndexHashedNodesCoordinatorWithRater(nc, &mock.RaterMock{}) @@ -383,25 +385,26 @@ func TestIndexHashedGroupSelectorWithRater_GetValidatorWithPublicKeyShouldReturn bootStorer := genericMocks.NewStorerMock() arguments := ArgNodesCoordinator{ - ShardConsensusGroupSize: 1, - MetaConsensusGroupSize: 1, - Marshalizer: &mock.MarshalizerMock{}, - Hasher: &hashingMocks.HasherMock{}, - Shuffler: nodeShuffler, - EpochStartNotifier: epochStartSubscriber, - BootStorer: bootStorer, - NbShards: 1, - EligibleNodes: eligibleMap, - WaitingNodes: waitingMap, - SelfPublicKey: []byte("key"), - ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, - ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, - ChanStopNode: make(chan endProcess.ArgEndProcess), - NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, - IsFullArchive: false, - EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, - ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, - GenesisNodesSetupHandler: &mock.NodesSetupMock{}, + ShardConsensusGroupSize: 1, + MetaConsensusGroupSize: 1, + Marshalizer: &mock.MarshalizerMock{}, + Hasher: &hashingMocks.HasherMock{}, + Shuffler: nodeShuffler, + EpochStartNotifier: epochStartSubscriber, + BootStorer: bootStorer, + NbShards: 1, + EligibleNodes: eligibleMap, + WaitingNodes: waitingMap, + SelfPublicKey: []byte("key"), + ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, + ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, + ChanStopNode: make(chan endProcess.ArgEndProcess), + NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, + IsFullArchive: false, + EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, + ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + GenesisNodesSetupHandler: &mock.NodesSetupMock{}, + NodesCoordinatorRegistryFactory: createNodesCoordinatorRegistryFactory(), } nc, _ := NewIndexHashedNodesCoordinator(arguments) ihnc, _ := NewIndexHashedNodesCoordinatorWithRater(nc, &mock.RaterMock{}) @@ -452,25 +455,26 @@ func TestIndexHashedGroupSelectorWithRater_GetValidatorWithPublicKeyShouldWork(t eligibleMap[1] = listShard1 arguments := ArgNodesCoordinator{ - ShardConsensusGroupSize: 1, - MetaConsensusGroupSize: 1, - Marshalizer: &mock.MarshalizerMock{}, - Hasher: &hashingMocks.HasherMock{}, - Shuffler: nodeShuffler, - EpochStartNotifier: epochStartSubscriber, - BootStorer: bootStorer, - NbShards: 2, - EligibleNodes: eligibleMap, - WaitingNodes: waitingMap, - SelfPublicKey: []byte("key"), - 
ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, - ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, - ChanStopNode: make(chan endProcess.ArgEndProcess), - NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, - IsFullArchive: false, - EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, - ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, - GenesisNodesSetupHandler: &mock.NodesSetupMock{}, + ShardConsensusGroupSize: 1, + MetaConsensusGroupSize: 1, + Marshalizer: &mock.MarshalizerMock{}, + Hasher: &hashingMocks.HasherMock{}, + Shuffler: nodeShuffler, + EpochStartNotifier: epochStartSubscriber, + BootStorer: bootStorer, + NbShards: 2, + EligibleNodes: eligibleMap, + WaitingNodes: waitingMap, + SelfPublicKey: []byte("key"), + ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, + ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, + ChanStopNode: make(chan endProcess.ArgEndProcess), + NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, + IsFullArchive: false, + EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, + ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + GenesisNodesSetupHandler: &mock.NodesSetupMock{}, + NodesCoordinatorRegistryFactory: createNodesCoordinatorRegistryFactory(), } nc, _ := NewIndexHashedNodesCoordinator(arguments) ihnc, _ := NewIndexHashedNodesCoordinatorWithRater(nc, &mock.RaterMock{}) @@ -537,26 +541,27 @@ func TestIndexHashedGroupSelectorWithRater_GetAllEligibleValidatorsPublicKeys(t eligibleMap[shardOneId] = listShard1 arguments := ArgNodesCoordinator{ - ShardConsensusGroupSize: 1, - MetaConsensusGroupSize: 1, - Marshalizer: &mock.MarshalizerMock{}, - Hasher: &hashingMocks.HasherMock{}, - Shuffler: nodeShuffler, - EpochStartNotifier: epochStartSubscriber, - BootStorer: bootStorer, - ShardIDAsObserver: shardZeroId, - NbShards: 2, - EligibleNodes: eligibleMap, - WaitingNodes: waitingMap, - SelfPublicKey: []byte("key"), - ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, - ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, - ChanStopNode: make(chan endProcess.ArgEndProcess), - NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, - IsFullArchive: false, - EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, - ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, - GenesisNodesSetupHandler: &mock.NodesSetupMock{}, + ShardConsensusGroupSize: 1, + MetaConsensusGroupSize: 1, + Marshalizer: &mock.MarshalizerMock{}, + Hasher: &hashingMocks.HasherMock{}, + Shuffler: nodeShuffler, + EpochStartNotifier: epochStartSubscriber, + BootStorer: bootStorer, + ShardIDAsObserver: shardZeroId, + NbShards: 2, + EligibleNodes: eligibleMap, + WaitingNodes: waitingMap, + SelfPublicKey: []byte("key"), + ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, + ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, + ChanStopNode: make(chan endProcess.ArgEndProcess), + NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, + IsFullArchive: false, + EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, + ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + GenesisNodesSetupHandler: &mock.NodesSetupMock{}, + NodesCoordinatorRegistryFactory: createNodesCoordinatorRegistryFactory(), } nc, _ := NewIndexHashedNodesCoordinator(arguments) @@ -781,8 +786,8 @@ func BenchmarkIndexHashedGroupSelectorWithRater_TestExpandList(b *testing.B) { } //a := []int{1, 2, 3, 4, 5, 6, 7, 8} - rand.Seed(time.Now().UnixNano()) - rand.Shuffle(len(array), func(i, j int) { array[i], array[j] = array[j], array[i] }) + rnd := 
rand.New(rand.NewSource(time.Now().UnixNano())) + rnd.Shuffle(len(array), func(i, j int) { array[i], array[j] = array[j], array[i] }) m2 := runtime.MemStats{} runtime.ReadMemStats(&m2) diff --git a/sharding/nodesCoordinator/indexHashedNodesCoordinator_test.go b/sharding/nodesCoordinator/indexHashedNodesCoordinator_test.go index c1c01a67680..5db65609f59 100644 --- a/sharding/nodesCoordinator/indexHashedNodesCoordinator_test.go +++ b/sharding/nodesCoordinator/indexHashedNodesCoordinator_test.go @@ -35,6 +35,8 @@ import ( "github.com/stretchr/testify/require" ) +const stakingV4Epoch = 444 + func createDummyNodesList(nbNodes uint32, suffix string) []Validator { list := make([]Validator, 0) hasher := sha256.NewSha256() @@ -82,6 +84,14 @@ func isStringSubgroup(a []string, b []string) bool { return found } +func createNodesCoordinatorRegistryFactory() NodesCoordinatorRegistryFactory { + ncf, _ := NewNodesCoordinatorRegistryFactory( + &marshal.GogoProtoMarshalizer{}, + stakingV4Epoch, + ) + return ncf +} + func createArguments() ArgNodesCoordinator { nbShards := uint32(1) eligibleMap := createDummyNodesMap(10, nbShards, "eligible") @@ -92,7 +102,6 @@ func createArguments() ArgNodesCoordinator { Hysteresis: hysteresis, Adaptivity: adaptivity, ShuffleBetweenShards: shuffleBetweenShards, - MaxNodesEnableConfig: nil, EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, } nodeShuffler, _ := NewHashValidatorsShuffler(shufflerArgs) @@ -120,8 +129,9 @@ func createArguments() ArgNodesCoordinator { EnableEpochsHandler: &mock.EnableEpochsHandlerMock{ IsRefactorPeersMiniBlocksFlagEnabledField: true, }, - ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, - GenesisNodesSetupHandler: &mock.NodesSetupMock{}, + GenesisNodesSetupHandler: &mock.NodesSetupMock{}, + ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + NodesCoordinatorRegistryFactory: createNodesCoordinatorRegistryFactory(), } return arguments } @@ -246,7 +256,7 @@ func TestIndexHashedNodesCoordinator_SetNilEligibleMapShouldErr(t *testing.T) { arguments := createArguments() ihnc, _ := NewIndexHashedNodesCoordinator(arguments) - require.Equal(t, ErrNilInputNodesMap, ihnc.setNodesPerShards(nil, waitingMap, nil, 0)) + require.Equal(t, ErrNilInputNodesMap, ihnc.setNodesPerShards(nil, waitingMap, nil, nil, 0)) } func TestIndexHashedNodesCoordinator_SetNilWaitingMapShouldErr(t *testing.T) { @@ -256,7 +266,7 @@ func TestIndexHashedNodesCoordinator_SetNilWaitingMapShouldErr(t *testing.T) { arguments := createArguments() ihnc, _ := NewIndexHashedNodesCoordinator(arguments) - require.Equal(t, ErrNilInputNodesMap, ihnc.setNodesPerShards(eligibleMap, nil, nil, 0)) + require.Equal(t, ErrNilInputNodesMap, ihnc.setNodesPerShards(eligibleMap, nil, nil, nil, 0)) } func TestIndexHashedNodesCoordinator_OkValShouldWork(t *testing.T) { @@ -281,24 +291,25 @@ func TestIndexHashedNodesCoordinator_OkValShouldWork(t *testing.T) { bootStorer := genericMocks.NewStorerMock() arguments := ArgNodesCoordinator{ - ShardConsensusGroupSize: 2, - MetaConsensusGroupSize: 1, - Marshalizer: &mock.MarshalizerMock{}, - Hasher: &hashingMocks.HasherMock{}, - Shuffler: nodeShuffler, - EpochStartNotifier: epochStartSubscriber, - BootStorer: bootStorer, - NbShards: 1, - EligibleNodes: eligibleMap, - WaitingNodes: waitingMap, - SelfPublicKey: []byte("key"), - ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, - ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, - ChanStopNode: make(chan endProcess.ArgEndProcess), - NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, - 
EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, - ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, - GenesisNodesSetupHandler: &mock.NodesSetupMock{}, + ShardConsensusGroupSize: 2, + MetaConsensusGroupSize: 1, + Marshalizer: &mock.MarshalizerMock{}, + Hasher: &hashingMocks.HasherMock{}, + Shuffler: nodeShuffler, + EpochStartNotifier: epochStartSubscriber, + BootStorer: bootStorer, + NbShards: 1, + EligibleNodes: eligibleMap, + WaitingNodes: waitingMap, + SelfPublicKey: []byte("key"), + ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, + ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, + ChanStopNode: make(chan endProcess.ArgEndProcess), + NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, + EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, + ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + GenesisNodesSetupHandler: &mock.NodesSetupMock{}, + NodesCoordinatorRegistryFactory: createNodesCoordinatorRegistryFactory(), } ihnc, err := NewIndexHashedNodesCoordinator(arguments) @@ -342,24 +353,25 @@ func TestIndexHashedNodesCoordinator_NewCoordinatorTooFewNodesShouldErr(t *testi bootStorer := genericMocks.NewStorerMock() arguments := ArgNodesCoordinator{ - ShardConsensusGroupSize: 10, - MetaConsensusGroupSize: 1, - Marshalizer: &mock.MarshalizerMock{}, - Hasher: &hashingMocks.HasherMock{}, - Shuffler: nodeShuffler, - EpochStartNotifier: epochStartSubscriber, - BootStorer: bootStorer, - NbShards: 1, - EligibleNodes: eligibleMap, - WaitingNodes: waitingMap, - SelfPublicKey: []byte("key"), - ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, - ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, - ChanStopNode: make(chan endProcess.ArgEndProcess), - NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, - EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, - ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, - GenesisNodesSetupHandler: &mock.NodesSetupMock{}, + ShardConsensusGroupSize: 10, + MetaConsensusGroupSize: 1, + Marshalizer: &mock.MarshalizerMock{}, + Hasher: &hashingMocks.HasherMock{}, + Shuffler: nodeShuffler, + EpochStartNotifier: epochStartSubscriber, + BootStorer: bootStorer, + NbShards: 1, + EligibleNodes: eligibleMap, + WaitingNodes: waitingMap, + SelfPublicKey: []byte("key"), + ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, + ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, + ChanStopNode: make(chan endProcess.ArgEndProcess), + NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, + EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, + ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + GenesisNodesSetupHandler: &mock.NodesSetupMock{}, + NodesCoordinatorRegistryFactory: createNodesCoordinatorRegistryFactory(), } ihnc, err := NewIndexHashedNodesCoordinator(arguments) @@ -417,24 +429,25 @@ func TestIndexHashedNodesCoordinator_ComputeValidatorsGroup1ValidatorShouldRetur bootStorer := genericMocks.NewStorerMock() arguments := ArgNodesCoordinator{ - ShardConsensusGroupSize: 1, - MetaConsensusGroupSize: 1, - Marshalizer: &mock.MarshalizerMock{}, - Hasher: &hashingMocks.HasherMock{}, - Shuffler: nodeShuffler, - EpochStartNotifier: epochStartSubscriber, - BootStorer: bootStorer, - NbShards: 1, - EligibleNodes: nodesMap, - WaitingNodes: make(map[uint32][]Validator), - SelfPublicKey: []byte("key"), - ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, - ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, - ChanStopNode: make(chan endProcess.ArgEndProcess), - NodeTypeProvider: 
&nodeTypeProviderMock.NodeTypeProviderStub{}, - EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, - ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, - GenesisNodesSetupHandler: &mock.NodesSetupMock{}, + ShardConsensusGroupSize: 1, + MetaConsensusGroupSize: 1, + Marshalizer: &mock.MarshalizerMock{}, + Hasher: &hashingMocks.HasherMock{}, + Shuffler: nodeShuffler, + EpochStartNotifier: epochStartSubscriber, + BootStorer: bootStorer, + NbShards: 1, + EligibleNodes: nodesMap, + WaitingNodes: make(map[uint32][]Validator), + SelfPublicKey: []byte("key"), + ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, + ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, + ChanStopNode: make(chan endProcess.ArgEndProcess), + NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, + EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, + ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + GenesisNodesSetupHandler: &mock.NodesSetupMock{}, + NodesCoordinatorRegistryFactory: createNodesCoordinatorRegistryFactory(), } ihnc, _ := NewIndexHashedNodesCoordinator(arguments) list2, err := ihnc.ComputeConsensusGroup([]byte("randomness"), 0, 0, 0) @@ -478,24 +491,25 @@ func TestIndexHashedNodesCoordinator_ComputeValidatorsGroup400of400For10locksNoM } arguments := ArgNodesCoordinator{ - ShardConsensusGroupSize: consensusGroupSize, - MetaConsensusGroupSize: 1, - Marshalizer: &mock.MarshalizerMock{}, - Hasher: &hashingMocks.HasherMock{}, - Shuffler: nodeShuffler, - EpochStartNotifier: epochStartSubscriber, - BootStorer: bootStorer, - NbShards: 1, - EligibleNodes: eligibleMap, - WaitingNodes: waitingMap, - SelfPublicKey: []byte("key"), - ConsensusGroupCache: lruCache, - ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, - ChanStopNode: make(chan endProcess.ArgEndProcess), - NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, - EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, - ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, - GenesisNodesSetupHandler: &mock.NodesSetupMock{}, + ShardConsensusGroupSize: consensusGroupSize, + MetaConsensusGroupSize: 1, + Marshalizer: &mock.MarshalizerMock{}, + Hasher: &hashingMocks.HasherMock{}, + Shuffler: nodeShuffler, + EpochStartNotifier: epochStartSubscriber, + BootStorer: bootStorer, + NbShards: 1, + EligibleNodes: eligibleMap, + WaitingNodes: waitingMap, + SelfPublicKey: []byte("key"), + ConsensusGroupCache: lruCache, + ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, + ChanStopNode: make(chan endProcess.ArgEndProcess), + NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, + EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, + ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + GenesisNodesSetupHandler: &mock.NodesSetupMock{}, + NodesCoordinatorRegistryFactory: createNodesCoordinatorRegistryFactory(), } ihnc, err := NewIndexHashedNodesCoordinator(arguments) @@ -567,24 +581,25 @@ func TestIndexHashedNodesCoordinator_ComputeValidatorsGroup400of400For10BlocksMe } arguments := ArgNodesCoordinator{ - ShardConsensusGroupSize: consensusGroupSize, - MetaConsensusGroupSize: 1, - Marshalizer: &mock.MarshalizerMock{}, - Hasher: &hashingMocks.HasherMock{}, - Shuffler: nodeShuffler, - EpochStartNotifier: epochStartSubscriber, - BootStorer: bootStorer, - NbShards: 1, - EligibleNodes: eligibleMap, - WaitingNodes: waitingMap, - SelfPublicKey: []byte("key"), - ConsensusGroupCache: lruCache, - ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, - ChanStopNode: make(chan endProcess.ArgEndProcess), - NodeTypeProvider: 
&nodeTypeProviderMock.NodeTypeProviderStub{}, - EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, - ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, - GenesisNodesSetupHandler: &mock.NodesSetupMock{}, + ShardConsensusGroupSize: consensusGroupSize, + MetaConsensusGroupSize: 1, + Marshalizer: &mock.MarshalizerMock{}, + Hasher: &hashingMocks.HasherMock{}, + Shuffler: nodeShuffler, + EpochStartNotifier: epochStartSubscriber, + BootStorer: bootStorer, + NbShards: 1, + EligibleNodes: eligibleMap, + WaitingNodes: waitingMap, + SelfPublicKey: []byte("key"), + ConsensusGroupCache: lruCache, + ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, + ChanStopNode: make(chan endProcess.ArgEndProcess), + NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, + EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, + ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + GenesisNodesSetupHandler: &mock.NodesSetupMock{}, + NodesCoordinatorRegistryFactory: createNodesCoordinatorRegistryFactory(), } ihnc, err := NewIndexHashedNodesCoordinator(arguments) @@ -970,24 +985,25 @@ func TestIndexHashedNodesCoordinator_GetValidatorWithPublicKeyShouldWork(t *test bootStorer := genericMocks.NewStorerMock() arguments := ArgNodesCoordinator{ - ShardConsensusGroupSize: 1, - MetaConsensusGroupSize: 1, - Marshalizer: &mock.MarshalizerMock{}, - Hasher: &hashingMocks.HasherMock{}, - Shuffler: nodeShuffler, - EpochStartNotifier: epochStartSubscriber, - BootStorer: bootStorer, - NbShards: 2, - EligibleNodes: eligibleMap, - WaitingNodes: make(map[uint32][]Validator), - SelfPublicKey: []byte("key"), - ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, - ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, - ChanStopNode: make(chan endProcess.ArgEndProcess), - NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, - EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, - ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, - GenesisNodesSetupHandler: &mock.NodesSetupMock{}, + ShardConsensusGroupSize: 1, + MetaConsensusGroupSize: 1, + Marshalizer: &mock.MarshalizerMock{}, + Hasher: &hashingMocks.HasherMock{}, + Shuffler: nodeShuffler, + EpochStartNotifier: epochStartSubscriber, + BootStorer: bootStorer, + NbShards: 2, + EligibleNodes: eligibleMap, + WaitingNodes: make(map[uint32][]Validator), + SelfPublicKey: []byte("key"), + ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, + ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, + ChanStopNode: make(chan endProcess.ArgEndProcess), + NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, + EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, + ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + GenesisNodesSetupHandler: &mock.NodesSetupMock{}, + NodesCoordinatorRegistryFactory: createNodesCoordinatorRegistryFactory(), } ihnc, _ := NewIndexHashedNodesCoordinator(arguments) @@ -1054,25 +1070,26 @@ func TestIndexHashedGroupSelector_GetAllEligibleValidatorsPublicKeys(t *testing. 
bootStorer := genericMocks.NewStorerMock() arguments := ArgNodesCoordinator{ - ShardConsensusGroupSize: 1, - MetaConsensusGroupSize: 1, - Marshalizer: &mock.MarshalizerMock{}, - Hasher: &hashingMocks.HasherMock{}, - Shuffler: nodeShuffler, - EpochStartNotifier: epochStartSubscriber, - BootStorer: bootStorer, - ShardIDAsObserver: shardZeroId, - NbShards: 2, - EligibleNodes: eligibleMap, - WaitingNodes: make(map[uint32][]Validator), - SelfPublicKey: []byte("key"), - ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, - ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, - ChanStopNode: make(chan endProcess.ArgEndProcess), - NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, - EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, - ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, - GenesisNodesSetupHandler: &mock.NodesSetupMock{}, + ShardConsensusGroupSize: 1, + MetaConsensusGroupSize: 1, + Marshalizer: &mock.MarshalizerMock{}, + Hasher: &hashingMocks.HasherMock{}, + Shuffler: nodeShuffler, + EpochStartNotifier: epochStartSubscriber, + BootStorer: bootStorer, + ShardIDAsObserver: shardZeroId, + NbShards: 2, + EligibleNodes: eligibleMap, + WaitingNodes: make(map[uint32][]Validator), + SelfPublicKey: []byte("key"), + ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, + ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, + ChanStopNode: make(chan endProcess.ArgEndProcess), + NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, + EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, + ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + GenesisNodesSetupHandler: &mock.NodesSetupMock{}, + NodesCoordinatorRegistryFactory: createNodesCoordinatorRegistryFactory(), } ihnc, _ := NewIndexHashedNodesCoordinator(arguments) @@ -1134,25 +1151,26 @@ func TestIndexHashedGroupSelector_GetAllWaitingValidatorsPublicKeys(t *testing.T eligibleMap[shardZeroId] = []Validator{&validator{}} arguments := ArgNodesCoordinator{ - ShardConsensusGroupSize: 1, - MetaConsensusGroupSize: 1, - Marshalizer: &mock.MarshalizerMock{}, - Hasher: &hashingMocks.HasherMock{}, - Shuffler: nodeShuffler, - EpochStartNotifier: epochStartSubscriber, - BootStorer: bootStorer, - ShardIDAsObserver: shardZeroId, - NbShards: 2, - EligibleNodes: eligibleMap, - WaitingNodes: waitingMap, - SelfPublicKey: []byte("key"), - ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, - ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, - ChanStopNode: make(chan endProcess.ArgEndProcess), - NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, - EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, - ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, - GenesisNodesSetupHandler: &mock.NodesSetupMock{}, + ShardConsensusGroupSize: 1, + MetaConsensusGroupSize: 1, + Marshalizer: &mock.MarshalizerMock{}, + Hasher: &hashingMocks.HasherMock{}, + Shuffler: nodeShuffler, + EpochStartNotifier: epochStartSubscriber, + BootStorer: bootStorer, + ShardIDAsObserver: shardZeroId, + NbShards: 2, + EligibleNodes: eligibleMap, + WaitingNodes: waitingMap, + SelfPublicKey: []byte("key"), + ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, + ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, + ChanStopNode: make(chan endProcess.ArgEndProcess), + NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, + EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, + ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + GenesisNodesSetupHandler: &mock.NodesSetupMock{}, + NodesCoordinatorRegistryFactory: 
createNodesCoordinatorRegistryFactory(), } ihnc, _ := NewIndexHashedNodesCoordinator(arguments) @@ -1257,7 +1275,7 @@ func TestIndexHashedNodesCoordinator_setNodesPerShardsShouldTriggerWrongConfigur }, } - err = ihnc.setNodesPerShards(eligibleMap, map[uint32][]Validator{}, map[uint32][]Validator{}, 2) + err = ihnc.setNodesPerShards(eligibleMap, map[uint32][]Validator{}, map[uint32][]Validator{}, map[uint32][]Validator{}, 2) require.NoError(t, err) value := <-chanStopNode @@ -1283,7 +1301,7 @@ func TestIndexHashedNodesCoordinator_setNodesPerShardsShouldNotTriggerWrongConfi }, } - err = ihnc.setNodesPerShards(eligibleMap, map[uint32][]Validator{}, map[uint32][]Validator{}, 2) + err = ihnc.setNodesPerShards(eligibleMap, map[uint32][]Validator{}, map[uint32][]Validator{}, map[uint32][]Validator{}, 2) require.NoError(t, err) require.Empty(t, chanStopNode) @@ -1315,7 +1333,7 @@ func TestIndexHashedNodesCoordinator_setNodesPerShardsShouldSetNodeTypeValidator }, } - err = ihnc.setNodesPerShards(eligibleMap, map[uint32][]Validator{}, map[uint32][]Validator{}, 2) + err = ihnc.setNodesPerShards(eligibleMap, map[uint32][]Validator{}, map[uint32][]Validator{}, map[uint32][]Validator{}, 2) require.NoError(t, err) require.True(t, setTypeWasCalled) require.Equal(t, core.NodeTypeValidator, nodeTypeResult) @@ -1347,7 +1365,7 @@ func TestIndexHashedNodesCoordinator_setNodesPerShardsShouldSetNodeTypeObserver( }, } - err = ihnc.setNodesPerShards(eligibleMap, map[uint32][]Validator{}, map[uint32][]Validator{}, 2) + err = ihnc.setNodesPerShards(eligibleMap, map[uint32][]Validator{}, map[uint32][]Validator{}, map[uint32][]Validator{}, 2) require.NoError(t, err) require.True(t, setTypeWasCalled) require.Equal(t, core.NodeTypeObserver, nodeTypeResult) @@ -1389,6 +1407,36 @@ func TestIndexHashedNodesCoordinator_EpochStartInEligible(t *testing.T) { require.True(t, isValidator) } +func TestIndexHashedNodesCoordinator_computeShardForSelfPublicKeyWithStakingV4(t *testing.T) { + t.Parallel() + + arguments := createArguments() + pk := []byte("pk") + arguments.SelfPublicKey = pk + nc, _ := NewIndexHashedNodesCoordinator(arguments) + epoch := uint32(2) + + metaShard := core.MetachainShardId + nc.nodesConfig = map[uint32]*epochNodesConfig{ + epoch: { + shardID: metaShard, + shuffledOutMap: map[uint32][]Validator{ + metaShard: {newValidatorMock(pk, 1, 1)}, + }, + }, + } + + computedShardId, isValidator := nc.computeShardForSelfPublicKey(nc.nodesConfig[epoch]) + require.Equal(t, nc.shardIDAsObserver, computedShardId) + require.False(t, isValidator) + + nc.flagStakingV4Step2.SetValue(true) + + computedShardId, isValidator = nc.computeShardForSelfPublicKey(nc.nodesConfig[epoch]) + require.Equal(t, metaShard, computedShardId) + require.True(t, isValidator) +} + func TestIndexHashedNodesCoordinator_EpochStartInWaiting(t *testing.T) { t.Parallel() @@ -1513,8 +1561,9 @@ func TestIndexHashedNodesCoordinator_EpochStart_EligibleSortedAscendingByIndex(t EnableEpochsHandler: &mock.EnableEpochsHandlerMock{ IsRefactorPeersMiniBlocksFlagEnabledField: true, }, - ValidatorInfoCacher: dataPool.NewCurrentEpochValidatorInfoPool(), - GenesisNodesSetupHandler: &mock.NodesSetupMock{}, + ValidatorInfoCacher: dataPool.NewCurrentEpochValidatorInfoPool(), + GenesisNodesSetupHandler: &mock.NodesSetupMock{}, + NodesCoordinatorRegistryFactory: createNodesCoordinatorRegistryFactory(), } ihnc, err := NewIndexHashedNodesCoordinator(arguments) @@ -2021,38 +2070,6 @@ func TestIndexHashedNodesCoordinator_ShuffleOutNilConfig(t *testing.T) { require.Equal(t, 
expectedShardForNotFound, newShard) } -func TestIndexHashedNodesCoordinator_computeNodesConfigFromListNilPreviousNodesConfig(t *testing.T) { - t.Parallel() - - arguments := createArguments() - pk := []byte("pk") - arguments.SelfPublicKey = pk - ihnc, _ := NewIndexHashedNodesCoordinator(arguments) - - ihnc.flagWaitingListFix.Reset() - validatorInfos := make([]*state.ShardValidatorInfo, 0) - newNodesConfig, err := ihnc.computeNodesConfigFromList(nil, validatorInfos) - - assert.Nil(t, newNodesConfig) - assert.False(t, errors.Is(err, ErrNilPreviousEpochConfig)) - - newNodesConfig, err = ihnc.computeNodesConfigFromList(nil, nil) - - assert.Nil(t, newNodesConfig) - assert.False(t, errors.Is(err, ErrNilPreviousEpochConfig)) - - _ = ihnc.flagWaitingListFix.SetReturningPrevious() - newNodesConfig, err = ihnc.computeNodesConfigFromList(nil, validatorInfos) - - assert.Nil(t, newNodesConfig) - assert.True(t, errors.Is(err, ErrNilPreviousEpochConfig)) - - newNodesConfig, err = ihnc.computeNodesConfigFromList(nil, nil) - - assert.Nil(t, newNodesConfig) - assert.True(t, errors.Is(err, ErrNilPreviousEpochConfig)) -} - func TestIndexHashedNodesCoordinator_computeNodesConfigFromListNoValidators(t *testing.T) { t.Parallel() @@ -2062,12 +2079,12 @@ func TestIndexHashedNodesCoordinator_computeNodesConfigFromListNoValidators(t *t ihnc, _ := NewIndexHashedNodesCoordinator(arguments) validatorInfos := make([]*state.ShardValidatorInfo, 0) - newNodesConfig, err := ihnc.computeNodesConfigFromList(&epochNodesConfig{}, validatorInfos) + newNodesConfig, err := ihnc.computeNodesConfigFromList(validatorInfos) assert.Nil(t, newNodesConfig) assert.True(t, errors.Is(err, ErrMapSizeZero)) - newNodesConfig, err = ihnc.computeNodesConfigFromList(&epochNodesConfig{}, nil) + newNodesConfig, err = ihnc.computeNodesConfigFromList(nil) assert.Nil(t, newNodesConfig) assert.True(t, errors.Is(err, ErrMapSizeZero)) @@ -2099,13 +2116,62 @@ func TestIndexHashedNodesCoordinator_computeNodesConfigFromListNilPk(t *testing. 
}, } - newNodesConfig, err := ihnc.computeNodesConfigFromList(&epochNodesConfig{}, validatorInfos) + newNodesConfig, err := ihnc.computeNodesConfigFromList(validatorInfos) assert.Nil(t, newNodesConfig) assert.NotNil(t, err) assert.Equal(t, ErrNilPubKey, err) } +func TestIndexHashedNodesCoordinator_computeNodesConfigFromListWithStakingV4(t *testing.T) { + t.Parallel() + arguments := createArguments() + nc, _ := NewIndexHashedNodesCoordinator(arguments) + + shard0Eligible := &state.ShardValidatorInfo{ + PublicKey: []byte("pk0"), + List: string(common.EligibleList), + Index: 1, + TempRating: 2, + ShardId: 0, + } + shard0Auction := &state.ShardValidatorInfo{ + PublicKey: []byte("pk1"), + List: string(common.SelectedFromAuctionList), + Index: 3, + TempRating: 2, + ShardId: 0, + } + shard1Auction := &state.ShardValidatorInfo{ + PublicKey: []byte("pk2"), + List: string(common.SelectedFromAuctionList), + Index: 2, + TempRating: 2, + ShardId: 1, + } + validatorInfos := []*state.ShardValidatorInfo{shard0Eligible, shard0Auction, shard1Auction} + + newNodesConfig, err := nc.computeNodesConfigFromList(validatorInfos) + require.Equal(t, ErrReceivedAuctionValidatorsBeforeStakingV4, err) + require.Nil(t, newNodesConfig) + + nc.updateEpochFlags(stakingV4Epoch) + + newNodesConfig, err = nc.computeNodesConfigFromList(validatorInfos) + require.Nil(t, err) + v1, _ := NewValidator([]byte("pk2"), 1, 2) + v2, _ := NewValidator([]byte("pk1"), 1, 3) + require.Equal(t, []Validator{v1, v2}, newNodesConfig.auctionList) + + validatorInfos = append(validatorInfos, &state.ShardValidatorInfo{ + PublicKey: []byte("pk3"), + List: string(common.NewList), + }) + newNodesConfig, err = nc.computeNodesConfigFromList(validatorInfos) + require.Equal(t, epochStart.ErrReceivedNewListNodeInStakingV4, err) + require.Nil(t, newNodesConfig) +} + func TestIndexHashedNodesCoordinator_computeNodesConfigFromListValidatorsWithFix(t *testing.T) { t.Parallel() @@ -2113,7 +2179,7 @@ func TestIndexHashedNodesCoordinator_computeNodesConfigFromListValidatorsWithFix pk := []byte("pk") arguments.SelfPublicKey = pk ihnc, _ := NewIndexHashedNodesCoordinator(arguments) - _ = ihnc.flagWaitingListFix.SetReturningPrevious() + _ = ihnc.flagStakingV4Started.SetReturningPrevious() shard0Eligible0 := &state.ShardValidatorInfo{ PublicKey: []byte("pk0"), @@ -2154,15 +2220,18 @@ func TestIndexHashedNodesCoordinator_computeNodesConfigFromListValidatorsWithFix ShardId: 0, } shard0Leaving0 := &state.ShardValidatorInfo{ - PublicKey: []byte("pk6"), - List: string(common.LeavingList), - ShardId: 0, + PublicKey: []byte("pk6"), + List: string(common.LeavingList), + PreviousList: string(common.EligibleList), + ShardId: 0, } shardMetaLeaving1 := &state.ShardValidatorInfo{ - PublicKey: []byte("pk7"), - List: string(common.LeavingList), - Index: 1, - ShardId: core.MetachainShardId, + PublicKey: []byte("pk7"), + List: string(common.LeavingList), + PreviousList: string(common.WaitingList), + Index: 1, + PreviousIndex: 1, + ShardId: core.MetachainShardId, } validatorInfos := @@ -2177,29 +2246,7 @@ func TestIndexHashedNodesCoordinator_computeNodesConfigFromListValidatorsWithFix shardMetaLeaving1, } - previousConfig := &epochNodesConfig{ - eligibleMap: map[uint32][]Validator{ - 0: { - newValidatorMock(shard0Eligible0.PublicKey, 0, 0), - newValidatorMock(shard0Eligible1.PublicKey, 0, 0), - newValidatorMock(shard0Leaving0.PublicKey, 0, 0), - }, - core.MetachainShardId: { - newValidatorMock(shardmetaEligible0.PublicKey, 0, 0), - }, - }, - waitingMap: map[uint32][]Validator{ - 0: { 
- newValidatorMock(shard0Waiting0.PublicKey, 0, 0), - }, - core.MetachainShardId: { - newValidatorMock(shardmetaWaiting0.PublicKey, 0, 0), - newValidatorMock(shardMetaLeaving1.PublicKey, 0, 0), - }, - }, - } - - newNodesConfig, err := ihnc.computeNodesConfigFromList(previousConfig, validatorInfos) + newNodesConfig, err := ihnc.computeNodesConfigFromList(validatorInfos) assert.Nil(t, err) assert.Equal(t, uint32(1), newNodesConfig.nbShards) @@ -2293,10 +2340,6 @@ func TestIndexHashedNodesCoordinator_computeNodesConfigFromListValidatorsNoFix(t ShardId: core.MetachainShardId, } - previousConfig := &epochNodesConfig{ - eligibleMap: map[uint32][]Validator{}, - } - validatorInfos := []*state.ShardValidatorInfo{ shard0Eligible0, @@ -2309,8 +2352,8 @@ func TestIndexHashedNodesCoordinator_computeNodesConfigFromListValidatorsNoFix(t shardMetaLeaving1, } - ihnc.flagWaitingListFix.Reset() - newNodesConfig, err := ihnc.computeNodesConfigFromList(previousConfig, validatorInfos) + ihnc.flagStakingV4Started.Reset() + newNodesConfig, err := ihnc.computeNodesConfigFromList(validatorInfos) assert.Nil(t, err) assert.Equal(t, uint32(1), newNodesConfig.nbShards) @@ -2509,8 +2552,9 @@ func TestIndexHashedGroupSelector_GetWaitingEpochsLeftForPublicKey(t *testing.T) EnableEpochsHandler: &mock.EnableEpochsHandlerMock{ CurrentEpoch: 1, }, - ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, - GenesisNodesSetupHandler: &mock.NodesSetupMock{}, + ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + GenesisNodesSetupHandler: &mock.NodesSetupMock{}, + NodesCoordinatorRegistryFactory: createNodesCoordinatorRegistryFactory(), } ihnc, _ := NewIndexHashedNodesCoordinator(arguments) @@ -2585,6 +2629,7 @@ func TestIndexHashedGroupSelector_GetWaitingEpochsLeftForPublicKey(t *testing.T) return 0 }, }, + NodesCoordinatorRegistryFactory: createNodesCoordinatorRegistryFactory(), } ihnc, _ := NewIndexHashedNodesCoordinator(arguments) @@ -2669,6 +2714,7 @@ func TestIndexHashedGroupSelector_GetWaitingEpochsLeftForPublicKey(t *testing.T) return 2 }, }, + NodesCoordinatorRegistryFactory: createNodesCoordinatorRegistryFactory(), } ihnc, _ := NewIndexHashedNodesCoordinator(arguments) diff --git a/sharding/nodesCoordinator/interface.go b/sharding/nodesCoordinator/interface.go index 0c16a505364..68dfa9bbb15 100644 --- a/sharding/nodesCoordinator/interface.go +++ b/sharding/nodesCoordinator/interface.go @@ -5,6 +5,7 @@ import ( "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-go/epochStart" "github.com/multiversx/mx-chain-go/state" + vmcommon "github.com/multiversx/mx-chain-vm-common-go" ) // Validator defines a node that can be allocated to a shard for participation in a consensus group as validator @@ -46,6 +47,7 @@ type PublicKeysSelector interface { GetAllEligibleValidatorsPublicKeys(epoch uint32) (map[uint32][][]byte, error) GetAllWaitingValidatorsPublicKeys(epoch uint32) (map[uint32][][]byte, error) GetAllLeavingValidatorsPublicKeys(epoch uint32) (map[uint32][][]byte, error) + GetAllShuffledOutValidatorsPublicKeys(epoch uint32) (map[uint32][][]byte, error) GetConsensusValidatorsPublicKeys(randomness []byte, round uint64, shardId uint32, epoch uint32) ([]string, error) GetOwnPublicKey() []byte } @@ -138,3 +140,38 @@ type GenesisNodesSetupHandler interface { MinMetaHysteresisNodes() uint32 IsInterfaceNil() bool } + +// EpochValidatorsHandler defines what one epoch configuration for a nodes coordinator should hold +type EpochValidatorsHandler interface { + GetEligibleValidators() 
map[string][]*SerializableValidator + GetWaitingValidators() map[string][]*SerializableValidator + GetLeavingValidators() map[string][]*SerializableValidator +} + +// EpochValidatorsHandlerWithAuction defines what one epoch configuration for a nodes coordinator should hold + shuffled out validators +type EpochValidatorsHandlerWithAuction interface { + EpochValidatorsHandler + GetShuffledOutValidators() map[string][]*SerializableValidator +} + +// NodesCoordinatorRegistryHandler defines what is used to initialize nodes coordinator +type NodesCoordinatorRegistryHandler interface { + GetEpochsConfig() map[string]EpochValidatorsHandler + GetCurrentEpoch() uint32 + SetCurrentEpoch(epoch uint32) +} + +// NodesCoordinatorRegistryFactory handles NodesCoordinatorRegistryHandler marshall/unmarshall +type NodesCoordinatorRegistryFactory interface { + CreateNodesCoordinatorRegistry(buff []byte) (NodesCoordinatorRegistryHandler, error) + GetRegistryData(registry NodesCoordinatorRegistryHandler, epoch uint32) ([]byte, error) + IsInterfaceNil() bool +} + +// EpochNotifier can notify upon an epoch change and provide the current epoch +type EpochNotifier interface { + RegisterNotifyHandler(handler vmcommon.EpochSubscriberHandler) + CurrentEpoch() uint32 + CheckEpoch(header data.HeaderHandler) + IsInterfaceNil() bool +} diff --git a/sharding/nodesCoordinator/nodesCoordinatorRegistry.go b/sharding/nodesCoordinator/nodesCoordinatorRegistry.go new file mode 100644 index 00000000000..fbf84919d7a --- /dev/null +++ b/sharding/nodesCoordinator/nodesCoordinatorRegistry.go @@ -0,0 +1,49 @@ +package nodesCoordinator + +// EpochValidators holds one epoch configuration for a nodes coordinator +type EpochValidators struct { + EligibleValidators map[string][]*SerializableValidator `json:"eligibleValidators"` + WaitingValidators map[string][]*SerializableValidator `json:"waitingValidators"` + LeavingValidators map[string][]*SerializableValidator `json:"leavingValidators"` +} + +// GetEligibleValidators returns all eligible validators from all shards +func (ev *EpochValidators) GetEligibleValidators() map[string][]*SerializableValidator { + return ev.EligibleValidators +} + +// GetWaitingValidators returns all waiting validators from all shards +func (ev *EpochValidators) GetWaitingValidators() map[string][]*SerializableValidator { + return ev.WaitingValidators +} + +// GetLeavingValidators returns all leaving validators from all shards +func (ev *EpochValidators) GetLeavingValidators() map[string][]*SerializableValidator { + return ev.LeavingValidators +} + +// NodesCoordinatorRegistry holds the data that can be used to initialize a nodes coordinator +type NodesCoordinatorRegistry struct { + EpochsConfig map[string]*EpochValidators `json:"epochConfigs"` + CurrentEpoch uint32 `json:"currentEpoch"` +} + +// GetCurrentEpoch returns the current epoch +func (ncr *NodesCoordinatorRegistry) GetCurrentEpoch() uint32 { + return ncr.CurrentEpoch +} + +// GetEpochsConfig returns epoch-validators configuration +func (ncr *NodesCoordinatorRegistry) GetEpochsConfig() map[string]EpochValidatorsHandler { + ret := make(map[string]EpochValidatorsHandler) + for epoch, config := range ncr.EpochsConfig { + ret[epoch] = config + } + + return ret +} + +// SetCurrentEpoch sets internally the current epoch +func (ncr *NodesCoordinatorRegistry) SetCurrentEpoch(epoch uint32) { + ncr.CurrentEpoch = epoch +} diff --git a/sharding/nodesCoordinator/nodesCoordinatorRegistryFactory.go b/sharding/nodesCoordinator/nodesCoordinatorRegistryFactory.go new 
file mode 100644
index 00000000000..0ef508fbf89
--- /dev/null
+++ b/sharding/nodesCoordinator/nodesCoordinatorRegistryFactory.go
@@ -0,0 +1,80 @@
+package nodesCoordinator
+
+import (
+	"encoding/json"
+
+	"github.com/multiversx/mx-chain-core-go/core/check"
+	"github.com/multiversx/mx-chain-core-go/marshal"
+)
+
+type nodesCoordinatorRegistryFactory struct {
+	marshaller                marshal.Marshalizer
+	stakingV4Step2EnableEpoch uint32
+}
+
+// NewNodesCoordinatorRegistryFactory creates a nodes coordinator registry factory which will create a
+// NodesCoordinatorRegistryHandler from a buffer depending on the epoch
+func NewNodesCoordinatorRegistryFactory(
+	marshaller marshal.Marshalizer,
+	stakingV4Step2EnableEpoch uint32,
+) (*nodesCoordinatorRegistryFactory, error) {
+	if check.IfNil(marshaller) {
+		return nil, ErrNilMarshalizer
+	}
+
+	return &nodesCoordinatorRegistryFactory{
+		marshaller:                marshaller,
+		stakingV4Step2EnableEpoch: stakingV4Step2EnableEpoch,
+	}, nil
+}
+
+// CreateNodesCoordinatorRegistry creates a NodesCoordinatorRegistryHandler depending on the buffer. The old version uses
+// NodesCoordinatorRegistry with a JSON marshaller, while the new version (from staking v4) uses NodesCoordinatorRegistryWithAuction
+// with a proto marshaller
+func (ncf *nodesCoordinatorRegistryFactory) CreateNodesCoordinatorRegistry(buff []byte) (NodesCoordinatorRegistryHandler, error) {
+	registry, err := ncf.createRegistryWithAuction(buff)
+	if err == nil {
+		log.Debug("nodesCoordinatorRegistryFactory.CreateNodesCoordinatorRegistry created registry with auction",
+			"epoch", registry.CurrentEpoch)
+		return registry, nil
+	}
+	log.Debug("nodesCoordinatorRegistryFactory.CreateNodesCoordinatorRegistry creating old registry")
+	return createOldRegistry(buff)
+}
+
+func (ncf *nodesCoordinatorRegistryFactory) createRegistryWithAuction(buff []byte) (*NodesCoordinatorRegistryWithAuction, error) {
+	registry := &NodesCoordinatorRegistryWithAuction{}
+	err := ncf.marshaller.Unmarshal(registry, buff)
+	if err != nil {
+		return nil, err
+	}
+
+	log.Debug("nodesCoordinatorRegistryFactory.createRegistryWithAuction created registry with auction",
+		"epoch", registry.CurrentEpoch)
+	return registry, nil
+}
+
+func createOldRegistry(buff []byte) (*NodesCoordinatorRegistry, error) {
+	registry := &NodesCoordinatorRegistry{}
+	err := json.Unmarshal(buff, registry)
+	if err != nil {
+		return nil, err
+	}
+
+	return registry, nil
+}
+
+// GetRegistryData returns the registry data as a buffer. The old version uses the JSON marshaller, while the new version uses the proto marshaller
+func (ncf *nodesCoordinatorRegistryFactory) GetRegistryData(registry NodesCoordinatorRegistryHandler, epoch uint32) ([]byte, error) {
+	if epoch >= ncf.stakingV4Step2EnableEpoch {
+		log.Debug("nodesCoordinatorRegistryFactory.GetRegistryData called with auction after staking v4", "epoch", epoch)
+		return ncf.marshaller.Marshal(registry)
+	}
+	log.Debug("nodesCoordinatorRegistryFactory.GetRegistryData called with old json before staking v4", "epoch", epoch)
+	return json.Marshal(registry)
+}
+
+// IsInterfaceNil checks if the underlying pointer is nil
+func (ncf *nodesCoordinatorRegistryFactory) IsInterfaceNil() bool {
+	return ncf == nil
+}
diff --git a/sharding/nodesCoordinator/nodesCoordinatorRegistryWithAuction.go b/sharding/nodesCoordinator/nodesCoordinatorRegistryWithAuction.go
new file mode 100644
index 00000000000..d9bea843a16
--- /dev/null
+++ b/sharding/nodesCoordinator/nodesCoordinatorRegistryWithAuction.go
@@ -0,0 +1,47 @@
+//go:generate protoc -I=.
-I=$GOPATH/src -I=$GOPATH/src/github.com/multiversx/protobuf/protobuf --gogoslick_out=. nodesCoordinatorRegistryWithAuction.proto +package nodesCoordinator + +func protoValidatorsMapToSliceMap(validators map[string]Validators) map[string][]*SerializableValidator { + ret := make(map[string][]*SerializableValidator) + + for shardID, val := range validators { + ret[shardID] = val.GetData() + } + + return ret +} + +// GetEligibleValidators returns all eligible validators from all shards +func (m *EpochValidatorsWithAuction) GetEligibleValidators() map[string][]*SerializableValidator { + return protoValidatorsMapToSliceMap(m.GetEligible()) +} + +// GetWaitingValidators returns all waiting validators from all shards +func (m *EpochValidatorsWithAuction) GetWaitingValidators() map[string][]*SerializableValidator { + return protoValidatorsMapToSliceMap(m.GetWaiting()) +} + +// GetLeavingValidators returns all leaving validators from all shards +func (m *EpochValidatorsWithAuction) GetLeavingValidators() map[string][]*SerializableValidator { + return protoValidatorsMapToSliceMap(m.GetLeaving()) +} + +// GetShuffledOutValidators returns all shuffled out validators from all shards +func (m *EpochValidatorsWithAuction) GetShuffledOutValidators() map[string][]*SerializableValidator { + return protoValidatorsMapToSliceMap(m.GetShuffledOut()) +} + +// GetEpochsConfig returns epoch-validators configuration +func (m *NodesCoordinatorRegistryWithAuction) GetEpochsConfig() map[string]EpochValidatorsHandler { + ret := make(map[string]EpochValidatorsHandler) + for epoch, config := range m.GetEpochsConfigWithAuction() { + ret[epoch] = config + } + + return ret +} + +// SetCurrentEpoch sets internally the current epoch +func (m *NodesCoordinatorRegistryWithAuction) SetCurrentEpoch(epoch uint32) { + m.CurrentEpoch = epoch +} diff --git a/sharding/nodesCoordinator/nodesCoordinatorRegistryWithAuction.pb.go b/sharding/nodesCoordinator/nodesCoordinatorRegistryWithAuction.pb.go new file mode 100644 index 00000000000..3c69dc78080 --- /dev/null +++ b/sharding/nodesCoordinator/nodesCoordinatorRegistryWithAuction.pb.go @@ -0,0 +1,2128 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: nodesCoordinatorRegistryWithAuction.proto + +package nodesCoordinator + +import ( + bytes "bytes" + fmt "fmt" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type SerializableValidator struct { + PubKey []byte `protobuf:"bytes,1,opt,name=PubKey,proto3" json:"pubKey"` + Chances uint32 `protobuf:"varint,2,opt,name=Chances,proto3" json:"chances"` + Index uint32 `protobuf:"varint,3,opt,name=Index,proto3" json:"index"` +} + +func (m *SerializableValidator) Reset() { *m = SerializableValidator{} } +func (*SerializableValidator) ProtoMessage() {} +func (*SerializableValidator) Descriptor() ([]byte, []int) { + return fileDescriptor_f04461c784f438d5, []int{0} +} +func (m *SerializableValidator) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SerializableValidator) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SerializableValidator) XXX_Merge(src proto.Message) { + xxx_messageInfo_SerializableValidator.Merge(m, src) +} +func (m *SerializableValidator) XXX_Size() int { + return m.Size() +} +func (m *SerializableValidator) XXX_DiscardUnknown() { + xxx_messageInfo_SerializableValidator.DiscardUnknown(m) +} + +var xxx_messageInfo_SerializableValidator proto.InternalMessageInfo + +func (m *SerializableValidator) GetPubKey() []byte { + if m != nil { + return m.PubKey + } + return nil +} + +func (m *SerializableValidator) GetChances() uint32 { + if m != nil { + return m.Chances + } + return 0 +} + +func (m *SerializableValidator) GetIndex() uint32 { + if m != nil { + return m.Index + } + return 0 +} + +type Validators struct { + Data []*SerializableValidator `protobuf:"bytes,1,rep,name=Data,proto3" json:"Data,omitempty"` +} + +func (m *Validators) Reset() { *m = Validators{} } +func (*Validators) ProtoMessage() {} +func (*Validators) Descriptor() ([]byte, []int) { + return fileDescriptor_f04461c784f438d5, []int{1} +} +func (m *Validators) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Validators) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Validators) XXX_Merge(src proto.Message) { + xxx_messageInfo_Validators.Merge(m, src) +} +func (m *Validators) XXX_Size() int { + return m.Size() +} +func (m *Validators) XXX_DiscardUnknown() { + xxx_messageInfo_Validators.DiscardUnknown(m) +} + +var xxx_messageInfo_Validators proto.InternalMessageInfo + +func (m *Validators) GetData() []*SerializableValidator { + if m != nil { + return m.Data + } + return nil +} + +type EpochValidatorsWithAuction struct { + Eligible map[string]Validators `protobuf:"bytes,1,rep,name=Eligible,proto3" json:"Eligible" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Waiting map[string]Validators `protobuf:"bytes,2,rep,name=Waiting,proto3" json:"Waiting" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Leaving map[string]Validators `protobuf:"bytes,3,rep,name=Leaving,proto3" json:"Leaving" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + ShuffledOut map[string]Validators `protobuf:"bytes,4,rep,name=ShuffledOut,proto3" json:"ShuffledOut" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (m *EpochValidatorsWithAuction) Reset() { *m = EpochValidatorsWithAuction{} } +func (*EpochValidatorsWithAuction) ProtoMessage() {} 
+func (*EpochValidatorsWithAuction) Descriptor() ([]byte, []int) { + return fileDescriptor_f04461c784f438d5, []int{2} +} +func (m *EpochValidatorsWithAuction) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EpochValidatorsWithAuction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *EpochValidatorsWithAuction) XXX_Merge(src proto.Message) { + xxx_messageInfo_EpochValidatorsWithAuction.Merge(m, src) +} +func (m *EpochValidatorsWithAuction) XXX_Size() int { + return m.Size() +} +func (m *EpochValidatorsWithAuction) XXX_DiscardUnknown() { + xxx_messageInfo_EpochValidatorsWithAuction.DiscardUnknown(m) +} + +var xxx_messageInfo_EpochValidatorsWithAuction proto.InternalMessageInfo + +func (m *EpochValidatorsWithAuction) GetEligible() map[string]Validators { + if m != nil { + return m.Eligible + } + return nil +} + +func (m *EpochValidatorsWithAuction) GetWaiting() map[string]Validators { + if m != nil { + return m.Waiting + } + return nil +} + +func (m *EpochValidatorsWithAuction) GetLeaving() map[string]Validators { + if m != nil { + return m.Leaving + } + return nil +} + +func (m *EpochValidatorsWithAuction) GetShuffledOut() map[string]Validators { + if m != nil { + return m.ShuffledOut + } + return nil +} + +type NodesCoordinatorRegistryWithAuction struct { + CurrentEpoch uint32 `protobuf:"varint,1,opt,name=CurrentEpoch,proto3" json:"CurrentEpoch,omitempty"` + EpochsConfigWithAuction map[string]*EpochValidatorsWithAuction `protobuf:"bytes,2,rep,name=EpochsConfigWithAuction,proto3" json:"EpochsConfigWithAuction,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (m *NodesCoordinatorRegistryWithAuction) Reset() { *m = NodesCoordinatorRegistryWithAuction{} } +func (*NodesCoordinatorRegistryWithAuction) ProtoMessage() {} +func (*NodesCoordinatorRegistryWithAuction) Descriptor() ([]byte, []int) { + return fileDescriptor_f04461c784f438d5, []int{3} +} +func (m *NodesCoordinatorRegistryWithAuction) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NodesCoordinatorRegistryWithAuction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NodesCoordinatorRegistryWithAuction) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodesCoordinatorRegistryWithAuction.Merge(m, src) +} +func (m *NodesCoordinatorRegistryWithAuction) XXX_Size() int { + return m.Size() +} +func (m *NodesCoordinatorRegistryWithAuction) XXX_DiscardUnknown() { + xxx_messageInfo_NodesCoordinatorRegistryWithAuction.DiscardUnknown(m) +} + +var xxx_messageInfo_NodesCoordinatorRegistryWithAuction proto.InternalMessageInfo + +func (m *NodesCoordinatorRegistryWithAuction) GetCurrentEpoch() uint32 { + if m != nil { + return m.CurrentEpoch + } + return 0 +} + +func (m *NodesCoordinatorRegistryWithAuction) GetEpochsConfigWithAuction() map[string]*EpochValidatorsWithAuction { + if m != nil { + return m.EpochsConfigWithAuction + } + return nil +} + +func init() { + proto.RegisterType((*SerializableValidator)(nil), "proto.SerializableValidator") + proto.RegisterType((*Validators)(nil), "proto.Validators") + proto.RegisterType((*EpochValidatorsWithAuction)(nil), "proto.EpochValidatorsWithAuction") + proto.RegisterMapType((map[string]Validators)(nil), 
"proto.EpochValidatorsWithAuction.EligibleEntry") + proto.RegisterMapType((map[string]Validators)(nil), "proto.EpochValidatorsWithAuction.LeavingEntry") + proto.RegisterMapType((map[string]Validators)(nil), "proto.EpochValidatorsWithAuction.ShuffledOutEntry") + proto.RegisterMapType((map[string]Validators)(nil), "proto.EpochValidatorsWithAuction.WaitingEntry") + proto.RegisterType((*NodesCoordinatorRegistryWithAuction)(nil), "proto.NodesCoordinatorRegistryWithAuction") + proto.RegisterMapType((map[string]*EpochValidatorsWithAuction)(nil), "proto.NodesCoordinatorRegistryWithAuction.EpochsConfigWithAuctionEntry") +} + +func init() { + proto.RegisterFile("nodesCoordinatorRegistryWithAuction.proto", fileDescriptor_f04461c784f438d5) +} + +var fileDescriptor_f04461c784f438d5 = []byte{ + // 561 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x94, 0x4f, 0x8f, 0xd2, 0x40, + 0x18, 0xc6, 0x3b, 0xb0, 0x80, 0xfb, 0x02, 0x09, 0x4e, 0x62, 0x6c, 0xc8, 0x66, 0xc0, 0x1a, 0x23, + 0x1e, 0x2c, 0x06, 0x0f, 0x1a, 0x0f, 0x26, 0x82, 0xc4, 0xf8, 0x0f, 0xdd, 0x6e, 0xe2, 0x26, 0x7b, + 0x6b, 0x61, 0x28, 0x13, 0xbb, 0x1d, 0x52, 0xa6, 0x1b, 0xf1, 0xa4, 0xf1, 0x0b, 0xf8, 0x31, 0x3c, + 0xf8, 0x11, 0xfc, 0x00, 0x7b, 0xe4, 0xc8, 0x89, 0x48, 0xb9, 0x18, 0x4e, 0xfb, 0x11, 0x0c, 0xd3, + 0xb2, 0x5b, 0x36, 0x8b, 0x6c, 0xb2, 0x9e, 0x98, 0x3e, 0x33, 0xcf, 0xef, 0x19, 0x1e, 0x5e, 0x0a, + 0xf7, 0x5c, 0xde, 0xa1, 0x83, 0x06, 0xe7, 0x5e, 0x87, 0xb9, 0xa6, 0xe0, 0x9e, 0x41, 0x6d, 0x36, + 0x10, 0xde, 0x70, 0x9f, 0x89, 0xde, 0x33, 0xbf, 0x2d, 0x18, 0x77, 0xf5, 0xbe, 0xc7, 0x05, 0xc7, + 0x29, 0xf9, 0x51, 0xbc, 0x6f, 0x33, 0xd1, 0xf3, 0x2d, 0xbd, 0xcd, 0x0f, 0xab, 0x36, 0xb7, 0x79, + 0x55, 0xca, 0x96, 0xdf, 0x95, 0x4f, 0xf2, 0x41, 0xae, 0x42, 0x97, 0xf6, 0x0d, 0xc1, 0x8d, 0x3d, + 0xea, 0x31, 0xd3, 0x61, 0x9f, 0x4d, 0xcb, 0xa1, 0x1f, 0x4c, 0x87, 0x75, 0x16, 0x41, 0x58, 0x83, + 0xf4, 0x7b, 0xdf, 0x7a, 0x4d, 0x87, 0x2a, 0x2a, 0xa3, 0x4a, 0xae, 0x0e, 0xf3, 0x49, 0x29, 0xdd, + 0x97, 0x8a, 0x11, 0xed, 0xe0, 0x3b, 0x90, 0x69, 0xf4, 0x4c, 0xb7, 0x4d, 0x07, 0x6a, 0xa2, 0x8c, + 0x2a, 0xf9, 0x7a, 0x76, 0x3e, 0x29, 0x65, 0xda, 0xa1, 0x64, 0x2c, 0xf7, 0x70, 0x09, 0x52, 0x2f, + 0xdd, 0x0e, 0xfd, 0xa4, 0x26, 0xe5, 0xa1, 0xed, 0xf9, 0xa4, 0x94, 0x62, 0x0b, 0xc1, 0x08, 0x75, + 0xed, 0x29, 0xc0, 0x69, 0xf0, 0x00, 0x3f, 0x80, 0xad, 0xe7, 0xa6, 0x30, 0x55, 0x54, 0x4e, 0x56, + 0xb2, 0xb5, 0x9d, 0xf0, 0xa6, 0xfa, 0x85, 0xb7, 0x34, 0xe4, 0x49, 0xed, 0x67, 0x0a, 0x8a, 0xcd, + 0x3e, 0x6f, 0xf7, 0xce, 0x28, 0xb1, 0x82, 0xf0, 0x2e, 0x5c, 0x6b, 0x3a, 0xcc, 0x66, 0x96, 0x43, + 0x23, 0x68, 0x35, 0x82, 0xae, 0x37, 0xe9, 0x4b, 0x47, 0xd3, 0x15, 0xde, 0xb0, 0xbe, 0x75, 0x3c, + 0x29, 0x29, 0xc6, 0x29, 0x06, 0xb7, 0x20, 0xb3, 0x6f, 0x32, 0xc1, 0x5c, 0x5b, 0x4d, 0x48, 0xa2, + 0xbe, 0x99, 0x18, 0x19, 0xe2, 0xc0, 0x25, 0x64, 0xc1, 0x7b, 0x43, 0xcd, 0xa3, 0x05, 0x2f, 0x79, + 0x59, 0x5e, 0x64, 0x58, 0xe1, 0x45, 0x1a, 0x3e, 0x80, 0xec, 0x5e, 0xcf, 0xef, 0x76, 0x1d, 0xda, + 0x79, 0xe7, 0x0b, 0x75, 0x4b, 0x32, 0x6b, 0x9b, 0x99, 0x31, 0x53, 0x9c, 0x1b, 0x87, 0x15, 0x5b, + 0x90, 0x5f, 0x29, 0x07, 0x17, 0x20, 0xf9, 0x31, 0x9a, 0x93, 0x6d, 0x63, 0xb1, 0xc4, 0x77, 0x21, + 0x75, 0x64, 0x3a, 0x3e, 0x95, 0x63, 0x91, 0xad, 0x5d, 0x8f, 0x82, 0xcf, 0x32, 0x8d, 0x70, 0xff, + 0x49, 0xe2, 0x31, 0x2a, 0xbe, 0x85, 0x5c, 0xbc, 0x9a, 0xff, 0x80, 0x8b, 0x37, 0x73, 0x55, 0xdc, + 0x2e, 0x14, 0xce, 0x97, 0x72, 0x45, 0xa4, 0xf6, 0x2b, 0x01, 0xb7, 0x5b, 0x9b, 0xff, 0xd8, 0x58, + 0x83, 0x5c, 0xc3, 0xf7, 0x3c, 0xea, 0x0a, 0xf9, 0x8b, 0xc9, 0xbc, 
0xbc, 0xb1, 0xa2, 0xe1, 0xaf, + 0x08, 0x6e, 0xca, 0xd5, 0xa0, 0xc1, 0xdd, 0x2e, 0xb3, 0x63, 0xfe, 0x68, 0x32, 0x5f, 0x44, 0x77, + 0xb9, 0x44, 0xa2, 0xbe, 0x86, 0x24, 0xbf, 0xb5, 0xb1, 0x2e, 0xa7, 0x78, 0x08, 0x3b, 0xff, 0x32, + 0x5e, 0x50, 0xd7, 0xa3, 0xd5, 0xba, 0x6e, 0x6d, 0x1c, 0xcc, 0x58, 0x7d, 0xf5, 0x57, 0xa3, 0x29, + 0x51, 0xc6, 0x53, 0xa2, 0x9c, 0x4c, 0x09, 0xfa, 0x12, 0x10, 0xf4, 0x23, 0x20, 0xe8, 0x38, 0x20, + 0x68, 0x14, 0x10, 0x34, 0x0e, 0x08, 0xfa, 0x1d, 0x10, 0xf4, 0x27, 0x20, 0xca, 0x49, 0x40, 0xd0, + 0xf7, 0x19, 0x51, 0x46, 0x33, 0xa2, 0x8c, 0x67, 0x44, 0x39, 0x28, 0x9c, 0x7f, 0x9d, 0x5a, 0x69, + 0x19, 0xfc, 0xf0, 0x6f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x45, 0x19, 0xc5, 0xc4, 0x69, 0x05, 0x00, + 0x00, +} + +func (this *SerializableValidator) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*SerializableValidator) + if !ok { + that2, ok := that.(SerializableValidator) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !bytes.Equal(this.PubKey, that1.PubKey) { + return false + } + if this.Chances != that1.Chances { + return false + } + if this.Index != that1.Index { + return false + } + return true +} +func (this *Validators) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Validators) + if !ok { + that2, ok := that.(Validators) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if len(this.Data) != len(that1.Data) { + return false + } + for i := range this.Data { + if !this.Data[i].Equal(that1.Data[i]) { + return false + } + } + return true +} +func (this *EpochValidatorsWithAuction) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*EpochValidatorsWithAuction) + if !ok { + that2, ok := that.(EpochValidatorsWithAuction) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if len(this.Eligible) != len(that1.Eligible) { + return false + } + for i := range this.Eligible { + a := this.Eligible[i] + b := that1.Eligible[i] + if !(&a).Equal(&b) { + return false + } + } + if len(this.Waiting) != len(that1.Waiting) { + return false + } + for i := range this.Waiting { + a := this.Waiting[i] + b := that1.Waiting[i] + if !(&a).Equal(&b) { + return false + } + } + if len(this.Leaving) != len(that1.Leaving) { + return false + } + for i := range this.Leaving { + a := this.Leaving[i] + b := that1.Leaving[i] + if !(&a).Equal(&b) { + return false + } + } + if len(this.ShuffledOut) != len(that1.ShuffledOut) { + return false + } + for i := range this.ShuffledOut { + a := this.ShuffledOut[i] + b := that1.ShuffledOut[i] + if !(&a).Equal(&b) { + return false + } + } + return true +} +func (this *NodesCoordinatorRegistryWithAuction) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*NodesCoordinatorRegistryWithAuction) + if !ok { + that2, ok := that.(NodesCoordinatorRegistryWithAuction) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.CurrentEpoch != that1.CurrentEpoch { + return false + } + if len(this.EpochsConfigWithAuction) != len(that1.EpochsConfigWithAuction) { + return false + } + for i := range 
this.EpochsConfigWithAuction { + if !this.EpochsConfigWithAuction[i].Equal(that1.EpochsConfigWithAuction[i]) { + return false + } + } + return true +} +func (this *SerializableValidator) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&nodesCoordinator.SerializableValidator{") + s = append(s, "PubKey: "+fmt.Sprintf("%#v", this.PubKey)+",\n") + s = append(s, "Chances: "+fmt.Sprintf("%#v", this.Chances)+",\n") + s = append(s, "Index: "+fmt.Sprintf("%#v", this.Index)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Validators) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&nodesCoordinator.Validators{") + if this.Data != nil { + s = append(s, "Data: "+fmt.Sprintf("%#v", this.Data)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *EpochValidatorsWithAuction) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 8) + s = append(s, "&nodesCoordinator.EpochValidatorsWithAuction{") + keysForEligible := make([]string, 0, len(this.Eligible)) + for k, _ := range this.Eligible { + keysForEligible = append(keysForEligible, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForEligible) + mapStringForEligible := "map[string]Validators{" + for _, k := range keysForEligible { + mapStringForEligible += fmt.Sprintf("%#v: %#v,", k, this.Eligible[k]) + } + mapStringForEligible += "}" + if this.Eligible != nil { + s = append(s, "Eligible: "+mapStringForEligible+",\n") + } + keysForWaiting := make([]string, 0, len(this.Waiting)) + for k, _ := range this.Waiting { + keysForWaiting = append(keysForWaiting, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForWaiting) + mapStringForWaiting := "map[string]Validators{" + for _, k := range keysForWaiting { + mapStringForWaiting += fmt.Sprintf("%#v: %#v,", k, this.Waiting[k]) + } + mapStringForWaiting += "}" + if this.Waiting != nil { + s = append(s, "Waiting: "+mapStringForWaiting+",\n") + } + keysForLeaving := make([]string, 0, len(this.Leaving)) + for k, _ := range this.Leaving { + keysForLeaving = append(keysForLeaving, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForLeaving) + mapStringForLeaving := "map[string]Validators{" + for _, k := range keysForLeaving { + mapStringForLeaving += fmt.Sprintf("%#v: %#v,", k, this.Leaving[k]) + } + mapStringForLeaving += "}" + if this.Leaving != nil { + s = append(s, "Leaving: "+mapStringForLeaving+",\n") + } + keysForShuffledOut := make([]string, 0, len(this.ShuffledOut)) + for k, _ := range this.ShuffledOut { + keysForShuffledOut = append(keysForShuffledOut, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForShuffledOut) + mapStringForShuffledOut := "map[string]Validators{" + for _, k := range keysForShuffledOut { + mapStringForShuffledOut += fmt.Sprintf("%#v: %#v,", k, this.ShuffledOut[k]) + } + mapStringForShuffledOut += "}" + if this.ShuffledOut != nil { + s = append(s, "ShuffledOut: "+mapStringForShuffledOut+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *NodesCoordinatorRegistryWithAuction) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&nodesCoordinator.NodesCoordinatorRegistryWithAuction{") + s = append(s, "CurrentEpoch: "+fmt.Sprintf("%#v", this.CurrentEpoch)+",\n") + keysForEpochsConfigWithAuction := make([]string, 0, len(this.EpochsConfigWithAuction)) + for k, _ := range this.EpochsConfigWithAuction { + 
keysForEpochsConfigWithAuction = append(keysForEpochsConfigWithAuction, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForEpochsConfigWithAuction) + mapStringForEpochsConfigWithAuction := "map[string]*EpochValidatorsWithAuction{" + for _, k := range keysForEpochsConfigWithAuction { + mapStringForEpochsConfigWithAuction += fmt.Sprintf("%#v: %#v,", k, this.EpochsConfigWithAuction[k]) + } + mapStringForEpochsConfigWithAuction += "}" + if this.EpochsConfigWithAuction != nil { + s = append(s, "EpochsConfigWithAuction: "+mapStringForEpochsConfigWithAuction+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringNodesCoordinatorRegistryWithAuction(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *SerializableValidator) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SerializableValidator) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SerializableValidator) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Index != 0 { + i = encodeVarintNodesCoordinatorRegistryWithAuction(dAtA, i, uint64(m.Index)) + i-- + dAtA[i] = 0x18 + } + if m.Chances != 0 { + i = encodeVarintNodesCoordinatorRegistryWithAuction(dAtA, i, uint64(m.Chances)) + i-- + dAtA[i] = 0x10 + } + if len(m.PubKey) > 0 { + i -= len(m.PubKey) + copy(dAtA[i:], m.PubKey) + i = encodeVarintNodesCoordinatorRegistryWithAuction(dAtA, i, uint64(len(m.PubKey))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Validators) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Validators) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Validators) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Data) > 0 { + for iNdEx := len(m.Data) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Data[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintNodesCoordinatorRegistryWithAuction(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *EpochValidatorsWithAuction) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EpochValidatorsWithAuction) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EpochValidatorsWithAuction) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ShuffledOut) > 0 { + keysForShuffledOut := make([]string, 0, len(m.ShuffledOut)) + for k := range m.ShuffledOut { + keysForShuffledOut = append(keysForShuffledOut, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForShuffledOut) + for iNdEx := len(keysForShuffledOut) - 1; iNdEx >= 0; iNdEx-- { + v := 
m.ShuffledOut[string(keysForShuffledOut[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintNodesCoordinatorRegistryWithAuction(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForShuffledOut[iNdEx]) + copy(dAtA[i:], keysForShuffledOut[iNdEx]) + i = encodeVarintNodesCoordinatorRegistryWithAuction(dAtA, i, uint64(len(keysForShuffledOut[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintNodesCoordinatorRegistryWithAuction(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x22 + } + } + if len(m.Leaving) > 0 { + keysForLeaving := make([]string, 0, len(m.Leaving)) + for k := range m.Leaving { + keysForLeaving = append(keysForLeaving, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForLeaving) + for iNdEx := len(keysForLeaving) - 1; iNdEx >= 0; iNdEx-- { + v := m.Leaving[string(keysForLeaving[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintNodesCoordinatorRegistryWithAuction(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForLeaving[iNdEx]) + copy(dAtA[i:], keysForLeaving[iNdEx]) + i = encodeVarintNodesCoordinatorRegistryWithAuction(dAtA, i, uint64(len(keysForLeaving[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintNodesCoordinatorRegistryWithAuction(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x1a + } + } + if len(m.Waiting) > 0 { + keysForWaiting := make([]string, 0, len(m.Waiting)) + for k := range m.Waiting { + keysForWaiting = append(keysForWaiting, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForWaiting) + for iNdEx := len(keysForWaiting) - 1; iNdEx >= 0; iNdEx-- { + v := m.Waiting[string(keysForWaiting[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintNodesCoordinatorRegistryWithAuction(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForWaiting[iNdEx]) + copy(dAtA[i:], keysForWaiting[iNdEx]) + i = encodeVarintNodesCoordinatorRegistryWithAuction(dAtA, i, uint64(len(keysForWaiting[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintNodesCoordinatorRegistryWithAuction(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Eligible) > 0 { + keysForEligible := make([]string, 0, len(m.Eligible)) + for k := range m.Eligible { + keysForEligible = append(keysForEligible, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForEligible) + for iNdEx := len(keysForEligible) - 1; iNdEx >= 0; iNdEx-- { + v := m.Eligible[string(keysForEligible[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintNodesCoordinatorRegistryWithAuction(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForEligible[iNdEx]) + copy(dAtA[i:], keysForEligible[iNdEx]) + i = encodeVarintNodesCoordinatorRegistryWithAuction(dAtA, i, uint64(len(keysForEligible[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintNodesCoordinatorRegistryWithAuction(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *NodesCoordinatorRegistryWithAuction) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NodesCoordinatorRegistryWithAuction) MarshalTo(dAtA []byte) (int, 
error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NodesCoordinatorRegistryWithAuction) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.EpochsConfigWithAuction) > 0 { + keysForEpochsConfigWithAuction := make([]string, 0, len(m.EpochsConfigWithAuction)) + for k := range m.EpochsConfigWithAuction { + keysForEpochsConfigWithAuction = append(keysForEpochsConfigWithAuction, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForEpochsConfigWithAuction) + for iNdEx := len(keysForEpochsConfigWithAuction) - 1; iNdEx >= 0; iNdEx-- { + v := m.EpochsConfigWithAuction[string(keysForEpochsConfigWithAuction[iNdEx])] + baseI := i + if v != nil { + { + size, err := v.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintNodesCoordinatorRegistryWithAuction(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(keysForEpochsConfigWithAuction[iNdEx]) + copy(dAtA[i:], keysForEpochsConfigWithAuction[iNdEx]) + i = encodeVarintNodesCoordinatorRegistryWithAuction(dAtA, i, uint64(len(keysForEpochsConfigWithAuction[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintNodesCoordinatorRegistryWithAuction(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } + } + if m.CurrentEpoch != 0 { + i = encodeVarintNodesCoordinatorRegistryWithAuction(dAtA, i, uint64(m.CurrentEpoch)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func encodeVarintNodesCoordinatorRegistryWithAuction(dAtA []byte, offset int, v uint64) int { + offset -= sovNodesCoordinatorRegistryWithAuction(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *SerializableValidator) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.PubKey) + if l > 0 { + n += 1 + l + sovNodesCoordinatorRegistryWithAuction(uint64(l)) + } + if m.Chances != 0 { + n += 1 + sovNodesCoordinatorRegistryWithAuction(uint64(m.Chances)) + } + if m.Index != 0 { + n += 1 + sovNodesCoordinatorRegistryWithAuction(uint64(m.Index)) + } + return n +} + +func (m *Validators) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Data) > 0 { + for _, e := range m.Data { + l = e.Size() + n += 1 + l + sovNodesCoordinatorRegistryWithAuction(uint64(l)) + } + } + return n +} + +func (m *EpochValidatorsWithAuction) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Eligible) > 0 { + for k, v := range m.Eligible { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovNodesCoordinatorRegistryWithAuction(uint64(len(k))) + 1 + l + sovNodesCoordinatorRegistryWithAuction(uint64(l)) + n += mapEntrySize + 1 + sovNodesCoordinatorRegistryWithAuction(uint64(mapEntrySize)) + } + } + if len(m.Waiting) > 0 { + for k, v := range m.Waiting { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovNodesCoordinatorRegistryWithAuction(uint64(len(k))) + 1 + l + sovNodesCoordinatorRegistryWithAuction(uint64(l)) + n += mapEntrySize + 1 + sovNodesCoordinatorRegistryWithAuction(uint64(mapEntrySize)) + } + } + if len(m.Leaving) > 0 { + for k, v := range m.Leaving { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovNodesCoordinatorRegistryWithAuction(uint64(len(k))) + 1 + l + sovNodesCoordinatorRegistryWithAuction(uint64(l)) + n += mapEntrySize + 1 + sovNodesCoordinatorRegistryWithAuction(uint64(mapEntrySize)) + } + } + if len(m.ShuffledOut) > 0 { + 
for k, v := range m.ShuffledOut { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovNodesCoordinatorRegistryWithAuction(uint64(len(k))) + 1 + l + sovNodesCoordinatorRegistryWithAuction(uint64(l)) + n += mapEntrySize + 1 + sovNodesCoordinatorRegistryWithAuction(uint64(mapEntrySize)) + } + } + return n +} + +func (m *NodesCoordinatorRegistryWithAuction) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.CurrentEpoch != 0 { + n += 1 + sovNodesCoordinatorRegistryWithAuction(uint64(m.CurrentEpoch)) + } + if len(m.EpochsConfigWithAuction) > 0 { + for k, v := range m.EpochsConfigWithAuction { + _ = k + _ = v + l = 0 + if v != nil { + l = v.Size() + l += 1 + sovNodesCoordinatorRegistryWithAuction(uint64(l)) + } + mapEntrySize := 1 + len(k) + sovNodesCoordinatorRegistryWithAuction(uint64(len(k))) + l + n += mapEntrySize + 1 + sovNodesCoordinatorRegistryWithAuction(uint64(mapEntrySize)) + } + } + return n +} + +func sovNodesCoordinatorRegistryWithAuction(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozNodesCoordinatorRegistryWithAuction(x uint64) (n int) { + return sovNodesCoordinatorRegistryWithAuction(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *SerializableValidator) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SerializableValidator{`, + `PubKey:` + fmt.Sprintf("%v", this.PubKey) + `,`, + `Chances:` + fmt.Sprintf("%v", this.Chances) + `,`, + `Index:` + fmt.Sprintf("%v", this.Index) + `,`, + `}`, + }, "") + return s +} +func (this *Validators) String() string { + if this == nil { + return "nil" + } + repeatedStringForData := "[]*SerializableValidator{" + for _, f := range this.Data { + repeatedStringForData += strings.Replace(f.String(), "SerializableValidator", "SerializableValidator", 1) + "," + } + repeatedStringForData += "}" + s := strings.Join([]string{`&Validators{`, + `Data:` + repeatedStringForData + `,`, + `}`, + }, "") + return s +} +func (this *EpochValidatorsWithAuction) String() string { + if this == nil { + return "nil" + } + keysForEligible := make([]string, 0, len(this.Eligible)) + for k, _ := range this.Eligible { + keysForEligible = append(keysForEligible, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForEligible) + mapStringForEligible := "map[string]Validators{" + for _, k := range keysForEligible { + mapStringForEligible += fmt.Sprintf("%v: %v,", k, this.Eligible[k]) + } + mapStringForEligible += "}" + keysForWaiting := make([]string, 0, len(this.Waiting)) + for k, _ := range this.Waiting { + keysForWaiting = append(keysForWaiting, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForWaiting) + mapStringForWaiting := "map[string]Validators{" + for _, k := range keysForWaiting { + mapStringForWaiting += fmt.Sprintf("%v: %v,", k, this.Waiting[k]) + } + mapStringForWaiting += "}" + keysForLeaving := make([]string, 0, len(this.Leaving)) + for k, _ := range this.Leaving { + keysForLeaving = append(keysForLeaving, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForLeaving) + mapStringForLeaving := "map[string]Validators{" + for _, k := range keysForLeaving { + mapStringForLeaving += fmt.Sprintf("%v: %v,", k, this.Leaving[k]) + } + mapStringForLeaving += "}" + keysForShuffledOut := make([]string, 0, len(this.ShuffledOut)) + for k, _ := range this.ShuffledOut { + keysForShuffledOut = append(keysForShuffledOut, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForShuffledOut) + mapStringForShuffledOut := "map[string]Validators{" + for _, k 
:= range keysForShuffledOut { + mapStringForShuffledOut += fmt.Sprintf("%v: %v,", k, this.ShuffledOut[k]) + } + mapStringForShuffledOut += "}" + s := strings.Join([]string{`&EpochValidatorsWithAuction{`, + `Eligible:` + mapStringForEligible + `,`, + `Waiting:` + mapStringForWaiting + `,`, + `Leaving:` + mapStringForLeaving + `,`, + `ShuffledOut:` + mapStringForShuffledOut + `,`, + `}`, + }, "") + return s +} +func (this *NodesCoordinatorRegistryWithAuction) String() string { + if this == nil { + return "nil" + } + keysForEpochsConfigWithAuction := make([]string, 0, len(this.EpochsConfigWithAuction)) + for k, _ := range this.EpochsConfigWithAuction { + keysForEpochsConfigWithAuction = append(keysForEpochsConfigWithAuction, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForEpochsConfigWithAuction) + mapStringForEpochsConfigWithAuction := "map[string]*EpochValidatorsWithAuction{" + for _, k := range keysForEpochsConfigWithAuction { + mapStringForEpochsConfigWithAuction += fmt.Sprintf("%v: %v,", k, this.EpochsConfigWithAuction[k]) + } + mapStringForEpochsConfigWithAuction += "}" + s := strings.Join([]string{`&NodesCoordinatorRegistryWithAuction{`, + `CurrentEpoch:` + fmt.Sprintf("%v", this.CurrentEpoch) + `,`, + `EpochsConfigWithAuction:` + mapStringForEpochsConfigWithAuction + `,`, + `}`, + }, "") + return s +} +func valueToStringNodesCoordinatorRegistryWithAuction(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *SerializableValidator) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNodesCoordinatorRegistryWithAuction + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SerializableValidator: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SerializableValidator: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PubKey", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNodesCoordinatorRegistryWithAuction + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PubKey = append(m.PubKey[:0], dAtA[iNdEx:postIndex]...) 
+ if m.PubKey == nil { + m.PubKey = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Chances", wireType) + } + m.Chances = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNodesCoordinatorRegistryWithAuction + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Chances |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType) + } + m.Index = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNodesCoordinatorRegistryWithAuction + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Index |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipNodesCoordinatorRegistryWithAuction(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Validators) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNodesCoordinatorRegistryWithAuction + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Validators: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Validators: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNodesCoordinatorRegistryWithAuction + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = append(m.Data, &SerializableValidator{}) + if err := m.Data[len(m.Data)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipNodesCoordinatorRegistryWithAuction(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EpochValidatorsWithAuction) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowNodesCoordinatorRegistryWithAuction + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EpochValidatorsWithAuction: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EpochValidatorsWithAuction: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Eligible", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNodesCoordinatorRegistryWithAuction + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Eligible == nil { + m.Eligible = make(map[string]Validators) + } + var mapkey string + mapvalue := &Validators{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNodesCoordinatorRegistryWithAuction + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNodesCoordinatorRegistryWithAuction + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNodesCoordinatorRegistryWithAuction + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &Validators{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipNodesCoordinatorRegistryWithAuction(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Eligible[mapkey] = *mapvalue + iNdEx = postIndex + case 2: + if wireType 
!= 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Waiting", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNodesCoordinatorRegistryWithAuction + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Waiting == nil { + m.Waiting = make(map[string]Validators) + } + var mapkey string + mapvalue := &Validators{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNodesCoordinatorRegistryWithAuction + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNodesCoordinatorRegistryWithAuction + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNodesCoordinatorRegistryWithAuction + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &Validators{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipNodesCoordinatorRegistryWithAuction(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Waiting[mapkey] = *mapvalue + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Leaving", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNodesCoordinatorRegistryWithAuction + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return 
ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Leaving == nil { + m.Leaving = make(map[string]Validators) + } + var mapkey string + mapvalue := &Validators{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNodesCoordinatorRegistryWithAuction + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNodesCoordinatorRegistryWithAuction + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNodesCoordinatorRegistryWithAuction + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &Validators{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipNodesCoordinatorRegistryWithAuction(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Leaving[mapkey] = *mapvalue + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ShuffledOut", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNodesCoordinatorRegistryWithAuction + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ShuffledOut == nil { + m.ShuffledOut = make(map[string]Validators) + } + var mapkey string + mapvalue := &Validators{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNodesCoordinatorRegistryWithAuction + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 
0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNodesCoordinatorRegistryWithAuction + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNodesCoordinatorRegistryWithAuction + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &Validators{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipNodesCoordinatorRegistryWithAuction(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.ShuffledOut[mapkey] = *mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipNodesCoordinatorRegistryWithAuction(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NodesCoordinatorRegistryWithAuction) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNodesCoordinatorRegistryWithAuction + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NodesCoordinatorRegistryWithAuction: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NodesCoordinatorRegistryWithAuction: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentEpoch", wireType) + } + m.CurrentEpoch = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNodesCoordinatorRegistryWithAuction + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CurrentEpoch |= uint32(b&0x7F) << shift + if 
b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EpochsConfigWithAuction", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNodesCoordinatorRegistryWithAuction + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.EpochsConfigWithAuction == nil { + m.EpochsConfigWithAuction = make(map[string]*EpochValidatorsWithAuction) + } + var mapkey string + var mapvalue *EpochValidatorsWithAuction + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNodesCoordinatorRegistryWithAuction + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNodesCoordinatorRegistryWithAuction + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNodesCoordinatorRegistryWithAuction + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &EpochValidatorsWithAuction{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipNodesCoordinatorRegistryWithAuction(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.EpochsConfigWithAuction[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipNodesCoordinatorRegistryWithAuction(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { 
+	return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func skipNodesCoordinatorRegistryWithAuction(dAtA []byte) (n int, err error) {
+	l := len(dAtA)
+	iNdEx := 0
+	depth := 0
+	for iNdEx < l {
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return 0, ErrIntOverflowNodesCoordinatorRegistryWithAuction
+			}
+			if iNdEx >= l {
+				return 0, io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		wireType := int(wire & 0x7)
+		switch wireType {
+		case 0:
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowNodesCoordinatorRegistryWithAuction
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				iNdEx++
+				if dAtA[iNdEx-1] < 0x80 {
+					break
+				}
+			}
+		case 1:
+			iNdEx += 8
+		case 2:
+			var length int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowNodesCoordinatorRegistryWithAuction
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				length |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if length < 0 {
+				return 0, ErrInvalidLengthNodesCoordinatorRegistryWithAuction
+			}
+			iNdEx += length
+		case 3:
+			depth++
+		case 4:
+			if depth == 0 {
+				return 0, ErrUnexpectedEndOfGroupNodesCoordinatorRegistryWithAuction
+			}
+			depth--
+		case 5:
+			iNdEx += 4
+		default:
+			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+		}
+		if iNdEx < 0 {
+			return 0, ErrInvalidLengthNodesCoordinatorRegistryWithAuction
+		}
+		if depth == 0 {
+			return iNdEx, nil
+		}
+	}
+	return 0, io.ErrUnexpectedEOF
+}
+
+var (
+	ErrInvalidLengthNodesCoordinatorRegistryWithAuction = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowNodesCoordinatorRegistryWithAuction = fmt.Errorf("proto: integer overflow")
+	ErrUnexpectedEndOfGroupNodesCoordinatorRegistryWithAuction = fmt.Errorf("proto: unexpected end of group")
+)
diff --git a/sharding/nodesCoordinator/nodesCoordinatorRegistryWithAuction.proto b/sharding/nodesCoordinator/nodesCoordinatorRegistryWithAuction.proto
new file mode 100644
index 00000000000..3ff1c90acb1
--- /dev/null
+++ b/sharding/nodesCoordinator/nodesCoordinatorRegistryWithAuction.proto
@@ -0,0 +1,30 @@
+syntax = "proto3";
+
+package proto;
+
+option go_package = "nodesCoordinator";
+option (gogoproto.stable_marshaler_all) = true;
+
+import "github.com/gogo/protobuf/gogoproto/gogo.proto";
+
+message SerializableValidator {
+  bytes PubKey = 1 [(gogoproto.jsontag) = "pubKey"];
+  uint32 Chances = 2 [(gogoproto.jsontag) = "chances"];
+  uint32 Index = 3 [(gogoproto.jsontag) = "index"];
+}
+
+message Validators {
+  repeated SerializableValidator Data = 1;
+}
+
+message EpochValidatorsWithAuction {
+  map<string, Validators> Eligible = 1 [(gogoproto.nullable) = false];
+  map<string, Validators> Waiting = 2 [(gogoproto.nullable) = false];
+  map<string, Validators> Leaving = 3 [(gogoproto.nullable) = false];
+  map<string, Validators> ShuffledOut = 4 [(gogoproto.nullable) = false];
+}
+
+message NodesCoordinatorRegistryWithAuction {
+  uint32 CurrentEpoch = 1;
+  map<string, EpochValidatorsWithAuction> EpochsConfigWithAuction = 2;
+}
diff --git a/sharding/nodesCoordinator/shardingArgs.go b/sharding/nodesCoordinator/shardingArgs.go
index 71a6b2684c3..67c542952d7 100644
--- a/sharding/nodesCoordinator/shardingArgs.go
+++ b/sharding/nodesCoordinator/shardingArgs.go
@@ -11,26 +11,27 @@ import (
 // ArgNodesCoordinator holds all dependencies required by the nodes coordinator in order to create new instances
 type ArgNodesCoordinator struct {
-	ShardConsensusGroupSize int
-	MetaConsensusGroupSize int
-	Marshalizer 
marshal.Marshalizer - Hasher hashing.Hasher - Shuffler NodesShuffler - EpochStartNotifier EpochStartEventNotifier - BootStorer storage.Storer - ShardIDAsObserver uint32 - NbShards uint32 - EligibleNodes map[uint32][]Validator - WaitingNodes map[uint32][]Validator - SelfPublicKey []byte - Epoch uint32 - StartEpoch uint32 - ConsensusGroupCache Cacher - ShuffledOutHandler ShuffledOutHandler - ChanStopNode chan endProcess.ArgEndProcess - NodeTypeProvider NodeTypeProviderHandler - IsFullArchive bool - EnableEpochsHandler common.EnableEpochsHandler - ValidatorInfoCacher epochStart.ValidatorInfoCacher - GenesisNodesSetupHandler GenesisNodesSetupHandler + ShardConsensusGroupSize int + MetaConsensusGroupSize int + Marshalizer marshal.Marshalizer + Hasher hashing.Hasher + Shuffler NodesShuffler + EpochStartNotifier EpochStartEventNotifier + BootStorer storage.Storer + ShardIDAsObserver uint32 + NbShards uint32 + EligibleNodes map[uint32][]Validator + WaitingNodes map[uint32][]Validator + SelfPublicKey []byte + Epoch uint32 + StartEpoch uint32 + ConsensusGroupCache Cacher + ShuffledOutHandler ShuffledOutHandler + ChanStopNode chan endProcess.ArgEndProcess + NodeTypeProvider NodeTypeProviderHandler + IsFullArchive bool + EnableEpochsHandler common.EnableEpochsHandler + ValidatorInfoCacher epochStart.ValidatorInfoCacher + GenesisNodesSetupHandler GenesisNodesSetupHandler + NodesCoordinatorRegistryFactory NodesCoordinatorRegistryFactory } diff --git a/state/accounts/peerAccount.go b/state/accounts/peerAccount.go index a63b71ff040..8900edc6f1b 100644 --- a/state/accounts/peerAccount.go +++ b/state/accounts/peerAccount.go @@ -100,7 +100,12 @@ func (pa *peerAccount) SetTempRating(rating uint32) { } // SetListAndIndex will update the peer's list (eligible, waiting) and the index inside it with journal -func (pa *peerAccount) SetListAndIndex(shardID uint32, list string, index uint32) { +func (pa *peerAccount) SetListAndIndex(shardID uint32, list string, index uint32, updatePreviousValues bool) { + if updatePreviousValues { + pa.PreviousList = pa.List + pa.PreviousIndexInList = pa.IndexInList + } + pa.ShardId = shardID pa.List = list pa.IndexInList = index @@ -158,6 +163,11 @@ func (pa *peerAccount) GetTotalValidatorSuccessRate() state.SignRate { return &pa.TotalValidatorSuccessRate } +// SetPreviousList sets validator's previous list +func (pa *peerAccount) SetPreviousList(list string) { + pa.PreviousList = list +} + // IsInterfaceNil return if there is no value under the interface func (pa *peerAccount) IsInterfaceNil() bool { return pa == nil diff --git a/state/accounts/peerAccountData.pb.go b/state/accounts/peerAccountData.pb.go index 42b84c24dda..eb0a6ef69d9 100644 --- a/state/accounts/peerAccountData.pb.go +++ b/state/accounts/peerAccountData.pb.go @@ -96,6 +96,8 @@ type PeerAccountData struct { TotalValidatorIgnoredSignaturesRate uint32 `protobuf:"varint,16,opt,name=TotalValidatorIgnoredSignaturesRate,proto3" json:"totalValidatorIgnoredSignaturesRate"` Nonce uint64 `protobuf:"varint,17,opt,name=Nonce,proto3" json:"nonce"` UnStakedEpoch uint32 `protobuf:"varint,18,opt,name=UnStakedEpoch,proto3" json:"unStakedEpoch"` + PreviousList string `protobuf:"bytes,19,opt,name=PreviousList,proto3" json:"previousList,omitempty"` + PreviousIndexInList uint32 `protobuf:"varint,20,opt,name=PreviousIndexInList,proto3" json:"previousIndexInList,omitempty"` } func (m *PeerAccountData) Reset() { *m = PeerAccountData{} } @@ -252,6 +254,20 @@ func (m *PeerAccountData) GetUnStakedEpoch() uint32 { return 0 } +func (m 
*PeerAccountData) GetPreviousList() string { + if m != nil { + return m.PreviousList + } + return "" +} + +func (m *PeerAccountData) GetPreviousIndexInList() uint32 { + if m != nil { + return m.PreviousIndexInList + } + return 0 +} + func init() { proto.RegisterType((*SignRate)(nil), "proto.SignRate") proto.RegisterType((*PeerAccountData)(nil), "proto.PeerAccountData") @@ -260,56 +276,59 @@ func init() { func init() { proto.RegisterFile("peerAccountData.proto", fileDescriptor_26bd0314afcce126) } var fileDescriptor_26bd0314afcce126 = []byte{ - // 774 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x54, 0xcf, 0x8f, 0xdb, 0x44, - 0x18, 0x8d, 0xcb, 0x26, 0xbb, 0x3b, 0x49, 0x36, 0xcd, 0xa8, 0x14, 0x67, 0x81, 0x99, 0x90, 0x0a, - 0xc8, 0x81, 0x24, 0xe2, 0x87, 0xc4, 0x81, 0x53, 0x5c, 0x5a, 0x29, 0xb0, 0x54, 0xab, 0x49, 0x41, - 0x08, 0x24, 0xa4, 0xc9, 0x78, 0xea, 0x98, 0xda, 0x9e, 0x68, 0x66, 0xbc, 0xec, 0xde, 0xb8, 0x72, - 0xeb, 0x9f, 0x81, 0xf8, 0x4b, 0x7a, 0xdc, 0xe3, 0x9e, 0x0c, 0xeb, 0xbd, 0x20, 0x9f, 0xfa, 0x27, - 0x20, 0x4f, 0xdc, 0x34, 0x69, 0x9d, 0x5d, 0x4e, 0x89, 0xdf, 0x7b, 0xdf, 0xfb, 0xbe, 0x79, 0xfe, - 0xc6, 0xe0, 0xed, 0x05, 0xe7, 0x72, 0xcc, 0x98, 0x88, 0x23, 0xfd, 0x35, 0xd5, 0x74, 0xb8, 0x90, - 0x42, 0x0b, 0x58, 0x35, 0x3f, 0x87, 0x03, 0xcf, 0xd7, 0xf3, 0x78, 0x36, 0x64, 0x22, 0x1c, 0x79, - 0xc2, 0x13, 0x23, 0x03, 0xcf, 0xe2, 0x27, 0xe6, 0xc9, 0x3c, 0x98, 0x7f, 0xcb, 0xaa, 0xde, 0x37, - 0x60, 0x6f, 0xea, 0x7b, 0x11, 0xa1, 0x9a, 0x43, 0x04, 0xc0, 0xa3, 0x38, 0x9c, 0xc6, 0x8c, 0x71, - 0xa5, 0x6c, 0xab, 0x6b, 0xf5, 0x9b, 0x64, 0x0d, 0x29, 0xf8, 0x87, 0xd4, 0x0f, 0x62, 0xc9, 0xed, - 0x5b, 0x2b, 0xbe, 0x40, 0x7a, 0x7f, 0xd4, 0x41, 0xeb, 0x78, 0x73, 0x36, 0xf8, 0x05, 0x68, 0x38, - 0x47, 0xd3, 0xe3, 0x78, 0x16, 0xf8, 0xec, 0x5b, 0x7e, 0x66, 0x5c, 0x1b, 0xce, 0xed, 0x2c, 0xc1, - 0x8d, 0x59, 0xa0, 0x56, 0x38, 0xd9, 0x50, 0xc1, 0x31, 0x68, 0x12, 0xfe, 0x1b, 0x95, 0xee, 0xd8, - 0x75, 0x65, 0x3e, 0xcc, 0x2d, 0x53, 0xf6, 0x6e, 0x96, 0xe0, 0x77, 0xe4, 0x3a, 0xf1, 0x89, 0x08, - 0x7d, 0xcd, 0xc3, 0x85, 0x3e, 0x23, 0x9b, 0x15, 0xf0, 0x43, 0xb0, 0x3b, 0x9d, 0x53, 0xe9, 0x4e, - 0x5c, 0xfb, 0xad, 0x7c, 0x52, 0xa7, 0x9e, 0x25, 0x78, 0x57, 0x2d, 0x21, 0xf2, 0x92, 0x83, 0x14, - 0xdc, 0xf9, 0x81, 0x06, 0xbe, 0x4b, 0xb5, 0x90, 0xc5, 0x39, 0xf3, 0x2c, 0xec, 0x9d, 0xae, 0xd5, - 0xaf, 0x7f, 0xd6, 0x5a, 0xa6, 0x34, 0x7c, 0x19, 0x91, 0xf3, 0xde, 0xf3, 0x04, 0x57, 0xb2, 0x04, - 0xdf, 0x39, 0x29, 0x29, 0x22, 0xa5, 0x56, 0xf0, 0x47, 0xd0, 0x3e, 0xe2, 0xd4, 0xe5, 0x1b, 0xfe, - 0xd5, 0x72, 0xff, 0x4e, 0xe1, 0xdf, 0x0e, 0x5e, 0xaf, 0x20, 0x6f, 0x9a, 0xc0, 0x5f, 0x01, 0x5a, - 0x75, 0x9c, 0x78, 0x91, 0x90, 0xdc, 0xcd, 0x9d, 0xa8, 0x8e, 0x25, 0x5f, 0xb6, 0xa9, 0x99, 0xa3, - 0xf7, 0xb2, 0x04, 0xa3, 0x93, 0x6b, 0x95, 0xe4, 0x06, 0x27, 0xd8, 0x03, 0x35, 0x42, 0xb5, 0x1f, - 0x79, 0xf6, 0xae, 0xf1, 0x04, 0x59, 0x82, 0x6b, 0xd2, 0x20, 0xa4, 0x60, 0xe0, 0x10, 0x80, 0xc7, - 0x3c, 0x5c, 0x14, 0xba, 0x3d, 0xa3, 0x3b, 0xc8, 0x12, 0x0c, 0xf4, 0x0a, 0x25, 0x6b, 0x0a, 0xf8, - 0xcc, 0x02, 0xad, 0x31, 0x63, 0x71, 0x18, 0x07, 0x54, 0x73, 0xf7, 0x21, 0xe7, 0xca, 0xde, 0x37, - 0x6f, 0xfa, 0x49, 0x96, 0xe0, 0x0e, 0xdd, 0xa4, 0x5e, 0xbd, 0xeb, 0xbf, 0xfe, 0xc6, 0x0f, 0x42, - 0xaa, 0xe7, 0xa3, 0x99, 0xef, 0x0d, 0x27, 0x91, 0xfe, 0x6a, 0x6d, 0xe7, 0xc3, 0x38, 0xd0, 0xfe, - 0x09, 0x97, 0xea, 0x74, 0x14, 0x9e, 0x0e, 0xd8, 0x9c, 0xfa, 0xd1, 0x80, 0x09, 0xc9, 0x07, 0x9e, - 0x18, 0xb9, 0xf9, 0x6d, 0x71, 0x7c, 0x6f, 0x12, 0xe9, 0xfb, 0x54, 0x69, 0x2e, 0xc9, 0xeb, 0xed, - 0xe1, 0x2f, 0xe0, 
0x30, 0xdf, 0x78, 0x1e, 0x70, 0xa6, 0xb9, 0x3b, 0x89, 0x8a, 0xb8, 0x9d, 0x40, - 0xb0, 0xa7, 0xca, 0x06, 0xe6, 0x48, 0x28, 0x4b, 0xf0, 0x61, 0xb4, 0x55, 0x45, 0xae, 0x71, 0x80, - 0x9f, 0x82, 0xfa, 0x24, 0x72, 0xf9, 0xe9, 0x24, 0x3a, 0xf2, 0x95, 0xb6, 0xeb, 0xc6, 0xb0, 0x95, - 0x25, 0xb8, 0xee, 0xbf, 0x82, 0xc9, 0xba, 0x06, 0x7e, 0x04, 0x76, 0x8c, 0xb6, 0xd1, 0xb5, 0xfa, - 0xfb, 0x0e, 0xcc, 0x12, 0x7c, 0x10, 0xf8, 0x4a, 0xaf, 0xad, 0xbe, 0xe1, 0xe1, 0xcf, 0xa0, 0x73, - 0x5f, 0x44, 0x8a, 0xb3, 0x38, 0x0f, 0xe0, 0x58, 0x8a, 0x85, 0x50, 0x5c, 0x7e, 0xe7, 0x2b, 0xc5, - 0x95, 0xdd, 0x34, 0x8d, 0xde, 0xcf, 0x63, 0x65, 0xdb, 0x44, 0x64, 0x7b, 0x3d, 0x5c, 0x80, 0xce, - 0x63, 0xa1, 0x69, 0x50, 0x7a, 0x59, 0x0e, 0xca, 0x97, 0xf9, 0x83, 0x62, 0x99, 0x3b, 0x7a, 0x5b, - 0x25, 0xd9, 0x6e, 0x0a, 0x3d, 0x70, 0xd7, 0x90, 0x6f, 0xde, 0x9d, 0x56, 0x79, 0x3b, 0x54, 0xb4, - 0xbb, 0xab, 0x4b, 0xcb, 0xc8, 0x16, 0x3b, 0x78, 0x06, 0xee, 0x6d, 0x4e, 0x51, 0x7e, 0x95, 0x6e, - 0x9b, 0x04, 0x3f, 0xce, 0x12, 0x7c, 0x4f, 0xdf, 0x2c, 0x27, 0xff, 0xc7, 0x13, 0x62, 0x50, 0x7d, - 0x24, 0x22, 0xc6, 0xed, 0x76, 0xd7, 0xea, 0xef, 0x38, 0xfb, 0x59, 0x82, 0xab, 0x51, 0x0e, 0x90, - 0x25, 0x0e, 0xbf, 0x04, 0xcd, 0xef, 0xa3, 0xa9, 0xa6, 0x4f, 0xb9, 0xfb, 0x60, 0x21, 0xd8, 0xdc, - 0x86, 0x66, 0x8a, 0x76, 0x96, 0xe0, 0x66, 0xbc, 0x4e, 0x90, 0x4d, 0x9d, 0xe3, 0x9c, 0x5f, 0xa2, - 0xca, 0xc5, 0x25, 0xaa, 0xbc, 0xb8, 0x44, 0xd6, 0xef, 0x29, 0xb2, 0xfe, 0x4c, 0x91, 0xf5, 0x3c, - 0x45, 0xd6, 0x79, 0x8a, 0xac, 0x8b, 0x14, 0x59, 0xff, 0xa4, 0xc8, 0xfa, 0x37, 0x45, 0x95, 0x17, - 0x29, 0xb2, 0x9e, 0x5d, 0xa1, 0xca, 0xf9, 0x15, 0xaa, 0x5c, 0x5c, 0xa1, 0xca, 0x4f, 0x7b, 0x74, - 0xf9, 0xf9, 0x56, 0xb3, 0x9a, 0x09, 0xf8, 0xf3, 0xff, 0x02, 0x00, 0x00, 0xff, 0xff, 0x7c, 0xff, - 0x1c, 0x23, 0x71, 0x06, 0x00, 0x00, + // 822 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x55, 0x4f, 0x6f, 0xdb, 0x36, + 0x1c, 0xb5, 0xba, 0xfc, 0xa5, 0xed, 0xb8, 0x61, 0xb3, 0x4e, 0xce, 0x56, 0x32, 0x4d, 0xb1, 0x2d, + 0x87, 0xc5, 0xc6, 0xfe, 0x00, 0x3b, 0x0c, 0x18, 0x10, 0x75, 0x2d, 0xe0, 0x2d, 0x2b, 0x02, 0xba, + 0x1b, 0x86, 0x0d, 0x18, 0x40, 0x4b, 0xac, 0xcc, 0x55, 0x12, 0x05, 0x92, 0xca, 0x92, 0xdb, 0x3e, + 0x42, 0x3f, 0xc1, 0xce, 0xc3, 0x3e, 0x49, 0x8f, 0x39, 0xe6, 0xc4, 0x2d, 0xce, 0x65, 0xd0, 0xa9, + 0x1f, 0x61, 0x10, 0xad, 0xb8, 0x72, 0x23, 0xb7, 0x3d, 0xd9, 0x7c, 0xef, 0xfd, 0xde, 0x8f, 0xfc, + 0xf1, 0x11, 0x02, 0xef, 0xa6, 0x8c, 0xc9, 0x03, 0xdf, 0x17, 0x59, 0xa2, 0xbf, 0xa1, 0x9a, 0xf6, + 0x52, 0x29, 0xb4, 0x80, 0xcb, 0xf6, 0x67, 0x7b, 0x3f, 0xe4, 0x7a, 0x9c, 0x8d, 0x7a, 0xbe, 0x88, + 0xfb, 0xa1, 0x08, 0x45, 0xdf, 0xc2, 0xa3, 0xec, 0x89, 0x5d, 0xd9, 0x85, 0xfd, 0x37, 0xad, 0xda, + 0xfd, 0x16, 0xac, 0x0d, 0x79, 0x98, 0x10, 0xaa, 0x19, 0x44, 0x00, 0x3c, 0xca, 0xe2, 0x61, 0xe6, + 0xfb, 0x4c, 0x29, 0xd7, 0xd9, 0x71, 0xf6, 0xda, 0xa4, 0x82, 0x94, 0xfc, 0x43, 0xca, 0xa3, 0x4c, + 0x32, 0xf7, 0xc6, 0x8c, 0x2f, 0x91, 0xdd, 0x3f, 0x5b, 0xa0, 0x73, 0x34, 0xbf, 0x37, 0xf8, 0x05, + 0x68, 0x79, 0x87, 0xc3, 0xa3, 0x6c, 0x14, 0x71, 0xff, 0x3b, 0x76, 0x6a, 0x5d, 0x5b, 0xde, 0xcd, + 0xdc, 0xe0, 0xd6, 0x28, 0x52, 0x33, 0x9c, 0xcc, 0xa9, 0xe0, 0x01, 0x68, 0x13, 0xf6, 0x3b, 0x95, + 0xc1, 0x41, 0x10, 0xc8, 0x62, 0x33, 0x37, 0x6c, 0xd9, 0xfb, 0xb9, 0xc1, 0xef, 0xc9, 0x2a, 0xf1, + 0x89, 0x88, 0xb9, 0x66, 0x71, 0xaa, 0x4f, 0xc9, 0x7c, 0x05, 0xfc, 0x10, 0xac, 0x0e, 0xc7, 0x54, + 0x06, 0x83, 0xc0, 0x7d, 0xa7, 0xd8, 0xa9, 0xd7, 0xcc, 0x0d, 0x5e, 0x55, 0x53, 0x88, 0x5c, 0x71, + 0x90, 0x82, 0xad, 0x1f, 0x69, 0xc4, 0x03, 0xaa, 0x85, 
0x2c, 0xcf, 0x59, 0xcc, 0xc2, 0x5d, 0xda, + 0x71, 0xf6, 0x9a, 0x9f, 0x75, 0xa6, 0x53, 0xea, 0x5d, 0x8d, 0xc8, 0xfb, 0xe0, 0xb9, 0xc1, 0x8d, + 0xdc, 0xe0, 0xad, 0xe3, 0x9a, 0x22, 0x52, 0x6b, 0x05, 0x7f, 0x02, 0x9b, 0x87, 0x8c, 0x06, 0x6c, + 0xce, 0x7f, 0xb9, 0xde, 0xbf, 0x5b, 0xfa, 0x6f, 0x46, 0xaf, 0x56, 0x90, 0xeb, 0x26, 0xf0, 0x37, + 0x80, 0x66, 0x1d, 0x07, 0x61, 0x22, 0x24, 0x0b, 0x0a, 0x27, 0xaa, 0x33, 0xc9, 0xa6, 0x6d, 0x56, + 0xec, 0xd1, 0x77, 0x73, 0x83, 0xd1, 0xf1, 0x6b, 0x95, 0xe4, 0x0d, 0x4e, 0x70, 0x17, 0xac, 0x10, + 0xaa, 0x79, 0x12, 0xba, 0xab, 0xd6, 0x13, 0xe4, 0x06, 0xaf, 0x48, 0x8b, 0x90, 0x92, 0x81, 0x3d, + 0x00, 0x1e, 0xb3, 0x38, 0x2d, 0x75, 0x6b, 0x56, 0xb7, 0x91, 0x1b, 0x0c, 0xf4, 0x0c, 0x25, 0x15, + 0x05, 0x7c, 0xe6, 0x80, 0xce, 0x81, 0xef, 0x67, 0x71, 0x16, 0x51, 0xcd, 0x82, 0x87, 0x8c, 0x29, + 0x77, 0xdd, 0xde, 0xf4, 0x93, 0xdc, 0xe0, 0x2e, 0x9d, 0xa7, 0x5e, 0xde, 0xf5, 0xdf, 0xff, 0xe0, + 0x07, 0x31, 0xd5, 0xe3, 0xfe, 0x88, 0x87, 0xbd, 0x41, 0xa2, 0xbf, 0xaa, 0x64, 0x3e, 0xce, 0x22, + 0xcd, 0x8f, 0x99, 0x54, 0x27, 0xfd, 0xf8, 0x64, 0xdf, 0x1f, 0x53, 0x9e, 0xec, 0xfb, 0x42, 0xb2, + 0xfd, 0x50, 0xf4, 0x83, 0xe2, 0xb5, 0x78, 0x3c, 0x1c, 0x24, 0xfa, 0x3e, 0x55, 0x9a, 0x49, 0xf2, + 0x6a, 0x7b, 0xf8, 0x2b, 0xd8, 0x2e, 0x12, 0xcf, 0x22, 0xe6, 0x6b, 0x16, 0x0c, 0x92, 0x72, 0xdc, + 0x5e, 0x24, 0xfc, 0xa7, 0xca, 0x05, 0xf6, 0x48, 0x28, 0x37, 0x78, 0x3b, 0x59, 0xa8, 0x22, 0xaf, + 0x71, 0x80, 0x9f, 0x82, 0xe6, 0x20, 0x09, 0xd8, 0xc9, 0x20, 0x39, 0xe4, 0x4a, 0xbb, 0x4d, 0x6b, + 0xd8, 0xc9, 0x0d, 0x6e, 0xf2, 0x97, 0x30, 0xa9, 0x6a, 0xe0, 0x47, 0x60, 0xc9, 0x6a, 0x5b, 0x3b, + 0xce, 0xde, 0xba, 0x07, 0x73, 0x83, 0x37, 0x22, 0xae, 0x74, 0x25, 0xfa, 0x96, 0x87, 0xbf, 0x80, + 0xee, 0x7d, 0x91, 0x28, 0xe6, 0x67, 0xc5, 0x00, 0x8e, 0xa4, 0x48, 0x85, 0x62, 0xf2, 0x7b, 0xae, + 0x14, 0x53, 0x6e, 0xdb, 0x36, 0xba, 0x53, 0x8c, 0xd5, 0x5f, 0x24, 0x22, 0x8b, 0xeb, 0x61, 0x0a, + 0xba, 0x8f, 0x85, 0xa6, 0x51, 0xed, 0x63, 0xd9, 0xa8, 0x0f, 0xf3, 0xdd, 0x32, 0xcc, 0x5d, 0xbd, + 0xa8, 0x92, 0x2c, 0x36, 0x85, 0x21, 0xb8, 0x6d, 0xc9, 0xeb, 0x6f, 0xa7, 0x53, 0xdf, 0x0e, 0x95, + 0xed, 0x6e, 0xeb, 0xda, 0x32, 0xb2, 0xc0, 0x0e, 0x9e, 0x82, 0x7b, 0xf3, 0xbb, 0xa8, 0x7f, 0x4a, + 0x37, 0xed, 0x04, 0x3f, 0xce, 0x0d, 0xbe, 0xa7, 0xdf, 0x2c, 0x27, 0x6f, 0xe3, 0x09, 0x31, 0x58, + 0x7e, 0x24, 0x12, 0x9f, 0xb9, 0x9b, 0x3b, 0xce, 0xde, 0x92, 0xb7, 0x9e, 0x1b, 0xbc, 0x9c, 0x14, + 0x00, 0x99, 0xe2, 0xf0, 0x4b, 0xd0, 0xfe, 0x21, 0x19, 0x6a, 0xfa, 0x94, 0x05, 0x0f, 0x52, 0xe1, + 0x8f, 0x5d, 0x68, 0x77, 0xb1, 0x99, 0x1b, 0xdc, 0xce, 0xaa, 0x04, 0x99, 0xd7, 0xc1, 0xaf, 0x41, + 0xeb, 0x48, 0xb2, 0x63, 0x2e, 0x32, 0x65, 0xc3, 0x73, 0xcb, 0x86, 0x67, 0xbb, 0x18, 0x4f, 0x5a, + 0xc1, 0x2b, 0x21, 0x9a, 0xd3, 0xc3, 0x21, 0xb8, 0x75, 0xb5, 0xae, 0xe6, 0x75, 0xcb, 0xb6, 0xbf, + 0x9b, 0x1b, 0x7c, 0x27, 0xbd, 0x4e, 0x57, 0xdc, 0xea, 0xaa, 0x3d, 0xef, 0xec, 0x02, 0x35, 0xce, + 0x2f, 0x50, 0xe3, 0xc5, 0x05, 0x72, 0xfe, 0x98, 0x20, 0xe7, 0xaf, 0x09, 0x72, 0x9e, 0x4f, 0x90, + 0x73, 0x36, 0x41, 0xce, 0xf9, 0x04, 0x39, 0xff, 0x4e, 0x90, 0xf3, 0xdf, 0x04, 0x35, 0x5e, 0x4c, + 0x90, 0xf3, 0xec, 0x12, 0x35, 0xce, 0x2e, 0x51, 0xe3, 0xfc, 0x12, 0x35, 0x7e, 0x5e, 0xa3, 0xd3, + 0x6f, 0x8a, 0x1a, 0xad, 0xd8, 0x5b, 0xff, 0xfc, 0xff, 0x00, 0x00, 0x00, 0xff, 0xff, 0x70, 0x40, + 0xd1, 0x9b, 0x06, 0x07, 0x00, 0x00, } func (this *SignRate) Equal(that interface{}) bool { @@ -415,6 +434,12 @@ func (this *PeerAccountData) Equal(that interface{}) bool { if this.UnStakedEpoch != that1.UnStakedEpoch { return false } + if this.PreviousList != 
that1.PreviousList { + return false + } + if this.PreviousIndexInList != that1.PreviousIndexInList { + return false + } return true } func (this *SignRate) GoString() string { @@ -432,7 +457,7 @@ func (this *PeerAccountData) GoString() string { if this == nil { return "nil" } - s := make([]string, 0, 22) + s := make([]string, 0, 24) s = append(s, "&accounts.PeerAccountData{") s = append(s, "BLSPublicKey: "+fmt.Sprintf("%#v", this.BLSPublicKey)+",\n") s = append(s, "RewardAddress: "+fmt.Sprintf("%#v", this.RewardAddress)+",\n") @@ -452,6 +477,8 @@ func (this *PeerAccountData) GoString() string { s = append(s, "TotalValidatorIgnoredSignaturesRate: "+fmt.Sprintf("%#v", this.TotalValidatorIgnoredSignaturesRate)+",\n") s = append(s, "Nonce: "+fmt.Sprintf("%#v", this.Nonce)+",\n") s = append(s, "UnStakedEpoch: "+fmt.Sprintf("%#v", this.UnStakedEpoch)+",\n") + s = append(s, "PreviousList: "+fmt.Sprintf("%#v", this.PreviousList)+",\n") + s = append(s, "PreviousIndexInList: "+fmt.Sprintf("%#v", this.PreviousIndexInList)+",\n") s = append(s, "}") return strings.Join(s, "") } @@ -516,6 +543,22 @@ func (m *PeerAccountData) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.PreviousIndexInList != 0 { + i = encodeVarintPeerAccountData(dAtA, i, uint64(m.PreviousIndexInList)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xa0 + } + if len(m.PreviousList) > 0 { + i -= len(m.PreviousList) + copy(dAtA[i:], m.PreviousList) + i = encodeVarintPeerAccountData(dAtA, i, uint64(len(m.PreviousList))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x9a + } if m.UnStakedEpoch != 0 { i = encodeVarintPeerAccountData(dAtA, i, uint64(m.UnStakedEpoch)) i-- @@ -734,6 +777,13 @@ func (m *PeerAccountData) Size() (n int) { if m.UnStakedEpoch != 0 { n += 2 + sovPeerAccountData(uint64(m.UnStakedEpoch)) } + l = len(m.PreviousList) + if l > 0 { + n += 2 + l + sovPeerAccountData(uint64(l)) + } + if m.PreviousIndexInList != 0 { + n += 2 + sovPeerAccountData(uint64(m.PreviousIndexInList)) + } return n } @@ -777,6 +827,8 @@ func (this *PeerAccountData) String() string { `TotalValidatorIgnoredSignaturesRate:` + fmt.Sprintf("%v", this.TotalValidatorIgnoredSignaturesRate) + `,`, `Nonce:` + fmt.Sprintf("%v", this.Nonce) + `,`, `UnStakedEpoch:` + fmt.Sprintf("%v", this.UnStakedEpoch) + `,`, + `PreviousList:` + fmt.Sprintf("%v", this.PreviousList) + `,`, + `PreviousIndexInList:` + fmt.Sprintf("%v", this.PreviousIndexInList) + `,`, `}`, }, "") return s @@ -1369,6 +1421,57 @@ func (m *PeerAccountData) Unmarshal(dAtA []byte) error { break } } + case 19: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PreviousList", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPeerAccountData + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPeerAccountData + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPeerAccountData + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PreviousList = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 20: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PreviousIndexInList", wireType) + } + m.PreviousIndexInList = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPeerAccountData + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.PreviousIndexInList |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := skipPeerAccountData(dAtA[iNdEx:]) diff --git a/state/accounts/peerAccountData.proto b/state/accounts/peerAccountData.proto index d4cc3292c38..1a3e99a295f 100644 --- a/state/accounts/peerAccountData.proto +++ b/state/accounts/peerAccountData.proto @@ -33,4 +33,6 @@ message PeerAccountData { uint32 TotalValidatorIgnoredSignaturesRate = 16 [(gogoproto.jsontag) = "totalValidatorIgnoredSignaturesRate"]; uint64 Nonce = 17 [(gogoproto.jsontag) = "nonce"]; uint32 UnStakedEpoch = 18 [(gogoproto.jsontag) = "unStakedEpoch"]; + string PreviousList = 19 [(gogoproto.jsontag) = "previousList,omitempty"]; + uint32 PreviousIndexInList = 20 [(gogoproto.jsontag) = "previousIndexInList,omitempty"]; } diff --git a/state/accountsDB.go b/state/accountsDB.go index 06fb88eac3a..249dd64f471 100644 --- a/state/accountsDB.go +++ b/state/accountsDB.go @@ -785,6 +785,7 @@ func (adb *AccountsDB) CommitInEpoch(currentEpoch uint32, epochToCommit uint32) adb.mutOp.Lock() defer func() { adb.mainTrie.GetStorageManager().SetEpochForPutOperation(currentEpoch) + adb.mainTrie.GetStorageManager().GetStateStatsHandler().Reset() adb.mutOp.Unlock() adb.loadCodeMeasurements.resetAndPrint() }() diff --git a/state/accountsDBApi.go b/state/accountsDBApi.go index af8fdd5a763..86daccf660c 100644 --- a/state/accountsDBApi.go +++ b/state/accountsDBApi.go @@ -171,8 +171,28 @@ func (accountsDB *accountsDBApi) RecreateTrie(rootHash []byte) error { } // RecreateTrieFromEpoch is a not permitted operation in this implementation and thus, will return an error -func (accountsDB *accountsDBApi) RecreateTrieFromEpoch(_ common.RootHashHolder) error { - return ErrOperationNotPermitted +func (accountsDB *accountsDBApi) RecreateTrieFromEpoch(options common.RootHashHolder) error { + accountsDB.mutRecreatedTrieBlockInfo.Lock() + defer accountsDB.mutRecreatedTrieBlockInfo.Unlock() + + if check.IfNil(options) { + return ErrNilRootHashHolder + } + + newBlockInfo := holders.NewBlockInfo([]byte{}, 0, options.GetRootHash()) + if newBlockInfo.Equal(accountsDB.blockInfo) { + return nil + } + + err := accountsDB.innerAccountsAdapter.RecreateTrieFromEpoch(options) + if err != nil { + accountsDB.blockInfo = nil + return err + } + + accountsDB.blockInfo = newBlockInfo + + return nil } // PruneTrie is a not permitted operation in this implementation and thus, does nothing diff --git a/state/accountsDBApi_test.go b/state/accountsDBApi_test.go index aee169c4f64..0d9aea1c098 100644 --- a/state/accountsDBApi_test.go +++ b/state/accountsDBApi_test.go @@ -16,7 +16,8 @@ import ( "github.com/multiversx/mx-chain-go/state/parsers" "github.com/multiversx/mx-chain-go/testscommon" mockState "github.com/multiversx/mx-chain-go/testscommon/state" - "github.com/multiversx/mx-chain-go/testscommon/trie" + testTrie "github.com/multiversx/mx-chain-go/testscommon/trie" + "github.com/multiversx/mx-chain-go/trie" vmcommon "github.com/multiversx/mx-chain-vm-common-go" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -195,7 +196,6 @@ func TestAccountsDBApi_NotPermittedOperations(t *testing.T) { assert.Equal(t, state.ErrOperationNotPermitted, accountsApi.SaveAccount(nil)) assert.Equal(t, state.ErrOperationNotPermitted, accountsApi.RemoveAccount(nil)) assert.Equal(t, state.ErrOperationNotPermitted, accountsApi.RevertToSnapshot(0)) - assert.Equal(t, state.ErrOperationNotPermitted, 
accountsApi.RecreateTrieFromEpoch(nil)) buff, err := accountsApi.CommitInEpoch(0, 0) assert.Nil(t, buff) @@ -226,6 +226,41 @@ func TestAccountsDBApi_RecreateTrie(t *testing.T) { assert.True(t, wasCalled) } +func TestAccountsDBApi_RecreateTrieFromEpoch(t *testing.T) { + t.Parallel() + + t.Run("should error if the roothash holder is nil", func(t *testing.T) { + accountsApi, _ := state.NewAccountsDBApi(&mockState.AccountsStub{ + RecreateTrieFromEpochCalled: func(options common.RootHashHolder) error { + assert.Fail(t, "should have not called accountsApi.RecreateTrieFromEpochCalled") + + return nil + }, + }, createBlockInfoProviderStub(dummyRootHash)) + + err := accountsApi.RecreateTrieFromEpoch(nil) + assert.Equal(t, trie.ErrNilRootHashHolder, err) + }) + t.Run("should work", func(t *testing.T) { + wasCalled := false + rootHash := []byte("root hash") + epoch := core.OptionalUint32{Value: 37, HasValue: true} + accountsApi, _ := state.NewAccountsDBApi(&mockState.AccountsStub{ + RecreateTrieFromEpochCalled: func(options common.RootHashHolder) error { + wasCalled = true + assert.Equal(t, rootHash, options.GetRootHash()) + assert.Equal(t, epoch, options.GetEpoch()) + return nil + }, + }, createBlockInfoProviderStub(dummyRootHash)) + + holder := holders.NewRootHashHolder(rootHash, epoch) + err := accountsApi.RecreateTrieFromEpoch(holder) + assert.NoError(t, err) + assert.True(t, wasCalled) + }) +} + func TestAccountsDBApi_EmptyMethodsShouldNotPanic(t *testing.T) { t.Parallel() @@ -272,7 +307,7 @@ func TestAccountsDBApi_SimpleProxyMethodsShouldWork(t *testing.T) { }, GetTrieCalled: func(i []byte) (common.Trie, error) { getTrieCalled = true - return &trie.TrieStub{}, nil + return &testTrie.TrieStub{}, nil }, } diff --git a/state/errors.go b/state/errors.go index fd8c0057241..168dc098b98 100644 --- a/state/errors.go +++ b/state/errors.go @@ -145,6 +145,9 @@ var ErrNilStateMetrics = errors.New("nil sstate metrics") // ErrNilChannelsProvider signals that a nil channels provider has been given var ErrNilChannelsProvider = errors.New("nil channels provider") +// ErrNilRootHashHolder signals that a nil root hash holder was provided +var ErrNilRootHashHolder = errors.New("nil root hash holder provided") + // ErrNilStatsHandler signals that a nil stats handler provider has been given var ErrNilStatsHandler = errors.New("nil stats handler") @@ -153,3 +156,12 @@ var ErrNilLastSnapshotMarker = errors.New("nil last snapshot marker") // ErrNilSnapshotsManager signals that a nil snapshots manager has been given var ErrNilSnapshotsManager = errors.New("nil snapshots manager") + +// ErrNilValidatorInfo signals that a nil value for the validator info has been provided +var ErrNilValidatorInfo = errors.New("validator info is nil") + +// ErrValidatorsDifferentShards signals that validators are not in the same shard +var ErrValidatorsDifferentShards = errors.New("validators are not in the same shard") + +// ErrValidatorNotFound signals that a validator was not found +var ErrValidatorNotFound = errors.New("validator not found") diff --git a/state/interface.go b/state/interface.go index 06050f95fcb..bf515803346 100644 --- a/state/interface.go +++ b/state/interface.go @@ -23,6 +23,47 @@ type Updater interface { IsInterfaceNil() bool } +// PeerAccountHandler models a peer state account, which can journalize a normal account's data +// with some extra features like signing statistics or rating information +type PeerAccountHandler interface { + GetBLSPublicKey() []byte + SetBLSPublicKey([]byte) error + GetRewardAddress() 
[]byte + SetRewardAddress([]byte) error + GetAccumulatedFees() *big.Int + AddToAccumulatedFees(*big.Int) + GetList() string + GetPreviousList() string + GetIndexInList() uint32 + GetPreviousIndexInList() uint32 + GetShardId() uint32 + SetUnStakedEpoch(epoch uint32) + GetUnStakedEpoch() uint32 + IncreaseLeaderSuccessRate(uint32) + DecreaseLeaderSuccessRate(uint32) + IncreaseValidatorSuccessRate(uint32) + DecreaseValidatorSuccessRate(uint32) + IncreaseValidatorIgnoredSignaturesRate(uint32) + GetNumSelectedInSuccessBlocks() uint32 + IncreaseNumSelectedInSuccessBlocks() + GetLeaderSuccessRate() SignRate + GetValidatorSuccessRate() SignRate + GetValidatorIgnoredSignaturesRate() uint32 + GetTotalLeaderSuccessRate() SignRate + GetTotalValidatorSuccessRate() SignRate + GetTotalValidatorIgnoredSignaturesRate() uint32 + SetListAndIndex(shardID uint32, list string, index uint32, updatePreviousValues bool) + GetRating() uint32 + SetRating(uint32) + GetTempRating() uint32 + SetTempRating(uint32) + GetConsecutiveProposerMisses() uint32 + SetConsecutiveProposerMisses(uint322 uint32) + ResetAtNewEpoch() + SetPreviousList(list string) + vmcommon.AccountHandler +} + // AccountsAdapter is used for the structure that manages the accounts on top of a trie.PatriciaMerkleTrie // implementation type AccountsAdapter interface { @@ -180,43 +221,6 @@ type DataTrie interface { CollectLeavesForMigration(args vmcommon.ArgsMigrateDataTrieLeaves) error } -// PeerAccountHandler models a peer state account, which can journalize a normal account's data -// with some extra features like signing statistics or rating information -type PeerAccountHandler interface { - SetBLSPublicKey([]byte) error - GetRewardAddress() []byte - SetRewardAddress([]byte) error - GetAccumulatedFees() *big.Int - AddToAccumulatedFees(*big.Int) - GetList() string - GetIndexInList() uint32 - GetShardId() uint32 - SetUnStakedEpoch(epoch uint32) - GetUnStakedEpoch() uint32 - IncreaseLeaderSuccessRate(uint32) - DecreaseLeaderSuccessRate(uint32) - IncreaseValidatorSuccessRate(uint32) - DecreaseValidatorSuccessRate(uint32) - IncreaseValidatorIgnoredSignaturesRate(uint32) - GetNumSelectedInSuccessBlocks() uint32 - IncreaseNumSelectedInSuccessBlocks() - GetLeaderSuccessRate() SignRate - GetValidatorSuccessRate() SignRate - GetValidatorIgnoredSignaturesRate() uint32 - GetTotalLeaderSuccessRate() SignRate - GetTotalValidatorSuccessRate() SignRate - GetTotalValidatorIgnoredSignaturesRate() uint32 - SetListAndIndex(shardID uint32, list string, index uint32) - GetRating() uint32 - SetRating(uint32) - GetTempRating() uint32 - SetTempRating(uint32) - GetConsecutiveProposerMisses() uint32 - SetConsecutiveProposerMisses(uint322 uint32) - ResetAtNewEpoch() - vmcommon.AccountHandler -} - // UserAccountHandler models a user account, which can journalize account's data with some extra features // like balance, developer rewards, owner type UserAccountHandler interface { @@ -278,3 +282,71 @@ type LastSnapshotMarker interface { GetMarkerInfo(trieStorageManager common.StorageManager) ([]byte, error) IsInterfaceNil() bool } + +// ShardValidatorsInfoMapHandler shall be used to manage operations inside +// a map in a concurrent-safe way. 
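+//
+// A minimal usage sketch, assuming the shardValidatorsInfoMap implementation added in
+// validatorsInfoMap.go (illustrative only, based on the accompanying tests):
+//
+//	validators := NewShardValidatorsInfoMap()
+//	_ = validators.Add(&ValidatorInfo{ShardId: 0, PublicKey: []byte("pk0")})
+//	shard0Validators := validators.GetShardValidatorsInfoMap()[0]
+//	_ = validators.SetValidatorsInShard(0, shard0Validators)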
+type ShardValidatorsInfoMapHandler interface { + GetShardValidatorsInfoMap() map[uint32][]ValidatorInfoHandler + GetAllValidatorsInfo() []ValidatorInfoHandler + GetValidator(blsKey []byte) ValidatorInfoHandler + + Add(validator ValidatorInfoHandler) error + Delete(validator ValidatorInfoHandler) error + Replace(old ValidatorInfoHandler, new ValidatorInfoHandler) error + SetValidatorsInShard(shardID uint32, validators []ValidatorInfoHandler) error +} + +// ValidatorInfoHandler defines which data shall a validator info hold. +type ValidatorInfoHandler interface { + IsInterfaceNil() bool + + GetPublicKey() []byte + GetShardId() uint32 + GetList() string + GetIndex() uint32 + GetPreviousIndex() uint32 + GetTempRating() uint32 + GetRating() uint32 + GetRatingModifier() float32 + GetRewardAddress() []byte + GetLeaderSuccess() uint32 + GetLeaderFailure() uint32 + GetValidatorSuccess() uint32 + GetValidatorFailure() uint32 + GetValidatorIgnoredSignatures() uint32 + GetNumSelectedInSuccessBlocks() uint32 + GetAccumulatedFees() *big.Int + GetTotalLeaderSuccess() uint32 + GetTotalLeaderFailure() uint32 + GetTotalValidatorSuccess() uint32 + GetTotalValidatorFailure() uint32 + GetTotalValidatorIgnoredSignatures() uint32 + GetPreviousList() string + + SetPublicKey(publicKey []byte) + SetShardId(shardID uint32) + SetPreviousList(list string) + SetList(list string) + SetIndex(index uint32) + SetListAndIndex(list string, index uint32, updatePreviousValues bool) + SetTempRating(tempRating uint32) + SetRating(rating uint32) + SetRatingModifier(ratingModifier float32) + SetRewardAddress(rewardAddress []byte) + SetLeaderSuccess(leaderSuccess uint32) + SetLeaderFailure(leaderFailure uint32) + SetValidatorSuccess(validatorSuccess uint32) + SetValidatorFailure(validatorFailure uint32) + SetValidatorIgnoredSignatures(validatorIgnoredSignatures uint32) + SetNumSelectedInSuccessBlocks(numSelectedInSuccessBlock uint32) + SetAccumulatedFees(accumulatedFees *big.Int) + SetTotalLeaderSuccess(totalLeaderSuccess uint32) + SetTotalLeaderFailure(totalLeaderFailure uint32) + SetTotalValidatorSuccess(totalValidatorSuccess uint32) + SetTotalValidatorFailure(totalValidatorFailure uint32) + SetTotalValidatorIgnoredSignatures(totalValidatorIgnoredSignatures uint32) + + ShallowClone() ValidatorInfoHandler + String() string + GoString() string +} diff --git a/state/validatorInfo.go b/state/validatorInfo.go index 2ca0cf416e0..c6ea6d06001 100644 --- a/state/validatorInfo.go +++ b/state/validatorInfo.go @@ -2,11 +2,138 @@ package state +import mathbig "math/big" + // IsInterfaceNil returns true if there is no value under the interface func (vi *ValidatorInfo) IsInterfaceNil() bool { return vi == nil } +// SetPublicKey sets validator's public key +func (vi *ValidatorInfo) SetPublicKey(publicKey []byte) { + vi.PublicKey = publicKey +} + +// SetList sets validator's list +func (vi *ValidatorInfo) SetList(list string) { + vi.List = list +} + +// SetPreviousList sets validator's previous list +func (vi *ValidatorInfo) SetPreviousList(list string) { + vi.PreviousList = list +} + +func (vi *ValidatorInfo) SetListAndIndex(list string, index uint32, updatePreviousValues bool) { + if updatePreviousValues { + vi.PreviousIndex = vi.Index + vi.PreviousList = vi.List + } + + vi.List = list + vi.Index = index +} + +// SetShardId sets validator's public shard id +func (vi *ValidatorInfo) SetShardId(shardID uint32) { + vi.ShardId = shardID +} + +// SetIndex sets validator's index +func (vi *ValidatorInfo) SetIndex(index uint32) { + vi.Index = index 
+} + +// SetTempRating sets validator's temp rating +func (vi *ValidatorInfo) SetTempRating(tempRating uint32) { + vi.TempRating = tempRating +} + +// SetRating sets validator's rating +func (vi *ValidatorInfo) SetRating(rating uint32) { + vi.Rating = rating +} + +// SetRatingModifier sets validator's rating modifier +func (vi *ValidatorInfo) SetRatingModifier(ratingModifier float32) { + vi.RatingModifier = ratingModifier +} + +// SetRewardAddress sets validator's reward address +func (vi *ValidatorInfo) SetRewardAddress(rewardAddress []byte) { + vi.RewardAddress = rewardAddress +} + +// SetLeaderSuccess sets leader success +func (vi *ValidatorInfo) SetLeaderSuccess(leaderSuccess uint32) { + vi.LeaderSuccess = leaderSuccess +} + +// SetLeaderFailure sets validator's leader failure +func (vi *ValidatorInfo) SetLeaderFailure(leaderFailure uint32) { + vi.LeaderFailure = leaderFailure +} + +// SetValidatorSuccess sets validator's success +func (vi *ValidatorInfo) SetValidatorSuccess(validatorSuccess uint32) { + vi.ValidatorSuccess = validatorSuccess +} + +// SetValidatorFailure sets validator's failure +func (vi *ValidatorInfo) SetValidatorFailure(validatorFailure uint32) { + vi.ValidatorFailure = validatorFailure +} + +// SetValidatorIgnoredSignatures sets validator's ignored signatures +func (vi *ValidatorInfo) SetValidatorIgnoredSignatures(validatorIgnoredSignatures uint32) { + vi.ValidatorIgnoredSignatures = validatorIgnoredSignatures +} + +// SetNumSelectedInSuccessBlocks sets validator's num of selected in success block +func (vi *ValidatorInfo) SetNumSelectedInSuccessBlocks(numSelectedInSuccessBlock uint32) { + vi.NumSelectedInSuccessBlocks = numSelectedInSuccessBlock +} + +// SetAccumulatedFees sets validator's accumulated fees +func (vi *ValidatorInfo) SetAccumulatedFees(accumulatedFees *mathbig.Int) { + vi.AccumulatedFees = mathbig.NewInt(0).Set(accumulatedFees) +} + +// SetTotalLeaderSuccess sets validator's total leader success +func (vi *ValidatorInfo) SetTotalLeaderSuccess(totalLeaderSuccess uint32) { + vi.TotalLeaderSuccess = totalLeaderSuccess +} + +// SetTotalLeaderFailure sets validator's total leader failure +func (vi *ValidatorInfo) SetTotalLeaderFailure(totalLeaderFailure uint32) { + vi.TotalLeaderFailure = totalLeaderFailure +} + +// SetTotalValidatorSuccess sets validator's total success +func (vi *ValidatorInfo) SetTotalValidatorSuccess(totalValidatorSuccess uint32) { + vi.TotalValidatorSuccess = totalValidatorSuccess +} + +// SetTotalValidatorFailure sets validator's total failure +func (vi *ValidatorInfo) SetTotalValidatorFailure(totalValidatorFailure uint32) { + vi.TotalValidatorFailure = totalValidatorFailure +} + +// SetTotalValidatorIgnoredSignatures sets validator's total ignored signatures +func (vi *ValidatorInfo) SetTotalValidatorIgnoredSignatures(totalValidatorIgnoredSignatures uint32) { + vi.TotalValidatorIgnoredSignatures = totalValidatorIgnoredSignatures +} + +// ShallowClone returns a clone of the object +func (vi *ValidatorInfo) ShallowClone() ValidatorInfoHandler { + if vi == nil { + return nil + } + + validatorCopy := *vi + return &validatorCopy +} + // IsInterfaceNil returns true if there is no value under the interface func (svi *ShardValidatorInfo) IsInterfaceNil() bool { return svi == nil diff --git a/state/validatorInfo.pb.go b/state/validatorInfo.pb.go index 19907c86869..3261e3da880 100644 --- a/state/validatorInfo.pb.go +++ b/state/validatorInfo.pb.go @@ -51,6 +51,8 @@ type ValidatorInfo struct { TotalValidatorSuccess uint32 
`protobuf:"varint,18,opt,name=TotalValidatorSuccess,proto3" json:"totalValidatorSuccess"` TotalValidatorFailure uint32 `protobuf:"varint,19,opt,name=TotalValidatorFailure,proto3" json:"totalValidatorFailure"` TotalValidatorIgnoredSignatures uint32 `protobuf:"varint,20,opt,name=TotalValidatorIgnoredSignatures,proto3" json:"totalValidatorIgnoredSignatures"` + PreviousList string `protobuf:"bytes,21,opt,name=PreviousList,proto3" json:"previousList,omitempty"` + PreviousIndex uint32 `protobuf:"varint,22,opt,name=PreviousIndex,proto3" json:"previousIndex,omitempty"` } func (m *ValidatorInfo) Reset() { *m = ValidatorInfo{} } @@ -221,13 +223,29 @@ func (m *ValidatorInfo) GetTotalValidatorIgnoredSignatures() uint32 { return 0 } +func (m *ValidatorInfo) GetPreviousList() string { + if m != nil { + return m.PreviousList + } + return "" +} + +func (m *ValidatorInfo) GetPreviousIndex() uint32 { + if m != nil { + return m.PreviousIndex + } + return 0 +} + // ShardValidatorInfo represents the data regarding a validator that is stored in the PeerMiniblocks type ShardValidatorInfo struct { - PublicKey []byte `protobuf:"bytes,1,opt,name=PublicKey,proto3" json:"publicKey"` - ShardId uint32 `protobuf:"varint,2,opt,name=ShardId,proto3" json:"shardId"` - List string `protobuf:"bytes,3,opt,name=List,proto3" json:"list,omitempty"` - Index uint32 `protobuf:"varint,4,opt,name=Index,proto3" json:"index"` - TempRating uint32 `protobuf:"varint,5,opt,name=TempRating,proto3" json:"tempRating"` + PublicKey []byte `protobuf:"bytes,1,opt,name=PublicKey,proto3" json:"publicKey"` + ShardId uint32 `protobuf:"varint,2,opt,name=ShardId,proto3" json:"shardId"` + List string `protobuf:"bytes,3,opt,name=List,proto3" json:"list,omitempty"` + Index uint32 `protobuf:"varint,4,opt,name=Index,proto3" json:"index"` + TempRating uint32 `protobuf:"varint,5,opt,name=TempRating,proto3" json:"tempRating"` + PreviousList string `protobuf:"bytes,6,opt,name=PreviousList,proto3" json:"previousList,omitempty"` + PreviousIndex uint32 `protobuf:"varint,7,opt,name=PreviousIndex,proto3" json:"previousIndex,omitempty"` } func (m *ShardValidatorInfo) Reset() { *m = ShardValidatorInfo{} } @@ -293,6 +311,20 @@ func (m *ShardValidatorInfo) GetTempRating() uint32 { return 0 } +func (m *ShardValidatorInfo) GetPreviousList() string { + if m != nil { + return m.PreviousList + } + return "" +} + +func (m *ShardValidatorInfo) GetPreviousIndex() uint32 { + if m != nil { + return m.PreviousIndex + } + return 0 +} + func init() { proto.RegisterType((*ValidatorInfo)(nil), "proto.ValidatorInfo") proto.RegisterType((*ShardValidatorInfo)(nil), "proto.ShardValidatorInfo") @@ -301,52 +333,56 @@ func init() { func init() { proto.RegisterFile("validatorInfo.proto", fileDescriptor_bf9cdc082f0b2ec2) } var fileDescriptor_bf9cdc082f0b2ec2 = []byte{ - // 714 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x95, 0x4f, 0x4f, 0x13, 0x41, - 0x18, 0xc6, 0xbb, 0x48, 0x0b, 0x1d, 0x68, 0x81, 0x01, 0x74, 0x41, 0xb3, 0xd3, 0x60, 0x34, 0x4d, - 0xb4, 0xed, 0xc1, 0x83, 0x89, 0x1e, 0x94, 0x1a, 0x49, 0x1a, 0xf1, 0x4f, 0xa6, 0xc4, 0x83, 0x07, - 0x93, 0xe9, 0xee, 0x74, 0x3b, 0x71, 0xff, 0x90, 0xd9, 0xd9, 0x0a, 0x37, 0x3f, 0x02, 0x1f, 0xc3, - 0xf8, 0x49, 0x3c, 0x72, 0xe4, 0xb4, 0xd8, 0xe5, 0x62, 0xe6, 0xc4, 0x47, 0x30, 0x9d, 0x76, 0x69, - 0xb7, 0x2d, 0x78, 0xe2, 0xc4, 0xee, 0xfb, 0x3c, 0xcf, 0x6f, 0x5e, 0xfa, 0x4e, 0xdf, 0x82, 0xf5, - 0x2e, 0x71, 0x98, 0x45, 0x84, 0xcf, 0x1b, 0x5e, 0xdb, 0xaf, 0x1e, 0x72, 0x5f, 0xf8, 0x30, 0xab, - 
0xfe, 0x6c, 0x57, 0x6c, 0x26, 0x3a, 0x61, 0xab, 0x6a, 0xfa, 0x6e, 0xcd, 0xf6, 0x6d, 0xbf, 0xa6, - 0xca, 0xad, 0xb0, 0xad, 0xde, 0xd4, 0x8b, 0x7a, 0x1a, 0xa4, 0x76, 0xce, 0x01, 0x28, 0x7c, 0x1e, - 0xa7, 0xc1, 0x27, 0x20, 0xff, 0x29, 0x6c, 0x39, 0xcc, 0x7c, 0x47, 0x8f, 0x75, 0xad, 0xa4, 0x95, - 0x97, 0xeb, 0x05, 0x19, 0xa1, 0xfc, 0x61, 0x52, 0xc4, 0x23, 0x1d, 0x3e, 0x02, 0x0b, 0xcd, 0x0e, - 0xe1, 0x56, 0xc3, 0xd2, 0xe7, 0x4a, 0x5a, 0xb9, 0x50, 0x5f, 0x92, 0x11, 0x5a, 0x08, 0x06, 0x25, - 0x9c, 0x68, 0xf0, 0x01, 0x98, 0xdf, 0x67, 0x81, 0xd0, 0xef, 0x94, 0xb4, 0x72, 0xbe, 0xbe, 0x28, - 0x23, 0x34, 0xef, 0xb0, 0x40, 0x60, 0x55, 0x85, 0x08, 0x64, 0x1b, 0x9e, 0x45, 0x8f, 0xf4, 0x79, - 0x85, 0xc8, 0xcb, 0x08, 0x65, 0x59, 0xbf, 0x80, 0x07, 0x75, 0x58, 0x05, 0xe0, 0x80, 0xba, 0x87, - 0x98, 0x08, 0xe6, 0xd9, 0x7a, 0x56, 0xb9, 0x8a, 0x32, 0x42, 0x40, 0x5c, 0x55, 0xf1, 0x98, 0x03, - 0xee, 0x80, 0xdc, 0xd0, 0x9b, 0x53, 0x5e, 0x20, 0x23, 0x94, 0xe3, 0x03, 0xdf, 0x50, 0x81, 0x2f, - 0x40, 0x71, 0xf0, 0xf4, 0xde, 0xb7, 0x58, 0x9b, 0x51, 0xae, 0x2f, 0x94, 0xb4, 0xf2, 0x5c, 0x1d, - 0xca, 0x08, 0x15, 0x79, 0x4a, 0xc1, 0x13, 0x4e, 0xb8, 0x0b, 0x0a, 0x98, 0x7e, 0x27, 0xdc, 0xda, - 0xb5, 0x2c, 0x4e, 0x83, 0x40, 0x5f, 0x54, 0x1f, 0xd3, 0x7d, 0x19, 0xa1, 0x7b, 0x7c, 0x5c, 0x78, - 0xea, 0xbb, 0xac, 0xdf, 0xa3, 0x38, 0xc6, 0xe9, 0x04, 0x7c, 0x0e, 0x0a, 0xfb, 0x94, 0x58, 0x94, - 0x37, 0x43, 0xd3, 0xec, 0x23, 0xf2, 0xaa, 0xd3, 0x35, 0x19, 0xa1, 0x82, 0x33, 0x2e, 0xe0, 0xb4, - 0x6f, 0x14, 0xdc, 0x23, 0xcc, 0x09, 0x39, 0xd5, 0xc1, 0x64, 0x70, 0x28, 0xe0, 0xb4, 0x0f, 0xbe, - 0x06, 0xab, 0x57, 0x83, 0x4e, 0x0e, 0x5d, 0x52, 0xd9, 0x0d, 0x19, 0xa1, 0xd5, 0xee, 0x84, 0x86, - 0xa7, 0xdc, 0x29, 0x42, 0x72, 0xfa, 0xf2, 0x0c, 0x42, 0xd2, 0xc0, 0x94, 0x1b, 0x7e, 0x05, 0xdb, - 0xa3, 0xcb, 0x66, 0x7b, 0x3e, 0xa7, 0x56, 0x93, 0xd9, 0x1e, 0x11, 0x21, 0xa7, 0x81, 0x5e, 0x50, - 0x2c, 0x43, 0x46, 0x68, 0xbb, 0x7b, 0xad, 0x0b, 0xdf, 0x40, 0xe8, 0xf3, 0x3f, 0x84, 0x6e, 0x93, - 0x3a, 0xd4, 0x14, 0xd4, 0x6a, 0x78, 0xc3, 0xce, 0xeb, 0x8e, 0x6f, 0x7e, 0x0b, 0xf4, 0xe2, 0x88, - 0xef, 0x5d, 0xeb, 0xc2, 0x37, 0x10, 0xe0, 0x89, 0x06, 0x56, 0x76, 0x4d, 0x33, 0x74, 0x43, 0x87, - 0x08, 0x6a, 0xed, 0x51, 0x1a, 0xe8, 0x2b, 0x6a, 0xf6, 0x6d, 0x19, 0xa1, 0x2d, 0x92, 0x96, 0x46, - 0xd3, 0xff, 0x75, 0x8e, 0xde, 0xba, 0x44, 0x74, 0x6a, 0x2d, 0x66, 0x57, 0x1b, 0x9e, 0x78, 0x39, - 0xf6, 0x25, 0x75, 0x43, 0x47, 0xb0, 0x2e, 0xe5, 0xc1, 0x51, 0xcd, 0x3d, 0xaa, 0x98, 0x1d, 0xc2, - 0xbc, 0x8a, 0xe9, 0x73, 0x5a, 0xb1, 0xfd, 0x9a, 0x45, 0x04, 0xa9, 0xd6, 0x99, 0xdd, 0xf0, 0xc4, - 0x1b, 0x12, 0x08, 0xca, 0xf1, 0xe4, 0xf1, 0x70, 0x0f, 0xc0, 0x03, 0x5f, 0x10, 0x27, 0x7d, 0x9b, - 0x56, 0xd5, 0xbf, 0x7a, 0x57, 0x46, 0x08, 0x8a, 0x29, 0x15, 0xcf, 0x48, 0x4c, 0x70, 0x92, 0xf1, - 0xae, 0xcd, 0xe4, 0x24, 0x03, 0x9e, 0x91, 0x80, 0x1f, 0xc1, 0xa6, 0xaa, 0x4e, 0xdd, 0x35, 0xa8, - 0x50, 0x5b, 0x32, 0x42, 0x9b, 0x62, 0x96, 0x01, 0xcf, 0xce, 0x4d, 0x03, 0x93, 0xde, 0xd6, 0xaf, - 0x03, 0x26, 0xed, 0xcd, 0xce, 0x41, 0x17, 0xa0, 0xb4, 0x30, 0x7d, 0x13, 0x37, 0x14, 0xfa, 0xa1, - 0x8c, 0x10, 0x12, 0x37, 0x5b, 0xf1, 0xff, 0x58, 0x3b, 0x3d, 0x0d, 0x40, 0xb5, 0x07, 0x6f, 0x7f, - 0xcd, 0x3e, 0x4e, 0xad, 0x59, 0xb5, 0xc9, 0xfa, 0x6b, 0x76, 0x6c, 0x0b, 0xdd, 0xce, 0xc2, 0xad, - 0xbf, 0x3a, 0xed, 0x19, 0x99, 0xb3, 0x9e, 0x91, 0xb9, 0xec, 0x19, 0xda, 0x8f, 0xd8, 0xd0, 0x7e, - 0xc6, 0x86, 0xf6, 0x3b, 0x36, 0xb4, 0xd3, 0xd8, 0xd0, 0xce, 0x62, 0x43, 0xfb, 0x13, 0x1b, 0xda, - 0xdf, 0xd8, 0xc8, 0x5c, 0xc6, 0x86, 0x76, 0x72, 0x61, 0x64, 0x4e, 0x2f, 0x8c, 0xcc, 0xd9, 0x85, - 0x91, 0xf9, 0x92, 0x0d, 
0x04, 0x11, 0xb4, 0x95, 0x53, 0xbf, 0x46, 0xcf, 0xfe, 0x05, 0x00, 0x00, - 0xff, 0xff, 0x5e, 0xa1, 0xc3, 0x5e, 0xda, 0x06, 0x00, 0x00, + // 770 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x55, 0x4d, 0x6f, 0xf3, 0x34, + 0x1c, 0x6f, 0xc6, 0xda, 0x3e, 0xf5, 0xd6, 0x3e, 0x9b, 0xf7, 0x42, 0x56, 0x50, 0x5c, 0x0d, 0x81, + 0x2a, 0x41, 0xdb, 0x03, 0x07, 0x24, 0x90, 0x80, 0x15, 0x31, 0xa9, 0x62, 0xc0, 0xe4, 0x4e, 0x1c, + 0x38, 0x20, 0xb9, 0x89, 0x9b, 0x5a, 0xe4, 0xa5, 0x72, 0x9c, 0xb2, 0xdd, 0xf8, 0x08, 0xfb, 0x18, + 0x88, 0x4f, 0xc2, 0x71, 0xc7, 0x9d, 0x0c, 0xcb, 0x38, 0x20, 0x9f, 0xf6, 0x11, 0x50, 0xdd, 0x66, + 0x4d, 0xda, 0x6e, 0x08, 0x3d, 0xda, 0xa9, 0xf1, 0xff, 0xf7, 0xe2, 0x7f, 0xfc, 0x77, 0x7f, 0x01, + 0x7b, 0x13, 0xe2, 0x31, 0x87, 0x88, 0x90, 0xf7, 0x82, 0x61, 0xd8, 0x1e, 0xf3, 0x50, 0x84, 0xb0, + 0xa8, 0x7f, 0xea, 0x2d, 0x97, 0x89, 0x51, 0x3c, 0x68, 0xdb, 0xa1, 0xdf, 0x71, 0x43, 0x37, 0xec, + 0xe8, 0xf2, 0x20, 0x1e, 0xea, 0x95, 0x5e, 0xe8, 0xa7, 0x99, 0xea, 0x38, 0xd9, 0x02, 0xd5, 0x1f, + 0xb2, 0x6e, 0xf0, 0x43, 0x50, 0x39, 0x8f, 0x07, 0x1e, 0xb3, 0xbf, 0xa1, 0x57, 0xa6, 0xd1, 0x30, + 0x9a, 0xdb, 0xdd, 0xaa, 0x92, 0xa8, 0x32, 0x4e, 0x8b, 0x78, 0x81, 0xc3, 0xf7, 0x41, 0xb9, 0x3f, + 0x22, 0xdc, 0xe9, 0x39, 0xe6, 0x46, 0xc3, 0x68, 0x56, 0xbb, 0x5b, 0x4a, 0xa2, 0x72, 0x34, 0x2b, + 0xe1, 0x14, 0x83, 0xef, 0x82, 0xcd, 0x33, 0x16, 0x09, 0xf3, 0xad, 0x86, 0xd1, 0xac, 0x74, 0x5f, + 0x29, 0x89, 0x36, 0x3d, 0x16, 0x09, 0xac, 0xab, 0x10, 0x81, 0x62, 0x2f, 0x70, 0xe8, 0xa5, 0xb9, + 0xa9, 0x2d, 0x2a, 0x4a, 0xa2, 0x22, 0x9b, 0x16, 0xf0, 0xac, 0x0e, 0xdb, 0x00, 0x5c, 0x50, 0x7f, + 0x8c, 0x89, 0x60, 0x81, 0x6b, 0x16, 0x35, 0xab, 0xa6, 0x24, 0x02, 0xe2, 0xb1, 0x8a, 0x33, 0x0c, + 0x78, 0x0c, 0x4a, 0x73, 0x6e, 0x49, 0x73, 0x81, 0x92, 0xa8, 0xc4, 0x67, 0xbc, 0x39, 0x02, 0x3f, + 0x05, 0xb5, 0xd9, 0xd3, 0xb7, 0xa1, 0xc3, 0x86, 0x8c, 0x72, 0xb3, 0xdc, 0x30, 0x9a, 0x1b, 0x5d, + 0xa8, 0x24, 0xaa, 0xf1, 0x1c, 0x82, 0x97, 0x98, 0xf0, 0x04, 0x54, 0x31, 0xfd, 0x85, 0x70, 0xe7, + 0xc4, 0x71, 0x38, 0x8d, 0x22, 0xf3, 0x95, 0x3e, 0xa6, 0x77, 0x94, 0x44, 0x6f, 0xf3, 0x2c, 0xf0, + 0x51, 0xe8, 0xb3, 0x69, 0x8f, 0xe2, 0x0a, 0xe7, 0x15, 0xf0, 0x13, 0x50, 0x3d, 0xa3, 0xc4, 0xa1, + 0xbc, 0x1f, 0xdb, 0xf6, 0xd4, 0xa2, 0xa2, 0x3b, 0xdd, 0x55, 0x12, 0x55, 0xbd, 0x2c, 0x80, 0xf3, + 0xbc, 0x85, 0xf0, 0x94, 0x30, 0x2f, 0xe6, 0xd4, 0x04, 0xcb, 0xc2, 0x39, 0x80, 0xf3, 0x3c, 0xf8, + 0x25, 0xd8, 0x79, 0x1c, 0x74, 0xba, 0xe9, 0x96, 0xd6, 0xee, 0x2b, 0x89, 0x76, 0x26, 0x4b, 0x18, + 0x5e, 0x61, 0xe7, 0x1c, 0xd2, 0xdd, 0xb7, 0xd7, 0x38, 0xa4, 0x0d, 0xac, 0xb0, 0xe1, 0x4f, 0xa0, + 0xbe, 0xb8, 0x6c, 0x6e, 0x10, 0x72, 0xea, 0xf4, 0x99, 0x1b, 0x10, 0x11, 0x73, 0x1a, 0x99, 0x55, + 0xed, 0x65, 0x29, 0x89, 0xea, 0x93, 0x27, 0x59, 0xf8, 0x19, 0x87, 0xa9, 0xff, 0x77, 0xb1, 0xdf, + 0xa7, 0x1e, 0xb5, 0x05, 0x75, 0x7a, 0xc1, 0xbc, 0xf3, 0xae, 0x17, 0xda, 0x3f, 0x47, 0x66, 0x6d, + 0xe1, 0x1f, 0x3c, 0xc9, 0xc2, 0xcf, 0x38, 0xc0, 0x6b, 0x03, 0xbc, 0x3e, 0xb1, 0xed, 0xd8, 0x8f, + 0x3d, 0x22, 0xa8, 0x73, 0x4a, 0x69, 0x64, 0xbe, 0xd6, 0xb3, 0x1f, 0x2a, 0x89, 0x8e, 0x48, 0x1e, + 0x5a, 0x4c, 0xff, 0xf7, 0x3f, 0xd1, 0xd7, 0x3e, 0x11, 0xa3, 0xce, 0x80, 0xb9, 0xed, 0x5e, 0x20, + 0x3e, 0xcb, 0xfc, 0x49, 0xfd, 0xd8, 0x13, 0x6c, 0x42, 0x79, 0x74, 0xd9, 0xf1, 0x2f, 0x5b, 0xf6, + 0x88, 0xb0, 0xa0, 0x65, 0x87, 0x9c, 0xb6, 0xdc, 0xb0, 0xe3, 0x10, 0x41, 0xda, 0x5d, 0xe6, 0xf6, + 0x02, 0xf1, 0x15, 0x89, 0x04, 0xe5, 0x78, 0x79, 0x7b, 0x78, 0x0a, 0xe0, 0x45, 0x28, 0x88, 0x97, + 0xbf, 0x4d, 0x3b, 0xfa, 0x55, 0x0f, 
0x95, 0x44, 0x50, 0xac, 0xa0, 0x78, 0x8d, 0x62, 0xc9, 0x27, + 0x1d, 0xef, 0xee, 0x5a, 0x9f, 0x74, 0xc0, 0x6b, 0x14, 0xf0, 0x7b, 0x70, 0xa0, 0xab, 0x2b, 0x77, + 0x0d, 0x6a, 0xab, 0x23, 0x25, 0xd1, 0x81, 0x58, 0x47, 0xc0, 0xeb, 0x75, 0xab, 0x86, 0x69, 0x6f, + 0x7b, 0x4f, 0x19, 0xa6, 0xed, 0xad, 0xd7, 0x41, 0x1f, 0xa0, 0x3c, 0xb0, 0x7a, 0x13, 0xf7, 0xb5, + 0xf5, 0x7b, 0x4a, 0x22, 0x24, 0x9e, 0xa7, 0xe2, 0xff, 0xf2, 0x82, 0x9f, 0x83, 0xed, 0x73, 0x4e, + 0x27, 0x2c, 0x8c, 0x23, 0x9d, 0x81, 0x07, 0x3a, 0x03, 0xeb, 0x4a, 0xa2, 0xc3, 0x71, 0xa6, 0x9e, + 0x89, 0x8a, 0x1c, 0x7f, 0x1a, 0x36, 0xe9, 0x7a, 0x96, 0x92, 0x87, 0xba, 0x39, 0x1d, 0x36, 0xe3, + 0x2c, 0x90, 0x0d, 0x9b, 0x9c, 0xe2, 0xf8, 0xef, 0x0d, 0x00, 0x75, 0x14, 0xbf, 0x7c, 0xd2, 0x7f, + 0x90, 0x4b, 0x7a, 0x1d, 0xa6, 0x5e, 0xfe, 0xed, 0x5e, 0x28, 0xf3, 0x97, 0x8f, 0xb9, 0xf4, 0xa6, + 0xc7, 0x5c, 0xfe, 0xbf, 0xc7, 0xdc, 0xfd, 0xe2, 0xe6, 0xce, 0x2a, 0xdc, 0xde, 0x59, 0x85, 0x87, + 0x3b, 0xcb, 0xf8, 0x35, 0xb1, 0x8c, 0xdf, 0x12, 0xcb, 0xf8, 0x23, 0xb1, 0x8c, 0x9b, 0xc4, 0x32, + 0x6e, 0x13, 0xcb, 0xf8, 0x2b, 0xb1, 0x8c, 0x7f, 0x12, 0xab, 0xf0, 0x90, 0x58, 0xc6, 0xf5, 0xbd, + 0x55, 0xb8, 0xb9, 0xb7, 0x0a, 0xb7, 0xf7, 0x56, 0xe1, 0xc7, 0x62, 0x24, 0x88, 0xa0, 0x83, 0x92, + 0xfe, 0x26, 0x7f, 0xfc, 0x6f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x5d, 0x69, 0x2e, 0x1c, 0xe0, 0x07, + 0x00, 0x00, } func (this *ValidatorInfo) Equal(that interface{}) bool { @@ -431,6 +467,12 @@ func (this *ValidatorInfo) Equal(that interface{}) bool { if this.TotalValidatorIgnoredSignatures != that1.TotalValidatorIgnoredSignatures { return false } + if this.PreviousList != that1.PreviousList { + return false + } + if this.PreviousIndex != that1.PreviousIndex { + return false + } return true } func (this *ShardValidatorInfo) Equal(that interface{}) bool { @@ -467,13 +509,19 @@ func (this *ShardValidatorInfo) Equal(that interface{}) bool { if this.TempRating != that1.TempRating { return false } + if this.PreviousList != that1.PreviousList { + return false + } + if this.PreviousIndex != that1.PreviousIndex { + return false + } return true } func (this *ValidatorInfo) GoString() string { if this == nil { return "nil" } - s := make([]string, 0, 24) + s := make([]string, 0, 26) s = append(s, "&state.ValidatorInfo{") s = append(s, "PublicKey: "+fmt.Sprintf("%#v", this.PublicKey)+",\n") s = append(s, "ShardId: "+fmt.Sprintf("%#v", this.ShardId)+",\n") @@ -495,6 +543,8 @@ func (this *ValidatorInfo) GoString() string { s = append(s, "TotalValidatorSuccess: "+fmt.Sprintf("%#v", this.TotalValidatorSuccess)+",\n") s = append(s, "TotalValidatorFailure: "+fmt.Sprintf("%#v", this.TotalValidatorFailure)+",\n") s = append(s, "TotalValidatorIgnoredSignatures: "+fmt.Sprintf("%#v", this.TotalValidatorIgnoredSignatures)+",\n") + s = append(s, "PreviousList: "+fmt.Sprintf("%#v", this.PreviousList)+",\n") + s = append(s, "PreviousIndex: "+fmt.Sprintf("%#v", this.PreviousIndex)+",\n") s = append(s, "}") return strings.Join(s, "") } @@ -502,13 +552,15 @@ func (this *ShardValidatorInfo) GoString() string { if this == nil { return "nil" } - s := make([]string, 0, 9) + s := make([]string, 0, 11) s = append(s, "&state.ShardValidatorInfo{") s = append(s, "PublicKey: "+fmt.Sprintf("%#v", this.PublicKey)+",\n") s = append(s, "ShardId: "+fmt.Sprintf("%#v", this.ShardId)+",\n") s = append(s, "List: "+fmt.Sprintf("%#v", this.List)+",\n") s = append(s, "Index: "+fmt.Sprintf("%#v", this.Index)+",\n") s = append(s, "TempRating: "+fmt.Sprintf("%#v", this.TempRating)+",\n") + s = append(s, "PreviousList: 
"+fmt.Sprintf("%#v", this.PreviousList)+",\n") + s = append(s, "PreviousIndex: "+fmt.Sprintf("%#v", this.PreviousIndex)+",\n") s = append(s, "}") return strings.Join(s, "") } @@ -540,6 +592,22 @@ func (m *ValidatorInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.PreviousIndex != 0 { + i = encodeVarintValidatorInfo(dAtA, i, uint64(m.PreviousIndex)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xb0 + } + if len(m.PreviousList) > 0 { + i -= len(m.PreviousList) + copy(dAtA[i:], m.PreviousList) + i = encodeVarintValidatorInfo(dAtA, i, uint64(len(m.PreviousList))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xaa + } if m.TotalValidatorIgnoredSignatures != 0 { i = encodeVarintValidatorInfo(dAtA, i, uint64(m.TotalValidatorIgnoredSignatures)) i-- @@ -686,6 +754,18 @@ func (m *ShardValidatorInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.PreviousIndex != 0 { + i = encodeVarintValidatorInfo(dAtA, i, uint64(m.PreviousIndex)) + i-- + dAtA[i] = 0x38 + } + if len(m.PreviousList) > 0 { + i -= len(m.PreviousList) + copy(dAtA[i:], m.PreviousList) + i = encodeVarintValidatorInfo(dAtA, i, uint64(len(m.PreviousList))) + i-- + dAtA[i] = 0x32 + } if m.TempRating != 0 { i = encodeVarintValidatorInfo(dAtA, i, uint64(m.TempRating)) i-- @@ -800,6 +880,13 @@ func (m *ValidatorInfo) Size() (n int) { if m.TotalValidatorIgnoredSignatures != 0 { n += 2 + sovValidatorInfo(uint64(m.TotalValidatorIgnoredSignatures)) } + l = len(m.PreviousList) + if l > 0 { + n += 2 + l + sovValidatorInfo(uint64(l)) + } + if m.PreviousIndex != 0 { + n += 2 + sovValidatorInfo(uint64(m.PreviousIndex)) + } return n } @@ -826,6 +913,13 @@ func (m *ShardValidatorInfo) Size() (n int) { if m.TempRating != 0 { n += 1 + sovValidatorInfo(uint64(m.TempRating)) } + l = len(m.PreviousList) + if l > 0 { + n += 1 + l + sovValidatorInfo(uint64(l)) + } + if m.PreviousIndex != 0 { + n += 1 + sovValidatorInfo(uint64(m.PreviousIndex)) + } return n } @@ -860,6 +954,8 @@ func (this *ValidatorInfo) String() string { `TotalValidatorSuccess:` + fmt.Sprintf("%v", this.TotalValidatorSuccess) + `,`, `TotalValidatorFailure:` + fmt.Sprintf("%v", this.TotalValidatorFailure) + `,`, `TotalValidatorIgnoredSignatures:` + fmt.Sprintf("%v", this.TotalValidatorIgnoredSignatures) + `,`, + `PreviousList:` + fmt.Sprintf("%v", this.PreviousList) + `,`, + `PreviousIndex:` + fmt.Sprintf("%v", this.PreviousIndex) + `,`, `}`, }, "") return s @@ -874,6 +970,8 @@ func (this *ShardValidatorInfo) String() string { `List:` + fmt.Sprintf("%v", this.List) + `,`, `Index:` + fmt.Sprintf("%v", this.Index) + `,`, `TempRating:` + fmt.Sprintf("%v", this.TempRating) + `,`, + `PreviousList:` + fmt.Sprintf("%v", this.PreviousList) + `,`, + `PreviousIndex:` + fmt.Sprintf("%v", this.PreviousIndex) + `,`, `}`, }, "") return s @@ -1349,6 +1447,57 @@ func (m *ValidatorInfo) Unmarshal(dAtA []byte) error { break } } + case 21: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PreviousList", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowValidatorInfo + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthValidatorInfo + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthValidatorInfo + } + if postIndex > l { + return io.ErrUnexpectedEOF + 
} + m.PreviousList = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 22: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PreviousIndex", wireType) + } + m.PreviousIndex = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowValidatorInfo + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.PreviousIndex |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := skipValidatorInfo(dAtA[iNdEx:]) @@ -1525,6 +1674,57 @@ func (m *ShardValidatorInfo) Unmarshal(dAtA []byte) error { break } } + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PreviousList", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowValidatorInfo + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthValidatorInfo + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthValidatorInfo + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PreviousList = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PreviousIndex", wireType) + } + m.PreviousIndex = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowValidatorInfo + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.PreviousIndex |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := skipValidatorInfo(dAtA[iNdEx:]) diff --git a/state/validatorInfo.proto b/state/validatorInfo.proto index c6256810091..2df2149d8f5 100644 --- a/state/validatorInfo.proto +++ b/state/validatorInfo.proto @@ -29,13 +29,17 @@ message ValidatorInfo { uint32 TotalValidatorSuccess = 18 [(gogoproto.jsontag) = "totalValidatorSuccess"]; uint32 TotalValidatorFailure = 19 [(gogoproto.jsontag) = "totalValidatorFailure"]; uint32 TotalValidatorIgnoredSignatures = 20 [(gogoproto.jsontag) = "totalValidatorIgnoredSignatures"]; + string PreviousList = 21 [(gogoproto.jsontag) = "previousList,omitempty"]; + uint32 PreviousIndex = 22 [(gogoproto.jsontag) = "previousIndex,omitempty"]; } // ShardValidatorInfo represents the data regarding a validator that is stored in the PeerMiniblocks message ShardValidatorInfo { - bytes PublicKey = 1 [(gogoproto.jsontag) = "publicKey"]; - uint32 ShardId = 2 [(gogoproto.jsontag) = "shardId"]; - string List = 3 [(gogoproto.jsontag) = "list,omitempty"]; - uint32 Index = 4 [(gogoproto.jsontag) = "index"]; - uint32 TempRating = 5 [(gogoproto.jsontag) = "tempRating"]; + bytes PublicKey = 1 [(gogoproto.jsontag) = "publicKey"]; + uint32 ShardId = 2 [(gogoproto.jsontag) = "shardId"]; + string List = 3 [(gogoproto.jsontag) = "list,omitempty"]; + uint32 Index = 4 [(gogoproto.jsontag) = "index"]; + uint32 TempRating = 5 [(gogoproto.jsontag) = "tempRating"]; + string PreviousList = 6 [(gogoproto.jsontag) = "previousList,omitempty"]; + uint32 PreviousIndex = 7 [(gogoproto.jsontag) = "previousIndex,omitempty"]; } diff --git a/state/validatorInfo_test.go b/state/validatorInfo_test.go deleted file mode 100644 index 6a6ca0be930..00000000000 --- a/state/validatorInfo_test.go +++ /dev/null @@ -1,15 +0,0 @@ -package state - -import ( - "testing" 
- - "github.com/multiversx/mx-chain-core-go/core/check" - "github.com/stretchr/testify/assert" -) - -func TestValidatorInfo_IsInterfaceNile(t *testing.T) { - t.Parallel() - - vi := &ValidatorInfo{} - assert.False(t, check.IfNil(vi)) -} diff --git a/state/validatorsInfoMap.go b/state/validatorsInfoMap.go new file mode 100644 index 00000000000..e6c492d9d39 --- /dev/null +++ b/state/validatorsInfoMap.go @@ -0,0 +1,177 @@ +package state + +import ( + "bytes" + "encoding/hex" + "fmt" + "sync" + + "github.com/multiversx/mx-chain-core-go/core/check" +) + +type shardValidatorsInfoMap struct { + mutex sync.RWMutex + valInfoMap map[uint32][]ValidatorInfoHandler +} + +// NewShardValidatorsInfoMap creates an instance of shardValidatorsInfoMap which manages a +// map internally +func NewShardValidatorsInfoMap() *shardValidatorsInfoMap { + return &shardValidatorsInfoMap{ + mutex: sync.RWMutex{}, + valInfoMap: make(map[uint32][]ValidatorInfoHandler), + } +} + +// GetAllValidatorsInfo returns a []ValidatorInfoHandler copy with validators from all shards. +func (vi *shardValidatorsInfoMap) GetAllValidatorsInfo() []ValidatorInfoHandler { + ret := make([]ValidatorInfoHandler, 0) + + vi.mutex.RLock() + defer vi.mutex.RUnlock() + + for _, validatorsInShard := range vi.valInfoMap { + validatorsCopy := make([]ValidatorInfoHandler, len(validatorsInShard)) + copy(validatorsCopy, validatorsInShard) + ret = append(ret, validatorsCopy...) + } + + return ret +} + +// GetShardValidatorsInfoMap returns a map copy of internally stored data +func (vi *shardValidatorsInfoMap) GetShardValidatorsInfoMap() map[uint32][]ValidatorInfoHandler { + ret := make(map[uint32][]ValidatorInfoHandler, len(vi.valInfoMap)) + + vi.mutex.RLock() + defer vi.mutex.RUnlock() + + for shardID, validatorsInShard := range vi.valInfoMap { + validatorsCopy := make([]ValidatorInfoHandler, len(validatorsInShard)) + copy(validatorsCopy, validatorsInShard) + ret[shardID] = validatorsCopy + } + + return ret +} + +// Add adds a ValidatorInfoHandler in its corresponding shardID +func (vi *shardValidatorsInfoMap) Add(validator ValidatorInfoHandler) error { + if check.IfNil(validator) { + return ErrNilValidatorInfo + } + + shardID := validator.GetShardId() + vi.mutex.Lock() + vi.valInfoMap[shardID] = append(vi.valInfoMap[shardID], validator) + vi.mutex.Unlock() + + return nil +} + +// GetValidator returns a ValidatorInfoHandler copy with the provided blsKey, +// if it is present in the map, otherwise returns nil +func (vi *shardValidatorsInfoMap) GetValidator(blsKey []byte) ValidatorInfoHandler { + for _, validator := range vi.GetAllValidatorsInfo() { + if bytes.Equal(validator.GetPublicKey(), blsKey) { + return validator.ShallowClone() + } + } + + return nil +} + +// Replace will replace an existing ValidatorInfoHandler with a new one. The old and new validator +// shall be in the same shard. 
If the old validator is not found in the map, an error is returned +func (vi *shardValidatorsInfoMap) Replace(old ValidatorInfoHandler, new ValidatorInfoHandler) error { + if check.IfNil(old) { + return fmt.Errorf("%w for old validator in shardValidatorsInfoMap.Replace", ErrNilValidatorInfo) + } + if check.IfNil(new) { + return fmt.Errorf("%w for new validator in shardValidatorsInfoMap.Replace", ErrNilValidatorInfo) + } + if old.GetShardId() != new.GetShardId() { + return fmt.Errorf("%w when trying to replace %s from shard %v with %s from shard %v", + ErrValidatorsDifferentShards, + hex.EncodeToString(old.GetPublicKey()), + old.GetShardId(), + hex.EncodeToString(new.GetPublicKey()), + new.GetShardId(), + ) + } + + shardID := old.GetShardId() + log.Debug("shardValidatorsInfoMap.Replace", + "old validator", hex.EncodeToString(old.GetPublicKey()), "shard", old.GetShardId(), "list", old.GetList(), + "with new validator", hex.EncodeToString(new.GetPublicKey()), "shard", new.GetShardId(), "list", new.GetList(), + ) + + vi.mutex.Lock() + defer vi.mutex.Unlock() + + for idx, validator := range vi.valInfoMap[shardID] { + if bytes.Equal(validator.GetPublicKey(), old.GetPublicKey()) { + vi.valInfoMap[shardID][idx] = new + return nil + } + } + + return fmt.Errorf("old %w: %s when trying to replace it with %s", + ErrValidatorNotFound, + hex.EncodeToString(old.GetPublicKey()), + hex.EncodeToString(new.GetPublicKey()), + ) +} + +// SetValidatorsInShard resets all validators saved in a specific shard with the provided []ValidatorInfoHandler. +// Before setting them, it checks that provided validators have the same shardID as the one provided. +func (vi *shardValidatorsInfoMap) SetValidatorsInShard(shardID uint32, validators []ValidatorInfoHandler) error { + sameShardValidators := make([]ValidatorInfoHandler, 0, len(validators)) + for idx, validator := range validators { + if check.IfNil(validator) { + return fmt.Errorf("%w in shardValidatorsInfoMap.SetValidatorsInShard at index %d", + ErrNilValidatorInfo, + idx, + ) + } + if validator.GetShardId() != shardID { + return fmt.Errorf("%w, %s is in shard %d, but should be set in shard %d in shardValidatorsInfoMap.SetValidatorsInShard", + ErrValidatorsDifferentShards, + hex.EncodeToString(validator.GetPublicKey()), + validator.GetShardId(), + shardID, + ) + } + sameShardValidators = append(sameShardValidators, validator) + } + + vi.mutex.Lock() + vi.valInfoMap[shardID] = sameShardValidators + vi.mutex.Unlock() + + return nil +} + +// Delete will delete the provided validator from the internally stored map, if found. 
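+// A nil validator is rejected with ErrNilValidatorInfo, while deleting a validator that is not present leaves the map unchanged and returns nil.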
+// The validators slice at the corresponding shardID key will be re-sliced, without reordering +func (vi *shardValidatorsInfoMap) Delete(validator ValidatorInfoHandler) error { + if check.IfNil(validator) { + return ErrNilValidatorInfo + } + + shardID := validator.GetShardId() + vi.mutex.Lock() + defer vi.mutex.Unlock() + + for index, validatorInfo := range vi.valInfoMap[shardID] { + if bytes.Equal(validatorInfo.GetPublicKey(), validator.GetPublicKey()) { + length := len(vi.valInfoMap[shardID]) + vi.valInfoMap[shardID][index] = vi.valInfoMap[shardID][length-1] + vi.valInfoMap[shardID][length-1] = nil + vi.valInfoMap[shardID] = vi.valInfoMap[shardID][:length-1] + break + } + } + + return nil +} diff --git a/state/validatorsInfoMap_test.go b/state/validatorsInfoMap_test.go new file mode 100644 index 00000000000..e90c01993cd --- /dev/null +++ b/state/validatorsInfoMap_test.go @@ -0,0 +1,345 @@ +package state + +import ( + "encoding/hex" + "strconv" + "strings" + "sync" + "testing" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/stretchr/testify/require" +) + +func TestShardValidatorsInfoMap_OperationsWithNilValidators(t *testing.T) { + t.Parallel() + + vi := NewShardValidatorsInfoMap() + + t.Run("add nil validator", func(t *testing.T) { + t.Parallel() + + err := vi.Add(nil) + require.Equal(t, ErrNilValidatorInfo, err) + }) + + t.Run("delete nil validator", func(t *testing.T) { + t.Parallel() + + err := vi.Delete(nil) + require.Equal(t, ErrNilValidatorInfo, err) + }) + + t.Run("replace nil validator", func(t *testing.T) { + t.Parallel() + + err := vi.Replace(nil, &ValidatorInfo{}) + require.Error(t, err) + require.True(t, strings.Contains(err.Error(), ErrNilValidatorInfo.Error())) + require.True(t, strings.Contains(err.Error(), "old")) + + err = vi.Replace(&ValidatorInfo{}, nil) + require.Error(t, err) + require.True(t, strings.Contains(err.Error(), ErrNilValidatorInfo.Error())) + require.True(t, strings.Contains(err.Error(), "new")) + }) + + t.Run("set nil validators in shard", func(t *testing.T) { + t.Parallel() + + v := &ValidatorInfo{ShardId: 3, PublicKey: []byte("pk")} + err := vi.SetValidatorsInShard(3, []ValidatorInfoHandler{v, nil}) + require.Error(t, err) + require.True(t, strings.Contains(err.Error(), ErrNilValidatorInfo.Error())) + require.True(t, strings.Contains(err.Error(), "index 1")) + }) +} + +func TestShardValidatorsInfoMap_Add_GetShardValidatorsInfoMap_GetAllValidatorsInfo_GetValInfoPointerMap(t *testing.T) { + t.Parallel() + + vi := NewShardValidatorsInfoMap() + + v0 := &ValidatorInfo{ShardId: 0, PublicKey: []byte("pk0")} + v1 := &ValidatorInfo{ShardId: 0, PublicKey: []byte("pk1")} + v2 := &ValidatorInfo{ShardId: 1, PublicKey: []byte("pk2")} + v3 := &ValidatorInfo{ShardId: core.MetachainShardId, PublicKey: []byte("pk3")} + + _ = vi.Add(v0) + _ = vi.Add(v1) + _ = vi.Add(v2) + _ = vi.Add(v3) + + allValidators := vi.GetAllValidatorsInfo() + require.Len(t, allValidators, 4) + require.Contains(t, allValidators, v0) + require.Contains(t, allValidators, v1) + require.Contains(t, allValidators, v2) + require.Contains(t, allValidators, v3) + + validatorsMap := vi.GetShardValidatorsInfoMap() + expectedValidatorsMap := map[uint32][]ValidatorInfoHandler{ + 0: {v0, v1}, + 1: {v2}, + core.MetachainShardId: {v3}, + } + require.Equal(t, validatorsMap, expectedValidatorsMap) +} + +func TestShardValidatorsInfoMap_GetValidator(t *testing.T) { + t.Parallel() + + vi := NewShardValidatorsInfoMap() + + pubKey0 := []byte("pk0") + pubKey1 := []byte("pk1") + v0 := 
&ValidatorInfo{ShardId: 0, PublicKey: pubKey0} + v1 := &ValidatorInfo{ShardId: 1, PublicKey: pubKey1} + + _ = vi.Add(v0) + _ = vi.Add(v1) + + require.Equal(t, v0, vi.GetValidator(pubKey0)) + require.Equal(t, v1, vi.GetValidator(pubKey1)) + require.Nil(t, vi.GetValidator([]byte("pk2"))) +} + +func TestShardValidatorsInfoMap_Delete(t *testing.T) { + t.Parallel() + + vi := NewShardValidatorsInfoMap() + + v0 := &ValidatorInfo{ShardId: 0, PublicKey: []byte("pk0")} + v1 := &ValidatorInfo{ShardId: 0, PublicKey: []byte("pk1")} + v2 := &ValidatorInfo{ShardId: 0, PublicKey: []byte("pk2")} + v3 := &ValidatorInfo{ShardId: 1, PublicKey: []byte("pk3")} + + _ = vi.Add(v0) + _ = vi.Add(v1) + _ = vi.Add(v2) + _ = vi.Add(v3) + + _ = vi.Delete(&ValidatorInfo{ShardId: 0, PublicKey: []byte("pk3")}) + _ = vi.Delete(&ValidatorInfo{ShardId: 1, PublicKey: []byte("pk0")}) + require.Len(t, vi.GetAllValidatorsInfo(), 4) + + _ = vi.Delete(v1) + require.Len(t, vi.GetAllValidatorsInfo(), 3) + require.Equal(t, []ValidatorInfoHandler{v0, v2}, vi.GetShardValidatorsInfoMap()[0]) + require.Equal(t, []ValidatorInfoHandler{v3}, vi.GetShardValidatorsInfoMap()[1]) + + _ = vi.Delete(v3) + require.Len(t, vi.GetAllValidatorsInfo(), 2) + require.Equal(t, []ValidatorInfoHandler{v0, v2}, vi.GetShardValidatorsInfoMap()[0]) +} + +func TestShardValidatorsInfoMap_Replace(t *testing.T) { + t.Parallel() + + vi := NewShardValidatorsInfoMap() + + v0 := &ValidatorInfo{ShardId: 0, PublicKey: []byte("pk0")} + v1 := &ValidatorInfo{ShardId: 0, PublicKey: []byte("pk1")} + + _ = vi.Add(v0) + _ = vi.Add(v1) + + err := vi.Replace(v0, &ValidatorInfo{ShardId: 1, PublicKey: []byte("pk2")}) + require.Error(t, err) + require.True(t, strings.Contains(err.Error(), ErrValidatorsDifferentShards.Error())) + require.Equal(t, []ValidatorInfoHandler{v0, v1}, vi.GetShardValidatorsInfoMap()[0]) + + v2 := &ValidatorInfo{ShardId: 0, PublicKey: []byte("pk2")} + err = vi.Replace(v0, v2) + require.Nil(t, err) + require.Equal(t, []ValidatorInfoHandler{v2, v1}, vi.GetShardValidatorsInfoMap()[0]) + + v3 := &ValidatorInfo{ShardId: 0, PublicKey: []byte("pk3")} + v4 := &ValidatorInfo{ShardId: 0, PublicKey: []byte("pk4")} + err = vi.Replace(v3, v4) + require.Error(t, err) + require.True(t, strings.Contains(err.Error(), ErrValidatorNotFound.Error())) + require.True(t, strings.Contains(err.Error(), hex.EncodeToString(v3.PublicKey))) + require.Equal(t, []ValidatorInfoHandler{v2, v1}, vi.GetShardValidatorsInfoMap()[0]) +} + +func TestShardValidatorsInfoMap_SetValidatorsInShard(t *testing.T) { + t.Parallel() + + vi := NewShardValidatorsInfoMap() + + v0 := &ValidatorInfo{ShardId: 0, PublicKey: []byte("pk0")} + _ = vi.Add(v0) + + v1 := &ValidatorInfo{ShardId: 0, PublicKey: []byte("pk1")} + v2 := &ValidatorInfo{ShardId: 0, PublicKey: []byte("pk2")} + v3 := &ValidatorInfo{ShardId: 1, PublicKey: []byte("pk3")} + shard0Validators := []ValidatorInfoHandler{v1, v2} + shard1Validators := []ValidatorInfoHandler{v3} + + err := vi.SetValidatorsInShard(1, shard0Validators) + require.Error(t, err) + require.True(t, strings.Contains(err.Error(), ErrValidatorsDifferentShards.Error())) + require.True(t, strings.Contains(err.Error(), hex.EncodeToString(v1.PublicKey))) + require.Equal(t, []ValidatorInfoHandler{v0}, vi.GetShardValidatorsInfoMap()[0]) + require.Empty(t, vi.GetShardValidatorsInfoMap()[1]) + + err = vi.SetValidatorsInShard(0, []ValidatorInfoHandler{v1, v2, v3}) + require.Error(t, err) + require.True(t, strings.Contains(err.Error(), ErrValidatorsDifferentShards.Error())) + require.True(t, 
strings.Contains(err.Error(), hex.EncodeToString(v3.PublicKey))) + require.Equal(t, []ValidatorInfoHandler{v0}, vi.GetShardValidatorsInfoMap()[0]) + require.Empty(t, vi.GetShardValidatorsInfoMap()[1]) + + err = vi.SetValidatorsInShard(0, shard0Validators) + require.Nil(t, err) + require.Equal(t, shard0Validators, vi.GetShardValidatorsInfoMap()[0]) + + err = vi.SetValidatorsInShard(1, shard1Validators) + require.Nil(t, err) + require.Equal(t, shard1Validators, vi.GetShardValidatorsInfoMap()[1]) +} + +func TestShardValidatorsInfoMap_GettersShouldReturnCopiesOfInternalData(t *testing.T) { + t.Parallel() + + vi := NewShardValidatorsInfoMap() + + v0 := &ValidatorInfo{ShardId: 0, PublicKey: []byte("pk0")} + v1 := &ValidatorInfo{ShardId: 1, PublicKey: []byte("pk1")} + + _ = vi.Add(v0) + _ = vi.Add(v1) + + validatorsMap := vi.GetShardValidatorsInfoMap() + delete(validatorsMap, 0) + validatorsMap[1][0].SetPublicKey([]byte("rnd")) + + validators := vi.GetAllValidatorsInfo() + validators = append(validators, &ValidatorInfo{ShardId: 1, PublicKey: []byte("pk3")}) + + validator := vi.GetValidator([]byte("pk0")) + require.False(t, validator == v0) // require not same pointer + validator.SetShardId(2) + + require.Len(t, vi.GetAllValidatorsInfo(), 2) + require.True(t, vi.GetShardValidatorsInfoMap()[0][0] == v0) // check by pointer + require.True(t, vi.GetShardValidatorsInfoMap()[1][0] == v1) // check by pointer + require.NotEqual(t, vi.GetAllValidatorsInfo(), validators) +} + +func TestShardValidatorsInfoMap_Concurrency(t *testing.T) { + t.Parallel() + + vi := NewShardValidatorsInfoMap() + + numValidatorsShard0 := 100 + numValidatorsShard1 := 50 + numValidators := numValidatorsShard0 + numValidatorsShard1 + + shard0Validators := createValidatorsInfo(0, numValidatorsShard0) + shard1Validators := createValidatorsInfo(1, numValidatorsShard1) + + firstHalfShard0 := shard0Validators[:numValidatorsShard0/2] + secondHalfShard0 := shard0Validators[numValidatorsShard0/2:] + + firstHalfShard1 := shard1Validators[:numValidatorsShard1/2] + secondHalfShard1 := shard1Validators[numValidatorsShard1/2:] + + wg := &sync.WaitGroup{} + + wg.Add(numValidators) + go addValidatorsInShardConcurrently(vi, shard0Validators, wg) + go addValidatorsInShardConcurrently(vi, shard1Validators, wg) + wg.Wait() + requireSameValidatorsDifferentOrder(t, vi.GetShardValidatorsInfoMap()[0], shard0Validators) + requireSameValidatorsDifferentOrder(t, vi.GetShardValidatorsInfoMap()[1], shard1Validators) + + wg.Add(numValidators / 2) + go deleteValidatorsConcurrently(vi, firstHalfShard0, wg) + go deleteValidatorsConcurrently(vi, firstHalfShard1, wg) + wg.Wait() + requireSameValidatorsDifferentOrder(t, vi.GetShardValidatorsInfoMap()[0], secondHalfShard0) + requireSameValidatorsDifferentOrder(t, vi.GetShardValidatorsInfoMap()[1], secondHalfShard1) + + wg.Add(numValidators / 2) + go replaceValidatorsConcurrently(vi, vi.GetShardValidatorsInfoMap()[0], firstHalfShard0, wg) + go replaceValidatorsConcurrently(vi, vi.GetShardValidatorsInfoMap()[1], firstHalfShard1, wg) + wg.Wait() + requireSameValidatorsDifferentOrder(t, vi.GetShardValidatorsInfoMap()[0], firstHalfShard0) + requireSameValidatorsDifferentOrder(t, vi.GetShardValidatorsInfoMap()[1], firstHalfShard1) + + wg.Add(2) + go func() { + _ = vi.SetValidatorsInShard(0, shard0Validators) + wg.Done() + }() + go func() { + _ = vi.SetValidatorsInShard(1, shard1Validators) + wg.Done() + }() + wg.Wait() + requireSameValidatorsDifferentOrder(t, vi.GetShardValidatorsInfoMap()[0], shard0Validators) + 
requireSameValidatorsDifferentOrder(t, vi.GetShardValidatorsInfoMap()[1], shard1Validators) +} + +func requireSameValidatorsDifferentOrder(t *testing.T, dest []ValidatorInfoHandler, src []ValidatorInfoHandler) { + require.Equal(t, len(dest), len(src)) + + for _, v := range src { + require.Contains(t, dest, v) + } +} + +func createValidatorsInfo(shardID uint32, numOfValidators int) []ValidatorInfoHandler { + ret := make([]ValidatorInfoHandler, 0, numOfValidators) + + for i := 0; i < numOfValidators; i++ { + ret = append(ret, &ValidatorInfo{ + ShardId: shardID, + PublicKey: []byte(strconv.Itoa(int(shardID)) + "pubKey" + strconv.Itoa(i)), + }) + } + + return ret +} + +func addValidatorsInShardConcurrently( + vi ShardValidatorsInfoMapHandler, + validators []ValidatorInfoHandler, + wg *sync.WaitGroup, +) { + for _, validator := range validators { + go func(val ValidatorInfoHandler) { + _ = vi.Add(val) + wg.Done() + }(validator) + } +} + +func deleteValidatorsConcurrently( + vi ShardValidatorsInfoMapHandler, + validators []ValidatorInfoHandler, + wg *sync.WaitGroup, +) { + for _, validator := range validators { + go func(val ValidatorInfoHandler) { + _ = vi.Delete(val) + wg.Done() + }(validator) + } +} + +func replaceValidatorsConcurrently( + vi ShardValidatorsInfoMapHandler, + oldValidators []ValidatorInfoHandler, + newValidators []ValidatorInfoHandler, + wg *sync.WaitGroup, +) { + for idx := range oldValidators { + go func(old ValidatorInfoHandler, new ValidatorInfoHandler) { + _ = vi.Replace(old, new) + wg.Done() + }(oldValidators[idx], newValidators[idx]) + } +} diff --git a/statusHandler/statusMetricsProvider.go b/statusHandler/statusMetricsProvider.go index c4211e889a2..99f15ad1bf6 100644 --- a/statusHandler/statusMetricsProvider.go +++ b/statusHandler/statusMetricsProvider.go @@ -295,7 +295,6 @@ func (sm *statusMetrics) EnableEpochsMetrics() (map[string]interface{}, error) { enableEpochsMetrics[common.MetricDelegationSmartContractEnableEpoch] = sm.uint64Metrics[common.MetricDelegationSmartContractEnableEpoch] enableEpochsMetrics[common.MetricIncrementSCRNonceInMultiTransferEnableEpoch] = sm.uint64Metrics[common.MetricIncrementSCRNonceInMultiTransferEnableEpoch] enableEpochsMetrics[common.MetricBalanceWaitingListsEnableEpoch] = sm.uint64Metrics[common.MetricBalanceWaitingListsEnableEpoch] - enableEpochsMetrics[common.MetricWaitingListFixEnableEpoch] = sm.uint64Metrics[common.MetricWaitingListFixEnableEpoch] enableEpochsMetrics[common.MetricSetGuardianEnableEpoch] = sm.uint64Metrics[common.MetricSetGuardianEnableEpoch] numNodesChangeConfig := sm.uint64Metrics[common.MetricMaxNodesChangeEnableEpoch+"_count"] diff --git a/statusHandler/statusMetricsProvider_test.go b/statusHandler/statusMetricsProvider_test.go index 12831f384c6..5572b1754f8 100644 --- a/statusHandler/statusMetricsProvider_test.go +++ b/statusHandler/statusMetricsProvider_test.go @@ -315,7 +315,6 @@ func TestStatusMetrics_EnableEpochMetrics(t *testing.T) { sm.SetUInt64Value(common.MetricDelegationSmartContractEnableEpoch, 2) sm.SetUInt64Value(common.MetricIncrementSCRNonceInMultiTransferEnableEpoch, 3) sm.SetUInt64Value(common.MetricBalanceWaitingListsEnableEpoch, 4) - sm.SetUInt64Value(common.MetricWaitingListFixEnableEpoch, 1) sm.SetUInt64Value(common.MetricSetGuardianEnableEpoch, 3) maxNodesChangeConfig := []map[string]uint64{ @@ -365,7 +364,6 @@ func TestStatusMetrics_EnableEpochMetrics(t *testing.T) { common.MetricDelegationSmartContractEnableEpoch: uint64(2), common.MetricIncrementSCRNonceInMultiTransferEnableEpoch: 
uint64(3), common.MetricBalanceWaitingListsEnableEpoch: uint64(4), - common.MetricWaitingListFixEnableEpoch: uint64(1), common.MetricSetGuardianEnableEpoch: uint64(3), common.MetricMaxNodesChangeEnableEpoch: []map[string]interface{}{ diff --git a/storage/constants.go b/storage/constants.go index b78021138c7..9cd37571521 100644 --- a/storage/constants.go +++ b/storage/constants.go @@ -1,14 +1,14 @@ package storage import ( - "github.com/multiversx/mx-chain-storage-go/storageUnit" + "github.com/multiversx/mx-chain-storage-go/common" ) // MaxRetriesToCreateDB represents the maximum number of times to try to create DB if it failed -const MaxRetriesToCreateDB = storageUnit.MaxRetriesToCreateDB +const MaxRetriesToCreateDB = common.MaxRetriesToCreateDB // SleepTimeBetweenCreateDBRetries represents the number of seconds to sleep between DB creates -const SleepTimeBetweenCreateDBRetries = storageUnit.SleepTimeBetweenCreateDBRetries +const SleepTimeBetweenCreateDBRetries = common.SleepTimeBetweenCreateDBRetries // PathShardPlaceholder represents the placeholder for the shard ID in paths const PathShardPlaceholder = "[S]" diff --git a/storage/factory/dbConfigHandler.go b/storage/factory/dbConfigHandler.go index 2e5a611f293..468c42a2ee7 100644 --- a/storage/factory/dbConfigHandler.go +++ b/storage/factory/dbConfigHandler.go @@ -11,8 +11,12 @@ import ( ) const ( - dbConfigFileName = "config.toml" - defaultType = "LvlDBSerial" + dbConfigFileName = "config.toml" + defaultType = "LvlDBSerial" + defaultBatchDelaySeconds = 2 + defaultMaxBatchSize = 100 + defaultMaxOpenFiles = 10 + defaultUseTmpAsFilePath = false ) var ( @@ -20,23 +24,13 @@ var ( ) type dbConfigHandler struct { - dbType string - batchDelaySeconds int - maxBatchSize int - maxOpenFiles int - shardIDProviderType string - numShards int32 + conf config.DBConfig } // NewDBConfigHandler will create a new db config handler instance func NewDBConfigHandler(config config.DBConfig) *dbConfigHandler { return &dbConfigHandler{ - dbType: config.Type, - batchDelaySeconds: config.BatchDelaySeconds, - maxBatchSize: config.MaxBatchSize, - maxOpenFiles: config.MaxOpenFiles, - shardIDProviderType: config.ShardIDProviderType, - numShards: config.NumShards, + conf: config, } } @@ -56,9 +50,10 @@ func (dh *dbConfigHandler) GetDBConfig(path string) (*config.DBConfig, error) { if !empty { dbConfig := &config.DBConfig{ Type: defaultType, - BatchDelaySeconds: dh.batchDelaySeconds, - MaxBatchSize: dh.maxBatchSize, - MaxOpenFiles: dh.maxOpenFiles, + BatchDelaySeconds: dh.conf.BatchDelaySeconds, + MaxBatchSize: dh.conf.MaxBatchSize, + MaxOpenFiles: dh.conf.MaxOpenFiles, + UseTmpAsFilePath: dh.conf.UseTmpAsFilePath, } log.Debug("GetDBConfig: loaded default db config", @@ -68,20 +63,11 @@ func (dh *dbConfigHandler) GetDBConfig(path string) (*config.DBConfig, error) { return dbConfig, nil } - dbConfig := &config.DBConfig{ - Type: dh.dbType, - BatchDelaySeconds: dh.batchDelaySeconds, - MaxBatchSize: dh.maxBatchSize, - MaxOpenFiles: dh.maxOpenFiles, - ShardIDProviderType: dh.shardIDProviderType, - NumShards: dh.numShards, - } - log.Debug("GetDBConfig: loaded db config from main config file", - "configuration", fmt.Sprintf("%+v", dbConfig), + "configuration", fmt.Sprintf("%+v", dh.conf), ) - return dbConfig, nil + return &dh.conf, nil } func readCorrectConfigurationFromToml(dbConfig *config.DBConfig, filePath string) error { diff --git a/storage/factory/export_test.go b/storage/factory/export_test.go index 177bc97358c..854f52e9cc3 100644 --- a/storage/factory/export_test.go 
+++ b/storage/factory/export_test.go @@ -25,3 +25,8 @@ func NewPersisterCreator(config config.DBConfig) *persisterCreator { func (pc *persisterCreator) CreateShardIDProvider() (storage.ShardIDProvider, error) { return pc.createShardIDProvider() } + +// GetTmpFilePath - +func GetTmpFilePath(path string) (string, error) { + return getTmpFilePath(path) +} diff --git a/storage/factory/openStorage.go b/storage/factory/openStorage.go index 80dae5bc39c..0effada6f04 100644 --- a/storage/factory/openStorage.go +++ b/storage/factory/openStorage.go @@ -3,7 +3,6 @@ package factory import ( "fmt" "path/filepath" - "time" "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-go/config" @@ -56,8 +55,7 @@ func (o *openStorageUnits) GetMostRecentStorageUnit(dbConfig config.DBConfig) (s return nil, err } - dbConfigHandler := NewDBConfigHandler(dbConfig) - persisterFactory, err := NewPersisterFactory(dbConfigHandler) + persisterFactory, err := NewPersisterFactory(dbConfig) if err != nil { return nil, err } @@ -74,7 +72,7 @@ func (o *openStorageUnits) GetMostRecentStorageUnit(dbConfig config.DBConfig) (s persisterPath := o.getPersisterPath(pathWithoutShard, mostRecentShard, dbConfig) - persister, err := createDB(persisterFactory, persisterPath) + persister, err := persisterFactory.CreateWithRetries(persisterPath) if err != nil { return nil, err } @@ -112,13 +110,12 @@ func (o *openStorageUnits) OpenDB(dbConfig config.DBConfig, shardID uint32, epoc parentDir := o.latestStorageDataProvider.GetParentDirectory() pathWithoutShard := o.getPathWithoutShard(parentDir, epoch) persisterPath := o.getPersisterPath(pathWithoutShard, fmt.Sprintf("%d", shardID), dbConfig) - dbConfigHandler := NewDBConfigHandler(dbConfig) - persisterFactory, err := NewPersisterFactory(dbConfigHandler) + persisterFactory, err := NewPersisterFactory(dbConfig) if err != nil { return nil, err } - persister, err := createDB(persisterFactory, persisterPath) + persister, err := persisterFactory.CreateWithRetries(persisterPath) if err != nil { return nil, err } @@ -131,21 +128,6 @@ func (o *openStorageUnits) OpenDB(dbConfig config.DBConfig, shardID uint32, epoc return storageunit.NewStorageUnit(lruCache, persister) } -func createDB(persisterFactory *PersisterFactory, persisterPath string) (storage.Persister, error) { - var persister storage.Persister - var err error - for i := 0; i < storage.MaxRetriesToCreateDB; i++ { - persister, err = persisterFactory.Create(persisterPath) - if err == nil { - return persister, nil - } - log.Warn("Create Persister failed", "path", persisterPath, "error", err) - //TODO: extract this in a parameter and inject it - time.Sleep(storage.SleepTimeBetweenCreateDBRetries) - } - return nil, err -} - func (o *openStorageUnits) getMostUpToDateDirectory( dbConfig config.DBConfig, pathWithoutShard string, diff --git a/storage/factory/persisterCreator.go b/storage/factory/persisterCreator.go index 1357fc37ae4..0d17287815e 100644 --- a/storage/factory/persisterCreator.go +++ b/storage/factory/persisterCreator.go @@ -5,39 +5,29 @@ import ( "github.com/multiversx/mx-chain-go/storage" "github.com/multiversx/mx-chain-go/storage/database" "github.com/multiversx/mx-chain-go/storage/storageunit" + "github.com/multiversx/mx-chain-storage-go/factory" ) const minNumShards = 2 // persisterCreator is the factory which will handle creating new persisters type persisterCreator struct { - dbType string - batchDelaySeconds int - maxBatchSize int - maxOpenFiles int - shardIDProviderType string - numShards int32 + 
conf config.DBConfig } func newPersisterCreator(config config.DBConfig) *persisterCreator { return &persisterCreator{ - dbType: config.Type, - batchDelaySeconds: config.BatchDelaySeconds, - maxBatchSize: config.MaxBatchSize, - maxOpenFiles: config.MaxOpenFiles, - shardIDProviderType: config.ShardIDProviderType, - numShards: config.NumShards, + conf: config, } } // Create will create the persister for the provided path -// TODO: refactor to use max tries mechanism func (pc *persisterCreator) Create(path string) (storage.Persister, error) { if len(path) == 0 { return nil, storage.ErrInvalidFilePath } - if pc.numShards < minNumShards { + if pc.conf.NumShards < minNumShards { return pc.CreateBasePersister(path) } @@ -50,23 +40,23 @@ func (pc *persisterCreator) Create(path string) (storage.Persister, error) { // CreateBasePersister will create base the persister for the provided path func (pc *persisterCreator) CreateBasePersister(path string) (storage.Persister, error) { - var dbType = storageunit.DBType(pc.dbType) - switch dbType { - case storageunit.LvlDB: - return database.NewLevelDB(path, pc.batchDelaySeconds, pc.maxBatchSize, pc.maxOpenFiles) - case storageunit.LvlDBSerial: - return database.NewSerialDB(path, pc.batchDelaySeconds, pc.maxBatchSize, pc.maxOpenFiles) - case storageunit.MemoryDB: - return database.NewMemDB(), nil - default: - return nil, storage.ErrNotSupportedDBType + var dbType = storageunit.DBType(pc.conf.Type) + + argsDB := factory.ArgDB{ + DBType: dbType, + Path: path, + BatchDelaySeconds: pc.conf.BatchDelaySeconds, + MaxBatchSize: pc.conf.MaxBatchSize, + MaxOpenFiles: pc.conf.MaxOpenFiles, } + + return storageunit.NewDB(argsDB) } func (pc *persisterCreator) createShardIDProvider() (storage.ShardIDProvider, error) { - switch storageunit.ShardIDProviderType(pc.shardIDProviderType) { + switch storageunit.ShardIDProviderType(pc.conf.ShardIDProviderType) { case storageunit.BinarySplit: - return database.NewShardIDProvider(pc.numShards) + return database.NewShardIDProvider(pc.conf.NumShards) default: return nil, storage.ErrNotSupportedShardIDProviderType } diff --git a/storage/factory/persisterCreator_test.go b/storage/factory/persisterCreator_test.go index a0fdef7e1ef..b1a4cc63796 100644 --- a/storage/factory/persisterCreator_test.go +++ b/storage/factory/persisterCreator_test.go @@ -38,6 +38,19 @@ func TestPersisterCreator_Create(t *testing.T) { require.Equal(t, storage.ErrInvalidFilePath, err) }) + t.Run("use tmp as file path", func(t *testing.T) { + t.Parallel() + + conf := createDefaultDBConfig() + conf.UseTmpAsFilePath = true + + pc := factory.NewPersisterCreator(conf) + + p, err := pc.Create("path1") + require.Nil(t, err) + require.NotNil(t, p) + }) + t.Run("should create non sharded persister", func(t *testing.T) { t.Parallel() diff --git a/storage/factory/persisterFactory.go b/storage/factory/persisterFactory.go index a1305ec2184..321ddf59118 100644 --- a/storage/factory/persisterFactory.go +++ b/storage/factory/persisterFactory.go @@ -1,29 +1,51 @@ package factory import ( - "github.com/multiversx/mx-chain-core-go/core/check" + "os" + "path" + "time" + + "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/storage" "github.com/multiversx/mx-chain-go/storage/disabled" ) -// PersisterFactory is the factory which will handle creating new databases -type PersisterFactory struct { +// persisterFactory is the factory which will handle creating new databases +type persisterFactory struct { dbConfigHandler storage.DBConfigHandler } -// 
NewPersisterFactory will return a new instance of a PersisterFactory -func NewPersisterFactory(dbConfigHandler storage.DBConfigHandler) (*PersisterFactory, error) { - if check.IfNil(dbConfigHandler) { - return nil, storage.ErrNilDBConfigHandler - } +// NewPersisterFactory will return a new instance of persister factory +func NewPersisterFactory(config config.DBConfig) (*persisterFactory, error) { + dbConfigHandler := NewDBConfigHandler(config) - return &PersisterFactory{ + return &persisterFactory{ dbConfigHandler: dbConfigHandler, }, nil } +// CreateWithRetries will return a new instance of a DB with a given path +// It will try to create db multiple times +func (pf *persisterFactory) CreateWithRetries(path string) (storage.Persister, error) { + var persister storage.Persister + var err error + + for i := 0; i < storage.MaxRetriesToCreateDB; i++ { + persister, err = pf.Create(path) + if err == nil { + return persister, nil + } + log.Warn("Create Persister failed", "path", path, "error", err) + + // TODO: extract this in a parameter and inject it + time.Sleep(storage.SleepTimeBetweenCreateDBRetries) + } + + return nil, err +} + // Create will return a new instance of a DB with a given path -func (pf *PersisterFactory) Create(path string) (storage.Persister, error) { +func (pf *persisterFactory) Create(path string) (storage.Persister, error) { if len(path) == 0 { return nil, storage.ErrInvalidFilePath } @@ -33,6 +55,15 @@ func (pf *PersisterFactory) Create(path string) (storage.Persister, error) { return nil, err } + if dbConfig.UseTmpAsFilePath { + filePath, err := getTmpFilePath(path) + if err != nil { + return nil, err + } + + path = filePath + } + pc := newPersisterCreator(*dbConfig) persister, err := pc.Create(path) @@ -49,11 +80,16 @@ func (pf *PersisterFactory) Create(path string) (storage.Persister, error) { } // CreateDisabled will return a new disabled persister -func (pf *PersisterFactory) CreateDisabled() storage.Persister { +func (pf *persisterFactory) CreateDisabled() storage.Persister { return disabled.NewErrorDisabledPersister() } +func getTmpFilePath(p string) (string, error) { + _, file := path.Split(p) + return os.MkdirTemp("", file) +} + // IsInterfaceNil returns true if there is no value under the interface -func (pf *PersisterFactory) IsInterfaceNil() bool { +func (pf *persisterFactory) IsInterfaceNil() bool { return pf == nil } diff --git a/storage/factory/persisterFactory_test.go b/storage/factory/persisterFactory_test.go index 208542a665b..babf32f660d 100644 --- a/storage/factory/persisterFactory_test.go +++ b/storage/factory/persisterFactory_test.go @@ -3,11 +3,15 @@ package factory_test import ( "fmt" "os" + "path" + "strings" "testing" + "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-go/storage" "github.com/multiversx/mx-chain-go/storage/factory" "github.com/multiversx/mx-chain-go/storage/storageunit" + "github.com/multiversx/mx-chain-storage-go/common" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -15,8 +19,7 @@ import ( func TestNewPersisterFactory(t *testing.T) { t.Parallel() - dbConfigHandler := factory.NewDBConfigHandler(createDefaultDBConfig()) - pf, err := factory.NewPersisterFactory(dbConfigHandler) + pf, err := factory.NewPersisterFactory(createDefaultDBConfig()) require.NotNil(t, pf) require.Nil(t, err) } @@ -27,25 +30,84 @@ func TestPersisterFactory_Create(t *testing.T) { t.Run("invalid file path, should fail", func(t *testing.T) { t.Parallel() - dbConfigHandler := 
factory.NewDBConfigHandler(createDefaultDBConfig()) - pf, _ := factory.NewPersisterFactory(dbConfigHandler) + pf, _ := factory.NewPersisterFactory(createDefaultDBConfig()) p, err := pf.Create("") require.Nil(t, p) require.Equal(t, storage.ErrInvalidFilePath, err) }) + t.Run("with tmp file path, should work", func(t *testing.T) { + t.Parallel() + + conf := createDefaultDBConfig() + conf.UseTmpAsFilePath = true + + pf, _ := factory.NewPersisterFactory(conf) + + dir := t.TempDir() + + p, err := pf.Create(dir) + require.NotNil(t, p) + require.Nil(t, err) + + // config.toml will be created in tmp path, but cannot be easily checked since + // the file path is not created deterministically + + // should not find in the dir created initially. + _, err = os.Stat(dir + "/config.toml") + require.Error(t, err) + }) + t.Run("should work", func(t *testing.T) { t.Parallel() - dbConfigHandler := factory.NewDBConfigHandler(createDefaultDBConfig()) - pf, _ := factory.NewPersisterFactory(dbConfigHandler) + pf, _ := factory.NewPersisterFactory(createDefaultDBConfig()) dir := t.TempDir() p, err := pf.Create(dir) require.NotNil(t, p) require.Nil(t, err) + + // check config.toml file exists + _, err = os.Stat(dir + "/config.toml") + require.Nil(t, err) + }) +} + +func TestPersisterFactory_CreateWithRetries(t *testing.T) { + t.Parallel() + + t.Run("wrong config should error", func(t *testing.T) { + t.Parallel() + + path := "TEST" + dbConfig := createDefaultDBConfig() + dbConfig.Type = "invalid type" + + persisterFactory, err := factory.NewPersisterFactory(dbConfig) + assert.Nil(t, err) + + db, err := persisterFactory.CreateWithRetries(path) + assert.True(t, check.IfNil(db)) + assert.Equal(t, common.ErrNotSupportedDBType, err) + }) + + t.Run("should work", func(t *testing.T) { + t.Parallel() + + path := path.Join(t.TempDir(), "TEST") + dbConfig := createDefaultDBConfig() + dbConfig.FilePath = path + + persisterFactory, err := factory.NewPersisterFactory(dbConfig) + assert.Nil(t, err) + + db, err := persisterFactory.CreateWithRetries(path) + assert.False(t, check.IfNil(db)) + assert.Nil(t, err) + _ = db.Close() }) } @@ -57,8 +119,7 @@ func TestPersisterFactory_Create_ConfigSaveToFilePath(t *testing.T) { dbConfig := createDefaultBasePersisterConfig() dbConfig.Type = string(storageunit.LvlDB) - dbConfigHandler := factory.NewDBConfigHandler(dbConfig) - pf, _ := factory.NewPersisterFactory(dbConfigHandler) + pf, _ := factory.NewPersisterFactory(dbConfig) dir := t.TempDir() path := dir + "storer/" @@ -77,8 +138,7 @@ func TestPersisterFactory_Create_ConfigSaveToFilePath(t *testing.T) { dbConfig := createDefaultBasePersisterConfig() dbConfig.Type = string(storageunit.LvlDBSerial) - dbConfigHandler := factory.NewDBConfigHandler(dbConfig) - pf, _ := factory.NewPersisterFactory(dbConfigHandler) + pf, _ := factory.NewPersisterFactory(dbConfig) dir := t.TempDir() path := dir + "storer/" @@ -97,8 +157,7 @@ func TestPersisterFactory_Create_ConfigSaveToFilePath(t *testing.T) { dbConfig := createDefaultBasePersisterConfig() dbConfig.Type = string(storageunit.MemoryDB) - dbConfigHandler := factory.NewDBConfigHandler(dbConfig) - pf, _ := factory.NewPersisterFactory(dbConfigHandler) + pf, _ := factory.NewPersisterFactory(dbConfig) dir := t.TempDir() path := dir + "storer/" @@ -117,8 +176,7 @@ func TestPersisterFactory_Create_ConfigSaveToFilePath(t *testing.T) { dbConfig := createDefaultBasePersisterConfig() dbConfig.Type = string(storageunit.MemoryDB) - dbConfigHandler := factory.NewDBConfigHandler(dbConfig) - pf, _ := 
factory.NewPersisterFactory(dbConfigHandler) + pf, _ := factory.NewPersisterFactory(dbConfig) dir := t.TempDir() path := dir + "storer/" @@ -135,8 +193,7 @@ func TestPersisterFactory_Create_ConfigSaveToFilePath(t *testing.T) { func TestPersisterFactory_CreateDisabled(t *testing.T) { t.Parallel() - dbConfigHandler := factory.NewDBConfigHandler(createDefaultDBConfig()) - factoryInstance, err := factory.NewPersisterFactory(dbConfigHandler) + factoryInstance, err := factory.NewPersisterFactory(createDefaultDBConfig()) require.Nil(t, err) persisterInstance := factoryInstance.CreateDisabled() @@ -147,10 +204,28 @@ func TestPersisterFactory_CreateDisabled(t *testing.T) { func TestPersisterFactory_IsInterfaceNil(t *testing.T) { t.Parallel() - var pf *factory.PersisterFactory - require.True(t, pf.IsInterfaceNil()) - - dbConfigHandler := factory.NewDBConfigHandler(createDefaultDBConfig()) - pf, _ = factory.NewPersisterFactory(dbConfigHandler) + pf, _ := factory.NewPersisterFactory(createDefaultDBConfig()) require.False(t, pf.IsInterfaceNil()) } + +func TestGetTmpFilePath(t *testing.T) { + t.Parallel() + + pathSeparator := "/" + + tmpDir := os.TempDir() + tmpBasePath := path.Join(tmpDir, pathSeparator) + + tmpPath, err := factory.GetTmpFilePath("aaaa/bbbb/cccc") + require.Nil(t, err) + require.True(t, strings.Contains(tmpPath, path.Join(tmpBasePath, "cccc"))) + + tmpPath, _ = factory.GetTmpFilePath("aaaa") + require.True(t, strings.Contains(tmpPath, path.Join(tmpBasePath, "aaaa"))) + + tmpPath, _ = factory.GetTmpFilePath("") + require.True(t, strings.Contains(tmpPath, path.Join(tmpBasePath, ""))) + + tmpPath, _ = factory.GetTmpFilePath("/") + require.True(t, strings.Contains(tmpPath, path.Join(tmpBasePath, ""))) +} diff --git a/storage/factory/storageServiceFactory.go b/storage/factory/storageServiceFactory.go index f316bfec7d7..c153e6b2cc8 100644 --- a/storage/factory/storageServiceFactory.go +++ b/storage/factory/storageServiceFactory.go @@ -27,6 +27,7 @@ var log = logger.GetOrCreate("storage/factory") const ( minimumNumberOfActivePersisters = 1 minimumNumberOfEpochsToKeep = 2 + emptyDBPathSuffix = "" ) // StorageServiceType defines the type of StorageService @@ -134,11 +135,8 @@ func checkArgs(args StorageServiceFactoryArgs) error { return nil } -// TODO: refactor this function, split it into multiple ones -func (psf *StorageServiceFactory) createAndAddBaseStorageUnits( +func (psf *StorageServiceFactory) createAndAddTxStorageUnits( store dataRetriever.StorageService, - customDatabaseRemover storage.CustomDatabaseRemoverHandler, - shardID string, ) error { disabledCustomDatabaseRemover := disabled.NewDisabledCustomDatabaseRemover() @@ -182,6 +180,21 @@ func (psf *StorageServiceFactory) createAndAddBaseStorageUnits( } store.AddStorer(dataRetriever.ReceiptsUnit, receiptsUnit) + return nil +} + +func (psf *StorageServiceFactory) createAndAddBaseStorageUnits( + store dataRetriever.StorageService, + customDatabaseRemover storage.CustomDatabaseRemoverHandler, + shardID string, +) error { + disabledCustomDatabaseRemover := disabled.NewDisabledCustomDatabaseRemover() + + err := psf.createAndAddTxStorageUnits(store) + if err != nil { + return err + } + scheduledSCRsUnitArgs, err := psf.createPruningStorerArgs(psf.generalConfig.ScheduledSCRsStorage, disabledCustomDatabaseRemover) if err != nil { return err @@ -222,22 +235,7 @@ func (psf *StorageServiceFactory) createAndAddBaseStorageUnits( } store.AddStorer(dataRetriever.MetaBlockUnit, metaBlockUnit) - // metaHdrHashNonce is static - 
metaHdrHashNonceUnitConfig := GetDBFromConfig(psf.generalConfig.MetaHdrNonceHashStorage.DB) - dbPath := psf.pathManager.PathForStatic(shardID, psf.generalConfig.MetaHdrNonceHashStorage.DB.FilePath) - metaHdrHashNonceUnitConfig.FilePath = dbPath - - dbConfigHandlerInstance := NewDBConfigHandler(psf.generalConfig.MetaHdrNonceHashStorage.DB) - metaHdrHashNoncePersisterCreator, err := NewPersisterFactory(dbConfigHandlerInstance) - if err != nil { - return err - } - - metaHdrHashNonceUnit, err := storageunit.NewStorageUnitFromConf( - GetCacherFromConfig(psf.generalConfig.MetaHdrNonceHashStorage.Cache), - metaHdrHashNonceUnitConfig, - metaHdrHashNoncePersisterCreator, - ) + metaHdrHashNonceUnit, err := psf.createStaticStorageUnit(psf.generalConfig.MetaHdrNonceHashStorage, shardID, emptyDBPathSuffix) if err != nil { return fmt.Errorf("%w for MetaHdrNonceHashStorage", err) } @@ -259,22 +257,8 @@ func (psf *StorageServiceFactory) createAndAddBaseStorageUnits( } store.AddStorer(dataRetriever.UserAccountsUnit, userAccountsUnit) - statusMetricsDbConfig := GetDBFromConfig(psf.generalConfig.StatusMetricsStorage.DB) shardId := core.GetShardIDString(psf.shardCoordinator.SelfId()) - dbPath = psf.pathManager.PathForStatic(shardId, psf.generalConfig.StatusMetricsStorage.DB.FilePath) - statusMetricsDbConfig.FilePath = dbPath - - dbConfigHandlerInstance = NewDBConfigHandler(psf.generalConfig.StatusMetricsStorage.DB) - statusMetricsPersisterCreator, err := NewPersisterFactory(dbConfigHandlerInstance) - if err != nil { - return err - } - - statusMetricsStorageUnit, err := storageunit.NewStorageUnitFromConf( - GetCacherFromConfig(psf.generalConfig.StatusMetricsStorage.Cache), - statusMetricsDbConfig, - statusMetricsPersisterCreator, - ) + statusMetricsStorageUnit, err := psf.createStaticStorageUnit(psf.generalConfig.StatusMetricsStorage, shardId, emptyDBPathSuffix) if err != nil { return fmt.Errorf("%w for StatusMetricsStorage", err) } @@ -289,6 +273,27 @@ func (psf *StorageServiceFactory) createAndAddBaseStorageUnits( return nil } +func (psf *StorageServiceFactory) createStaticStorageUnit( + storageConf config.StorageConfig, + shardID string, + dbPathSuffix string, +) (*storageunit.Unit, error) { + storageUnitDBConf := GetDBFromConfig(storageConf.DB) + dbPath := psf.pathManager.PathForStatic(shardID, storageConf.DB.FilePath) + dbPathSuffix + storageUnitDBConf.FilePath = dbPath + + persisterCreator, err := NewPersisterFactory(storageConf.DB) + if err != nil { + return nil, err + } + + return storageunit.NewStorageUnitFromConf( + GetCacherFromConfig(storageConf.Cache), + storageUnitDBConf, + persisterCreator, + ) +} + // CreateForShard will return the storage service which contains all storers needed for a shard func (psf *StorageServiceFactory) CreateForShard() (dataRetriever.StorageService, error) { // TODO: if there will be a differentiation between the creation or opening of a DB, the DBs could be destroyed on a defer @@ -301,23 +306,8 @@ func (psf *StorageServiceFactory) CreateForShard() (dataRetriever.StorageService } shardID := core.GetShardIDString(psf.shardCoordinator.SelfId()) - - // shardHdrHashNonce storer is static - shardHdrHashNonceConfig := GetDBFromConfig(psf.generalConfig.ShardHdrNonceHashStorage.DB) - dbPath := psf.pathManager.PathForStatic(shardID, psf.generalConfig.ShardHdrNonceHashStorage.DB.FilePath) + shardID - shardHdrHashNonceConfig.FilePath = dbPath - - dbConfigHandlerInstance := NewDBConfigHandler(psf.generalConfig.ShardHdrNonceHashStorage.DB) - shardHdrHashNoncePersisterCreator, err := 
NewPersisterFactory(dbConfigHandlerInstance) - if err != nil { - return nil, err - } - - shardHdrHashNonceUnit, err := storageunit.NewStorageUnitFromConf( - GetCacherFromConfig(psf.generalConfig.ShardHdrNonceHashStorage.Cache), - shardHdrHashNonceConfig, - shardHdrHashNoncePersisterCreator, - ) + dbPathSuffix := shardID + shardHdrHashNonceUnit, err := psf.createStaticStorageUnit(psf.generalConfig.ShardHdrNonceHashStorage, shardID, dbPathSuffix) if err != nil { return nil, fmt.Errorf("%w for ShardHdrNonceHashStorage", err) } @@ -382,24 +372,10 @@ func (psf *StorageServiceFactory) CreateForMeta() (dataRetriever.StorageService, shardHdrHashNonceUnits := make([]*storageunit.Unit, psf.shardCoordinator.NumberOfShards()) for i := uint32(0); i < psf.shardCoordinator.NumberOfShards(); i++ { - shardHdrHashNonceConfig := GetDBFromConfig(psf.generalConfig.ShardHdrNonceHashStorage.DB) shardID = core.GetShardIDString(core.MetachainShardId) - dbPath := psf.pathManager.PathForStatic(shardID, psf.generalConfig.ShardHdrNonceHashStorage.DB.FilePath) + fmt.Sprintf("%d", i) - shardHdrHashNonceConfig.FilePath = dbPath - - dbConfigHandlerInstance := NewDBConfigHandler(psf.generalConfig.ShardHdrNonceHashStorage.DB) - shardHdrHashNoncePersisterCreator, errLoop := NewPersisterFactory(dbConfigHandlerInstance) - if errLoop != nil { - return nil, errLoop - } - - shardHdrHashNonceUnits[i], errLoop = storageunit.NewStorageUnitFromConf( - GetCacherFromConfig(psf.generalConfig.ShardHdrNonceHashStorage.Cache), - shardHdrHashNonceConfig, - shardHdrHashNoncePersisterCreator, - ) - if errLoop != nil { - return nil, fmt.Errorf("%w for ShardHdrNonceHashStorage on shard %d", errLoop, i) + shardHdrHashNonceUnits[i], err = psf.createStaticStorageUnit(psf.generalConfig.ShardHdrNonceHashStorage, shardID, fmt.Sprintf("%d", i)) + if err != nil { + return nil, fmt.Errorf("%w for ShardHdrNonceHashStorage on shard %d", err, i) } } @@ -527,69 +503,21 @@ func (psf *StorageServiceFactory) setUpDbLookupExtensions(chainStorer *dataRetri chainStorer.AddStorer(dataRetriever.MiniblocksMetadataUnit, miniblocksMetadataPruningStorer) - // Create the miniblocksHashByTxHash (STATIC) storer - miniblockHashByTxHashConfig := psf.generalConfig.DbLookupExtensions.MiniblockHashByTxHashStorageConfig - miniblockHashByTxHashDbConfig := GetDBFromConfig(miniblockHashByTxHashConfig.DB) - miniblockHashByTxHashDbConfig.FilePath = psf.pathManager.PathForStatic(shardID, miniblockHashByTxHashConfig.DB.FilePath) - miniblockHashByTxHashCacherConfig := GetCacherFromConfig(miniblockHashByTxHashConfig.Cache) - - dbConfigHandlerInstance := NewDBConfigHandler(miniblockHashByTxHashConfig.DB) - miniblockHashByTxHashPersisterCreator, err := NewPersisterFactory(dbConfigHandlerInstance) - if err != nil { - return err - } - - miniblockHashByTxHashUnit, err := storageunit.NewStorageUnitFromConf( - miniblockHashByTxHashCacherConfig, - miniblockHashByTxHashDbConfig, - miniblockHashByTxHashPersisterCreator, - ) + miniblockHashByTxHashUnit, err := psf.createStaticStorageUnit(psf.generalConfig.DbLookupExtensions.MiniblockHashByTxHashStorageConfig, shardID, emptyDBPathSuffix) if err != nil { return fmt.Errorf("%w for DbLookupExtensions.MiniblockHashByTxHashStorageConfig", err) } chainStorer.AddStorer(dataRetriever.MiniblockHashByTxHashUnit, miniblockHashByTxHashUnit) - // Create the blockHashByRound (STATIC) storer - blockHashByRoundConfig := psf.generalConfig.DbLookupExtensions.RoundHashStorageConfig - blockHashByRoundDBConfig := GetDBFromConfig(blockHashByRoundConfig.DB) - 
blockHashByRoundDBConfig.FilePath = psf.pathManager.PathForStatic(shardID, blockHashByRoundConfig.DB.FilePath) - blockHashByRoundCacherConfig := GetCacherFromConfig(blockHashByRoundConfig.Cache) - - dbConfigHandlerInstance = NewDBConfigHandler(blockHashByRoundConfig.DB) - blockHashByRoundPersisterCreator, err := NewPersisterFactory(dbConfigHandlerInstance) - if err != nil { - return err - } - - blockHashByRoundUnit, err := storageunit.NewStorageUnitFromConf( - blockHashByRoundCacherConfig, - blockHashByRoundDBConfig, - blockHashByRoundPersisterCreator, - ) + blockHashByRoundUnit, err := psf.createStaticStorageUnit(psf.generalConfig.DbLookupExtensions.RoundHashStorageConfig, shardID, emptyDBPathSuffix) if err != nil { return fmt.Errorf("%w for DbLookupExtensions.RoundHashStorageConfig", err) } chainStorer.AddStorer(dataRetriever.RoundHdrHashDataUnit, blockHashByRoundUnit) - // Create the epochByHash (STATIC) storer - epochByHashConfig := psf.generalConfig.DbLookupExtensions.EpochByHashStorageConfig - epochByHashDbConfig := GetDBFromConfig(epochByHashConfig.DB) - epochByHashDbConfig.FilePath = psf.pathManager.PathForStatic(shardID, epochByHashConfig.DB.FilePath) - epochByHashCacherConfig := GetCacherFromConfig(epochByHashConfig.Cache) - - dbConfigHandlerInstance = NewDBConfigHandler(epochByHashConfig.DB) - epochByHashPersisterCreator, err := NewPersisterFactory(dbConfigHandlerInstance) - if err != nil { - return err - } - - epochByHashUnit, err := storageunit.NewStorageUnitFromConf( - epochByHashCacherConfig, - epochByHashDbConfig, - epochByHashPersisterCreator, - ) + epochByHashUnit, err := psf.createStaticStorageUnit(psf.generalConfig.DbLookupExtensions.EpochByHashStorageConfig, shardID, emptyDBPathSuffix) if err != nil { return fmt.Errorf("%w for DbLookupExtensions.EpochByHashStorageConfig", err) } @@ -600,7 +528,7 @@ func (psf *StorageServiceFactory) setUpDbLookupExtensions(chainStorer *dataRetri } func (psf *StorageServiceFactory) setUpEsdtSuppliesStorer(chainStorer *dataRetriever.ChainStorer, shardIDStr string) error { - esdtSuppliesUnit, err := psf.createEsdtSuppliesUnit(shardIDStr) + esdtSuppliesUnit, err := psf.createStaticStorageUnit(psf.generalConfig.DbLookupExtensions.ESDTSuppliesStorageConfig, shardIDStr, emptyDBPathSuffix) if err != nil { return fmt.Errorf("%w for DbLookupExtensions.ESDTSuppliesStorageConfig", err) } @@ -613,7 +541,7 @@ func (psf *StorageServiceFactory) setUpEsdtSuppliesStorer(chainStorer *dataRetri } time.Sleep(time.Second) // making sure the unit was properly closed and destroyed - esdtSuppliesUnit, err = psf.createEsdtSuppliesUnit(shardIDStr) + esdtSuppliesUnit, err = psf.createStaticStorageUnit(psf.generalConfig.DbLookupExtensions.ESDTSuppliesStorageConfig, shardIDStr, emptyDBPathSuffix) if err != nil { return err } @@ -623,23 +551,6 @@ func (psf *StorageServiceFactory) setUpEsdtSuppliesStorer(chainStorer *dataRetri return nil } -func (psf *StorageServiceFactory) createEsdtSuppliesUnit(shardIDStr string) (storage.Storer, error) { - esdtSuppliesConfig := psf.generalConfig.DbLookupExtensions.ESDTSuppliesStorageConfig - esdtSuppliesDbConfig := GetDBFromConfig(esdtSuppliesConfig.DB) - esdtSuppliesDbConfig.FilePath = psf.pathManager.PathForStatic(shardIDStr, esdtSuppliesConfig.DB.FilePath) - esdtSuppliesCacherConfig := GetCacherFromConfig(esdtSuppliesConfig.Cache) - - dbConfigHandlerInstance := NewDBConfigHandler(esdtSuppliesConfig.DB) - esdtSuppliesPersisterCreator, err := NewPersisterFactory(dbConfigHandlerInstance) - if err != nil { - return nil, err - } - - 
return storageunit.NewStorageUnitFromConf( - esdtSuppliesCacherConfig, esdtSuppliesDbConfig, - esdtSuppliesPersisterCreator) -} - func (psf *StorageServiceFactory) createPruningStorerArgs( storageConfig config.StorageConfig, customDatabaseRemover storage.CustomDatabaseRemoverHandler, @@ -655,8 +566,7 @@ func (psf *StorageServiceFactory) createPruningStorerArgs( NumOfActivePersisters: numOfActivePersisters, } - dbConfigHandlerInstance := NewDBConfigHandler(storageConfig.DB) - persisterFactory, err := NewPersisterFactory(dbConfigHandlerInstance) + persisterFactory, err := NewPersisterFactory(storageConfig.DB) if err != nil { return pruning.StorerArgs{}, err } @@ -687,22 +597,8 @@ func (psf *StorageServiceFactory) createTrieEpochRootHashStorerIfNeeded() (stora return storageunit.NewNilStorer(), nil } - trieEpochRootHashDbConfig := GetDBFromConfig(psf.generalConfig.TrieEpochRootHashStorage.DB) shardId := core.GetShardIDString(psf.shardCoordinator.SelfId()) - dbPath := psf.pathManager.PathForStatic(shardId, psf.generalConfig.TrieEpochRootHashStorage.DB.FilePath) - trieEpochRootHashDbConfig.FilePath = dbPath - - dbConfigHandlerInstance := NewDBConfigHandler(psf.generalConfig.TrieEpochRootHashStorage.DB) - esdtSuppliesPersisterCreator, err := NewPersisterFactory(dbConfigHandlerInstance) - if err != nil { - return nil, err - } - - trieEpochRootHashStorageUnit, err := storageunit.NewStorageUnitFromConf( - GetCacherFromConfig(psf.generalConfig.TrieEpochRootHashStorage.Cache), - trieEpochRootHashDbConfig, - esdtSuppliesPersisterCreator, - ) + trieEpochRootHashStorageUnit, err := psf.createStaticStorageUnit(psf.generalConfig.TrieEpochRootHashStorage, shardId, emptyDBPathSuffix) if err != nil { return nil, fmt.Errorf("%w for TrieEpochRootHashStorage", err) } @@ -713,21 +609,8 @@ func (psf *StorageServiceFactory) createTrieEpochRootHashStorerIfNeeded() (stora func (psf *StorageServiceFactory) createTriePersister( storageConfig config.StorageConfig, ) (storage.Storer, error) { - trieDBConfig := GetDBFromConfig(storageConfig.DB) shardID := core.GetShardIDString(psf.shardCoordinator.SelfId()) - dbPath := psf.pathManager.PathForStatic(shardID, storageConfig.DB.FilePath) - trieDBConfig.FilePath = dbPath - - dbConfigHandlerInstance := NewDBConfigHandler(storageConfig.DB) - persisterFactory, err := NewPersisterFactory(dbConfigHandlerInstance) - if err != nil { - return nil, err - } - - return storageunit.NewStorageUnitFromConf( - GetCacherFromConfig(storageConfig.Cache), - trieDBConfig, - persisterFactory) + return psf.createStaticStorageUnit(storageConfig, shardID, emptyDBPathSuffix) } func (psf *StorageServiceFactory) createTriePruningPersister(arg pruning.StorerArgs) (storage.Storer, error) { diff --git a/storage/interface.go b/storage/interface.go index 328eb86c4ed..543d5d04f5b 100644 --- a/storage/interface.go +++ b/storage/interface.go @@ -211,13 +211,14 @@ type ManagedPeersHolder interface { // PersisterFactoryHandler defines the behaviour of a component which is able to create persisters type PersisterFactoryHandler interface { Create(path string) (Persister, error) + CreateWithRetries(path string) (Persister, error) IsInterfaceNil() bool } // StateStatsHandler defines the behaviour needed to handler storage statistics type StateStatsHandler interface { - IncrCache() - IncrSnapshotCache() - IncrPersister(epoch uint32) - IncrSnapshotPersister(epoch uint32) + IncrementCache() + IncrementSnapshotCache() + IncrementPersister(epoch uint32) + IncrementSnapshotPersister(epoch uint32) } diff --git 
a/storage/latestData/latestDataProvider.go b/storage/latestData/latestDataProvider.go index df6ea7e2418..2b894627de3 100644 --- a/storage/latestData/latestDataProvider.go +++ b/storage/latestData/latestDataProvider.go @@ -132,8 +132,7 @@ func (ldp *latestDataProvider) getEpochDirs() ([]string, error) { } func (ldp *latestDataProvider) getLastEpochAndRoundFromStorage(parentDir string, lastEpoch uint32) (storage.LatestDataFromStorage, error) { - dbConfigHandler := factory.NewDBConfigHandler(ldp.generalConfig.BootstrapStorage.DB) - persisterFactory, err := factory.NewPersisterFactory(dbConfigHandler) + persisterFactory, err := factory.NewPersisterFactory(ldp.generalConfig.BootstrapStorage.DB) if err != nil { return storage.LatestDataFromStorage{}, err } diff --git a/storage/pruning/fullHistoryPruningStorer_test.go b/storage/pruning/fullHistoryPruningStorer_test.go index c83fc5fae34..0e0d43877e8 100644 --- a/storage/pruning/fullHistoryPruningStorer_test.go +++ b/storage/pruning/fullHistoryPruningStorer_test.go @@ -294,16 +294,13 @@ func TestFullHistoryPruningStorer_ConcurrentOperations(t *testing.T) { fmt.Println(testDir) args := getDefaultArgs() - dbConfigHandler := factory.NewDBConfigHandler( - config.DBConfig{ - FilePath: filepath.Join(testDir, dbName), - Type: "LvlDBSerial", - MaxBatchSize: 100, - MaxOpenFiles: 10, - BatchDelaySeconds: 2, - }, - ) - persisterFactory, err := factory.NewPersisterFactory(dbConfigHandler) + persisterFactory, err := factory.NewPersisterFactory(config.DBConfig{ + FilePath: filepath.Join(testDir, dbName), + Type: "LvlDBSerial", + MaxBatchSize: 100, + MaxOpenFiles: 10, + BatchDelaySeconds: 2, + }) require.Nil(t, err) args.PersisterFactory = persisterFactory diff --git a/storage/pruning/pruningStorer.go b/storage/pruning/pruningStorer.go index f90f1c75aaa..2007454a7c8 100644 --- a/storage/pruning/pruningStorer.go +++ b/storage/pruning/pruningStorer.go @@ -434,7 +434,7 @@ func (ps *PruningStorer) createAndInitPersister(pd *persisterData) (storage.Pers func (ps *PruningStorer) Get(key []byte) ([]byte, error) { v, ok := ps.cacher.Get(key) if ok { - ps.stateStatsHandler.IncrCache() + ps.stateStatsHandler.IncrementCache() return v.([]byte), nil } @@ -457,7 +457,7 @@ func (ps *PruningStorer) Get(key []byte) ([]byte, error) { // if found in persistence unit, add it to cache and return _ = ps.cacher.Put(key, val, len(val)) - ps.stateStatsHandler.IncrPersister(ps.activePersisters[idx].epoch) + ps.stateStatsHandler.IncrementPersister(ps.activePersisters[idx].epoch) return val, nil } diff --git a/storage/pruning/pruningStorer_test.go b/storage/pruning/pruningStorer_test.go index 29c3765e2d8..248cc53cda2 100644 --- a/storage/pruning/pruningStorer_test.go +++ b/storage/pruning/pruningStorer_test.go @@ -1053,16 +1053,13 @@ func TestPruningStorer_ConcurrentOperations(t *testing.T) { fmt.Println(testDir) args := getDefaultArgs() - dbConfigHandler := factory.NewDBConfigHandler( - config.DBConfig{ - FilePath: filepath.Join(testDir, dbName), - Type: "LvlDBSerial", - MaxBatchSize: 100, - MaxOpenFiles: 10, - BatchDelaySeconds: 2, - }, - ) - persisterFactory, err := factory.NewPersisterFactory(dbConfigHandler) + persisterFactory, err := factory.NewPersisterFactory(config.DBConfig{ + FilePath: filepath.Join(testDir, dbName), + Type: "LvlDBSerial", + MaxBatchSize: 100, + MaxOpenFiles: 10, + BatchDelaySeconds: 2, + }) require.Nil(t, err) args.PersisterFactory = persisterFactory diff --git a/storage/pruning/triePruningStorer.go b/storage/pruning/triePruningStorer.go index 
1eb290023c6..e013820db65 100644 --- a/storage/pruning/triePruningStorer.go +++ b/storage/pruning/triePruningStorer.go @@ -95,7 +95,7 @@ func (ps *triePruningStorer) PutInEpochWithoutCache(key []byte, data []byte, epo func (ps *triePruningStorer) GetFromOldEpochsWithoutAddingToCache(key []byte) ([]byte, core.OptionalUint32, error) { v, ok := ps.cacher.Get(key) if ok && !bytes.Equal([]byte(common.ActiveDBKey), key) { - ps.stateStatsHandler.IncrSnapshotCache() + ps.stateStatsHandler.IncrementSnapshotCache() return v.([]byte), core.OptionalUint32{}, nil } @@ -118,7 +118,7 @@ func (ps *triePruningStorer) GetFromOldEpochsWithoutAddingToCache(key []byte) ([ HasValue: true, } - ps.stateStatsHandler.IncrSnapshotPersister(epoch.Value) + ps.stateStatsHandler.IncrementSnapshotPersister(epoch.Value) return val, epoch, nil } diff --git a/storage/pruning/triePruningStorer_test.go b/storage/pruning/triePruningStorer_test.go index 4d9a7c83227..28dc5c93f8e 100644 --- a/storage/pruning/triePruningStorer_test.go +++ b/storage/pruning/triePruningStorer_test.go @@ -76,6 +76,31 @@ func TestTriePruningStorer_GetFromOldEpochsWithoutCacheSearchesOnlyOldEpochsAndR assert.True(t, strings.Contains(err.Error(), "not found")) } +func TestTriePruningStorer_GetFromOldEpochsWithCache(t *testing.T) { + t.Parallel() + + args := getDefaultArgs() + ps, _ := pruning.NewTriePruningStorer(args) + cacher := testscommon.NewCacherMock() + ps.SetCacher(cacher) + + testKey1 := []byte("key1") + testVal1 := []byte("value1") + + err := ps.PutInEpoch(testKey1, testVal1, 0) + assert.Nil(t, err) + + err = ps.ChangeEpochSimple(1) + assert.Nil(t, err) + ps.SetEpochForPutOperation(1) + + res, epoch, err := ps.GetFromOldEpochsWithoutAddingToCache(testKey1) + assert.Equal(t, testVal1, res) + assert.Nil(t, err) + assert.False(t, epoch.HasValue) + assert.Equal(t, uint32(0), epoch.Value) +} + func TestTriePruningStorer_GetFromOldEpochsWithoutCacheLessActivePersisters(t *testing.T) { t.Parallel() diff --git a/storage/storageunit/constants.go b/storage/storageunit/constants.go index 0e128af8123..022715dbcb7 100644 --- a/storage/storageunit/constants.go +++ b/storage/storageunit/constants.go @@ -1,25 +1,27 @@ package storageunit -import "github.com/multiversx/mx-chain-storage-go/storageUnit" +import ( + "github.com/multiversx/mx-chain-storage-go/common" +) const ( // LRUCache defines a cache identifier with least-recently-used eviction mechanism - LRUCache = storageUnit.LRUCache + LRUCache = common.LRUCache // SizeLRUCache defines a cache identifier with least-recently-used eviction mechanism and fixed size in bytes - SizeLRUCache = storageUnit.SizeLRUCache + SizeLRUCache = common.SizeLRUCache ) // DB types that are currently supported const ( // LvlDB represents a levelDB storage identifier - LvlDB = storageUnit.LvlDB + LvlDB = common.LvlDB // LvlDBSerial represents a levelDB storage with serialized operations identifier - LvlDBSerial = storageUnit.LvlDBSerial + LvlDBSerial = common.LvlDBSerial // MemoryDB represents an in memory storage identifier - MemoryDB = storageUnit.MemoryDB + MemoryDB = common.MemoryDB ) // Shard id provider types that are currently supported const ( - BinarySplit = storageUnit.BinarySplit + BinarySplit = common.BinarySplit ) diff --git a/storage/storageunit/storageunit.go b/storage/storageunit/storageunit.go index 4e1605efaa7..c1944777920 100644 --- a/storage/storageunit/storageunit.go +++ b/storage/storageunit/storageunit.go @@ -3,6 +3,8 @@ package storageunit import ( "github.com/multiversx/mx-chain-core-go/marshal" 
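	// A minimal, hypothetical caller-side sketch (not lines from this change set) of the
	// reworked storageunit API below: NewDB now takes an ArgDB value (aliased from
	// mx-chain-storage-go's factory package) instead of a persister factory plus path,
	// and persister factories create databases through CreateWithRetries. Field values
	// here are purely illustrative.
	//
	//	args := storageunit.ArgDB{
	//		DBType:            storageunit.LvlDBSerial,
	//		Path:              "/tmp/example-db", // illustrative path
	//		BatchDelaySeconds: 2,
	//		MaxBatchSize:      100,
	//		MaxOpenFiles:      10,
	//	}
	//	db, err := storageunit.NewDB(args) // returns a storage.Persister or an error
	//
	// Call sites that previously passed a factory and a path to storageunit.NewDB are
	// updated in this patch to call persisterFactory.CreateWithRetries(path) directly.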
"github.com/multiversx/mx-chain-go/storage" + "github.com/multiversx/mx-chain-storage-go/common" + "github.com/multiversx/mx-chain-storage-go/factory" "github.com/multiversx/mx-chain-storage-go/storageCacherAdapter" "github.com/multiversx/mx-chain-storage-go/storageUnit" ) @@ -12,25 +14,25 @@ import ( type Unit = storageUnit.Unit // CacheConfig holds the configurable elements of a cache -type CacheConfig = storageUnit.CacheConfig - -// ArgDB is a structure that is used to create a new storage.Persister implementation -type ArgDB = storageUnit.ArgDB +type CacheConfig = common.CacheConfig // DBConfig holds the configurable elements of a database -type DBConfig = storageUnit.DBConfig +type DBConfig = common.DBConfig // NilStorer resembles a disabled implementation of the Storer interface type NilStorer = storageUnit.NilStorer // CacheType represents the type of the supported caches -type CacheType = storageUnit.CacheType +type CacheType = common.CacheType // DBType represents the type of the supported databases -type DBType = storageUnit.DBType +type DBType = common.DBType // ShardIDProviderType represents the type of the supported shard id providers -type ShardIDProviderType = storageUnit.ShardIDProviderType +type ShardIDProviderType = common.ShardIDProviderType + +// ArgDB is a structure that is used to create a new storage.Persister implementation +type ArgDB = factory.ArgDB // NewStorageUnit is the constructor for the storage unit, creating a new storage unit // from the given cacher and persister. @@ -40,17 +42,31 @@ func NewStorageUnit(c storage.Cacher, p storage.Persister) (*Unit, error) { // NewCache creates a new cache from a cache config func NewCache(config CacheConfig) (storage.Cacher, error) { - return storageUnit.NewCache(config) + return factory.NewCache(config) } // NewDB creates a new database from database config -func NewDB(persisterFactory storage.PersisterFactoryHandler, path string) (storage.Persister, error) { - return storageUnit.NewDB(persisterFactory, path) +func NewDB(args ArgDB) (storage.Persister, error) { + return factory.NewDB(args) } // NewStorageUnitFromConf creates a new storage unit from a storage unit config func NewStorageUnitFromConf(cacheConf CacheConfig, dbConf DBConfig, persisterFactory storage.PersisterFactoryHandler) (*Unit, error) { - return storageUnit.NewStorageUnitFromConf(cacheConf, dbConf, persisterFactory) + if dbConf.MaxBatchSize > int(cacheConf.Capacity) { + return nil, common.ErrCacheSizeIsLowerThanBatchSize + } + + cache, err := NewCache(cacheConf) + if err != nil { + return nil, err + } + + db, err := persisterFactory.CreateWithRetries(dbConf.FilePath) + if err != nil { + return nil, err + } + + return NewStorageUnit(cache, db) } // NewNilStorer will return a nil storer diff --git a/storage/storageunit/storageunit_test.go b/storage/storageunit/storageunit_test.go index 34affcb569f..da4aea63b33 100644 --- a/storage/storageunit/storageunit_test.go +++ b/storage/storageunit/storageunit_test.go @@ -72,52 +72,6 @@ func TestNewCache(t *testing.T) { }) } -func TestNewDB(t *testing.T) { - t.Parallel() - - t.Run("wrong config should error", func(t *testing.T) { - t.Parallel() - - path := "TEST" - dbConfig := config.DBConfig{ - FilePath: path, - Type: "invalid type", - BatchDelaySeconds: 5, - MaxBatchSize: 10, - MaxOpenFiles: 10, - } - - dbConfigHandler := factory.NewDBConfigHandler(dbConfig) - persisterFactory, err := factory.NewPersisterFactory(dbConfigHandler) - assert.Nil(t, err) - - db, err := storageunit.NewDB(persisterFactory, path) - 
assert.True(t, check.IfNil(db)) - assert.Equal(t, common.ErrNotSupportedDBType, err) - }) - t.Run("should work", func(t *testing.T) { - t.Parallel() - - path := path.Join(t.TempDir(), "TEST") - dbConfig := config.DBConfig{ - FilePath: path, - Type: "LvlDBSerial", - BatchDelaySeconds: 5, - MaxBatchSize: 10, - MaxOpenFiles: 10, - } - - dbConfigHandler := factory.NewDBConfigHandler(dbConfig) - persisterFactory, err := factory.NewPersisterFactory(dbConfigHandler) - assert.Nil(t, err) - - db, err := storageunit.NewDB(persisterFactory, path) - assert.False(t, check.IfNil(db)) - assert.Nil(t, err) - _ = db.Close() - }) -} - func TestNewStorageUnitFromConf(t *testing.T) { t.Parallel() @@ -144,8 +98,7 @@ func TestNewStorageUnitFromConf(t *testing.T) { MaxBatchSize: dbConfig.MaxBatchSize, MaxOpenFiles: dbConfig.MaxOpenFiles, } - dbConfigHandler := factory.NewDBConfigHandler(dbConf) - persisterFactory, err := factory.NewPersisterFactory(dbConfigHandler) + persisterFactory, err := factory.NewPersisterFactory(dbConf) assert.Nil(t, err) unit, err := storageunit.NewStorageUnitFromConf(cacheConfig, dbConfig, persisterFactory) @@ -166,8 +119,7 @@ func TestNewStorageUnitFromConf(t *testing.T) { MaxBatchSize: dbConfig.MaxBatchSize, MaxOpenFiles: dbConfig.MaxOpenFiles, } - dbConfigHandler := factory.NewDBConfigHandler(dbConf) - persisterFactory, err := factory.NewPersisterFactory(dbConfigHandler) + persisterFactory, err := factory.NewPersisterFactory(dbConf) assert.Nil(t, err) unit, err := storageunit.NewStorageUnitFromConf(cacheConfig, dbConfig, persisterFactory) diff --git a/testscommon/bootstrapMocks/bootstrapParamsStub.go b/testscommon/bootstrapMocks/bootstrapParamsStub.go index d62f2d72b61..56d0b6219bd 100644 --- a/testscommon/bootstrapMocks/bootstrapParamsStub.go +++ b/testscommon/bootstrapMocks/bootstrapParamsStub.go @@ -7,7 +7,7 @@ type BootstrapParamsHandlerMock struct { EpochCalled func() uint32 SelfShardIDCalled func() uint32 NumOfShardsCalled func() uint32 - NodesConfigCalled func() *nodesCoordinator.NodesCoordinatorRegistry + NodesConfigCalled func() nodesCoordinator.NodesCoordinatorRegistryHandler } // Epoch - @@ -36,7 +36,7 @@ func (bphm *BootstrapParamsHandlerMock) NumOfShards() uint32 { } // NodesConfig - -func (bphm *BootstrapParamsHandlerMock) NodesConfig() *nodesCoordinator.NodesCoordinatorRegistry { +func (bphm *BootstrapParamsHandlerMock) NodesConfig() nodesCoordinator.NodesCoordinatorRegistryHandler { if bphm.NodesConfigCalled != nil { return bphm.NodesConfigCalled() } diff --git a/testscommon/builtInCostHandlerStub.go b/testscommon/builtInCostHandlerStub.go deleted file mode 100644 index 046cc45ac2b..00000000000 --- a/testscommon/builtInCostHandlerStub.go +++ /dev/null @@ -1,34 +0,0 @@ -package testscommon - -import ( - "github.com/multiversx/mx-chain-core-go/data" -) - -// BuiltInCostHandlerStub - -type BuiltInCostHandlerStub struct { - ComputeBuiltInCostCalled func(tx data.TransactionWithFeeHandler) uint64 - IsBuiltInFuncCallCalled func(tx data.TransactionWithFeeHandler) bool -} - -// ComputeBuiltInCost - -func (stub *BuiltInCostHandlerStub) ComputeBuiltInCost(tx data.TransactionWithFeeHandler) uint64 { - if stub.ComputeBuiltInCostCalled != nil { - return stub.ComputeBuiltInCostCalled(tx) - } - - return 1 -} - -// IsBuiltInFuncCall - -func (stub *BuiltInCostHandlerStub) IsBuiltInFuncCall(tx data.TransactionWithFeeHandler) bool { - if stub.IsBuiltInFuncCallCalled != nil { - return stub.IsBuiltInFuncCallCalled(tx) - } - - return false -} - -// IsInterfaceNil returns true if underlying 
object is nil -func (stub *BuiltInCostHandlerStub) IsInterfaceNil() bool { - return stub == nil -} diff --git a/testscommon/chainSimulator/chainSimulatorMock.go b/testscommon/chainSimulator/chainSimulatorMock.go new file mode 100644 index 00000000000..07db474a07e --- /dev/null +++ b/testscommon/chainSimulator/chainSimulatorMock.go @@ -0,0 +1,31 @@ +package chainSimulator + +import "github.com/multiversx/mx-chain-go/node/chainSimulator/process" + +// ChainSimulatorMock - +type ChainSimulatorMock struct { + GenerateBlocksCalled func(numOfBlocks int) error + GetNodeHandlerCalled func(shardID uint32) process.NodeHandler +} + +// GenerateBlocks - +func (mock *ChainSimulatorMock) GenerateBlocks(numOfBlocks int) error { + if mock.GenerateBlocksCalled != nil { + return mock.GenerateBlocksCalled(numOfBlocks) + } + + return nil +} + +// GetNodeHandler - +func (mock *ChainSimulatorMock) GetNodeHandler(shardID uint32) process.NodeHandler { + if mock.GetNodeHandlerCalled != nil { + return mock.GetNodeHandlerCalled(shardID) + } + return nil +} + +// IsInterfaceNil - +func (mock *ChainSimulatorMock) IsInterfaceNil() bool { + return mock == nil +} diff --git a/testscommon/chainSimulator/nodeHandlerMock.go b/testscommon/chainSimulator/nodeHandlerMock.go new file mode 100644 index 00000000000..23941f914eb --- /dev/null +++ b/testscommon/chainSimulator/nodeHandlerMock.go @@ -0,0 +1,127 @@ +package chainSimulator + +import ( + chainData "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-go/api/shared" + "github.com/multiversx/mx-chain-go/consensus" + "github.com/multiversx/mx-chain-go/factory" + "github.com/multiversx/mx-chain-go/node/chainSimulator/dtos" + "github.com/multiversx/mx-chain-go/sharding" +) + +// NodeHandlerMock - +type NodeHandlerMock struct { + GetProcessComponentsCalled func() factory.ProcessComponentsHolder + GetChainHandlerCalled func() chainData.ChainHandler + GetBroadcastMessengerCalled func() consensus.BroadcastMessenger + GetShardCoordinatorCalled func() sharding.Coordinator + GetCryptoComponentsCalled func() factory.CryptoComponentsHolder + GetCoreComponentsCalled func() factory.CoreComponentsHolder + GetStateComponentsCalled func() factory.StateComponentsHolder + GetFacadeHandlerCalled func() shared.FacadeHandler + GetStatusCoreComponentsCalled func() factory.StatusCoreComponentsHolder + SetKeyValueForAddressCalled func(addressBytes []byte, state map[string]string) error + SetStateForAddressCalled func(address []byte, state *dtos.AddressState) error + CloseCalled func() error +} + +// GetProcessComponents - +func (mock *NodeHandlerMock) GetProcessComponents() factory.ProcessComponentsHolder { + if mock.GetProcessComponentsCalled != nil { + return mock.GetProcessComponentsCalled() + } + return nil +} + +// GetChainHandler - +func (mock *NodeHandlerMock) GetChainHandler() chainData.ChainHandler { + if mock.GetChainHandlerCalled != nil { + return mock.GetChainHandlerCalled() + } + return nil +} + +// GetBroadcastMessenger - +func (mock *NodeHandlerMock) GetBroadcastMessenger() consensus.BroadcastMessenger { + if mock.GetBroadcastMessengerCalled != nil { + return mock.GetBroadcastMessengerCalled() + } + return nil +} + +// GetShardCoordinator - +func (mock *NodeHandlerMock) GetShardCoordinator() sharding.Coordinator { + if mock.GetShardCoordinatorCalled != nil { + return mock.GetShardCoordinatorCalled() + } + return nil +} + +// GetCryptoComponents - +func (mock *NodeHandlerMock) GetCryptoComponents() factory.CryptoComponentsHolder { + if 
mock.GetCryptoComponentsCalled != nil { + return mock.GetCryptoComponentsCalled() + } + return nil +} + +// GetCoreComponents - +func (mock *NodeHandlerMock) GetCoreComponents() factory.CoreComponentsHolder { + if mock.GetCoreComponentsCalled != nil { + return mock.GetCoreComponentsCalled() + } + return nil +} + +// GetStateComponents - +func (mock *NodeHandlerMock) GetStateComponents() factory.StateComponentsHolder { + if mock.GetStateComponentsCalled != nil { + return mock.GetStateComponentsCalled() + } + return nil +} + +// GetFacadeHandler - +func (mock *NodeHandlerMock) GetFacadeHandler() shared.FacadeHandler { + if mock.GetFacadeHandlerCalled != nil { + return mock.GetFacadeHandlerCalled() + } + return nil +} + +// GetStatusCoreComponents - +func (mock *NodeHandlerMock) GetStatusCoreComponents() factory.StatusCoreComponentsHolder { + if mock.GetStatusCoreComponentsCalled != nil { + return mock.GetStatusCoreComponentsCalled() + } + return nil +} + +// SetKeyValueForAddress - +func (mock *NodeHandlerMock) SetKeyValueForAddress(addressBytes []byte, state map[string]string) error { + if mock.SetKeyValueForAddressCalled != nil { + return mock.SetKeyValueForAddressCalled(addressBytes, state) + } + return nil +} + +// SetStateForAddress - +func (mock *NodeHandlerMock) SetStateForAddress(address []byte, state *dtos.AddressState) error { + if mock.SetStateForAddressCalled != nil { + return mock.SetStateForAddressCalled(address, state) + } + return nil +} + +// Close - +func (mock *NodeHandlerMock) Close() error { + if mock.CloseCalled != nil { + return mock.CloseCalled() + } + return nil +} + +// IsInterfaceNil - +func (mock *NodeHandlerMock) IsInterfaceNil() bool { + return mock == nil +} diff --git a/testscommon/components/components.go b/testscommon/components/components.go index bd65895bab1..0e3dcc14cd1 100644 --- a/testscommon/components/components.go +++ b/testscommon/components/components.go @@ -134,7 +134,7 @@ func GetConsensusArgs(shardCoordinator sharding.Coordinator) consensusComp.Conse coreComponents := GetCoreComponents() cryptoComponents := GetCryptoComponents(coreComponents) networkComponents := GetNetworkComponents(cryptoComponents) - stateComponents := GetStateComponents(coreComponents) + stateComponents := GetStateComponents(coreComponents, GetStatusCoreComponents()) dataComponents := GetDataComponents(coreComponents, shardCoordinator) processComponents := GetProcessComponents( shardCoordinator, @@ -199,6 +199,13 @@ func GetCryptoArgs(coreComponents factory.CoreComponentsHolder) cryptoComp.Crypt }, EnableEpochs: config.EnableEpochs{ BLSMultiSignerEnableEpoch: []config.MultiSignerConfig{{EnableEpoch: 0, Type: "no-KOSK"}}, + MaxNodesChangeEnableEpoch: []config.MaxNodesChangeConfig{ + { + EpochEnable: 0, + MaxNumNodes: 100, + NodesToShufflePerShard: 2, + }, + }, }, } @@ -325,7 +332,7 @@ func GetNetworkFactoryArgs() networkComp.NetworkComponentsFactoryArgs { } // GetStateFactoryArgs - -func GetStateFactoryArgs(coreComponents factory.CoreComponentsHolder) stateComp.StateComponentsFactoryArgs { +func GetStateFactoryArgs(coreComponents factory.CoreComponentsHolder, statusCoreComp factory.StatusCoreComponentsHolder) stateComp.StateComponentsFactoryArgs { tsm, _ := trie.NewTrieStorageManager(storage.GetStorageManagerArgs()) storageManagerUser, _ := trie.NewTrieStorageManagerWithoutPruning(tsm) tsm, _ = trie.NewTrieStorageManager(storage.GetStorageManagerArgs()) @@ -344,7 +351,7 @@ func GetStateFactoryArgs(coreComponents factory.CoreComponentsHolder) stateComp. 
stateComponentsFactoryArgs := stateComp.StateComponentsFactoryArgs{ Config: GetGeneralConfig(), Core: coreComponents, - StatusCore: GetStatusCoreComponents(), + StatusCore: statusCoreComp, StorageService: disabled.NewChainStorer(), ProcessingMode: common.Normal, ChainHandler: &testscommon.ChainHandlerStub{}, @@ -359,7 +366,7 @@ func GetProcessComponentsFactoryArgs(shardCoordinator sharding.Coordinator) proc cryptoComponents := GetCryptoComponents(coreComponents) networkComponents := GetNetworkComponents(cryptoComponents) dataComponents := GetDataComponents(coreComponents, shardCoordinator) - stateComponents := GetStateComponents(coreComponents) + stateComponents := GetStateComponents(coreComponents, GetStatusCoreComponents()) processArgs := GetProcessArgs( shardCoordinator, coreComponents, @@ -548,6 +555,8 @@ func GetProcessArgs( MaxNumberOfNodesForStake: 10, ActivateBLSPubKeyMessageVerification: false, MinUnstakeTokensValue: "1", + StakeLimitPercentage: 100, + NodeLimitPercentage: 100, }, DelegationManagerSystemSCConfig: config.DelegationManagerSystemSCConfig{ MinCreationDeposit: "100", @@ -558,12 +567,30 @@ func GetProcessArgs( MinServiceFee: 0, MaxServiceFee: 100, }, + SoftAuctionConfig: config.SoftAuctionConfig{ + TopUpStep: "10", + MinTopUp: "1", + MaxTopUp: "32000000", + MaxNumberOfIterations: 100000, + }, }, HistoryRepo: &dblookupext.HistoryRepositoryStub{}, FlagsConfig: config.ContextFlagsConfig{ Version: "v1.0.0", }, + RoundConfig: testscommon.GetDefaultRoundsConfig(), TxExecutionOrderHandler: &commonMocks.TxExecutionOrderHandlerStub{}, + EpochConfig: config.EpochConfig{ + EnableEpochs: config.EnableEpochs{ + MaxNodesChangeEnableEpoch: []config.MaxNodesChangeConfig{ + { + EpochEnable: 0, + MaxNumNodes: 100, + NodesToShufflePerShard: 2, + }, + }, + }, + }, } } @@ -626,7 +653,7 @@ func GetStatusComponentsFactoryArgsAndProcessComponents(shardCoordinator shardin cryptoComponents := GetCryptoComponents(coreComponents) networkComponents := GetNetworkComponents(cryptoComponents) dataComponents := GetDataComponents(coreComponents, shardCoordinator) - stateComponents := GetStateComponents(coreComponents) + stateComponents := GetStateComponents(coreComponents, GetStatusCoreComponents()) processComponents := GetProcessComponents( shardCoordinator, coreComponents, @@ -718,22 +745,22 @@ func GetCryptoComponents(coreComponents factory.CoreComponentsHolder) factory.Cr } // GetStateComponents - -func GetStateComponents(coreComponents factory.CoreComponentsHolder) factory.StateComponentsHolder { - stateArgs := GetStateFactoryArgs(coreComponents) +func GetStateComponents(coreComponents factory.CoreComponentsHolder, statusCoreComponents factory.StatusCoreComponentsHolder) factory.StateComponentsHolder { + stateArgs := GetStateFactoryArgs(coreComponents, statusCoreComponents) stateComponentsFactory, err := stateComp.NewStateComponentsFactory(stateArgs) if err != nil { - log.Error("getStateComponents NewStateComponentsFactory", "error", err.Error()) + log.Error("GetStateComponents NewStateComponentsFactory", "error", err.Error()) return nil } stateComponents, err := stateComp.NewManagedStateComponents(stateComponentsFactory) if err != nil { - log.Error("getStateComponents NewManagedStateComponents", "error", err.Error()) + log.Error("GetStateComponents NewManagedStateComponents", "error", err.Error()) return nil } err = stateComponents.Create() if err != nil { - log.Error("getStateComponents Create", "error", err.Error()) + log.Error("GetStateComponents Create", "error", err.Error()) return nil } 
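	// A hypothetical test-side sketch (not lines from this change set): GetStateComponents
	// now receives the status-core components from the caller instead of building them
	// internally, matching the updated call sites elsewhere in this file.
	//
	//	coreComp := components.GetCoreComponents()
	//	statusCoreComp := components.GetStatusCoreComponents()
	//	stateComp := components.GetStateComponents(coreComp, statusCoreComp)
	//	// stateComp is nil if any creation step failed; errors are only logged above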
return stateComponents @@ -756,7 +783,7 @@ func GetStatusCoreComponents() factory.StatusCoreComponentsHolder { err = statusCoreComponents.Create() if err != nil { - log.Error("statusCoreComponents Create", "error", err.Error()) + log.Error("GetStatusCoreComponents Create", "error", err.Error()) return nil } diff --git a/testscommon/components/default.go b/testscommon/components/default.go index c39baf24385..514b8355407 100644 --- a/testscommon/components/default.go +++ b/testscommon/components/default.go @@ -13,12 +13,15 @@ import ( "github.com/multiversx/mx-chain-go/testscommon/cryptoMocks" dataRetrieverTests "github.com/multiversx/mx-chain-go/testscommon/dataRetriever" "github.com/multiversx/mx-chain-go/testscommon/economicsmocks" + "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" epochNotifierMock "github.com/multiversx/mx-chain-go/testscommon/epochNotifier" "github.com/multiversx/mx-chain-go/testscommon/factory" + "github.com/multiversx/mx-chain-go/testscommon/genesisMocks" "github.com/multiversx/mx-chain-go/testscommon/marshallerMock" "github.com/multiversx/mx-chain-go/testscommon/nodeTypeProviderMock" "github.com/multiversx/mx-chain-go/testscommon/p2pmocks" "github.com/multiversx/mx-chain-go/testscommon/shardingMocks" + "github.com/multiversx/mx-chain-go/testscommon/stakingcommon" stateMock "github.com/multiversx/mx-chain-go/testscommon/state" "github.com/multiversx/mx-chain-go/testscommon/storage" "github.com/multiversx/mx-chain-go/testscommon/storageManager" @@ -42,17 +45,18 @@ func GetDefaultCoreComponents() *mock.CoreComponentsMock { MinTransactionVersionCalled: func() uint32 { return 1 }, - WatchdogTimer: &testscommon.WatchdogMock{}, - AlarmSch: &testscommon.AlarmSchedulerStub{}, - NtpSyncTimer: &testscommon.SyncTimerStub{}, - RoundHandlerField: &testscommon.RoundHandlerMock{}, - EconomicsHandler: &economicsmocks.EconomicsHandlerStub{}, - RatingsConfig: &testscommon.RatingsInfoMock{}, - RatingHandler: &testscommon.RaterMock{}, - NodesConfig: &testscommon.NodesSetupStub{}, - StartTime: time.Time{}, - NodeTypeProviderField: &nodeTypeProviderMock.NodeTypeProviderStub{}, - EpochChangeNotifier: &epochNotifierMock.EpochNotifierStub{}, + WatchdogTimer: &testscommon.WatchdogMock{}, + AlarmSch: &testscommon.AlarmSchedulerStub{}, + NtpSyncTimer: &testscommon.SyncTimerStub{}, + RoundHandlerField: &testscommon.RoundHandlerMock{}, + EconomicsHandler: &economicsmocks.EconomicsHandlerStub{}, + RatingsConfig: &testscommon.RatingsInfoMock{}, + RatingHandler: &testscommon.RaterMock{}, + NodesConfig: &genesisMocks.NodesSetupStub{}, + StartTime: time.Time{}, + NodeTypeProviderField: &nodeTypeProviderMock.NodeTypeProviderStub{}, + EpochChangeNotifier: &epochNotifierMock.EpochNotifierStub{}, + EnableEpochsHandlerField: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, } } @@ -131,8 +135,8 @@ func GetDefaultProcessComponents(shardCoordinator sharding.Coordinator) *mock.Pr BootSore: &mock.BootstrapStorerMock{}, HeaderSigVerif: &mock.HeaderSigVerifierStub{}, HeaderIntegrVerif: &mock.HeaderIntegrityVerifierStub{}, - ValidatorStatistics: &mock.ValidatorStatisticsProcessorStub{}, - ValidatorProvider: &mock.ValidatorsProviderStub{}, + ValidatorStatistics: &testscommon.ValidatorStatisticsProcessorStub{}, + ValidatorProvider: &stakingcommon.ValidatorsProviderStub{}, BlockTrack: &mock.BlockTrackerStub{}, PendingMiniBlocksHdl: &mock.PendingMiniBlocksHandlerStub{}, ReqHandler: &testscommon.RequestHandlerStub{}, diff --git a/testscommon/dataRetriever/poolFactory.go 
b/testscommon/dataRetriever/poolFactory.go index 77bdeb610a7..a8f4374e800 100644 --- a/testscommon/dataRetriever/poolFactory.go +++ b/testscommon/dataRetriever/poolFactory.go @@ -98,11 +98,10 @@ func CreatePoolsHolder(numShards uint32, selfShard uint32) dataRetriever.PoolsHo MaxOpenFiles: 10, } - dbConfigHandler := storageFactory.NewDBConfigHandler(dbConfig) - persisterFactory, err := storageFactory.NewPersisterFactory(dbConfigHandler) + persisterFactory, err := storageFactory.NewPersisterFactory(dbConfig) panicIfError("Create persister factory", err) - persister, err := storageunit.NewDB(persisterFactory, tempDir) + persister, err := persisterFactory.CreateWithRetries(tempDir) panicIfError("Create trieSync DB", err) tnf := factory.NewTrieNodeFactory() diff --git a/testscommon/dataRetriever/poolsHolderMock.go b/testscommon/dataRetriever/poolsHolderMock.go index 5c711addbb0..d3d30562954 100644 --- a/testscommon/dataRetriever/poolsHolderMock.go +++ b/testscommon/dataRetriever/poolsHolderMock.go @@ -4,6 +4,7 @@ import ( "time" "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/dataRetriever/dataPool" @@ -142,6 +143,11 @@ func (holder *PoolsHolderMock) Headers() dataRetriever.HeadersPool { return holder.headers } +// SetHeadersPool - +func (holder *PoolsHolderMock) SetHeadersPool(headersPool dataRetriever.HeadersPool) { + holder.headers = headersPool +} + // MiniBlocks - func (holder *PoolsHolderMock) MiniBlocks() storage.Cacher { return holder.miniBlocks diff --git a/testscommon/enableEpochsHandlerMock/enableEpochsHandlerStub.go b/testscommon/enableEpochsHandlerMock/enableEpochsHandlerStub.go index 16fc9019390..bf633508147 100644 --- a/testscommon/enableEpochsHandlerMock/enableEpochsHandlerStub.go +++ b/testscommon/enableEpochsHandlerMock/enableEpochsHandlerStub.go @@ -44,6 +44,10 @@ func (stub *EnableEpochsHandlerStub) AddActiveFlags(flags ...core.EnableEpochFla stub.Lock() defer stub.Unlock() + if len(stub.activeFlags) == 0 { + stub.activeFlags = make(map[core.EnableEpochFlag]struct{}) + } + for _, flag := range flags { stub.activeFlags[flag] = struct{}{} } diff --git a/integrationTests/mock/epochStartSystemSCStub.go b/testscommon/epochStartSystemSCStub.go similarity index 72% rename from integrationTests/mock/epochStartSystemSCStub.go rename to testscommon/epochStartSystemSCStub.go index fd2c92553cf..ff4e4addbf4 100644 --- a/integrationTests/mock/epochStartSystemSCStub.go +++ b/testscommon/epochStartSystemSCStub.go @@ -1,6 +1,7 @@ -package mock +package testscommon import ( + "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-core-go/data/block" "github.com/multiversx/mx-chain-go/epochStart" "github.com/multiversx/mx-chain-go/state" @@ -8,7 +9,7 @@ import ( // EpochStartSystemSCStub - type EpochStartSystemSCStub struct { - ProcessSystemSmartContractCalled func(validatorInfos map[uint32][]*state.ValidatorInfo, nonce uint64, epoch uint32) error + ProcessSystemSmartContractCalled func(validatorsInfo state.ShardValidatorsInfoMapHandler, header data.HeaderHandler) error ProcessDelegationRewardsCalled func(miniBlocks block.MiniBlockSlice, txCache epochStart.TransactionCacher) error ToggleUnStakeUnBondCalled func(value bool) error } @@ -22,9 +23,12 @@ func (e *EpochStartSystemSCStub) ToggleUnStakeUnBond(value bool) error { } // ProcessSystemSmartContract - -func (e *EpochStartSystemSCStub) ProcessSystemSmartContract(validatorInfos 
map[uint32][]*state.ValidatorInfo, nonce uint64, epoch uint32) error { +func (e *EpochStartSystemSCStub) ProcessSystemSmartContract( + validatorsInfo state.ShardValidatorsInfoMapHandler, + header data.HeaderHandler, +) error { if e.ProcessSystemSmartContractCalled != nil { - return e.ProcessSystemSmartContractCalled(validatorInfos, nonce, epoch) + return e.ProcessSystemSmartContractCalled(validatorsInfo, header) } return nil } diff --git a/process/mock/epochValidatorInfoCreatorStub.go b/testscommon/epochValidatorInfoCreatorStub.go similarity index 88% rename from process/mock/epochValidatorInfoCreatorStub.go rename to testscommon/epochValidatorInfoCreatorStub.go index 445d305596e..31c07037f1e 100644 --- a/process/mock/epochValidatorInfoCreatorStub.go +++ b/testscommon/epochValidatorInfoCreatorStub.go @@ -1,4 +1,4 @@ -package mock +package testscommon import ( "github.com/multiversx/mx-chain-core-go/data" @@ -9,8 +9,8 @@ import ( // EpochValidatorInfoCreatorStub - type EpochValidatorInfoCreatorStub struct { - CreateValidatorInfoMiniBlocksCalled func(validatorsInfo map[uint32][]*state.ValidatorInfo) (block.MiniBlockSlice, error) - VerifyValidatorInfoMiniBlocksCalled func(miniblocks []*block.MiniBlock, validatorsInfo map[uint32][]*state.ValidatorInfo) error + CreateValidatorInfoMiniBlocksCalled func(validatorsInfo state.ShardValidatorsInfoMapHandler) (block.MiniBlockSlice, error) + VerifyValidatorInfoMiniBlocksCalled func(miniblocks []*block.MiniBlock, validatorsInfo state.ShardValidatorsInfoMapHandler) error GetLocalValidatorInfoCacheCalled func() epochStart.ValidatorInfoCacher CreateMarshalledDataCalled func(body *block.Body) map[string][][]byte GetValidatorInfoTxsCalled func(body *block.Body) map[string]*state.ShardValidatorInfo @@ -20,7 +20,7 @@ type EpochValidatorInfoCreatorStub struct { } // CreateValidatorInfoMiniBlocks - -func (e *EpochValidatorInfoCreatorStub) CreateValidatorInfoMiniBlocks(validatorInfo map[uint32][]*state.ValidatorInfo) (block.MiniBlockSlice, error) { +func (e *EpochValidatorInfoCreatorStub) CreateValidatorInfoMiniBlocks(validatorInfo state.ShardValidatorsInfoMapHandler) (block.MiniBlockSlice, error) { if e.CreateValidatorInfoMiniBlocksCalled != nil { return e.CreateValidatorInfoMiniBlocksCalled(validatorInfo) } @@ -28,7 +28,7 @@ func (e *EpochValidatorInfoCreatorStub) CreateValidatorInfoMiniBlocks(validatorI } // VerifyValidatorInfoMiniBlocks - -func (e *EpochValidatorInfoCreatorStub) VerifyValidatorInfoMiniBlocks(miniBlocks []*block.MiniBlock, validatorsInfo map[uint32][]*state.ValidatorInfo) error { +func (e *EpochValidatorInfoCreatorStub) VerifyValidatorInfoMiniBlocks(miniBlocks []*block.MiniBlock, validatorsInfo state.ShardValidatorsInfoMapHandler) error { if e.VerifyValidatorInfoMiniBlocksCalled != nil { return e.VerifyValidatorInfoMiniBlocksCalled(miniBlocks, validatorsInfo) } diff --git a/testscommon/esdtStorageHandlerStub.go b/testscommon/esdtStorageHandlerStub.go index 1a1af038e4e..47825717409 100644 --- a/testscommon/esdtStorageHandlerStub.go +++ b/testscommon/esdtStorageHandlerStub.go @@ -16,7 +16,7 @@ type EsdtStorageHandlerStub struct { GetESDTNFTTokenOnDestinationWithCustomSystemAccountCalled func(accnt vmcommon.UserAccountHandler, esdtTokenKey []byte, nonce uint64, systemAccount vmcommon.UserAccountHandler) (*esdt.ESDigitalToken, bool, error) WasAlreadySentToDestinationShardAndUpdateStateCalled func(tickerID []byte, nonce uint64, dstAddress []byte) (bool, error) SaveNFTMetaDataCalled func(tx data.TransactionHandler) error - 
AddToLiquiditySystemAccCalled func(esdtTokenKey []byte, tokenType uint32, nonce uint64, transferValue *big.Int) error + AddToLiquiditySystemAccCalled func(esdtTokenKey []byte, tokenType uint32, nonce uint64, transferValue *big.Int, keepMetadataOnZeroLiquidity bool) error SaveMetaDataToSystemAccountCalled func(tokenKey []byte, nonce uint64, esdtData *esdt.ESDigitalToken) error GetMetaDataFromSystemAccountCalled func(bytes []byte, u uint64) (*esdt.MetaData, error) } @@ -94,9 +94,9 @@ func (e *EsdtStorageHandlerStub) SaveNFTMetaData(tx data.TransactionHandler) err } // AddToLiquiditySystemAcc - -func (e *EsdtStorageHandlerStub) AddToLiquiditySystemAcc(esdtTokenKey []byte, tokenType uint32, nonce uint64, transferValue *big.Int) error { +func (e *EsdtStorageHandlerStub) AddToLiquiditySystemAcc(esdtTokenKey []byte, tokenType uint32, nonce uint64, transferValue *big.Int, keepMetadataOnZeroLiquidity bool) error { if e.AddToLiquiditySystemAccCalled != nil { - return e.AddToLiquiditySystemAccCalled(esdtTokenKey, tokenType, nonce, transferValue) + return e.AddToLiquiditySystemAccCalled(esdtTokenKey, tokenType, nonce, transferValue, keepMetadataOnZeroLiquidity) } return nil diff --git a/testscommon/generalConfig.go b/testscommon/generalConfig.go index 0cf69ff24ed..06814edb1f5 100644 --- a/testscommon/generalConfig.go +++ b/testscommon/generalConfig.go @@ -363,7 +363,8 @@ func GetGeneralConfig() config.Config { CheckNodesOnDisk: false, }, Antiflood: config.AntifloodConfig{ - NumConcurrentResolverJobs: 2, + NumConcurrentResolverJobs: 2, + NumConcurrentResolvingTrieNodesJobs: 1, TxAccumulator: config.TxAccumulatorConfig{ MaxAllowedTimeInMilliseconds: 10, MaxDeviationTimeInMilliseconds: 1, @@ -415,6 +416,9 @@ func GetGeneralConfig() config.Config { "erd1najnxxweyw6plhg8efql330nttrj6l5cf87wqsuym85s9ha0hmdqnqgenp", //shard 2 }, }, + ResourceStats: config.ResourceStatsConfig{ + RefreshIntervalInSec: 1, + }, } } diff --git a/integrationTests/mock/nodesSetupStub.go b/testscommon/genesisMocks/nodesSetupStub.go similarity index 95% rename from integrationTests/mock/nodesSetupStub.go rename to testscommon/genesisMocks/nodesSetupStub.go index affb71e3530..ebe1cfe778a 100644 --- a/integrationTests/mock/nodesSetupStub.go +++ b/testscommon/genesisMocks/nodesSetupStub.go @@ -1,82 +1,82 @@ -package mock +package genesisMocks -import "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" +import ( + "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" +) // NodesSetupStub - type NodesSetupStub struct { - InitialNodesInfoForShardCalled func(shardId uint32) ([]nodesCoordinator.GenesisNodeInfoHandler, []nodesCoordinator.GenesisNodeInfoHandler, error) - InitialNodesInfoCalled func() (map[uint32][]nodesCoordinator.GenesisNodeInfoHandler, map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) - GetStartTimeCalled func() int64 - GetRoundDurationCalled func() uint64 - GetChainIdCalled func() string - GetMinTransactionVersionCalled func() uint32 + InitialNodesPubKeysCalled func() map[uint32][]string + InitialEligibleNodesPubKeysForShardCalled func(shardId uint32) ([]string, error) + GetShardIDForPubKeyCalled func(pubKey []byte) (uint32, error) + NumberOfShardsCalled func() uint32 GetShardConsensusGroupSizeCalled func() uint32 GetMetaConsensusGroupSizeCalled func() uint32 - NumberOfShardsCalled func() uint32 - MinNumberOfNodesCalled func() uint32 - MinNumberOfShardNodesCalled func() uint32 + GetRoundDurationCalled func() uint64 MinNumberOfMetaNodesCalled func() uint32 + MinNumberOfShardNodesCalled func() uint32 
GetHysteresisCalled func() float32 GetAdaptivityCalled func() bool + InitialNodesInfoForShardCalled func(shardId uint32) ([]nodesCoordinator.GenesisNodeInfoHandler, []nodesCoordinator.GenesisNodeInfoHandler, error) + InitialNodesInfoCalled func() (map[uint32][]nodesCoordinator.GenesisNodeInfoHandler, map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) + GetStartTimeCalled func() int64 + MinNumberOfNodesCalled func() uint32 AllInitialNodesCalled func() []nodesCoordinator.GenesisNodeInfoHandler - GetShardIDForPubKeyCalled func(pubkey []byte) (uint32, error) - InitialEligibleNodesPubKeysForShardCalled func(shardId uint32) ([]string, error) - InitialNodesPubKeysCalled func() map[uint32][]string MinNumberOfNodesWithHysteresisCalled func() uint32 MinShardHysteresisNodesCalled func() uint32 MinMetaHysteresisNodesCalled func() uint32 + GetChainIdCalled func() string + GetMinTransactionVersionCalled func() uint32 } -// MinNumberOfShardNodes - -func (n *NodesSetupStub) MinNumberOfShardNodes() uint32 { - if n.MinNumberOfShardNodesCalled != nil { - return n.MinNumberOfShardNodesCalled() +// InitialNodesPubKeys - +func (n *NodesSetupStub) InitialNodesPubKeys() map[uint32][]string { + if n.InitialNodesPubKeysCalled != nil { + return n.InitialNodesPubKeysCalled() } - return 1 + return map[uint32][]string{0: {"val1", "val2"}} } -// MinNumberOfMetaNodes - -func (n *NodesSetupStub) MinNumberOfMetaNodes() uint32 { - if n.MinNumberOfMetaNodesCalled != nil { - return n.MinNumberOfMetaNodesCalled() +// InitialEligibleNodesPubKeysForShard - +func (n *NodesSetupStub) InitialEligibleNodesPubKeysForShard(shardId uint32) ([]string, error) { + if n.InitialEligibleNodesPubKeysForShardCalled != nil { + return n.InitialEligibleNodesPubKeysForShardCalled(shardId) } - return 1 + return []string{"val1", "val2"}, nil } -// GetHysteresis - -func (n *NodesSetupStub) GetHysteresis() float32 { - if n.GetHysteresisCalled != nil { - return n.GetHysteresisCalled() +// NumberOfShards - +func (n *NodesSetupStub) NumberOfShards() uint32 { + if n.NumberOfShardsCalled != nil { + return n.NumberOfShardsCalled() } - - return 0 + return 1 } -// GetAdaptivity - -func (n *NodesSetupStub) GetAdaptivity() bool { - if n.GetAdaptivityCalled != nil { - return n.GetAdaptivityCalled() +// GetShardIDForPubKey - +func (n *NodesSetupStub) GetShardIDForPubKey(pubkey []byte) (uint32, error) { + if n.GetShardIDForPubKeyCalled != nil { + return n.GetShardIDForPubKeyCalled(pubkey) } - - return false + return 0, nil } -// MinNumberOfNodes - -func (n *NodesSetupStub) MinNumberOfNodes() uint32 { - if n.MinNumberOfNodesCalled != nil { - return n.MinNumberOfNodesCalled() +// GetShardConsensusGroupSize - +func (n *NodesSetupStub) GetShardConsensusGroupSize() uint32 { + if n.GetShardConsensusGroupSizeCalled != nil { + return n.GetShardConsensusGroupSizeCalled() } - return 0 + return 1 } -// GetStartTime - -func (n *NodesSetupStub) GetStartTime() int64 { - if n.GetStartTimeCalled != nil { - return n.GetStartTimeCalled() +// GetMetaConsensusGroupSize - +func (n *NodesSetupStub) GetMetaConsensusGroupSize() uint32 { + if n.GetMetaConsensusGroupSizeCalled != nil { + return n.GetMetaConsensusGroupSizeCalled() } - return 0 + return 1 } // GetRoundDuration - @@ -84,54 +84,49 @@ func (n *NodesSetupStub) GetRoundDuration() uint64 { if n.GetRoundDurationCalled != nil { return n.GetRoundDurationCalled() } - return 0 + return 4000 } -// GetChainId - -func (n *NodesSetupStub) GetChainId() string { - if n.GetChainIdCalled != nil { - return n.GetChainIdCalled() - } - return 
"chainID" -} - -// GetMinTransactionVersion - -func (n *NodesSetupStub) GetMinTransactionVersion() uint32 { - if n.GetMinTransactionVersionCalled != nil { - return n.GetMinTransactionVersionCalled() +// MinNumberOfMetaNodes - +func (n *NodesSetupStub) MinNumberOfMetaNodes() uint32 { + if n.MinNumberOfMetaNodesCalled != nil { + return n.MinNumberOfMetaNodesCalled() } return 1 } -// GetShardConsensusGroupSize - -func (n *NodesSetupStub) GetShardConsensusGroupSize() uint32 { - if n.GetShardConsensusGroupSizeCalled != nil { - return n.GetShardConsensusGroupSizeCalled() +// MinNumberOfShardNodes - +func (n *NodesSetupStub) MinNumberOfShardNodes() uint32 { + if n.MinNumberOfShardNodesCalled != nil { + return n.MinNumberOfShardNodesCalled() } - return 0 + return 1 } -// GetMetaConsensusGroupSize - -func (n *NodesSetupStub) GetMetaConsensusGroupSize() uint32 { - if n.GetMetaConsensusGroupSizeCalled != nil { - return n.GetMetaConsensusGroupSizeCalled() +// GetHysteresis - +func (n *NodesSetupStub) GetHysteresis() float32 { + if n.GetHysteresisCalled != nil { + return n.GetHysteresisCalled() } return 0 } -// NumberOfShards - -func (n *NodesSetupStub) NumberOfShards() uint32 { - if n.NumberOfShardsCalled != nil { - return n.NumberOfShardsCalled() +// GetAdaptivity - +func (n *NodesSetupStub) GetAdaptivity() bool { + if n.GetAdaptivityCalled != nil { + return n.GetAdaptivityCalled() } - return 0 + return false } // InitialNodesInfoForShard - -func (n *NodesSetupStub) InitialNodesInfoForShard(shardId uint32) ([]nodesCoordinator.GenesisNodeInfoHandler, []nodesCoordinator.GenesisNodeInfoHandler, error) { +func (n *NodesSetupStub) InitialNodesInfoForShard( + shardId uint32, +) ([]nodesCoordinator.GenesisNodeInfoHandler, []nodesCoordinator.GenesisNodeInfoHandler, error) { if n.InitialNodesInfoForShardCalled != nil { return n.InitialNodesInfoForShardCalled(shardId) } + return nil, nil, nil } @@ -140,49 +135,56 @@ func (n *NodesSetupStub) InitialNodesInfo() (map[uint32][]nodesCoordinator.Genes if n.InitialNodesInfoCalled != nil { return n.InitialNodesInfoCalled() } + return nil, nil } -// AllInitialNodes - -func (n *NodesSetupStub) AllInitialNodes() []nodesCoordinator.GenesisNodeInfoHandler { - if n.AllInitialNodesCalled != nil { - return n.AllInitialNodesCalled() +// GetStartTime - +func (n *NodesSetupStub) GetStartTime() int64 { + if n.GetStartTimeCalled != nil { + return n.GetStartTimeCalled() } - return nil + return 0 } -// GetShardIDForPubKey - -func (n *NodesSetupStub) GetShardIDForPubKey(pubkey []byte) (uint32, error) { - if n.GetShardIDForPubKeyCalled != nil { - return n.GetShardIDForPubKeyCalled(pubkey) +// MinNumberOfNodes - +func (n *NodesSetupStub) MinNumberOfNodes() uint32 { + if n.MinNumberOfNodesCalled != nil { + return n.MinNumberOfNodesCalled() } - return 0, nil + return 1 } -// InitialEligibleNodesPubKeysForShard - -func (n *NodesSetupStub) InitialEligibleNodesPubKeysForShard(shardId uint32) ([]string, error) { - if n.InitialEligibleNodesPubKeysForShardCalled != nil { - return n.InitialEligibleNodesPubKeysForShardCalled(shardId) +// MinNumberOfNodesWithHysteresis - +func (n *NodesSetupStub) MinNumberOfNodesWithHysteresis() uint32 { + if n.MinNumberOfNodesWithHysteresisCalled != nil { + return n.MinNumberOfNodesWithHysteresisCalled() } - - return []string{"val1", "val2"}, nil + return n.MinNumberOfNodes() } -// InitialNodesPubKeys - -func (n *NodesSetupStub) InitialNodesPubKeys() map[uint32][]string { - if n.InitialNodesPubKeysCalled != nil { - return n.InitialNodesPubKeysCalled() +// 
AllInitialNodes - +func (n *NodesSetupStub) AllInitialNodes() []nodesCoordinator.GenesisNodeInfoHandler { + if n.AllInitialNodesCalled != nil { + return n.AllInitialNodesCalled() } + return nil +} - return map[uint32][]string{0: {"val1", "val2"}} +// GetChainId - +func (n *NodesSetupStub) GetChainId() string { + if n.GetChainIdCalled != nil { + return n.GetChainIdCalled() + } + return "chainID" } -// MinNumberOfNodesWithHysteresis - -func (n *NodesSetupStub) MinNumberOfNodesWithHysteresis() uint32 { - if n.MinNumberOfNodesWithHysteresisCalled != nil { - return n.MinNumberOfNodesWithHysteresisCalled() +// GetMinTransactionVersion - +func (n *NodesSetupStub) GetMinTransactionVersion() uint32 { + if n.GetMinTransactionVersionCalled != nil { + return n.GetMinTransactionVersionCalled() } - return n.MinNumberOfNodes() + return 1 } // MinShardHysteresisNodes - diff --git a/testscommon/headerHandlerStub.go b/testscommon/headerHandlerStub.go index 7bbd8d2883e..ab1d354ec60 100644 --- a/testscommon/headerHandlerStub.go +++ b/testscommon/headerHandlerStub.go @@ -12,6 +12,7 @@ type HeaderHandlerStub struct { EpochField uint32 RoundField uint64 TimestampField uint64 + BlockBodyTypeInt32Field int32 GetMiniBlockHeadersWithDstCalled func(destId uint32) map[string]uint32 GetOrderedCrossMiniblocksWithDstCalled func(destId uint32) []*data.MiniBlockInfo GetPubKeysBitmapCalled func() []byte @@ -28,6 +29,15 @@ type HeaderHandlerStub struct { HasScheduledMiniBlocksCalled func() bool GetNonceCalled func() uint64 CheckFieldsForNilCalled func() error + SetShardIDCalled func(shardID uint32) error + SetPrevHashCalled func(hash []byte) error + SetPrevRandSeedCalled func(seed []byte) error + SetPubKeysBitmapCalled func(bitmap []byte) error + SetChainIDCalled func(chainID []byte) error + SetTimeStampCalled func(timestamp uint64) error + SetRandSeedCalled func(seed []byte) error + SetSignatureCalled func(signature []byte) error + SetLeaderSignatureCalled func(signature []byte) error } // GetAccumulatedFees - @@ -56,7 +66,10 @@ func (hhs *HeaderHandlerStub) GetReceiptsHash() []byte { } // SetShardID - -func (hhs *HeaderHandlerStub) SetShardID(_ uint32) error { +func (hhs *HeaderHandlerStub) SetShardID(shardID uint32) error { + if hhs.SetShardIDCalled != nil { + return hhs.SetShardIDCalled(shardID) + } return nil } @@ -114,7 +127,10 @@ func (hhs *HeaderHandlerStub) GetPrevHash() []byte { // GetPrevRandSeed - func (hhs *HeaderHandlerStub) GetPrevRandSeed() []byte { - return hhs.GetPrevRandSeedCalled() + if hhs.GetPrevRandSeedCalled != nil { + return hhs.GetPrevRandSeedCalled() + } + return make([]byte, 0) } // GetRandSeed - @@ -124,7 +140,10 @@ func (hhs *HeaderHandlerStub) GetRandSeed() []byte { // GetPubKeysBitmap - func (hhs *HeaderHandlerStub) GetPubKeysBitmap() []byte { - return hhs.GetPubKeysBitmapCalled() + if hhs.GetPubKeysBitmapCalled != nil { + return hhs.GetPubKeysBitmapCalled() + } + return make([]byte, 0) } // GetSignature - @@ -172,8 +191,11 @@ func (hhs *HeaderHandlerStub) SetRound(_ uint64) error { } // SetTimeStamp - -func (hhs *HeaderHandlerStub) SetTimeStamp(_ uint64) error { - panic("implement me") +func (hhs *HeaderHandlerStub) SetTimeStamp(timestamp uint64) error { + if hhs.SetTimeStampCalled != nil { + return hhs.SetTimeStampCalled(timestamp) + } + return nil } // SetRootHash - @@ -182,38 +204,59 @@ func (hhs *HeaderHandlerStub) SetRootHash(_ []byte) error { } // SetPrevHash - -func (hhs *HeaderHandlerStub) SetPrevHash(_ []byte) error { - panic("implement me") +func (hhs *HeaderHandlerStub) 
SetPrevHash(hash []byte) error { + if hhs.SetPrevHashCalled != nil { + return hhs.SetPrevHashCalled(hash) + } + return nil } // SetPrevRandSeed - -func (hhs *HeaderHandlerStub) SetPrevRandSeed(_ []byte) error { - panic("implement me") +func (hhs *HeaderHandlerStub) SetPrevRandSeed(seed []byte) error { + if hhs.SetPrevRandSeedCalled != nil { + return hhs.SetPrevRandSeedCalled(seed) + } + return nil } // SetRandSeed - -func (hhs *HeaderHandlerStub) SetRandSeed(_ []byte) error { - panic("implement me") +func (hhs *HeaderHandlerStub) SetRandSeed(seed []byte) error { + if hhs.SetRandSeedCalled != nil { + return hhs.SetRandSeedCalled(seed) + } + return nil } // SetPubKeysBitmap - -func (hhs *HeaderHandlerStub) SetPubKeysBitmap(_ []byte) error { - panic("implement me") +func (hhs *HeaderHandlerStub) SetPubKeysBitmap(bitmap []byte) error { + if hhs.SetPubKeysBitmapCalled != nil { + return hhs.SetPubKeysBitmapCalled(bitmap) + } + return nil } // SetSignature - -func (hhs *HeaderHandlerStub) SetSignature(_ []byte) error { - panic("implement me") +func (hhs *HeaderHandlerStub) SetSignature(signature []byte) error { + if hhs.SetSignatureCalled != nil { + return hhs.SetSignatureCalled(signature) + } + return nil } // SetLeaderSignature - -func (hhs *HeaderHandlerStub) SetLeaderSignature(_ []byte) error { - panic("implement me") +func (hhs *HeaderHandlerStub) SetLeaderSignature(signature []byte) error { + if hhs.SetLeaderSignatureCalled != nil { + return hhs.SetLeaderSignatureCalled(signature) + } + return nil } // SetChainID - -func (hhs *HeaderHandlerStub) SetChainID(_ []byte) error { - panic("implement me") +func (hhs *HeaderHandlerStub) SetChainID(chainID []byte) error { + if hhs.SetChainIDCalled != nil { + return hhs.SetChainIDCalled(chainID) + } + return nil } // SetTxCount - @@ -248,7 +291,7 @@ func (hhs *HeaderHandlerStub) GetMetaBlockHashes() [][]byte { // GetBlockBodyTypeInt32 - func (hhs *HeaderHandlerStub) GetBlockBodyTypeInt32() int32 { - panic("implement me") + return hhs.BlockBodyTypeInt32Field } // GetValidatorStatsRootHash - @@ -377,3 +420,10 @@ func (hhs *HeaderHandlerStub) HasScheduledMiniBlocks() bool { } return false } + +// SetBlockBodyTypeInt32 - +func (hhs *HeaderHandlerStub) SetBlockBodyTypeInt32(blockBodyType int32) error { + hhs.BlockBodyTypeInt32Field = blockBodyType + + return nil +} diff --git a/testscommon/integrationtests/factory.go b/testscommon/integrationtests/factory.go index 4d2f9ad02d8..9acfa7c5e10 100644 --- a/testscommon/integrationtests/factory.go +++ b/testscommon/integrationtests/factory.go @@ -62,8 +62,7 @@ func CreateStorer(parentDir string) storage.Storer { MaxBatchSize: 45000, MaxOpenFiles: 10, } - dbConfigHandler := factory.NewDBConfigHandler(dbConfig) - persisterFactory, err := factory.NewPersisterFactory(dbConfigHandler) + persisterFactory, err := factory.NewPersisterFactory(dbConfig) if err != nil { return nil } diff --git a/testscommon/mainFactoryMocks/bootstrapComponentsStub.go b/testscommon/mainFactoryMocks/bootstrapComponentsStub.go index 8c9d56dca7b..62d7232eaf4 100644 --- a/testscommon/mainFactoryMocks/bootstrapComponentsStub.go +++ b/testscommon/mainFactoryMocks/bootstrapComponentsStub.go @@ -6,19 +6,21 @@ import ( "github.com/multiversx/mx-chain-go/factory" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/sharding" + "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" ) // BootstrapComponentsStub - type BootstrapComponentsStub struct { - Bootstrapper factory.EpochStartBootstrapper - BootstrapParams 
factory.BootstrapParamsHolder - NodeRole core.NodeType - ShCoordinator sharding.Coordinator - ShardCoordinatorCalled func() sharding.Coordinator - HdrVersionHandler nodeFactory.HeaderVersionHandler - VersionedHdrFactory nodeFactory.VersionedHeaderFactory - HdrIntegrityVerifier nodeFactory.HeaderIntegrityVerifierHandler - GuardedAccountHandlerField process.GuardedAccountHandler + Bootstrapper factory.EpochStartBootstrapper + BootstrapParams factory.BootstrapParamsHolder + NodeRole core.NodeType + ShCoordinator sharding.Coordinator + ShardCoordinatorCalled func() sharding.Coordinator + HdrVersionHandler nodeFactory.HeaderVersionHandler + VersionedHdrFactory nodeFactory.VersionedHeaderFactory + HdrIntegrityVerifier nodeFactory.HeaderIntegrityVerifierHandler + GuardedAccountHandlerField process.GuardedAccountHandler + NodesCoordinatorRegistryFactoryField nodesCoordinator.NodesCoordinatorRegistryFactory } // Create - @@ -85,6 +87,11 @@ func (bcs *BootstrapComponentsStub) GuardedAccountHandler() process.GuardedAccou return bcs.GuardedAccountHandlerField } +// NodesCoordinatorRegistryFactory - +func (bcs *BootstrapComponentsStub) NodesCoordinatorRegistryFactory() nodesCoordinator.NodesCoordinatorRegistryFactory { + return bcs.NodesCoordinatorRegistryFactoryField +} + // String - func (bcs *BootstrapComponentsStub) String() string { return "BootstrapComponentsStub" diff --git a/testscommon/mainFactoryMocks/dataComponentsStub.go b/testscommon/mainFactoryMocks/dataComponentsStub.go new file mode 100644 index 00000000000..3de2c0b33e6 --- /dev/null +++ b/testscommon/mainFactoryMocks/dataComponentsStub.go @@ -0,0 +1,69 @@ +package mainFactoryMocks + +import ( + "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-go/dataRetriever" + "github.com/multiversx/mx-chain-go/factory" +) + +// DataComponentsHolderStub - +type DataComponentsHolderStub struct { + BlockchainCalled func() data.ChainHandler + SetBlockchainCalled func(chain data.ChainHandler) + StorageServiceCalled func() dataRetriever.StorageService + DatapoolCalled func() dataRetriever.PoolsHolder + MiniBlocksProviderCalled func() factory.MiniBlockProvider + CloneCalled func() interface{} +} + +// Blockchain - +func (dchs *DataComponentsHolderStub) Blockchain() data.ChainHandler { + if dchs.BlockchainCalled != nil { + return dchs.BlockchainCalled() + } + return nil +} + +// SetBlockchain - +func (dchs *DataComponentsHolderStub) SetBlockchain(chain data.ChainHandler) { + if dchs.SetBlockchainCalled != nil { + dchs.SetBlockchainCalled(chain) + } +} + +// StorageService - +func (dchs *DataComponentsHolderStub) StorageService() dataRetriever.StorageService { + if dchs.StorageServiceCalled != nil { + return dchs.StorageServiceCalled() + } + return nil +} + +// Datapool - +func (dchs *DataComponentsHolderStub) Datapool() dataRetriever.PoolsHolder { + if dchs.DatapoolCalled != nil { + return dchs.DatapoolCalled() + } + return nil +} + +// MiniBlocksProvider - +func (dchs *DataComponentsHolderStub) MiniBlocksProvider() factory.MiniBlockProvider { + if dchs.MiniBlocksProviderCalled != nil { + return dchs.MiniBlocksProviderCalled() + } + return nil +} + +// Clone - +func (dchs *DataComponentsHolderStub) Clone() interface{} { + if dchs.CloneCalled != nil { + return dchs.CloneCalled() + } + return nil +} + +// IsInterfaceNil - +func (dchs *DataComponentsHolderStub) IsInterfaceNil() bool { + return dchs == nil +} diff --git a/testscommon/managedPeersHolderStub.go b/testscommon/managedPeersHolderStub.go index 0bd1948d813..ef9a550fe2b 
100644 --- a/testscommon/managedPeersHolderStub.go +++ b/testscommon/managedPeersHolderStub.go @@ -17,6 +17,7 @@ type ManagedPeersHolderStub struct { IncrementRoundsWithoutReceivedMessagesCalled func(pkBytes []byte) ResetRoundsWithoutReceivedMessagesCalled func(pkBytes []byte, pid core.PeerID) GetManagedKeysByCurrentNodeCalled func() map[string]crypto.PrivateKey + GetLoadedKeysByCurrentNodeCalled func() [][]byte IsKeyManagedByCurrentNodeCalled func(pkBytes []byte) bool IsKeyRegisteredCalled func(pkBytes []byte) bool IsPidManagedByCurrentNodeCalled func(pid core.PeerID) bool @@ -90,6 +91,14 @@ func (stub *ManagedPeersHolderStub) GetManagedKeysByCurrentNode() map[string]cry return nil } +// GetLoadedKeysByCurrentNode - +func (stub *ManagedPeersHolderStub) GetLoadedKeysByCurrentNode() [][]byte { + if stub.GetLoadedKeysByCurrentNodeCalled != nil { + return stub.GetLoadedKeysByCurrentNodeCalled() + } + return make([][]byte, 0) +} + // IsKeyManagedByCurrentNode - func (stub *ManagedPeersHolderStub) IsKeyManagedByCurrentNode(pkBytes []byte) bool { if stub.IsKeyManagedByCurrentNodeCalled != nil { diff --git a/testscommon/managedPeersMonitorStub.go b/testscommon/managedPeersMonitorStub.go index 2ae60ccc55e..43aea679c14 100644 --- a/testscommon/managedPeersMonitorStub.go +++ b/testscommon/managedPeersMonitorStub.go @@ -6,6 +6,7 @@ type ManagedPeersMonitorStub struct { GetEligibleManagedKeysCalled func() ([][]byte, error) GetWaitingManagedKeysCalled func() ([][]byte, error) GetManagedKeysCalled func() [][]byte + GetLoadedKeysCalled func() [][]byte } // GetManagedKeys - @@ -16,6 +17,14 @@ func (stub *ManagedPeersMonitorStub) GetManagedKeys() [][]byte { return make([][]byte, 0) } +// GetLoadedKeys - +func (stub *ManagedPeersMonitorStub) GetLoadedKeys() [][]byte { + if stub.GetLoadedKeysCalled != nil { + return stub.GetLoadedKeysCalled() + } + return make([][]byte, 0) +} + // GetManagedKeysCount - func (stub *ManagedPeersMonitorStub) GetManagedKeysCount() int { if stub.GetManagedKeysCountCalled != nil { diff --git a/testscommon/maxNodesChangeConfigProviderStub.go b/testscommon/maxNodesChangeConfigProviderStub.go new file mode 100644 index 00000000000..1d7195e84f7 --- /dev/null +++ b/testscommon/maxNodesChangeConfigProviderStub.go @@ -0,0 +1,40 @@ +package testscommon + +import "github.com/multiversx/mx-chain-go/config" + +// MaxNodesChangeConfigProviderStub - +type MaxNodesChangeConfigProviderStub struct { + GetAllNodesConfigCalled func() []config.MaxNodesChangeConfig + GetCurrentNodesConfigCalled func() config.MaxNodesChangeConfig + EpochConfirmedCalled func(epoch uint32, round uint64) +} + +// GetAllNodesConfig - +func (stub *MaxNodesChangeConfigProviderStub) GetAllNodesConfig() []config.MaxNodesChangeConfig { + if stub.GetAllNodesConfigCalled != nil { + return stub.GetAllNodesConfigCalled() + } + + return nil +} + +// GetCurrentNodesConfig - +func (stub *MaxNodesChangeConfigProviderStub) GetCurrentNodesConfig() config.MaxNodesChangeConfig { + if stub.GetCurrentNodesConfigCalled != nil { + return stub.GetCurrentNodesConfigCalled() + } + + return config.MaxNodesChangeConfig{} +} + +// EpochConfirmed - +func (stub *MaxNodesChangeConfigProviderStub) EpochConfirmed(epoch uint32, round uint64) { + if stub.EpochConfirmedCalled != nil { + stub.EpochConfirmedCalled(epoch, round) + } +} + +// IsInterfaceNil - +func (stub *MaxNodesChangeConfigProviderStub) IsInterfaceNil() bool { + return stub == nil +} diff --git a/testscommon/nodesSetupMock.go b/testscommon/nodesSetupMock.go deleted file mode 100644 index 
070e0ecb6a2..00000000000 --- a/testscommon/nodesSetupMock.go +++ /dev/null @@ -1,191 +0,0 @@ -package testscommon - -import ( - "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" -) - -// NodesSetupStub - -type NodesSetupStub struct { - InitialNodesPubKeysCalled func() map[uint32][]string - InitialEligibleNodesPubKeysForShardCalled func(shardId uint32) ([]string, error) - GetShardIDForPubKeyCalled func(pubKey []byte) (uint32, error) - NumberOfShardsCalled func() uint32 - GetShardConsensusGroupSizeCalled func() uint32 - GetMetaConsensusGroupSizeCalled func() uint32 - GetRoundDurationCalled func() uint64 - MinNumberOfMetaNodesCalled func() uint32 - MinNumberOfShardNodesCalled func() uint32 - GetHysteresisCalled func() float32 - GetAdaptivityCalled func() bool - InitialNodesInfoForShardCalled func(shardId uint32) ([]nodesCoordinator.GenesisNodeInfoHandler, []nodesCoordinator.GenesisNodeInfoHandler, error) - InitialNodesInfoCalled func() (map[uint32][]nodesCoordinator.GenesisNodeInfoHandler, map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) - GetStartTimeCalled func() int64 - MinNumberOfNodesCalled func() uint32 - AllInitialNodesCalled func() []nodesCoordinator.GenesisNodeInfoHandler - MinNumberOfNodesWithHysteresisCalled func() uint32 - MinShardHysteresisNodesCalled func() uint32 - MinMetaHysteresisNodesCalled func() uint32 -} - -// InitialNodesPubKeys - -func (n *NodesSetupStub) InitialNodesPubKeys() map[uint32][]string { - if n.InitialNodesPubKeysCalled != nil { - return n.InitialNodesPubKeysCalled() - } - - return map[uint32][]string{0: {"val1", "val2"}} -} - -// InitialEligibleNodesPubKeysForShard - -func (n *NodesSetupStub) InitialEligibleNodesPubKeysForShard(shardId uint32) ([]string, error) { - if n.InitialEligibleNodesPubKeysForShardCalled != nil { - return n.InitialEligibleNodesPubKeysForShardCalled(shardId) - } - - return []string{"val1", "val2"}, nil -} - -// NumberOfShards - -func (n *NodesSetupStub) NumberOfShards() uint32 { - if n.NumberOfShardsCalled != nil { - return n.NumberOfShardsCalled() - } - return 1 -} - -// GetShardIDForPubKey - -func (n *NodesSetupStub) GetShardIDForPubKey(pubkey []byte) (uint32, error) { - if n.GetShardIDForPubKeyCalled != nil { - return n.GetShardIDForPubKeyCalled(pubkey) - } - return 0, nil -} - -// GetShardConsensusGroupSize - -func (n *NodesSetupStub) GetShardConsensusGroupSize() uint32 { - if n.GetShardConsensusGroupSizeCalled != nil { - return n.GetShardConsensusGroupSizeCalled() - } - return 1 -} - -// GetMetaConsensusGroupSize - -func (n *NodesSetupStub) GetMetaConsensusGroupSize() uint32 { - if n.GetMetaConsensusGroupSizeCalled != nil { - return n.GetMetaConsensusGroupSizeCalled() - } - return 1 -} - -// GetRoundDuration - -func (n *NodesSetupStub) GetRoundDuration() uint64 { - if n.GetRoundDurationCalled != nil { - return n.GetRoundDurationCalled() - } - return 4000 -} - -// MinNumberOfMetaNodes - -func (n *NodesSetupStub) MinNumberOfMetaNodes() uint32 { - if n.MinNumberOfMetaNodesCalled != nil { - return n.MinNumberOfMetaNodesCalled() - } - return 1 -} - -// MinNumberOfShardNodes - -func (n *NodesSetupStub) MinNumberOfShardNodes() uint32 { - if n.MinNumberOfShardNodesCalled != nil { - return n.MinNumberOfShardNodesCalled() - } - return 1 -} - -// GetHysteresis - -func (n *NodesSetupStub) GetHysteresis() float32 { - if n.GetHysteresisCalled != nil { - return n.GetHysteresisCalled() - } - return 0 -} - -// GetAdaptivity - -func (n *NodesSetupStub) GetAdaptivity() bool { - if n.GetAdaptivityCalled != nil { - return 
n.GetAdaptivityCalled() - } - return false -} - -// InitialNodesInfoForShard - -func (n *NodesSetupStub) InitialNodesInfoForShard( - shardId uint32, -) ([]nodesCoordinator.GenesisNodeInfoHandler, []nodesCoordinator.GenesisNodeInfoHandler, error) { - if n.InitialNodesInfoForShardCalled != nil { - return n.InitialNodesInfoForShardCalled(shardId) - } - - return nil, nil, nil -} - -// InitialNodesInfo - -func (n *NodesSetupStub) InitialNodesInfo() (map[uint32][]nodesCoordinator.GenesisNodeInfoHandler, map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) { - if n.InitialNodesInfoCalled != nil { - return n.InitialNodesInfoCalled() - } - - return nil, nil -} - -// GetStartTime - -func (n *NodesSetupStub) GetStartTime() int64 { - if n.GetStartTimeCalled != nil { - return n.GetStartTimeCalled() - } - return 0 -} - -// MinNumberOfNodes - -func (n *NodesSetupStub) MinNumberOfNodes() uint32 { - if n.MinNumberOfNodesCalled != nil { - return n.MinNumberOfNodesCalled() - } - return 1 -} - -// MinNumberOfNodesWithHysteresis - -func (n *NodesSetupStub) MinNumberOfNodesWithHysteresis() uint32 { - if n.MinNumberOfNodesWithHysteresisCalled != nil { - return n.MinNumberOfNodesWithHysteresisCalled() - } - return n.MinNumberOfNodes() -} - -// AllInitialNodes - -func (n *NodesSetupStub) AllInitialNodes() []nodesCoordinator.GenesisNodeInfoHandler { - if n.AllInitialNodesCalled != nil { - return n.AllInitialNodesCalled() - } - return nil -} - -// MinShardHysteresisNodes - -func (n *NodesSetupStub) MinShardHysteresisNodes() uint32 { - if n.MinShardHysteresisNodesCalled != nil { - return n.MinShardHysteresisNodesCalled() - } - return 1 -} - -// MinMetaHysteresisNodes - -func (n *NodesSetupStub) MinMetaHysteresisNodes() uint32 { - if n.MinMetaHysteresisNodesCalled != nil { - return n.MinMetaHysteresisNodesCalled() - } - return 1 -} - -// IsInterfaceNil - -func (n *NodesSetupStub) IsInterfaceNil() bool { - return n == nil -} diff --git a/testscommon/nodesSetupMock/nodesSetupMock.go b/testscommon/nodesSetupMock/nodesSetupMock.go new file mode 100644 index 00000000000..392cb038719 --- /dev/null +++ b/testscommon/nodesSetupMock/nodesSetupMock.go @@ -0,0 +1,47 @@ +package nodesSetupMock + +// NodesSetupMock - +type NodesSetupMock struct { + NumberOfShardsField uint32 + HysteresisField float32 + MinNumberOfMetaNodesField uint32 + MinNumberOfShardNodesField uint32 +} + +// NumberOfShards - +func (n *NodesSetupMock) NumberOfShards() uint32 { + return n.NumberOfShardsField +} + +// GetHysteresis - +func (n *NodesSetupMock) GetHysteresis() float32 { + return n.HysteresisField +} + +// MinNumberOfMetaNodes - +func (n *NodesSetupMock) MinNumberOfMetaNodes() uint32 { + return n.MinNumberOfMetaNodesField +} + +// MinNumberOfShardNodes - +func (n *NodesSetupMock) MinNumberOfShardNodes() uint32 { + return n.MinNumberOfShardNodesField +} + +// MinNumberOfNodes - +func (n *NodesSetupMock) MinNumberOfNodes() uint32 { + return n.NumberOfShardsField*n.MinNumberOfShardNodesField + n.MinNumberOfMetaNodesField +} + +// MinNumberOfNodesWithHysteresis - +func (n *NodesSetupMock) MinNumberOfNodesWithHysteresis() uint32 { + hystNodesMeta := getHysteresisNodes(n.MinNumberOfMetaNodesField, n.HysteresisField) + hystNodesShard := getHysteresisNodes(n.MinNumberOfShardNodesField, n.HysteresisField) + minNumberOfNodes := n.MinNumberOfNodes() + + return minNumberOfNodes + hystNodesMeta + n.NumberOfShardsField*hystNodesShard +} + +func getHysteresisNodes(minNumNodes uint32, hysteresis float32) uint32 { + return uint32(float32(minNumNodes) * hysteresis) 
+} diff --git a/testscommon/pool/headersPoolStub.go b/testscommon/pool/headersPoolStub.go new file mode 100644 index 00000000000..66c01d91c68 --- /dev/null +++ b/testscommon/pool/headersPoolStub.go @@ -0,0 +1,105 @@ +package pool + +import ( + "errors" + + "github.com/multiversx/mx-chain-core-go/data" +) + +// HeadersPoolStub - +type HeadersPoolStub struct { + AddCalled func(headerHash []byte, header data.HeaderHandler) + RemoveHeaderByHashCalled func(headerHash []byte) + RemoveHeaderByNonceAndShardIdCalled func(hdrNonce uint64, shardId uint32) + GetHeaderByNonceAndShardIdCalled func(hdrNonce uint64, shardId uint32) ([]data.HeaderHandler, [][]byte, error) + GetHeaderByHashCalled func(hash []byte) (data.HeaderHandler, error) + ClearCalled func() + RegisterHandlerCalled func(handler func(header data.HeaderHandler, shardHeaderHash []byte)) + NoncesCalled func(shardId uint32) []uint64 + LenCalled func() int + MaxSizeCalled func() int + GetNumHeadersCalled func(shardId uint32) int +} + +// AddHeader - +func (hps *HeadersPoolStub) AddHeader(headerHash []byte, header data.HeaderHandler) { + if hps.AddCalled != nil { + hps.AddCalled(headerHash, header) + } +} + +// RemoveHeaderByHash - +func (hps *HeadersPoolStub) RemoveHeaderByHash(headerHash []byte) { + if hps.RemoveHeaderByHashCalled != nil { + hps.RemoveHeaderByHashCalled(headerHash) + } +} + +// RemoveHeaderByNonceAndShardId - +func (hps *HeadersPoolStub) RemoveHeaderByNonceAndShardId(hdrNonce uint64, shardId uint32) { + if hps.RemoveHeaderByNonceAndShardIdCalled != nil { + hps.RemoveHeaderByNonceAndShardIdCalled(hdrNonce, shardId) + } +} + +// GetHeadersByNonceAndShardId - +func (hps *HeadersPoolStub) GetHeadersByNonceAndShardId(hdrNonce uint64, shardId uint32) ([]data.HeaderHandler, [][]byte, error) { + if hps.GetHeaderByNonceAndShardIdCalled != nil { + return hps.GetHeaderByNonceAndShardIdCalled(hdrNonce, shardId) + } + return nil, nil, errors.New("err") +} + +// GetHeaderByHash - +func (hps *HeadersPoolStub) GetHeaderByHash(hash []byte) (data.HeaderHandler, error) { + if hps.GetHeaderByHashCalled != nil { + return hps.GetHeaderByHashCalled(hash) + } + return nil, nil +} + +// Clear - +func (hps *HeadersPoolStub) Clear() { + if hps.ClearCalled != nil { + hps.ClearCalled() + } +} + +// RegisterHandler - +func (hps *HeadersPoolStub) RegisterHandler(handler func(header data.HeaderHandler, shardHeaderHash []byte)) { + if hps.RegisterHandlerCalled != nil { + hps.RegisterHandlerCalled(handler) + } +} + +// Nonces - +func (hps *HeadersPoolStub) Nonces(shardId uint32) []uint64 { + if hps.NoncesCalled != nil { + return hps.NoncesCalled(shardId) + } + return nil +} + +// Len - +func (hps *HeadersPoolStub) Len() int { + return 0 +} + +// MaxSize - +func (hps *HeadersPoolStub) MaxSize() int { + return 100 +} + +// IsInterfaceNil - +func (hps *HeadersPoolStub) IsInterfaceNil() bool { + return hps == nil +} + +// GetNumHeaders - +func (hps *HeadersPoolStub) GetNumHeaders(shardId uint32) int { + if hps.GetNumHeadersCalled != nil { + return hps.GetNumHeadersCalled(shardId) + } + + return 0 +} diff --git a/factory/mock/forkDetectorStub.go b/testscommon/processMocks/forkDetectorStub.go similarity index 94% rename from factory/mock/forkDetectorStub.go rename to testscommon/processMocks/forkDetectorStub.go index 640c7e3899f..80ddc4d2ebf 100644 --- a/factory/mock/forkDetectorStub.go +++ b/testscommon/processMocks/forkDetectorStub.go @@ -1,4 +1,4 @@ -package mock +package processMocks import ( "github.com/multiversx/mx-chain-core-go/data" @@ -28,7 +28,10 @@ 
func (fdm *ForkDetectorStub) RestoreToGenesis() { // AddHeader - func (fdm *ForkDetectorStub) AddHeader(header data.HeaderHandler, hash []byte, state process.BlockHeaderState, selfNotarizedHeaders []data.HeaderHandler, selfNotarizedHeadersHashes [][]byte) error { - return fdm.AddHeaderCalled(header, hash, state, selfNotarizedHeaders, selfNotarizedHeadersHashes) + if fdm.AddHeaderCalled != nil { + return fdm.AddHeaderCalled(header, hash, state, selfNotarizedHeaders, selfNotarizedHeadersHashes) + } + return nil } // RemoveHeader - diff --git a/epochStart/mock/rewardsCreatorStub.go b/testscommon/rewardsCreatorStub.go similarity index 90% rename from epochStart/mock/rewardsCreatorStub.go rename to testscommon/rewardsCreatorStub.go index 9073048cca7..b9b0b2b0492 100644 --- a/epochStart/mock/rewardsCreatorStub.go +++ b/testscommon/rewardsCreatorStub.go @@ -1,4 +1,4 @@ -package mock +package testscommon import ( "math/big" @@ -12,10 +12,10 @@ import ( // RewardsCreatorStub - type RewardsCreatorStub struct { CreateRewardsMiniBlocksCalled func( - metaBlock data.MetaHeaderHandler, validatorsInfo map[uint32][]*state.ValidatorInfo, computedEconomics *block.Economics, + metaBlock data.MetaHeaderHandler, validatorsInfo state.ShardValidatorsInfoMapHandler, computedEconomics *block.Economics, ) (block.MiniBlockSlice, error) VerifyRewardsMiniBlocksCalled func( - metaBlock data.MetaHeaderHandler, validatorsInfo map[uint32][]*state.ValidatorInfo, computedEconomics *block.Economics, + metaBlock data.MetaHeaderHandler, validatorsInfo state.ShardValidatorsInfoMapHandler, computedEconomics *block.Economics, ) error GetProtocolSustainabilityRewardsCalled func() *big.Int GetLocalTxCacheCalled func() epochStart.TransactionCacher @@ -29,7 +29,7 @@ type RewardsCreatorStub struct { // CreateRewardsMiniBlocks - func (rcs *RewardsCreatorStub) CreateRewardsMiniBlocks( metaBlock data.MetaHeaderHandler, - validatorsInfo map[uint32][]*state.ValidatorInfo, + validatorsInfo state.ShardValidatorsInfoMapHandler, computedEconomics *block.Economics, ) (block.MiniBlockSlice, error) { if rcs.CreateRewardsMiniBlocksCalled != nil { @@ -42,7 +42,7 @@ func (rcs *RewardsCreatorStub) CreateRewardsMiniBlocks( // VerifyRewardsMiniBlocks - func (rcs *RewardsCreatorStub) VerifyRewardsMiniBlocks( metaBlock data.MetaHeaderHandler, - validatorsInfo map[uint32][]*state.ValidatorInfo, + validatorsInfo state.ShardValidatorsInfoMapHandler, computedEconomics *block.Economics, ) error { if rcs.VerifyRewardsMiniBlocksCalled != nil { diff --git a/testscommon/roundHandlerMock.go b/testscommon/roundHandlerMock.go index 976e8a55181..6c5d45cc7bc 100644 --- a/testscommon/roundHandlerMock.go +++ b/testscommon/roundHandlerMock.go @@ -10,12 +10,13 @@ type RoundHandlerMock struct { indexMut sync.RWMutex index int64 - IndexCalled func() int64 - TimeDurationCalled func() time.Duration - TimeStampCalled func() time.Time - UpdateRoundCalled func(time.Time, time.Time) - RemainingTimeCalled func(startTime time.Time, maxTime time.Duration) time.Duration - BeforeGenesisCalled func() bool + IndexCalled func() int64 + TimeDurationCalled func() time.Duration + TimeStampCalled func() time.Time + UpdateRoundCalled func(time.Time, time.Time) + RemainingTimeCalled func(startTime time.Time, maxTime time.Duration) time.Duration + BeforeGenesisCalled func() bool + IncrementIndexCalled func() } // BeforeGenesis - @@ -77,6 +78,13 @@ func (rndm *RoundHandlerMock) RemainingTime(startTime time.Time, maxTime time.Du return 4000 * time.Millisecond } +// IncrementIndex - +func (rndm 
*RoundHandlerMock) IncrementIndex() { + if rndm.IncrementIndexCalled != nil { + rndm.IncrementIndexCalled() + } +} + // IsInterfaceNil returns true if there is no value under the interface func (rndm *RoundHandlerMock) IsInterfaceNil() bool { return rndm == nil diff --git a/testscommon/shardingMocks/nodesCoordRegistryFactoryMock.go b/testscommon/shardingMocks/nodesCoordRegistryFactoryMock.go new file mode 100644 index 00000000000..2ed51dc9188 --- /dev/null +++ b/testscommon/shardingMocks/nodesCoordRegistryFactoryMock.go @@ -0,0 +1,32 @@ +package shardingMocks + +import ( + "encoding/json" + + "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" +) + +// NodesCoordinatorRegistryFactoryMock - +type NodesCoordinatorRegistryFactoryMock struct { +} + +// CreateNodesCoordinatorRegistry - +func (ncr *NodesCoordinatorRegistryFactoryMock) CreateNodesCoordinatorRegistry(buff []byte) (nodesCoordinator.NodesCoordinatorRegistryHandler, error) { + registry := &nodesCoordinator.NodesCoordinatorRegistry{} + err := json.Unmarshal(buff, registry) + if err != nil { + return nil, err + } + + return registry, nil +} + +// GetRegistryData - +func (ncr *NodesCoordinatorRegistryFactoryMock) GetRegistryData(registry nodesCoordinator.NodesCoordinatorRegistryHandler, _ uint32) ([]byte, error) { + return json.Marshal(registry) +} + +// IsInterfaceNil - +func (ncr *NodesCoordinatorRegistryFactoryMock) IsInterfaceNil() bool { + return ncr == nil +} diff --git a/testscommon/shardingMocks/nodesCoordinatorMock.go b/testscommon/shardingMocks/nodesCoordinatorMock.go index 8ea7177705b..3ee80f88d3d 100644 --- a/testscommon/shardingMocks/nodesCoordinatorMock.go +++ b/testscommon/shardingMocks/nodesCoordinatorMock.go @@ -11,21 +11,23 @@ import ( // NodesCoordinatorMock defines the behaviour of a struct able to do validator group selection type NodesCoordinatorMock struct { - Validators map[uint32][]nodesCoordinator.Validator - ShardConsensusSize uint32 - MetaConsensusSize uint32 - ShardId uint32 - NbShards uint32 - GetSelectedPublicKeysCalled func(selection []byte, shardId uint32, epoch uint32) (publicKeys []string, err error) - GetValidatorsPublicKeysCalled func(randomness []byte, round uint64, shardId uint32, epoch uint32) ([]string, error) - GetValidatorsRewardsAddressesCalled func(randomness []byte, round uint64, shardId uint32, epoch uint32) ([]string, error) - SetNodesPerShardsCalled func(nodes map[uint32][]nodesCoordinator.Validator, epoch uint32) error - ComputeValidatorsGroupCalled func(randomness []byte, round uint64, shardId uint32, epoch uint32) (validatorsGroup []nodesCoordinator.Validator, err error) - GetValidatorWithPublicKeyCalled func(publicKey []byte) (validator nodesCoordinator.Validator, shardId uint32, err error) - GetAllEligibleValidatorsPublicKeysCalled func(epoch uint32) (map[uint32][][]byte, error) - GetAllWaitingValidatorsPublicKeysCalled func() (map[uint32][][]byte, error) - ConsensusGroupSizeCalled func(uint32) int - GetValidatorsIndexesCalled func(publicKeys []string, epoch uint32) ([]uint64, error) + Validators map[uint32][]nodesCoordinator.Validator + ShardConsensusSize uint32 + MetaConsensusSize uint32 + ShardId uint32 + NbShards uint32 + GetSelectedPublicKeysCalled func(selection []byte, shardId uint32, epoch uint32) (publicKeys []string, err error) + GetValidatorsPublicKeysCalled func(randomness []byte, round uint64, shardId uint32, epoch uint32) ([]string, error) + GetValidatorsRewardsAddressesCalled func(randomness []byte, round uint64, shardId uint32, epoch uint32) ([]string, 
error) + SetNodesPerShardsCalled func(nodes map[uint32][]nodesCoordinator.Validator, epoch uint32) error + ComputeValidatorsGroupCalled func(randomness []byte, round uint64, shardId uint32, epoch uint32) (validatorsGroup []nodesCoordinator.Validator, err error) + GetValidatorWithPublicKeyCalled func(publicKey []byte) (validator nodesCoordinator.Validator, shardId uint32, err error) + GetAllEligibleValidatorsPublicKeysCalled func(epoch uint32) (map[uint32][][]byte, error) + GetAllWaitingValidatorsPublicKeysCalled func() (map[uint32][][]byte, error) + ConsensusGroupSizeCalled func(uint32) int + GetValidatorsIndexesCalled func(publicKeys []string, epoch uint32) ([]uint64, error) + GetAllShuffledOutValidatorsPublicKeysCalled func(epoch uint32) (map[uint32][][]byte, error) + GetNumTotalEligibleCalled func() uint64 } // NewNodesCoordinatorMock - @@ -78,6 +80,9 @@ func (ncm *NodesCoordinatorMock) GetAllLeavingValidatorsPublicKeys(_ uint32) (ma // GetNumTotalEligible - func (ncm *NodesCoordinatorMock) GetNumTotalEligible() uint64 { + if ncm.GetNumTotalEligibleCalled != nil { + return ncm.GetNumTotalEligibleCalled() + } return 1 } @@ -97,6 +102,14 @@ func (ncm *NodesCoordinatorMock) GetAllWaitingValidatorsPublicKeys(_ uint32) (ma return nil, nil } +// GetAllShuffledOutValidatorsPublicKeys - +func (ncm *NodesCoordinatorMock) GetAllShuffledOutValidatorsPublicKeys(epoch uint32) (map[uint32][][]byte, error) { + if ncm.GetAllShuffledOutValidatorsPublicKeysCalled != nil { + return ncm.GetAllShuffledOutValidatorsPublicKeysCalled(epoch) + } + return nil, nil +} + // GetValidatorsIndexes - func (ncm *NodesCoordinatorMock) GetValidatorsIndexes(publicKeys []string, epoch uint32) ([]uint64, error) { if ncm.GetValidatorsIndexesCalled != nil { diff --git a/testscommon/shardingMocks/nodesCoordinatorStub.go b/testscommon/shardingMocks/nodesCoordinatorStub.go index a9d3aecf380..9f82a5256e5 100644 --- a/testscommon/shardingMocks/nodesCoordinatorStub.go +++ b/testscommon/shardingMocks/nodesCoordinatorStub.go @@ -8,7 +8,6 @@ import ( // NodesCoordinatorStub - type NodesCoordinatorStub struct { - ComputeValidatorsGroupCalled func(randomness []byte, round uint64, shardId uint32, epoch uint32) ([]nodesCoordinator.Validator, error) GetValidatorsPublicKeysCalled func(randomness []byte, round uint64, shardId uint32, epoch uint32) ([]string, error) GetValidatorsRewardsAddressesCalled func(randomness []byte, round uint64, shardId uint32, epoch uint32) ([]string, error) GetValidatorWithPublicKeyCalled func(publicKey []byte) (validator nodesCoordinator.Validator, shardId uint32, err error) @@ -21,10 +20,11 @@ type NodesCoordinatorStub struct { GetConsensusWhitelistedNodesCalled func(epoch uint32) (map[string]struct{}, error) GetOwnPublicKeyCalled func() []byte GetWaitingEpochsLeftForPublicKeyCalled func(publicKey []byte) (uint32, error) + GetNumTotalEligibleCalled func() uint64 } // NodesCoordinatorToRegistry - -func (ncm *NodesCoordinatorStub) NodesCoordinatorToRegistry() *nodesCoordinator.NodesCoordinatorRegistry { +func (ncm *NodesCoordinatorStub) NodesCoordinatorToRegistry(uint32) nodesCoordinator.NodesCoordinatorRegistryHandler { return nil } @@ -51,7 +51,7 @@ func (ncm *NodesCoordinatorStub) GetAllLeavingValidatorsPublicKeys(_ uint32) (ma } // SetConfig - -func (ncm *NodesCoordinatorStub) SetConfig(_ *nodesCoordinator.NodesCoordinatorRegistry) error { +func (ncm *NodesCoordinatorStub) SetConfig(_ nodesCoordinator.NodesCoordinatorRegistryHandler) error { return nil } @@ -77,8 +77,16 @@ func (ncm *NodesCoordinatorStub) 
GetAllWaitingValidatorsPublicKeys(epoch uint32) return nil, nil } +// GetAllShuffledOutValidatorsPublicKeys - +func (ncm *NodesCoordinatorStub) GetAllShuffledOutValidatorsPublicKeys(_ uint32) (map[uint32][][]byte, error) { + return nil, nil +} + // GetNumTotalEligible - func (ncm *NodesCoordinatorStub) GetNumTotalEligible() uint64 { + if ncm.GetNumTotalEligibleCalled != nil { + return ncm.GetNumTotalEligibleCalled() + } return 1 } @@ -103,8 +111,8 @@ func (ncm *NodesCoordinatorStub) ComputeConsensusGroup( shardId uint32, epoch uint32, ) (validatorsGroup []nodesCoordinator.Validator, err error) { - if ncm.ComputeValidatorsGroupCalled != nil { - return ncm.ComputeValidatorsGroupCalled(randomness, round, shardId, epoch) + if ncm.ComputeConsensusGroupCalled != nil { + return ncm.ComputeConsensusGroupCalled(randomness, round, shardId, epoch) } var list []nodesCoordinator.Validator diff --git a/testscommon/stakingcommon/auctionListSelectorStub.go b/testscommon/stakingcommon/auctionListSelectorStub.go new file mode 100644 index 00000000000..8cc24960c82 --- /dev/null +++ b/testscommon/stakingcommon/auctionListSelectorStub.go @@ -0,0 +1,25 @@ +package stakingcommon + +import "github.com/multiversx/mx-chain-go/state" + +// AuctionListSelectorStub - +type AuctionListSelectorStub struct { + SelectNodesFromAuctionListCalled func(validatorsInfoMap state.ShardValidatorsInfoMapHandler, randomness []byte) error +} + +// SelectNodesFromAuctionList - +func (als *AuctionListSelectorStub) SelectNodesFromAuctionList( + validatorsInfoMap state.ShardValidatorsInfoMapHandler, + randomness []byte, +) error { + if als.SelectNodesFromAuctionListCalled != nil { + return als.SelectNodesFromAuctionListCalled(validatorsInfoMap, randomness) + } + + return nil +} + +// IsInterfaceNil - +func (als *AuctionListSelectorStub) IsInterfaceNil() bool { + return als == nil +} diff --git a/testscommon/stakingcommon/stakingCommon.go b/testscommon/stakingcommon/stakingCommon.go new file mode 100644 index 00000000000..1af9b441b9c --- /dev/null +++ b/testscommon/stakingcommon/stakingCommon.go @@ -0,0 +1,333 @@ +package stakingcommon + +import ( + "math/big" + "strconv" + + "github.com/multiversx/mx-chain-core-go/marshal" + "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/genesis/process/disabled" + "github.com/multiversx/mx-chain-go/process" + economicsHandler "github.com/multiversx/mx-chain-go/process/economics" + "github.com/multiversx/mx-chain-go/state" + "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" + "github.com/multiversx/mx-chain-go/testscommon/epochNotifier" + "github.com/multiversx/mx-chain-go/vm" + "github.com/multiversx/mx-chain-go/vm/systemSmartContracts" + logger "github.com/multiversx/mx-chain-logger-go" +) + +var log = logger.GetOrCreate("testscommon/stakingCommon") + +// RegisterValidatorKeys will register validator's staked key in the provided accounts db +func RegisterValidatorKeys( + accountsDB state.AccountsAdapter, + ownerAddress []byte, + rewardAddress []byte, + stakedKeys [][]byte, + totalStake *big.Int, + marshaller marshal.Marshalizer, +) { + AddValidatorData(accountsDB, ownerAddress, stakedKeys, totalStake, marshaller) + AddStakingData(accountsDB, ownerAddress, rewardAddress, stakedKeys, marshaller) + _, err := accountsDB.Commit() + log.LogIfError(err) +} + +// AddValidatorData will add the validator's registered keys in the provided accounts db +func AddValidatorData( + accountsDB state.AccountsAdapter, + ownerKey []byte, + registeredKeys [][]byte, + 
totalStake *big.Int, + marshaller marshal.Marshalizer, +) { + validatorSC := LoadUserAccount(accountsDB, vm.ValidatorSCAddress) + ownerStoredData, _, _ := validatorSC.RetrieveValue(ownerKey) + validatorData := &systemSmartContracts.ValidatorDataV2{} + if len(ownerStoredData) != 0 { + _ = marshaller.Unmarshal(validatorData, ownerStoredData) + validatorData.BlsPubKeys = append(validatorData.BlsPubKeys, registeredKeys...) + validatorData.TotalStakeValue = totalStake + } else { + validatorData = &systemSmartContracts.ValidatorDataV2{ + RegisterNonce: 0, + Epoch: 0, + RewardAddress: ownerKey, + TotalStakeValue: totalStake, + LockedStake: big.NewInt(0), + TotalUnstaked: big.NewInt(0), + BlsPubKeys: registeredKeys, + NumRegistered: uint32(len(registeredKeys)), + } + } + + marshaledData, _ := marshaller.Marshal(validatorData) + _ = validatorSC.SaveKeyValue(ownerKey, marshaledData) + + _ = accountsDB.SaveAccount(validatorSC) +} + +// AddStakingData will add the owner's staked keys in the provided accounts db +func AddStakingData( + accountsDB state.AccountsAdapter, + ownerAddress []byte, + rewardAddress []byte, + stakedKeys [][]byte, + marshaller marshal.Marshalizer, +) { + stakedData := &systemSmartContracts.StakedDataV2_0{ + Staked: true, + RewardAddress: rewardAddress, + OwnerAddress: ownerAddress, + StakeValue: big.NewInt(100), + } + marshaledData, _ := marshaller.Marshal(stakedData) + + stakingSCAcc := LoadUserAccount(accountsDB, vm.StakingSCAddress) + for _, key := range stakedKeys { + _ = stakingSCAcc.SaveKeyValue(key, marshaledData) + } + + _ = accountsDB.SaveAccount(stakingSCAcc) +} + +// AddKeysToWaitingList will add the owner's provided bls keys in the staking queue list +func AddKeysToWaitingList( + accountsDB state.AccountsAdapter, + waitingKeys [][]byte, + marshaller marshal.Marshalizer, + rewardAddress []byte, + ownerAddress []byte, +) { + if len(waitingKeys) == 0 { + return + } + + stakingSCAcc := LoadUserAccount(accountsDB, vm.StakingSCAddress) + waitingList := getWaitingList(stakingSCAcc, marshaller) + + waitingListAlreadyHasElements := waitingList.Length > 0 + waitingListLastKeyBeforeAddingNewKeys := waitingList.LastKey + previousKey := waitingList.LastKey + if !waitingListAlreadyHasElements { + waitingList.FirstKey = getPrefixedWaitingKey(waitingKeys[0]) + previousKey = waitingList.FirstKey + } + + numWaitingKeys := len(waitingKeys) + waitingList.LastKey = getPrefixedWaitingKey(waitingKeys[numWaitingKeys-1]) + waitingList.Length += uint32(numWaitingKeys) + saveWaitingList(stakingSCAcc, marshaller, waitingList) + + for i, waitingKey := range waitingKeys { + waitingListElement := &systemSmartContracts.ElementInList{ + BLSPublicKey: waitingKey, + PreviousKey: previousKey, + NextKey: make([]byte, 0), + } + + if i < numWaitingKeys-1 { + nextKey := getPrefixedWaitingKey(waitingKeys[i+1]) + waitingListElement.NextKey = nextKey + } + + prefixedWaitingKey := getPrefixedWaitingKey(waitingKey) + saveStakedWaitingKey(stakingSCAcc, marshaller, rewardAddress, ownerAddress, waitingKey) + saveElemInList(stakingSCAcc, marshaller, waitingListElement, prefixedWaitingKey) + + previousKey = prefixedWaitingKey + } + + if waitingListAlreadyHasElements { + lastElem, _ := GetWaitingListElement(stakingSCAcc, marshaller, waitingListLastKeyBeforeAddingNewKeys) + lastElem.NextKey = getPrefixedWaitingKey(waitingKeys[0]) + saveElemInList(stakingSCAcc, marshaller, lastElem, waitingListLastKeyBeforeAddingNewKeys) + } + + _ = accountsDB.SaveAccount(stakingSCAcc) +} + +func getWaitingList( + stakingSCAcc 
state.UserAccountHandler, + marshaller marshal.Marshalizer, +) *systemSmartContracts.WaitingList { + marshaledData, _, _ := stakingSCAcc.RetrieveValue([]byte("waitingList")) + waitingList := &systemSmartContracts.WaitingList{} + _ = marshaller.Unmarshal(waitingList, marshaledData) + + return waitingList +} + +func saveWaitingList( + stakingSCAcc state.UserAccountHandler, + marshaller marshal.Marshalizer, + waitingList *systemSmartContracts.WaitingList, +) { + marshaledData, _ := marshaller.Marshal(waitingList) + _ = stakingSCAcc.SaveKeyValue([]byte("waitingList"), marshaledData) +} + +func getPrefixedWaitingKey(key []byte) []byte { + return []byte("w_" + string(key)) +} + +func saveStakedWaitingKey( + stakingSCAcc state.UserAccountHandler, + marshaller marshal.Marshalizer, + rewardAddress []byte, + ownerAddress []byte, + key []byte, +) { + stakedData := &systemSmartContracts.StakedDataV2_0{ + Waiting: true, + RewardAddress: rewardAddress, + OwnerAddress: ownerAddress, + StakeValue: big.NewInt(100), + } + + marshaledData, _ := marshaller.Marshal(stakedData) + _ = stakingSCAcc.SaveKeyValue(key, marshaledData) +} + +func saveElemInList( + stakingSCAcc state.UserAccountHandler, + marshaller marshal.Marshalizer, + elem *systemSmartContracts.ElementInList, + key []byte, +) { + marshaledData, _ := marshaller.Marshal(elem) + _ = stakingSCAcc.SaveKeyValue(key, marshaledData) +} + +// GetWaitingListElement returns the element in waiting list saved at the provided key +func GetWaitingListElement( + stakingSCAcc state.UserAccountHandler, + marshaller marshal.Marshalizer, + key []byte, +) (*systemSmartContracts.ElementInList, error) { + marshaledData, _, _ := stakingSCAcc.RetrieveValue(key) + if len(marshaledData) == 0 { + return nil, vm.ErrElementNotFound + } + + element := &systemSmartContracts.ElementInList{} + err := marshaller.Unmarshal(element, marshaledData) + if err != nil { + return nil, err + } + + return element, nil +} + +// LoadUserAccount returns address's state.UserAccountHandler from the provided db +func LoadUserAccount(accountsDB state.AccountsAdapter, address []byte) state.UserAccountHandler { + acc, _ := accountsDB.LoadAccount(address) + return acc.(state.UserAccountHandler) +} + +// CreateEconomicsData returns an initialized process.EconomicsDataHandler +func CreateEconomicsData() process.EconomicsDataHandler { + maxGasLimitPerBlock := strconv.FormatUint(1500000000, 10) + minGasPrice := strconv.FormatUint(10, 10) + minGasLimit := strconv.FormatUint(10, 10) + + argsNewEconomicsData := economicsHandler.ArgsNewEconomicsData{ + Economics: &config.EconomicsConfig{ + GlobalSettings: config.GlobalSettings{ + GenesisTotalSupply: "2000000000000000000000", + MinimumInflation: 0, + YearSettings: []*config.YearSetting{ + { + Year: 0, + MaximumInflation: 0.01, + }, + }, + }, + RewardsSettings: config.RewardsSettings{ + RewardsConfigByEpoch: []config.EpochRewardSettings{ + { + LeaderPercentage: 0.1, + DeveloperPercentage: 0.1, + ProtocolSustainabilityPercentage: 0.1, + ProtocolSustainabilityAddress: "protocol", + TopUpGradientPoint: "300000000000000000000", + TopUpFactor: 0.25, + }, + }, + }, + FeeSettings: config.FeeSettings{ + GasLimitSettings: []config.GasLimitSetting{ + { + MaxGasLimitPerBlock: maxGasLimitPerBlock, + MaxGasLimitPerMiniBlock: maxGasLimitPerBlock, + MaxGasLimitPerMetaBlock: maxGasLimitPerBlock, + MaxGasLimitPerMetaMiniBlock: maxGasLimitPerBlock, + MaxGasLimitPerTx: maxGasLimitPerBlock, + MinGasLimit: minGasLimit, + ExtraGasLimitGuardedTx: maxGasLimitPerBlock, + }, + }, + 
MinGasPrice: minGasPrice, + GasPerDataByte: "1", + GasPriceModifier: 1.0, + MaxGasPriceSetGuardian: minGasPrice, + }, + }, + EpochNotifier: &epochNotifier.EpochNotifierStub{}, + EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + TxVersionChecker: &disabled.TxVersionChecker{}, + } + economicsData, _ := economicsHandler.NewEconomicsData(argsNewEconomicsData) + return economicsData +} + +// SaveNodesConfig saves the nodes config in accounts db under "nodesConfig" key with provided params +func SaveNodesConfig( + accountsDB state.AccountsAdapter, + marshaller marshal.Marshalizer, + stakedNodes, + minNumNodes, + maxNumNodes int64, +) { + nodesConfigData := &systemSmartContracts.StakingNodesConfig{ + StakedNodes: stakedNodes, + MinNumNodes: minNumNodes, + MaxNumNodes: maxNumNodes, + } + nodesDataBytes, err := marshaller.Marshal(nodesConfigData) + log.LogIfError(err) + + account, err := accountsDB.LoadAccount(vm.StakingSCAddress) + log.LogIfError(err) + + userAccount, _ := account.(state.UserAccountHandler) + err = userAccount.SaveKeyValue([]byte("nodesConfig"), nodesDataBytes) + log.LogIfError(err) + err = accountsDB.SaveAccount(account) + log.LogIfError(err) + _, err = accountsDB.Commit() + log.LogIfError(err) +} + +// SaveDelegationManagerConfig will save a mock configuration for the delegation manager SC +func SaveDelegationManagerConfig(accountsDB state.AccountsAdapter, marshaller marshal.Marshalizer) { + managementData := &systemSmartContracts.DelegationManagement{ + MinDeposit: big.NewInt(100), + LastAddress: vm.FirstDelegationSCAddress, + MinDelegationAmount: big.NewInt(1), + } + marshaledData, err := marshaller.Marshal(managementData) + log.LogIfError(err) + + acc, err := accountsDB.LoadAccount(vm.DelegationManagerSCAddress) + log.LogIfError(err) + delegationAcc, _ := acc.(state.UserAccountHandler) + + err = delegationAcc.SaveKeyValue([]byte("delegationManagement"), marshaledData) + log.LogIfError(err) + err = accountsDB.SaveAccount(delegationAcc) + log.LogIfError(err) + _, err = accountsDB.Commit() + log.LogIfError(err) +} diff --git a/epochStart/mock/stakingDataProviderStub.go b/testscommon/stakingcommon/stakingDataProviderStub.go similarity index 51% rename from epochStart/mock/stakingDataProviderStub.go rename to testscommon/stakingcommon/stakingDataProviderStub.go index a2cab61586b..dc2b990c20c 100644 --- a/epochStart/mock/stakingDataProviderStub.go +++ b/testscommon/stakingcommon/stakingDataProviderStub.go @@ -1,32 +1,35 @@ -package mock +package stakingcommon import ( "math/big" + "github.com/multiversx/mx-chain-go/epochStart" "github.com/multiversx/mx-chain-go/state" ) // StakingDataProviderStub - type StakingDataProviderStub struct { CleanCalled func() - PrepareStakingDataCalled func(keys map[uint32][][]byte) error + PrepareStakingDataCalled func(validatorsMap state.ShardValidatorsInfoMapHandler) error GetTotalStakeEligibleNodesCalled func() *big.Int GetTotalTopUpStakeEligibleNodesCalled func() *big.Int GetNodeStakedTopUpCalled func(blsKey []byte) (*big.Int, error) - FillValidatorInfoCalled func(blsKey []byte) error - ComputeUnQualifiedNodesCalled func(validatorInfos map[uint32][]*state.ValidatorInfo) ([][]byte, map[string][][]byte, error) + FillValidatorInfoCalled func(validator state.ValidatorInfoHandler) error + ComputeUnQualifiedNodesCalled func(validatorInfos state.ShardValidatorsInfoMapHandler) ([][]byte, map[string][][]byte, error) + GetBlsKeyOwnerCalled func(blsKey []byte) (string, error) + GetOwnersDataCalled func() 
map[string]*epochStart.OwnerData } // FillValidatorInfo - -func (sdps *StakingDataProviderStub) FillValidatorInfo(blsKey []byte) error { +func (sdps *StakingDataProviderStub) FillValidatorInfo(validator state.ValidatorInfoHandler) error { if sdps.FillValidatorInfoCalled != nil { - return sdps.FillValidatorInfoCalled(blsKey) + return sdps.FillValidatorInfoCalled(validator) } return nil } // ComputeUnQualifiedNodes - -func (sdps *StakingDataProviderStub) ComputeUnQualifiedNodes(validatorInfos map[uint32][]*state.ValidatorInfo) ([][]byte, map[string][][]byte, error) { +func (sdps *StakingDataProviderStub) ComputeUnQualifiedNodes(validatorInfos state.ShardValidatorsInfoMapHandler) ([][]byte, map[string][][]byte, error) { if sdps.ComputeUnQualifiedNodesCalled != nil { return sdps.ComputeUnQualifiedNodesCalled(validatorInfos) } @@ -57,10 +60,10 @@ func (sdps *StakingDataProviderStub) GetNodeStakedTopUp(blsKey []byte) (*big.Int return big.NewInt(0), nil } -// PrepareStakingDataForRewards - -func (sdps *StakingDataProviderStub) PrepareStakingDataForRewards(keys map[uint32][][]byte) error { +// PrepareStakingData - +func (sdps *StakingDataProviderStub) PrepareStakingData(validatorsMap state.ShardValidatorsInfoMapHandler) error { if sdps.PrepareStakingDataCalled != nil { - return sdps.PrepareStakingDataCalled(keys) + return sdps.PrepareStakingDataCalled(validatorsMap) } return nil } @@ -72,6 +75,31 @@ func (sdps *StakingDataProviderStub) Clean() { } } +// GetBlsKeyOwner - +func (sdps *StakingDataProviderStub) GetBlsKeyOwner(blsKey []byte) (string, error) { + if sdps.GetBlsKeyOwnerCalled != nil { + return sdps.GetBlsKeyOwnerCalled(blsKey) + } + return "", nil +} + +// GetNumOfValidatorsInCurrentEpoch - +func (sdps *StakingDataProviderStub) GetNumOfValidatorsInCurrentEpoch() uint32 { + return 0 +} + +// GetOwnersData - +func (sdps *StakingDataProviderStub) GetOwnersData() map[string]*epochStart.OwnerData { + if sdps.GetOwnersDataCalled != nil { + return sdps.GetOwnersDataCalled() + } + return nil +} + +// EpochConfirmed - +func (sdps *StakingDataProviderStub) EpochConfirmed(uint32, uint64) { +} + // IsInterfaceNil - func (sdps *StakingDataProviderStub) IsInterfaceNil() bool { return sdps == nil diff --git a/node/mock/validatorsProviderStub.go b/testscommon/stakingcommon/validatorsProviderStub.go similarity index 51% rename from node/mock/validatorsProviderStub.go rename to testscommon/stakingcommon/validatorsProviderStub.go index 98ea652340b..0db49b4fde8 100644 --- a/node/mock/validatorsProviderStub.go +++ b/testscommon/stakingcommon/validatorsProviderStub.go @@ -1,12 +1,15 @@ -package mock +package stakingcommon import ( "github.com/multiversx/mx-chain-core-go/data/validator" + "github.com/multiversx/mx-chain-go/common" ) // ValidatorsProviderStub - type ValidatorsProviderStub struct { GetLatestValidatorsCalled func() map[string]*validator.ValidatorStatistics + GetAuctionListCalled func() ([]*common.AuctionListValidatorAPIResponse, error) + ForceUpdateCalled func() error } // GetLatestValidators - @@ -14,6 +17,25 @@ func (vp *ValidatorsProviderStub) GetLatestValidators() map[string]*validator.Va if vp.GetLatestValidatorsCalled != nil { return vp.GetLatestValidatorsCalled() } + + return nil +} + +// GetAuctionList - +func (vp *ValidatorsProviderStub) GetAuctionList() ([]*common.AuctionListValidatorAPIResponse, error) { + if vp.GetAuctionListCalled != nil { + return vp.GetAuctionListCalled() + } + + return nil, nil +} + +// ForceUpdate - +func (vp *ValidatorsProviderStub) ForceUpdate() error { + if 
vp.ForceUpdateCalled != nil { + return vp.ForceUpdateCalled() + } + return nil } diff --git a/testscommon/state/accountAdapterStub.go b/testscommon/state/accountAdapterStub.go index 433722f7e21..fa9305f8222 100644 --- a/testscommon/state/accountAdapterStub.go +++ b/testscommon/state/accountAdapterStub.go @@ -177,14 +177,14 @@ func (aas *StateUserAccountHandlerStub) ClaimDeveloperRewards(senderAddr []byte) return nil, nil } -//AddToDeveloperReward - +// AddToDeveloperReward - func (aas *StateUserAccountHandlerStub) AddToDeveloperReward(val *big.Int) { if aas.AddToDeveloperRewardCalled != nil { aas.AddToDeveloperRewardCalled(val) } } -//GetDeveloperReward - +// GetDeveloperReward - func (aas *StateUserAccountHandlerStub) GetDeveloperReward() *big.Int { if aas.GetDeveloperRewardCalled != nil { return aas.GetDeveloperRewardCalled() @@ -230,7 +230,7 @@ func (aas *StateUserAccountHandlerStub) GetUserName() []byte { return nil } -//IsGuarded - +// IsGuarded - func (aas *StateUserAccountHandlerStub) IsGuarded() bool { if aas.IsGuardedCalled != nil { return aas.IsGuardedCalled() diff --git a/testscommon/state/accountWrapperMock.go b/testscommon/state/accountWrapperMock.go index 9cbac29d8ce..8f5e794646a 100644 --- a/testscommon/state/accountWrapperMock.go +++ b/testscommon/state/accountWrapperMock.go @@ -205,7 +205,7 @@ func (awm *AccountWrapMock) SetDataTrie(trie common.Trie) { awm.trackableDataTrie.SetDataTrie(trie) } -//IncreaseNonce adds the given value to the current nonce +// IncreaseNonce adds the given value to the current nonce func (awm *AccountWrapMock) IncreaseNonce(val uint64) { awm.nonce = awm.nonce + val } diff --git a/testscommon/state/peerAccountHandlerMock.go b/testscommon/state/peerAccountHandlerMock.go index b3283be1280..870836cc00d 100644 --- a/testscommon/state/peerAccountHandlerMock.go +++ b/testscommon/state/peerAccountHandlerMock.go @@ -14,6 +14,7 @@ type PeerAccountHandlerMock struct { IncreaseValidatorSuccessRateValue uint32 DecreaseValidatorSuccessRateValue uint32 IncreaseValidatorIgnoredSignaturesValue uint32 + PreviousList string IncreaseLeaderSuccessRateCalled func(uint32) DecreaseLeaderSuccessRateCalled func(uint32) @@ -52,11 +53,26 @@ func (p *PeerAccountHandlerMock) GetList() string { return "" } +// GetPreviousList - +func (p *PeerAccountHandlerMock) GetPreviousList() string { + return "" +} + // GetIndexInList - func (p *PeerAccountHandlerMock) GetIndexInList() uint32 { return 0 } +// GetPreviousIndexInList - +func (p *PeerAccountHandlerMock) GetPreviousIndexInList() uint32 { + return 0 +} + +// GetBLSPublicKey - +func (p *PeerAccountHandlerMock) GetBLSPublicKey() []byte { + return nil +} + // SetBLSPublicKey - func (p *PeerAccountHandlerMock) SetBLSPublicKey([]byte) error { return nil @@ -290,13 +306,18 @@ func (p *PeerAccountHandlerMock) SetConsecutiveProposerMisses(consecutiveMisses } // SetListAndIndex - -func (p *PeerAccountHandlerMock) SetListAndIndex(shardID uint32, list string, index uint32) { +func (p *PeerAccountHandlerMock) SetListAndIndex(shardID uint32, list string, index uint32, _ bool) { if p.SetListAndIndexCalled != nil { p.SetListAndIndexCalled(shardID, list, index) } } +// SetPreviousList - +func (p *PeerAccountHandlerMock) SetPreviousList(list string) { + p.PreviousList = list +} + // IsInterfaceNil - func (p *PeerAccountHandlerMock) IsInterfaceNil() bool { - return false + return p == nil } diff --git a/testscommon/state/userAccountStub.go b/testscommon/state/userAccountStub.go index 3e4278b2d38..ce54f059252 100644 --- 
a/testscommon/state/userAccountStub.go +++ b/testscommon/state/userAccountStub.go @@ -30,6 +30,7 @@ type UserAccountStub struct { RetrieveValueCalled func(_ []byte) ([]byte, uint32, error) SetDataTrieCalled func(dataTrie common.Trie) GetRootHashCalled func() []byte + SaveKeyValueCalled func(key []byte, value []byte) error } // HasNewCode - @@ -172,7 +173,10 @@ func (u *UserAccountStub) RetrieveValue(key []byte) ([]byte, uint32, error) { } // SaveKeyValue - -func (u *UserAccountStub) SaveKeyValue(_ []byte, _ []byte) error { +func (u *UserAccountStub) SaveKeyValue(key []byte, value []byte) error { + if u.SaveKeyValueCalled != nil { + return u.SaveKeyValueCalled(key, value) + } return nil } diff --git a/testscommon/stateStatisticsHandlerStub.go b/testscommon/stateStatisticsHandlerStub.go new file mode 100644 index 00000000000..bc13bea90d4 --- /dev/null +++ b/testscommon/stateStatisticsHandlerStub.go @@ -0,0 +1,136 @@ +package testscommon + +// StateStatisticsHandlerStub - +type StateStatisticsHandlerStub struct { + ResetCalled func() + ResetSnapshotCalled func() + IncrementCacheCalled func() + CacheCalled func() uint64 + IncrementSnapshotCacheCalled func() + SnapshotCacheCalled func() uint64 + IncrementPersisterCalled func(epoch uint32) + PersisterCalled func(epoch uint32) uint64 + IncrementSnapshotPersisterCalled func(epoch uint32) + SnapshotPersisterCalled func(epoch uint32) uint64 + IncrementTrieCalled func() + TrieCalled func() uint64 + ProcessingStatsCalled func() []string + SnapshotStatsCalled func() []string +} + +// Reset - +func (stub *StateStatisticsHandlerStub) Reset() { + if stub.ResetCalled != nil { + stub.ResetCalled() + } +} + +// ResetSnapshot - +func (stub *StateStatisticsHandlerStub) ResetSnapshot() { + if stub.ResetSnapshotCalled != nil { + stub.ResetSnapshotCalled() + } +} + +// IncrementCache - +func (stub *StateStatisticsHandlerStub) IncrementCache() { + if stub.IncrementCacheCalled != nil { + stub.IncrementCacheCalled() + } +} + +// Cache - +func (stub *StateStatisticsHandlerStub) Cache() uint64 { + if stub.CacheCalled != nil { + return stub.CacheCalled() + } + + return 0 +} + +// IncrementSnapshotCache - +func (stub *StateStatisticsHandlerStub) IncrementSnapshotCache() { + if stub.IncrementSnapshotCacheCalled != nil { + stub.IncrementSnapshotCacheCalled() + } +} + +// SnapshotCache - +func (stub *StateStatisticsHandlerStub) SnapshotCache() uint64 { + if stub.SnapshotCacheCalled != nil { + return stub.SnapshotCacheCalled() + } + + return 0 +} + +// IncrementPersister - +func (stub *StateStatisticsHandlerStub) IncrementPersister(epoch uint32) { + if stub.IncrementPersisterCalled != nil { + stub.IncrementPersisterCalled(epoch) + } +} + +// Persister - +func (stub *StateStatisticsHandlerStub) Persister(epoch uint32) uint64 { + if stub.PersisterCalled != nil { + return stub.PersisterCalled(epoch) + } + + return 0 +} + +// IncrementSnapshotPersister - +func (stub *StateStatisticsHandlerStub) IncrementSnapshotPersister(epoch uint32) { + if stub.IncrementSnapshotPersisterCalled != nil { + stub.IncrementSnapshotPersisterCalled(epoch) + } +} + +// SnapshotPersister - +func (stub *StateStatisticsHandlerStub) SnapshotPersister(epoch uint32) uint64 { + if stub.SnapshotPersisterCalled != nil { + return stub.SnapshotPersisterCalled(epoch) + } + + return 0 +} + +// IncrementTrie - +func (stub *StateStatisticsHandlerStub) IncrementTrie() { + if stub.IncrementTrieCalled != nil { + stub.IncrementTrieCalled() + } +} + +// Trie - +func (stub *StateStatisticsHandlerStub) Trie() uint64 { + 
if stub.TrieCalled != nil { + return stub.TrieCalled() + } + + return 0 +} + +// ProcessingStats - +func (stub *StateStatisticsHandlerStub) ProcessingStats() []string { + if stub.ProcessingStatsCalled != nil { + return stub.ProcessingStatsCalled() + } + + return make([]string, 0) +} + +// SnapshotStats - +func (stub *StateStatisticsHandlerStub) SnapshotStats() []string { + if stub.SnapshotStatsCalled != nil { + return stub.SnapshotStatsCalled() + } + + return make([]string, 0) +} + +// IsInterfaceNil - +func (stub *StateStatisticsHandlerStub) IsInterfaceNil() bool { + return stub == nil +} diff --git a/testscommon/storageManager/storageManagerStub.go b/testscommon/storageManager/storageManagerStub.go index b14d6c460a6..60e10541da6 100644 --- a/testscommon/storageManager/storageManagerStub.go +++ b/testscommon/storageManager/storageManagerStub.go @@ -7,30 +7,30 @@ import ( // StorageManagerStub - type StorageManagerStub struct { - PutCalled func([]byte, []byte) error - PutInEpochCalled func([]byte, []byte, uint32) error - PutInEpochWithoutCacheCalled func([]byte, []byte, uint32) error - GetCalled func([]byte) ([]byte, error) - GetFromCurrentEpochCalled func([]byte) ([]byte, error) - TakeSnapshotCalled func(string, []byte, []byte, *common.TrieIteratorChannels, chan []byte, common.SnapshotStatisticsHandler, uint32) - GetDbThatContainsHashCalled func([]byte) common.BaseStorer - IsPruningEnabledCalled func() bool - IsPruningBlockedCalled func() bool - EnterPruningBufferingModeCalled func() - ExitPruningBufferingModeCalled func() - RemoveFromCurrentEpochCalled func([]byte) error - RemoveCalled func([]byte) error - IsInterfaceNilCalled func() bool - SetEpochForPutOperationCalled func(uint32) - ShouldTakeSnapshotCalled func() bool - GetLatestStorageEpochCalled func() (uint32, error) - IsClosedCalled func() bool - GetBaseTrieStorageManagerCalled func() common.StorageManager - GetIdentifierCalled func() string - CloseCalled func() error - RemoveFromAllActiveEpochsCalled func(hash []byte) error - IsSnapshotSupportedCalled func() bool - GetStateStatsHandlerCalled func() common.StateStatisticsHandler + PutCalled func([]byte, []byte) error + PutInEpochCalled func([]byte, []byte, uint32) error + PutInEpochWithoutCacheCalled func([]byte, []byte, uint32) error + GetCalled func([]byte) ([]byte, error) + GetFromCurrentEpochCalled func([]byte) ([]byte, error) + TakeSnapshotCalled func(string, []byte, []byte, *common.TrieIteratorChannels, chan []byte, common.SnapshotStatisticsHandler, uint32) + GetDbThatContainsHashCalled func([]byte) common.BaseStorer + IsPruningEnabledCalled func() bool + IsPruningBlockedCalled func() bool + EnterPruningBufferingModeCalled func() + ExitPruningBufferingModeCalled func() + RemoveFromCurrentEpochCalled func([]byte) error + RemoveCalled func([]byte) error + IsInterfaceNilCalled func() bool + SetEpochForPutOperationCalled func(uint32) + ShouldTakeSnapshotCalled func() bool + GetLatestStorageEpochCalled func() (uint32, error) + IsClosedCalled func() bool + GetBaseTrieStorageManagerCalled func() common.StorageManager + GetIdentifierCalled func() string + CloseCalled func() error + RemoveFromAllActiveEpochsCalled func(hash []byte) error + IsSnapshotSupportedCalled func() bool + GetStateStatsHandlerCalled func() common.StateStatisticsHandler } // Put - diff --git a/testscommon/tableDisplayerMock.go b/testscommon/tableDisplayerMock.go new file mode 100644 index 00000000000..813c3e11fc5 --- /dev/null +++ b/testscommon/tableDisplayerMock.go @@ -0,0 +1,19 @@ +package testscommon + 
+import "github.com/multiversx/mx-chain-core-go/display" + +// TableDisplayerMock - +type TableDisplayerMock struct { + DisplayTableCalled func(tableHeader []string, lines []*display.LineData, message string) +} + +// DisplayTable - +func (mock *TableDisplayerMock) DisplayTable(tableHeader []string, lines []*display.LineData, message string) { + if mock.DisplayTableCalled != nil { + mock.DisplayTableCalled(tableHeader, lines, message) + } +} + +func (mock *TableDisplayerMock) IsInterfaceNil() bool { + return mock == nil +} diff --git a/testscommon/testConfigs.go b/testscommon/testConfigs.go new file mode 100644 index 00000000000..fc0840e5237 --- /dev/null +++ b/testscommon/testConfigs.go @@ -0,0 +1,36 @@ +package testscommon + +import "github.com/multiversx/mx-chain-go/config" + +// GetDefaultRoundsConfig - +func GetDefaultRoundsConfig() config.RoundConfig { + return config.RoundConfig{ + RoundActivations: map[string]config.ActivationRoundByName{ + "DisableAsyncCallV1": { + Round: "18446744073709551615", + }, + }, + } +} + +// GetDefaultHeaderVersionConfig - +func GetDefaultHeaderVersionConfig() config.VersionsConfig { + return config.VersionsConfig{ + DefaultVersion: "default", + VersionsByEpochs: []config.VersionByEpochs{ + { + StartEpoch: 0, + Version: "*", + }, + { + StartEpoch: 1, + Version: "2", + }, + }, + Cache: config.CacheConfig{ + Name: "VersionsCache", + Type: "LRU", + Capacity: 100, + }, + } +} diff --git a/testscommon/transactionCoordinatorMock.go b/testscommon/transactionCoordinatorMock.go index e5a52257c67..cd25a769912 100644 --- a/testscommon/transactionCoordinatorMock.go +++ b/testscommon/transactionCoordinatorMock.go @@ -33,6 +33,8 @@ type TransactionCoordinatorMock struct { GetAllIntermediateTxsCalled func() map[block.Type]map[string]data.TransactionHandler AddTxsFromMiniBlocksCalled func(miniBlocks block.MiniBlockSlice) AddTransactionsCalled func(txHandlers []data.TransactionHandler, blockType block.Type) + + miniBlocks []*block.MiniBlock } // GetAllCurrentLogs - @@ -45,7 +47,7 @@ func (tcm *TransactionCoordinatorMock) CreatePostProcessMiniBlocks() block.MiniB if tcm.CreatePostProcessMiniBlocksCalled != nil { return tcm.CreatePostProcessMiniBlocksCalled() } - return nil + return tcm.miniBlocks } // CreateReceiptsHash - @@ -233,6 +235,7 @@ func (tcm *TransactionCoordinatorMock) GetAllIntermediateTxs() map[block.Type]ma // AddTxsFromMiniBlocks - func (tcm *TransactionCoordinatorMock) AddTxsFromMiniBlocks(miniBlocks block.MiniBlockSlice) { if tcm.AddTxsFromMiniBlocksCalled == nil { + tcm.miniBlocks = append(tcm.miniBlocks, miniBlocks...) 
return } @@ -248,6 +251,10 @@ func (tcm *TransactionCoordinatorMock) AddTransactions(txHandlers []data.Transac tcm.AddTransactionsCalled(txHandlers, blockType) } +func (tcm *TransactionCoordinatorMock) ClearStoredMbs() { + tcm.miniBlocks = make([]*block.MiniBlock, 0) +} + // IsInterfaceNil returns true if there is no value under the interface func (tcm *TransactionCoordinatorMock) IsInterfaceNil() bool { return tcm == nil diff --git a/testscommon/txDataBuilder/builder.go b/testscommon/txDataBuilder/builder.go index 49916cd5a1c..3198792ac57 100644 --- a/testscommon/txDataBuilder/builder.go +++ b/testscommon/txDataBuilder/builder.go @@ -5,6 +5,7 @@ import ( "math/big" "github.com/multiversx/mx-chain-core-go/core" + vmcommon "github.com/multiversx/mx-chain-vm-common-go" ) // TxDataBuilder constructs a string to be used for transaction arguments @@ -176,11 +177,20 @@ func (builder *TxDataBuilder) TransferESDT(token string, value int64) *TxDataBui return builder.Func(core.BuiltInFunctionESDTTransfer).Str(token).Int64(value) } -//TransferESDTNFT appends to the data string all the elements required to request an ESDT NFT transfer. +// TransferESDTNFT appends to the data string all the elements required to request an ESDT NFT transfer. func (builder *TxDataBuilder) TransferESDTNFT(token string, nonce int, value int64) *TxDataBuilder { return builder.Func(core.BuiltInFunctionESDTNFTTransfer).Str(token).Int(nonce).Int64(value) } +// MultiTransferESDTNFT appends to the data string all the elements required to request an Multi ESDT NFT transfer. +func (builder *TxDataBuilder) MultiTransferESDTNFT(destinationAddress []byte, transfers []*vmcommon.ESDTTransfer) *TxDataBuilder { + txBuilder := builder.Func(core.BuiltInFunctionMultiESDTNFTTransfer).Bytes(destinationAddress).Int(len(transfers)) + for _, transfer := range transfers { + txBuilder.Bytes(transfer.ESDTTokenName).Int(int(transfer.ESDTTokenNonce)).BigInt(transfer.ESDTValue) + } + return txBuilder +} + // BurnESDT appends to the data string all the elements required to burn ESDT tokens. 
func (builder *TxDataBuilder) BurnESDT(token string, value int64) *TxDataBuilder { return builder.Func(core.BuiltInFunctionESDTBurn).Str(token).Int64(value) diff --git a/process/mock/validatorStatisticsProcessorStub.go b/testscommon/validatorStatisticsProcessorStub.go similarity index 84% rename from process/mock/validatorStatisticsProcessorStub.go rename to testscommon/validatorStatisticsProcessorStub.go index b3e4f947da0..4d588610d31 100644 --- a/process/mock/validatorStatisticsProcessorStub.go +++ b/testscommon/validatorStatisticsProcessorStub.go @@ -1,4 +1,4 @@ -package mock +package testscommon import ( "github.com/multiversx/mx-chain-core-go/data" @@ -12,23 +12,15 @@ type ValidatorStatisticsProcessorStub struct { GetPeerAccountCalled func(address []byte) (state.PeerAccountHandler, error) RootHashCalled func() ([]byte, error) LastFinalizedRootHashCalled func() []byte - ResetValidatorStatisticsAtNewEpochCalled func(vInfos map[uint32][]*state.ValidatorInfo) error - GetValidatorInfoForRootHashCalled func(rootHash []byte) (map[uint32][]*state.ValidatorInfo, error) - ProcessRatingsEndOfEpochCalled func(validatorInfos map[uint32][]*state.ValidatorInfo, epoch uint32) error + ResetValidatorStatisticsAtNewEpochCalled func(vInfos state.ShardValidatorsInfoMapHandler) error + GetValidatorInfoForRootHashCalled func(rootHash []byte) (state.ShardValidatorsInfoMapHandler, error) + ProcessRatingsEndOfEpochCalled func(validatorInfos state.ShardValidatorsInfoMapHandler, epoch uint32) error ProcessCalled func(validatorInfo data.ShardValidatorInfoHandler) error CommitCalled func() ([]byte, error) PeerAccountToValidatorInfoCalled func(peerAccount state.PeerAccountHandler) *state.ValidatorInfo SaveNodesCoordinatorUpdatesCalled func(epoch uint32) (bool, error) } -// SaveNodesCoordinatorUpdates - -func (vsp *ValidatorStatisticsProcessorStub) SaveNodesCoordinatorUpdates(epoch uint32) (bool, error) { - if vsp.SaveNodesCoordinatorUpdatesCalled != nil { - return vsp.SaveNodesCoordinatorUpdatesCalled(epoch) - } - return false, nil -} - // PeerAccountToValidatorInfo - func (vsp *ValidatorStatisticsProcessorStub) PeerAccountToValidatorInfo(peerAccount state.PeerAccountHandler) *state.ValidatorInfo { if vsp.PeerAccountToValidatorInfoCalled != nil { @@ -56,7 +48,7 @@ func (vsp *ValidatorStatisticsProcessorStub) Commit() ([]byte, error) { } // ResetValidatorStatisticsAtNewEpoch - -func (vsp *ValidatorStatisticsProcessorStub) ResetValidatorStatisticsAtNewEpoch(vInfos map[uint32][]*state.ValidatorInfo) error { +func (vsp *ValidatorStatisticsProcessorStub) ResetValidatorStatisticsAtNewEpoch(vInfos state.ShardValidatorsInfoMapHandler) error { if vsp.ResetValidatorStatisticsAtNewEpochCalled != nil { return vsp.ResetValidatorStatisticsAtNewEpochCalled(vInfos) } @@ -64,19 +56,11 @@ func (vsp *ValidatorStatisticsProcessorStub) ResetValidatorStatisticsAtNewEpoch( } // GetValidatorInfoForRootHash - -func (vsp *ValidatorStatisticsProcessorStub) GetValidatorInfoForRootHash(rootHash []byte) (map[uint32][]*state.ValidatorInfo, error) { +func (vsp *ValidatorStatisticsProcessorStub) GetValidatorInfoForRootHash(rootHash []byte) (state.ShardValidatorsInfoMapHandler, error) { if vsp.GetValidatorInfoForRootHashCalled != nil { return vsp.GetValidatorInfoForRootHashCalled(rootHash) } - return nil, nil -} - -// ProcessRatingsEndOfEpoch - -func (vsp *ValidatorStatisticsProcessorStub) ProcessRatingsEndOfEpoch(validatorInfos map[uint32][]*state.ValidatorInfo, epoch uint32) error { - if vsp.ProcessRatingsEndOfEpochCalled != nil { - return 
vsp.ProcessRatingsEndOfEpochCalled(validatorInfos, epoch) - } - return nil + return state.NewShardValidatorsInfoMap(), nil } // UpdatePeerState - @@ -87,6 +71,14 @@ func (vsp *ValidatorStatisticsProcessorStub) UpdatePeerState(header data.MetaHea return nil, nil } +// ProcessRatingsEndOfEpoch - +func (vsp *ValidatorStatisticsProcessorStub) ProcessRatingsEndOfEpoch(validatorInfos state.ShardValidatorsInfoMapHandler, epoch uint32) error { + if vsp.ProcessRatingsEndOfEpochCalled != nil { + return vsp.ProcessRatingsEndOfEpochCalled(validatorInfos, epoch) + } + return nil +} + // RevertPeerState - func (vsp *ValidatorStatisticsProcessorStub) RevertPeerState(header data.MetaHeaderHandler) error { if vsp.RevertPeerStateCalled != nil { @@ -103,8 +95,20 @@ func (vsp *ValidatorStatisticsProcessorStub) RootHash() ([]byte, error) { return nil, nil } -// GetExistingPeerAccount - -func (vsp *ValidatorStatisticsProcessorStub) GetExistingPeerAccount(address []byte) (state.PeerAccountHandler, error) { +// SetLastFinalizedRootHash - +func (vsp *ValidatorStatisticsProcessorStub) SetLastFinalizedRootHash(_ []byte) { +} + +// LastFinalizedRootHash - +func (vsp *ValidatorStatisticsProcessorStub) LastFinalizedRootHash() []byte { + if vsp.LastFinalizedRootHashCalled != nil { + return vsp.LastFinalizedRootHashCalled() + } + return nil +} + +// GetPeerAccount - +func (vsp *ValidatorStatisticsProcessorStub) GetPeerAccount(address []byte) (state.PeerAccountHandler, error) { if vsp.GetPeerAccountCalled != nil { return vsp.GetPeerAccountCalled(address) } @@ -116,19 +120,15 @@ func (vsp *ValidatorStatisticsProcessorStub) GetExistingPeerAccount(address []by func (vsp *ValidatorStatisticsProcessorStub) DisplayRatings(_ uint32) { } -// SetLastFinalizedRootHash - -func (vsp *ValidatorStatisticsProcessorStub) SetLastFinalizedRootHash(_ []byte) { -} - -// LastFinalizedRootHash - -func (vsp *ValidatorStatisticsProcessorStub) LastFinalizedRootHash() []byte { - if vsp.LastFinalizedRootHashCalled != nil { - return vsp.LastFinalizedRootHashCalled() +// SaveNodesCoordinatorUpdates - +func (vsp *ValidatorStatisticsProcessorStub) SaveNodesCoordinatorUpdates(epoch uint32) (bool, error) { + if vsp.SaveNodesCoordinatorUpdatesCalled != nil { + return vsp.SaveNodesCoordinatorUpdatesCalled(epoch) } - return nil + return false, nil } // IsInterfaceNil - func (vsp *ValidatorStatisticsProcessorStub) IsInterfaceNil() bool { - return false + return vsp == nil } diff --git a/testscommon/vmcommonMocks/userAccountStub.go b/testscommon/vmcommonMocks/userAccountStub.go index 6fb0b1f4d85..8f1eabf8a7f 100644 --- a/testscommon/vmcommonMocks/userAccountStub.go +++ b/testscommon/vmcommonMocks/userAccountStub.go @@ -159,7 +159,7 @@ func (uas *UserAccountStub) GetNonce() uint64 { return 0 } -//IsInterfaceNil - +// IsInterfaceNil - func (uas *UserAccountStub) IsInterfaceNil() bool { return uas == nil } diff --git a/trie/node.go b/trie/node.go index 6d82a238e95..754b3b3548d 100644 --- a/trie/node.go +++ b/trie/node.go @@ -152,7 +152,7 @@ func resolveIfCollapsed(n node, pos byte, db common.TrieStorageInteractor) error func handleStorageInteractorStats(db common.TrieStorageInteractor) { if db != nil { - db.GetStateStatsHandler().IncrTrie() + db.GetStateStatsHandler().IncrementTrie() } } diff --git a/trie/node_extension.go b/trie/node_extension.go index 4e7b38a6a7d..ffbdab699ad 100644 --- a/trie/node_extension.go +++ b/trie/node_extension.go @@ -26,8 +26,8 @@ func shouldTestNode(n node, key []byte) bool { } func snapshotGetTestPoint(key []byte, faultyChance int) 
error { - rand.Seed(time.Now().UnixNano()) - checkVal := rand.Intn(math.MaxInt) + rnd := rand.New(rand.NewSource(time.Now().UnixNano())) + checkVal := rnd.Intn(math.MaxInt) if checkVal%faultyChance == 0 { log.Debug("deliberately not returning hash", "hash", key) return fmt.Errorf("snapshot get error") diff --git a/update/factory/dataTrieFactory.go b/update/factory/dataTrieFactory.go index f9491350693..dcd83da1bd7 100644 --- a/update/factory/dataTrieFactory.go +++ b/update/factory/dataTrieFactory.go @@ -67,8 +67,7 @@ func NewDataTrieFactory(args ArgsNewDataTrieFactory) (*dataTrieFactory, error) { dbConfig := storageFactory.GetDBFromConfig(args.StorageConfig.DB) dbConfig.FilePath = path.Join(args.SyncFolder, args.StorageConfig.DB.FilePath) - dbConfigHandler := factory.NewDBConfigHandler(args.StorageConfig.DB) - persisterFactory, err := factory.NewPersisterFactory(dbConfigHandler) + persisterFactory, err := factory.NewPersisterFactory(args.StorageConfig.DB) if err != nil { return nil, err } diff --git a/update/factory/exportHandlerFactory.go b/update/factory/exportHandlerFactory.go index 8dd429345bb..c13f25f3f5a 100644 --- a/update/factory/exportHandlerFactory.go +++ b/update/factory/exportHandlerFactory.go @@ -608,8 +608,7 @@ func createStorer(storageConfig config.StorageConfig, folder string) (storage.St dbConfig := storageFactory.GetDBFromConfig(storageConfig.DB) dbConfig.FilePath = path.Join(folder, storageConfig.DB.FilePath) - dbConfigHandler := storageFactory.NewDBConfigHandler(storageConfig.DB) - persisterFactory, err := storageFactory.NewPersisterFactory(dbConfigHandler) + persisterFactory, err := storageFactory.NewPersisterFactory(storageConfig.DB) if err != nil { return nil, err } diff --git a/update/genesis/common.go b/update/genesis/common.go index 2ce58de50af..d8d3b11ca0e 100644 --- a/update/genesis/common.go +++ b/update/genesis/common.go @@ -6,7 +6,7 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/marshal" "github.com/multiversx/mx-chain-go/common" - "github.com/multiversx/mx-chain-go/sharding" + "github.com/multiversx/mx-chain-go/state" "github.com/multiversx/mx-chain-go/state/accounts" ) @@ -14,25 +14,20 @@ import ( // TODO: create a structure or use this function also in process/peer/process.go func getValidatorDataFromLeaves( leavesChannels *common.TrieIteratorChannels, - shardCoordinator sharding.Coordinator, marshalizer marshal.Marshalizer, -) (map[uint32][]*state.ValidatorInfo, error) { - - validators := make(map[uint32][]*state.ValidatorInfo, shardCoordinator.NumberOfShards()+1) - for i := uint32(0); i < shardCoordinator.NumberOfShards(); i++ { - validators[i] = make([]*state.ValidatorInfo, 0) - } - validators[core.MetachainShardId] = make([]*state.ValidatorInfo, 0) - +) (state.ShardValidatorsInfoMapHandler, error) { + validators := state.NewShardValidatorsInfoMap() for pa := range leavesChannels.LeavesChan { peerAccount, err := unmarshalPeer(pa, marshalizer) if err != nil { return nil, err } - currentShardId := peerAccount.GetShardId() validatorInfoData := peerAccountToValidatorInfo(peerAccount) - validators[currentShardId] = append(validators[currentShardId], validatorInfoData) + err = validators.Add(validatorInfoData) + if err != nil { + return nil, err + } } err := leavesChannels.ErrChan.ReadFromChanNonBlocking() @@ -60,7 +55,9 @@ func peerAccountToValidatorInfo(peerAccount state.PeerAccountHandler) *state.Val PublicKey: peerAccount.AddressBytes(), ShardId: peerAccount.GetShardId(), List: getActualList(peerAccount), 
+ PreviousList: peerAccount.GetPreviousList(), Index: peerAccount.GetIndexInList(), + PreviousIndex: peerAccount.GetPreviousIndexInList(), TempRating: peerAccount.GetTempRating(), Rating: peerAccount.GetRating(), RewardAddress: peerAccount.GetRewardAddress(), @@ -92,7 +89,7 @@ func getActualList(peerAccount state.PeerAccountHandler) string { return string(common.LeavingList) } -func shouldExportValidator(validator *state.ValidatorInfo, allowedLists []common.PeerType) bool { +func shouldExportValidator(validator state.ValidatorInfoHandler, allowedLists []common.PeerType) bool { validatorList := validator.GetList() for _, list := range allowedLists { diff --git a/update/genesis/export.go b/update/genesis/export.go index 0f5c469afc9..ba4e678a0f8 100644 --- a/update/genesis/export.go +++ b/update/genesis/export.go @@ -311,8 +311,7 @@ func (se *stateExport) exportTrie(key string, trie common.Trie) error { } if accType == ValidatorAccount { - var validatorData map[uint32][]*state.ValidatorInfo - validatorData, err = getValidatorDataFromLeaves(leavesChannels, se.shardCoordinator, se.marshalizer) + validatorData, err := getValidatorDataFromLeaves(leavesChannels, se.marshalizer) if err != nil { return err } @@ -443,30 +442,28 @@ func (se *stateExport) exportValidatorInfo(key string, validatorInfo *state.Shar return nil } -func (se *stateExport) exportNodesSetupJson(validators map[uint32][]*state.ValidatorInfo) error { +func (se *stateExport) exportNodesSetupJson(validators state.ShardValidatorsInfoMapHandler) error { acceptedListsForExport := []common.PeerType{common.EligibleList, common.WaitingList, common.JailedList} initialNodes := make([]*sharding.InitialNode, 0) - for _, validatorsInShard := range validators { - for _, validator := range validatorsInShard { - if shouldExportValidator(validator, acceptedListsForExport) { - - pubKey, err := se.validatorPubKeyConverter.Encode(validator.GetPublicKey()) - if err != nil { - return nil - } - - rewardAddress, err := se.addressPubKeyConverter.Encode(validator.GetRewardAddress()) - if err != nil { - return nil - } - - initialNodes = append(initialNodes, &sharding.InitialNode{ - PubKey: pubKey, - Address: rewardAddress, - InitialRating: validator.GetRating(), - }) + for _, validator := range validators.GetAllValidatorsInfo() { + if shouldExportValidator(validator, acceptedListsForExport) { + + pubKey, err := se.validatorPubKeyConverter.Encode(validator.GetPublicKey()) + if err != nil { + return nil + } + + rewardAddress, err := se.addressPubKeyConverter.Encode(validator.GetRewardAddress()) + if err != nil { + return nil } + + initialNodes = append(initialNodes, &sharding.InitialNode{ + PubKey: pubKey, + Address: rewardAddress, + InitialRating: validator.GetRating(), + }) } } diff --git a/update/genesis/export_test.go b/update/genesis/export_test.go index f1fca206504..bad77b07959 100644 --- a/update/genesis/export_test.go +++ b/update/genesis/export_test.go @@ -389,16 +389,17 @@ func TestStateExport_ExportNodesSetupJsonShouldExportKeysInAlphabeticalOrder(t * require.False(t, check.IfNil(stateExporter)) - vals := make(map[uint32][]*state.ValidatorInfo) - val50 := &state.ValidatorInfo{ShardId: 5, PublicKey: []byte("aaa"), List: string(common.EligibleList)} - val51 := &state.ValidatorInfo{ShardId: 5, PublicKey: []byte("bbb"), List: string(common.EligibleList)} - val10 := &state.ValidatorInfo{ShardId: 5, PublicKey: []byte("ccc"), List: string(common.EligibleList)} - val11 := &state.ValidatorInfo{ShardId: 5, PublicKey: []byte("ddd"), List: 
string(common.EligibleList)} - val00 := &state.ValidatorInfo{ShardId: 5, PublicKey: []byte("aaaaaa"), List: string(common.EligibleList)} - val01 := &state.ValidatorInfo{ShardId: 5, PublicKey: []byte("bbbbbb"), List: string(common.EligibleList)} - vals[1] = []*state.ValidatorInfo{val50, val51} - vals[0] = []*state.ValidatorInfo{val00, val01} - vals[2] = []*state.ValidatorInfo{val10, val11} + vals := state.NewShardValidatorsInfoMap() + val50 := &state.ValidatorInfo{ShardId: 0, PublicKey: []byte("aaa"), List: string(common.EligibleList)} + val51 := &state.ValidatorInfo{ShardId: 0, PublicKey: []byte("bbb"), List: string(common.EligibleList)} + val10 := &state.ValidatorInfo{ShardId: 1, PublicKey: []byte("ccc"), List: string(common.EligibleList)} + val11 := &state.ValidatorInfo{ShardId: 1, PublicKey: []byte("ddd"), List: string(common.EligibleList)} + val00 := &state.ValidatorInfo{ShardId: 2, PublicKey: []byte("aaaaaa"), List: string(common.EligibleList)} + val01 := &state.ValidatorInfo{ShardId: 2, PublicKey: []byte("bbbbbb"), List: string(common.EligibleList)} + _ = vals.SetValidatorsInShard(0, []state.ValidatorInfoHandler{val50, val51}) + _ = vals.SetValidatorsInShard(1, []state.ValidatorInfoHandler{val10, val11}) + _ = vals.SetValidatorsInShard(2, []state.ValidatorInfoHandler{val00, val01}) + err = stateExporter.exportNodesSetupJson(vals) require.Nil(t, err) diff --git a/vm/errors.go b/vm/errors.go index 4a3cae31b04..f5c3638a624 100644 --- a/vm/errors.go +++ b/vm/errors.go @@ -265,5 +265,17 @@ var ErrWrongNewOwnerAddress = errors.New("wrong new owner address") // ErrInternalErrorWhileSettingNewOwner signals that an error occurred when setting the new contract owner var ErrInternalErrorWhileSettingNewOwner = errors.New("internal error when setting new contract owner") +// ErrInvalidStakeLimitPercentage signals the invalid stake limit percentage was provided +var ErrInvalidStakeLimitPercentage = errors.New("invalid stake limit percentage") + +// ErrInvalidNodeLimitPercentage signals the invalid node limit percentage was provided +var ErrInvalidNodeLimitPercentage = errors.New("invalid node limit percentage") + +// ErrNilNodesCoordinator signals that nil nodes coordinator was provided +var ErrNilNodesCoordinator = errors.New("nil nodes coordinator") + +// ErrWaitingListDisabled signals that waiting list has been disabled, since staking v4 is active +var ErrWaitingListDisabled = errors.New("waiting list is disabled since staking v4 activation") + // ErrCannotChangeToDynamic signals that tokenID cannot be change to type dynamic var ErrCannotChangeToDynamic = errors.New("cannot change to dynamic because of duplicated roles") diff --git a/vm/factory/systemSCFactory.go b/vm/factory/systemSCFactory.go index 0cccff2ce4b..5a6defa2d3c 100644 --- a/vm/factory/systemSCFactory.go +++ b/vm/factory/systemSCFactory.go @@ -31,6 +31,7 @@ type systemSCFactory struct { addressPubKeyConverter core.PubkeyConverter shardCoordinator sharding.Coordinator enableEpochsHandler common.EnableEpochsHandler + nodesCoordinator vm.NodesCoordinator } // ArgsNewSystemSCFactory defines the arguments struct needed to create the system SCs @@ -46,6 +47,7 @@ type ArgsNewSystemSCFactory struct { AddressPubKeyConverter core.PubkeyConverter ShardCoordinator sharding.Coordinator EnableEpochsHandler common.EnableEpochsHandler + NodesCoordinator vm.NodesCoordinator } // NewSystemSCFactory creates a factory which will instantiate the system smart contracts @@ -80,6 +82,9 @@ func NewSystemSCFactory(args ArgsNewSystemSCFactory) 
(*systemSCFactory, error) { if check.IfNil(args.EnableEpochsHandler) { return nil, fmt.Errorf("%w in NewSystemSCFactory", vm.ErrNilEnableEpochsHandler) } + if check.IfNil(args.NodesCoordinator) { + return nil, fmt.Errorf("%w in NewSystemSCFactory", vm.ErrNilNodesCoordinator) + } scf := &systemSCFactory{ systemEI: args.SystemEI, @@ -92,6 +97,7 @@ func NewSystemSCFactory(args ArgsNewSystemSCFactory) (*systemSCFactory, error) { addressPubKeyConverter: args.AddressPubKeyConverter, shardCoordinator: args.ShardCoordinator, enableEpochsHandler: args.EnableEpochsHandler, + nodesCoordinator: args.NodesCoordinator, } err := scf.createGasConfig(args.GasSchedule.LatestGasSchedule()) @@ -197,6 +203,7 @@ func (scf *systemSCFactory) createValidatorContract() (vm.SystemSmartContract, e GovernanceSCAddress: vm.GovernanceSCAddress, ShardCoordinator: scf.shardCoordinator, EnableEpochsHandler: scf.enableEpochsHandler, + NodesCoordinator: scf.nodesCoordinator, } validatorSC, err := systemSmartContracts.NewValidatorSmartContract(args) return validatorSC, err diff --git a/vm/factory/systemSCFactory_test.go b/vm/factory/systemSCFactory_test.go index 8f16f1a46b1..76c46685cb1 100644 --- a/vm/factory/systemSCFactory_test.go +++ b/vm/factory/systemSCFactory_test.go @@ -65,6 +65,8 @@ func createMockNewSystemScFactoryArgs() ArgsNewSystemSCFactory { MaxNumberOfNodesForStake: 100, ActivateBLSPubKeyMessageVerification: false, MinUnstakeTokensValue: "1", + StakeLimitPercentage: 100.0, + NodeLimitPercentage: 100.0, }, DelegationSystemSCConfig: config.DelegationSystemSCConfig{ MinServiceFee: 0, @@ -75,10 +77,17 @@ func createMockNewSystemScFactoryArgs() ArgsNewSystemSCFactory { MinStakeAmount: "10", ConfigChangeAddress: "3132333435363738393031323334353637383930313233343536373839303234", }, + SoftAuctionConfig: config.SoftAuctionConfig{ + TopUpStep: "10", + MinTopUp: "1", + MaxTopUp: "32000000", + MaxNumberOfIterations: 100000, + }, }, AddressPubKeyConverter: &testscommon.PubkeyConverterMock{}, ShardCoordinator: &mock.ShardCoordinatorStub{}, EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + NodesCoordinator: &mock.NodesCoordinatorStub{}, } } @@ -93,6 +102,17 @@ func TestNewSystemSCFactory_NilSystemEI(t *testing.T) { assert.True(t, errors.Is(err, vm.ErrNilSystemEnvironmentInterface)) } +func TestNewSystemSCFactory_NilNodesCoordinator(t *testing.T) { + t.Parallel() + + arguments := createMockNewSystemScFactoryArgs() + arguments.NodesCoordinator = nil + scFactory, err := NewSystemSCFactory(arguments) + + assert.Nil(t, scFactory) + assert.True(t, errors.Is(err, vm.ErrNilNodesCoordinator)) +} + func TestNewSystemSCFactory_NilSigVerifier(t *testing.T) { t.Parallel() diff --git a/vm/interface.go b/vm/interface.go index 02d78643821..ca8332c742f 100644 --- a/vm/interface.go +++ b/vm/interface.go @@ -37,7 +37,7 @@ type SystemSCContainer interface { type SystemEI interface { ExecuteOnDestContext(destination []byte, sender []byte, value *big.Int, input []byte) (*vmcommon.VMOutput, error) DeploySystemSC(baseContract []byte, newAddress []byte, ownerAddress []byte, initFunction string, value *big.Int, input [][]byte) (vmcommon.ReturnCode, error) - Transfer(destination []byte, sender []byte, value *big.Int, input []byte, gasLimit uint64) error + Transfer(destination []byte, sender []byte, value *big.Int, input []byte, gasLimit uint64) SendGlobalSettingToAll(sender []byte, input []byte) GetBalance(addr []byte) *big.Int SetStorage(key []byte, value []byte) @@ -60,6 +60,7 @@ type SystemEI interface { GetLogs() 
[]*vmcommon.LogEntry SetOwnerOperatingOnAccount(newOwner []byte) error UpdateCodeDeployerAddress(scAddress string, newOwner []byte) error + ProcessBuiltInFunction(sender, destination []byte, function string, arguments [][]byte) (*vmcommon.VMOutput, error) IsInterfaceNil() bool } @@ -70,6 +71,12 @@ type EconomicsHandler interface { IsInterfaceNil() bool } +// NodesCoordinator defines the methods needed about nodes in system SCs from nodes coordinator +type NodesCoordinator interface { + GetNumTotalEligible() uint64 + IsInterfaceNil() bool +} + // ContextHandler defines the methods needed to execute system smart contracts type ContextHandler interface { SystemEI @@ -129,4 +136,5 @@ type BlockchainHook interface { GetSnapshot() int RevertToSnapshot(snapshot int) error IsBuiltinFunctionName(functionName string) bool + ProcessBuiltInFunction(input *vmcommon.ContractCallInput) (*vmcommon.VMOutput, error) } diff --git a/vm/mock/nodesCoordinatorStub.go b/vm/mock/nodesCoordinatorStub.go new file mode 100644 index 00000000000..de4a99e28e7 --- /dev/null +++ b/vm/mock/nodesCoordinatorStub.go @@ -0,0 +1,19 @@ +package mock + +// NodesCoordinatorStub - +type NodesCoordinatorStub struct { + GetNumTotalEligibleCalled func() uint64 +} + +// GetNumTotalEligible - +func (n *NodesCoordinatorStub) GetNumTotalEligible() uint64 { + if n.GetNumTotalEligibleCalled != nil { + return n.GetNumTotalEligibleCalled() + } + return 1000 +} + +// IsInterfaceNil - +func (n *NodesCoordinatorStub) IsInterfaceNil() bool { + return n == nil +} diff --git a/vm/mock/systemEIStub.go b/vm/mock/systemEIStub.go index 4162a34ab24..0c300010316 100644 --- a/vm/mock/systemEIStub.go +++ b/vm/mock/systemEIStub.go @@ -10,7 +10,7 @@ import ( // SystemEIStub - type SystemEIStub struct { - TransferCalled func(destination []byte, sender []byte, value *big.Int, input []byte) error + TransferCalled func(destination []byte, sender []byte, value *big.Int, input []byte, gasLimit uint64) GetBalanceCalled func(addr []byte) *big.Int SetStorageCalled func(key []byte, value []byte) AddReturnMessageCalled func(msg string) @@ -37,6 +37,7 @@ type SystemEIStub struct { GasLeftCalled func() uint64 CleanStorageUpdatesCalled func() ReturnMessage string + ProcessBuiltInFunctionCalled func(sender, destination []byte, function string, arguments [][]byte) (*vmcommon.VMOutput, error) AddLogEntryCalled func(entry *vmcommon.LogEntry) SetOwnerOperatingOnAccountCalled func(newOwner []byte) error UpdateCodeDeployerAddressCalled func(scAddress string, newOwner []byte) error @@ -203,11 +204,10 @@ func (s *SystemEIStub) SendGlobalSettingToAll(sender []byte, input []byte) { } // Transfer - -func (s *SystemEIStub) Transfer(destination []byte, sender []byte, value *big.Int, input []byte, _ uint64) error { +func (s *SystemEIStub) Transfer(destination []byte, sender []byte, value *big.Int, input []byte, gasLimit uint64) { if s.TransferCalled != nil { - return s.TransferCalled(destination, sender, value, input) + s.TransferCalled(destination, sender, value, input, gasLimit) } - return nil } // GetBalance - @@ -310,6 +310,14 @@ func (s *SystemEIStub) UpdateCodeDeployerAddress(scAddress string, newOwner []by return nil } +// ProcessBuiltInFunction - +func (s *SystemEIStub) ProcessBuiltInFunction(sender, destination []byte, function string, arguments [][]byte) (*vmcommon.VMOutput, error) { + if s.ProcessBuiltInFunctionCalled != nil { + return s.ProcessBuiltInFunctionCalled(sender, destination, function, arguments) + } + return &vmcommon.VMOutput{}, nil +} + // IsInterfaceNil - func 
(s *SystemEIStub) IsInterfaceNil() bool { return s == nil diff --git a/vm/systemSmartContracts/delegation.go b/vm/systemSmartContracts/delegation.go index d71afabb6e2..ac33ba81da2 100644 --- a/vm/systemSmartContracts/delegation.go +++ b/vm/systemSmartContracts/delegation.go @@ -1215,6 +1215,13 @@ func (d *delegation) stakeNodes(args *vmcommon.ContractCallInput) vmcommon.Retur return vmOutput.ReturnCode } + allLogs := d.eei.GetLogs() + tooManyNodesErrMsg := getTooManyNodesErrMsg(allLogs) + if len(tooManyNodesErrMsg) != 0 { + d.eei.AddReturnMessage(tooManyNodesErrMsg) + return vmcommon.UserError + } + err = d.updateDelegationStatusAfterStake(status, vmOutput.ReturnData, args.Arguments) if err != nil { d.eei.AddReturnMessage(err.Error()) @@ -1226,6 +1233,27 @@ func (d *delegation) stakeNodes(args *vmcommon.ContractCallInput) vmcommon.Retur return vmcommon.Ok } +func getTooManyNodesErrMsg(logEntries []*vmcommon.LogEntry) string { + for _, logEntry := range logEntries { + topics := logEntry.Topics + if len(topics) != 3 { + continue + } + if bytes.Equal(topics[0], []byte(numberOfNodesTooHigh)) { + return formatTooManyNodesMsg(topics) + } + } + + return "" +} + +func formatTooManyNodesMsg(topics [][]byte) string { + numRegisteredBlsKeys := big.NewInt(0).SetBytes(topics[1]).Int64() + nodeLimit := big.NewInt(0).SetBytes(topics[2]).Int64() + return fmt.Sprintf("%s, num registered bls keys: %d, node limit: %d", + numberOfNodesTooHigh, numRegisteredBlsKeys, nodeLimit) +} + func (d *delegation) updateDelegationStatusAfterStake( status *DelegationContractStatus, returnData [][]byte, @@ -1430,11 +1458,7 @@ func (d *delegation) unJailNodes(args *vmcommon.ContractCallInput) vmcommon.Retu sendBackValue := getTransferBackFromVMOutput(vmOutput) if sendBackValue.Cmp(zero) > 0 { - err = d.eei.Transfer(args.CallerAddr, args.RecipientAddr, sendBackValue, nil, 0) - if err != nil { - d.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } + d.eei.Transfer(args.CallerAddr, args.RecipientAddr, sendBackValue, nil, 0) } d.createAndAddLogEntry(args, args.Arguments...) 
@@ -1532,70 +1556,54 @@ func (d *delegation) finishDelegateUser( return vmcommon.UserError } - var err error - if len(delegator.ActiveFund) == 0 { - var fundKey []byte - fundKey, err = d.createAndSaveNextKeyFund(callerAddr, delegateValue, active) - if err != nil { - d.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } - - delegator.ActiveFund = fundKey - if isNew { - dStatus.NumUsers++ - } - } else { - err = d.addValueToFund(delegator.ActiveFund, delegateValue) - if err != nil { - d.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } - } - - err = d.checkActiveFund(delegator) + err := d.addToActiveFund(callerAddr, delegator, delegateValue, dStatus, isNew) if err != nil { d.eei.AddReturnMessage(err.Error()) return vmcommon.UserError } - stakeArgs := d.makeStakeArgsIfAutomaticActivation(dConfig, dStatus, globalFund) - vmOutput, err := d.executeOnValidatorSC(scAddress, "stake", stakeArgs, callValue) + err = d.checkActiveFund(delegator) if err != nil { d.eei.AddReturnMessage(err.Error()) return vmcommon.UserError } - if vmOutput.ReturnCode != vmcommon.Ok { - return vmOutput.ReturnCode - } - if len(stakeArgs) > 0 { - err = d.updateDelegationStatusAfterStake(dStatus, vmOutput.ReturnData, stakeArgs) - if err != nil { - d.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } + returnCode := d.executeStakeAndUpdateStatus(dConfig, dStatus, globalFund, callValue, scAddress) + if returnCode != vmcommon.Ok { + return returnCode } - err = d.saveDelegationStatus(dStatus) + err = d.saveDelegatorData(callerAddr, delegator) if err != nil { d.eei.AddReturnMessage(err.Error()) return vmcommon.UserError } - err = d.saveGlobalFundData(globalFund) - if err != nil { - d.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError + return vmcommon.Ok +} + +func (d *delegation) addToActiveFund( + callerAddr []byte, + delegator *DelegatorData, + delegateValue *big.Int, + dStatus *DelegationContractStatus, + isNew bool, +) error { + if len(delegator.ActiveFund) > 0 { + return d.addValueToFund(delegator.ActiveFund, delegateValue) } - err = d.saveDelegatorData(callerAddr, delegator) + fundKey, err := d.createAndSaveNextKeyFund(callerAddr, delegateValue, active) if err != nil { - d.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError + return err } - return vmcommon.Ok + delegator.ActiveFund = fundKey + if isNew { + dStatus.NumUsers++ + } + + return nil } func (d *delegation) checkActiveFund(delegator *DelegatorData) error { @@ -1730,7 +1738,16 @@ func (d *delegation) unDelegate(args *vmcommon.ContractCallInput) vmcommon.Retur return vmcommon.UserError } - isNew, delegator, err := d.getOrCreateDelegatorData(args.CallerAddr) + return d.unDelegateValueFromAddress(args, valueToUnDelegate, args.CallerAddr, args.RecipientAddr) +} + +func (d *delegation) unDelegateValueFromAddress( + args *vmcommon.ContractCallInput, + valueToUnDelegate *big.Int, + delegatorAddress []byte, + contractAddress []byte, +) vmcommon.ReturnCode { + isNew, delegator, err := d.getOrCreateDelegatorData(delegatorAddress) if err != nil { d.eei.AddReturnMessage(err.Error()) return vmcommon.UserError @@ -1763,12 +1780,13 @@ func (d *delegation) unDelegate(args *vmcommon.ContractCallInput) vmcommon.Retur d.eei.AddReturnMessage("invalid value to undelegate - need to undelegate all - do not leave dust behind") return vmcommon.UserError } - err = d.checkOwnerCanUnDelegate(args.CallerAddr, activeFund, valueToUnDelegate) + + err = d.checkOwnerCanUnDelegate(delegatorAddress, activeFund, valueToUnDelegate) if err 
!= nil { d.eei.AddReturnMessage(err.Error()) return vmcommon.UserError } - err = d.computeAndUpdateRewards(args.CallerAddr, delegator) + err = d.computeAndUpdateRewards(delegatorAddress, delegator) if err != nil { d.eei.AddReturnMessage(err.Error()) return vmcommon.UserError @@ -1780,7 +1798,7 @@ func (d *delegation) unDelegate(args *vmcommon.ContractCallInput) vmcommon.Retur return vmcommon.UserError } - returnData, returnCode := d.executeOnValidatorSCWithValueInArgs(args.RecipientAddr, "unStakeTokens", valueToUnDelegate) + returnData, returnCode := d.executeOnValidatorSCWithValueInArgs(contractAddress, "unStakeTokens", valueToUnDelegate) if returnCode != vmcommon.Ok { return returnCode } @@ -1798,7 +1816,7 @@ func (d *delegation) unDelegate(args *vmcommon.ContractCallInput) vmcommon.Retur return vmcommon.UserError } - err = d.addNewUnStakedFund(args.CallerAddr, delegator, actualUserUnStake) + err = d.addNewUnStakedFund(delegatorAddress, delegator, actualUserUnStake) if err != nil { d.eei.AddReturnMessage(err.Error()) return vmcommon.UserError @@ -1822,7 +1840,7 @@ func (d *delegation) unDelegate(args *vmcommon.ContractCallInput) vmcommon.Retur return vmcommon.UserError } - err = d.saveDelegatorData(args.CallerAddr, delegator) + err = d.saveDelegatorData(delegatorAddress, delegator) if err != nil { d.eei.AddReturnMessage(err.Error()) return vmcommon.UserError @@ -1971,11 +1989,31 @@ func (d *delegation) computeAndUpdateRewards(callerAddress []byte, delegator *De isOwner := d.isOwner(callerAddress) + totalRewards, err := d.computeRewards(delegator.RewardsCheckpoint, isOwner, activeFund.Value) + if err != nil { + return err + } + delegator.UnClaimedRewards.Add(delegator.UnClaimedRewards, totalRewards) + delegator.RewardsCheckpoint = currentEpoch + 1 + + return nil +} + +func (d *delegation) computeRewards( + rewardsCheckpoint uint32, + isOwner bool, + activeValue *big.Int, +) (*big.Int, error) { totalRewards := big.NewInt(0) - for i := delegator.RewardsCheckpoint; i <= currentEpoch; i++ { + if activeValue.Cmp(zero) <= 0 { + return totalRewards, nil + } + + currentEpoch := d.eei.BlockChainHook().CurrentEpoch() + for i := rewardsCheckpoint; i <= currentEpoch; i++ { found, rewardData, errGet := d.getRewardComputationData(i) if errGet != nil { - return errGet + return nil, errGet } if !found { continue @@ -1999,7 +2037,7 @@ func (d *delegation) computeAndUpdateRewards(callerAddress []byte, delegator *De rewardForDelegator := big.NewInt(0).Sub(rewardData.RewardsToDistribute, rewardsForOwner) // delegator reward is: rewardForDelegator * user stake / total active - rewardForDelegator.Mul(rewardForDelegator, activeFund.Value) + rewardForDelegator.Mul(rewardForDelegator, activeValue) rewardForDelegator.Div(rewardForDelegator, rewardData.TotalActive) if isOwner { @@ -2008,10 +2046,7 @@ func (d *delegation) computeAndUpdateRewards(callerAddress []byte, delegator *De totalRewards.Add(totalRewards, rewardForDelegator) } - delegator.UnClaimedRewards.Add(delegator.UnClaimedRewards, totalRewards) - delegator.RewardsCheckpoint = currentEpoch + 1 - - return nil + return totalRewards, nil } func (d *delegation) claimRewards(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { @@ -2041,11 +2076,7 @@ func (d *delegation) claimRewards(args *vmcommon.ContractCallInput) vmcommon.Ret return vmcommon.UserError } - err = d.eei.Transfer(args.CallerAddr, args.RecipientAddr, delegator.UnClaimedRewards, nil, 0) - if err != nil { - d.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } + 
d.eei.Transfer(args.CallerAddr, args.RecipientAddr, delegator.UnClaimedRewards, nil, 0) unclaimedRewardsBytes := delegator.UnClaimedRewards.Bytes() delegator.TotalCumulatedRewards.Add(delegator.TotalCumulatedRewards, delegator.UnClaimedRewards) @@ -2112,6 +2143,7 @@ func (d *delegation) withdraw(args *vmcommon.ContractCallInput) vmcommon.ReturnC d.eei.AddReturnMessage(vm.ErrCallValueMustBeZero.Error()) return vmcommon.UserError } + err := d.eei.UseGas(d.gasCost.MetaChainSystemSCsCost.DelegationOps) if err != nil { d.eei.AddReturnMessage(err.Error()) @@ -2212,11 +2244,7 @@ func (d *delegation) withdraw(args *vmcommon.ContractCallInput) vmcommon.ReturnC return vmcommon.UserError } - err = d.eei.Transfer(args.CallerAddr, args.RecipientAddr, actualUserUnBond, nil, 0) - if err != nil { - d.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } + d.eei.Transfer(args.CallerAddr, args.RecipientAddr, actualUserUnBond, nil, 0) var wasDeleted bool wasDeleted, err = d.deleteDelegatorOnWithdrawIfNeeded(args.CallerAddr, delegator) @@ -2900,6 +2928,45 @@ func (d *delegation) addTokens(args *vmcommon.ContractCallInput) vmcommon.Return return vmcommon.Ok } +func (d *delegation) executeStakeAndUpdateStatus( + dConfig *DelegationConfig, + dStatus *DelegationContractStatus, + globalFund *GlobalFundData, + valueToStake *big.Int, + scAddress []byte, +) vmcommon.ReturnCode { + stakeArgs := d.makeStakeArgsIfAutomaticActivation(dConfig, dStatus, globalFund) + vmOutput, err := d.executeOnValidatorSC(scAddress, "stake", stakeArgs, valueToStake) + if err != nil { + d.eei.AddReturnMessage(err.Error()) + return vmcommon.UserError + } + if vmOutput.ReturnCode != vmcommon.Ok { + return vmOutput.ReturnCode + } + + if len(stakeArgs) > 0 { + err = d.updateDelegationStatusAfterStake(dStatus, vmOutput.ReturnData, stakeArgs) + if err != nil { + d.eei.AddReturnMessage(err.Error()) + return vmcommon.UserError + } + } + + err = d.saveDelegationStatus(dStatus) + if err != nil { + d.eei.AddReturnMessage(err.Error()) + return vmcommon.UserError + } + err = d.saveGlobalFundData(globalFund) + if err != nil { + d.eei.AddReturnMessage(err.Error()) + return vmcommon.UserError + } + + return vmcommon.Ok +} + func (d *delegation) executeOnValidatorSC(address []byte, function string, args [][]byte, value *big.Int) (*vmcommon.VMOutput, error) { validatorCall := function for _, key := range args { @@ -2912,7 +2979,6 @@ func (d *delegation) executeOnValidatorSC(address []byte, function string, args } return vmOutput, nil - } func (d *delegation) getDelegationContractConfig() (*DelegationConfig, error) { diff --git a/vm/systemSmartContracts/delegationManager_test.go b/vm/systemSmartContracts/delegationManager_test.go index b683ac4331c..e2b4de77d8f 100644 --- a/vm/systemSmartContracts/delegationManager_test.go +++ b/vm/systemSmartContracts/delegationManager_test.go @@ -1171,7 +1171,7 @@ func TestDelegationManagerSystemSC_ClaimMultipleDelegationDuplicatedInput(t *tes GetCalled: func(key []byte) (vm.SystemSmartContract, error) { return &mock.SystemSCStub{ ExecuteCalled: func(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { - _ = d.eei.Transfer(args.RecipientAddr, args.CallerAddr, big.NewInt(10), nil, 0) + d.eei.Transfer(args.RecipientAddr, args.CallerAddr, big.NewInt(10), nil, 0) return vmcommon.Ok }, }, nil @@ -1197,7 +1197,7 @@ func TestDelegationManagerSystemSC_ClaimMultipleDelegation(t *testing.T) { GetCalled: func(key []byte) (vm.SystemSmartContract, error) { return &mock.SystemSCStub{ ExecuteCalled: func(args 
*vmcommon.ContractCallInput) vmcommon.ReturnCode { - _ = d.eei.Transfer(args.CallerAddr, args.RecipientAddr, big.NewInt(10), nil, 0) + d.eei.Transfer(args.CallerAddr, args.RecipientAddr, big.NewInt(10), nil, 0) return vmcommon.Ok }, }, nil diff --git a/vm/systemSmartContracts/delegation_test.go b/vm/systemSmartContracts/delegation_test.go index 73cbab30716..fe93b1c8368 100644 --- a/vm/systemSmartContracts/delegation_test.go +++ b/vm/systemSmartContracts/delegation_test.go @@ -16,6 +16,7 @@ import ( "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/process/smartContract/hooks" "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" + "github.com/multiversx/mx-chain-go/testscommon/shardingMocks" stateMock "github.com/multiversx/mx-chain-go/testscommon/state" "github.com/multiversx/mx-chain-go/vm" "github.com/multiversx/mx-chain-go/vm/mock" @@ -158,6 +159,14 @@ func createDelegationContractAndEEI() (*delegation, *vmContext) { args.DelegationSCConfig.MaxServiceFee = 10000 args.DelegationSCConfig.MinServiceFee = 0 d, _ := NewDelegationSystemSC(args) + + managementData := &DelegationManagement{ + MinDeposit: big.NewInt(10), + MinDelegationAmount: big.NewInt(10), + } + marshaledData, _ := d.marshalizer.Marshal(managementData) + eei.SetStorageForAddress(d.delegationMgrSCAddress, []byte(delegationManagementKey), marshaledData) + return d, eei } @@ -1615,9 +1624,16 @@ func TestDelegationSystemSC_ExecuteUnDelegateUserErrorsWhenGettingMinimumDelegat }) d.eei.SetStorage([]byte(lastFundKey), fundKey) + managementData := &DelegationManagement{ + MinDeposit: big.NewInt(50), + MinDelegationAmount: big.NewInt(50), + } + marshaledData, _ := d.marshalizer.Marshal(managementData) + eei.SetStorageForAddress(d.delegationMgrSCAddress, []byte(delegationManagementKey), marshaledData) + output := d.Execute(vmInput) assert.Equal(t, vmcommon.UserError, output) - assert.True(t, strings.Contains(eei.returnMessage, "error getting minimum delegation amount")) + assert.True(t, strings.Contains(eei.returnMessage, "invalid value to undelegate - need to undelegate all - do not leave dust behind")) } func TestDelegationSystemSC_ExecuteUnDelegateUserNotDelegatorOrNoActiveFundShouldErr(t *testing.T) { @@ -5028,3 +5044,139 @@ func TestDelegationSystemSC_SynchronizeOwner(t *testing.T) { eei.ResetReturnMessage() }) } + +func TestDelegationSystemSC_ExecuteAddNodesStakeNodesWithNodesLimit(t *testing.T) { + t.Parallel() + + sig := []byte("sig1") + args := createMockArgumentsForDelegation() + args.EnableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStub( + common.StakingV4Step1Flag, + common.StakingV4Step2Flag, + common.StakingV4Step3Flag, + common.StakeLimitsFlag, + + common.DelegationSmartContractFlag, + common.StakingV2FlagAfterEpoch, + common.AddTokensToDelegationFlag, + common.DeleteDelegatorAfterClaimRewardsFlag, + common.ComputeRewardCheckpointFlag, + common.ValidatorToDelegationFlag, + common.ReDelegateBelowMinCheckFlag, + common.MultiClaimOnDelegationFlag, + ) + eei := createDefaultEei() + delegationsMap := map[string][]byte{} + delegationsMap[ownerKey] = []byte("owner") + eei.storageUpdate[string(eei.scAddress)] = delegationsMap + args.Eei = eei + + d, _ := NewDelegationSystemSC(args) + + blsKey1 := []byte("blsKey1") + blsKey2 := []byte("blsKey2") + key1 := &NodesData{ + BLSKey: blsKey1, + } + key2 := &NodesData{ + BLSKey: blsKey2, + } + dStatus := &DelegationContractStatus{ + StakedKeys: []*NodesData{key1, key2}, + } + _ = d.saveDelegationStatus(dStatus) + + 
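The updated expectation above ("do not leave dust behind") reflects the minimum-delegation rule: an unDelegate that would leave a non-zero active amount below MinDelegationAmount is rejected. A hedged sketch of that rule; only the error wording is taken from the test, the surrounding checks are simplified.

package main

import (
	"errors"
	"fmt"
	"math/big"
)

// checkRemainingDelegation rejects an unDelegate that would leave an active stake
// above zero but below the configured minimum, forcing the caller to undelegate all.
func checkRemainingDelegation(active, toUnDelegate, minDelegationAmount *big.Int) error {
	remaining := big.NewInt(0).Sub(active, toUnDelegate)
	if remaining.Sign() < 0 {
		return errors.New("not enough active stake to undelegate")
	}
	if remaining.Sign() > 0 && remaining.Cmp(minDelegationAmount) < 0 {
		return errors.New("invalid value to undelegate - need to undelegate all - do not leave dust behind")
	}
	return nil
}

func main() {
	active, min := big.NewInt(100), big.NewInt(50)
	fmt.Println(checkRemainingDelegation(active, big.NewInt(60), min))  // dust error: only 40 would remain
	fmt.Println(checkRemainingDelegation(active, big.NewInt(100), min)) // <nil>: undelegating everything is fine
}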
globalFund := &GlobalFundData{ + TotalActive: big.NewInt(400), + } + _ = d.saveGlobalFundData(globalFund) + + addValidatorAndStakingScToVmContextWithBlsKeys(eei, [][]byte{blsKey1, blsKey2}) + + dStatus, _ = d.getDelegationStatus() + require.Equal(t, 2, len(dStatus.StakedKeys)) + require.Equal(t, 0, len(dStatus.UnStakedKeys)) + require.Equal(t, 0, len(dStatus.NotStakedKeys)) + + newBlsKey1 := []byte("newBlsKey1") + vmInput := getDefaultVmInputForFunc("addNodes", [][]byte{newBlsKey1, sig}) + output := d.Execute(vmInput) + require.Equal(t, vmcommon.Ok, output) + + vmInput = getDefaultVmInputForFunc("stakeNodes", [][]byte{newBlsKey1}) + output = d.Execute(vmInput) + require.Equal(t, vmcommon.Ok, output) + + dStatus, _ = d.getDelegationStatus() + require.Equal(t, 3, len(dStatus.StakedKeys)) + require.Equal(t, 0, len(dStatus.UnStakedKeys)) + require.Equal(t, 0, len(dStatus.NotStakedKeys)) + + addValidatorAndStakingScToVmContextWithBlsKeys(eei, [][]byte{blsKey1, blsKey2, newBlsKey1}) + + newBlsKey2 := []byte("newBlsKey2") + vmInput = getDefaultVmInputForFunc("addNodes", [][]byte{newBlsKey2, sig}) + output = d.Execute(vmInput) + require.Equal(t, vmcommon.Ok, output) + + vmInput = getDefaultVmInputForFunc("stakeNodes", [][]byte{newBlsKey2}) + output = d.Execute(vmInput) + require.Equal(t, vmcommon.UserError, output) + require.True(t, strings.Contains(eei.returnMessage, numberOfNodesTooHigh)) + require.True(t, strings.Contains(eei.returnMessage, "num registered bls keys: 3")) + require.True(t, strings.Contains(eei.returnMessage, "node limit: 3")) + + dStatus, _ = d.getDelegationStatus() + require.Equal(t, 3, len(dStatus.StakedKeys)) + require.Equal(t, 0, len(dStatus.UnStakedKeys)) + require.Equal(t, 1, len(dStatus.NotStakedKeys)) +} + +func addValidatorAndStakingScToVmContextWithBlsKeys(eei *vmContext, blsKeys [][]byte) { + validatorArgs := createMockArgumentsForValidatorSC() + validatorArgs.StakingSCConfig.NodeLimitPercentage = 1 + validatorArgs.Eei = eei + validatorArgs.StakingSCConfig.GenesisNodePrice = "100" + validatorArgs.StakingSCAddress = vm.StakingSCAddress + validatorArgs.NodesCoordinator = &shardingMocks.NodesCoordinatorStub{ + GetNumTotalEligibleCalled: func() uint64 { + return 3 + }, + } + validatorSc, _ := NewValidatorSmartContract(validatorArgs) + + stakingArgs := createMockStakingScArguments() + stakingArgs.Eei = eei + stakingSc, _ := NewStakingSmartContract(stakingArgs) + + _ = eei.SetSystemSCContainer(&mock.SystemSCContainerStub{GetCalled: func(key []byte) (contract vm.SystemSmartContract, err error) { + if bytes.Equal(key, vm.StakingSCAddress) { + return stakingSc, nil + } + + if bytes.Equal(key, vm.ValidatorSCAddress) { + _ = validatorSc.saveRegistrationData([]byte("addr"), &ValidatorDataV2{ + RewardAddress: []byte("rewardAddr"), + TotalStakeValue: big.NewInt(1000), + LockedStake: big.NewInt(500), + BlsPubKeys: blsKeys, + TotalUnstaked: big.NewInt(150), + UnstakedInfo: []*UnstakedValue{ + { + UnstakedEpoch: 10, + UnstakedValue: big.NewInt(60), + }, + { + UnstakedEpoch: 50, + UnstakedValue: big.NewInt(80), + }, + }, + NumRegistered: uint32(len(blsKeys)), + }) + validatorSc.unBondPeriod = 50 + return validatorSc, nil + } + + return nil, nil + }}) +} diff --git a/vm/systemSmartContracts/eei.go b/vm/systemSmartContracts/eei.go index a2743693694..55f554d11b0 100644 --- a/vm/systemSmartContracts/eei.go +++ b/vm/systemSmartContracts/eei.go @@ -1,6 +1,7 @@ package systemSmartContracts import ( + "errors" "fmt" "math/big" @@ -75,6 +76,7 @@ func NewVMContext(args VMContextArgs) 
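The node-limit test drives NodeLimitPercentage = 1 against three eligible nodes and expects the fourth staked key to fail with numberOfNodesTooHigh. An illustrative reconstruction of that check; the exact formula lives in the validator system SC and is inferred here from the test's "num registered bls keys: 3" / "node limit: 3" messages.

package main

import "fmt"

// checkNodesLimit is an assumed shape of the stake-limit guard: the limit is the
// percentage of the total eligible nodes, and registering more keys than that fails.
func checkNodesLimit(numRegistered, numNewKeys int, nodeLimitPercentage float64, numTotalEligible uint64) error {
	nodeLimit := int(nodeLimitPercentage * float64(numTotalEligible))
	if numRegistered+numNewKeys > nodeLimit {
		return fmt.Errorf("number of nodes is too high, num registered bls keys: %d, node limit: %d",
			numRegistered, nodeLimit)
	}
	return nil
}

func main() {
	// Mirrors the test setup: percentage 1 and three eligible nodes give a limit of 3.
	fmt.Println(checkNodesLimit(2, 1, 1, 3)) // <nil>  (a third key still fits)
	fmt.Println(checkNodesLimit(3, 1, 1, 3)) // error  (a fourth key exceeds the limit)
}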
(*vmContext, error) { err := core.CheckHandlerCompatibility(args.EnableEpochsHandler, []core.EnableEpochFlag{ common.MultiClaimOnDelegationFlag, common.SetSenderInEeiOutputTransferFlag, + common.AlwaysMergeContextsInEEIFlag, }) if err != nil { return nil, err @@ -217,6 +219,17 @@ func (host *vmContext) SendGlobalSettingToAll(_ []byte, input []byte) { } } +func (host *vmContext) transferValueOnly( + destination []byte, + sender []byte, + value *big.Int, +) { + senderAcc, destAcc := host.getSenderDestination(sender, destination) + + _ = senderAcc.BalanceDelta.Sub(senderAcc.BalanceDelta, value) + _ = destAcc.BalanceDelta.Add(destAcc.BalanceDelta, value) +} + func (host *vmContext) getSenderDestination(sender, destination []byte) (*vmcommon.OutputAccount, *vmcommon.OutputAccount) { senderAcc, exists := host.outputAccounts[string(sender)] if !exists { @@ -241,17 +254,6 @@ func (host *vmContext) getSenderDestination(sender, destination []byte) (*vmcomm return senderAcc, destAcc } -func (host *vmContext) transferValueOnly( - destination []byte, - sender []byte, - value *big.Int, -) { - senderAcc, destAcc := host.getSenderDestination(sender, destination) - - _ = senderAcc.BalanceDelta.Sub(senderAcc.BalanceDelta, value) - _ = destAcc.BalanceDelta.Add(destAcc.BalanceDelta, value) -} - // Transfer handles any necessary value transfer required and takes // the necessary steps to create accounts func (host *vmContext) Transfer( @@ -260,7 +262,7 @@ func (host *vmContext) Transfer( value *big.Int, input []byte, gasLimit uint64, -) error { +) { host.transferValueOnly(destination, sender, value) senderAcc, destAcc := host.getSenderDestination(sender, destination) outputTransfer := vmcommon.OutputTransfer{ @@ -275,8 +277,6 @@ func (host *vmContext) Transfer( outputTransfer.SenderAddress = senderAcc.Address } destAcc.OutputTransfers = append(destAcc.OutputTransfers, outputTransfer) - - return nil } // GetLogs returns the logs @@ -340,8 +340,11 @@ func (host *vmContext) properMergeContexts(parentContext *vmContext, returnCode host.scAddress = parentContext.scAddress host.AddReturnMessage(parentContext.returnMessage) - if returnCode != vmcommon.Ok { - // no need to merge - revert was done - transaction will fail + + // merge contexts if the return code is OK or the fix flag is activated because it was wrong not to merge them if the call failed + shouldMergeContexts := returnCode == vmcommon.Ok || host.enableEpochsHandler.IsFlagEnabled(common.AlwaysMergeContextsInEEIFlag) + if !shouldMergeContexts { + // backwards compatibility return } @@ -432,7 +435,8 @@ func createDirectCallInput( func (host *vmContext) transferBeforeInternalExec(callInput *vmcommon.ContractCallInput, sender []byte, callType string) error { if !host.enableEpochsHandler.IsFlagEnabled(common.MultiClaimOnDelegationFlag) { - return host.Transfer(callInput.RecipientAddr, sender, callInput.CallValue, nil, 0) + host.Transfer(callInput.RecipientAddr, sender, callInput.CallValue, nil, 0) + return nil } host.transferValueOnly(callInput.RecipientAddr, sender, callInput.CallValue) @@ -530,6 +534,8 @@ func (host *vmContext) ExecuteOnDestContext(destination []byte, sender []byte, v vmOutput := &vmcommon.VMOutput{ReturnCode: vmcommon.UserError} currContext := host.copyToNewContext() defer func() { + // we need to reset here the output since it was already transferred in the vmOutput (host.CreateVMOutput() function) + // and we do not want to duplicate them host.output = make([][]byte, 0) host.properMergeContexts(currContext, vmOutput.ReturnCode) }() @@ 
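Transfer in eei.go no longer returns an error: it only books balance deltas on the two output accounts and appends an OutputTransfer, so there is no failure path left to report. A simplified, self-contained sketch of that bookkeeping; the outputAccount and outputTransfer types below are trimmed stand-ins for the vm-common ones.

package main

import (
	"fmt"
	"math/big"
)

type outputTransfer struct {
	Value         *big.Int
	Data          []byte
	SenderAddress []byte
}

type outputAccount struct {
	Address         []byte
	BalanceDelta    *big.Int
	OutputTransfers []outputTransfer
}

type context struct {
	outputAccounts map[string]*outputAccount
}

// account returns the cached output account for an address, creating it if missing.
func (c *context) account(addr []byte) *outputAccount {
	acc, ok := c.outputAccounts[string(addr)]
	if !ok {
		acc = &outputAccount{Address: addr, BalanceDelta: big.NewInt(0)}
		c.outputAccounts[string(addr)] = acc
	}
	return acc
}

// transfer mirrors the error-less Transfer: pure delta bookkeeping plus an output transfer.
func (c *context) transfer(destination, sender []byte, value *big.Int, input []byte) {
	senderAcc, destAcc := c.account(sender), c.account(destination)
	senderAcc.BalanceDelta.Sub(senderAcc.BalanceDelta, value)
	destAcc.BalanceDelta.Add(destAcc.BalanceDelta, value)
	destAcc.OutputTransfers = append(destAcc.OutputTransfers, outputTransfer{
		Value:         big.NewInt(0).Set(value),
		Data:          input,
		SenderAddress: senderAcc.Address,
	})
}

func main() {
	c := &context{outputAccounts: map[string]*outputAccount{}}
	c.transfer([]byte("dest"), []byte("sender"), big.NewInt(999), nil)
	fmt.Println(c.account([]byte("dest")).BalanceDelta)   // 999
	fmt.Println(c.account([]byte("sender")).BalanceDelta) // -999
}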
-593,6 +599,42 @@ func (host *vmContext) AddLogEntry(entry *vmcommon.LogEntry) { host.logs = append(host.logs, entry) } +// ProcessBuiltInFunction will process the given built in function and will merge the generated output accounts and logs +func (host *vmContext) ProcessBuiltInFunction( + sender, destination []byte, + function string, + arguments [][]byte, +) (*vmcommon.VMOutput, error) { + vmInput := createDirectCallInput(destination, sender, big.NewInt(0), function, arguments) + vmInput.GasProvided = host.GasLeft() + vmOutput, err := host.blockChainHook.ProcessBuiltInFunction(vmInput) + if err != nil { + return nil, err + } + if vmOutput.ReturnCode != vmcommon.Ok { + return nil, errors.New(vmOutput.ReturnMessage) + } + + for address, outAcc := range vmOutput.OutputAccounts { + if len(outAcc.OutputTransfers) > 0 { + leftAccount, exist := host.outputAccounts[address] + if !exist { + leftAccount = &vmcommon.OutputAccount{ + Address: []byte(address), + } + host.outputAccounts[address] = leftAccount + } + leftAccount.OutputTransfers = append(leftAccount.OutputTransfers, outAcc.OutputTransfers...) + } + } + + for _, logEntry := range vmOutput.Logs { + host.AddLogEntry(logEntry) + } + + return vmOutput, nil +} + // BlockChainHook returns the blockchain hook func (host *vmContext) BlockChainHook() vm.BlockchainHook { return host.blockChainHook diff --git a/vm/systemSmartContracts/eei_test.go b/vm/systemSmartContracts/eei_test.go index 0d5df038a98..aa1120e452d 100644 --- a/vm/systemSmartContracts/eei_test.go +++ b/vm/systemSmartContracts/eei_test.go @@ -200,9 +200,7 @@ func TestVmContext_Transfer(t *testing.T) { value := big.NewInt(999) input := []byte("input") - err := vmCtx.Transfer(destination, sender, value, input, 0) - assert.Nil(t, err) - + vmCtx.Transfer(destination, sender, value, input, 0) balance := vmCtx.GetBalance(destination) assert.Equal(t, value.Uint64(), balance.Uint64()) diff --git a/vm/systemSmartContracts/esdt.go b/vm/systemSmartContracts/esdt.go index 2fa5d76c184..7d8fe4bba10 100644 --- a/vm/systemSmartContracts/esdt.go +++ b/vm/systemSmartContracts/esdt.go @@ -50,7 +50,7 @@ type esdt struct { gasCost vm.GasCost baseIssuingCost *big.Int ownerAddress []byte // do not use this in functions. Should use e.getEsdtOwner() - eSDTSCAddress []byte + esdtSCAddress []byte endOfEpochSCAddress []byte marshalizer marshal.Marshalizer hasher hashing.Hasher @@ -111,7 +111,6 @@ func NewESDTSmartContract(args ArgsNewESDTSmartContract) (*esdt, error) { if len(args.EndOfEpochSCAddress) == 0 { return nil, vm.ErrNilEndOfEpochSmartContractAddress } - baseIssuingCost, okConvert := big.NewInt(0).SetString(args.ESDTSCConfig.BaseIssuingCost, conversionBase) if !okConvert || baseIssuingCost.Cmp(big.NewInt(0)) < 0 { return nil, vm.ErrInvalidBaseIssuingCost @@ -124,7 +123,7 @@ func NewESDTSmartContract(args ArgsNewESDTSmartContract) (*esdt, error) { // we should have called pubkeyConverter.Decode here instead of a byte slice cast. 
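ProcessBuiltInFunction forwards the call through the blockchain hook and then folds the produced output transfers and log entries back into the host context. A reduced sketch of the merge step, using trimmed stand-in types and an assumed helper name mergeOutputAccounts.

package main

import "fmt"

type outputTransfer struct{ Data []byte }

type outputAccount struct {
	Address         []byte
	OutputTransfers []outputTransfer
}

// mergeOutputAccounts appends the transfers produced by a built-in function call
// into the host's account map, creating missing accounts on the fly.
func mergeOutputAccounts(host, produced map[string]*outputAccount) {
	for address, outAcc := range produced {
		if len(outAcc.OutputTransfers) == 0 {
			continue
		}
		hostAcc, exists := host[address]
		if !exists {
			hostAcc = &outputAccount{Address: []byte(address)}
			host[address] = hostAcc
		}
		hostAcc.OutputTransfers = append(hostAcc.OutputTransfers, outAcc.OutputTransfers...)
	}
}

func main() {
	host := map[string]*outputAccount{}
	produced := map[string]*outputAccount{
		"receiver": {Address: []byte("receiver"), OutputTransfers: []outputTransfer{{Data: []byte("ESDTTransfer@...")}}},
	}
	mergeOutputAccounts(host, produced)
	fmt.Println(len(host["receiver"].OutputTransfers)) // 1
}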
Since that change would break // backwards compatibility, the fix was carried in the epochStart/metachain/systemSCs.go ownerAddress: []byte(args.ESDTSCConfig.OwnerAddress), - eSDTSCAddress: args.ESDTSCAddress, + esdtSCAddress: args.ESDTSCAddress, hasher: args.Hasher, marshalizer: args.Marshalizer, endOfEpochSCAddress: args.EndOfEpochSCAddress, @@ -326,11 +325,7 @@ func (e *esdt) issue(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { if initialSupply.Cmp(zero) > 0 { esdtTransferData := core.BuiltInFunctionESDTTransfer + "@" + hex.EncodeToString(tokenIdentifier) + "@" + hex.EncodeToString(initialSupply.Bytes()) - err = e.eei.Transfer(args.CallerAddr, e.eSDTSCAddress, big.NewInt(0), []byte(esdtTransferData), 0) - if err != nil { - e.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } + e.eei.Transfer(args.CallerAddr, e.esdtSCAddress, big.NewInt(0), []byte(esdtTransferData), 0) } else { e.eei.Finish(tokenIdentifier) } @@ -873,12 +868,7 @@ func (e *esdt) burn(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { } if !token.Burnable { esdtTransferData := core.BuiltInFunctionESDTTransfer + "@" + hex.EncodeToString(args.Arguments[0]) + "@" + hex.EncodeToString(args.Arguments[1]) - err = e.eei.Transfer(args.CallerAddr, e.eSDTSCAddress, big.NewInt(0), []byte(esdtTransferData), 0) - if err != nil { - e.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } - + e.eei.Transfer(args.CallerAddr, e.esdtSCAddress, big.NewInt(0), []byte(esdtTransferData), 0) e.eei.AddReturnMessage("token is not burnable") if e.enableEpochsHandler.IsFlagEnabled(common.MultiClaimOnDelegationFlag) { return vmcommon.UserError @@ -950,11 +940,7 @@ func (e *esdt) mint(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { } esdtTransferData := core.BuiltInFunctionESDTTransfer + "@" + hex.EncodeToString(args.Arguments[0]) + "@" + hex.EncodeToString(mintValue.Bytes()) - err = e.eei.Transfer(destination, e.eSDTSCAddress, big.NewInt(0), []byte(esdtTransferData), 0) - if err != nil { - e.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } + e.eei.Transfer(destination, e.esdtSCAddress, big.NewInt(0), []byte(esdtTransferData), 0) return vmcommon.Ok } @@ -979,11 +965,7 @@ func (e *esdt) toggleFreeze(args *vmcommon.ContractCallInput, builtInFunc string } esdtTransferData := builtInFunc + "@" + hex.EncodeToString(args.Arguments[0]) - err := e.eei.Transfer(args.Arguments[1], e.eSDTSCAddress, big.NewInt(0), []byte(esdtTransferData), 0) - if err != nil { - e.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } + e.eei.Transfer(args.Arguments[1], e.esdtSCAddress, big.NewInt(0), []byte(esdtTransferData), 0) return vmcommon.Ok } @@ -1029,11 +1011,7 @@ func (e *esdt) toggleFreezeSingleNFT(args *vmcommon.ContractCallInput, builtInFu composedArg := append(args.Arguments[0], args.Arguments[1]...) 
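All of these ESDT calls encode the built-in invocation as the function name followed by '@'-separated hex arguments, for example core.BuiltInFunctionESDTTransfer + "@" + hex(token) + "@" + hex(amount). A small standalone helper showing that encoding; the token identifier and amount are illustrative.

package main

import (
	"encoding/hex"
	"fmt"
	"math/big"
)

// buildBuiltInCall joins a built-in function name and its raw arguments into the
// "func@hexArg1@hexArg2..." form passed to e.eei.Transfer by the ESDT contract.
func buildBuiltInCall(function string, args ...[]byte) string {
	data := function
	for _, arg := range args {
		data += "@" + hex.EncodeToString(arg)
	}
	return data
}

func main() {
	token := []byte("TKN-abcdef")
	amount := big.NewInt(200)
	fmt.Println(buildBuiltInCall("ESDTTransfer", token, amount.Bytes()))
	// ESDTTransfer@544b4e2d616263646566@c8
}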
esdtTransferData := builtInFunc + "@" + hex.EncodeToString(composedArg) - err := e.eei.Transfer(args.Arguments[2], e.eSDTSCAddress, big.NewInt(0), []byte(esdtTransferData), 0) - if err != nil { - e.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } + e.eei.Transfer(args.Arguments[2], e.esdtSCAddress, big.NewInt(0), []byte(esdtTransferData), 0) return vmcommon.Ok } @@ -1059,14 +1037,10 @@ func (e *esdt) wipeTokenFromAddress( } esdtTransferData := core.BuiltInFunctionESDTWipe + "@" + hex.EncodeToString(wipeArgument) - err := e.eei.Transfer(address, e.eSDTSCAddress, big.NewInt(0), []byte(esdtTransferData), 0) - if err != nil { - e.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } + e.eei.Transfer(address, e.esdtSCAddress, big.NewInt(0), []byte(esdtTransferData), 0) token.NumWiped++ - err = e.saveToken(tokenID, token) + err := e.saveToken(tokenID, token) if err != nil { e.eei.AddReturnMessage(err.Error()) return vmcommon.UserError @@ -1128,7 +1102,7 @@ func (e *esdt) togglePause(args *vmcommon.ContractCallInput, builtInFunc string) } esdtTransferData := builtInFunc + "@" + hex.EncodeToString(args.Arguments[0]) - e.eei.SendGlobalSettingToAll(e.eSDTSCAddress, []byte(esdtTransferData)) + e.eei.SendGlobalSettingToAll(e.esdtSCAddress, []byte(esdtTransferData)) logEntry := &vmcommon.LogEntry{ Identifier: []byte(builtInFunc), @@ -1162,7 +1136,7 @@ func (e *esdt) saveTokenAndSendForAll(token *ESDTDataV2, tokenID []byte, builtIn } esdtTransferData := builtInCall + "@" + hex.EncodeToString(tokenID) - e.eei.SendGlobalSettingToAll(e.eSDTSCAddress, []byte(esdtTransferData)) + e.eei.SendGlobalSettingToAll(e.esdtSCAddress, []byte(esdtTransferData)) return vmcommon.Ok } @@ -1231,7 +1205,7 @@ func (e *esdt) addBurnRoleAndSendToAllShards(token *ESDTDataV2, tokenID []byte) e.eei.AddLogEntry(logEntry) esdtTransferData := vmcommon.BuiltInFunctionESDTSetBurnRoleForAll + "@" + hex.EncodeToString(tokenID) - e.eei.SendGlobalSettingToAll(e.eSDTSCAddress, []byte(esdtTransferData)) + e.eei.SendGlobalSettingToAll(e.esdtSCAddress, []byte(esdtTransferData)) } func (e *esdt) configChange(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { @@ -1315,11 +1289,7 @@ func (e *esdt) claim(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { } scBalance := e.eei.GetBalance(args.RecipientAddr) - err = e.eei.Transfer(args.CallerAddr, args.RecipientAddr, scBalance, nil, 0) - if err != nil { - e.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } + e.eei.Transfer(args.CallerAddr, args.RecipientAddr, scBalance, nil, 0) return vmcommon.Ok } @@ -1765,11 +1735,7 @@ func (e *esdt) changeToMultiShardCreate(args *vmcommon.ContractCallInput) vmcomm isAddressLastByteZero := addressWithCreateRole[len(addressWithCreateRole)-1] == 0 if !isAddressLastByteZero { multiCreateRoleOnly := [][]byte{[]byte(core.ESDTRoleNFTCreateMultiShard)} - err = e.sendRoleChangeData(args.Arguments[0], addressWithCreateRole, multiCreateRoleOnly, core.BuiltInFunctionSetESDTRole) - if err != nil { - e.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } + e.sendRoleChangeData(args.Arguments[0], addressWithCreateRole, multiCreateRoleOnly, core.BuiltInFunctionSetESDTRole) } err = e.saveToken(args.Arguments[0], token) @@ -1890,16 +1856,13 @@ func (e *esdt) prepareAndSendRoleChangeData( if properties.isMultiShardNFTCreateSet { allRoles = append(allRoles, []byte(core.ESDTRoleNFTCreateMultiShard)) } - err := e.sendRoleChangeData(tokenID, address, allRoles, core.BuiltInFunctionSetESDTRole) - if err != nil { - 
e.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } + e.sendRoleChangeData(tokenID, address, allRoles, core.BuiltInFunctionSetESDTRole) + isTransferRoleDefinedInArgs := isDefinedRoleInArgs(roles, []byte(core.ESDTRoleTransfer)) - firstTransferRoleSet := !properties.transferRoleExists && isTransferRoleDefinedInArgs + firstTransferRoleSet := !properties.transferRoleExists && isDefinedRoleInArgs(roles, []byte(core.ESDTRoleTransfer)) if firstTransferRoleSet { esdtTransferData := core.BuiltInFunctionESDTSetLimitedTransfer + "@" + hex.EncodeToString(tokenID) - e.eei.SendGlobalSettingToAll(e.eSDTSCAddress, []byte(esdtTransferData)) + e.eei.SendGlobalSettingToAll(e.esdtSCAddress, []byte(esdtTransferData)) } if isTransferRoleDefinedInArgs { @@ -2002,12 +1965,7 @@ func (e *esdt) unSetSpecialRole(args *vmcommon.ContractCallInput) vmcommon.Retur esdtRole.Roles = esdtRole.Roles[:len(esdtRole.Roles)-1] } - err := e.sendRoleChangeData(args.Arguments[0], args.Arguments[1], args.Arguments[2:], core.BuiltInFunctionUnSetESDTRole) - if err != nil { - e.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } - + e.sendRoleChangeData(args.Arguments[0], args.Arguments[1], args.Arguments[2:], core.BuiltInFunctionUnSetESDTRole) if len(esdtRole.Roles) == 0 { for i, roles := range token.SpecialRoles { if bytes.Equal(roles.Address, address) { @@ -2025,14 +1983,14 @@ func (e *esdt) unSetSpecialRole(args *vmcommon.ContractCallInput) vmcommon.Retur lastTransferRoleWasDeleted := isTransferRoleInArgs && !transferRoleExists if lastTransferRoleWasDeleted { esdtTransferData := core.BuiltInFunctionESDTUnSetLimitedTransfer + "@" + hex.EncodeToString(args.Arguments[0]) - e.eei.SendGlobalSettingToAll(e.eSDTSCAddress, []byte(esdtTransferData)) + e.eei.SendGlobalSettingToAll(e.esdtSCAddress, []byte(esdtTransferData)) } if isTransferRoleInArgs { e.deleteTransferRoleAddressFromSystemAccount(args.Arguments[0], address) } - err = e.saveToken(args.Arguments[0], token) + err := e.saveToken(args.Arguments[0], token) if err != nil { e.eei.AddReturnMessage(err.Error()) return vmcommon.UserError @@ -2048,7 +2006,7 @@ func (e *esdt) sendNewTransferRoleAddressToSystemAccount(token []byte, address [ } esdtTransferData := vmcommon.BuiltInFunctionESDTTransferRoleAddAddress + "@" + hex.EncodeToString(token) + "@" + hex.EncodeToString(address) - e.eei.SendGlobalSettingToAll(e.eSDTSCAddress, []byte(esdtTransferData)) + e.eei.SendGlobalSettingToAll(e.esdtSCAddress, []byte(esdtTransferData)) } func (e *esdt) deleteTransferRoleAddressFromSystemAccount(token []byte, address []byte) { @@ -2058,7 +2016,7 @@ func (e *esdt) deleteTransferRoleAddressFromSystemAccount(token []byte, address } esdtTransferData := vmcommon.BuiltInFunctionESDTTransferRoleDeleteAddress + "@" + hex.EncodeToString(token) + "@" + hex.EncodeToString(address) - e.eei.SendGlobalSettingToAll(e.eSDTSCAddress, []byte(esdtTransferData)) + e.eei.SendGlobalSettingToAll(e.esdtSCAddress, []byte(esdtTransferData)) } func (e *esdt) sendAllTransferRoleAddresses(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { @@ -2094,7 +2052,7 @@ func (e *esdt) sendAllTransferRoleAddresses(args *vmcommon.ContractCallInput) vm return vmcommon.UserError } - e.eei.SendGlobalSettingToAll(e.eSDTSCAddress, []byte(esdtTransferData)) + e.eei.SendGlobalSettingToAll(e.esdtSCAddress, []byte(esdtTransferData)) return vmcommon.Ok } @@ -2184,11 +2142,7 @@ func (e *esdt) transferNFTCreateRole(args *vmcommon.ContractCallInput) vmcommon. 
esdtTransferNFTCreateData := core.BuiltInFunctionESDTNFTCreateRoleTransfer + "@" + hex.EncodeToString(args.Arguments[0]) + "@" + hex.EncodeToString(args.Arguments[2]) - err = e.eei.Transfer(args.Arguments[1], e.eSDTSCAddress, big.NewInt(0), []byte(esdtTransferNFTCreateData), 0) - if err != nil { - e.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } + e.eei.Transfer(args.Arguments[1], e.esdtSCAddress, big.NewInt(0), []byte(esdtTransferNFTCreateData), 0) return vmcommon.Ok } @@ -2226,11 +2180,7 @@ func (e *esdt) stopNFTCreateForever(args *vmcommon.ContractCallInput) vmcommon.R } for _, currentOwner := range currentOwners { - err = e.sendRoleChangeData(args.Arguments[0], currentOwner, [][]byte{[]byte(core.ESDTRoleNFTCreate)}, core.BuiltInFunctionUnSetESDTRole) - if err != nil { - e.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } + e.sendRoleChangeData(args.Arguments[0], currentOwner, [][]byte{[]byte(core.ESDTRoleNFTCreate)}, core.BuiltInFunctionUnSetESDTRole) } return vmcommon.Ok @@ -2441,7 +2391,7 @@ func (e *esdt) sendTokenTypeToSystemAccounts(caller []byte, tokenID []byte, toke builtInFunc := core.ESDTSetTokenType esdtTransferData := builtInFunc + "@" + hex.EncodeToString(tokenID) + "@" + hex.EncodeToString(token.TokenType) - e.eei.SendGlobalSettingToAll(e.eSDTSCAddress, []byte(esdtTransferData)) + e.eei.SendGlobalSettingToAll(e.esdtSCAddress, []byte(esdtTransferData)) logEntry := &vmcommon.LogEntry{ Identifier: []byte(builtInFunc), @@ -2452,14 +2402,13 @@ func (e *esdt) sendTokenTypeToSystemAccounts(caller []byte, tokenID []byte, toke e.eei.AddLogEntry(logEntry) } -func (e *esdt) sendRoleChangeData(tokenID []byte, destination []byte, roles [][]byte, builtInFunc string) error { +func (e *esdt) sendRoleChangeData(tokenID []byte, destination []byte, roles [][]byte, builtInFunc string) { esdtSetRoleData := builtInFunc + "@" + hex.EncodeToString(tokenID) for _, arg := range roles { esdtSetRoleData += "@" + hex.EncodeToString(arg) } - err := e.eei.Transfer(destination, e.eSDTSCAddress, big.NewInt(0), []byte(esdtSetRoleData), 0) - return err + e.eei.Transfer(destination, e.esdtSCAddress, big.NewInt(0), []byte(esdtSetRoleData), 0) } func (e *esdt) getAllAddressesAndRoles(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { diff --git a/vm/systemSmartContracts/esdt_test.go b/vm/systemSmartContracts/esdt_test.go index cc7b66705f1..59cb7922888 100644 --- a/vm/systemSmartContracts/esdt_test.go +++ b/vm/systemSmartContracts/esdt_test.go @@ -794,7 +794,7 @@ func TestEsdt_ExecuteMintInvalidDestinationAddressShouldFail(t *testing.T) { assert.True(t, strings.Contains(eei.returnMessage, "destination address of invalid length")) } -func TestEsdt_ExecuteMintTransferFailsShouldErr(t *testing.T) { +func TestEsdt_ExecuteMintTransferNoErr(t *testing.T) { t.Parallel() err := errors.New("transfer error") @@ -807,9 +807,6 @@ func TestEsdt_ExecuteMintTransferFailsShouldErr(t *testing.T) { }) return marshalizedData } - args.Eei.(*mock.SystemEIStub).TransferCalled = func(destination []byte, sender []byte, value *big.Int, input []byte) error { - return err - } args.Eei.(*mock.SystemEIStub).AddReturnMessageCalled = func(msg string) { assert.Equal(t, err.Error(), msg) } @@ -818,7 +815,7 @@ func TestEsdt_ExecuteMintTransferFailsShouldErr(t *testing.T) { vmInput := getDefaultVmInputForFunc("mint", [][]byte{[]byte("esdtToken"), {200}}) output := e.Execute(vmInput) - assert.Equal(t, vmcommon.UserError, output) + assert.Equal(t, vmcommon.Ok, output) } func 
TestEsdt_ExecuteMintWithTwoArgsShouldSetOwnerAsDestination(t *testing.T) { @@ -1080,7 +1077,7 @@ func TestEsdt_ExecuteToggleFreezeNonFreezableTokenShouldFail(t *testing.T) { assert.True(t, strings.Contains(eei.returnMessage, "cannot freeze")) } -func TestEsdt_ExecuteToggleFreezeTransferFailsShouldErr(t *testing.T) { +func TestEsdt_ExecuteToggleFreezeTransferNoErr(t *testing.T) { t.Parallel() err := errors.New("transfer error") @@ -1092,9 +1089,6 @@ func TestEsdt_ExecuteToggleFreezeTransferFailsShouldErr(t *testing.T) { }) return marshalizedData } - args.Eei.(*mock.SystemEIStub).TransferCalled = func(destination []byte, sender []byte, value *big.Int, input []byte) error { - return err - } args.Eei.(*mock.SystemEIStub).AddReturnMessageCalled = func(msg string) { assert.Equal(t, err.Error(), msg) } @@ -1103,10 +1097,10 @@ func TestEsdt_ExecuteToggleFreezeTransferFailsShouldErr(t *testing.T) { vmInput := getDefaultVmInputForFunc("freeze", [][]byte{[]byte("esdtToken"), getAddress()}) output := e.Execute(vmInput) - assert.Equal(t, vmcommon.UserError, output) + assert.Equal(t, vmcommon.Ok, output) } -func TestEsdt_ExecuteToggleFreezeSingleNFTTransferFailsShouldErr(t *testing.T) { +func TestEsdt_ExecuteToggleFreezeSingleNFTTransferNoErr(t *testing.T) { t.Parallel() err := errors.New("transfer error") @@ -1119,9 +1113,6 @@ func TestEsdt_ExecuteToggleFreezeSingleNFTTransferFailsShouldErr(t *testing.T) { }) return marshalizedData } - args.Eei.(*mock.SystemEIStub).TransferCalled = func(destination []byte, sender []byte, value *big.Int, input []byte) error { - return err - } args.Eei.(*mock.SystemEIStub).AddReturnMessageCalled = func(msg string) { assert.Equal(t, err.Error(), msg) } @@ -1130,7 +1121,7 @@ func TestEsdt_ExecuteToggleFreezeSingleNFTTransferFailsShouldErr(t *testing.T) { vmInput := getDefaultVmInputForFunc("freezeSingleNFT", [][]byte{[]byte("esdtToken"), big.NewInt(10).Bytes(), getAddress()}) output := e.Execute(vmInput) - assert.Equal(t, vmcommon.UserError, output) + assert.Equal(t, vmcommon.Ok, output) } func TestEsdt_ExecuteToggleFreezeShouldWorkWithRealBech32Address(t *testing.T) { @@ -1566,7 +1557,7 @@ func TestEsdt_ExecuteWipeInvalidDestShouldFail(t *testing.T) { assert.True(t, strings.Contains(eei.returnMessage, "invalid")) } -func TestEsdt_ExecuteWipeTransferFailsShouldErr(t *testing.T) { +func TestEsdt_ExecuteWipeTransferFailsNoErr(t *testing.T) { t.Parallel() err := errors.New("transfer error") @@ -1579,9 +1570,6 @@ func TestEsdt_ExecuteWipeTransferFailsShouldErr(t *testing.T) { }) return marshalizedData } - args.Eei.(*mock.SystemEIStub).TransferCalled = func(destination []byte, sender []byte, value *big.Int, input []byte) error { - return err - } args.Eei.(*mock.SystemEIStub).AddReturnMessageCalled = func(msg string) { assert.Equal(t, err.Error(), msg) } @@ -1590,10 +1578,10 @@ func TestEsdt_ExecuteWipeTransferFailsShouldErr(t *testing.T) { vmInput := getDefaultVmInputForFunc("wipe", [][]byte{[]byte("esdtToken"), getAddress()}) output := e.Execute(vmInput) - assert.Equal(t, vmcommon.UserError, output) + assert.Equal(t, vmcommon.Ok, output) } -func TestEsdt_ExecuteWipeSingleNFTTransferFailsShouldErr(t *testing.T) { +func TestEsdt_ExecuteWipeSingleNFTTransferNoErr(t *testing.T) { t.Parallel() err := errors.New("transfer error") @@ -1606,9 +1594,6 @@ func TestEsdt_ExecuteWipeSingleNFTTransferFailsShouldErr(t *testing.T) { }) return marshalizedData } - args.Eei.(*mock.SystemEIStub).TransferCalled = func(destination []byte, sender []byte, value *big.Int, input []byte) error { - 
return err - } args.Eei.(*mock.SystemEIStub).AddReturnMessageCalled = func(msg string) { assert.Equal(t, err.Error(), msg) } @@ -1617,7 +1602,7 @@ func TestEsdt_ExecuteWipeSingleNFTTransferFailsShouldErr(t *testing.T) { vmInput := getDefaultVmInputForFunc("wipeSingleNFT", [][]byte{[]byte("esdtToken"), big.NewInt(10).Bytes(), getAddress()}) output := e.Execute(vmInput) - assert.Equal(t, vmcommon.UserError, output) + assert.Equal(t, vmcommon.Ok, output) } func TestEsdt_ExecuteWipeShouldWork(t *testing.T) { @@ -2817,7 +2802,6 @@ func TestEsdt_SetSpecialRoleCheckBasicOwnershipErr(t *testing.T) { func TestEsdt_SetSpecialRoleNewSendRoleChangeDataErr(t *testing.T) { t.Parallel() - localErr := errors.New("local err") args := createMockArgumentsForESDT() eei := &mock.SystemEIStub{ GetStorageCalled: func(key []byte) []byte { @@ -2827,9 +2811,8 @@ func TestEsdt_SetSpecialRoleNewSendRoleChangeDataErr(t *testing.T) { tokenBytes, _ := args.Marshalizer.Marshal(token) return tokenBytes }, - TransferCalled: func(destination []byte, sender []byte, value *big.Int, input []byte) error { + TransferCalled: func(destination []byte, sender []byte, value *big.Int, input []byte, _ uint64) { require.Equal(t, []byte("ESDTSetRole@6d79546f6b656e@45534454526f6c654c6f63616c4275726e"), input) - return localErr }, } args.Eei = eei @@ -2864,9 +2847,8 @@ func TestEsdt_SetSpecialRoleAlreadyExists(t *testing.T) { tokenBytes, _ := args.Marshalizer.Marshal(token) return tokenBytes }, - TransferCalled: func(destination []byte, sender []byte, value *big.Int, input []byte) error { + TransferCalled: func(destination []byte, sender []byte, value *big.Int, input []byte, _ uint64) { require.Equal(t, []byte("ESDTSetRole@6d79546f6b656e@45534454526f6c654c6f63616c4275726e"), input) - return nil }, } args.Eei = eei @@ -2903,11 +2885,10 @@ func TestEsdt_SetSpecialRoleCannotSaveToken(t *testing.T) { tokenBytes, _ := args.Marshalizer.Marshal(token) return tokenBytes }, - TransferCalled: func(destination []byte, sender []byte, value *big.Int, input []byte) error { + TransferCalled: func(destination []byte, sender []byte, value *big.Int, input []byte, _ uint64) { require.Equal(t, []byte("ESDTSetRole@6d79546f6b656e@45534454526f6c654c6f63616c4275726e"), input) castedMarshalizer := args.Marshalizer.(*mock.MarshalizerMock) castedMarshalizer.Fail = true - return nil }, } args.Eei = eei @@ -2944,9 +2925,8 @@ func TestEsdt_SetSpecialRoleShouldWork(t *testing.T) { tokenBytes, _ := args.Marshalizer.Marshal(token) return tokenBytes }, - TransferCalled: func(destination []byte, sender []byte, value *big.Int, input []byte) error { + TransferCalled: func(destination []byte, sender []byte, value *big.Int, input []byte, _ uint64) { require.Equal(t, []byte("ESDTSetRole@6d79546f6b656e@45534454526f6c654c6f63616c4275726e"), input) - return nil }, SetStorageCalled: func(key []byte, value []byte) { token := &ESDTDataV2{} @@ -2988,9 +2968,8 @@ func TestEsdt_SetSpecialRoleNFTShouldErr(t *testing.T) { tokenBytes, _ := args.Marshalizer.Marshal(token) return tokenBytes }, - TransferCalled: func(destination []byte, sender []byte, value *big.Int, input []byte) error { + TransferCalled: func(destination []byte, sender []byte, value *big.Int, input []byte, _ uint64) { require.Equal(t, []byte("ESDTSetRole@6d79546f6b656e@45534454526f6c654e4654437265617465"), input) - return nil }, SetStorageCalled: func(key []byte, value []byte) { token := &ESDTDataV2{} @@ -3295,9 +3274,8 @@ func TestEsdt_SetSpecialRoleSFTShouldErr(t *testing.T) { tokenBytes, _ := 
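The rewritten TransferCalled stubs assert on raw built-in payloads; decoding the '@'-separated hex pieces makes the expectations readable (6d79546f6b656e is "myToken", 45534454526f6c654c6f63616c4275726e is "ESDTRoleLocalBurn"). A tiny decoder for such payloads, with an assumed helper name.

package main

import (
	"encoding/hex"
	"fmt"
	"strings"
)

// decodeBuiltInCall splits "func@hexArg1@hexArg2..." and hex-decodes the arguments,
// which helps when reading the payloads asserted in the ESDT tests.
func decodeBuiltInCall(data string) (string, [][]byte, error) {
	parts := strings.Split(data, "@")
	args := make([][]byte, 0, len(parts)-1)
	for _, part := range parts[1:] {
		arg, err := hex.DecodeString(part)
		if err != nil {
			return "", nil, err
		}
		args = append(args, arg)
	}
	return parts[0], args, nil
}

func main() {
	function, args, _ := decodeBuiltInCall("ESDTSetRole@6d79546f6b656e@45534454526f6c654c6f63616c4275726e")
	fmt.Println(function)        // ESDTSetRole
	fmt.Println(string(args[0])) // myToken
	fmt.Println(string(args[1])) // ESDTRoleLocalBurn
}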
args.Marshalizer.Marshal(token) return tokenBytes }, - TransferCalled: func(destination []byte, sender []byte, value *big.Int, input []byte) error { + TransferCalled: func(destination []byte, sender []byte, value *big.Int, input []byte, _ uint64) { require.Equal(t, []byte("ESDTSetRole@6d79546f6b656e@45534454526f6c654e46544164645175616e74697479"), input) - return nil }, SetStorageCalled: func(key []byte, value []byte) { token := &ESDTDataV2{} @@ -3566,10 +3544,9 @@ func TestEsdt_UnsetSpecialRoleCannotRemoveRoleNotExistsShouldErr(t *testing.T) { require.Equal(t, vmcommon.UserError, retCode) } -func TestEsdt_UnsetSpecialRoleRemoveRoleTransferErr(t *testing.T) { +func TestEsdt_UnsetSpecialRoleRemoveRoleTransfer(t *testing.T) { t.Parallel() - localErr := errors.New("local err") args := createMockArgumentsForESDT() eei := &mock.SystemEIStub{ GetStorageCalled: func(key []byte) []byte { @@ -3585,9 +3562,8 @@ func TestEsdt_UnsetSpecialRoleRemoveRoleTransferErr(t *testing.T) { tokenBytes, _ := args.Marshalizer.Marshal(token) return tokenBytes }, - TransferCalled: func(destination []byte, sender []byte, value *big.Int, input []byte) error { + TransferCalled: func(destination []byte, sender []byte, value *big.Int, input []byte, _ uint64) { require.Equal(t, []byte("ESDTUnSetRole@6d79546f6b656e@45534454526f6c654c6f63616c4d696e74"), input) - return localErr }, } args.Eei = eei @@ -3601,7 +3577,7 @@ func TestEsdt_UnsetSpecialRoleRemoveRoleTransferErr(t *testing.T) { vmInput.GasProvided = 50000000 retCode := e.Execute(vmInput) - require.Equal(t, vmcommon.UserError, retCode) + require.Equal(t, vmcommon.Ok, retCode) } func TestEsdt_UnsetSpecialRoleRemoveRoleSaveTokenErr(t *testing.T) { @@ -3622,11 +3598,10 @@ func TestEsdt_UnsetSpecialRoleRemoveRoleSaveTokenErr(t *testing.T) { tokenBytes, _ := args.Marshalizer.Marshal(token) return tokenBytes }, - TransferCalled: func(destination []byte, sender []byte, value *big.Int, input []byte) error { + TransferCalled: func(destination []byte, sender []byte, value *big.Int, input []byte, _ uint64) { require.Equal(t, []byte("ESDTUnSetRole@6d79546f6b656e@45534454526f6c654c6f63616c4d696e74"), input) castedMarshalizer := args.Marshalizer.(*mock.MarshalizerMock) castedMarshalizer.Fail = true - return nil }, } args.Eei = eei @@ -3661,9 +3636,8 @@ func TestEsdt_UnsetSpecialRoleRemoveRoleShouldWork(t *testing.T) { tokenBytes, _ := args.Marshalizer.Marshal(token) return tokenBytes }, - TransferCalled: func(destination []byte, sender []byte, value *big.Int, input []byte) error { + TransferCalled: func(destination []byte, sender []byte, value *big.Int, input []byte, _ uint64) { require.Equal(t, []byte("ESDTUnSetRole@6d79546f6b656e@45534454526f6c654c6f63616c4d696e74"), input) - return nil }, SetStorageCalled: func(key []byte, value []byte) { token := &ESDTDataV2{} @@ -3770,9 +3744,8 @@ func TestEsdt_StopNFTCreateForeverCallShouldWork(t *testing.T) { tokenBytes, _ := args.Marshalizer.Marshal(token) return tokenBytes }, - TransferCalled: func(destination []byte, sender []byte, value *big.Int, input []byte) error { + TransferCalled: func(destination []byte, sender []byte, value *big.Int, input []byte, _ uint64) { require.Equal(t, []byte("ESDTUnSetRole@746f6b656e4944@45534454526f6c654e4654437265617465"), input) - return nil }, } args.Eei = eei @@ -3882,10 +3855,9 @@ func TestEsdt_TransferNFTCreateCallShouldWork(t *testing.T) { tokenBytes, _ := args.Marshalizer.Marshal(token) return tokenBytes }, - TransferCalled: func(destination []byte, sender []byte, value *big.Int, input []byte) 
error { + TransferCalled: func(destination []byte, sender []byte, value *big.Int, input []byte, _ uint64) { require.Equal(t, []byte("ESDTNFTCreateRoleTransfer@746f6b656e4944@63616c6c657232"), input) require.Equal(t, destination, []byte("caller3")) - return nil }, } args.Eei = eei @@ -3920,11 +3892,6 @@ func TestEsdt_TransferNFTCreateCallMultiShardShouldWork(t *testing.T) { tokenBytes, _ := args.Marshalizer.Marshal(token) return tokenBytes }, - TransferCalled: func(destination []byte, sender []byte, value *big.Int, input []byte) error { - require.Equal(t, []byte("ESDTNFTCreateRoleTransfer@746f6b656e4944@3263616c6c6572"), input) - require.Equal(t, destination, []byte("3caller")) - return nil - }, } args.Eei = eei diff --git a/vm/systemSmartContracts/governance.go b/vm/systemSmartContracts/governance.go index 042df1bc204..ae3f080c636 100644 --- a/vm/systemSmartContracts/governance.go +++ b/vm/systemSmartContracts/governance.go @@ -648,11 +648,7 @@ func (g *governanceContract) closeProposal(args *vmcommon.ContractCallInput) vmc g.addToAccumulatedFees(baseConfig.LostProposalFee) } - err = g.eei.Transfer(args.CallerAddr, args.RecipientAddr, tokensToReturn, nil, 0) - if err != nil { - g.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } + g.eei.Transfer(args.CallerAddr, args.RecipientAddr, tokensToReturn, nil, 0) logEntry := &vmcommon.LogEntry{ Identifier: []byte(args.Function), @@ -701,12 +697,7 @@ func (g *governanceContract) claimAccumulatedFees(args *vmcommon.ContractCallInp accumulatedFees := g.getAccumulatedFees() g.setAccumulatedFees(big.NewInt(0)) - err = g.eei.Transfer(args.CallerAddr, args.RecipientAddr, accumulatedFees, nil, 0) - if err != nil { - g.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } - + g.eei.Transfer(args.CallerAddr, args.RecipientAddr, accumulatedFees, nil, 0) return vmcommon.Ok } diff --git a/vm/systemSmartContracts/governance_test.go b/vm/systemSmartContracts/governance_test.go index e451d090f70..387e16b33fb 100644 --- a/vm/systemSmartContracts/governance_test.go +++ b/vm/systemSmartContracts/governance_test.go @@ -366,6 +366,101 @@ func TestGovernanceContract_ChangeConfig(t *testing.T) { require.Equal(t, vmcommon.Ok, retCode) } +func TestGovernanceContract_ValidatorVoteInvalidDelegated(t *testing.T) { + t.Parallel() + + returnMessage := "" + errInvalidVoteSubstr := "invalid delegator address" + callerAddress := vm.FirstDelegationSCAddress + proposalIdentifier := bytes.Repeat([]byte("a"), commitHashLength) + + args := createMockGovernanceArgs() + + generalProposal := &GeneralProposal{ + CommitHash: proposalIdentifier, + StartVoteEpoch: 10, + EndVoteEpoch: 15, + } + args.Eei = &mock.SystemEIStub{ + GetStorageCalled: func(key []byte) []byte { + if bytes.Equal(key, append([]byte(proposalPrefix), proposalIdentifier...)) { + proposalBytes, _ := args.Marshalizer.Marshal(generalProposal) + return proposalBytes + } + + return nil + }, + BlockChainHookCalled: func() vm.BlockchainHook { + return &mock.BlockChainHookStub{ + CurrentNonceCalled: func() uint64 { + return 14 + }, + } + }, + AddReturnMessageCalled: func(msg string) { + returnMessage = msg + }, + } + voteArgs := [][]byte{ + proposalIdentifier, + []byte("yes"), + []byte("delegatedToWrongAddress"), + big.NewInt(1000).Bytes(), + } + + gsc, _ := NewGovernanceContract(args) + callInput := createVMInput(big.NewInt(0), "delegateVote", callerAddress, vm.GovernanceSCAddress, voteArgs) + retCode := gsc.Execute(callInput) + require.Equal(t, vmcommon.UserError, retCode) + require.Contains(t, 
returnMessage, errInvalidVoteSubstr) +} + +func TestGovernanceContract_DelegateVoteUserErrors(t *testing.T) { + t.Parallel() + + gsc, blockchainHook, eei := createGovernanceBlockChainHookStubContextHandler() + blockchainHook.CurrentNonceCalled = func() uint64 { + return 12 + } + + callerAddress := bytes.Repeat([]byte{2}, 32) + proposalIdentifier := []byte("aaaaaaaaa") + generalProposal := &GeneralProposal{ + CommitHash: proposalIdentifier, + StartVoteEpoch: 10, + EndVoteEpoch: 15, + Yes: big.NewInt(0), + No: big.NewInt(0), + Veto: big.NewInt(0), + Abstain: big.NewInt(0), + } + + voteArgs := [][]byte{ + []byte("1"), + []byte("yes"), + } + gsc.eei.SetStorage(voteArgs[0], proposalIdentifier) + _ = gsc.saveGeneralProposal(proposalIdentifier, generalProposal) + + callInput := createVMInput(big.NewInt(0), "delegateVote", callerAddress, vm.GovernanceSCAddress, voteArgs) + retCode := gsc.Execute(callInput) + require.Equal(t, vmcommon.UserError, retCode) + require.Equal(t, eei.GetReturnMessage(), "invalid number of arguments") + + callInput.Arguments = append(callInput.Arguments, []byte{1}, []byte{2}) + callInput.CallValue = big.NewInt(10) + retCode = gsc.Execute(callInput) + require.Equal(t, vmcommon.UserError, retCode) + require.True(t, strings.Contains(eei.GetReturnMessage(), "function is not payable")) + + callInput.CallValue = big.NewInt(0) + callInput.GasProvided = 0 + gsc.gasCost.MetaChainSystemSCsCost.DelegateVote = 10 + retCode = gsc.Execute(callInput) + require.Equal(t, vmcommon.OutOfGas, retCode) + require.True(t, strings.Contains(eei.GetReturnMessage(), "not enough gas")) +} + func TestGovernanceContract_ChangeConfigWrongCaller(t *testing.T) { t.Parallel() @@ -827,52 +922,6 @@ func TestGovernanceContract_VoteTwice(t *testing.T) { require.Equal(t, eei.GetReturnMessage(), "double vote is not allowed") } -func TestGovernanceContract_DelegateVoteUserErrors(t *testing.T) { - t.Parallel() - - gsc, blockchainHook, eei := createGovernanceBlockChainHookStubContextHandler() - blockchainHook.CurrentNonceCalled = func() uint64 { - return 12 - } - - callerAddress := bytes.Repeat([]byte{2}, 32) - proposalIdentifier := []byte("aaaaaaaaa") - generalProposal := &GeneralProposal{ - CommitHash: proposalIdentifier, - StartVoteEpoch: 10, - EndVoteEpoch: 15, - Yes: big.NewInt(0), - No: big.NewInt(0), - Veto: big.NewInt(0), - Abstain: big.NewInt(0), - } - - voteArgs := [][]byte{ - []byte("1"), - []byte("yes"), - } - gsc.eei.SetStorage(voteArgs[0], proposalIdentifier) - _ = gsc.saveGeneralProposal(proposalIdentifier, generalProposal) - - callInput := createVMInput(big.NewInt(0), "delegateVote", callerAddress, vm.GovernanceSCAddress, voteArgs) - retCode := gsc.Execute(callInput) - require.Equal(t, vmcommon.UserError, retCode) - require.Equal(t, eei.GetReturnMessage(), "invalid number of arguments") - - callInput.Arguments = append(callInput.Arguments, []byte{1}, []byte{2}) - callInput.CallValue = big.NewInt(10) - retCode = gsc.Execute(callInput) - require.Equal(t, vmcommon.UserError, retCode) - require.True(t, strings.Contains(eei.GetReturnMessage(), "function is not payable")) - - callInput.CallValue = big.NewInt(0) - callInput.GasProvided = 0 - gsc.gasCost.MetaChainSystemSCsCost.DelegateVote = 10 - retCode = gsc.Execute(callInput) - require.Equal(t, vmcommon.OutOfGas, retCode) - require.True(t, strings.Contains(eei.GetReturnMessage(), "not enough gas")) -} - func TestGovernanceContract_DelegateVoteMoreErrors(t *testing.T) { t.Parallel() diff --git a/vm/systemSmartContracts/staking.go 
b/vm/systemSmartContracts/staking.go index 004254ce87b..7acfb492d15 100644 --- a/vm/systemSmartContracts/staking.go +++ b/vm/systemSmartContracts/staking.go @@ -24,8 +24,6 @@ var log = logger.GetOrCreate("vm/systemsmartcontracts") const ownerKey = "owner" const nodesConfigKey = "nodesConfig" -const waitingListHeadKey = "waitingList" -const waitingElementPrefix = "w_" type stakingSC struct { eei vm.SystemEI @@ -60,13 +58,6 @@ type ArgsNewStakingSmartContract struct { EnableEpochsHandler common.EnableEpochsHandler } -type waitingListReturnData struct { - blsKeys [][]byte - stakedDataList []*StakedDataV2_0 - lastKey []byte - afterLastjailed bool -} - // NewStakingSmartContract creates a staking smart contract func NewStakingSmartContract( args ArgsNewStakingSmartContract, @@ -218,6 +209,8 @@ func (s *stakingSC) Execute(args *vmcommon.ContractCallInput) vmcommon.ReturnCod return s.fixWaitingListQueueSize(args) case "addMissingNodeToQueue": return s.addMissingNodeToQueue(args) + case "unStakeAllNodesFromQueue": + return s.unStakeAllNodesFromQueue(args) } return vmcommon.UserError @@ -243,6 +236,10 @@ func (s *stakingSC) numSpareNodes() int64 { } func (s *stakingSC) canStake() bool { + if s.enableEpochsHandler.IsFlagEnabled(common.StakingV4StartedFlag) { + return true + } + stakeConfig := s.getConfig() return stakeConfig.StakedNodes < stakeConfig.MaxNumNodes } @@ -503,44 +500,6 @@ func (s *stakingSC) stake(args *vmcommon.ContractCallInput, onlyRegister bool) v return vmcommon.Ok } -func (s *stakingSC) processStake(blsKey []byte, registrationData *StakedDataV2_0, addFirst bool) error { - if registrationData.Staked { - return nil - } - - registrationData.RegisterNonce = s.eei.BlockChainHook().CurrentNonce() - if !s.canStake() { - s.eei.AddReturnMessage(fmt.Sprintf("staking is full key put into waiting list %s", hex.EncodeToString(blsKey))) - err := s.addToWaitingList(blsKey, addFirst) - if err != nil { - s.eei.AddReturnMessage("error while adding to waiting") - return err - } - registrationData.Waiting = true - s.eei.Finish([]byte{waiting}) - return nil - } - - err := s.removeFromWaitingList(blsKey) - if err != nil { - s.eei.AddReturnMessage("error while removing from waiting") - return err - } - s.addToStakedNodes(1) - s.activeStakingFor(registrationData) - - return nil -} - -func (s *stakingSC) activeStakingFor(stakingData *StakedDataV2_0) { - stakingData.RegisterNonce = s.eei.BlockChainHook().CurrentNonce() - stakingData.Staked = true - stakingData.StakedNonce = s.eei.BlockChainHook().CurrentNonce() - stakingData.UnStakedEpoch = common.DefaultUnstakedEpoch - stakingData.UnStakedNonce = 0 - stakingData.Waiting = false -} - func (s *stakingSC) unStakeAtEndOfEpoch(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { if !bytes.Equal(args.CallerAddr, s.endOfEpochAccessAddr) { // backward compatibility - no need for return message @@ -573,6 +532,7 @@ func (s *stakingSC) unStakeAtEndOfEpoch(args *vmcommon.ContractCallInput) vmcomm if registrationData.Staked { s.removeFromStakedNodes() } + if registrationData.Waiting { err = s.removeFromWaitingList(args.Arguments[0]) if err != nil { @@ -595,76 +555,111 @@ func (s *stakingSC) unStakeAtEndOfEpoch(args *vmcommon.ContractCallInput) vmcomm return vmcommon.Ok } +func (s *stakingSC) activeStakingFor(stakingData *StakedDataV2_0) { + stakingData.RegisterNonce = s.eei.BlockChainHook().CurrentNonce() + stakingData.Staked = true + stakingData.StakedNonce = s.eei.BlockChainHook().CurrentNonce() + stakingData.UnStakedEpoch = common.DefaultUnstakedEpoch + 
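With StakingV4StartedFlag active the waiting-list queue is gone, so canStake no longer caps staking at MaxNumNodes. A compact sketch of that gate; the flag handler below is a string-keyed stand-in for the enable-epochs handler.

package main

import "fmt"

// flagHandler stands in for the enable-epochs handler used by the staking SC.
type flagHandler interface {
	IsFlagEnabled(flag string) bool
}

type stakeConfig struct {
	StakedNodes int64
	MaxNumNodes int64
}

// canStake mirrors the new gate: once staking v4 has started there is no queue,
// so staking is always allowed; before that, the old MaxNumNodes cap applies.
func canStake(handler flagHandler, cfg stakeConfig) bool {
	if handler.IsFlagEnabled("StakingV4StartedFlag") {
		return true
	}
	return cfg.StakedNodes < cfg.MaxNumNodes
}

type stubHandler struct{ enabled map[string]bool }

func (s stubHandler) IsFlagEnabled(flag string) bool { return s.enabled[flag] }

func main() {
	full := stakeConfig{StakedNodes: 100, MaxNumNodes: 100}
	fmt.Println(canStake(stubHandler{enabled: map[string]bool{}}, full))                             // false
	fmt.Println(canStake(stubHandler{enabled: map[string]bool{"StakingV4StartedFlag": true}}, full)) // true
}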
stakingData.UnStakedNonce = 0 + stakingData.Waiting = false +} + +func (s *stakingSC) processStake(blsKey []byte, registrationData *StakedDataV2_0, addFirst bool) error { + if s.enableEpochsHandler.IsFlagEnabled(common.StakingV4StartedFlag) { + return s.processStakeV2(registrationData) + } + + return s.processStakeV1(blsKey, registrationData, addFirst) +} + +func (s *stakingSC) processStakeV2(registrationData *StakedDataV2_0) error { + if registrationData.Staked { + return nil + } + + registrationData.RegisterNonce = s.eei.BlockChainHook().CurrentNonce() + s.addToStakedNodes(1) + s.activeStakingFor(registrationData) + + return nil +} + func (s *stakingSC) unStake(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { + if s.enableEpochsHandler.IsFlagEnabled(common.StakingV4StartedFlag) { + return s.unStakeV2(args) + } + + return s.unStakeV1(args) +} + +func (s *stakingSC) unStakeV2(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { + registrationData, retCode := s.checkUnStakeArgs(args) + if retCode != vmcommon.Ok { + return retCode + } + + if !registrationData.Staked { + s.eei.AddReturnMessage(vm.ErrWaitingListDisabled.Error()) + return vmcommon.ExecutionFailed + } + + return s.tryUnStake(args.Arguments[0], registrationData) +} + +func (s *stakingSC) checkUnStakeArgs(args *vmcommon.ContractCallInput) (*StakedDataV2_0, vmcommon.ReturnCode) { if !bytes.Equal(args.CallerAddr, s.stakeAccessAddr) { s.eei.AddReturnMessage("unStake function not allowed to be called by address " + string(args.CallerAddr)) - return vmcommon.UserError + return nil, vmcommon.UserError } if len(args.Arguments) < 2 { s.eei.AddReturnMessage("not enough arguments, needed BLS key and reward address") - return vmcommon.UserError + return nil, vmcommon.UserError } registrationData, err := s.getOrCreateRegisteredData(args.Arguments[0]) if err != nil { s.eei.AddReturnMessage("cannot get or create registered data: error " + err.Error()) - return vmcommon.UserError + return nil, vmcommon.UserError } if len(registrationData.RewardAddress) == 0 { s.eei.AddReturnMessage("cannot unStake a key that is not registered") - return vmcommon.UserError + return nil, vmcommon.UserError } if !bytes.Equal(args.Arguments[1], registrationData.RewardAddress) { s.eei.AddReturnMessage("unStake possible only from staker caller") - return vmcommon.UserError + return nil, vmcommon.UserError } if s.isNodeJailedOrWithBadRating(registrationData, args.Arguments[0]) { s.eei.AddReturnMessage("cannot unStake node which is jailed or with bad rating") - return vmcommon.UserError + return nil, vmcommon.UserError } if !registrationData.Staked && !registrationData.Waiting { s.eei.AddReturnMessage("cannot unStake node which was already unStaked") - return vmcommon.UserError - } - - if !registrationData.Staked { - registrationData.Waiting = false - err = s.removeFromWaitingList(args.Arguments[0]) - if err != nil { - s.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } - err = s.saveStakingData(args.Arguments[0], registrationData) - if err != nil { - s.eei.AddReturnMessage("cannot save staking data: error " + err.Error()) - return vmcommon.UserError - } - - return vmcommon.Ok + return nil, vmcommon.UserError } - addOneFromQueue := !s.enableEpochsHandler.IsFlagEnabled(common.CorrectLastUnJailedFlag) || s.canStakeIfOneRemoved() - if addOneFromQueue { - _, err = s.moveFirstFromWaitingToStaked() - if err != nil { - s.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } - } + return registrationData, vmcommon.Ok +} +func (s *stakingSC) 
tryUnStake(key []byte, registrationData *StakedDataV2_0) vmcommon.ReturnCode { if !s.canUnStake() { s.eei.AddReturnMessage("unStake is not possible as too many left") return vmcommon.UserError } s.removeFromStakedNodes() + + return s.doUnStake(key, registrationData) +} + +func (s *stakingSC) doUnStake(key []byte, registrationData *StakedDataV2_0) vmcommon.ReturnCode { registrationData.Staked = false registrationData.UnStakedEpoch = s.eei.BlockChainHook().CurrentEpoch() registrationData.UnStakedNonce = s.eei.BlockChainHook().CurrentNonce() registrationData.Waiting = false - err = s.saveStakingData(args.Arguments[0], registrationData) + err := s.saveStakingData(key, registrationData) if err != nil { s.eei.AddReturnMessage("cannot save staking data: error " + err.Error()) return vmcommon.UserError @@ -673,53 +668,6 @@ func (s *stakingSC) unStake(args *vmcommon.ContractCallInput) vmcommon.ReturnCod return vmcommon.Ok } -func (s *stakingSC) moveFirstFromWaitingToStakedIfNeeded(blsKey []byte) (bool, error) { - waitingElementKey := createWaitingListKey(blsKey) - _, err := s.getWaitingListElement(waitingElementKey) - if err == nil { - // node in waiting - remove from it - and that's it - return false, s.removeFromWaitingList(blsKey) - } - - return s.moveFirstFromWaitingToStaked() -} - -func (s *stakingSC) moveFirstFromWaitingToStaked() (bool, error) { - waitingList, err := s.getWaitingListHead() - if err != nil { - return false, err - } - if waitingList.Length == 0 { - return false, nil - } - elementInList, err := s.getWaitingListElement(waitingList.FirstKey) - if err != nil { - return false, err - } - err = s.removeFromWaitingList(elementInList.BLSPublicKey) - if err != nil { - return false, err - } - - nodeData, err := s.getOrCreateRegisteredData(elementInList.BLSPublicKey) - if err != nil { - return false, err - } - if len(nodeData.RewardAddress) == 0 || nodeData.Staked { - return false, vm.ErrInvalidWaitingList - } - - nodeData.Waiting = false - nodeData.Staked = true - nodeData.RegisterNonce = s.eei.BlockChainHook().CurrentNonce() - nodeData.StakedNonce = s.eei.BlockChainHook().CurrentNonce() - nodeData.UnStakedNonce = 0 - nodeData.UnStakedEpoch = common.DefaultUnstakedEpoch - - s.addToStakedNodes(1) - return true, s.saveStakingData(elementInList.BLSPublicKey, nodeData) -} - func (s *stakingSC) unBond(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { if !bytes.Equal(args.CallerAddr, s.stakeAccessAddr) { s.eei.AddReturnMessage("unBond function not allowed to be called by address " + string(args.CallerAddr)) @@ -809,751 +757,159 @@ func (s *stakingSC) isStaked(args *vmcommon.ContractCallInput) vmcommon.ReturnCo return vmcommon.UserError } -func (s *stakingSC) addToWaitingList(blsKey []byte, addJailed bool) error { - inWaitingListKey := createWaitingListKey(blsKey) - marshaledData := s.eei.GetStorage(inWaitingListKey) - if len(marshaledData) != 0 { - return nil - } - - waitingList, err := s.getWaitingListHead() - if err != nil { - return err +func (s *stakingSC) tryRemoveJailedNodeFromStaked(registrationData *StakedDataV2_0) { + if !s.enableEpochsHandler.IsFlagEnabled(common.CorrectJailedNotUnStakedEmptyQueueFlag) { + s.removeAndSetUnstaked(registrationData) + return } - waitingList.Length += 1 - if waitingList.Length == 1 { - return s.startWaitingList(waitingList, addJailed, blsKey) + if s.canUnStake() { + s.removeAndSetUnstaked(registrationData) + return } - if addJailed { - return s.insertAfterLastJailed(waitingList, blsKey) - } + s.eei.AddReturnMessage("did not switch as not enough 
validators remaining") +} - return s.addToEndOfTheList(waitingList, blsKey) +func (s *stakingSC) removeAndSetUnstaked(registrationData *StakedDataV2_0) { + s.removeFromStakedNodes() + registrationData.Staked = false + registrationData.UnStakedEpoch = s.eei.BlockChainHook().CurrentEpoch() + registrationData.UnStakedNonce = s.eei.BlockChainHook().CurrentNonce() + registrationData.StakedNonce = math.MaxUint64 } -func (s *stakingSC) startWaitingList( - waitingList *WaitingList, - addJailed bool, - blsKey []byte, -) error { - inWaitingListKey := createWaitingListKey(blsKey) - waitingList.FirstKey = inWaitingListKey - waitingList.LastKey = inWaitingListKey - if addJailed { - waitingList.LastJailedKey = inWaitingListKey +func (s *stakingSC) updateConfigMinNodes(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { + if !bytes.Equal(args.CallerAddr, s.endOfEpochAccessAddr) { + s.eei.AddReturnMessage("updateConfigMinNodes function not allowed to be called by address " + string(args.CallerAddr)) + return vmcommon.UserError } - elementInWaiting := &ElementInList{ - BLSPublicKey: blsKey, - PreviousKey: waitingList.LastKey, - NextKey: make([]byte, 0), + stakeConfig := s.getConfig() + if len(args.Arguments) != 1 { + s.eei.AddReturnMessage("number of arguments must be 1") + return vmcommon.UserError } - return s.saveElementAndList(inWaitingListKey, elementInWaiting, waitingList) -} -func (s *stakingSC) addToEndOfTheList(waitingList *WaitingList, blsKey []byte) error { - inWaitingListKey := createWaitingListKey(blsKey) - oldLastKey := make([]byte, len(waitingList.LastKey)) - copy(oldLastKey, waitingList.LastKey) - - lastElement, err := s.getWaitingListElement(waitingList.LastKey) - if err != nil { - return err - } - lastElement.NextKey = inWaitingListKey - elementInWaiting := &ElementInList{ - BLSPublicKey: blsKey, - PreviousKey: oldLastKey, - NextKey: make([]byte, 0), + newMinNodes := big.NewInt(0).SetBytes(args.Arguments[0]).Int64() + if newMinNodes <= 0 { + s.eei.AddReturnMessage("new minimum number of nodes zero or negative") + return vmcommon.UserError } - err = s.saveWaitingListElement(oldLastKey, lastElement) - if err != nil { - return err + if newMinNodes > int64(s.maxNumNodes) { + s.eei.AddReturnMessage("new minimum number of nodes greater than maximum number of nodes") + return vmcommon.UserError } - waitingList.LastKey = inWaitingListKey - return s.saveElementAndList(inWaitingListKey, elementInWaiting, waitingList) -} - -func (s *stakingSC) insertAfterLastJailed( - waitingList *WaitingList, - blsKey []byte, -) error { - inWaitingListKey := createWaitingListKey(blsKey) - if len(waitingList.LastJailedKey) == 0 { - previousFirstKey := make([]byte, len(waitingList.FirstKey)) - copy(previousFirstKey, waitingList.FirstKey) - waitingList.FirstKey = inWaitingListKey - waitingList.LastJailedKey = inWaitingListKey - elementInWaiting := &ElementInList{ - BLSPublicKey: blsKey, - PreviousKey: inWaitingListKey, - NextKey: previousFirstKey, - } + stakeConfig.MinNumNodes = newMinNodes + s.setConfig(stakeConfig) - if s.enableEpochsHandler.IsFlagEnabled(common.CorrectFirstQueuedFlag) && len(previousFirstKey) > 0 { - previousFirstElement, err := s.getWaitingListElement(previousFirstKey) - if err != nil { - return err - } - previousFirstElement.PreviousKey = inWaitingListKey - err = s.saveWaitingListElement(previousFirstKey, previousFirstElement) - if err != nil { - return err - } - } + return vmcommon.Ok +} - return s.saveElementAndList(inWaitingListKey, elementInWaiting, waitingList) +func (s *stakingSC) 
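tryRemoveJailedNodeFromStaked only evicts a jailed node when enough validators would remain (canUnStake); before the CorrectJailedNotUnStakedEmptyQueueFlag fix, the eviction was unconditional. A reduced sketch of that decision with assumed parameter names.

package main

import "fmt"

// shouldUnstakeJailedNode captures the decision taken by tryRemoveJailedNodeFromStaked:
// whether a jailed node may actually be moved out of the staked set.
func shouldUnstakeJailedNode(fixEnabled, canUnStake bool) (unstake bool, message string) {
	if !fixEnabled {
		// pre-fix behaviour: always evict the jailed node
		return true, ""
	}
	if canUnStake {
		return true, ""
	}
	return false, "did not switch as not enough validators remaining"
}

func main() {
	fmt.Println(shouldUnstakeJailedNode(true, false)) // false did not switch as not enough validators remaining
	fmt.Println(shouldUnstakeJailedNode(true, true))  // true
}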
updateConfigMaxNodes(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { + if !s.enableEpochsHandler.IsFlagEnabled(common.StakingV2Flag) { + s.eei.AddReturnMessage("invalid method to call") + return vmcommon.UserError } - - lastJailedElement, err := s.getWaitingListElement(waitingList.LastJailedKey) - if err != nil { - return err + if !bytes.Equal(args.CallerAddr, s.endOfEpochAccessAddr) { + s.eei.AddReturnMessage("updateConfigMaxNodes function not allowed to be called by address " + string(args.CallerAddr)) + return vmcommon.UserError } - if bytes.Equal(waitingList.LastKey, waitingList.LastJailedKey) { - waitingList.LastJailedKey = inWaitingListKey - return s.addToEndOfTheList(waitingList, blsKey) + stakeConfig := s.getConfig() + if len(args.Arguments) != 1 { + s.eei.AddReturnMessage("number of arguments must be 1") + return vmcommon.UserError } - firstNonJailedElement, err := s.getWaitingListElement(lastJailedElement.NextKey) - if err != nil { - return err + newMaxNodes := big.NewInt(0).SetBytes(args.Arguments[0]).Int64() + if newMaxNodes <= 0 { + s.eei.AddReturnMessage("new max number of nodes zero or negative") + return vmcommon.UserError } - elementInWaiting := &ElementInList{ - BLSPublicKey: blsKey, - PreviousKey: make([]byte, len(inWaitingListKey)), - NextKey: make([]byte, len(inWaitingListKey)), + if newMaxNodes < int64(s.minNumNodes) { + s.eei.AddReturnMessage("new max number of nodes less than min number of nodes") + return vmcommon.UserError } - copy(elementInWaiting.PreviousKey, waitingList.LastJailedKey) - copy(elementInWaiting.NextKey, lastJailedElement.NextKey) - lastJailedElement.NextKey = inWaitingListKey - firstNonJailedElement.PreviousKey = inWaitingListKey - waitingList.LastJailedKey = inWaitingListKey + prevMaxNumNodes := big.NewInt(stakeConfig.MaxNumNodes) + s.eei.Finish(prevMaxNumNodes.Bytes()) + stakeConfig.MaxNumNodes = newMaxNodes + s.setConfig(stakeConfig) - err = s.saveWaitingListElement(elementInWaiting.PreviousKey, lastJailedElement) - if err != nil { - return err - } - err = s.saveWaitingListElement(elementInWaiting.NextKey, firstNonJailedElement) - if err != nil { - return err - } - err = s.saveWaitingListElement(inWaitingListKey, elementInWaiting) - if err != nil { - return err - } - return s.saveWaitingListHead(waitingList) + return vmcommon.Ok } -func (s *stakingSC) saveElementAndList(key []byte, element *ElementInList, waitingList *WaitingList) error { - err := s.saveWaitingListElement(key, element) - if err != nil { - return err - } - - return s.saveWaitingListHead(waitingList) +func (s *stakingSC) isNodeJailedOrWithBadRating(registrationData *StakedDataV2_0, blsKey []byte) bool { + return registrationData.Jailed || s.eei.CanUnJail(blsKey) || s.eei.IsBadRating(blsKey) } -func (s *stakingSC) removeFromWaitingList(blsKey []byte) error { - inWaitingListKey := createWaitingListKey(blsKey) - marshaledData := s.eei.GetStorage(inWaitingListKey) - if len(marshaledData) == 0 { - return nil +func (s *stakingSC) getRewardAddress(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { + if args.CallValue.Cmp(zero) != 0 { + s.eei.AddReturnMessage(vm.TransactionValueMustBeZero) + return vmcommon.UserError } - s.eei.SetStorage(inWaitingListKey, nil) - elementToRemove := &ElementInList{} - err := s.marshalizer.Unmarshal(elementToRemove, marshaledData) - if err != nil { - return err + stakedData, returnCode := s.getStakedDataIfExists(args) + if returnCode != vmcommon.Ok { + return returnCode } - waitingList, err := s.getWaitingListHead() + 
s.eei.Finish([]byte(hex.EncodeToString(stakedData.RewardAddress))) + return vmcommon.Ok +} + +func (s *stakingSC) getStakedDataIfExists(args *vmcommon.ContractCallInput) (*StakedDataV2_0, vmcommon.ReturnCode) { + err := s.eei.UseGas(s.gasCost.MetaChainSystemSCsCost.Get) if err != nil { - return err + s.eei.AddReturnMessage("insufficient gas") + return nil, vmcommon.OutOfGas } - if waitingList.Length == 0 { - return vm.ErrInvalidWaitingList + if len(args.Arguments) != 1 { + s.eei.AddReturnMessage("number of arguments must be equal to 1") + return nil, vmcommon.UserError } - waitingList.Length -= 1 - if waitingList.Length == 0 { - s.eei.SetStorage([]byte(waitingListHeadKey), nil) - return nil + stakedData, err := s.getOrCreateRegisteredData(args.Arguments[0]) + if err != nil { + s.eei.AddReturnMessage(err.Error()) + return nil, vmcommon.UserError + } + if len(stakedData.RewardAddress) == 0 { + s.eei.AddReturnMessage("blsKey not registered in staking sc") + return nil, vmcommon.UserError } - // remove the first element - isCorrectFirstQueueFlagEnabled := s.enableEpochsHandler.IsFlagEnabled(common.CorrectFirstQueuedFlag) - isFirstElementBeforeFix := !isCorrectFirstQueueFlagEnabled && bytes.Equal(elementToRemove.PreviousKey, inWaitingListKey) - isFirstElementAfterFix := isCorrectFirstQueueFlagEnabled && bytes.Equal(waitingList.FirstKey, inWaitingListKey) - if isFirstElementBeforeFix || isFirstElementAfterFix { - if bytes.Equal(inWaitingListKey, waitingList.LastJailedKey) { - waitingList.LastJailedKey = make([]byte, 0) - } - - nextElement, errGet := s.getWaitingListElement(elementToRemove.NextKey) - if errGet != nil { - return errGet - } + return stakedData, vmcommon.Ok +} - nextElement.PreviousKey = elementToRemove.NextKey - waitingList.FirstKey = elementToRemove.NextKey - return s.saveElementAndList(elementToRemove.NextKey, nextElement, waitingList) +func (s *stakingSC) getBLSKeyStatus(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { + if args.CallValue.Cmp(zero) != 0 { + s.eei.AddReturnMessage(vm.TransactionValueMustBeZero) + return vmcommon.UserError } - if !s.enableEpochsHandler.IsFlagEnabled(common.CorrectLastUnJailedFlag) || bytes.Equal(inWaitingListKey, waitingList.LastJailedKey) { - waitingList.LastJailedKey = make([]byte, len(elementToRemove.PreviousKey)) - copy(waitingList.LastJailedKey, elementToRemove.PreviousKey) - } - - previousElement, _ := s.getWaitingListElement(elementToRemove.PreviousKey) - // search the other way around for the element in front - if s.enableEpochsHandler.IsFlagEnabled(common.CorrectFirstQueuedFlag) && previousElement == nil { - previousElement, err = s.searchPreviousFromHead(waitingList, inWaitingListKey, elementToRemove) - if err != nil { - return err - } - } - if previousElement == nil { - previousElement, err = s.getWaitingListElement(elementToRemove.PreviousKey) - if err != nil { - return err - } - } - if len(elementToRemove.NextKey) == 0 { - waitingList.LastKey = elementToRemove.PreviousKey - previousElement.NextKey = make([]byte, 0) - return s.saveElementAndList(elementToRemove.PreviousKey, previousElement, waitingList) - } - - nextElement, err := s.getWaitingListElement(elementToRemove.NextKey) - if err != nil { - return err - } - - nextElement.PreviousKey = elementToRemove.PreviousKey - previousElement.NextKey = elementToRemove.NextKey - - err = s.saveWaitingListElement(elementToRemove.NextKey, nextElement) - if err != nil { - return err - } - return s.saveElementAndList(elementToRemove.PreviousKey, previousElement, waitingList) -} - -func (s 
*stakingSC) searchPreviousFromHead(waitingList *WaitingList, inWaitingListKey []byte, elementToRemove *ElementInList) (*ElementInList, error) { - var previousElement *ElementInList - index := uint32(1) - nextKey := make([]byte, len(waitingList.FirstKey)) - copy(nextKey, waitingList.FirstKey) - for len(nextKey) != 0 && index <= waitingList.Length { - element, errGet := s.getWaitingListElement(nextKey) - if errGet != nil { - return nil, errGet - } - - if bytes.Equal(inWaitingListKey, element.NextKey) { - previousElement = element - elementToRemove.PreviousKey = createWaitingListKey(previousElement.BLSPublicKey) - return previousElement, nil - } - - nextKey = make([]byte, len(element.NextKey)) - if len(element.NextKey) == 0 { - break - } - index++ - copy(nextKey, element.NextKey) - } - return nil, vm.ErrElementNotFound -} - -func (s *stakingSC) getWaitingListElement(key []byte) (*ElementInList, error) { - marshaledData := s.eei.GetStorage(key) - if len(marshaledData) == 0 { - return nil, vm.ErrElementNotFound - } - - element := &ElementInList{} - err := s.marshalizer.Unmarshal(element, marshaledData) - if err != nil { - return nil, err - } - - return element, nil -} - -func (s *stakingSC) saveWaitingListElement(key []byte, element *ElementInList) error { - marshaledData, err := s.marshalizer.Marshal(element) - if err != nil { - return err - } - - s.eei.SetStorage(key, marshaledData) - return nil -} - -func (s *stakingSC) getWaitingListHead() (*WaitingList, error) { - waitingList := &WaitingList{ - FirstKey: make([]byte, 0), - LastKey: make([]byte, 0), - Length: 0, - LastJailedKey: make([]byte, 0), - } - marshaledData := s.eei.GetStorage([]byte(waitingListHeadKey)) - if len(marshaledData) == 0 { - return waitingList, nil - } - - err := s.marshalizer.Unmarshal(waitingList, marshaledData) - if err != nil { - return nil, err - } - - return waitingList, nil -} - -func (s *stakingSC) saveWaitingListHead(waitingList *WaitingList) error { - marshaledData, err := s.marshalizer.Marshal(waitingList) - if err != nil { - return err - } - - s.eei.SetStorage([]byte(waitingListHeadKey), marshaledData) - return nil -} - -func createWaitingListKey(blsKey []byte) []byte { - return []byte(waitingElementPrefix + string(blsKey)) -} - -func (s *stakingSC) switchJailedWithWaiting(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { - if !bytes.Equal(args.CallerAddr, s.endOfEpochAccessAddr) { - s.eei.AddReturnMessage("switchJailedWithWaiting function not allowed to be called by address " + string(args.CallerAddr)) - return vmcommon.UserError - } - if len(args.Arguments) != 1 { - return vmcommon.UserError - } - - blsKey := args.Arguments[0] - registrationData, err := s.getOrCreateRegisteredData(blsKey) - if err != nil { - s.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } - if len(registrationData.RewardAddress) == 0 { - s.eei.AddReturnMessage("no need to jail as not a validator") - return vmcommon.UserError - } - if !registrationData.Staked { - s.eei.AddReturnMessage("no need to jail as not a validator") - return vmcommon.UserError - } - if registrationData.Jailed { - s.eei.AddReturnMessage(vm.ErrBLSPublicKeyAlreadyJailed.Error()) - return vmcommon.UserError - } - switched, err := s.moveFirstFromWaitingToStakedIfNeeded(blsKey) - if err != nil { - s.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } - - registrationData.NumJailed++ - registrationData.Jailed = true - registrationData.JailedNonce = s.eei.BlockChainHook().CurrentNonce() - - if !switched && 
!s.enableEpochsHandler.IsFlagEnabled(common.CorrectJailedNotUnStakedEmptyQueueFlag) { - s.eei.AddReturnMessage("did not switch as nobody in waiting, but jailed") - } else { - s.tryRemoveJailedNodeFromStaked(registrationData) - } - - err = s.saveStakingData(blsKey, registrationData) - if err != nil { - s.eei.AddReturnMessage("cannot save staking data: error " + err.Error()) - return vmcommon.UserError - } - - return vmcommon.Ok -} - -func (s *stakingSC) tryRemoveJailedNodeFromStaked(registrationData *StakedDataV2_0) { - if !s.enableEpochsHandler.IsFlagEnabled(common.CorrectJailedNotUnStakedEmptyQueueFlag) { - s.removeAndSetUnstaked(registrationData) - return - } - - if s.canUnStake() { - s.removeAndSetUnstaked(registrationData) - return - } - - s.eei.AddReturnMessage("did not switch as not enough validators remaining") -} - -func (s *stakingSC) removeAndSetUnstaked(registrationData *StakedDataV2_0) { - s.removeFromStakedNodes() - registrationData.Staked = false - registrationData.UnStakedEpoch = s.eei.BlockChainHook().CurrentEpoch() - registrationData.UnStakedNonce = s.eei.BlockChainHook().CurrentNonce() - registrationData.StakedNonce = math.MaxUint64 -} - -func (s *stakingSC) updateConfigMinNodes(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { - if !bytes.Equal(args.CallerAddr, s.endOfEpochAccessAddr) { - s.eei.AddReturnMessage("updateConfigMinNodes function not allowed to be called by address " + string(args.CallerAddr)) - return vmcommon.UserError - } - - stakeConfig := s.getConfig() - if len(args.Arguments) != 1 { - s.eei.AddReturnMessage("number of arguments must be 1") - return vmcommon.UserError - } - - newMinNodes := big.NewInt(0).SetBytes(args.Arguments[0]).Int64() - if newMinNodes <= 0 { - s.eei.AddReturnMessage("new minimum number of nodes zero or negative") - return vmcommon.UserError - } - - if newMinNodes > int64(s.maxNumNodes) { - s.eei.AddReturnMessage("new minimum number of nodes greater than maximum number of nodes") - return vmcommon.UserError - } - - stakeConfig.MinNumNodes = newMinNodes - s.setConfig(stakeConfig) - - return vmcommon.Ok -} - -func (s *stakingSC) updateConfigMaxNodes(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { - if !s.enableEpochsHandler.IsFlagEnabled(common.StakingV2Flag) { - s.eei.AddReturnMessage("invalid method to call") - return vmcommon.UserError - } - if !bytes.Equal(args.CallerAddr, s.endOfEpochAccessAddr) { - s.eei.AddReturnMessage("updateConfigMaxNodes function not allowed to be called by address " + string(args.CallerAddr)) - return vmcommon.UserError - } - - stakeConfig := s.getConfig() - if len(args.Arguments) != 1 { - s.eei.AddReturnMessage("number of arguments must be 1") - return vmcommon.UserError - } - - newMaxNodes := big.NewInt(0).SetBytes(args.Arguments[0]).Int64() - if newMaxNodes <= 0 { - s.eei.AddReturnMessage("new max number of nodes zero or negative") - return vmcommon.UserError - } - - if newMaxNodes < int64(s.minNumNodes) { - s.eei.AddReturnMessage("new max number of nodes less than min number of nodes") - return vmcommon.UserError - } - - prevMaxNumNodes := big.NewInt(stakeConfig.MaxNumNodes) - s.eei.Finish(prevMaxNumNodes.Bytes()) - stakeConfig.MaxNumNodes = newMaxNodes - s.setConfig(stakeConfig) - - return vmcommon.Ok -} - -func (s *stakingSC) isNodeJailedOrWithBadRating(registrationData *StakedDataV2_0, blsKey []byte) bool { - return registrationData.Jailed || s.eei.CanUnJail(blsKey) || s.eei.IsBadRating(blsKey) -} - -func (s *stakingSC) getWaitingListIndex(args *vmcommon.ContractCallInput) 
vmcommon.ReturnCode { - if !bytes.Equal(args.CallerAddr, s.stakeAccessAddr) { - s.eei.AddReturnMessage("this is only a view function") - return vmcommon.UserError - } - if len(args.Arguments) != 1 { - s.eei.AddReturnMessage("number of arguments must be equal to 1") - return vmcommon.UserError - } - - waitingElementKey := createWaitingListKey(args.Arguments[0]) - _, err := s.getWaitingListElement(waitingElementKey) - if err != nil { - s.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } - - waitingListHead, err := s.getWaitingListHead() - if err != nil { - s.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } - - if bytes.Equal(waitingElementKey, waitingListHead.FirstKey) { - s.eei.Finish([]byte(strconv.Itoa(1))) - return vmcommon.Ok - } - if bytes.Equal(waitingElementKey, waitingListHead.LastKey) { - s.eei.Finish([]byte(strconv.Itoa(int(waitingListHead.Length)))) - return vmcommon.Ok - } - - prevElement, err := s.getWaitingListElement(waitingListHead.FirstKey) - if err != nil { - s.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } - - index := uint32(2) - nextKey := make([]byte, len(waitingElementKey)) - copy(nextKey, prevElement.NextKey) - for len(nextKey) != 0 && index <= waitingListHead.Length { - if bytes.Equal(nextKey, waitingElementKey) { - s.eei.Finish([]byte(strconv.Itoa(int(index)))) - return vmcommon.Ok - } - - prevElement, err = s.getWaitingListElement(nextKey) - if err != nil { - s.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } - - if len(prevElement.NextKey) == 0 { - break - } - index++ - copy(nextKey, prevElement.NextKey) - } - - s.eei.AddReturnMessage("element in waiting list not found") - return vmcommon.UserError -} - -func (s *stakingSC) getWaitingListSize(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { - if args.CallValue.Cmp(zero) != 0 { - s.eei.AddReturnMessage(vm.TransactionValueMustBeZero) - return vmcommon.UserError - } - - err := s.eei.UseGas(s.gasCost.MetaChainSystemSCsCost.Get) - if err != nil { - s.eei.AddReturnMessage("insufficient gas") - return vmcommon.OutOfGas - } - - waitingListHead, err := s.getWaitingListHead() - if err != nil { - s.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } - - s.eei.Finish([]byte(strconv.Itoa(int(waitingListHead.Length)))) - return vmcommon.Ok -} - -func (s *stakingSC) getRewardAddress(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { - if args.CallValue.Cmp(zero) != 0 { - s.eei.AddReturnMessage(vm.TransactionValueMustBeZero) - return vmcommon.UserError - } - - stakedData, returnCode := s.getStakedDataIfExists(args) - if returnCode != vmcommon.Ok { - return returnCode - } - - s.eei.Finish([]byte(hex.EncodeToString(stakedData.RewardAddress))) - return vmcommon.Ok -} - -func (s *stakingSC) getStakedDataIfExists(args *vmcommon.ContractCallInput) (*StakedDataV2_0, vmcommon.ReturnCode) { - err := s.eei.UseGas(s.gasCost.MetaChainSystemSCsCost.Get) - if err != nil { - s.eei.AddReturnMessage("insufficient gas") - return nil, vmcommon.OutOfGas - } - if len(args.Arguments) != 1 { - s.eei.AddReturnMessage("number of arguments must be equal to 1") - return nil, vmcommon.UserError - } - stakedData, err := s.getOrCreateRegisteredData(args.Arguments[0]) - if err != nil { - s.eei.AddReturnMessage(err.Error()) - return nil, vmcommon.UserError - } - if len(stakedData.RewardAddress) == 0 { - s.eei.AddReturnMessage("blsKey not registered in staking sc") - return nil, vmcommon.UserError - } - - return stakedData, vmcommon.Ok -} - -func (s *stakingSC) 
getBLSKeyStatus(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { - if args.CallValue.Cmp(zero) != 0 { - s.eei.AddReturnMessage(vm.TransactionValueMustBeZero) - return vmcommon.UserError - } - - stakedData, returnCode := s.getStakedDataIfExists(args) - if returnCode != vmcommon.Ok { - return returnCode + stakedData, returnCode := s.getStakedDataIfExists(args) + if returnCode != vmcommon.Ok { + return returnCode } if stakedData.Jailed || s.eei.CanUnJail(args.Arguments[0]) { s.eei.Finish([]byte("jailed")) return vmcommon.Ok } - if stakedData.Waiting { - s.eei.Finish([]byte("queued")) - return vmcommon.Ok - } - if stakedData.Staked { - s.eei.Finish([]byte("staked")) - return vmcommon.Ok - } - - s.eei.Finish([]byte("unStaked")) - return vmcommon.Ok -} - -func (s *stakingSC) getRemainingUnbondPeriod(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { - if args.CallValue.Cmp(zero) != 0 { - s.eei.AddReturnMessage(vm.TransactionValueMustBeZero) - return vmcommon.UserError - } - - stakedData, returnCode := s.getStakedDataIfExists(args) - if returnCode != vmcommon.Ok { - return returnCode - } - if stakedData.UnStakedNonce == 0 { - s.eei.AddReturnMessage("not in unbond period") - return vmcommon.UserError - } - - currentNonce := s.eei.BlockChainHook().CurrentNonce() - passedNonce := currentNonce - stakedData.UnStakedNonce - if passedNonce >= s.unBondPeriod { - if s.enableEpochsHandler.IsFlagEnabled(common.StakingV2Flag) { - s.eei.Finish(zero.Bytes()) - } else { - s.eei.Finish([]byte("0")) - } - } else { - remaining := s.unBondPeriod - passedNonce - if s.enableEpochsHandler.IsFlagEnabled(common.StakingV2Flag) { - s.eei.Finish(big.NewInt(0).SetUint64(remaining).Bytes()) - } else { - s.eei.Finish([]byte(strconv.Itoa(int(remaining)))) - } - } - - return vmcommon.Ok -} - -func (s *stakingSC) getWaitingListRegisterNonceAndRewardAddress(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { - if !bytes.Equal(args.CallerAddr, s.stakeAccessAddr) { - s.eei.AddReturnMessage("this is only a view function") - return vmcommon.UserError - } - if len(args.Arguments) != 0 { - s.eei.AddReturnMessage("number of arguments must be equal to 0") - return vmcommon.UserError - } - - waitingListData, err := s.getFirstElementsFromWaitingList(math.MaxUint32) - if err != nil { - s.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } - if len(waitingListData.stakedDataList) == 0 { - s.eei.AddReturnMessage("no one in waitingList") - return vmcommon.UserError - } - - for index, stakedData := range waitingListData.stakedDataList { - s.eei.Finish(waitingListData.blsKeys[index]) - s.eei.Finish(stakedData.RewardAddress) - s.eei.Finish(big.NewInt(int64(stakedData.RegisterNonce)).Bytes()) - } - - return vmcommon.Ok -} - -func (s *stakingSC) setOwnersOnAddresses(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { - if !s.enableEpochsHandler.IsFlagEnabled(common.StakingV2Flag) { - s.eei.AddReturnMessage("invalid method to call") - return vmcommon.UserError - } - if !bytes.Equal(args.CallerAddr, s.endOfEpochAccessAddr) { - s.eei.AddReturnMessage("setOwnersOnAddresses function not allowed to be called by address " + string(args.CallerAddr)) - return vmcommon.UserError - } - if len(args.Arguments)%2 != 0 { - s.eei.AddReturnMessage("invalid number of arguments: expected an even number of arguments") - return vmcommon.UserError - } - for i := 0; i < len(args.Arguments); i += 2 { - stakedData, err := s.getOrCreateRegisteredData(args.Arguments[i]) - if err != nil { - s.eei.AddReturnMessage(err.Error()) - 
s.eei.AddReturnMessage(fmt.Sprintf("process stopped at index %d, bls key %s", i, hex.EncodeToString(args.Arguments[i]))) - return vmcommon.UserError - } - if len(stakedData.RewardAddress) == 0 { - log.Error("staking data does not exists", - "bls key", hex.EncodeToString(args.Arguments[i]), - "owner as hex", hex.EncodeToString(args.Arguments[i+1])) - continue - } - - stakedData.OwnerAddress = args.Arguments[i+1] - err = s.saveStakingData(args.Arguments[i], stakedData) - if err != nil { - s.eei.AddReturnMessage(err.Error()) - s.eei.AddReturnMessage(fmt.Sprintf("process stopped at index %d, bls key %s", i, hex.EncodeToString(args.Arguments[i]))) - return vmcommon.UserError - } - } - - return vmcommon.Ok -} - -func (s *stakingSC) getOwner(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { - if !s.enableEpochsHandler.IsFlagEnabled(common.StakingV2Flag) { - s.eei.AddReturnMessage("invalid method to call") - return vmcommon.UserError - } - if !bytes.Equal(args.CallerAddr, s.stakeAccessAddr) { - s.eei.AddReturnMessage("this is only a view function") - return vmcommon.UserError - } - if len(args.Arguments) < 1 { - s.eei.AddReturnMessage(fmt.Sprintf("invalid number of arguments: expected min %d, got %d", 1, len(args.Arguments))) - return vmcommon.UserError - } - - stakedData, errGet := s.getOrCreateRegisteredData(args.Arguments[0]) - if errGet != nil { - s.eei.AddReturnMessage(errGet.Error()) - return vmcommon.UserError + if stakedData.Waiting { + s.eei.Finish([]byte("queued")) + return vmcommon.Ok } - if len(stakedData.OwnerAddress) == 0 { - s.eei.AddReturnMessage("owner address is nil") - return vmcommon.UserError + if stakedData.Staked { + s.eei.Finish([]byte("staked")) + return vmcommon.Ok } - s.eei.Finish(stakedData.OwnerAddress) + s.eei.Finish([]byte("unStaked")) return vmcommon.Ok } @@ -1567,212 +923,117 @@ func (s *stakingSC) getTotalNumberOfRegisteredNodes(args *vmcommon.ContractCallI return vmcommon.UserError } - stakeConfig := s.getConfig() waitingListHead, err := s.getWaitingListHead() if err != nil { s.eei.AddReturnMessage(err.Error()) return vmcommon.UserError } + stakeConfig := s.getConfig() totalRegistered := stakeConfig.StakedNodes + stakeConfig.JailedNodes + int64(waitingListHead.Length) s.eei.Finish(big.NewInt(totalRegistered).Bytes()) return vmcommon.Ok } -func (s *stakingSC) resetLastUnJailedFromQueue(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { - if !s.enableEpochsHandler.IsFlagEnabled(common.CorrectLastUnJailedFlag) { - // backward compatibility - return vmcommon.UserError - } - if !bytes.Equal(args.CallerAddr, s.endOfEpochAccessAddr) { - s.eei.AddReturnMessage("stake nodes from waiting list can be called by endOfEpochAccess address only") - return vmcommon.UserError - } - if len(args.Arguments) != 0 { - s.eei.AddReturnMessage("number of arguments must be equal to 0") - return vmcommon.UserError - } - - waitingList, err := s.getWaitingListHead() - if err != nil { - s.eei.AddReturnMessage(err.Error()) +func (s *stakingSC) getRemainingUnbondPeriod(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { + if args.CallValue.Cmp(zero) != 0 { + s.eei.AddReturnMessage(vm.TransactionValueMustBeZero) return vmcommon.UserError } - if len(waitingList.LastJailedKey) == 0 { - return vmcommon.Ok + stakedData, returnCode := s.getStakedDataIfExists(args) + if returnCode != vmcommon.Ok { + return returnCode } - - waitingList.LastJailedKey = make([]byte, 0) - err = s.saveWaitingListHead(waitingList) - if err != nil { - s.eei.AddReturnMessage(err.Error()) + if 
stakedData.UnStakedNonce == 0 { + s.eei.AddReturnMessage("not in unbond period") return vmcommon.UserError } - return vmcommon.Ok -} - -func (s *stakingSC) cleanAdditionalQueueNotEnoughFunds( - waitingListData *waitingListReturnData, -) ([]string, map[string][][]byte, error) { - - listOfOwners := make([]string, 0) - mapOwnersUnStakedNodes := make(map[string][][]byte) - mapCheckedOwners := make(map[string]*validatorFundInfo) - for i := len(waitingListData.blsKeys) - 1; i >= 0; i-- { - stakedData := waitingListData.stakedDataList[i] - validatorInfo, err := s.checkValidatorFunds(mapCheckedOwners, stakedData.OwnerAddress, s.stakeValue) - if err != nil { - return nil, nil, err - } - if validatorInfo.numNodesToUnstake == 0 { - continue - } - - validatorInfo.numNodesToUnstake-- - blsKey := waitingListData.blsKeys[i] - err = s.removeFromWaitingList(blsKey) - if err != nil { - return nil, nil, err - } - - registrationData, err := s.getOrCreateRegisteredData(blsKey) - if err != nil { - return nil, nil, err - } - - registrationData.Staked = false - registrationData.UnStakedEpoch = s.eei.BlockChainHook().CurrentEpoch() - registrationData.UnStakedNonce = s.eei.BlockChainHook().CurrentNonce() - registrationData.Waiting = false - - err = s.saveStakingData(blsKey, registrationData) - if err != nil { - return nil, nil, err + currentNonce := s.eei.BlockChainHook().CurrentNonce() + passedNonce := currentNonce - stakedData.UnStakedNonce + if passedNonce >= s.unBondPeriod { + if s.enableEpochsHandler.IsFlagEnabled(common.StakingV2Flag) { + s.eei.Finish(zero.Bytes()) + } else { + s.eei.Finish([]byte("0")) } - - _, alreadyAdded := mapOwnersUnStakedNodes[string(stakedData.OwnerAddress)] - if !alreadyAdded { - listOfOwners = append(listOfOwners, string(stakedData.OwnerAddress)) + } else { + remaining := s.unBondPeriod - passedNonce + if s.enableEpochsHandler.IsFlagEnabled(common.StakingV2Flag) { + s.eei.Finish(big.NewInt(0).SetUint64(remaining).Bytes()) + } else { + s.eei.Finish([]byte(strconv.Itoa(int(remaining)))) } - - mapOwnersUnStakedNodes[string(stakedData.OwnerAddress)] = append(mapOwnersUnStakedNodes[string(stakedData.OwnerAddress)], blsKey) } - return listOfOwners, mapOwnersUnStakedNodes, nil + return vmcommon.Ok } -func (s *stakingSC) stakeNodesFromQueue(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { +func (s *stakingSC) setOwnersOnAddresses(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { if !s.enableEpochsHandler.IsFlagEnabled(common.StakingV2Flag) { s.eei.AddReturnMessage("invalid method to call") return vmcommon.UserError } if !bytes.Equal(args.CallerAddr, s.endOfEpochAccessAddr) { - s.eei.AddReturnMessage("stake nodes from waiting list can be called by endOfEpochAccess address only") - return vmcommon.UserError - } - if len(args.Arguments) != 1 { - s.eei.AddReturnMessage("number of arguments must be equal to 1") + s.eei.AddReturnMessage("setOwnersOnAddresses function not allowed to be called by address " + string(args.CallerAddr)) return vmcommon.UserError } - - numNodesToStake := big.NewInt(0).SetBytes(args.Arguments[0]).Uint64() - waitingListData, err := s.getFirstElementsFromWaitingList(math.MaxUint32) - if err != nil { - s.eei.AddReturnMessage(err.Error()) + if len(args.Arguments)%2 != 0 { + s.eei.AddReturnMessage("invalid number of arguments: expected an even number of arguments") return vmcommon.UserError } - if len(waitingListData.blsKeys) == 0 { - s.eei.AddReturnMessage("no nodes in queue") - return vmcommon.Ok - } - - nodePriceToUse := big.NewInt(0).Set(s.minNodePrice) - if 
s.enableEpochsHandler.IsFlagEnabled(common.CorrectLastUnJailedFlag) { - nodePriceToUse.Set(s.stakeValue) - } - - stakedNodes := uint64(0) - mapCheckedOwners := make(map[string]*validatorFundInfo) - for i, blsKey := range waitingListData.blsKeys { - stakedData := waitingListData.stakedDataList[i] - if stakedNodes >= numNodesToStake { - break - } - - validatorInfo, errCheck := s.checkValidatorFunds(mapCheckedOwners, stakedData.OwnerAddress, nodePriceToUse) - if errCheck != nil { - s.eei.AddReturnMessage(errCheck.Error()) - return vmcommon.UserError - } - if validatorInfo.numNodesToUnstake > 0 { - continue - } - - s.activeStakingFor(stakedData) - err = s.saveStakingData(blsKey, stakedData) + for i := 0; i < len(args.Arguments); i += 2 { + stakedData, err := s.getOrCreateRegisteredData(args.Arguments[i]) if err != nil { s.eei.AddReturnMessage(err.Error()) + s.eei.AddReturnMessage(fmt.Sprintf("process stopped at index %d, bls key %s", i, hex.EncodeToString(args.Arguments[i]))) return vmcommon.UserError } + if len(stakedData.RewardAddress) == 0 { + log.Error("staking data does not exists", + "bls key", hex.EncodeToString(args.Arguments[i]), + "owner as hex", hex.EncodeToString(args.Arguments[i+1])) + continue + } - // remove from waiting list - err = s.removeFromWaitingList(blsKey) + stakedData.OwnerAddress = args.Arguments[i+1] + err = s.saveStakingData(args.Arguments[i], stakedData) if err != nil { s.eei.AddReturnMessage(err.Error()) + s.eei.AddReturnMessage(fmt.Sprintf("process stopped at index %d, bls key %s", i, hex.EncodeToString(args.Arguments[i]))) return vmcommon.UserError } - - stakedNodes++ - // return the change key - s.eei.Finish(blsKey) - s.eei.Finish(stakedData.RewardAddress) } - s.addToStakedNodes(int64(stakedNodes)) - return vmcommon.Ok } -func (s *stakingSC) cleanAdditionalQueue(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { - if !s.enableEpochsHandler.IsFlagEnabled(common.CorrectLastUnJailedFlag) { +func (s *stakingSC) getOwner(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { + if !s.enableEpochsHandler.IsFlagEnabled(common.StakingV2Flag) { s.eei.AddReturnMessage("invalid method to call") return vmcommon.UserError } - if !bytes.Equal(args.CallerAddr, s.endOfEpochAccessAddr) { - s.eei.AddReturnMessage("stake nodes from waiting list can be called by endOfEpochAccess address only") + if !bytes.Equal(args.CallerAddr, s.stakeAccessAddr) { + s.eei.AddReturnMessage("this is only a view function") return vmcommon.UserError } - if len(args.Arguments) != 0 { - s.eei.AddReturnMessage("number of arguments must be 0") + if len(args.Arguments) < 1 { + s.eei.AddReturnMessage(fmt.Sprintf("invalid number of arguments: expected min %d, got %d", 1, len(args.Arguments))) return vmcommon.UserError } - waitingListData, err := s.getFirstElementsFromWaitingList(math.MaxUint32) - if err != nil { - s.eei.AddReturnMessage(err.Error()) + stakedData, errGet := s.getOrCreateRegisteredData(args.Arguments[0]) + if errGet != nil { + s.eei.AddReturnMessage(errGet.Error()) return vmcommon.UserError } - if len(waitingListData.blsKeys) == 0 { - s.eei.AddReturnMessage("no nodes in queue") - return vmcommon.Ok - } - - listOfOwners, mapOwnersAndBLSKeys, err := s.cleanAdditionalQueueNotEnoughFunds(waitingListData) - if err != nil { - s.eei.AddReturnMessage(err.Error()) + if len(stakedData.OwnerAddress) == 0 { + s.eei.AddReturnMessage("owner address is nil") return vmcommon.UserError } - for _, owner := range listOfOwners { - s.eei.Finish([]byte(owner)) - blsKeys := mapOwnersAndBLSKeys[owner] - for _, 
blsKey := range blsKeys { - s.eei.Finish(blsKey) - } - } - + s.eei.Finish(stakedData.OwnerAddress) return vmcommon.Ok } @@ -1894,193 +1155,6 @@ func (s *stakingSC) checkValidatorFunds( return validatorInfo, nil } -func (s *stakingSC) getFirstElementsFromWaitingList(numNodes uint32) (*waitingListReturnData, error) { - waitingListData := &waitingListReturnData{} - - waitingListHead, err := s.getWaitingListHead() - if err != nil { - return nil, err - } - if waitingListHead.Length == 0 { - return waitingListData, nil - } - - blsKeysToStake := make([][]byte, 0) - stakedDataList := make([]*StakedDataV2_0, 0) - index := uint32(1) - nextKey := make([]byte, len(waitingListHead.FirstKey)) - copy(nextKey, waitingListHead.FirstKey) - for len(nextKey) != 0 && index <= waitingListHead.Length && index <= numNodes { - element, errGet := s.getWaitingListElement(nextKey) - if errGet != nil { - return nil, errGet - } - - if bytes.Equal(nextKey, waitingListHead.LastJailedKey) { - waitingListData.afterLastjailed = true - } - - stakedData, errGet := s.getOrCreateRegisteredData(element.BLSPublicKey) - if errGet != nil { - return nil, errGet - } - - blsKeysToStake = append(blsKeysToStake, element.BLSPublicKey) - stakedDataList = append(stakedDataList, stakedData) - - if len(element.NextKey) == 0 { - break - } - index++ - copy(nextKey, element.NextKey) - } - - if numNodes >= waitingListHead.Length && len(blsKeysToStake) != int(waitingListHead.Length) { - log.Warn("mismatch length on waiting list elements in stakingSC.getFirstElementsFromWaitingList") - } - - waitingListData.blsKeys = blsKeysToStake - waitingListData.stakedDataList = stakedDataList - waitingListData.lastKey = nextKey - return waitingListData, nil -} - -func (s *stakingSC) fixWaitingListQueueSize(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { - if !s.enableEpochsHandler.IsFlagEnabled(common.CorrectFirstQueuedFlag) { - s.eei.AddReturnMessage("invalid method to call") - return vmcommon.UserError - } - - if args.CallValue.Cmp(zero) != 0 { - s.eei.AddReturnMessage(vm.TransactionValueMustBeZero) - return vmcommon.UserError - } - - err := s.eei.UseGas(s.gasCost.MetaChainSystemSCsCost.FixWaitingListSize) - if err != nil { - s.eei.AddReturnMessage("insufficient gas") - return vmcommon.OutOfGas - } - - waitingListHead, err := s.getWaitingListHead() - if err != nil { - s.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } - - if waitingListHead.Length <= 1 { - return vmcommon.Ok - } - - foundLastJailedKey := len(waitingListHead.LastJailedKey) == 0 - - index := uint32(1) - nextKey := make([]byte, len(waitingListHead.FirstKey)) - copy(nextKey, waitingListHead.FirstKey) - for len(nextKey) != 0 && index <= waitingListHead.Length { - element, errGet := s.getWaitingListElement(nextKey) - if errGet != nil { - s.eei.AddReturnMessage(errGet.Error()) - return vmcommon.UserError - } - - if bytes.Equal(waitingListHead.LastJailedKey, nextKey) { - foundLastJailedKey = true - } - - _, errGet = s.getOrCreateRegisteredData(element.BLSPublicKey) - if errGet != nil { - s.eei.AddReturnMessage(errGet.Error()) - return vmcommon.UserError - } - - if len(element.NextKey) == 0 { - break - } - index++ - copy(nextKey, element.NextKey) - } - - waitingListHead.Length = index - waitingListHead.LastKey = nextKey - if !foundLastJailedKey { - waitingListHead.LastJailedKey = make([]byte, 0) - } - - err = s.saveWaitingListHead(waitingListHead) - if err != nil { - s.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } - - return vmcommon.Ok -} - -func (s 
*stakingSC) addMissingNodeToQueue(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { - if !s.enableEpochsHandler.IsFlagEnabled(common.CorrectFirstQueuedFlag) { - s.eei.AddReturnMessage("invalid method to call") - return vmcommon.UserError - } - if args.CallValue.Cmp(zero) != 0 { - s.eei.AddReturnMessage(vm.TransactionValueMustBeZero) - return vmcommon.UserError - } - err := s.eei.UseGas(s.gasCost.MetaChainSystemSCsCost.FixWaitingListSize) - if err != nil { - s.eei.AddReturnMessage("insufficient gas") - return vmcommon.OutOfGas - } - if len(args.Arguments) != 1 { - s.eei.AddReturnMessage("invalid number of arguments") - return vmcommon.UserError - } - - blsKey := args.Arguments[0] - _, err = s.getWaitingListElement(createWaitingListKey(blsKey)) - if err != nil { - s.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } - - waitingListData, err := s.getFirstElementsFromWaitingList(math.MaxUint32) - if err != nil { - s.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } - - for _, keyInList := range waitingListData.blsKeys { - if bytes.Equal(keyInList, blsKey) { - s.eei.AddReturnMessage("key is in queue, not missing") - return vmcommon.UserError - } - } - - waitingList, err := s.getWaitingListHead() - if err != nil { - s.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } - - waitingList.Length += 1 - if waitingList.Length == 1 { - err = s.startWaitingList(waitingList, false, blsKey) - if err != nil { - s.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } - - return vmcommon.Ok - } - - err = s.addToEndOfTheList(waitingList, blsKey) - if err != nil { - s.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } - - return vmcommon.Ok -} - // CanUseContract returns true if contract can be used func (s *stakingSC) CanUseContract() bool { return true diff --git a/vm/systemSmartContracts/stakingWaitingList.go b/vm/systemSmartContracts/stakingWaitingList.go new file mode 100644 index 00000000000..e1d0ff00cb4 --- /dev/null +++ b/vm/systemSmartContracts/stakingWaitingList.go @@ -0,0 +1,1084 @@ +package systemSmartContracts + +import ( + "bytes" + "encoding/hex" + "fmt" + "math" + "math/big" + "strconv" + + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/vm" + vmcommon "github.com/multiversx/mx-chain-vm-common-go" +) + +const waitingListHeadKey = "waitingList" +const waitingElementPrefix = "w_" + +type waitingListReturnData struct { + blsKeys [][]byte + stakedDataList []*StakedDataV2_0 + lastKey []byte + afterLastJailed bool +} + +func (s *stakingSC) processStakeV1(blsKey []byte, registrationData *StakedDataV2_0, addFirst bool) error { + if registrationData.Staked { + return nil + } + + registrationData.RegisterNonce = s.eei.BlockChainHook().CurrentNonce() + if !s.canStake() { + s.eei.AddReturnMessage(fmt.Sprintf("staking is full key put into waiting list %s", hex.EncodeToString(blsKey))) + err := s.addToWaitingList(blsKey, addFirst) + if err != nil { + s.eei.AddReturnMessage("error while adding to waiting") + return err + } + registrationData.Waiting = true + s.eei.Finish([]byte{waiting}) + return nil + } + + err := s.removeFromWaitingList(blsKey) + if err != nil { + s.eei.AddReturnMessage("error while removing from waiting") + return err + } + + s.addToStakedNodes(1) + s.activeStakingFor(registrationData) + + return nil +} + +func (s *stakingSC) unStakeV1(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { + registrationData, retCode := s.checkUnStakeArgs(args) + if retCode != vmcommon.Ok { + 
return retCode + } + + var err error + if !registrationData.Staked { + registrationData.Waiting = false + err = s.removeFromWaitingList(args.Arguments[0]) + if err != nil { + s.eei.AddReturnMessage(err.Error()) + return vmcommon.UserError + } + err = s.saveStakingData(args.Arguments[0], registrationData) + if err != nil { + s.eei.AddReturnMessage("cannot save staking data: error " + err.Error()) + return vmcommon.UserError + } + + return vmcommon.Ok + } + + addOneFromQueue := !s.enableEpochsHandler.IsFlagEnabled(common.CorrectLastUnJailedFlag) || s.canStakeIfOneRemoved() + if addOneFromQueue { + _, err = s.moveFirstFromWaitingToStaked() + if err != nil { + s.eei.AddReturnMessage(err.Error()) + return vmcommon.UserError + } + } + + return s.tryUnStake(args.Arguments[0], registrationData) +} + +func (s *stakingSC) moveFirstFromWaitingToStakedIfNeeded(blsKey []byte) (bool, error) { + waitingElementKey := createWaitingListKey(blsKey) + _, err := s.getWaitingListElement(waitingElementKey) + if err == nil { + // node in waiting - remove from it - and that's it + return false, s.removeFromWaitingList(blsKey) + } + + return s.moveFirstFromWaitingToStaked() +} + +func (s *stakingSC) moveFirstFromWaitingToStaked() (bool, error) { + waitingList, err := s.getWaitingListHead() + if err != nil { + return false, err + } + if waitingList.Length == 0 { + return false, nil + } + elementInList, err := s.getWaitingListElement(waitingList.FirstKey) + if err != nil { + return false, err + } + err = s.removeFromWaitingList(elementInList.BLSPublicKey) + if err != nil { + return false, err + } + + nodeData, err := s.getOrCreateRegisteredData(elementInList.BLSPublicKey) + if err != nil { + return false, err + } + if len(nodeData.RewardAddress) == 0 || nodeData.Staked { + return false, vm.ErrInvalidWaitingList + } + + nodeData.Waiting = false + nodeData.Staked = true + nodeData.RegisterNonce = s.eei.BlockChainHook().CurrentNonce() + nodeData.StakedNonce = s.eei.BlockChainHook().CurrentNonce() + nodeData.UnStakedNonce = 0 + nodeData.UnStakedEpoch = common.DefaultUnstakedEpoch + + s.addToStakedNodes(1) + return true, s.saveStakingData(elementInList.BLSPublicKey, nodeData) +} + +func (s *stakingSC) addToWaitingList(blsKey []byte, addJailed bool) error { + inWaitingListKey := createWaitingListKey(blsKey) + marshaledData := s.eei.GetStorage(inWaitingListKey) + if len(marshaledData) != 0 { + return nil + } + + waitingList, err := s.getWaitingListHead() + if err != nil { + return err + } + + waitingList.Length += 1 + if waitingList.Length == 1 { + return s.startWaitingList(waitingList, addJailed, blsKey) + } + + if addJailed { + return s.insertAfterLastJailed(waitingList, blsKey) + } + + return s.addToEndOfTheList(waitingList, blsKey) +} + +func (s *stakingSC) startWaitingList( + waitingList *WaitingList, + addJailed bool, + blsKey []byte, +) error { + inWaitingListKey := createWaitingListKey(blsKey) + waitingList.FirstKey = inWaitingListKey + waitingList.LastKey = inWaitingListKey + if addJailed { + waitingList.LastJailedKey = inWaitingListKey + } + + elementInWaiting := &ElementInList{ + BLSPublicKey: blsKey, + PreviousKey: waitingList.LastKey, + NextKey: make([]byte, 0), + } + return s.saveElementAndList(inWaitingListKey, elementInWaiting, waitingList) +} + +func (s *stakingSC) addToEndOfTheList(waitingList *WaitingList, blsKey []byte) error { + inWaitingListKey := createWaitingListKey(blsKey) + oldLastKey := make([]byte, len(waitingList.LastKey)) + copy(oldLastKey, waitingList.LastKey) + + lastElement, err := 
s.getWaitingListElement(waitingList.LastKey) + if err != nil { + return err + } + lastElement.NextKey = inWaitingListKey + elementInWaiting := &ElementInList{ + BLSPublicKey: blsKey, + PreviousKey: oldLastKey, + NextKey: make([]byte, 0), + } + + err = s.saveWaitingListElement(oldLastKey, lastElement) + if err != nil { + return err + } + + waitingList.LastKey = inWaitingListKey + return s.saveElementAndList(inWaitingListKey, elementInWaiting, waitingList) +} + +func (s *stakingSC) insertAfterLastJailed( + waitingList *WaitingList, + blsKey []byte, +) error { + inWaitingListKey := createWaitingListKey(blsKey) + if len(waitingList.LastJailedKey) == 0 { + previousFirstKey := make([]byte, len(waitingList.FirstKey)) + copy(previousFirstKey, waitingList.FirstKey) + waitingList.FirstKey = inWaitingListKey + waitingList.LastJailedKey = inWaitingListKey + elementInWaiting := &ElementInList{ + BLSPublicKey: blsKey, + PreviousKey: inWaitingListKey, + NextKey: previousFirstKey, + } + + if s.enableEpochsHandler.IsFlagEnabled(common.CorrectFirstQueuedFlag) && len(previousFirstKey) > 0 { + previousFirstElement, err := s.getWaitingListElement(previousFirstKey) + if err != nil { + return err + } + previousFirstElement.PreviousKey = inWaitingListKey + err = s.saveWaitingListElement(previousFirstKey, previousFirstElement) + if err != nil { + return err + } + } + + return s.saveElementAndList(inWaitingListKey, elementInWaiting, waitingList) + } + + lastJailedElement, err := s.getWaitingListElement(waitingList.LastJailedKey) + if err != nil { + return err + } + + if bytes.Equal(waitingList.LastKey, waitingList.LastJailedKey) { + waitingList.LastJailedKey = inWaitingListKey + return s.addToEndOfTheList(waitingList, blsKey) + } + + firstNonJailedElement, err := s.getWaitingListElement(lastJailedElement.NextKey) + if err != nil { + return err + } + + elementInWaiting := &ElementInList{ + BLSPublicKey: blsKey, + PreviousKey: make([]byte, len(inWaitingListKey)), + NextKey: make([]byte, len(inWaitingListKey)), + } + copy(elementInWaiting.PreviousKey, waitingList.LastJailedKey) + copy(elementInWaiting.NextKey, lastJailedElement.NextKey) + + lastJailedElement.NextKey = inWaitingListKey + firstNonJailedElement.PreviousKey = inWaitingListKey + waitingList.LastJailedKey = inWaitingListKey + + err = s.saveWaitingListElement(elementInWaiting.PreviousKey, lastJailedElement) + if err != nil { + return err + } + err = s.saveWaitingListElement(elementInWaiting.NextKey, firstNonJailedElement) + if err != nil { + return err + } + err = s.saveWaitingListElement(inWaitingListKey, elementInWaiting) + if err != nil { + return err + } + return s.saveWaitingListHead(waitingList) +} + +func (s *stakingSC) saveElementAndList(key []byte, element *ElementInList, waitingList *WaitingList) error { + err := s.saveWaitingListElement(key, element) + if err != nil { + return err + } + + return s.saveWaitingListHead(waitingList) +} + +func (s *stakingSC) removeFromWaitingList(blsKey []byte) error { + inWaitingListKey := createWaitingListKey(blsKey) + marshaledData := s.eei.GetStorage(inWaitingListKey) + if len(marshaledData) == 0 { + return nil + } + s.eei.SetStorage(inWaitingListKey, nil) + + elementToRemove := &ElementInList{} + err := s.marshalizer.Unmarshal(elementToRemove, marshaledData) + if err != nil { + return err + } + + waitingList, err := s.getWaitingListHead() + if err != nil { + return err + } + if waitingList.Length == 0 { + return vm.ErrInvalidWaitingList + } + waitingList.Length -= 1 + if waitingList.Length == 0 { + 
s.eei.SetStorage([]byte(waitingListHeadKey), nil) + return nil + } + + // remove the first element + isFirstElementBeforeFix := !s.enableEpochsHandler.IsFlagEnabled(common.CorrectFirstQueuedFlag) && bytes.Equal(elementToRemove.PreviousKey, inWaitingListKey) + isFirstElementAfterFix := s.enableEpochsHandler.IsFlagEnabled(common.CorrectFirstQueuedFlag) && bytes.Equal(waitingList.FirstKey, inWaitingListKey) + if isFirstElementBeforeFix || isFirstElementAfterFix { + if bytes.Equal(inWaitingListKey, waitingList.LastJailedKey) { + waitingList.LastJailedKey = make([]byte, 0) + } + + nextElement, errGet := s.getWaitingListElement(elementToRemove.NextKey) + if errGet != nil { + return errGet + } + + nextElement.PreviousKey = elementToRemove.NextKey + waitingList.FirstKey = elementToRemove.NextKey + return s.saveElementAndList(elementToRemove.NextKey, nextElement, waitingList) + } + + if !s.enableEpochsHandler.IsFlagEnabled(common.CorrectLastUnJailedFlag) || bytes.Equal(inWaitingListKey, waitingList.LastJailedKey) { + waitingList.LastJailedKey = make([]byte, len(elementToRemove.PreviousKey)) + copy(waitingList.LastJailedKey, elementToRemove.PreviousKey) + } + + previousElement, _ := s.getWaitingListElement(elementToRemove.PreviousKey) + // search the other way around for the element in front + if s.enableEpochsHandler.IsFlagEnabled(common.CorrectFirstQueuedFlag) && previousElement == nil { + previousElement, err = s.searchPreviousFromHead(waitingList, inWaitingListKey, elementToRemove) + if err != nil { + return err + } + } + if previousElement == nil { + previousElement, err = s.getWaitingListElement(elementToRemove.PreviousKey) + if err != nil { + return err + } + } + if len(elementToRemove.NextKey) == 0 { + waitingList.LastKey = elementToRemove.PreviousKey + previousElement.NextKey = make([]byte, 0) + return s.saveElementAndList(elementToRemove.PreviousKey, previousElement, waitingList) + } + + nextElement, err := s.getWaitingListElement(elementToRemove.NextKey) + if err != nil { + return err + } + + nextElement.PreviousKey = elementToRemove.PreviousKey + previousElement.NextKey = elementToRemove.NextKey + + err = s.saveWaitingListElement(elementToRemove.NextKey, nextElement) + if err != nil { + return err + } + return s.saveElementAndList(elementToRemove.PreviousKey, previousElement, waitingList) +} + +func (s *stakingSC) searchPreviousFromHead(waitingList *WaitingList, inWaitingListKey []byte, elementToRemove *ElementInList) (*ElementInList, error) { + var previousElement *ElementInList + index := uint32(1) + nextKey := make([]byte, len(waitingList.FirstKey)) + copy(nextKey, waitingList.FirstKey) + for len(nextKey) != 0 && index <= waitingList.Length { + element, errGet := s.getWaitingListElement(nextKey) + if errGet != nil { + return nil, errGet + } + + if bytes.Equal(inWaitingListKey, element.NextKey) { + previousElement = element + elementToRemove.PreviousKey = createWaitingListKey(previousElement.BLSPublicKey) + return previousElement, nil + } + + nextKey = make([]byte, len(element.NextKey)) + if len(element.NextKey) == 0 { + break + } + index++ + copy(nextKey, element.NextKey) + } + return nil, vm.ErrElementNotFound +} + +func (s *stakingSC) getWaitingListElement(key []byte) (*ElementInList, error) { + marshaledData := s.eei.GetStorage(key) + if len(marshaledData) == 0 { + return nil, vm.ErrElementNotFound + } + + element := &ElementInList{} + err := s.marshalizer.Unmarshal(element, marshaledData) + if err != nil { + return nil, err + } + + return element, nil +} + +func (s *stakingSC) 
saveWaitingListElement(key []byte, element *ElementInList) error { + marshaledData, err := s.marshalizer.Marshal(element) + if err != nil { + return err + } + + s.eei.SetStorage(key, marshaledData) + return nil +} + +func (s *stakingSC) getWaitingListHead() (*WaitingList, error) { + waitingList := &WaitingList{ + FirstKey: make([]byte, 0), + LastKey: make([]byte, 0), + Length: 0, + LastJailedKey: make([]byte, 0), + } + marshaledData := s.eei.GetStorage([]byte(waitingListHeadKey)) + if len(marshaledData) == 0 { + return waitingList, nil + } + + err := s.marshalizer.Unmarshal(waitingList, marshaledData) + if err != nil { + return nil, err + } + + return waitingList, nil +} + +func (s *stakingSC) saveWaitingListHead(waitingList *WaitingList) error { + marshaledData, err := s.marshalizer.Marshal(waitingList) + if err != nil { + return err + } + + s.eei.SetStorage([]byte(waitingListHeadKey), marshaledData) + return nil +} + +func createWaitingListKey(blsKey []byte) []byte { + return []byte(waitingElementPrefix + string(blsKey)) +} + +func (s *stakingSC) switchJailedWithWaiting(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { + if s.enableEpochsHandler.IsFlagEnabled(common.StakingV4StartedFlag) && !s.enableEpochsHandler.IsFlagEnabled(common.StakingV4Step1Flag) { + s.eei.AddReturnMessage(vm.ErrWaitingListDisabled.Error()) + return vmcommon.UserError + } + if !bytes.Equal(args.CallerAddr, s.endOfEpochAccessAddr) { + s.eei.AddReturnMessage("switchJailedWithWaiting function not allowed to be called by address " + string(args.CallerAddr)) + return vmcommon.UserError + } + if len(args.Arguments) != 1 { + return vmcommon.UserError + } + + blsKey := args.Arguments[0] + registrationData, err := s.getOrCreateRegisteredData(blsKey) + if err != nil { + s.eei.AddReturnMessage(err.Error()) + return vmcommon.UserError + } + if len(registrationData.RewardAddress) == 0 { + s.eei.AddReturnMessage("no need to jail as not a validator") + return vmcommon.UserError + } + if !registrationData.Staked { + s.eei.AddReturnMessage("no need to jail as not a validator") + return vmcommon.UserError + } + if registrationData.Jailed { + s.eei.AddReturnMessage(vm.ErrBLSPublicKeyAlreadyJailed.Error()) + return vmcommon.UserError + } + switched, err := s.moveFirstFromWaitingToStakedIfNeeded(blsKey) + if err != nil { + s.eei.AddReturnMessage(err.Error()) + return vmcommon.UserError + } + + registrationData.NumJailed++ + registrationData.Jailed = true + registrationData.JailedNonce = s.eei.BlockChainHook().CurrentNonce() + + if !switched && !s.enableEpochsHandler.IsFlagEnabled(common.CorrectJailedNotUnStakedEmptyQueueFlag) { + s.eei.AddReturnMessage("did not switch as nobody in waiting, but jailed") + } else { + s.tryRemoveJailedNodeFromStaked(registrationData) + } + + err = s.saveStakingData(blsKey, registrationData) + if err != nil { + s.eei.AddReturnMessage("cannot save staking data: error " + err.Error()) + return vmcommon.UserError + } + + return vmcommon.Ok +} + +func (s *stakingSC) getWaitingListIndex(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { + if s.enableEpochsHandler.IsFlagEnabled(common.StakingV4StartedFlag) { + s.eei.AddReturnMessage(vm.ErrWaitingListDisabled.Error()) + return vmcommon.UserError + } + if !bytes.Equal(args.CallerAddr, s.stakeAccessAddr) { + s.eei.AddReturnMessage("this is only a view function") + return vmcommon.UserError + } + if len(args.Arguments) != 1 { + s.eei.AddReturnMessage("number of arguments must be equal to 1") + return vmcommon.UserError + } + + waitingElementKey := 
createWaitingListKey(args.Arguments[0]) + _, err := s.getWaitingListElement(waitingElementKey) + if err != nil { + s.eei.AddReturnMessage(err.Error()) + return vmcommon.UserError + } + + waitingListHead, err := s.getWaitingListHead() + if err != nil { + s.eei.AddReturnMessage(err.Error()) + return vmcommon.UserError + } + + if bytes.Equal(waitingElementKey, waitingListHead.FirstKey) { + s.eei.Finish([]byte(strconv.Itoa(1))) + return vmcommon.Ok + } + if bytes.Equal(waitingElementKey, waitingListHead.LastKey) { + s.eei.Finish([]byte(strconv.Itoa(int(waitingListHead.Length)))) + return vmcommon.Ok + } + + prevElement, err := s.getWaitingListElement(waitingListHead.FirstKey) + if err != nil { + s.eei.AddReturnMessage(err.Error()) + return vmcommon.UserError + } + + index := uint32(2) + nextKey := make([]byte, len(waitingElementKey)) + copy(nextKey, prevElement.NextKey) + for len(nextKey) != 0 && index <= waitingListHead.Length { + if bytes.Equal(nextKey, waitingElementKey) { + s.eei.Finish([]byte(strconv.Itoa(int(index)))) + return vmcommon.Ok + } + + prevElement, err = s.getWaitingListElement(nextKey) + if err != nil { + s.eei.AddReturnMessage(err.Error()) + return vmcommon.UserError + } + + if len(prevElement.NextKey) == 0 { + break + } + index++ + copy(nextKey, prevElement.NextKey) + } + + s.eei.AddReturnMessage("element in waiting list not found") + return vmcommon.UserError +} + +func (s *stakingSC) getWaitingListSize(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { + if s.enableEpochsHandler.IsFlagEnabled(common.StakingV4StartedFlag) { + s.eei.AddReturnMessage(vm.ErrWaitingListDisabled.Error()) + return vmcommon.UserError + } + + if args.CallValue.Cmp(zero) != 0 { + s.eei.AddReturnMessage(vm.TransactionValueMustBeZero) + return vmcommon.UserError + } + + err := s.eei.UseGas(s.gasCost.MetaChainSystemSCsCost.Get) + if err != nil { + s.eei.AddReturnMessage("insufficient gas") + return vmcommon.OutOfGas + } + + waitingListHead, err := s.getWaitingListHead() + if err != nil { + s.eei.AddReturnMessage(err.Error()) + return vmcommon.UserError + } + + s.eei.Finish([]byte(strconv.Itoa(int(waitingListHead.Length)))) + return vmcommon.Ok +} + +func (s *stakingSC) getWaitingListRegisterNonceAndRewardAddress(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { + if !bytes.Equal(args.CallerAddr, s.stakeAccessAddr) { + s.eei.AddReturnMessage("this is only a view function") + return vmcommon.UserError + } + if len(args.Arguments) != 0 { + s.eei.AddReturnMessage("number of arguments must be equal to 0") + return vmcommon.UserError + } + + waitingListData, err := s.getFirstElementsFromWaitingList(math.MaxUint32) + if err != nil { + s.eei.AddReturnMessage(err.Error()) + return vmcommon.UserError + } + if len(waitingListData.stakedDataList) == 0 { + s.eei.AddReturnMessage("no one in waitingList") + return vmcommon.UserError + } + + for index, stakedData := range waitingListData.stakedDataList { + s.eei.Finish(waitingListData.blsKeys[index]) + s.eei.Finish(stakedData.RewardAddress) + s.eei.Finish(big.NewInt(int64(stakedData.RegisterNonce)).Bytes()) + } + + return vmcommon.Ok +} + +func (s *stakingSC) resetLastUnJailedFromQueue(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { + if !s.enableEpochsHandler.IsFlagEnabled(common.CorrectLastUnJailedFlag) { + // backward compatibility + return vmcommon.UserError + } + if s.enableEpochsHandler.IsFlagEnabled(common.StakingV4StartedFlag) && !s.enableEpochsHandler.IsFlagEnabled(common.StakingV4Step1Flag) { + 
s.eei.AddReturnMessage(vm.ErrWaitingListDisabled.Error()) + return vmcommon.UserError + } + if !bytes.Equal(args.CallerAddr, s.endOfEpochAccessAddr) { + s.eei.AddReturnMessage("stake nodes from waiting list can be called by endOfEpochAccess address only") + return vmcommon.UserError + } + if len(args.Arguments) != 0 { + s.eei.AddReturnMessage("number of arguments must be equal to 0") + return vmcommon.UserError + } + + waitingList, err := s.getWaitingListHead() + if err != nil { + s.eei.AddReturnMessage(err.Error()) + return vmcommon.UserError + } + + if len(waitingList.LastJailedKey) == 0 { + return vmcommon.Ok + } + + waitingList.LastJailedKey = make([]byte, 0) + err = s.saveWaitingListHead(waitingList) + if err != nil { + s.eei.AddReturnMessage(err.Error()) + return vmcommon.UserError + } + + return vmcommon.Ok +} + +func (s *stakingSC) cleanAdditionalQueueNotEnoughFunds( + waitingListData *waitingListReturnData, +) ([]string, map[string][][]byte, error) { + + listOfOwners := make([]string, 0) + mapOwnersUnStakedNodes := make(map[string][][]byte) + mapCheckedOwners := make(map[string]*validatorFundInfo) + for i := len(waitingListData.blsKeys) - 1; i >= 0; i-- { + stakedData := waitingListData.stakedDataList[i] + validatorInfo, err := s.checkValidatorFunds(mapCheckedOwners, stakedData.OwnerAddress, s.stakeValue) + if err != nil { + return nil, nil, err + } + if validatorInfo.numNodesToUnstake == 0 { + continue + } + + validatorInfo.numNodesToUnstake-- + blsKey := waitingListData.blsKeys[i] + err = s.removeFromWaitingList(blsKey) + if err != nil { + return nil, nil, err + } + + registrationData, err := s.getOrCreateRegisteredData(blsKey) + if err != nil { + return nil, nil, err + } + + registrationData.Staked = false + registrationData.UnStakedEpoch = s.eei.BlockChainHook().CurrentEpoch() + registrationData.UnStakedNonce = s.eei.BlockChainHook().CurrentNonce() + registrationData.Waiting = false + + err = s.saveStakingData(blsKey, registrationData) + if err != nil { + return nil, nil, err + } + + _, alreadyAdded := mapOwnersUnStakedNodes[string(stakedData.OwnerAddress)] + if !alreadyAdded { + listOfOwners = append(listOfOwners, string(stakedData.OwnerAddress)) + } + + mapOwnersUnStakedNodes[string(stakedData.OwnerAddress)] = append(mapOwnersUnStakedNodes[string(stakedData.OwnerAddress)], blsKey) + } + + return listOfOwners, mapOwnersUnStakedNodes, nil +} + +func (s *stakingSC) stakeNodesFromQueue(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { + if !s.enableEpochsHandler.IsFlagEnabled(common.StakingV2Flag) { + s.eei.AddReturnMessage("invalid method to call") + return vmcommon.UserError + } + if s.enableEpochsHandler.IsFlagEnabled(common.StakingV4StartedFlag) && !s.enableEpochsHandler.IsFlagEnabled(common.StakingV4Step1Flag) { + s.eei.AddReturnMessage(vm.ErrWaitingListDisabled.Error()) + return vmcommon.UserError + } + if !bytes.Equal(args.CallerAddr, s.endOfEpochAccessAddr) { + s.eei.AddReturnMessage("stake nodes from waiting list can be called by endOfEpochAccess address only") + return vmcommon.UserError + } + if len(args.Arguments) != 1 { + s.eei.AddReturnMessage("number of arguments must be equal to 1") + return vmcommon.UserError + } + + numNodesToStake := big.NewInt(0).SetBytes(args.Arguments[0]).Uint64() + waitingListData, err := s.getFirstElementsFromWaitingList(math.MaxUint32) + if err != nil { + s.eei.AddReturnMessage(err.Error()) + return vmcommon.UserError + } + if len(waitingListData.blsKeys) == 0 { + s.eei.AddReturnMessage("no nodes in queue") + return vmcommon.Ok + } 
+ + nodePriceToUse := big.NewInt(0).Set(s.minNodePrice) + if s.enableEpochsHandler.IsFlagEnabled(common.CorrectLastUnJailedFlag) { + nodePriceToUse.Set(s.stakeValue) + } + + stakedNodes := uint64(0) + mapCheckedOwners := make(map[string]*validatorFundInfo) + for i, blsKey := range waitingListData.blsKeys { + stakedData := waitingListData.stakedDataList[i] + if stakedNodes >= numNodesToStake { + break + } + + validatorInfo, errCheck := s.checkValidatorFunds(mapCheckedOwners, stakedData.OwnerAddress, nodePriceToUse) + if errCheck != nil { + s.eei.AddReturnMessage(errCheck.Error()) + return vmcommon.UserError + } + if validatorInfo.numNodesToUnstake > 0 { + continue + } + + s.activeStakingFor(stakedData) + err = s.saveStakingData(blsKey, stakedData) + if err != nil { + s.eei.AddReturnMessage(err.Error()) + return vmcommon.UserError + } + + // remove from waiting list + err = s.removeFromWaitingList(blsKey) + if err != nil { + s.eei.AddReturnMessage(err.Error()) + return vmcommon.UserError + } + + stakedNodes++ + // return the change key + s.eei.Finish(blsKey) + s.eei.Finish(stakedData.RewardAddress) + } + + s.addToStakedNodes(int64(stakedNodes)) + + return vmcommon.Ok +} + +func (s *stakingSC) unStakeAllNodesFromQueue(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { + if !s.enableEpochsHandler.IsFlagEnabled(common.StakingV4Step1Flag) { + s.eei.AddReturnMessage("invalid method to call") + return vmcommon.UserError + } + if !bytes.Equal(args.CallerAddr, s.endOfEpochAccessAddr) { + s.eei.AddReturnMessage("stake nodes from waiting list can be called by endOfEpochAccess address only") + return vmcommon.UserError + } + if len(args.Arguments) != 0 { + s.eei.AddReturnMessage("number of arguments must be equal to 0") + return vmcommon.UserError + } + + waitingListData, err := s.getFirstElementsFromWaitingList(math.MaxUint32) + if err != nil { + s.eei.AddReturnMessage(err.Error()) + return vmcommon.UserError + } + if len(waitingListData.blsKeys) == 0 { + return vmcommon.Ok + } + + for i, blsKey := range waitingListData.blsKeys { + registrationData := waitingListData.stakedDataList[i] + + result := s.doUnStake(blsKey, registrationData) + if result != vmcommon.Ok { + return result + } + + // delete element from waiting list + inWaitingListKey := createWaitingListKey(blsKey) + s.eei.SetStorage(inWaitingListKey, nil) + } + + // delete waiting list head element + s.eei.SetStorage([]byte(waitingListHeadKey), nil) + + return vmcommon.Ok +} + +func (s *stakingSC) cleanAdditionalQueue(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { + if !s.enableEpochsHandler.IsFlagEnabled(common.CorrectLastUnJailedFlag) { + s.eei.AddReturnMessage("invalid method to call") + return vmcommon.UserError + } + if s.enableEpochsHandler.IsFlagEnabled(common.StakingV4StartedFlag) && !s.enableEpochsHandler.IsFlagEnabled(common.StakingV4Step1Flag) { + s.eei.AddReturnMessage(vm.ErrWaitingListDisabled.Error()) + return vmcommon.UserError + } + if !bytes.Equal(args.CallerAddr, s.endOfEpochAccessAddr) { + s.eei.AddReturnMessage("stake nodes from waiting list can be called by endOfEpochAccess address only") + return vmcommon.UserError + } + if len(args.Arguments) != 0 { + s.eei.AddReturnMessage("number of arguments must be 0") + return vmcommon.UserError + } + + waitingListData, err := s.getFirstElementsFromWaitingList(math.MaxUint32) + if err != nil { + s.eei.AddReturnMessage(err.Error()) + return vmcommon.UserError + } + if len(waitingListData.blsKeys) == 0 { + s.eei.AddReturnMessage("no nodes in queue") + return vmcommon.Ok 
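+	// The remaining logic delegates to cleanAdditionalQueueNotEnoughFunds, which walks the queue
+	// from the tail and unStakes every node whose owner no longer holds enough funds; the affected
+	// owners and their removed BLS keys are then reported back to the caller through eei.Finish.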
+ } + + listOfOwners, mapOwnersAndBLSKeys, err := s.cleanAdditionalQueueNotEnoughFunds(waitingListData) + if err != nil { + s.eei.AddReturnMessage(err.Error()) + return vmcommon.UserError + } + + for _, owner := range listOfOwners { + s.eei.Finish([]byte(owner)) + blsKeys := mapOwnersAndBLSKeys[owner] + for _, blsKey := range blsKeys { + s.eei.Finish(blsKey) + } + } + + return vmcommon.Ok +} + +func (s *stakingSC) getFirstElementsFromWaitingList(numNodes uint32) (*waitingListReturnData, error) { + waitingListData := &waitingListReturnData{} + + waitingListHead, err := s.getWaitingListHead() + if err != nil { + return nil, err + } + if waitingListHead.Length == 0 { + return waitingListData, nil + } + + blsKeysToStake := make([][]byte, 0) + stakedDataList := make([]*StakedDataV2_0, 0) + index := uint32(1) + nextKey := make([]byte, len(waitingListHead.FirstKey)) + copy(nextKey, waitingListHead.FirstKey) + for len(nextKey) != 0 && index <= waitingListHead.Length && index <= numNodes { + element, errGet := s.getWaitingListElement(nextKey) + if errGet != nil { + return nil, errGet + } + + if bytes.Equal(nextKey, waitingListHead.LastJailedKey) { + waitingListData.afterLastJailed = true + } + + stakedData, errGet := s.getOrCreateRegisteredData(element.BLSPublicKey) + if errGet != nil { + return nil, errGet + } + + blsKeysToStake = append(blsKeysToStake, element.BLSPublicKey) + stakedDataList = append(stakedDataList, stakedData) + + if len(element.NextKey) == 0 { + break + } + index++ + copy(nextKey, element.NextKey) + } + + if numNodes >= waitingListHead.Length && len(blsKeysToStake) != int(waitingListHead.Length) { + log.Warn("mismatch length on waiting list elements in stakingSC.getFirstElementsFromWaitingList") + } + + waitingListData.blsKeys = blsKeysToStake + waitingListData.stakedDataList = stakedDataList + waitingListData.lastKey = nextKey + return waitingListData, nil +} + +func (s *stakingSC) fixWaitingListQueueSize(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { + if !s.enableEpochsHandler.IsFlagEnabled(common.CorrectFirstQueuedFlag) { + s.eei.AddReturnMessage("invalid method to call") + return vmcommon.UserError + } + if s.enableEpochsHandler.IsFlagEnabled(common.StakingV4StartedFlag) { + s.eei.AddReturnMessage(vm.ErrWaitingListDisabled.Error()) + return vmcommon.UserError + } + + if args.CallValue.Cmp(zero) != 0 { + s.eei.AddReturnMessage(vm.TransactionValueMustBeZero) + return vmcommon.UserError + } + + err := s.eei.UseGas(s.gasCost.MetaChainSystemSCsCost.FixWaitingListSize) + if err != nil { + s.eei.AddReturnMessage("insufficient gas") + return vmcommon.OutOfGas + } + + waitingListHead, err := s.getWaitingListHead() + if err != nil { + s.eei.AddReturnMessage(err.Error()) + return vmcommon.UserError + } + + if waitingListHead.Length <= 1 { + return vmcommon.Ok + } + + foundLastJailedKey := len(waitingListHead.LastJailedKey) == 0 + + index := uint32(1) + nextKey := make([]byte, len(waitingListHead.FirstKey)) + copy(nextKey, waitingListHead.FirstKey) + for len(nextKey) != 0 && index <= waitingListHead.Length { + element, errGet := s.getWaitingListElement(nextKey) + if errGet != nil { + s.eei.AddReturnMessage(errGet.Error()) + return vmcommon.UserError + } + + if bytes.Equal(waitingListHead.LastJailedKey, nextKey) { + foundLastJailedKey = true + } + + _, errGet = s.getOrCreateRegisteredData(element.BLSPublicKey) + if errGet != nil { + s.eei.AddReturnMessage(errGet.Error()) + return vmcommon.UserError + } + + if len(element.NextKey) == 0 { + break + } + index++ + copy(nextKey, 
element.NextKey) + } + + waitingListHead.Length = index + waitingListHead.LastKey = nextKey + if !foundLastJailedKey { + waitingListHead.LastJailedKey = make([]byte, 0) + } + + err = s.saveWaitingListHead(waitingListHead) + if err != nil { + s.eei.AddReturnMessage(err.Error()) + return vmcommon.UserError + } + + return vmcommon.Ok +} + +func (s *stakingSC) addMissingNodeToQueue(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { + if !s.enableEpochsHandler.IsFlagEnabled(common.CorrectFirstQueuedFlag) { + s.eei.AddReturnMessage("invalid method to call") + return vmcommon.UserError + } + if s.enableEpochsHandler.IsFlagEnabled(common.StakingV4StartedFlag) { + s.eei.AddReturnMessage(vm.ErrWaitingListDisabled.Error()) + return vmcommon.UserError + } + if args.CallValue.Cmp(zero) != 0 { + s.eei.AddReturnMessage(vm.TransactionValueMustBeZero) + return vmcommon.UserError + } + err := s.eei.UseGas(s.gasCost.MetaChainSystemSCsCost.FixWaitingListSize) + if err != nil { + s.eei.AddReturnMessage("insufficient gas") + return vmcommon.OutOfGas + } + if len(args.Arguments) != 1 { + s.eei.AddReturnMessage("invalid number of arguments") + return vmcommon.UserError + } + + blsKey := args.Arguments[0] + _, err = s.getWaitingListElement(createWaitingListKey(blsKey)) + if err != nil { + s.eei.AddReturnMessage(err.Error()) + return vmcommon.UserError + } + + waitingListData, err := s.getFirstElementsFromWaitingList(math.MaxUint32) + if err != nil { + s.eei.AddReturnMessage(err.Error()) + return vmcommon.UserError + } + + for _, keyInList := range waitingListData.blsKeys { + if bytes.Equal(keyInList, blsKey) { + s.eei.AddReturnMessage("key is in queue, not missing") + return vmcommon.UserError + } + } + + waitingList, err := s.getWaitingListHead() + if err != nil { + s.eei.AddReturnMessage(err.Error()) + return vmcommon.UserError + } + + waitingList.Length += 1 + if waitingList.Length == 1 { + err = s.startWaitingList(waitingList, false, blsKey) + if err != nil { + s.eei.AddReturnMessage(err.Error()) + return vmcommon.UserError + } + + return vmcommon.Ok + } + + err = s.addToEndOfTheList(waitingList, blsKey) + if err != nil { + s.eei.AddReturnMessage(err.Error()) + return vmcommon.UserError + } + + return vmcommon.Ok +} diff --git a/vm/systemSmartContracts/staking_test.go b/vm/systemSmartContracts/staking_test.go index 7f46a417db5..53d78208cf1 100644 --- a/vm/systemSmartContracts/staking_test.go +++ b/vm/systemSmartContracts/staking_test.go @@ -18,6 +18,7 @@ import ( "github.com/multiversx/mx-chain-core-go/marshal" "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/process/smartContract/hooks" "github.com/multiversx/mx-chain-go/state" "github.com/multiversx/mx-chain-go/state/accounts" "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" @@ -53,6 +54,8 @@ func createMockStakingScArgumentsWithSystemScAddresses( MaxNumberOfNodesForStake: 10, ActivateBLSPubKeyMessageVerification: false, MinUnstakeTokensValue: "1", + StakeLimitPercentage: 1.0, + NodeLimitPercentage: 1.0, }, EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub( common.StakeFlag, @@ -95,6 +98,18 @@ func CreateVmContractCallInput() *vmcommon.ContractCallInput { } } +func createArgsVMContext() VMContextArgs { + return VMContextArgs{ + BlockChainHook: &mock.BlockChainHookStub{}, + CryptoHook: hooks.NewVMCryptoHook(), + InputParser: &mock.ArgumentParserMock{}, + ValidatorAccountsDB: &stateMock.AccountsStub{}, + ChanceComputer: &mock.RaterMock{}, 
+ EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + UserAccountsDB: &stateMock.AccountsStub{}, + } +} + func TestNewStakingSmartContract_NilSystemEIShouldErr(t *testing.T) { t.Parallel() @@ -998,6 +1013,93 @@ func TestStakingSc_ExecuteIsStaked(t *testing.T) { checkIsStaked(t, stakingSmartContract, callerAddress, stakerPubKey, vmcommon.UserError) } +func TestStakingSc_StakeWithStakingV4(t *testing.T) { + t.Parallel() + + enableEpochsHandler := &enableEpochsHandlerMock.EnableEpochsHandlerStub{} + enableEpochsHandler.AddActiveFlags(common.StakingV2Flag) + + args := createMockStakingScArguments() + stakingAccessAddress := []byte("stakingAccessAddress") + args.StakingAccessAddr = stakingAccessAddress + args.StakingSCConfig.MaxNumberOfNodesForStake = 4 + args.EnableEpochsHandler = enableEpochsHandler + + argsVMContext := createArgsVMContext() + argsVMContext.EnableEpochsHandler = enableEpochsHandler + eei, _ := NewVMContext(argsVMContext) + args.Eei = eei + + stakingSmartContract, _ := NewStakingSmartContract(args) + + for i := 0; i < 10; i++ { + idxStr := strconv.Itoa(i) + addr := []byte("addr" + idxStr) + doStake(t, stakingSmartContract, stakingAccessAddress, addr, addr) + + if uint64(i) < stakingSmartContract.maxNumNodes { + checkIsStaked(t, stakingSmartContract, addr, addr, vmcommon.Ok) + } else { + checkIsStaked(t, stakingSmartContract, addr, addr, vmcommon.UserError) + require.True(t, strings.Contains(eei.returnMessage, "staking is full")) + eei.returnMessage = "" + } + } + requireRegisteredNodes(t, stakingSmartContract, eei, 4, 6) + + doUnStake(t, stakingSmartContract, stakingAccessAddress, []byte("addr0"), []byte("addr0"), vmcommon.Ok) + requireRegisteredNodes(t, stakingSmartContract, eei, 4, 5) + + enableEpochsHandler.AddActiveFlags(common.StakingV4StartedFlag) + for i := 5; i < 10; i++ { + idxStr := strconv.Itoa(i) + addr := []byte("addr" + idxStr) + err := stakingSmartContract.removeFromWaitingList(addr) + require.Nil(t, err) + } + + for i := 10; i < 20; i++ { + idxStr := strconv.Itoa(i) + addr := []byte("addr" + idxStr) + doStake(t, stakingSmartContract, stakingAccessAddress, addr, addr) + checkIsStaked(t, stakingSmartContract, addr, addr, vmcommon.Ok) + } + requireRegisteredNodes(t, stakingSmartContract, eei, 14, 0) + + doUnStake(t, stakingSmartContract, stakingAccessAddress, []byte("addr10"), []byte("addr10"), vmcommon.Ok) + requireRegisteredNodes(t, stakingSmartContract, eei, 13, 0) +} + +func TestStakingSc_UnStakeNodeFromWaitingListAfterStakingV4ShouldError(t *testing.T) { + t.Parallel() + + enableEpochsHandler := &enableEpochsHandlerMock.EnableEpochsHandlerStub{} + enableEpochsHandler.AddActiveFlags(common.StakingV2Flag) + + args := createMockStakingScArguments() + stakingAccessAddress := []byte("stakingAccessAddress") + args.StakingAccessAddr = stakingAccessAddress + args.StakingSCConfig.MaxNumberOfNodesForStake = 2 + args.EnableEpochsHandler = enableEpochsHandler + + argsVMContext := createArgsVMContext() + argsVMContext.EnableEpochsHandler = enableEpochsHandler + eei, _ := NewVMContext(argsVMContext) + args.Eei = eei + + stakingSmartContract, _ := NewStakingSmartContract(args) + + doStake(t, stakingSmartContract, stakingAccessAddress, []byte("address0"), []byte("address0")) + doStake(t, stakingSmartContract, stakingAccessAddress, []byte("address1"), []byte("address1")) + doStake(t, stakingSmartContract, stakingAccessAddress, []byte("address2"), []byte("address2")) + requireRegisteredNodes(t, stakingSmartContract, eei, 2, 1) + + 
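+	// Once StakingV4StartedFlag is activated below, unStaking a node that still sits in the
+	// waiting list is no longer allowed: doUnStake is expected to fail with vm.ErrWaitingListDisabled.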
enableEpochsHandler.AddActiveFlags(common.StakingV4StartedFlag) + eei.returnMessage = "" + doUnStake(t, stakingSmartContract, stakingAccessAddress, []byte("address2"), []byte("address2"), vmcommon.ExecutionFailed) + require.Equal(t, eei.returnMessage, vm.ErrWaitingListDisabled.Error()) +} + func TestStakingSc_StakeWithV1ShouldWork(t *testing.T) { t.Parallel() @@ -1161,14 +1263,7 @@ func TestStakingSc_ExecuteStakeStakeJailAndSwitch(t *testing.T) { _ = json.Unmarshal(marshaledData, stakedData) assert.True(t, stakedData.Jailed) assert.True(t, stakedData.Staked) - - arguments.Function = "getTotalNumberOfRegisteredNodes" - arguments.Arguments = [][]byte{} - retCode = stakingSmartContract.Execute(arguments) - assert.Equal(t, retCode, vmcommon.Ok) - - lastOutput := eei.output[len(eei.output)-1] - assert.Equal(t, lastOutput, []byte{2}) + requireTotalNumberOfRegisteredNodes(t, stakingSmartContract, eei, big.NewInt(2)) } func TestStakingSc_ExecuteStakeStakeJailAndSwitchWithBoundaries(t *testing.T) { @@ -1305,14 +1400,7 @@ func TestStakingSc_ExecuteStakeStakeJailAndSwitchWithBoundaries(t *testing.T) { _ = json.Unmarshal(marshaledData, stakedData) assert.Equal(t, tt.shouldBeJailed, stakedData.Jailed) assert.Equal(t, tt.shouldBeStaked, stakedData.Staked) - - arguments.Function = "getTotalNumberOfRegisteredNodes" - arguments.Arguments = [][]byte{} - retCode = stakingSmartContract.Execute(arguments) - assert.Equal(t, vmcommon.Ok, retCode) - - lastOutput := eei.output[len(eei.output)-1] - assert.Equal(t, []byte{byte(tt.remainingStakedNodesNumber)}, lastOutput) + requireTotalNumberOfRegisteredNodes(t, stakingSmartContract, eei, big.NewInt(int64(tt.remainingStakedNodesNumber))) }) } } @@ -1447,14 +1535,7 @@ func TestStakingSc_ExecuteStakeStakeStakeJailJailUnJailTwice(t *testing.T) { doGetWaitingListSize(t, stakingSmartContract, eei, 2) outPut = doGetWaitingListRegisterNonceAndRewardAddress(t, stakingSmartContract, eei) assert.Equal(t, 6, len(outPut)) - - arguments.Function = "getTotalNumberOfRegisteredNodes" - arguments.Arguments = [][]byte{} - retCode = stakingSmartContract.Execute(arguments) - assert.Equal(t, retCode, vmcommon.Ok) - - lastOutput := eei.output[len(eei.output)-1] - assert.Equal(t, lastOutput, []byte{4}) + requireTotalNumberOfRegisteredNodes(t, stakingSmartContract, eei, big.NewInt(4)) } func TestStakingSc_ExecuteStakeUnStakeJailCombinations(t *testing.T) { @@ -3143,7 +3224,7 @@ func doGetStatus(t *testing.T, sc *stakingSC, eei *vmContext, blsKey []byte, exp assert.Equal(t, vmcommon.Ok, retCode) lastOutput := eei.output[len(eei.output)-1] - assert.True(t, bytes.Equal(lastOutput, []byte(expectedStatus))) + assert.Equal(t, expectedStatus, string(lastOutput)) } func doGetWaitingListSize(t *testing.T, sc *stakingSC, eei *vmContext, expectedSize int) { @@ -3337,6 +3418,150 @@ func TestStakingSc_fixMissingNodeAddOneNodeOnly(t *testing.T) { assert.Equal(t, waitingListData.blsKeys[0], blsKey) } +func TestStakingSC_StakingV4Flags(t *testing.T) { + t.Parallel() + + enableEpochsHandler := &enableEpochsHandlerMock.EnableEpochsHandlerStub{} + enableEpochsHandler.AddActiveFlags(common.StakeFlag) + enableEpochsHandler.AddActiveFlags(common.CorrectLastUnJailedFlag) + enableEpochsHandler.AddActiveFlags(common.CorrectFirstQueuedFlag) + enableEpochsHandler.AddActiveFlags(common.CorrectJailedNotUnStakedEmptyQueueFlag) + enableEpochsHandler.AddActiveFlags(common.ValidatorToDelegationFlag) + enableEpochsHandler.AddActiveFlags(common.StakingV4Step1Flag) + enableEpochsHandler.AddActiveFlags(common.StakingV2Flag) + 
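+	// The test starts in the staking v4 step 1 (init) epoch: StakingV4Step1Flag and
+	// StakingV4StartedFlag are both active; Step1Flag is removed later in the test to cover the
+	// subsequent staking v4 epochs, where the queue-related endpoints are rejected altogether.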
enableEpochsHandler.AddActiveFlags(common.StakingV4StartedFlag) + + argsVMContext := createArgsVMContext() + argsVMContext.EnableEpochsHandler = enableEpochsHandler + eei, _ := NewVMContext(argsVMContext) + + args := createMockStakingScArguments() + args.Eei = eei + args.EnableEpochsHandler = enableEpochsHandler + stakingSmartContract, _ := NewStakingSmartContract(args) + + // Functions which are not allowed starting STAKING V4 INIT + arguments := CreateVmContractCallInput() + arguments.Function = "getQueueIndex" + retCode := stakingSmartContract.Execute(arguments) + require.Equal(t, vmcommon.UserError, retCode) + require.Equal(t, vm.ErrWaitingListDisabled.Error(), eei.returnMessage) + + eei.CleanCache() + arguments.Function = "getQueueSize" + retCode = stakingSmartContract.Execute(arguments) + require.Equal(t, vmcommon.UserError, retCode) + require.Equal(t, vm.ErrWaitingListDisabled.Error(), eei.returnMessage) + + eei.CleanCache() + arguments.Function = "fixWaitingListQueueSize" + retCode = stakingSmartContract.Execute(arguments) + require.Equal(t, vmcommon.UserError, retCode) + require.Equal(t, vm.ErrWaitingListDisabled.Error(), eei.returnMessage) + + eei.CleanCache() + arguments.Function = "addMissingNodeToQueue" + retCode = stakingSmartContract.Execute(arguments) + require.Equal(t, vmcommon.UserError, retCode) + require.Equal(t, vm.ErrWaitingListDisabled.Error(), eei.returnMessage) + + // Functions which are allowed to be called by systemSC at the end of the epoch in epoch = STAKING V4 INIT + eei.CleanCache() + arguments.Function = "switchJailedWithWaiting" + retCode = stakingSmartContract.Execute(arguments) + require.Equal(t, vmcommon.UserError, retCode) + require.True(t, strings.Contains(eei.returnMessage, "function not allowed to be called by address")) + + eei.CleanCache() + arguments.Function = "resetLastUnJailedFromQueue" + retCode = stakingSmartContract.Execute(arguments) + require.Equal(t, vmcommon.UserError, retCode) + require.True(t, strings.Contains(eei.returnMessage, "can be called by endOfEpochAccess address only")) + + eei.CleanCache() + arguments.Function = "stakeNodesFromQueue" + retCode = stakingSmartContract.Execute(arguments) + require.Equal(t, vmcommon.UserError, retCode) + require.True(t, strings.Contains(eei.returnMessage, "can be called by endOfEpochAccess address only")) + + eei.CleanCache() + arguments.Function = "cleanAdditionalQueue" + retCode = stakingSmartContract.Execute(arguments) + require.Equal(t, vmcommon.UserError, retCode) + require.True(t, strings.Contains(eei.returnMessage, "can be called by endOfEpochAccess address only")) + + enableEpochsHandler.RemoveActiveFlags(common.StakingV4Step1Flag) + // All functions from above are not allowed anymore starting STAKING V4 epoch + eei.CleanCache() + arguments.Function = "getQueueIndex" + retCode = stakingSmartContract.Execute(arguments) + require.Equal(t, vmcommon.UserError, retCode) + require.Equal(t, vm.ErrWaitingListDisabled.Error(), eei.returnMessage) + + eei.CleanCache() + arguments.Function = "getQueueSize" + retCode = stakingSmartContract.Execute(arguments) + require.Equal(t, vmcommon.UserError, retCode) + require.Equal(t, vm.ErrWaitingListDisabled.Error(), eei.returnMessage) + + eei.CleanCache() + arguments.Function = "fixWaitingListQueueSize" + retCode = stakingSmartContract.Execute(arguments) + require.Equal(t, vmcommon.UserError, retCode) + require.Equal(t, vm.ErrWaitingListDisabled.Error(), eei.returnMessage) + + eei.CleanCache() + arguments.Function = "addMissingNodeToQueue" + retCode = 
stakingSmartContract.Execute(arguments) + require.Equal(t, vmcommon.UserError, retCode) + require.Equal(t, vm.ErrWaitingListDisabled.Error(), eei.returnMessage) + + eei.CleanCache() + arguments.Function = "switchJailedWithWaiting" + retCode = stakingSmartContract.Execute(arguments) + require.Equal(t, vmcommon.UserError, retCode) + require.Equal(t, vm.ErrWaitingListDisabled.Error(), eei.returnMessage) + + eei.CleanCache() + arguments.Function = "resetLastUnJailedFromQueue" + retCode = stakingSmartContract.Execute(arguments) + require.Equal(t, vmcommon.UserError, retCode) + require.Equal(t, vm.ErrWaitingListDisabled.Error(), eei.returnMessage) + + eei.CleanCache() + arguments.Function = "stakeNodesFromQueue" + retCode = stakingSmartContract.Execute(arguments) + require.Equal(t, vmcommon.UserError, retCode) + require.Equal(t, vm.ErrWaitingListDisabled.Error(), eei.returnMessage) + + eei.CleanCache() + arguments.Function = "cleanAdditionalQueue" + retCode = stakingSmartContract.Execute(arguments) + require.Equal(t, vmcommon.UserError, retCode) + require.Equal(t, vm.ErrWaitingListDisabled.Error(), eei.returnMessage) +} + +func requireRegisteredNodes(t *testing.T, stakingSC *stakingSC, eei *vmContext, stakedNodes int64, waitingListNodes uint32) { + stakeConfig := stakingSC.getConfig() + waitingList, _ := stakingSC.getWaitingListHead() + require.Equal(t, stakedNodes, stakeConfig.StakedNodes) + require.Equal(t, waitingListNodes, waitingList.Length) + + requireTotalNumberOfRegisteredNodes(t, stakingSC, eei, big.NewInt(stakedNodes+int64(waitingListNodes))) +} + +func requireTotalNumberOfRegisteredNodes(t *testing.T, stakingSC *stakingSC, eei *vmContext, expectedRegisteredNodes *big.Int) { + arguments := CreateVmContractCallInput() + arguments.Function = "getTotalNumberOfRegisteredNodes" + arguments.Arguments = [][]byte{} + + retCode := stakingSC.Execute(arguments) + lastOutput := eei.output[len(eei.output)-1] + noOfRegisteredNodes := big.NewInt(0).SetBytes(lastOutput) + require.Equal(t, retCode, vmcommon.Ok) + require.Equal(t, expectedRegisteredNodes, noOfRegisteredNodes) +} + func TestStakingSc_fixMissingNodeAddAsLast(t *testing.T) { t.Parallel() @@ -3366,3 +3591,118 @@ func TestStakingSc_fixMissingNodeAddAsLast(t *testing.T) { assert.Equal(t, len(waitingListData.blsKeys), 4) assert.Equal(t, waitingListData.blsKeys[3], blsKey) } + +func TestStakingSC_UnStakeAllFromQueueErrors(t *testing.T) { + t.Parallel() + + blockChainHook := &mock.BlockChainHookStub{} + blockChainHook.GetStorageDataCalled = func(accountsAddress []byte, index []byte) ([]byte, uint32, error) { + return nil, 0, nil + } + + eei := createDefaultEei() + eei.blockChainHook = blockChainHook + eei.SetSCAddress([]byte("addr")) + + stakingAccessAddress := vm.ValidatorSCAddress + args := createMockStakingScArguments() + args.StakingAccessAddr = stakingAccessAddress + args.StakingSCConfig.MaxNumberOfNodesForStake = 1 + enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) + + args.Eei = eei + args.StakingSCConfig.UnBondPeriod = 100 + sc, _ := NewStakingSmartContract(args) + + vmInput := CreateVmContractCallInput() + vmInput.Function = "unStakeAllNodesFromQueue" + + returnCode := sc.Execute(vmInput) + require.Equal(t, returnCode, vmcommon.UserError) + require.Equal(t, eei.returnMessage, "invalid method to call") + + eei.returnMessage = "" + enableEpochsHandler.AddActiveFlags(common.StakingV4Step1Flag) + returnCode = sc.Execute(vmInput) + require.Equal(t, returnCode, vmcommon.UserError) + 
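+	// the Step1 flag gate is now passed, but the caller is still not the endOfEpochAccess
+	// address, so the dedicated access-control message is expected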
require.Equal(t, eei.returnMessage, "stake nodes from waiting list can be called by endOfEpochAccess address only") + + eei.returnMessage = "" + vmInput.CallerAddr = []byte("endOfEpoch") + vmInput.Arguments = [][]byte{{1}} + returnCode = sc.Execute(vmInput) + require.Equal(t, returnCode, vmcommon.UserError) + require.Equal(t, "number of arguments must be equal to 0", eei.returnMessage) + + vmInput.Arguments = [][]byte{} + returnCode = sc.Execute(vmInput) + require.Equal(t, returnCode, vmcommon.Ok) +} + +func TestStakingSc_UnStakeAllFromQueue(t *testing.T) { + t.Parallel() + + blockChainHook := &mock.BlockChainHookStub{} + blockChainHook.GetStorageDataCalled = func(accountsAddress []byte, index []byte) ([]byte, uint32, error) { + return nil, 0, nil + } + + eei := createDefaultEei() + eei.blockChainHook = blockChainHook + eei.SetSCAddress([]byte("addr")) + + stakingAccessAddress := vm.ValidatorSCAddress + args := createMockStakingScArguments() + args.StakingAccessAddr = stakingAccessAddress + args.StakingSCConfig.MaxNumberOfNodesForStake = 1 + enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) + args.Eei = eei + args.StakingSCConfig.UnBondPeriod = 100 + stakingSmartContract, _ := NewStakingSmartContract(args) + + stakerAddress := []byte("stakerAddr") + + blockChainHook.CurrentNonceCalled = func() uint64 { + return 1 + } + + // do stake should work + doStake(t, stakingSmartContract, stakingAccessAddress, stakerAddress, []byte("firstKey ")) + doStake(t, stakingSmartContract, stakingAccessAddress, stakerAddress, []byte("secondKey")) + doStake(t, stakingSmartContract, stakingAccessAddress, stakerAddress, []byte("thirdKey ")) + doStake(t, stakingSmartContract, stakingAccessAddress, stakerAddress, []byte("fourthKey")) + + waitingReturn := doGetWaitingListRegisterNonceAndRewardAddress(t, stakingSmartContract, eei) + assert.Equal(t, len(waitingReturn), 9) + + arguments := CreateVmContractCallInput() + validatorData := &ValidatorDataV2{ + TotalStakeValue: big.NewInt(400), + TotalUnstaked: big.NewInt(0), + RewardAddress: stakerAddress, + BlsPubKeys: [][]byte{[]byte("firstKey "), []byte("secondKey"), []byte("thirdKey "), []byte("fourthKey")}, + } + arguments.CallerAddr = stakingSmartContract.endOfEpochAccessAddr + marshaledData, _ := stakingSmartContract.marshalizer.Marshal(validatorData) + eei.SetStorageForAddress(vm.ValidatorSCAddress, stakerAddress, marshaledData) + + enableEpochsHandler.AddActiveFlags(common.StakingV4Step1Flag) + enableEpochsHandler.AddActiveFlags(common.StakingV4StartedFlag) + arguments.Function = "unStakeAllNodesFromQueue" + retCode := stakingSmartContract.Execute(arguments) + assert.Equal(t, retCode, vmcommon.Ok) + + assert.Equal(t, len(eei.GetStorage([]byte(waitingListHeadKey))), 0) + newHead, _ := stakingSmartContract.getWaitingListHead() + assert.Equal(t, uint32(0), newHead.Length) // no entries in the queue list + + doGetStatus(t, stakingSmartContract, eei, []byte("secondKey"), "unStaked") + + // stake them again - as they were deleted from waiting list + doStake(t, stakingSmartContract, stakingAccessAddress, stakerAddress, []byte("thirdKey ")) + doStake(t, stakingSmartContract, stakingAccessAddress, stakerAddress, []byte("fourthKey")) + + // surprisingly, the queue works again as we did not activate the staking v4 + doGetStatus(t, stakingSmartContract, eei, []byte("thirdKey "), "staked") + doGetStatus(t, stakingSmartContract, eei, []byte("fourthKey"), "staked") +} diff --git a/vm/systemSmartContracts/validator.go 
b/vm/systemSmartContracts/validator.go index 86350b5ef34..37799ccc447 100644 --- a/vm/systemSmartContracts/validator.go +++ b/vm/systemSmartContracts/validator.go @@ -21,6 +21,8 @@ import ( const unJailedFunds = "unJailFunds" const unStakeUnBondPauseKey = "unStakeUnBondPause" +const minPercentage = 0.0001 +const numberOfNodesTooHigh = "number of nodes too high, no new nodes activated" var zero = big.NewInt(0) @@ -51,6 +53,9 @@ type validatorSC struct { governanceSCAddress []byte shardCoordinator sharding.Coordinator enableEpochsHandler common.EnableEpochsHandler + nodesCoordinator vm.NodesCoordinator + totalStakeLimit *big.Int + nodeLimitPercentage float64 } // ArgsValidatorSmartContract is the arguments structure to create a new ValidatorSmartContract @@ -69,6 +74,7 @@ type ArgsValidatorSmartContract struct { GovernanceSCAddress []byte ShardCoordinator sharding.Coordinator EnableEpochsHandler common.EnableEpochsHandler + NodesCoordinator vm.NodesCoordinator } // NewValidatorSmartContract creates an validator smart contract @@ -120,6 +126,15 @@ func NewValidatorSmartContract( if err != nil { return nil, err } + if check.IfNil(args.NodesCoordinator) { + return nil, fmt.Errorf("%w in validatorSC", vm.ErrNilNodesCoordinator) + } + if args.StakingSCConfig.NodeLimitPercentage < minPercentage { + return nil, fmt.Errorf("%w in validatorSC", vm.ErrInvalidNodeLimitPercentage) + } + if args.StakingSCConfig.StakeLimitPercentage < minPercentage { + return nil, fmt.Errorf("%w in validatorSC", vm.ErrInvalidStakeLimitPercentage) + } baseConfig := ValidatorConfig{ TotalSupply: big.NewInt(0).Set(args.GenesisTotalSupply), @@ -151,7 +166,7 @@ func NewValidatorSmartContract( return nil, vm.ErrInvalidMinCreationDeposit } - return &validatorSC{ + reg := &validatorSC{ eei: args.Eei, unBondPeriod: args.StakingSCConfig.UnBondPeriod, unBondPeriodInEpochs: args.StakingSCConfig.UnBondPeriodInEpochs, @@ -169,7 +184,16 @@ func NewValidatorSmartContract( governanceSCAddress: args.GovernanceSCAddress, shardCoordinator: args.ShardCoordinator, enableEpochsHandler: args.EnableEpochsHandler, - }, nil + nodeLimitPercentage: args.StakingSCConfig.NodeLimitPercentage, + nodesCoordinator: args.NodesCoordinator, + } + + reg.totalStakeLimit = core.GetIntTrimmedPercentageOfValue(args.GenesisTotalSupply, args.StakingSCConfig.StakeLimitPercentage) + if reg.totalStakeLimit.Cmp(baseConfig.NodePrice) < 0 { + return nil, fmt.Errorf("%w, value is %f", vm.ErrInvalidStakeLimitPercentage, args.StakingSCConfig.StakeLimitPercentage) + } + + return reg, nil } // Execute calls one of the functions from the validator smart contract and runs the code according to the input @@ -388,11 +412,7 @@ func (v *validatorSC) unJail(args *vmcommon.ContractCallInput) vmcommon.ReturnCo } if transferBack.Cmp(zero) > 0 { - err = v.eei.Transfer(args.CallerAddr, args.RecipientAddr, transferBack, nil, 0) - if err != nil { - v.eei.AddReturnMessage("transfer error on unJail function") - return vmcommon.UserError - } + v.eei.Transfer(args.CallerAddr, args.RecipientAddr, transferBack, nil, 0) } finalUnJailFunds := big.NewInt(0).Sub(args.CallValue, transferBack) @@ -628,7 +648,12 @@ func (v *validatorSC) registerBLSKeys( return nil, nil, err } + newlyAddedKeys := make([][]byte, 0) for _, blsKey := range newKeys { + if v.isNumberOfNodesTooHigh(len(registrationData.BlsPubKeys) + 1) { + break + } + vmOutput, errExec := v.executeOnStakingSC([]byte("register@" + hex.EncodeToString(blsKey) + "@" + hex.EncodeToString(registrationData.RewardAddress) + "@" + @@ -649,9 +674,10 @@ 
func (v *validatorSC) registerBLSKeys( } registrationData.BlsPubKeys = append(registrationData.BlsPubKeys, blsKey) + newlyAddedKeys = append(newlyAddedKeys, blsKey) } - return blsKeys, newKeys, nil + return blsKeys, newlyAddedKeys, nil } func (v *validatorSC) updateStakeValue(registrationData *ValidatorDataV2, caller []byte) vmcommon.ReturnCode { @@ -796,6 +822,11 @@ func (v *validatorSC) reStakeUnStakedNodes(args *vmcommon.ContractCallInput) vmc return vmcommon.UserError } + if v.isNumberOfNodesTooHigh(len(registrationData.BlsPubKeys)) { + v.eei.AddReturnMessage("number of nodes is too high") + return vmcommon.UserError + } + numQualified := big.NewInt(0).Div(registrationData.TotalStakeValue, validatorConfig.NodePrice) if uint64(len(args.Arguments)) > numQualified.Uint64() { v.eei.AddReturnMessage("insufficient funds") @@ -898,6 +929,27 @@ func (v *validatorSC) checkAllGivenKeysAreUnStaked(registrationData *ValidatorDa return mapBlsKeys, nil } +func (v *validatorSC) isStakeTooHigh(registrationData *ValidatorDataV2) bool { + if !v.enableEpochsHandler.IsFlagEnabled(common.StakeLimitsFlag) { + return false + } + + return registrationData.TotalStakeValue.Cmp(v.totalStakeLimit) > 0 +} + +func (v *validatorSC) isNumberOfNodesTooHigh(numNodes int) bool { + if !v.enableEpochsHandler.IsFlagEnabled(common.StakeLimitsFlag) { + return false + } + + return numNodes > v.computeNodeLimit() +} + +func (v *validatorSC) computeNodeLimit() int { + nodeLimit := float64(v.nodesCoordinator.GetNumTotalEligible()) * v.nodeLimitPercentage + return int(nodeLimit) +} + func (v *validatorSC) stake(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { err := v.eei.UseGas(v.gasCost.MetaChainSystemSCsCost.Stake) if err != nil { @@ -931,6 +983,11 @@ func (v *validatorSC) stake(args *vmcommon.ContractCallInput) vmcommon.ReturnCod return vmcommon.UserError } + if v.isStakeTooHigh(registrationData) { + v.eei.AddReturnMessage("total stake limit reached") + return vmcommon.UserError + } + lenArgs := len(args.Arguments) if lenArgs == 0 { return v.updateStakeValue(registrationData, args.CallerAddr) @@ -1018,31 +1075,73 @@ func (v *validatorSC) stake(args *vmcommon.ContractCallInput) vmcommon.ReturnCod } } - v.activateStakingFor( + v.activateNewBLSKeys(registrationData, blsKeys, newKeys, &validatorConfig, args) + + err = v.saveRegistrationData(args.CallerAddr, registrationData) + if err != nil { + v.eei.AddReturnMessage("cannot save registration data: error " + err.Error()) + return vmcommon.UserError + } + + return vmcommon.Ok +} + +func (v *validatorSC) activateNewBLSKeys( + registrationData *ValidatorDataV2, + blsKeys [][]byte, + newKeys [][]byte, + validatorConfig *ValidatorConfig, + args *vmcommon.ContractCallInput, +) { + numRegisteredBlsKeys := len(registrationData.BlsPubKeys) + allNodesActivated := v.activateStakingFor( blsKeys, + newKeys, registrationData, validatorConfig.NodePrice, registrationData.RewardAddress, args.CallerAddr, ) - err = v.saveRegistrationData(args.CallerAddr, registrationData) - if err != nil { - v.eei.AddReturnMessage("cannot save registration data: error " + err.Error()) - return vmcommon.UserError + if !allNodesActivated && len(blsKeys) > 0 { + nodeLimit := int64(v.computeNodeLimit()) + entry := &vmcommon.LogEntry{ + Identifier: []byte(args.Function), + Address: args.RecipientAddr, + Topics: [][]byte{ + []byte(numberOfNodesTooHigh), + big.NewInt(int64(numRegisteredBlsKeys)).Bytes(), + big.NewInt(nodeLimit).Bytes(), + }, + } + v.eei.AddLogEntry(entry) } - return vmcommon.Ok } func (v 
*validatorSC) activateStakingFor( blsKeys [][]byte, + newKeys [][]byte, registrationData *ValidatorDataV2, fixedStakeValue *big.Int, rewardAddress []byte, ownerAddress []byte, -) { - numRegistered := uint64(registrationData.NumRegistered) +) bool { + numActivatedKey := uint64(registrationData.NumRegistered) + + numAllBLSKeys := len(registrationData.BlsPubKeys) + if v.isNumberOfNodesTooHigh(numAllBLSKeys) { + return false + } + + maxNumNodesToActivate := len(blsKeys) + if v.enableEpochsHandler.IsFlagEnabled(common.StakeLimitsFlag) { + maxNumNodesToActivate = v.computeNodeLimit() - numAllBLSKeys + len(newKeys) + } + nodesActivated := 0 + if nodesActivated >= maxNumNodesToActivate && len(blsKeys) >= maxNumNodesToActivate { + return false + } for i := uint64(0); i < uint64(len(blsKeys)); i++ { currentBLSKey := blsKeys[i] @@ -1061,12 +1160,19 @@ func (v *validatorSC) activateStakingFor( } if stakedData.UnStakedNonce == 0 { - numRegistered++ + numActivatedKey++ + } + + nodesActivated++ + if nodesActivated >= maxNumNodesToActivate { + break } } - registrationData.NumRegistered = uint32(numRegistered) - registrationData.LockedStake.Mul(fixedStakeValue, big.NewInt(0).SetUint64(numRegistered)) + registrationData.NumRegistered = uint32(numActivatedKey) + registrationData.LockedStake.Mul(fixedStakeValue, big.NewInt(0).SetUint64(numActivatedKey)) + + return nodesActivated < maxNumNodesToActivate || len(blsKeys) <= maxNumNodesToActivate } func (v *validatorSC) stakeOneNode( @@ -1370,11 +1476,7 @@ func (v *validatorSC) unBondV1(args *vmcommon.ContractCallInput) vmcommon.Return } } - err := v.eei.Transfer(args.CallerAddr, args.RecipientAddr, totalUnBond, nil, 0) - if err != nil { - v.eei.AddReturnMessage("transfer error on unBond function") - return vmcommon.UserError - } + v.eei.Transfer(args.CallerAddr, args.RecipientAddr, totalUnBond, nil, 0) return vmcommon.Ok } @@ -1409,11 +1511,7 @@ func (v *validatorSC) unBond(args *vmcommon.ContractCallInput) vmcommon.ReturnCo return returnCode } - err := v.eei.Transfer(args.CallerAddr, args.RecipientAddr, totalUnBond, nil, 0) - if err != nil { - v.eei.AddReturnMessage("transfer error on unBond function") - return vmcommon.UserError - } + v.eei.Transfer(args.CallerAddr, args.RecipientAddr, totalUnBond, nil, 0) return vmcommon.Ok } @@ -1500,11 +1598,7 @@ func (v *validatorSC) claim(args *vmcommon.ContractCallInput) vmcommon.ReturnCod return vmcommon.UserError } - err = v.eei.Transfer(args.CallerAddr, args.RecipientAddr, claimable, nil, 0) - if err != nil { - v.eei.AddReturnMessage("transfer error on finalizeUnStake function: error " + err.Error()) - return vmcommon.UserError - } + v.eei.Transfer(args.CallerAddr, args.RecipientAddr, claimable, nil, 0) return vmcommon.Ok } @@ -1703,12 +1797,7 @@ func (v *validatorSC) unBondTokens(args *vmcommon.ContractCallInput) vmcommon.Re return vmcommon.UserError } - err = v.eei.Transfer(args.CallerAddr, args.RecipientAddr, totalUnBond, nil, 0) - if err != nil { - v.eei.AddReturnMessage("transfer error on unBond function") - return vmcommon.UserError - } - + v.eei.Transfer(args.CallerAddr, args.RecipientAddr, totalUnBond, nil, 0) err = v.saveRegistrationData(args.CallerAddr, registrationData) if err != nil { v.eei.AddReturnMessage("cannot save registration data: error " + err.Error()) @@ -2027,6 +2116,16 @@ func (v *validatorSC) mergeValidatorData(args *vmcommon.ContractCallInput) vmcom validatorConfig := v.getConfig(v.eei.BlockChainHook().CurrentEpoch()) finalValidatorData.LockedStake.Mul(validatorConfig.NodePrice, 
big.NewInt(int64(finalValidatorData.NumRegistered))) + if v.isNumberOfNodesTooHigh(len(finalValidatorData.BlsPubKeys)) { + v.eei.AddReturnMessage("number of nodes is too high") + return vmcommon.UserError + } + + if v.isStakeTooHigh(finalValidatorData) { + v.eei.AddReturnMessage("total stake limit reached") + return vmcommon.UserError + } + v.eei.SetStorage(oldAddress, nil) err = v.saveRegistrationData(delegationAddr, finalValidatorData) if err != nil { diff --git a/vm/systemSmartContracts/validator_test.go b/vm/systemSmartContracts/validator_test.go index f4aefd377ec..758e0167a9d 100644 --- a/vm/systemSmartContracts/validator_test.go +++ b/vm/systemSmartContracts/validator_test.go @@ -51,6 +51,8 @@ func createMockArgumentsForValidatorSCWithSystemScAddresses( MaxNumberOfNodesForStake: 10, ActivateBLSPubKeyMessageVerification: false, MinUnstakeTokensValue: "1", + StakeLimitPercentage: 100.0, + NodeLimitPercentage: 100.0, }, Marshalizer: &mock.MarshalizerMock{}, GenesisTotalSupply: big.NewInt(100000000), @@ -64,7 +66,9 @@ func createMockArgumentsForValidatorSCWithSystemScAddresses( common.ValidatorToDelegationFlag, common.DoubleKeyProtectionFlag, common.MultiClaimOnDelegationFlag, + common.StakeLimitsFlag, ), + NodesCoordinator: &mock.NodesCoordinatorStub{}, } return args @@ -224,6 +228,39 @@ func TestNewStakingValidatorSmartContract_NilValidatorSmartContractAddress(t *te assert.True(t, errors.Is(err, vm.ErrNilValidatorSmartContractAddress)) } +func TestNewStakingValidatorSmartContract_NilNodesCoordinator(t *testing.T) { + t.Parallel() + + arguments := createMockArgumentsForValidatorSC() + arguments.NodesCoordinator = nil + + asc, err := NewValidatorSmartContract(arguments) + require.Nil(t, asc) + assert.True(t, errors.Is(err, vm.ErrNilNodesCoordinator)) +} + +func TestNewStakingValidatorSmartContract_ZeroStakeLimit(t *testing.T) { + t.Parallel() + + arguments := createMockArgumentsForValidatorSC() + arguments.StakingSCConfig.StakeLimitPercentage = 0.0 + + asc, err := NewValidatorSmartContract(arguments) + require.Nil(t, asc) + assert.True(t, errors.Is(err, vm.ErrInvalidStakeLimitPercentage)) +} + +func TestNewStakingValidatorSmartContract_ZeroNodeLimit(t *testing.T) { + t.Parallel() + + arguments := createMockArgumentsForValidatorSC() + arguments.StakingSCConfig.NodeLimitPercentage = 0.0 + + asc, err := NewValidatorSmartContract(arguments) + require.Nil(t, asc) + assert.True(t, errors.Is(err, vm.ErrInvalidNodeLimitPercentage)) +} + func TestNewStakingValidatorSmartContract_NilSigVerifier(t *testing.T) { t.Parallel() @@ -368,6 +405,138 @@ func TestStakingValidatorSC_ExecuteStakeWithoutArgumentsShouldWork(t *testing.T) assert.Equal(t, vmcommon.Ok, errCode) } +func TestStakingValidatorSC_ExecuteStakeTooMuchStake(t *testing.T) { + t.Parallel() + + arguments := CreateVmContractCallInput() + validatorData := createAValidatorData(25000000, 2, 12500000) + validatorDataBytes, _ := json.Marshal(&validatorData) + + eei := &mock.SystemEIStub{} + eei.GetStorageCalled = func(key []byte) []byte { + if bytes.Equal(key, arguments.CallerAddr) { + return validatorDataBytes + } + return nil + } + eei.AddReturnMessageCalled = func(msg string) { + assert.Equal(t, msg, "total stake limit reached") + } + + args := createMockArgumentsForValidatorSC() + args.Eei = eei + + stakingValidatorSc, _ := NewValidatorSmartContract(args) + + arguments.Function = "stake" + arguments.CallValue = big.NewInt(0).Set(stakingValidatorSc.totalStakeLimit) + + errCode := stakingValidatorSc.Execute(arguments) + assert.Equal(t, 
vmcommon.UserError, errCode) +} + +func TestStakingValidatorSC_ExecuteStakeTooManyNodes(t *testing.T) { + t.Parallel() + + arguments := CreateVmContractCallInput() + + eei := &mock.SystemEIStub{} + + args := createMockArgumentsForValidatorSC() + args.Eei = eei + + args.NodesCoordinator = &mock.NodesCoordinatorStub{GetNumTotalEligibleCalled: func() uint64 { + return 1000 + }} + args.StakingSCConfig.NodeLimitPercentage = 0.005 + stakingValidatorSc, _ := NewValidatorSmartContract(args) + + validatorData := createAValidatorData(75000000, 5, 12500000) + validatorDataBytes, _ := json.Marshal(&validatorData) + + eei.GetStorageCalled = func(key []byte) []byte { + if bytes.Equal(key, arguments.CallerAddr) { + return validatorDataBytes + } + return nil + } + called := false + eei.AddLogEntryCalled = func(entry *vmcommon.LogEntry) { + called = true + assert.Equal(t, entry.Topics[0], []byte(numberOfNodesTooHigh)) + } + + eei.ExecuteOnDestContextCalled = func(destination, sender []byte, value *big.Int, input []byte) (*vmcommon.VMOutput, error) { + if strings.Contains(string(input), "stake") { + assert.Fail(t, "should not stake nodes") + } + return &vmcommon.VMOutput{}, nil + } + + key1 := []byte("Key1") + key2 := []byte("Key2") + key3 := []byte("Key3") + arguments.Function = "stake" + arguments.CallValue = big.NewInt(0).Mul(big.NewInt(3), big.NewInt(10000000)) + arguments.Arguments = [][]byte{big.NewInt(3).Bytes(), key1, []byte("msg1"), key2, []byte("msg2"), key3, []byte("msg3")} + + errCode := stakingValidatorSc.Execute(arguments) + assert.Equal(t, vmcommon.Ok, errCode) + assert.True(t, called) +} + +func TestStakingValidatorSC_ExecuteStakeTooManyNodesAddOnly2(t *testing.T) { + t.Parallel() + + arguments := CreateVmContractCallInput() + + eei := &mock.SystemEIStub{} + + args := createMockArgumentsForValidatorSC() + args.Eei = eei + + args.NodesCoordinator = &mock.NodesCoordinatorStub{GetNumTotalEligibleCalled: func() uint64 { + return 1000 + }} + args.StakingSCConfig.NodeLimitPercentage = 0.005 + stakingValidatorSc, _ := NewValidatorSmartContract(args) + + validatorData := createAValidatorData(75000000, 3, 12500000) + validatorDataBytes, _ := json.Marshal(&validatorData) + + eei.GetStorageCalled = func(key []byte) []byte { + if bytes.Equal(key, arguments.CallerAddr) { + return validatorDataBytes + } + return nil + } + called := false + eei.AddLogEntryCalled = func(entry *vmcommon.LogEntry) { + called = true + assert.Equal(t, entry.Topics[0], []byte(numberOfNodesTooHigh)) + } + + stakeCalledInStakingSC := 0 + eei.ExecuteOnDestContextCalled = func(destination, sender []byte, value *big.Int, input []byte) (*vmcommon.VMOutput, error) { + if strings.Contains(string(input), "stake") { + stakeCalledInStakingSC++ + } + return &vmcommon.VMOutput{}, nil + } + + key1 := []byte("Key1") + key2 := []byte("Key2") + key3 := []byte("Key3") + arguments.Function = "stake" + arguments.CallValue = big.NewInt(0).Mul(big.NewInt(3), big.NewInt(10000000)) + arguments.Arguments = [][]byte{big.NewInt(3).Bytes(), key1, []byte("msg1"), key2, []byte("msg2"), key3, []byte("msg3")} + + errCode := stakingValidatorSc.Execute(arguments) + assert.Equal(t, vmcommon.Ok, errCode) + assert.True(t, called) + assert.Equal(t, 2, stakeCalledInStakingSC) +} + func TestStakingValidatorSC_ExecuteStakeAddedNewPubKeysShouldWork(t *testing.T) { t.Parallel() @@ -1239,6 +1408,8 @@ func TestStakingValidatorSC_StakeUnStake3XRestake2(t *testing.T) { return stakingSc, nil }}) + nodesCoordinator := &mock.NodesCoordinatorStub{} + args.NodesCoordinator = 
nodesCoordinator args.StakingSCConfig = argsStaking.StakingSCConfig args.Eei = eei @@ -1282,9 +1453,21 @@ func TestStakingValidatorSC_StakeUnStake3XRestake2(t *testing.T) { retCode = sc.Execute(arguments) assert.Equal(t, vmcommon.Ok, retCode) + nodesCoordinator.GetNumTotalEligibleCalled = func() uint64 { + return 1 + } + arguments.Function = "reStakeUnStakedNodes" arguments.Arguments = [][]byte{stakerPubKey1, stakerPubKey2} arguments.CallValue = big.NewInt(0) + retCode = sc.Execute(arguments) + assert.Equal(t, vmcommon.UserError, retCode) + assert.Equal(t, eei.returnMessage, "number of nodes is too high") + + nodesCoordinator.GetNumTotalEligibleCalled = func() uint64 { + return 10 + } + retCode = sc.Execute(arguments) assert.Equal(t, vmcommon.Ok, retCode) } @@ -5104,6 +5287,101 @@ func TestStakingValidatorSC_MergeValidatorData(t *testing.T) { assert.Equal(t, stakedData.RewardAddress, vm.FirstDelegationSCAddress) } +func TestStakingValidatorSC_MergeValidatorDataTooMuchStake(t *testing.T) { + t.Parallel() + + enableEpochsHandler := &enableEpochsHandlerMock.EnableEpochsHandlerStub{} + enableEpochsHandler.AddActiveFlags(common.StakingV2Flag) + argsVMContext := createArgsVMContext() + argsVMContext.InputParser = parsers.NewCallArgsParser() + argsVMContext.EnableEpochsHandler = enableEpochsHandler + eei, _ := NewVMContext(argsVMContext) + + argsStaking := createMockStakingScArguments() + argsStaking.Eei = eei + stakingSc, _ := NewStakingSmartContract(argsStaking) + eei.SetSCAddress([]byte("addr")) + _ = eei.SetSystemSCContainer(&mock.SystemSCContainerStub{GetCalled: func(key []byte) (contract vm.SystemSmartContract, err error) { + return stakingSc, nil + }}) + + args := createMockArgumentsForValidatorSC() + args.StakingSCConfig = argsStaking.StakingSCConfig + args.Eei = eei + + sc, _ := NewValidatorSmartContract(args) + arguments := CreateVmContractCallInput() + arguments.CallerAddr = vm.ESDTSCAddress + arguments.Function = "mergeValidatorData" + arguments.Arguments = [][]byte{} + arguments.CallValue = big.NewInt(0) + arguments.CallerAddr = sc.delegationMgrSCAddress + randomAddress := bytes.Repeat([]byte{1}, len(arguments.CallerAddr)) + arguments.Arguments = [][]byte{randomAddress, vm.FirstDelegationSCAddress} + + limitPer4 := big.NewInt(0).Div(sc.totalStakeLimit, big.NewInt(4)) + + stake(t, sc, limitPer4, randomAddress, randomAddress, []byte("firsstKey"), big.NewInt(1).Bytes()) + stake(t, sc, limitPer4, randomAddress, randomAddress, []byte("secondKey"), big.NewInt(1).Bytes()) + stake(t, sc, limitPer4, randomAddress, randomAddress, []byte("thirddKey"), big.NewInt(1).Bytes()) + + stake(t, sc, limitPer4, vm.FirstDelegationSCAddress, vm.FirstDelegationSCAddress, []byte("fourthKey"), big.NewInt(1).Bytes()) + stake(t, sc, limitPer4, vm.FirstDelegationSCAddress, vm.FirstDelegationSCAddress, []byte("fifthhKey"), big.NewInt(1).Bytes()) + stake(t, sc, limitPer4, vm.FirstDelegationSCAddress, vm.FirstDelegationSCAddress, []byte("sixthhKey"), big.NewInt(1).Bytes()) + + retCode := sc.Execute(arguments) + assert.Equal(t, vmcommon.UserError, retCode) + assert.Equal(t, eei.returnMessage, "total stake limit reached") +} + +func TestStakingValidatorSC_MergeValidatorDataTooMuchNodes(t *testing.T) { + t.Parallel() + + enableEpochsHandler := &enableEpochsHandlerMock.EnableEpochsHandlerStub{} + enableEpochsHandler.AddActiveFlags(common.StakingV2Flag) + argsVMContext := createArgsVMContext() + argsVMContext.InputParser = parsers.NewCallArgsParser() + argsVMContext.EnableEpochsHandler = enableEpochsHandler + eei, _ := 
NewVMContext(argsVMContext) + + argsStaking := createMockStakingScArguments() + argsStaking.Eei = eei + stakingSc, _ := NewStakingSmartContract(argsStaking) + eei.SetSCAddress([]byte("addr")) + _ = eei.SetSystemSCContainer(&mock.SystemSCContainerStub{GetCalled: func(key []byte) (contract vm.SystemSmartContract, err error) { + return stakingSc, nil + }}) + + args := createMockArgumentsForValidatorSC() + args.NodesCoordinator = &mock.NodesCoordinatorStub{GetNumTotalEligibleCalled: func() uint64 { + return 5 + }} + args.StakingSCConfig = argsStaking.StakingSCConfig + args.Eei = eei + + sc, _ := NewValidatorSmartContract(args) + arguments := CreateVmContractCallInput() + arguments.CallerAddr = vm.ESDTSCAddress + arguments.Function = "mergeValidatorData" + arguments.Arguments = [][]byte{} + arguments.CallValue = big.NewInt(0) + arguments.CallerAddr = sc.delegationMgrSCAddress + randomAddress := bytes.Repeat([]byte{1}, len(arguments.CallerAddr)) + arguments.Arguments = [][]byte{randomAddress, vm.FirstDelegationSCAddress} + + stake(t, sc, stakingSc.stakeValue, randomAddress, randomAddress, []byte("firsstKey"), big.NewInt(1).Bytes()) + stake(t, sc, stakingSc.stakeValue, randomAddress, randomAddress, []byte("secondKey"), big.NewInt(1).Bytes()) + stake(t, sc, stakingSc.stakeValue, randomAddress, randomAddress, []byte("thirddKey"), big.NewInt(1).Bytes()) + + stake(t, sc, stakingSc.stakeValue, vm.FirstDelegationSCAddress, vm.FirstDelegationSCAddress, []byte("fourthKey"), big.NewInt(1).Bytes()) + stake(t, sc, stakingSc.stakeValue, vm.FirstDelegationSCAddress, vm.FirstDelegationSCAddress, []byte("fifthhKey"), big.NewInt(1).Bytes()) + stake(t, sc, stakingSc.stakeValue, vm.FirstDelegationSCAddress, vm.FirstDelegationSCAddress, []byte("sixthhKey"), big.NewInt(1).Bytes()) + + retCode := sc.Execute(arguments) + assert.Equal(t, vmcommon.UserError, retCode) + assert.Equal(t, eei.returnMessage, "number of nodes is too high") +} + func TestValidatorSC_getMinUnStakeTokensValueFromDelegationManagerMarshalizerFail(t *testing.T) { t.Parallel()