diff --git a/api/errors/errors.go b/api/errors/errors.go index 6f57a069558..7ec06e15201 100644 --- a/api/errors/errors.go +++ b/api/errors/errors.go @@ -31,6 +31,9 @@ var ErrGetKeyValuePairs = errors.New("get key-value pairs error") // ErrGetESDTBalance signals an error in getting esdt balance for given address var ErrGetESDTBalance = errors.New("get esdt balance for account error") +// ErrGetGuardianData signals an error in getting the guardian data for given address +var ErrGetGuardianData = errors.New("get guardian data for account error") + // ErrGetRolesForAccount signals an error in getting esdt tokens and roles for a given address var ErrGetRolesForAccount = errors.New("get roles for account error") @@ -150,3 +153,12 @@ var ErrFetchingNonceGapsCannotIncludeFields = errors.New("fetching nonce gaps ca // ErrInvalidFields signals that invalid fields were provided var ErrInvalidFields = errors.New("invalid fields") + +// ErrGetESDTTokensWithRole signals an error in getting the esdt tokens with the given role for given address +var ErrGetESDTTokensWithRole = errors.New("getting esdt tokens with role error") + +// ErrRegisteredNFTTokenIDs signals an error in getting the registered nft token ids by the given address +var ErrRegisteredNFTTokenIDs = errors.New("getting registered nft token ids error") + +// ErrInvalidRole signals that an invalid role was provided +var ErrInvalidRole = errors.New("invalid role") diff --git a/api/gin/common.go b/api/gin/common.go index 050a213c9a2..2b6ae6d725e 100644 --- a/api/gin/common.go +++ b/api/gin/common.go @@ -35,12 +35,8 @@ func skValidator( } func checkArgs(args ArgsNewWebServer) error { - errHandler := func(details string) error { - return fmt.Errorf("%w: %s", apiErrors.ErrCannotCreateGinWebServer, details) - } - if check.IfNil(args.Facade) { - return errHandler("nil facade") + return fmt.Errorf("%w: %s", apiErrors.ErrCannotCreateGinWebServer, apiErrors.ErrNilFacadeHandler.Error()) } return nil diff --git a/api/gin/common_test.go b/api/gin/common_test.go index 9e61059eee8..46a2492de8a 100644 --- a/api/gin/common_test.go +++ b/api/gin/common_test.go @@ -46,4 +46,5 @@ func TestCommon_isLogRouteEnabled(t *testing.T) { }, } require.True(t, isLogRouteEnabled(routesConfig)) + require.False(t, isLogRouteEnabled(config.ApiRoutesConfig{})) } diff --git a/api/gin/httpServer.go b/api/gin/httpServer.go index 1cfe53bc188..12de381278d 100644 --- a/api/gin/httpServer.go +++ b/api/gin/httpServer.go @@ -9,11 +9,11 @@ import ( ) type httpServer struct { - server *http.Server + server server } // NewHttpServer returns a new instance of httpServer -func NewHttpServer(server *http.Server) (*httpServer, error) { +func NewHttpServer(server server) (*httpServer, error) { if server == nil { return nil, apiErrors.ErrNilHttpServer } @@ -27,15 +27,18 @@ func NewHttpServer(server *http.Server) (*httpServer, error) { // called on a go routine (different from the main one) func (h *httpServer) Start() { err := h.server.ListenAndServe() - if err != nil { - if err != http.ErrServerClosed { - log.Error("could not start webserver", - "error", err.Error(), - ) - } else { - log.Debug("ListenAndServe - webserver closed") - } + if err == nil { + return } + + if err == http.ErrServerClosed { + log.Debug("ListenAndServe - webserver closed") + return + } + + log.Error("could not start webserver", + "error", err.Error(), + ) } // Close will handle the stopping of the gin web server diff --git a/api/gin/httpServer_test.go b/api/gin/httpServer_test.go index 057468b2607..035f9a20f1f 100644 --- 
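An illustrative aside, not part of the diff: the "%w: %s" wrapping that checkArgs now uses keeps the sentinel error matchable with errors.Is while still carrying the detail message. The error values below are stand-ins mirroring the ones declared in api/errors.

package main

import (
	"errors"
	"fmt"
)

var errCannotCreateGinWebServer = errors.New("cannot create gin web server")
var errNilFacadeHandler = errors.New("nil facade handler")

func main() {
	err := fmt.Errorf("%w: %s", errCannotCreateGinWebServer, errNilFacadeHandler.Error())

	fmt.Println(errors.Is(err, errCannotCreateGinWebServer)) // true: the sentinel survives wrapping
	fmt.Println(err)                                         // cannot create gin web server: nil facade handler
}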
a/api/gin/httpServer_test.go +++ b/api/gin/httpServer_test.go @@ -1,17 +1,92 @@ package gin import ( + "context" + "errors" + "net/http" "testing" - "github.com/multiversx/mx-chain-core-go/core/check" - "github.com/multiversx/mx-chain-go/api/errors" + apiErrors "github.com/multiversx/mx-chain-go/api/errors" + "github.com/multiversx/mx-chain-go/testscommon/api" "github.com/stretchr/testify/require" ) -func TestNewHttpServer_NilServerShouldErr(t *testing.T) { +func TestNewHttpServer(t *testing.T) { t.Parallel() - hs, err := NewHttpServer(nil) - require.Equal(t, errors.ErrNilHttpServer, err) - require.True(t, check.IfNil(hs)) + t.Run("nil server should error", func(t *testing.T) { + t.Parallel() + + hs, err := NewHttpServer(nil) + require.Equal(t, apiErrors.ErrNilHttpServer, err) + require.Nil(t, hs) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + hs, err := NewHttpServer(&api.ServerStub{}) + require.NoError(t, err) + require.NotNil(t, hs) + }) +} + +func TestHttpServer_Start(t *testing.T) { + t.Parallel() + + t.Run("server starts", func(t *testing.T) { + t.Parallel() + + wasCalled := false + serverStub := &api.ServerStub{ + ListenAndServeCalled: func() error { + return nil + }, + ShutdownCalled: func(ctx context.Context) error { + wasCalled = true + return nil + }, + } + hs, _ := NewHttpServer(serverStub) + require.NotNil(t, hs) + + hs.Start() + require.NoError(t, hs.Close()) + require.True(t, wasCalled) + }) + t.Run("server is closed", func(t *testing.T) { + t.Parallel() + + serverStub := &api.ServerStub{ + ListenAndServeCalled: func() error { + return http.ErrServerClosed + }, + } + hs, _ := NewHttpServer(serverStub) + require.NotNil(t, hs) + + hs.Start() + }) + t.Run("server returns other error", func(t *testing.T) { + t.Parallel() + + serverStub := &api.ServerStub{ + ListenAndServeCalled: func() error { + return errors.New("other error") + }, + } + hs, _ := NewHttpServer(serverStub) + require.NotNil(t, hs) + + hs.Start() + }) +} + +func TestHttpServer_IsInterfaceNil(t *testing.T) { + t.Parallel() + + var hs *httpServer + require.True(t, hs.IsInterfaceNil()) + + hs, _ = NewHttpServer(&api.ServerStub{}) + require.False(t, hs.IsInterfaceNil()) } diff --git a/api/gin/interface.go b/api/gin/interface.go index cfc90510cee..55ba1ce9d8a 100644 --- a/api/gin/interface.go +++ b/api/gin/interface.go @@ -1,6 +1,13 @@ package gin +import "context" + type resetHandler interface { Reset() IsInterfaceNil() bool } + +type server interface { + ListenAndServe() error + Shutdown(ctx context.Context) error +} diff --git a/api/gin/webServer.go b/api/gin/webServer.go index 8b1e61cb02e..bfbaf5336d8 100644 --- a/api/gin/webServer.go +++ b/api/gin/webServer.go @@ -12,6 +12,7 @@ import ( "github.com/gin-gonic/gin" "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-core-go/marshal" + "github.com/multiversx/mx-chain-go/api/errors" "github.com/multiversx/mx-chain-go/api/groups" "github.com/multiversx/mx-chain-go/api/middleware" "github.com/multiversx/mx-chain-go/api/shared" @@ -46,18 +47,20 @@ func NewGinWebServerHandler(args ArgsNewWebServer) (*webServer, error) { return nil, err } - gws := &webServer{ + return &webServer{ facade: args.Facade, antiFloodConfig: args.AntiFloodConfig, apiConfig: args.ApiConfig, - } - - return gws, nil + }, nil } // UpdateFacade updates the main api handler by closing the old server and starting it with the new facade. 
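An illustrative aside, not part of the diff: the new unexported `server` interface is the seam that lets the tests above swap a stub in for *http.Server. A minimal sketch, assuming a stub shaped like testscommon/api.ServerStub:

package main

import (
	"context"
	"net/http"
)

type server interface {
	ListenAndServe() error
	Shutdown(ctx context.Context) error
}

// *http.Server already has both methods with exactly these signatures.
var _ server = (*http.Server)(nil)

// stubServer mirrors the shape assumed for testscommon/api.ServerStub.
type stubServer struct {
	listenAndServeCalled func() error
	shutdownCalled       func(ctx context.Context) error
}

func (s *stubServer) ListenAndServe() error {
	if s.listenAndServeCalled != nil {
		return s.listenAndServeCalled()
	}
	return nil
}

func (s *stubServer) Shutdown(ctx context.Context) error {
	if s.shutdownCalled != nil {
		return s.shutdownCalled(ctx)
	}
	return nil
}

func main() {
	var s server = &stubServer{}
	_ = s.ListenAndServe()
	_ = s.Shutdown(context.Background())
}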
Returns the // new web server func (ws *webServer) UpdateFacade(facade shared.FacadeHandler) error { + if check.IfNil(facade) { + return errors.ErrNilFacadeHandler + } + ws.Lock() defer ws.Unlock() @@ -80,6 +83,7 @@ func (ws *webServer) StartHttpServer() error { defer ws.Unlock() if ws.facade.RestApiInterface() == facade.DefaultRestPortOff { + log.Debug("web server is turned off") return nil } diff --git a/api/gin/webServer_test.go b/api/gin/webServer_test.go new file mode 100644 index 00000000000..c966a2d9c98 --- /dev/null +++ b/api/gin/webServer_test.go @@ -0,0 +1,176 @@ +package gin + +import ( + "errors" + "net/http" + "strings" + "testing" + "time" + + apiErrors "github.com/multiversx/mx-chain-go/api/errors" + "github.com/multiversx/mx-chain-go/api/middleware" + "github.com/multiversx/mx-chain-go/api/mock" + "github.com/multiversx/mx-chain-go/api/shared" + "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/facade" + "github.com/multiversx/mx-chain-go/testscommon/api" + "github.com/stretchr/testify/require" +) + +func createMockArgsNewWebServer() ArgsNewWebServer { + return ArgsNewWebServer{ + Facade: &mock.FacadeStub{ + PprofEnabledCalled: func() bool { + return true // coverage + }, + }, + ApiConfig: config.ApiRoutesConfig{ + Logging: config.ApiLoggingConfig{ + LoggingEnabled: true, + ThresholdInMicroSeconds: 10, + }, + APIPackages: map[string]config.APIPackageConfig{ + "log": {Routes: []config.RouteConfig{ + { + Name: "/log", + Open: true, + }, + }}, + }, + }, + AntiFloodConfig: config.WebServerAntifloodConfig{ + WebServerAntifloodEnabled: true, + SimultaneousRequests: 1, + SameSourceRequests: 1, + SameSourceResetIntervalInSec: 1, + }, + } +} + +func TestNewGinWebServerHandler(t *testing.T) { + t.Parallel() + + t.Run("nil facade should error", func(t *testing.T) { + t.Parallel() + + args := createMockArgsNewWebServer() + args.Facade = nil + + ws, err := NewGinWebServerHandler(args) + require.True(t, errors.Is(err, apiErrors.ErrCannotCreateGinWebServer)) + require.True(t, strings.Contains(err.Error(), apiErrors.ErrNilFacadeHandler.Error())) + require.Nil(t, ws) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + ws, err := NewGinWebServerHandler(createMockArgsNewWebServer()) + require.Nil(t, err) + require.NotNil(t, ws) + }) +} + +func TestWebServer_StartHttpServer(t *testing.T) { + t.Run("RestApiInterface returns DefaultRestPortOff", func(t *testing.T) { + args := createMockArgsNewWebServer() + args.Facade = &mock.FacadeStub{ + RestApiInterfaceCalled: func() string { + return facade.DefaultRestPortOff + }, + } + + ws, _ := NewGinWebServerHandler(args) + require.NotNil(t, ws) + + err := ws.StartHttpServer() + require.Nil(t, err) + }) + t.Run("createMiddlewareLimiters returns error due to middleware.NewSourceThrottler error", func(t *testing.T) { + args := createMockArgsNewWebServer() + args.AntiFloodConfig.SameSourceRequests = 0 + ws, _ := NewGinWebServerHandler(args) + require.NotNil(t, ws) + + err := ws.StartHttpServer() + require.Equal(t, middleware.ErrInvalidMaxNumRequests, err) + }) + t.Run("createMiddlewareLimiters returns error due to middleware.NewGlobalThrottler error", func(t *testing.T) { + args := createMockArgsNewWebServer() + args.AntiFloodConfig.SimultaneousRequests = 0 + ws, _ := NewGinWebServerHandler(args) + require.NotNil(t, ws) + + err := ws.StartHttpServer() + require.Equal(t, middleware.ErrInvalidMaxNumRequests, err) + }) + t.Run("should work", func(t *testing.T) { + ws, _ := 
NewGinWebServerHandler(createMockArgsNewWebServer()) + require.NotNil(t, ws) + + err := ws.StartHttpServer() + require.Nil(t, err) + + time.Sleep(2 * time.Second) + + client := &http.Client{} + req, err := http.NewRequest("GET", "http://127.0.0.1:8080/log", nil) + require.Nil(t, err) + + req.Header.Set("Sec-Websocket-Version", "13") + req.Header.Set("Connection", "upgrade") + req.Header.Set("Upgrade", "websocket") + req.Header.Set("Sec-Websocket-Key", "key") + + resp, err := client.Do(req) + require.Nil(t, err) + + err = resp.Body.Close() + require.Nil(t, err) + + time.Sleep(2 * time.Second) + err = ws.Close() + require.Nil(t, err) + }) +} + +func TestWebServer_UpdateFacade(t *testing.T) { + t.Parallel() + + t.Run("update with nil facade should error", func(t *testing.T) { + t.Parallel() + + ws, _ := NewGinWebServerHandler(createMockArgsNewWebServer()) + require.NotNil(t, ws) + + err := ws.UpdateFacade(nil) + require.Equal(t, apiErrors.ErrNilFacadeHandler, err) + }) + t.Run("should work - one of the groupHandlers returns err", func(t *testing.T) { + t.Parallel() + + args := createMockArgsNewWebServer() + args.Facade = &mock.FacadeStub{ + RestApiInterfaceCalled: func() string { + return "provided interface" + }, + } + + ws, _ := NewGinWebServerHandler(args) + require.NotNil(t, ws) + + ws.groups = make(map[string]shared.GroupHandler) + ws.groups["first"] = &api.GroupHandlerStub{ + UpdateFacadeCalled: func(newFacade interface{}) error { + return errors.New("error") + }, + } + ws.groups["second"] = &api.GroupHandlerStub{ + UpdateFacadeCalled: func(newFacade interface{}) error { + return nil + }, + } + + err := ws.UpdateFacade(&mock.FacadeStub{}) + require.Nil(t, err) + }) +} diff --git a/api/gin/writers_test.go b/api/gin/writers_test.go new file mode 100644 index 00000000000..a6a1b221126 --- /dev/null +++ b/api/gin/writers_test.go @@ -0,0 +1,29 @@ +package gin + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestGinWriter_Write(t *testing.T) { + t.Parallel() + + gw := ginWriter{} + + providedBuff := []byte("provided buff") + l, err := gw.Write(providedBuff) + assert.Nil(t, err) + assert.Equal(t, len(providedBuff), l) +} + +func TestGinErrorWriter_Write(t *testing.T) { + t.Parallel() + + gew := ginErrorWriter{} + + providedBuff := []byte("provided buff") + l, err := gew.Write(providedBuff) + assert.Nil(t, err) + assert.Equal(t, len(providedBuff), l) +} diff --git a/api/groups/addressGroup.go b/api/groups/addressGroup.go index 6749087926c..410ef8c73c3 100644 --- a/api/groups/addressGroup.go +++ b/api/groups/addressGroup.go @@ -30,6 +30,7 @@ const ( getESDTsRolesPath = "/:address/esdts/roles" getRegisteredNFTsPath = "/:address/registered-nfts" getESDTNFTDataPath = "/:address/nft/:tokenIdentifier/nonce/:nonce" + getGuardianData = "/:address/guardian-data" urlParamOnFinalBlock = "onFinalBlock" urlParamOnStartOfEpoch = "onStartOfEpoch" urlParamBlockNonce = "blockNonce" @@ -52,6 +53,7 @@ type addressFacadeHandler interface { GetESDTsWithRole(address string, role string, options api.AccountQueryOptions) ([]string, api.BlockInfo, error) GetAllESDTTokens(address string, options api.AccountQueryOptions) (map[string]*esdt.ESDigitalToken, api.BlockInfo, error) GetKeyValuePairs(address string, options api.AccountQueryOptions) (map[string]string, api.BlockInfo, error) + GetGuardianData(address string, options api.AccountQueryOptions) (api.GuardianData, api.BlockInfo, error) IsInterfaceNil() bool } @@ -157,6 +159,11 @@ func NewAddressGroup(facade addressFacadeHandler) 
(*addressGroup, error) { Method: http.MethodGet, Handler: ag.getESDTsRoles, }, + { + Path: getGuardianData, + Method: http.MethodGet, + Handler: ag.getGuardianData, + }, } ag.endpoints = endpoints @@ -165,13 +172,7 @@ func NewAddressGroup(facade addressFacadeHandler) (*addressGroup, error) { // getAccount returns a response containing information about the account correlated with provided address func (ag *addressGroup) getAccount(c *gin.Context) { - addr := c.Param("address") - if addr == "" { - shared.RespondWithValidationError(c, errors.ErrCouldNotGetAccount, errors.ErrEmptyAddress) - return - } - - options, err := extractAccountQueryOptions(c) + addr, options, err := extractBaseParams(c) if err != nil { shared.RespondWithValidationError(c, errors.ErrCouldNotGetAccount, err) return @@ -213,13 +214,7 @@ func (ag *addressGroup) getAccounts(c *gin.Context) { // getBalance returns the balance for the address parameter func (ag *addressGroup) getBalance(c *gin.Context) { - addr := c.Param("address") - if addr == "" { - shared.RespondWithValidationError(c, errors.ErrGetBalance, errors.ErrEmptyAddress) - return - } - - options, err := extractAccountQueryOptions(c) + addr, options, err := extractBaseParams(c) if err != nil { shared.RespondWithValidationError(c, errors.ErrGetBalance, err) return @@ -236,13 +231,7 @@ func (ag *addressGroup) getBalance(c *gin.Context) { // getUsername returns the username for the address parameter func (ag *addressGroup) getUsername(c *gin.Context) { - addr := c.Param("address") - if addr == "" { - shared.RespondWithValidationError(c, errors.ErrGetUsername, errors.ErrEmptyAddress) - return - } - - options, err := extractAccountQueryOptions(c) + addr, options, err := extractBaseParams(c) if err != nil { shared.RespondWithValidationError(c, errors.ErrGetUsername, err) return @@ -259,15 +248,9 @@ func (ag *addressGroup) getUsername(c *gin.Context) { // getCodeHash returns the code hash for the address parameter func (ag *addressGroup) getCodeHash(c *gin.Context) { - addr := c.Param("address") - if addr == "" { - shared.RespondWithValidationError(c, errors.ErrGetCodeHash, errors.ErrEmptyAddress) - return - } - - options, err := parseAccountQueryOptions(c) + addr, options, err := extractBaseParams(c) if err != nil { - shared.RespondWithValidationError(c, errors.ErrGetCodeHash, errors.ErrBadUrlParams) + shared.RespondWithValidationError(c, errors.ErrGetCodeHash, err) return } @@ -290,7 +273,7 @@ func (ag *addressGroup) getValueForKey(c *gin.Context) { options, err := extractAccountQueryOptions(c) if err != nil { - shared.RespondWithValidationError(c, errors.ErrGetUsername, err) + shared.RespondWithValidationError(c, errors.ErrGetValueForKey, err) return } @@ -309,15 +292,26 @@ func (ag *addressGroup) getValueForKey(c *gin.Context) { shared.RespondWithSuccess(c, gin.H{"value": value, "blockInfo": blockInfo}) } -// addressGroup returns all the key-value pairs for the given address -func (ag *addressGroup) getKeyValuePairs(c *gin.Context) { - addr := c.Param("address") - if addr == "" { - shared.RespondWithValidationError(c, errors.ErrGetKeyValuePairs, errors.ErrEmptyAddress) +// getGuardianData returns the guardian data and guarded state for a given account +func (ag *addressGroup) getGuardianData(c *gin.Context) { + addr, options, err := extractBaseParams(c) + if err != nil { + shared.RespondWithValidationError(c, errors.ErrGetGuardianData, err) return } - options, err := extractAccountQueryOptions(c) + guardianData, blockInfo, err := ag.getFacade().GetGuardianData(addr, 
options) + if err != nil { + shared.RespondWithInternalError(c, errors.ErrGetGuardianData, err) + return + } + + shared.RespondWithSuccess(c, gin.H{"guardianData": guardianData, "blockInfo": blockInfo}) +} + +// getKeyValuePairs returns all the key-value pairs for the given address +func (ag *addressGroup) getKeyValuePairs(c *gin.Context) { + addr, options, err := extractBaseParams(c) if err != nil { shared.RespondWithValidationError(c, errors.ErrGetKeyValuePairs, err) return @@ -334,24 +328,12 @@ // getESDTBalance returns the balance for the given address and esdt token func (ag *addressGroup) getESDTBalance(c *gin.Context) { - addr := c.Param("address") - if addr == "" { - shared.RespondWithValidationError(c, errors.ErrGetESDTBalance, errors.ErrEmptyAddress) - return - } - - options, err := extractAccountQueryOptions(c) + addr, tokenIdentifier, options, err := extractGetESDTBalanceParams(c) if err != nil { shared.RespondWithValidationError(c, errors.ErrGetESDTBalance, err) return } - tokenIdentifier := c.Param("tokenIdentifier") - if tokenIdentifier == "" { - shared.RespondWithValidationError(c, errors.ErrGetESDTBalance, errors.ErrEmptyTokenIdentifier) - return - } - esdtData, blockInfo, err := ag.getFacade().GetESDTData(addr, tokenIdentifier, 0, options) if err != nil { shared.RespondWithInternalError(c, errors.ErrGetESDTBalance, err) @@ -369,13 +351,7 @@ // getESDTsRoles returns the token identifiers and roles for a given address func (ag *addressGroup) getESDTsRoles(c *gin.Context) { - addr := c.Param("address") - if addr == "" { - shared.RespondWithValidationError(c, errors.ErrGetRolesForAccount, errors.ErrEmptyAddress) - return - } - - options, err := extractAccountQueryOptions(c) + addr, options, err := extractBaseParams(c) if err != nil { shared.RespondWithValidationError(c, errors.ErrGetRolesForAccount, err) return @@ -392,32 +368,15 @@ // getESDTTokensWithRole returns the token identifiers where a given address has the given role func (ag *addressGroup) getESDTTokensWithRole(c *gin.Context) { - addr := c.Param("address") - if addr == "" { - shared.RespondWithValidationError(c, errors.ErrGetESDTBalance, errors.ErrEmptyAddress) - return - } - - options, err := extractAccountQueryOptions(c) + addr, role, options, err := extractGetESDTTokensWithRoleParams(c) if err != nil { - shared.RespondWithValidationError(c, errors.ErrGetESDTBalance, err) - return - } - - role := c.Param("role") - if role == "" { - shared.RespondWithValidationError(c, errors.ErrGetESDTBalance, errors.ErrEmptyRole) - return - } - - if !core.IsValidESDTRole(role) { - shared.RespondWithValidationError(c, errors.ErrGetESDTBalance, fmt.Errorf("invalid role: %s", role)) + shared.RespondWithValidationError(c, errors.ErrGetESDTTokensWithRole, err) return } tokens, blockInfo, err := ag.getFacade().GetESDTsWithRole(addr, role, options) if err != nil { - shared.RespondWithInternalError(c, errors.ErrGetESDTBalance, err) + shared.RespondWithInternalError(c, errors.ErrGetESDTTokensWithRole, err) return } @@ -426,21 +385,15 @@ // getNFTTokenIDsRegisteredByAddress returns the token identifiers of the tokens where a given address is the owner func (ag *addressGroup) getNFTTokenIDsRegisteredByAddress(c *gin.Context) { - addr := c.Param("address") - if addr == "" { - shared.RespondWithValidationError(c, 
errors.ErrGetESDTBalance, errors.ErrEmptyAddress) - return - } - - options, err := extractAccountQueryOptions(c) + addr, options, err := extractBaseParams(c) if err != nil { - shared.RespondWithValidationError(c, errors.ErrGetESDTBalance, err) + shared.RespondWithValidationError(c, errors.ErrRegisteredNFTTokenIDs, err) return } tokens, blockInfo, err := ag.getFacade().GetNFTTokenIDsRegisteredByAddress(addr, options) if err != nil { - shared.RespondWithInternalError(c, errors.ErrGetESDTBalance, err) + shared.RespondWithInternalError(c, errors.ErrRegisteredNFTTokenIDs, err) return } @@ -449,37 +402,13 @@ func (ag *addressGroup) getNFTTokenIDsRegisteredByAddress(c *gin.Context) { // getESDTNFTData returns the nft data for the given token func (ag *addressGroup) getESDTNFTData(c *gin.Context) { - addr := c.Param("address") - if addr == "" { - shared.RespondWithValidationError(c, errors.ErrGetESDTNFTData, errors.ErrEmptyAddress) - return - } - - options, err := extractAccountQueryOptions(c) + addr, tokenIdentifier, nonce, options, err := extractGetESDTNFTDataParams(c) if err != nil { shared.RespondWithValidationError(c, errors.ErrGetESDTNFTData, err) return } - tokenIdentifier := c.Param("tokenIdentifier") - if tokenIdentifier == "" { - shared.RespondWithValidationError(c, errors.ErrGetESDTNFTData, errors.ErrEmptyTokenIdentifier) - return - } - - nonceAsStr := c.Param("nonce") - if nonceAsStr == "" { - shared.RespondWithValidationError(c, errors.ErrGetESDTNFTData, errors.ErrNonceInvalid) - return - } - - nonceAsBigInt, okConvert := big.NewInt(0).SetString(nonceAsStr, 10) - if !okConvert { - shared.RespondWithValidationError(c, errors.ErrGetESDTNFTData, errors.ErrNonceInvalid) - return - } - - esdtData, blockInfo, err := ag.getFacade().GetESDTData(addr, tokenIdentifier, nonceAsBigInt.Uint64(), options) + esdtData, blockInfo, err := ag.getFacade().GetESDTData(addr, tokenIdentifier, nonce.Uint64(), options) if err != nil { shared.RespondWithInternalError(c, errors.ErrGetESDTNFTData, err) return @@ -491,13 +420,7 @@ func (ag *addressGroup) getESDTNFTData(c *gin.Context) { // getAllESDTData returns the tokens list from this account func (ag *addressGroup) getAllESDTData(c *gin.Context) { - addr := c.Param("address") - if addr == "" { - shared.RespondWithValidationError(c, errors.ErrGetESDTNFTData, errors.ErrEmptyAddress) - return - } - - options, err := extractAccountQueryOptions(c) + addr, options, err := extractBaseParams(c) if err != nil { shared.RespondWithValidationError(c, errors.ErrGetESDTNFTData, err) return @@ -545,6 +468,76 @@ func (ag *addressGroup) getFacade() addressFacadeHandler { return ag.facade } +func extractBaseParams(c *gin.Context) (string, api.AccountQueryOptions, error) { + addr := c.Param("address") + if addr == "" { + return "", api.AccountQueryOptions{}, errors.ErrEmptyAddress + } + + options, err := extractAccountQueryOptions(c) + if err != nil { + return "", api.AccountQueryOptions{}, err + } + + return addr, options, nil +} + +func extractGetESDTBalanceParams(c *gin.Context) (string, string, api.AccountQueryOptions, error) { + addr, options, err := extractBaseParams(c) + if err != nil { + return "", "", api.AccountQueryOptions{}, err + } + + tokenIdentifier := c.Param("tokenIdentifier") + if tokenIdentifier == "" { + return "", "", api.AccountQueryOptions{}, errors.ErrEmptyTokenIdentifier + } + + return addr, tokenIdentifier, options, nil +} + +func extractGetESDTTokensWithRoleParams(c *gin.Context) (string, string, api.AccountQueryOptions, error) { + addr, options, err 
:= extractBaseParams(c) + if err != nil { + return "", "", api.AccountQueryOptions{}, err + } + + role := c.Param("role") + if role == "" { + return "", "", api.AccountQueryOptions{}, errors.ErrEmptyRole + } + + if !core.IsValidESDTRole(role) { + return "", "", api.AccountQueryOptions{}, fmt.Errorf("%w: %s", errors.ErrInvalidRole, role) + } + + return addr, role, options, nil +} + +func extractGetESDTNFTDataParams(c *gin.Context) (string, string, *big.Int, api.AccountQueryOptions, error) { + addr, options, err := extractBaseParams(c) + if err != nil { + return "", "", nil, api.AccountQueryOptions{}, err + } + + tokenIdentifier := c.Param("tokenIdentifier") + if tokenIdentifier == "" { + return "", "", nil, api.AccountQueryOptions{}, errors.ErrEmptyTokenIdentifier + } + + nonceAsStr := c.Param("nonce") + if nonceAsStr == "" { + return "", "", nil, api.AccountQueryOptions{}, errors.ErrNonceInvalid + } + + nonceAsBigInt, okConvert := big.NewInt(0).SetString(nonceAsStr, 10) + if !okConvert { + return "", "", nil, api.AccountQueryOptions{}, errors.ErrNonceInvalid + } + + return addr, tokenIdentifier, nonceAsBigInt, options, nil +} + // UpdateFacade will update the facade func (ag *addressGroup) UpdateFacade(newFacade interface{}) error { if newFacade == nil { diff --git a/api/groups/addressGroupOptions_test.go b/api/groups/addressGroupOptions_test.go index 0c546641f19..e27b8b4294d 100644 --- a/api/groups/addressGroupOptions_test.go +++ b/api/groups/addressGroupOptions_test.go @@ -5,12 +5,17 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/data/api" + "github.com/multiversx/mx-chain-go/api/errors" "github.com/multiversx/mx-chain-go/testscommon" "github.com/stretchr/testify/require" ) func TestExtractAccountQueryOptions(t *testing.T) { + t.Parallel() + t.Run("good options", func(t *testing.T) { + t.Parallel() + options, err := extractAccountQueryOptions(testscommon.CreateGinContextWithRawQuery("onFinalBlock=true")) require.Nil(t, err) require.True(t, options.OnFinalBlock) @@ -38,6 +43,8 @@ func TestExtractAccountQueryOptions(t *testing.T) { }) t.Run("bad options", func(t *testing.T) { + t.Parallel() + options, err := extractAccountQueryOptions(testscommon.CreateGinContextWithRawQuery("blockNonce=42&blockHash=aaaa")) require.ErrorContains(t, err, "only one block coordinate") require.Equal(t, api.AccountQueryOptions{}, options) @@ -62,5 +69,46 @@ func TestExtractAccountQueryOptions(t *testing.T) { require.ErrorContains(t, err, "hintEpoch is optional, but only compatible with blockRootHash") require.Equal(t, api.AccountQueryOptions{}, options) + options, err = extractAccountQueryOptions(testscommon.CreateGinContextWithRawQuery("blockNonce=aaaa")) + require.ErrorContains(t, err, errors.ErrBadUrlParams.Error()) + require.Equal(t, api.AccountQueryOptions{}, options) }) } + +func TestParseAccountQueryOptions(t *testing.T) { + t.Parallel() + + options, err := parseAccountQueryOptions(testscommon.CreateGinContextWithRawQuery("onFinalBlock=test")) + require.Error(t, err) + require.Contains(t, err.Error(), "invalid syntax") + require.Equal(t, api.AccountQueryOptions{}, options) + + options, err = parseAccountQueryOptions(testscommon.CreateGinContextWithRawQuery("onStartOfEpoch=test")) + require.Error(t, err) + require.Contains(t, err.Error(), "invalid syntax") + require.Equal(t, api.AccountQueryOptions{}, options) + + options, err = parseAccountQueryOptions(testscommon.CreateGinContextWithRawQuery("blockNonce=test")) + require.Error(t, err) + 
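An illustrative aside, not part of the diff: the "invalid syntax" and "invalid byte" substrings asserted in TestParseAccountQueryOptions are standard-library parser errors, presumably surfaced from parsing the numeric and hex-encoded URL options.

package main

import (
	"encoding/hex"
	"fmt"
	"strconv"
)

func main() {
	_, err := strconv.ParseUint("test", 10, 64) // blockNonce-style numeric option
	fmt.Println(err)                            // strconv.ParseUint: parsing "test": invalid syntax

	_, err = hex.DecodeString("test") // blockHash/blockRootHash-style hex option
	fmt.Println(err)                  // encoding/hex: invalid byte: U+0074 't'
}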
require.Contains(t, err.Error(), "invalid syntax") + require.Equal(t, api.AccountQueryOptions{}, options) + + options, err = parseAccountQueryOptions(testscommon.CreateGinContextWithRawQuery("blockHash=test")) + require.Error(t, err) + require.Contains(t, err.Error(), "invalid byte") + require.Equal(t, api.AccountQueryOptions{}, options) + + options, err = parseAccountQueryOptions(testscommon.CreateGinContextWithRawQuery("blockRootHash=test")) + require.Error(t, err) + require.Contains(t, err.Error(), "invalid byte") + require.Equal(t, api.AccountQueryOptions{}, options) + + options, err = parseAccountQueryOptions(testscommon.CreateGinContextWithRawQuery("hintEpoch=test")) + require.Error(t, err) + require.Contains(t, err.Error(), "invalid syntax") + require.Equal(t, api.AccountQueryOptions{}, options) + + options, err = parseAccountQueryOptions(testscommon.CreateGinContextWithRawQuery("")) + require.NoError(t, err) + require.Equal(t, api.AccountQueryOptions{}, options) +} diff --git a/api/groups/addressGroup_test.go b/api/groups/addressGroup_test.go index 3de3f73d2f7..ac4fd92def2 100644 --- a/api/groups/addressGroup_test.go +++ b/api/groups/addressGroup_test.go @@ -6,13 +6,13 @@ import ( "encoding/json" "errors" "fmt" + "io" "math/big" "net/http" "net/http/httptest" "strings" "testing" - "github.com/gin-gonic/gin" "github.com/multiversx/mx-chain-core-go/data/api" "github.com/multiversx/mx-chain-core-go/data/esdt" apiErrors "github.com/multiversx/mx-chain-go/api/errors" @@ -24,12 +24,6 @@ import ( "github.com/stretchr/testify/require" ) -type wrappedAcountResponse struct { - Data accountResponse `json:"data"` - Error string `json:"error"` - Code string `json:"code"` -} - type accountResponse struct { Account struct { Address string `json:"address"` @@ -95,6 +89,16 @@ type esdtTokenResponse struct { Code string `json:"code"` } +type guardianDataResponseData struct { + GuardianData api.GuardianData `json:"guardianData"` +} + +type guardianDataResponse struct { + Data guardianDataResponseData `json:"data"` + Error string `json:"error"` + Code string `json:"code"` +} + type esdtNFTResponse struct { Data esdtNFTResponseData `json:"data"` Error string `json:"error"` @@ -182,180 +186,133 @@ func TestAddressRoute_EmptyTrailReturns404(t *testing.T) { assert.Equal(t, http.StatusNotFound, resp.Code) } -func getValueForKey(dataFromResponse interface{}, key string) string { - dataMap, ok := dataFromResponse.(map[string]interface{}) - if !ok { - return "" - } - - valueI, okCast := dataMap[key] - if okCast { - return fmt.Sprintf("%v", valueI) - } - return "" -} - -func TestGetBalance_WithCorrectAddressShouldNotReturnError(t *testing.T) { - t.Parallel() - - amount := big.NewInt(10) - addr := "testAddress" - facade := mock.FacadeStub{ - GetBalanceCalled: func(s string, _ api.AccountQueryOptions) (i *big.Int, info api.BlockInfo, e error) { - return amount, api.BlockInfo{}, nil - }, - } - - addrGroup, err := groups.NewAddressGroup(&facade) - require.NoError(t, err) - - ws := startWebServer(addrGroup, "address", getAddressRoutesConfig()) - - req, _ := http.NewRequest("GET", fmt.Sprintf("/address/%s/balance", addr), nil) - resp := httptest.NewRecorder() - ws.ServeHTTP(resp, req) - - response := shared.GenericAPIResponse{} - loadResponse(resp.Body, &response) - assert.Equal(t, http.StatusOK, resp.Code) - - balanceStr := getValueForKey(response.Data, "balance") - balanceResponse, ok := big.NewInt(0).SetString(balanceStr, 10) - assert.True(t, ok) - assert.Equal(t, amount, balanceResponse) - assert.Equal(t, "", 
response.Error) -} - -func TestGetBalance_WithWrongAddressShouldError(t *testing.T) { +func TestAddressGroup_getAccount(t *testing.T) { t.Parallel() - otherAddress := "otherAddress" - facade := mock.FacadeStub{ - GetBalanceCalled: func(s string, _ api.AccountQueryOptions) (i *big.Int, info api.BlockInfo, e error) { - return big.NewInt(0), api.BlockInfo{}, nil - }, - } - addrGroup, err := groups.NewAddressGroup(&facade) - require.NoError(t, err) - - ws := startWebServer(addrGroup, "address", getAddressRoutesConfig()) + t.Run("invalid query options should error", + testErrorScenario("/address/erd1alice?blockNonce=not-uint64", "GET", nil, + formatExpectedErr(apiErrors.ErrCouldNotGetAccount, apiErrors.ErrBadUrlParams))) + t.Run("facade error should error", func(t *testing.T) { + t.Parallel() - req, _ := http.NewRequest("GET", fmt.Sprintf("/address/%s/balance", otherAddress), nil) - resp := httptest.NewRecorder() - ws.ServeHTTP(resp, req) + facade := &mock.FacadeStub{ + GetAccountCalled: func(address string, options api.AccountQueryOptions) (api.AccountResponse, api.BlockInfo, error) { + return api.AccountResponse{}, api.BlockInfo{}, expectedErr + }, + } - response := shared.GenericAPIResponse{} - loadResponse(resp.Body, &response) - assert.Equal(t, http.StatusOK, resp.Code) - assert.Equal(t, "", response.Error) -} + testAddressGroup( + t, + facade, + "/address/addr", + "GET", + nil, + http.StatusInternalServerError, + formatExpectedErr(apiErrors.ErrCouldNotGetAccount, expectedErr), + ) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() -func TestGetBalance_NodeGetBalanceReturnsError(t *testing.T) { - t.Parallel() - addr := "addr" - balanceError := errors.New("error") - facade := mock.FacadeStub{ - GetBalanceCalled: func(s string, _ api.AccountQueryOptions) (i *big.Int, info api.BlockInfo, e error) { - return nil, api.BlockInfo{}, balanceError - }, - } + facade := &mock.FacadeStub{ + GetAccountCalled: func(address string, options api.AccountQueryOptions) (api.AccountResponse, api.BlockInfo, error) { + return api.AccountResponse{ + Address: "addr", + Balance: big.NewInt(100).String(), + Nonce: 1, + DeveloperReward: big.NewInt(120).String(), + }, api.BlockInfo{}, nil + }, + } - addrGroup, err := groups.NewAddressGroup(&facade) - require.NoError(t, err) + response := &shared.GenericAPIResponse{} + loadAddressGroupResponse(t, facade, "/address/addr", "GET", nil, response) - ws := startWebServer(addrGroup, "address", getAddressRoutesConfig()) + mapResponse := response.Data.(map[string]interface{}) + accResp := accountResponse{} - req, _ := http.NewRequest("GET", fmt.Sprintf("/address/%s/balance", addr), nil) - resp := httptest.NewRecorder() - ws.ServeHTTP(resp, req) + mapResponseBytes, _ := json.Marshal(&mapResponse) + _ = json.Unmarshal(mapResponseBytes, &accResp) - response := shared.GenericAPIResponse{} - loadResponse(resp.Body, &response) - assert.Equal(t, http.StatusInternalServerError, resp.Code) - assert.Equal(t, fmt.Sprintf("%s: %s", apiErrors.ErrGetBalance.Error(), balanceError.Error()), response.Error) + assert.Equal(t, "addr", accResp.Account.Address) + assert.Equal(t, uint64(1), accResp.Account.Nonce) + assert.Equal(t, "100", accResp.Account.Balance) + assert.Equal(t, "120", accResp.Account.DeveloperReward) + assert.Empty(t, response.Error) + }) } -func TestGetBalance_WithEmptyAddressShouldReturnError(t *testing.T) { +func TestAddressGroup_getBalance(t *testing.T) { t.Parallel() - facade := mock.FacadeStub{ - GetBalanceCalled: func(s string, _ api.AccountQueryOptions) (i 
*big.Int, info api.BlockInfo, e error) { - return big.NewInt(0), api.BlockInfo{}, errors.New("address was empty") - }, - } - emptyAddress := "" + t.Run("empty address should error", + testErrorScenario("/address//balance", "GET", nil, + formatExpectedErr(apiErrors.ErrGetBalance, apiErrors.ErrEmptyAddress))) + t.Run("invalid query options should error", + testErrorScenario("/address/erd1alice/balance?blockNonce=not-uint64", "GET", nil, + formatExpectedErr(apiErrors.ErrGetBalance, apiErrors.ErrBadUrlParams))) + t.Run("facade error should error", func(t *testing.T) { + t.Parallel() - addrGroup, err := groups.NewAddressGroup(&facade) - require.NoError(t, err) + facade := &mock.FacadeStub{ + GetBalanceCalled: func(s string, _ api.AccountQueryOptions) (i *big.Int, info api.BlockInfo, e error) { + return nil, api.BlockInfo{}, expectedErr + }, + } - ws := startWebServer(addrGroup, "address", getAddressRoutesConfig()) + testAddressGroup( + t, + facade, + "/address/erd1alice/balance", + "GET", + nil, + http.StatusInternalServerError, + formatExpectedErr(apiErrors.ErrGetBalance, expectedErr), + ) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() - req, _ := http.NewRequest("GET", fmt.Sprintf("/address/%s/balance", emptyAddress), nil) - resp := httptest.NewRecorder() - ws.ServeHTTP(resp, req) + amount := big.NewInt(10) + addr := "testAddress" + facade := &mock.FacadeStub{ + GetBalanceCalled: func(s string, _ api.AccountQueryOptions) (i *big.Int, info api.BlockInfo, e error) { + return amount, api.BlockInfo{}, nil + }, + } - response := shared.GenericAPIResponse{} - loadResponse(resp.Body, &response) - assert.Equal(t, http.StatusBadRequest, resp.Code) - assert.NotEmpty(t, response) - assert.True(t, strings.Contains(response.Error, - fmt.Sprintf("%s: %s", apiErrors.ErrGetBalance.Error(), apiErrors.ErrEmptyAddress.Error()), - )) + response := &shared.GenericAPIResponse{} + loadAddressGroupResponse( + t, + facade, + fmt.Sprintf("/address/%s/balance", addr), + "GET", + nil, + response, + ) + + balanceStr := getValueForKey(response.Data, "balance") + balanceResponse, ok := big.NewInt(0).SetString(balanceStr, 10) + assert.True(t, ok) + assert.Equal(t, amount, balanceResponse) + assert.Equal(t, "", response.Error) + }) } -func TestGetValueForKey_NodeFailsShouldError(t *testing.T) { - t.Parallel() - - testAddress := "address" - expectedErr := errors.New("expected error") - facade := mock.FacadeStub{ - GetValueForKeyCalled: func(_ string, _ string, _ api.AccountQueryOptions) (string, api.BlockInfo, error) { - return "", api.BlockInfo{}, expectedErr - }, +func getValueForKey(dataFromResponse interface{}, key string) string { + dataMap, ok := dataFromResponse.(map[string]interface{}) + if !ok { + return "" } - addrGroup, err := groups.NewAddressGroup(&facade) - require.NoError(t, err) - - ws := startWebServer(addrGroup, "address", getAddressRoutesConfig()) - - req, _ := http.NewRequest("GET", fmt.Sprintf("/address/%s/key/test", testAddress), nil) - resp := httptest.NewRecorder() - ws.ServeHTTP(resp, req) - - valueForKeyResponseObj := valueForKeyResponse{} - loadResponse(resp.Body, &valueForKeyResponseObj) - assert.Equal(t, http.StatusInternalServerError, resp.Code) - assert.True(t, strings.Contains(valueForKeyResponseObj.Error, expectedErr.Error())) -} - -func TestGetValueForKey_ShouldWork(t *testing.T) { - t.Parallel() - - testAddress := "address" - testValue := "value" - facade := mock.FacadeStub{ - GetValueForKeyCalled: func(_ string, _ string, _ api.AccountQueryOptions) (string, api.BlockInfo, 
error) { - return testValue, api.BlockInfo{}, nil - }, + valueI, okCast := dataMap[key] + if okCast { + return fmt.Sprintf("%v", valueI) } - - addrGroup, err := groups.NewAddressGroup(&facade) - require.NoError(t, err) - - ws := startWebServer(addrGroup, "address", getAddressRoutesConfig()) - - req, _ := http.NewRequest("GET", fmt.Sprintf("/address/%s/key/test", testAddress), nil) - resp := httptest.NewRecorder() - ws.ServeHTTP(resp, req) - - valueForKeyResponseObj := valueForKeyResponse{} - loadResponse(resp.Body, &valueForKeyResponseObj) - assert.Equal(t, http.StatusOK, resp.Code) - assert.Equal(t, testValue, valueForKeyResponseObj.Data.Value) + return "" } -func TestGetAccounts(t *testing.T) { +func TestAddressGroup_getAccounts(t *testing.T) { t.Parallel() t.Run("wrong request, should err", func(t *testing.T) { @@ -375,17 +332,18 @@ func TestGetAccounts(t *testing.T) { require.NotEmpty(t, response.Error) require.Equal(t, shared.ReturnCodeRequestError, response.Code) }) - + t.Run("invalid query options should error", + testErrorScenario("/address/bulk?blockNonce=not-uint64", "POST", bytes.NewBuffer([]byte(`["erd1", "erd1"]`)), + formatExpectedErr(apiErrors.ErrCouldNotGetAccount, apiErrors.ErrBadUrlParams))) t.Run("facade error, should err", func(t *testing.T) { t.Parallel() - expectedErr := errors.New("expected error") - facade := &mock.FacadeStub{ + facade := mock.FacadeStub{ GetAccountsCalled: func(_ []string, _ api.AccountQueryOptions) (map[string]*api.AccountResponse, api.BlockInfo, error) { return nil, api.BlockInfo{}, expectedErr }, } - addrGroup, _ := groups.NewAddressGroup(facade) + addrGroup, _ := groups.NewAddressGroup(&facade) ws := startWebServer(addrGroup, "address", getAddressRoutesConfig()) @@ -398,7 +356,6 @@ func TestGetAccounts(t *testing.T) { require.NotEmpty(t, response.Error) require.Equal(t, shared.ReturnCodeInternalError, response.Code) }) - t.Run("should work", func(t *testing.T) { t.Parallel() @@ -414,13 +371,6 @@ func TestGetAccounts(t *testing.T) { return expectedAccounts, api.BlockInfo{}, nil }, } - addrGroup, _ := groups.NewAddressGroup(facade) - - ws := startWebServer(addrGroup, "address", getAddressRoutesConfig()) - - req, _ := http.NewRequest("POST", "/address/bulk", bytes.NewBuffer([]byte(`["erd1", "erd1"]`))) - resp := httptest.NewRecorder() - ws.ServeHTTP(resp, req) type responseType struct { Data struct { @@ -429,748 +379,756 @@ func TestGetAccounts(t *testing.T) { Error string `json:"error"` Code shared.ReturnCode `json:"code"` } - response := responseType{} - loadResponse(resp.Body, &response) + response := &responseType{} + loadAddressGroupResponse( + t, + facade, + "/address/bulk", + "POST", + bytes.NewBuffer([]byte(`["erd1", "erd1"]`)), + response, + ) + require.Empty(t, response.Error) require.Equal(t, shared.ReturnCodeSuccess, response.Code) require.Equal(t, expectedAccounts, response.Data.Accounts) }) } -func TestGetUsername_NodeFailsShouldError(t *testing.T) { +func TestAddressGroup_getUsername(t *testing.T) { t.Parallel() - testAddress := "address" - expectedErr := errors.New("expected error") - facade := mock.FacadeStub{ - GetUsernameCalled: func(_ string, _ api.AccountQueryOptions) (string, api.BlockInfo, error) { - return "", api.BlockInfo{}, expectedErr - }, - } + t.Run("empty address should error", + testErrorScenario("/address//username", "GET", nil, + formatExpectedErr(apiErrors.ErrGetUsername, apiErrors.ErrEmptyAddress))) + t.Run("invalid query options should error", + 
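An illustrative aside, not part of the diff: testErrorScenario, formatExpectedErr, testAddressGroup, loadAddressGroupResponse and expectedErr are defined elsewhere in the test package; the sketch below shows plausible shapes inferred from the call sites, written as assumptions rather than the actual helpers (startWebServer, getAddressRoutesConfig and loadResponse are the existing helpers already used in this file).

package groups_test

import (
	"errors"
	"fmt"
	"io"
	"net/http"
	"net/http/httptest"
	"testing"

	"github.com/multiversx/mx-chain-go/api/groups"
	"github.com/multiversx/mx-chain-go/api/mock"
	"github.com/multiversx/mx-chain-go/api/shared"
	"github.com/stretchr/testify/require"
)

// expectedErr is the shared sentinel returned by the failing facade stubs.
var expectedErr = errors.New("expected error")

// formatExpectedErr joins an outer API error and its cause the way the
// handlers format them: "<outer>: <inner>".
func formatExpectedErr(err error, innerErr error) string {
	return fmt.Sprintf("%s: %s", err.Error(), innerErr.Error())
}

// testErrorScenario wraps testAddressGroup for validation failures, where a
// default facade suffices and a 400 status is expected.
func testErrorScenario(url string, method string, body io.Reader, expectedRespError string) func(t *testing.T) {
	return func(t *testing.T) {
		t.Parallel()
		testAddressGroup(t, &mock.FacadeStub{}, url, method, body, http.StatusBadRequest, expectedRespError)
	}
}

// testAddressGroup fires one request against a freshly built address group
// and asserts the response code and error message.
func testAddressGroup(
	t *testing.T,
	facade shared.FacadeHandler,
	url string,
	method string,
	body io.Reader,
	expectedRespCode int,
	expectedRespError string,
) {
	addrGroup, err := groups.NewAddressGroup(facade)
	require.NoError(t, err)

	ws := startWebServer(addrGroup, "address", getAddressRoutesConfig())
	req, _ := http.NewRequest(method, url, body)
	resp := httptest.NewRecorder()
	ws.ServeHTTP(resp, req)

	response := shared.GenericAPIResponse{}
	loadResponse(resp.Body, &response)
	require.Equal(t, expectedRespCode, resp.Code)
	require.Contains(t, response.Error, expectedRespError)
}

// loadAddressGroupResponse does the same round trip for the happy path and
// decodes the body into the caller's response struct.
func loadAddressGroupResponse(
	t *testing.T,
	facade shared.FacadeHandler,
	url string,
	method string,
	body io.Reader,
	destination interface{},
) {
	addrGroup, err := groups.NewAddressGroup(facade)
	require.NoError(t, err)

	ws := startWebServer(addrGroup, "address", getAddressRoutesConfig())
	req, _ := http.NewRequest(method, url, body)
	resp := httptest.NewRecorder()
	ws.ServeHTTP(resp, req)

	require.Equal(t, http.StatusOK, resp.Code)
	loadResponse(resp.Body, destination)
}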
testErrorScenario("/address/erd1alice/username?blockNonce=not-uint64", "GET", nil, + formatExpectedErr(apiErrors.ErrGetUsername, apiErrors.ErrBadUrlParams))) + t.Run("facade error should error", func(t *testing.T) { + t.Parallel() - addrGroup, err := groups.NewAddressGroup(&facade) - require.NoError(t, err) + facade := &mock.FacadeStub{ + GetUsernameCalled: func(_ string, _ api.AccountQueryOptions) (string, api.BlockInfo, error) { + return "", api.BlockInfo{}, expectedErr + }, + } - ws := startWebServer(addrGroup, "address", getAddressRoutesConfig()) + testAddressGroup( + t, + facade, + "/address/erd1alice/username", + "GET", + nil, + http.StatusInternalServerError, + formatExpectedErr(apiErrors.ErrGetUsername, expectedErr), + ) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() - req, _ := http.NewRequest("GET", fmt.Sprintf("/address/%s/username", testAddress), nil) - resp := httptest.NewRecorder() - ws.ServeHTTP(resp, req) + testUsername := "provided username" + facade := &mock.FacadeStub{ + GetUsernameCalled: func(_ string, _ api.AccountQueryOptions) (string, api.BlockInfo, error) { + return testUsername, api.BlockInfo{}, nil + }, + } - usernameResponseObj := usernameResponse{} - loadResponse(resp.Body, &usernameResponseObj) - assert.Equal(t, http.StatusInternalServerError, resp.Code) - assert.True(t, strings.Contains(usernameResponseObj.Error, expectedErr.Error())) + usernameResponseObj := &usernameResponse{} + loadAddressGroupResponse( + t, + facade, + "/address/erd1alice/username", + "GET", + nil, + usernameResponseObj, + ) + assert.Equal(t, testUsername, usernameResponseObj.Data.Username) + }) } -func TestGetUsername_ShouldWork(t *testing.T) { +func TestAddressGroup_getCodeHash(t *testing.T) { t.Parallel() - testAddress := "address" - testUsername := "value" - facade := mock.FacadeStub{ - GetUsernameCalled: func(_ string, _ api.AccountQueryOptions) (string, api.BlockInfo, error) { - return testUsername, api.BlockInfo{}, nil - }, - } + t.Run("empty address should error", + testErrorScenario("/address//code-hash", "GET", nil, + formatExpectedErr(apiErrors.ErrGetCodeHash, apiErrors.ErrEmptyAddress))) + t.Run("invalid query options should error", + testErrorScenario("/address/erd1alice/code-hash?blockNonce=not-uint64", "GET", nil, + formatExpectedErr(apiErrors.ErrGetCodeHash, apiErrors.ErrBadUrlParams))) + t.Run("facade error should error", func(t *testing.T) { + t.Parallel() - addrGroup, err := groups.NewAddressGroup(&facade) - require.NoError(t, err) + facade := &mock.FacadeStub{ + GetCodeHashCalled: func(_ string, _ api.AccountQueryOptions) ([]byte, api.BlockInfo, error) { + return nil, api.BlockInfo{}, expectedErr + }, + } - ws := startWebServer(addrGroup, "address", getAddressRoutesConfig()) + testAddressGroup( + t, + facade, + "/address/erd1alice/code-hash", + "GET", + nil, + http.StatusInternalServerError, + formatExpectedErr(apiErrors.ErrGetCodeHash, expectedErr), + ) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() - req, _ := http.NewRequest("GET", fmt.Sprintf("/address/%s/username", testAddress), nil) - resp := httptest.NewRecorder() - ws.ServeHTTP(resp, req) + testCodeHash := []byte("value") + expectedResponseCodeHash := base64.StdEncoding.EncodeToString(testCodeHash) + facade := &mock.FacadeStub{ + GetCodeHashCalled: func(_ string, _ api.AccountQueryOptions) ([]byte, api.BlockInfo, error) { + return testCodeHash, api.BlockInfo{}, nil + }, + } - usernameResponseObj := usernameResponse{} - loadResponse(resp.Body, &usernameResponseObj) - 
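An illustrative aside, not part of the diff: the code-hash tests compare against a base64 string because encoding/json serializes []byte fields as base64, so the raw code hash arrives base64-encoded in the response.

package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
)

func main() {
	payload := struct {
		CodeHash []byte `json:"codeHash"`
	}{CodeHash: []byte("value")}

	raw, _ := json.Marshal(payload)
	fmt.Println(string(raw)) // {"codeHash":"dmFsdWU="}

	// The tests build the expectation the same way:
	fmt.Println(base64.StdEncoding.EncodeToString([]byte("value"))) // dmFsdWU=
}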
assert.Equal(t, http.StatusOK, resp.Code) - assert.Equal(t, testUsername, usernameResponseObj.Data.Username) + codeHashResponseObj := &codeHashResponse{} + loadAddressGroupResponse( + t, + facade, + "/address/erd1alice/code-hash", + "GET", + nil, + codeHashResponseObj, + ) + assert.Equal(t, expectedResponseCodeHash, codeHashResponseObj.Data.CodeHash) + }) } -func TestGetCodeHash_NodeFailsShouldError(t *testing.T) { +func TestAddressGroup_getValueForKey(t *testing.T) { t.Parallel() - testAddress := "address" - expectedErr := errors.New("expected error") - facade := mock.FacadeStub{ - GetCodeHashCalled: func(_ string, _ api.AccountQueryOptions) ([]byte, api.BlockInfo, error) { - return nil, api.BlockInfo{}, expectedErr - }, - } + t.Run("empty address should error", + testErrorScenario("/address//key/test", "GET", nil, + formatExpectedErr(apiErrors.ErrGetValueForKey, apiErrors.ErrEmptyAddress))) + t.Run("invalid query options should error", + testErrorScenario("/address/erd1alice/key/test?blockNonce=not-uint64", "GET", nil, + formatExpectedErr(apiErrors.ErrGetValueForKey, apiErrors.ErrBadUrlParams))) + t.Run("facade error should error", func(t *testing.T) { + t.Parallel() - addrGroup, err := groups.NewAddressGroup(&facade) - require.NoError(t, err) + facade := &mock.FacadeStub{ + GetValueForKeyCalled: func(_ string, _ string, _ api.AccountQueryOptions) (string, api.BlockInfo, error) { + return "", api.BlockInfo{}, expectedErr + }, + } - ws := startWebServer(addrGroup, "address", getAddressRoutesConfig()) + testAddressGroup( + t, + facade, + "/address/erd1alice/key/test", + "GET", + nil, + http.StatusInternalServerError, + formatExpectedErr(apiErrors.ErrGetValueForKey, expectedErr), + ) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() - req, _ := http.NewRequest("GET", fmt.Sprintf("/address/%s/code-hash", testAddress), nil) - resp := httptest.NewRecorder() - ws.ServeHTTP(resp, req) + testValue := "value" + facade := &mock.FacadeStub{ + GetValueForKeyCalled: func(_ string, _ string, _ api.AccountQueryOptions) (string, api.BlockInfo, error) { + return testValue, api.BlockInfo{}, nil + }, + } - codeHashResponseObj := codeHashResponse{} - loadResponse(resp.Body, &codeHashResponseObj) - assert.Equal(t, http.StatusInternalServerError, resp.Code) - assert.True(t, strings.Contains(codeHashResponseObj.Error, expectedErr.Error())) + valueForKeyResponseObj := &valueForKeyResponse{} + loadAddressGroupResponse( + t, + facade, + "/address/erd1alice/key/test", + "GET", + nil, + valueForKeyResponseObj, + ) + assert.Equal(t, testValue, valueForKeyResponseObj.Data.Value) + }) } -func TestGetCodeHash_ShouldWork(t *testing.T) { +func TestAddressGroup_getGuardianData(t *testing.T) { t.Parallel() - testAddress := "address" - testCodeHash := []byte("value") - expectedResponseCodeHash := base64.StdEncoding.EncodeToString(testCodeHash) - facade := mock.FacadeStub{ - GetCodeHashCalled: func(_ string, _ api.AccountQueryOptions) ([]byte, api.BlockInfo, error) { - return testCodeHash, api.BlockInfo{}, nil - }, - } - - addrGroup, err := groups.NewAddressGroup(&facade) - require.NoError(t, err) + t.Run("empty address should error", + testErrorScenario("/address//guardian-data", "GET", nil, + formatExpectedErr(apiErrors.ErrGetGuardianData, apiErrors.ErrEmptyAddress))) + t.Run("invalid query options should error", + testErrorScenario("/address/erd1alice/guardian-data?blockNonce=not-uint64", "GET", nil, + formatExpectedErr(apiErrors.ErrGetGuardianData, apiErrors.ErrBadUrlParams))) + t.Run("with node fail should 
err", func(t *testing.T) { + t.Parallel() - ws := startWebServer(addrGroup, "address", getAddressRoutesConfig()) + facade := &mock.FacadeStub{ + GetGuardianDataCalled: func(address string, options api.AccountQueryOptions) (api.GuardianData, api.BlockInfo, error) { + return api.GuardianData{}, api.BlockInfo{}, expectedErr + }, + } + testAddressGroup( + t, + facade, + "/address/erd1alice/guardian-data", + "GET", + nil, + http.StatusInternalServerError, + formatExpectedErr(apiErrors.ErrGetGuardianData, expectedErr), + ) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() - req, _ := http.NewRequest("GET", fmt.Sprintf("/address/%s/code-hash", testAddress), nil) - resp := httptest.NewRecorder() - ws.ServeHTTP(resp, req) + expectedGuardianData := api.GuardianData{ + ActiveGuardian: &api.Guardian{ + Address: "guardian1", + ActivationEpoch: 0, + }, + PendingGuardian: &api.Guardian{ + Address: "guardian2", + ActivationEpoch: 10, + }, + Guarded: true, + } + facade := &mock.FacadeStub{ + GetGuardianDataCalled: func(address string, options api.AccountQueryOptions) (api.GuardianData, api.BlockInfo, error) { + return expectedGuardianData, api.BlockInfo{}, nil + }, + } - codeHashResponseObj := codeHashResponse{} - loadResponse(resp.Body, &codeHashResponseObj) - assert.Equal(t, http.StatusOK, resp.Code) - assert.Equal(t, expectedResponseCodeHash, codeHashResponseObj.Data.CodeHash) + response := &guardianDataResponse{} + loadAddressGroupResponse( + t, + facade, + "/address/erd1alice/guardian-data", + "GET", + nil, + response, + ) + assert.Equal(t, expectedGuardianData, response.Data.GuardianData) + }) } -func TestGetAccount_FailWhenFacadeStubGetAccountFails(t *testing.T) { +func TestAddressGroup_getKeyValuePairs(t *testing.T) { t.Parallel() - returnedError := "i am an error" - facade := mock.FacadeStub{ - GetAccountCalled: func(address string, _ api.AccountQueryOptions) (api.AccountResponse, api.BlockInfo, error) { - return api.AccountResponse{}, api.BlockInfo{}, errors.New(returnedError) - }, - } - - addrGroup, err := groups.NewAddressGroup(&facade) - require.NoError(t, err) + t.Run("empty address should error", + testErrorScenario("/address//keys", "GET", nil, + formatExpectedErr(apiErrors.ErrGetKeyValuePairs, apiErrors.ErrEmptyAddress))) + t.Run("invalid query options should error", + testErrorScenario("/address/erd1alice/keys?blockNonce=not-uint64", "GET", nil, + formatExpectedErr(apiErrors.ErrGetKeyValuePairs, apiErrors.ErrBadUrlParams))) + t.Run("with node fail should err", func(t *testing.T) { + t.Parallel() - ws := startWebServer(addrGroup, "address", getAddressRoutesConfig()) + facade := &mock.FacadeStub{ + GetKeyValuePairsCalled: func(_ string, _ api.AccountQueryOptions) (map[string]string, api.BlockInfo, error) { + return nil, api.BlockInfo{}, expectedErr + }, + } + testAddressGroup( + t, + facade, + "/address/erd1alice/keys", + "GET", + nil, + http.StatusInternalServerError, + formatExpectedErr(apiErrors.ErrGetKeyValuePairs, expectedErr), + ) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() - req, _ := http.NewRequest("GET", "/address/test", nil) - resp := httptest.NewRecorder() - ws.ServeHTTP(resp, req) + pairs := map[string]string{ + "k1": "v1", + "k2": "v2", + } + facade := &mock.FacadeStub{ + GetKeyValuePairsCalled: func(_ string, _ api.AccountQueryOptions) (map[string]string, api.BlockInfo, error) { + return pairs, api.BlockInfo{}, nil + }, + } - response := shared.GenericAPIResponse{} - loadResponse(resp.Body, &response) - assert.Equal(t, 
http.StatusInternalServerError, resp.Code) - assert.Empty(t, response.Data) - assert.NotEmpty(t, response.Error) - assert.True(t, strings.Contains(response.Error, fmt.Sprintf("%s: %s", apiErrors.ErrCouldNotGetAccount.Error(), returnedError))) + response := &keyValuePairsResponse{} + loadAddressGroupResponse( + t, + facade, + "/address/erd1alice/keys", + "GET", + nil, + response, + ) + assert.Equal(t, pairs, response.Data.Pairs) + }) } -func TestGetAccount_ReturnsSuccessfully(t *testing.T) { +func TestAddressGroup_getESDTBalance(t *testing.T) { t.Parallel() - facade := mock.FacadeStub{ - GetAccountCalled: func(address string, _ api.AccountQueryOptions) (api.AccountResponse, api.BlockInfo, error) { - return api.AccountResponse{ - Address: "1234", - Balance: big.NewInt(100).String(), - Nonce: 1, - DeveloperReward: big.NewInt(120).String(), - }, api.BlockInfo{}, nil - }, - } + t.Run("empty address should error", + testErrorScenario("/address//esdt/newToken", "GET", nil, + formatExpectedErr(apiErrors.ErrGetESDTBalance, apiErrors.ErrEmptyAddress))) + t.Run("invalid query options should error", + testErrorScenario("/address/erd1alice/esdt/newToken?blockNonce=not-uint64", "GET", nil, + formatExpectedErr(apiErrors.ErrGetESDTBalance, apiErrors.ErrBadUrlParams))) + t.Run("with node fail should err", func(t *testing.T) { + t.Parallel() - addrGroup, err := groups.NewAddressGroup(&facade) - require.NoError(t, err) + facade := &mock.FacadeStub{ + GetESDTDataCalled: func(_ string, _ string, _ uint64, _ api.AccountQueryOptions) (*esdt.ESDigitalToken, api.BlockInfo, error) { + return &esdt.ESDigitalToken{}, api.BlockInfo{}, expectedErr + }, + } + testAddressGroup( + t, + facade, + "/address/erd1alice/esdt/newToken", + "GET", + nil, + http.StatusInternalServerError, + formatExpectedErr(apiErrors.ErrGetESDTBalance, expectedErr), + ) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() - ws := startWebServer(addrGroup, "address", getAddressRoutesConfig()) + testValue := big.NewInt(100).String() + testProperties := []byte{byte(0), byte(1), byte(0)} + facade := &mock.FacadeStub{ + GetESDTDataCalled: func(_ string, _ string, _ uint64, _ api.AccountQueryOptions) (*esdt.ESDigitalToken, api.BlockInfo, error) { + return &esdt.ESDigitalToken{Value: big.NewInt(100), Properties: testProperties}, api.BlockInfo{}, nil + }, + } - reqAddress := "test" - req, _ := http.NewRequest("GET", fmt.Sprintf("/address/%s", reqAddress), nil) - resp := httptest.NewRecorder() - ws.ServeHTTP(resp, req) - - response := shared.GenericAPIResponse{} - loadResponse(resp.Body, &response) - mapResponse := response.Data.(map[string]interface{}) - accountResponse := accountResponse{} - - mapResponseBytes, _ := json.Marshal(&mapResponse) - _ = json.Unmarshal(mapResponseBytes, &accountResponse) - - assert.Equal(t, http.StatusOK, resp.Code) - assert.Equal(t, reqAddress, accountResponse.Account.Address) - assert.Equal(t, uint64(1), accountResponse.Account.Nonce) - assert.Equal(t, "100", accountResponse.Account.Balance) - assert.Equal(t, "120", accountResponse.Account.DeveloperReward) - assert.Empty(t, response.Error) -} - -func TestGetAccount_WithBadQueryOptionsShouldErr(t *testing.T) { - t.Parallel() - - facade := mock.FacadeStub{ - GetAccountCalled: func(address string, _ api.AccountQueryOptions) (api.AccountResponse, api.BlockInfo, error) { - return api.AccountResponse{Nonce: 1}, api.BlockInfo{}, nil - }, - } - - addrGroup, err := groups.NewAddressGroup(&facade) - require.NoError(t, err) - - ws := startWebServer(addrGroup, "address", 
getAddressRoutesConfig()) - - response, code := httpGetAccount(ws, "/address/alice?onFinalBlock=bad") - require.Equal(t, http.StatusBadRequest, code) - require.Contains(t, response.Error, apiErrors.ErrBadUrlParams.Error()) - - response, code = httpGetAccount(ws, "/address/alice?onStartOfEpoch=bad") - require.Equal(t, http.StatusBadRequest, code) - require.Contains(t, response.Error, apiErrors.ErrBadUrlParams.Error()) -} - -func TestGetAccount_WithQueryOptionsShouldWork(t *testing.T) { - t.Parallel() - - var calledWithAddress string - var calledWithOptions api.AccountQueryOptions - - facade := mock.FacadeStub{ - GetAccountCalled: func(address string, options api.AccountQueryOptions) (api.AccountResponse, api.BlockInfo, error) { - calledWithAddress = address - calledWithOptions = options - return api.AccountResponse{Nonce: 1}, api.BlockInfo{}, nil - }, - } - - addrGroup, err := groups.NewAddressGroup(&facade) - require.NoError(t, err) - - ws := startWebServer(addrGroup, "address", getAddressRoutesConfig()) - - response, code := httpGetAccount(ws, "/address/alice?onFinalBlock=true") - require.Equal(t, http.StatusOK, code) - require.NotNil(t, response) - require.Equal(t, "alice", calledWithAddress) - require.Equal(t, api.AccountQueryOptions{OnFinalBlock: true}, calledWithOptions) -} - -func httpGetAccount(ws *gin.Engine, url string) (wrappedAcountResponse, int) { - httpRequest, _ := http.NewRequest("GET", url, nil) - httpResponse := httptest.NewRecorder() - ws.ServeHTTP(httpResponse, httpRequest) - - accountResponse := wrappedAcountResponse{} - loadResponse(httpResponse.Body, &accountResponse) - return accountResponse, httpResponse.Code + esdtBalanceResponseObj := &esdtTokenResponse{} + loadAddressGroupResponse( + t, + facade, + "/address/erd1alice/esdt/newToken", + "GET", + nil, + esdtBalanceResponseObj, + ) + assert.Equal(t, testValue, esdtBalanceResponseObj.Data.Balance) + assert.Equal(t, "000100", esdtBalanceResponseObj.Data.Properties) + }) } -func TestGetESDTBalance_NodeFailsShouldError(t *testing.T) { +func TestAddressGroup_getESDTsRoles(t *testing.T) { t.Parallel() - testAddress := "address" - expectedErr := errors.New("expected error") - facade := mock.FacadeStub{ - GetESDTDataCalled: func(_ string, _ string, _ uint64, _ api.AccountQueryOptions) (*esdt.ESDigitalToken, api.BlockInfo, error) { - return nil, api.BlockInfo{}, expectedErr - }, - } - - addrGroup, err := groups.NewAddressGroup(&facade) - require.NoError(t, err) + t.Run("empty address should error", + testErrorScenario("/address//esdts/roles", "GET", nil, + formatExpectedErr(apiErrors.ErrGetRolesForAccount, apiErrors.ErrEmptyAddress))) + t.Run("invalid query options should error", + testErrorScenario("/address/erd1alice/esdts/roles?blockNonce=not-uint64", "GET", nil, + formatExpectedErr(apiErrors.ErrGetRolesForAccount, apiErrors.ErrBadUrlParams))) + t.Run("with node fail should err", func(t *testing.T) { + t.Parallel() - ws := startWebServer(addrGroup, "address", getAddressRoutesConfig()) + facade := &mock.FacadeStub{ + GetESDTsRolesCalled: func(_ string, _ api.AccountQueryOptions) (map[string][]string, api.BlockInfo, error) { + return nil, api.BlockInfo{}, expectedErr + }, + } + testAddressGroup( + t, + facade, + "/address/erd1alice/esdts/roles", + "GET", + nil, + http.StatusInternalServerError, + formatExpectedErr(apiErrors.ErrGetRolesForAccount, expectedErr), + ) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() - req, _ := http.NewRequest("GET", fmt.Sprintf("/address/%s/esdt/newToken", testAddress), nil) - 
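An illustrative aside, not part of the diff: a sketch of how a boolean option such as onFinalBlock can be read from a gin context. parseBoolUrlParam is a hypothetical helper; the real logic lives in extractAccountQueryOptions/parseAccountQueryOptions.

package main

import (
	"fmt"
	"net/http/httptest"
	"strconv"

	"github.com/gin-gonic/gin"
)

// parseBoolUrlParam treats a missing option as false and parses anything else
// with strconv, which is what makes "onFinalBlock=bad" a bad-URL-params case.
func parseBoolUrlParam(c *gin.Context, name string) (bool, error) {
	raw := c.Query(name)
	if raw == "" {
		return false, nil
	}
	return strconv.ParseBool(raw)
}

func main() {
	c, _ := gin.CreateTestContext(httptest.NewRecorder())
	c.Request = httptest.NewRequest("GET", "/address/alice?onFinalBlock=true", nil)

	onFinal, err := parseBoolUrlParam(c, "onFinalBlock")
	fmt.Println(onFinal, err) // true <nil>
}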
resp := httptest.NewRecorder() - ws.ServeHTTP(resp, req) + roles := map[string][]string{ + "token0": {"role0", "role1"}, + "token1": {"role3", "role1"}, + } + facade := &mock.FacadeStub{ + GetESDTsRolesCalled: func(_ string, _ api.AccountQueryOptions) (map[string][]string, api.BlockInfo, error) { + return roles, api.BlockInfo{}, nil + }, + } - usernameResponseObj := usernameResponse{} - loadResponse(resp.Body, &usernameResponseObj) - assert.Equal(t, http.StatusInternalServerError, resp.Code) - assert.True(t, strings.Contains(usernameResponseObj.Error, expectedErr.Error())) + response := &esdtRolesResponse{} + loadAddressGroupResponse( + t, + facade, + "/address/erd1alice/esdts/roles", + "GET", + nil, + response, + ) + assert.Equal(t, roles, response.Data.Roles) + }) } -func TestGetESDTBalance_ShouldWork(t *testing.T) { +func TestAddressGroup_getESDTTokensWithRole(t *testing.T) { t.Parallel() - testAddress := "address" - testValue := big.NewInt(100).String() - testProperties := []byte{byte(0), byte(1), byte(0)} - facade := mock.FacadeStub{ - GetESDTDataCalled: func(_ string, _ string, _ uint64, _ api.AccountQueryOptions) (*esdt.ESDigitalToken, api.BlockInfo, error) { - return &esdt.ESDigitalToken{Value: big.NewInt(100), Properties: testProperties}, api.BlockInfo{}, nil - }, - } - - addrGroup, err := groups.NewAddressGroup(&facade) - require.NoError(t, err) + t.Run("empty address should error", + testErrorScenario("/address//esdts-with-role/ESDTRoleNFTCreate", "GET", nil, + formatExpectedErr(apiErrors.ErrGetESDTTokensWithRole, apiErrors.ErrEmptyAddress))) + t.Run("invalid query options should error", + testErrorScenario("/address/erd1alice/esdts-with-role/ESDTRoleNFTCreate?blockNonce=not-uint64", "GET", nil, + formatExpectedErr(apiErrors.ErrGetESDTTokensWithRole, apiErrors.ErrBadUrlParams))) + t.Run("invalid role should error", + testErrorScenario("/address/erd1alice/esdts-with-role/invalid", "GET", nil, + formatExpectedErr(apiErrors.ErrGetESDTTokensWithRole, fmt.Errorf("invalid role: %s", "invalid")))) + t.Run("with node fail should err", func(t *testing.T) { + t.Parallel() - ws := startWebServer(addrGroup, "address", getAddressRoutesConfig()) + facade := &mock.FacadeStub{ + GetESDTsWithRoleCalled: func(_ string, _ string, _ api.AccountQueryOptions) ([]string, api.BlockInfo, error) { + return nil, api.BlockInfo{}, expectedErr + }, + } + testAddressGroup( + t, + facade, + "/address/erd1alice/esdts-with-role/ESDTRoleNFTCreate", + "GET", + nil, + http.StatusInternalServerError, + formatExpectedErr(apiErrors.ErrGetESDTTokensWithRole, expectedErr), + ) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() - req, _ := http.NewRequest("GET", fmt.Sprintf("/address/%s/esdt/newToken", testAddress), nil) - resp := httptest.NewRecorder() - ws.ServeHTTP(resp, req) + expectedTokens := []string{"ABC-0o9i8u", "XYZ-r5y7i9"} + facade := &mock.FacadeStub{ + GetESDTsWithRoleCalled: func(address string, role string, _ api.AccountQueryOptions) ([]string, api.BlockInfo, error) { + return expectedTokens, api.BlockInfo{}, nil + }, + } - esdtBalanceResponseObj := esdtTokenResponse{} - loadResponse(resp.Body, &esdtBalanceResponseObj) - assert.Equal(t, http.StatusOK, resp.Code) - assert.Equal(t, testValue, esdtBalanceResponseObj.Data.Balance) - assert.Equal(t, "000100", esdtBalanceResponseObj.Data.Properties) + esdtResponseObj := &esdtsWithRoleResponse{} + loadAddressGroupResponse( + t, + facade, + "/address/erd1alice/esdts-with-role/ESDTRoleNFTCreate", + "GET", + nil, + esdtResponseObj, + ) + 
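// The "000100" assertion above is simply the hex encoding of the raw
// Properties bytes []byte{0, 1, 0}, and the NFT test further down expects
// "010000" for []byte{1, 0, 0} for the same reason. A tiny standalone check
// of that encoding:

package example

import (
    "encoding/hex"
    "fmt"
)

func main() {
    fmt.Println(hex.EncodeToString([]byte{0, 1, 0})) // 000100
    fmt.Println(hex.EncodeToString([]byte{1, 0, 0})) // 010000
}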
assert.Equal(t, expectedTokens, esdtResponseObj.Data.Tokens) + }) } -func TestGetESDTNFTData_NodeFailsShouldError(t *testing.T) { +func TestAddressGroup_getNFTTokenIDsRegisteredByAddress(t *testing.T) { t.Parallel() - testAddress := "address" - expectedErr := errors.New("expected error") - facade := mock.FacadeStub{ - GetESDTDataCalled: func(_ string, _ string, _ uint64, _ api.AccountQueryOptions) (*esdt.ESDigitalToken, api.BlockInfo, error) { - return nil, api.BlockInfo{}, expectedErr - }, - } - - addrGroup, err := groups.NewAddressGroup(&facade) - require.NoError(t, err) + t.Run("empty address should error", + testErrorScenario("/address//registered-nfts", "GET", nil, + formatExpectedErr(apiErrors.ErrRegisteredNFTTokenIDs, apiErrors.ErrEmptyAddress))) + t.Run("invalid query options should error", + testErrorScenario("/address/erd1alice/registered-nfts?blockNonce=not-uint64", "GET", nil, + formatExpectedErr(apiErrors.ErrRegisteredNFTTokenIDs, apiErrors.ErrBadUrlParams))) + t.Run("with node fail should err", func(t *testing.T) { + t.Parallel() - ws := startWebServer(addrGroup, "address", getAddressRoutesConfig()) + facade := &mock.FacadeStub{ + GetNFTTokenIDsRegisteredByAddressCalled: func(_ string, _ api.AccountQueryOptions) ([]string, api.BlockInfo, error) { + return nil, api.BlockInfo{}, expectedErr + }, + } + testAddressGroup( + t, + facade, + "/address/erd1alice/registered-nfts", + "GET", + nil, + http.StatusInternalServerError, + formatExpectedErr(apiErrors.ErrRegisteredNFTTokenIDs, expectedErr), + ) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() - req, _ := http.NewRequest("GET", fmt.Sprintf("/address/%s/nft/newToken/nonce/10", testAddress), nil) - resp := httptest.NewRecorder() - ws.ServeHTTP(resp, req) + expectedTokens := []string{"ABC-0o9i8u", "XYZ-r5y7i9"} + facade := &mock.FacadeStub{ + GetNFTTokenIDsRegisteredByAddressCalled: func(address string, _ api.AccountQueryOptions) ([]string, api.BlockInfo, error) { + return expectedTokens, api.BlockInfo{}, nil + }, + } - esdtResponseObj := esdtNFTResponse{} - loadResponse(resp.Body, &esdtResponseObj) - assert.Equal(t, http.StatusInternalServerError, resp.Code) - assert.True(t, strings.Contains(esdtResponseObj.Error, expectedErr.Error())) + esdtResponseObj := &esdtsWithRoleResponse{} + loadAddressGroupResponse( + t, + facade, + "/address/erd1alice/registered-nfts", + "GET", + nil, + esdtResponseObj, + ) + assert.Equal(t, expectedTokens, esdtResponseObj.Data.Tokens) + }) } -func TestGetESDTNFTData_ShouldWork(t *testing.T) { +func TestAddressGroup_getESDTNFTData(t *testing.T) { t.Parallel() - testAddress := "address" - testValue := big.NewInt(100).String() - testNonce := uint64(37) - testProperties := []byte{byte(1), byte(0), byte(0)} - facade := mock.FacadeStub{ - GetESDTDataCalled: func(_ string, _ string, _ uint64, _ api.AccountQueryOptions) (*esdt.ESDigitalToken, api.BlockInfo, error) { - return &esdt.ESDigitalToken{ - Value: big.NewInt(100), - Properties: testProperties, - TokenMetaData: &esdt.MetaData{Nonce: testNonce, Creator: []byte(testAddress)}}, api.BlockInfo{}, nil - }, - } - - addrGroup, err := groups.NewAddressGroup(&facade) - require.NoError(t, err) + t.Run("empty address should error", + testErrorScenario("/address//nft/newToken/nonce/10", "GET", nil, + formatExpectedErr(apiErrors.ErrGetESDTNFTData, apiErrors.ErrEmptyAddress))) + t.Run("invalid query options should error", + testErrorScenario("/address/erd1alice/nft/newToken/nonce/10?blockNonce=not-uint64", "GET", nil, + 
formatExpectedErr(apiErrors.ErrGetESDTNFTData, apiErrors.ErrBadUrlParams))) + t.Run("invalid nonce should error", + testErrorScenario("/address/erd1alice/nft/newToken/nonce/not-int", "GET", nil, + formatExpectedErr(apiErrors.ErrGetESDTNFTData, apiErrors.ErrNonceInvalid))) + t.Run("with node fail should err", func(t *testing.T) { + t.Parallel() - ws := startWebServer(addrGroup, "address", getAddressRoutesConfig()) + facade := &mock.FacadeStub{ + GetESDTDataCalled: func(_ string, _ string, _ uint64, _ api.AccountQueryOptions) (*esdt.ESDigitalToken, api.BlockInfo, error) { + return nil, api.BlockInfo{}, expectedErr + }, + } + testAddressGroup( + t, + facade, + "/address/erd1alice/nft/newToken/nonce/10", + "GET", + nil, + http.StatusInternalServerError, + formatExpectedErr(apiErrors.ErrGetESDTNFTData, expectedErr), + ) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() - req, _ := http.NewRequest("GET", fmt.Sprintf("/address/%s/nft/newToken/nonce/10", testAddress), nil) - resp := httptest.NewRecorder() - ws.ServeHTTP(resp, req) + testAddress := "address" + testValue := big.NewInt(100).String() + testNonce := uint64(37) + testProperties := []byte{byte(1), byte(0), byte(0)} + facade := &mock.FacadeStub{ + GetESDTDataCalled: func(_ string, _ string, _ uint64, _ api.AccountQueryOptions) (*esdt.ESDigitalToken, api.BlockInfo, error) { + return &esdt.ESDigitalToken{ + Value: big.NewInt(100), + Properties: testProperties, + TokenMetaData: &esdt.MetaData{Nonce: testNonce, Creator: []byte(testAddress)}}, api.BlockInfo{}, nil + }, + } - esdtResponseObj := esdtNFTResponse{} - loadResponse(resp.Body, &esdtResponseObj) - assert.Equal(t, http.StatusOK, resp.Code) - assert.Equal(t, testValue, esdtResponseObj.Data.Balance) - assert.Equal(t, "010000", esdtResponseObj.Data.Properties) - assert.Equal(t, testAddress, esdtResponseObj.Data.Creator) - assert.Equal(t, testNonce, esdtResponseObj.Data.Nonce) + esdtResponseObj := &esdtNFTResponse{} + loadAddressGroupResponse( + t, + facade, + "/address/erd1alice/nft/newToken/nonce/10", + "GET", + nil, + esdtResponseObj, + ) + assert.Equal(t, testValue, esdtResponseObj.Data.Balance) + assert.Equal(t, "010000", esdtResponseObj.Data.Properties) + assert.Equal(t, testAddress, esdtResponseObj.Data.Creator) + assert.Equal(t, testNonce, esdtResponseObj.Data.Nonce) + }) } -func TestGetESDTTokensWithRole_InvalidRoleShouldError(t *testing.T) { +func TestAddressGroup_getAllESDTData(t *testing.T) { t.Parallel() - testAddress := "address" - expectedErr := errors.New("expected error") - facade := mock.FacadeStub{ - GetESDTsWithRoleCalled: func(_ string, _ string, _ api.AccountQueryOptions) ([]string, api.BlockInfo, error) { - return nil, api.BlockInfo{}, expectedErr - }, - } - - addrGroup, err := groups.NewAddressGroup(&facade) - require.NoError(t, err) + t.Run("empty address should error", + testErrorScenario("/address//esdt", "GET", nil, + formatExpectedErr(apiErrors.ErrGetESDTNFTData, apiErrors.ErrEmptyAddress))) + t.Run("invalid query options should error", + testErrorScenario("/address/erd1alice/esdt?blockNonce=not-uint64", "GET", nil, + formatExpectedErr(apiErrors.ErrGetESDTNFTData, apiErrors.ErrBadUrlParams))) + t.Run("with node fail should err", func(t *testing.T) { + t.Parallel() - ws := startWebServer(addrGroup, "address", getAddressRoutesConfig()) + facade := &mock.FacadeStub{ + GetAllESDTTokensCalled: func(address string, options api.AccountQueryOptions) (map[string]*esdt.ESDigitalToken, api.BlockInfo, error) { + return nil, api.BlockInfo{}, expectedErr + }, + 
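// A "not-int" nonce path segment above trips apiErrors.ErrNonceInvalid. The
// handler's actual parsing helper is not part of this diff; a plausible
// sketch of that validation, assuming a gin path parameter named "nonce"
// (assumed names, not the repository's real code):

nonceStr := c.Param("nonce") // e.g. "10" or "not-int"
nonce, err := strconv.ParseUint(nonceStr, 10, 64)
if err != nil {
    shared.RespondWithValidationError(c, errors.ErrGetESDTNFTData, errors.ErrNonceInvalid)
    return
}
_ = nonce // the valid nonce is then forwarded to the facade call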
} + testAddressGroup( + t, + facade, + "/address/erd1alice/esdt", + "GET", + nil, + http.StatusInternalServerError, + formatExpectedErr(apiErrors.ErrGetESDTNFTData, expectedErr), + ) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() - req, _ := http.NewRequest("GET", fmt.Sprintf("/address/%s/esdts-with-role/invalid", testAddress), nil) - resp := httptest.NewRecorder() - ws.ServeHTTP(resp, req) + testValue1 := "token1" + testValue2 := "token2" + facade := &mock.FacadeStub{ + GetAllESDTTokensCalled: func(address string, _ api.AccountQueryOptions) (map[string]*esdt.ESDigitalToken, api.BlockInfo, error) { + tokens := make(map[string]*esdt.ESDigitalToken) + tokens[testValue1] = &esdt.ESDigitalToken{Value: big.NewInt(10)} + tokens[testValue2] = &esdt.ESDigitalToken{Value: big.NewInt(100)} + return tokens, api.BlockInfo{}, nil + }, + } - esdtResponseObj := esdtsWithRoleResponse{} - loadResponse(resp.Body, &esdtResponseObj) - assert.Equal(t, http.StatusBadRequest, resp.Code) - assert.True(t, strings.Contains(esdtResponseObj.Error, "invalid role")) + esdtTokenResponseObj := &esdtTokensCompleteResponse{} + loadAddressGroupResponse( + t, + facade, + "/address/erd1alice/esdt", + "GET", + nil, + esdtTokenResponseObj, + ) + assert.Equal(t, 2, len(esdtTokenResponseObj.Data.Tokens)) + }) } -func TestGetESDTTokensWithRole_NodeFailsShouldError(t *testing.T) { +func TestAddressGroup_UpdateFacade(t *testing.T) { t.Parallel() - testAddress := "address" - expectedErr := errors.New("expected error") - facade := mock.FacadeStub{ - GetESDTsWithRoleCalled: func(_ string, _ string, _ api.AccountQueryOptions) ([]string, api.BlockInfo, error) { - return nil, api.BlockInfo{}, expectedErr - }, - } - - addrGroup, err := groups.NewAddressGroup(&facade) - require.NoError(t, err) - - ws := startWebServer(addrGroup, "address", getAddressRoutesConfig()) - - req, _ := http.NewRequest("GET", fmt.Sprintf("/address/%s/esdts-with-role/ESDTRoleNFTCreate", testAddress), nil) - resp := httptest.NewRecorder() - ws.ServeHTTP(resp, req) - - esdtResponseObj := esdtsWithRoleResponse{} - loadResponse(resp.Body, &esdtResponseObj) - assert.Equal(t, http.StatusInternalServerError, resp.Code) - assert.True(t, strings.Contains(esdtResponseObj.Error, expectedErr.Error())) -} + t.Run("nil facade should error", func(t *testing.T) { + t.Parallel() -func TestGetESDTTokensWithRole_ShouldWork(t *testing.T) { - t.Parallel() + addrGroup, err := groups.NewAddressGroup(&mock.FacadeStub{}) + require.NoError(t, err) - testAddress := "address" - expectedTokens := []string{"ABC-0o9i8u", "XYZ-r5y7i9"} - facade := mock.FacadeStub{ - GetESDTsWithRoleCalled: func(address string, role string, _ api.AccountQueryOptions) ([]string, api.BlockInfo, error) { - return expectedTokens, api.BlockInfo{}, nil - }, - } + err = addrGroup.UpdateFacade(nil) + require.Equal(t, apiErrors.ErrNilFacadeHandler, err) + }) + t.Run("cast failure should error", func(t *testing.T) { + t.Parallel() - addrGroup, err := groups.NewAddressGroup(&facade) - require.NoError(t, err) + addrGroup, err := groups.NewAddressGroup(&mock.FacadeStub{}) + require.NoError(t, err) - ws := startWebServer(addrGroup, "address", getAddressRoutesConfig()) + err = addrGroup.UpdateFacade("this is not a facade handler") + require.True(t, errors.Is(err, apiErrors.ErrFacadeWrongTypeAssertion)) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() - req, _ := http.NewRequest("GET", fmt.Sprintf("/address/%s/esdts-with-role/ESDTRoleNFTCreate", testAddress), nil) - resp := httptest.NewRecorder() - 
ws.ServeHTTP(resp, req) + roles := map[string][]string{ + "token0": {"role0", "role1"}, + "token1": {"role3", "role1"}, + } + testAddress := "address" + facade := mock.FacadeStub{ + GetESDTsRolesCalled: func(_ string, _ api.AccountQueryOptions) (map[string][]string, api.BlockInfo, error) { + return roles, api.BlockInfo{}, nil + }, + } - esdtResponseObj := esdtsWithRoleResponse{} - loadResponse(resp.Body, &esdtResponseObj) - assert.Equal(t, http.StatusOK, resp.Code) - assert.Equal(t, expectedTokens, esdtResponseObj.Data.Tokens) -} + addrGroup, err := groups.NewAddressGroup(&facade) + require.NoError(t, err) -func TestGetNFTTokenIDsRegisteredByAddress_NodeFailsShouldError(t *testing.T) { - t.Parallel() + ws := startWebServer(addrGroup, "address", getAddressRoutesConfig()) - testAddress := "address" - expectedErr := errors.New("expected error") - facade := mock.FacadeStub{ - GetNFTTokenIDsRegisteredByAddressCalled: func(_ string, _ api.AccountQueryOptions) ([]string, api.BlockInfo, error) { - return nil, api.BlockInfo{}, expectedErr - }, - } + req, _ := http.NewRequest("GET", fmt.Sprintf("/address/%s/esdts/roles", testAddress), nil) + resp := httptest.NewRecorder() + ws.ServeHTTP(resp, req) - addrGroup, err := groups.NewAddressGroup(&facade) - require.NoError(t, err) + response := esdtRolesResponse{} + loadResponse(resp.Body, &response) + assert.Equal(t, http.StatusOK, resp.Code) + assert.Equal(t, roles, response.Data.Roles) - ws := startWebServer(addrGroup, "address", getAddressRoutesConfig()) + newErr := errors.New("new error") + newFacade := mock.FacadeStub{ + GetESDTsRolesCalled: func(_ string, _ api.AccountQueryOptions) (map[string][]string, api.BlockInfo, error) { + return nil, api.BlockInfo{}, newErr + }, + } + err = addrGroup.UpdateFacade(&newFacade) + require.NoError(t, err) - req, _ := http.NewRequest("GET", fmt.Sprintf("/address/%s/registered-nfts", testAddress), nil) - resp := httptest.NewRecorder() - ws.ServeHTTP(resp, req) + req, _ = http.NewRequest("GET", fmt.Sprintf("/address/%s/esdts/roles", testAddress), nil) + resp = httptest.NewRecorder() + ws.ServeHTTP(resp, req) - esdtResponseObj := esdtsWithRoleResponse{} - loadResponse(resp.Body, &esdtResponseObj) - assert.Equal(t, http.StatusInternalServerError, resp.Code) - assert.True(t, strings.Contains(esdtResponseObj.Error, expectedErr.Error())) + response = esdtRolesResponse{} + loadResponse(resp.Body, &response) + assert.Equal(t, http.StatusInternalServerError, resp.Code) + assert.True(t, strings.Contains(response.Error, newErr.Error())) + }) } -func TestGetNFTTokenIDsRegisteredByAddress_ShouldWork(t *testing.T) { +func TestAddressGroup_IsInterfaceNil(t *testing.T) { t.Parallel() - testAddress := "address" - expectedTokens := []string{"ABC-0o9i8u", "XYZ-r5y7i9"} - facade := mock.FacadeStub{ - GetNFTTokenIDsRegisteredByAddressCalled: func(address string, _ api.AccountQueryOptions) ([]string, api.BlockInfo, error) { - return expectedTokens, api.BlockInfo{}, nil - }, - } - - addrGroup, err := groups.NewAddressGroup(&facade) - require.NoError(t, err) - - ws := startWebServer(addrGroup, "address", getAddressRoutesConfig()) + addrGroup, _ := groups.NewAddressGroup(nil) + require.True(t, addrGroup.IsInterfaceNil()) - req, _ := http.NewRequest("GET", fmt.Sprintf("/address/%s/registered-nfts", testAddress), nil) - resp := httptest.NewRecorder() - ws.ServeHTTP(resp, req) - - esdtResponseObj := esdtsWithRoleResponse{} - loadResponse(resp.Body, &esdtResponseObj) - assert.Equal(t, http.StatusOK, resp.Code) - assert.Equal(t, expectedTokens, 
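// The UpdateFacade subtests above pin down the contract: nil input returns
// ErrNilFacadeHandler itself (require.Equal, not errors.Is), a value that is
// not a facade handler fails with a wrapped ErrFacadeWrongTypeAssertion
// (checked via errors.Is), and a valid facade is swapped in for routes that
// are already registered. A sketch of an implementation satisfying that
// contract (the interface and field names below are assumptions, not taken
// from this diff):

func (group *addressGroup) UpdateFacade(newFacade interface{}) error {
    if newFacade == nil {
        return errors.ErrNilFacadeHandler // returned unwrapped, so require.Equal passes
    }
    castFacade, ok := newFacade.(addressFacadeHandler) // assumed interface name
    if !ok {
        // wrapped with %w, so errors.Is(err, ErrFacadeWrongTypeAssertion) passes
        return fmt.Errorf("%w for address group", errors.ErrFacadeWrongTypeAssertion)
    }

    group.mutFacade.Lock() // guards handlers reading the facade concurrently
    group.facade = castFacade
    group.mutFacade.Unlock()

    return nil
}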
esdtResponseObj.Data.Tokens) + addrGroup, _ = groups.NewAddressGroup(&mock.FacadeStub{}) + require.False(t, addrGroup.IsInterfaceNil()) } -func TestGetFullESDTTokens_NodeFailsShouldError(t *testing.T) { - t.Parallel() +func testErrorScenario(url string, method string, body io.Reader, expectedErr string) func(t *testing.T) { + return func(t *testing.T) { + t.Parallel() - testAddress := "address" - expectedErr := errors.New("expected error") - facade := mock.FacadeStub{ - GetAllESDTTokensCalled: func(_ string, _ api.AccountQueryOptions) (map[string]*esdt.ESDigitalToken, api.BlockInfo, error) { - return nil, api.BlockInfo{}, expectedErr - }, + testAddressGroup( + t, + &mock.FacadeStub{}, + url, + method, + body, + http.StatusBadRequest, + expectedErr, + ) } - - addrGroup, err := groups.NewAddressGroup(&facade) - require.NoError(t, err) - - ws := startWebServer(addrGroup, "address", getAddressRoutesConfig()) - - req, _ := http.NewRequest("GET", fmt.Sprintf("/address/%s/esdt", testAddress), nil) - resp := httptest.NewRecorder() - ws.ServeHTTP(resp, req) - - esdtTokenResponseObj := esdtTokensCompleteResponse{} - loadResponse(resp.Body, &esdtTokenResponseObj) - assert.Equal(t, http.StatusInternalServerError, resp.Code) - assert.True(t, strings.Contains(esdtTokenResponseObj.Error, expectedErr.Error())) } -func TestGetFullESDTTokens_ShouldWork(t *testing.T) { - t.Parallel() - - testAddress := "address" - testValue1 := "token1" - testValue2 := "token2" - facade := mock.FacadeStub{ - GetAllESDTTokensCalled: func(address string, _ api.AccountQueryOptions) (map[string]*esdt.ESDigitalToken, api.BlockInfo, error) { - tokens := make(map[string]*esdt.ESDigitalToken) - tokens[testValue1] = &esdt.ESDigitalToken{Value: big.NewInt(10)} - tokens[testValue2] = &esdt.ESDigitalToken{Value: big.NewInt(100)} - return tokens, api.BlockInfo{}, nil - }, - } - - addrGroup, err := groups.NewAddressGroup(&facade) +func loadAddressGroupResponse( + t *testing.T, + facade shared.FacadeHandler, + url string, + method string, + body io.Reader, + destination interface{}, +) { + addrGroup, err := groups.NewAddressGroup(facade) require.NoError(t, err) ws := startWebServer(addrGroup, "address", getAddressRoutesConfig()) - req, _ := http.NewRequest("GET", fmt.Sprintf("/address/%s/esdt", testAddress), nil) + req, _ := http.NewRequest(method, url, body) resp := httptest.NewRecorder() ws.ServeHTTP(resp, req) - esdtTokenResponseObj := esdtTokensCompleteResponse{} - loadResponse(resp.Body, &esdtTokenResponseObj) assert.Equal(t, http.StatusOK, resp.Code) - assert.Equal(t, 2, len(esdtTokenResponseObj.Data.Tokens)) -} - -func TestGetKeyValuePairs_WithEmptyAddressShouldReturnError(t *testing.T) { - t.Parallel() - facade := mock.FacadeStub{} - - emptyAddress := "" - - addrGroup, err := groups.NewAddressGroup(&facade) - require.NoError(t, err) - - ws := startWebServer(addrGroup, "address", getAddressRoutesConfig()) - - req, _ := http.NewRequest("GET", fmt.Sprintf("/address/%s/keys", emptyAddress), nil) - resp := httptest.NewRecorder() - ws.ServeHTTP(resp, req) - - response := shared.GenericAPIResponse{} - loadResponse(resp.Body, &response) - assert.Equal(t, http.StatusBadRequest, resp.Code) - assert.NotEmpty(t, response) - assert.True(t, strings.Contains(response.Error, - fmt.Sprintf("%s: %s", apiErrors.ErrGetKeyValuePairs.Error(), apiErrors.ErrEmptyAddress.Error()), - )) -} - -func TestGetKeyValuePairs_NodeFailsShouldError(t *testing.T) { - t.Parallel() - - testAddress := "address" - expectedErr := errors.New("expected error") - facade := 
mock.FacadeStub{ - GetKeyValuePairsCalled: func(_ string, _ api.AccountQueryOptions) (map[string]string, api.BlockInfo, error) { - return nil, api.BlockInfo{}, expectedErr - }, - } - - addrGroup, err := groups.NewAddressGroup(&facade) - require.NoError(t, err) - - ws := startWebServer(addrGroup, "address", getAddressRoutesConfig()) - - req, _ := http.NewRequest("GET", fmt.Sprintf("/address/%s/keys", testAddress), nil) - resp := httptest.NewRecorder() - ws.ServeHTTP(resp, req) - response := &shared.GenericAPIResponse{} - loadResponse(resp.Body, &response) - assert.Equal(t, http.StatusInternalServerError, resp.Code) - assert.True(t, strings.Contains(response.Error, expectedErr.Error())) + loadResponse(resp.Body, destination) } -func TestGetKeyValuePairs_ShouldWork(t *testing.T) { - t.Parallel() - - pairs := map[string]string{ - "k1": "v1", - "k2": "v2", - } - testAddress := "address" - facade := mock.FacadeStub{ - GetKeyValuePairsCalled: func(_ string, _ api.AccountQueryOptions) (map[string]string, api.BlockInfo, error) { - return pairs, api.BlockInfo{}, nil - }, - } - - addrGroup, err := groups.NewAddressGroup(&facade) - require.NoError(t, err) - - ws := startWebServer(addrGroup, "address", getAddressRoutesConfig()) - - req, _ := http.NewRequest("GET", fmt.Sprintf("/address/%s/keys", testAddress), nil) - resp := httptest.NewRecorder() - ws.ServeHTTP(resp, req) - - response := keyValuePairsResponse{} - loadResponse(resp.Body, &response) - assert.Equal(t, http.StatusOK, resp.Code) - assert.Equal(t, pairs, response.Data.Pairs) -} - -func TestGetESDTsRoles_WithEmptyAddressShouldReturnError(t *testing.T) { - t.Parallel() - facade := mock.FacadeStub{} - - emptyAddress := "" - - addrGroup, err := groups.NewAddressGroup(&facade) +func testAddressGroup( + t *testing.T, + facade shared.FacadeHandler, + url string, + method string, + body io.Reader, + expectedRespCode int, + expectedRespError string, +) { + addrGroup, err := groups.NewAddressGroup(facade) require.NoError(t, err) ws := startWebServer(addrGroup, "address", getAddressRoutesConfig()) - req, _ := http.NewRequest("GET", fmt.Sprintf("/address/%s/esdts/roles", emptyAddress), nil) + req, _ := http.NewRequest(method, url, body) resp := httptest.NewRecorder() ws.ServeHTTP(resp, req) response := shared.GenericAPIResponse{} loadResponse(resp.Body, &response) - assert.Equal(t, http.StatusBadRequest, resp.Code) - assert.NotEmpty(t, response) - assert.True(t, strings.Contains(response.Error, - fmt.Sprintf("%s: %s", apiErrors.ErrGetRolesForAccount.Error(), apiErrors.ErrEmptyAddress.Error()), - )) + assert.Equal(t, expectedRespCode, resp.Code) + assert.True(t, strings.Contains(response.Error, expectedRespError)) } -func TestGetESDTsRoles_NodeFailsShouldError(t *testing.T) { - t.Parallel() - - testAddress := "address" - expectedErr := errors.New("expected error") - facade := mock.FacadeStub{ - GetESDTsRolesCalled: func(_ string, _ api.AccountQueryOptions) (map[string][]string, api.BlockInfo, error) { - return nil, api.BlockInfo{}, expectedErr - }, - } - - addrGroup, err := groups.NewAddressGroup(&facade) - require.NoError(t, err) - - ws := startWebServer(addrGroup, "address", getAddressRoutesConfig()) - - req, _ := http.NewRequest("GET", fmt.Sprintf("/address/%s/esdts/roles", testAddress), nil) - resp := httptest.NewRecorder() - ws.ServeHTTP(resp, req) - - response := &shared.GenericAPIResponse{} - loadResponse(resp.Body, &response) - assert.Equal(t, http.StatusInternalServerError, resp.Code) - assert.True(t, strings.Contains(response.Error, 
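// testAddressGroup above only runs strings.Contains against the response
// error, and the formatExpectedErr helper defined just below builds the
// "outer: inner" string it looks for. An illustration of the value being
// matched:

expected := fmt.Sprintf("%s: %s",
    apiErrors.ErrGetESDTBalance.Error(), apiErrors.ErrEmptyAddress.Error())
// the handler is expected to respond with an error string containing this
// "outer: inner" pair, which strings.Contains then finds.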
expectedErr.Error())) -} - -func TestGetESDTsRoles_ShouldWork(t *testing.T) { - t.Parallel() - - roles := map[string][]string{ - "token0": {"role0", "role1"}, - "token1": {"role3", "role1"}, - } - testAddress := "address" - facade := mock.FacadeStub{ - GetESDTsRolesCalled: func(_ string, _ api.AccountQueryOptions) (map[string][]string, api.BlockInfo, error) { - return roles, api.BlockInfo{}, nil - }, - } - - addrGroup, err := groups.NewAddressGroup(&facade) - require.NoError(t, err) - - ws := startWebServer(addrGroup, "address", getAddressRoutesConfig()) - - req, _ := http.NewRequest("GET", fmt.Sprintf("/address/%s/esdts/roles", testAddress), nil) - resp := httptest.NewRecorder() - ws.ServeHTTP(resp, req) - - response := esdtRolesResponse{} - loadResponse(resp.Body, &response) - assert.Equal(t, http.StatusOK, resp.Code) - assert.Equal(t, roles, response.Data.Roles) -} - -func TestAddressGroup_UpdateFacadeStub(t *testing.T) { - t.Parallel() - - roles := map[string][]string{ - "token0": {"role0", "role1"}, - "token1": {"role3", "role1"}, - } - testAddress := "address" - facade := mock.FacadeStub{ - GetESDTsRolesCalled: func(_ string, _ api.AccountQueryOptions) (map[string][]string, api.BlockInfo, error) { - return roles, api.BlockInfo{}, nil - }, - } - - addrGroup, err := groups.NewAddressGroup(&facade) - require.NoError(t, err) - - ws := startWebServer(addrGroup, "address", getAddressRoutesConfig()) - - req, _ := http.NewRequest("GET", fmt.Sprintf("/address/%s/esdts/roles", testAddress), nil) - resp := httptest.NewRecorder() - ws.ServeHTTP(resp, req) - - response := esdtRolesResponse{} - loadResponse(resp.Body, &response) - assert.Equal(t, http.StatusOK, resp.Code) - assert.Equal(t, roles, response.Data.Roles) - - newErr := errors.New("new error") - newFacadeStub := mock.FacadeStub{ - GetESDTsRolesCalled: func(_ string, _ api.AccountQueryOptions) (map[string][]string, api.BlockInfo, error) { - return nil, api.BlockInfo{}, newErr - }, - } - err = addrGroup.UpdateFacade(&newFacadeStub) - require.NoError(t, err) - - req, _ = http.NewRequest("GET", fmt.Sprintf("/address/%s/esdts/roles", testAddress), nil) - resp = httptest.NewRecorder() - ws.ServeHTTP(resp, req) - - response = esdtRolesResponse{} - loadResponse(resp.Body, &response) - assert.Equal(t, http.StatusInternalServerError, resp.Code) - assert.True(t, strings.Contains(response.Error, newErr.Error())) +func formatExpectedErr(err, innerErr error) string { + return fmt.Sprintf("%s: %s", err.Error(), innerErr.Error()) } func getAddressRoutesConfig() config.ApiRoutesConfig { @@ -1180,6 +1138,7 @@ func getAddressRoutesConfig() config.ApiRoutesConfig { Routes: []config.RouteConfig{ {Name: "/:address", Open: true}, {Name: "/bulk", Open: true}, + {Name: "/:address/guardian-data", Open: true}, {Name: "/:address/balance", Open: true}, {Name: "/:address/username", Open: true}, {Name: "/:address/code-hash", Open: true}, diff --git a/api/groups/blockGroup.go b/api/groups/blockGroup.go index ce49375137a..e8182e70327 100644 --- a/api/groups/blockGroup.go +++ b/api/groups/blockGroup.go @@ -108,7 +108,6 @@ func (bg *blockGroup) getBlockByNonce(c *gin.Context) { } shared.RespondWith(c, http.StatusOK, gin.H{"block": block}, "", shared.ReturnCodeSuccess) - } func (bg *blockGroup) getBlockByHash(c *gin.Context) { @@ -166,11 +165,7 @@ func (bg *blockGroup) getAlteredAccountsByNonce(c *gin.Context) { return } - options, err := parseAlteredAccountsForBlockQueryOptionsWithoutRequestType(c) - if err != nil { - shared.RespondWithValidationError(c, 
errors.ErrGetAlteredAccountsForBlock, err) - return - } + options := parseAlteredAccountsForBlockQueryOptionsWithoutRequestType(c) options.GetBlockParameters = api.GetBlockParameters{ RequestType: api.BlockFetchTypeByNonce, @@ -195,11 +190,7 @@ func (bg *blockGroup) getAlteredAccountsByHash(c *gin.Context) { return } - options, err := parseAlteredAccountsForBlockQueryOptionsWithoutRequestType(c) - if err != nil { - shared.RespondWithValidationError(c, errors.ErrGetAlteredAccountsForBlock, err) - return - } + options := parseAlteredAccountsForBlockQueryOptionsWithoutRequestType(c) options.GetBlockParameters = api.GetBlockParameters{ RequestType: api.BlockFetchTypeByHash, @@ -232,12 +223,12 @@ func parseBlockQueryOptions(c *gin.Context) (api.BlockQueryOptions, error) { return options, nil } -func parseAlteredAccountsForBlockQueryOptionsWithoutRequestType(c *gin.Context) (api.GetAlteredAccountsForBlockOptions, error) { +func parseAlteredAccountsForBlockQueryOptionsWithoutRequestType(c *gin.Context) api.GetAlteredAccountsForBlockOptions { tokensFilter := c.Request.URL.Query().Get(urlParamTokensFilter) return api.GetAlteredAccountsForBlockOptions{ TokensFilter: tokensFilter, - }, nil + } } func getQueryParamNonce(c *gin.Context) (uint64, error) { diff --git a/api/groups/blockGroup_test.go b/api/groups/blockGroup_test.go index 2ef12bc17f3..6bc58267f3e 100644 --- a/api/groups/blockGroup_test.go +++ b/api/groups/blockGroup_test.go @@ -3,12 +3,13 @@ package groups_test import ( "encoding/hex" "errors" + "fmt" + "io" "net/http" "net/http/httptest" "strings" "testing" - "github.com/gin-gonic/gin" "github.com/multiversx/mx-chain-core-go/data/api" "github.com/multiversx/mx-chain-core-go/data/outport" apiErrors "github.com/multiversx/mx-chain-go/api/errors" @@ -54,187 +55,460 @@ type blockResponse struct { Code string `json:"code"` } -func TestGetBlockByNonce_EmptyNonceUrlParameterShouldErr(t *testing.T) { +func TestBlockGroup_getBlockByNonce(t *testing.T) { t.Parallel() - facade := mock.FacadeStub{ - GetBlockByNonceCalled: func(_ uint64, _ api.BlockQueryOptions) (*api.Block, error) { - return &api.Block{}, nil - }, - } + t.Run("empty nonce should error", func(t *testing.T) { + t.Parallel() - blockGroup, err := groups.NewBlockGroup(&facade) - require.NoError(t, err) + testBlockGroup(t, &mock.FacadeStub{}, "/block/by-nonce", nil, http.StatusNotFound, "") + }) + t.Run("invalid nonce should error", + testBlockGroupErrorScenario("/block/by-nonce/invalid", nil, formatExpectedErr(apiErrors.ErrGetBlock, apiErrors.ErrInvalidBlockNonce))) + t.Run("invalid query options should error", + testBlockGroupErrorScenario("/block/by-nonce/10?withTxs=not-bool", nil, + formatExpectedErr(apiErrors.ErrGetBlock, apiErrors.ErrBadUrlParams))) + t.Run("facade error should error", func(t *testing.T) { + t.Parallel() + + facade := &mock.FacadeStub{ + GetBlockByNonceCalled: func(_ uint64, _ api.BlockQueryOptions) (*api.Block, error) { + return nil, expectedErr + }, + } + + testBlockGroup( + t, + facade, + "/block/by-nonce/10", + nil, + http.StatusInternalServerError, + formatExpectedErr(apiErrors.ErrGetBlock, expectedErr), + ) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + providedNonce := uint64(37) + expectedOptions := api.BlockQueryOptions{WithTransactions: true} + expectedBlock := api.Block{ + Nonce: 37, + Round: 39, + } + facade := &mock.FacadeStub{ + GetBlockByNonceCalled: func(nonce uint64, options api.BlockQueryOptions) (*api.Block, error) { + require.Equal(t, providedNonce, nonce) + require.Equal(t, 
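// The blockGroup.go hunk above drops the error return from
// parseAlteredAccountsForBlockQueryOptionsWithoutRequestType: the function
// only copies the tokens-filter string out of the URL query, which cannot
// fail (it always returned a nil error), so the `if err != nil` branches in
// both callers were dead code. After the change each caller reduces to:

options := parseAlteredAccountsForBlockQueryOptionsWithoutRequestType(c)
options.GetBlockParameters = api.GetBlockParameters{
    RequestType: api.BlockFetchTypeByNonce, // BlockFetchTypeByHash in the by-hash handler
    Nonce:       nonce,
}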
expectedOptions, options) + return &expectedBlock, nil + }, + } + + response := &blockResponse{} + loadBlockGroupResponse( + t, + facade, + fmt.Sprintf("/block/by-nonce/%d?withTxs=true", providedNonce), + "GET", + nil, + response, + ) + assert.Equal(t, expectedBlock, response.Data.Block) + }) +} - ws := startWebServer(blockGroup, "block", getBlockRoutesConfig()) +func TestBlockGroup_getBlockByHash(t *testing.T) { + t.Parallel() - req, _ := http.NewRequest("GET", "/block/by-nonce", nil) - resp := httptest.NewRecorder() - ws.ServeHTTP(resp, req) + t.Run("empty hash should error", func(t *testing.T) { + t.Parallel() - response := blockResponse{} - loadResponse(resp.Body, &response) - assert.Equal(t, http.StatusNotFound, resp.Code) + testBlockGroup(t, &mock.FacadeStub{}, "/block/by-hash", nil, http.StatusNotFound, "") + }) + t.Run("invalid query options should error", + testBlockGroupErrorScenario("/block/by-hash/hash?withLogs=not-bool", nil, + formatExpectedErr(apiErrors.ErrGetBlock, apiErrors.ErrBadUrlParams))) + t.Run("facade error should error", func(t *testing.T) { + t.Parallel() + + facade := &mock.FacadeStub{ + GetBlockByHashCalled: func(_ string, _ api.BlockQueryOptions) (*api.Block, error) { + return nil, expectedErr + }, + } + + testBlockGroup( + t, + facade, + "/block/by-hash/hash", + nil, + http.StatusInternalServerError, + formatExpectedErr(apiErrors.ErrGetBlock, expectedErr), + ) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + providedHash := "hash" + expectedOptions := api.BlockQueryOptions{WithTransactions: true} + expectedBlock := api.Block{ + Nonce: 37, + Round: 39, + } + facade := &mock.FacadeStub{ + GetBlockByHashCalled: func(hash string, options api.BlockQueryOptions) (*api.Block, error) { + require.Equal(t, providedHash, hash) + require.Equal(t, expectedOptions, options) + return &expectedBlock, nil + }, + } + + response := &blockResponse{} + loadBlockGroupResponse( + t, + facade, + fmt.Sprintf("/block/by-hash/%s?withTxs=true", providedHash), + "GET", + nil, + response, + ) + assert.Equal(t, expectedBlock, response.Data.Block) + }) } -func TestGetBlockByNonce_InvalidNonceShouldErr(t *testing.T) { +func TestBlockGroup_getBlockByRound(t *testing.T) { t.Parallel() - facade := mock.FacadeStub{ - GetBlockByNonceCalled: func(_ uint64, _ api.BlockQueryOptions) (*api.Block, error) { - return &api.Block{}, nil - }, - } + t.Run("empty round should error", func(t *testing.T) { + t.Parallel() - blockGroup, err := groups.NewBlockGroup(&facade) - require.NoError(t, err) + testBlockGroup(t, &mock.FacadeStub{}, "/block/by-round", nil, http.StatusNotFound, "") + }) + t.Run("invalid round should error", + testBlockGroupErrorScenario("/block/by-round/invalid", nil, formatExpectedErr(apiErrors.ErrGetBlock, apiErrors.ErrInvalidBlockRound))) + t.Run("invalid query options should error", + testBlockGroupErrorScenario("/block/by-round/123?withTxs=not-bool", nil, + formatExpectedErr(apiErrors.ErrGetBlock, apiErrors.ErrBadUrlParams))) + t.Run("facade error should error", func(t *testing.T) { + t.Parallel() + + facade := &mock.FacadeStub{ + GetBlockByRoundCalled: func(_ uint64, _ api.BlockQueryOptions) (*api.Block, error) { + return nil, expectedErr + }, + } + + testBlockGroup( + t, + facade, + "/block/by-round/123", + nil, + http.StatusInternalServerError, + formatExpectedErr(apiErrors.ErrGetBlock, expectedErr), + ) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + providedRound := uint64(37) + expectedOptions := api.BlockQueryOptions{WithTransactions: true} + 
expectedBlock := api.Block{ + Nonce: 37, + Round: 39, + } + facade := &mock.FacadeStub{ + GetBlockByRoundCalled: func(round uint64, options api.BlockQueryOptions) (*api.Block, error) { + require.Equal(t, providedRound, round) + require.Equal(t, expectedOptions, options) + return &expectedBlock, nil + }, + } + + response := &blockResponse{} + loadBlockGroupResponse( + t, + facade, + fmt.Sprintf("/block/by-round/%d?withTxs=true", providedRound), + "GET", + nil, + response, + ) + assert.Equal(t, expectedBlock, response.Data.Block) + }) +} - ws := startWebServer(blockGroup, "block", getBlockRoutesConfig()) +func TestBlockGroup_getAlteredAccountsByNonce(t *testing.T) { + t.Parallel() - req, _ := http.NewRequest("GET", "/block/by-nonce/invalid", nil) - resp := httptest.NewRecorder() - ws.ServeHTTP(resp, req) + t.Run("empty nonce should error", func(t *testing.T) { + t.Parallel() - response := blockResponse{} - loadResponse(resp.Body, &response) - assert.Equal(t, http.StatusBadRequest, resp.Code) + testBlockGroup(t, &mock.FacadeStub{}, "/block/altered-accounts/by-nonce", nil, http.StatusNotFound, "") + }) + t.Run("invalid nonce should error", + testBlockGroupErrorScenario("/block/altered-accounts/by-nonce/invalid", nil, + formatExpectedErr(apiErrors.ErrGetAlteredAccountsForBlock, apiErrors.ErrInvalidBlockRound))) + t.Run("facade error should error", func(t *testing.T) { + t.Parallel() + + facade := &mock.FacadeStub{ + GetAlteredAccountsForBlockCalled: func(options api.GetAlteredAccountsForBlockOptions) ([]*outport.AlteredAccount, error) { + return nil, expectedErr + }, + } + + testBlockGroup( + t, + facade, + "/block/altered-accounts/by-nonce/123", + nil, + http.StatusInternalServerError, + formatExpectedErr(apiErrors.ErrGetAlteredAccountsForBlock, expectedErr), + ) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + providedNonce := uint64(37) + expectedOptions := api.GetAlteredAccountsForBlockOptions{ + GetBlockParameters: api.GetBlockParameters{ + RequestType: api.BlockFetchTypeByNonce, + Nonce: providedNonce, + }, + } + expectedResponse := []*outport.AlteredAccount{ + { + Address: "alice", + Balance: "100000", + }, + } - assert.True(t, strings.Contains(response.Error, apiErrors.ErrInvalidBlockNonce.Error())) + facade := &mock.FacadeStub{ + GetAlteredAccountsForBlockCalled: func(options api.GetAlteredAccountsForBlockOptions) ([]*outport.AlteredAccount, error) { + require.Equal(t, expectedOptions, options) + return expectedResponse, nil + }, + } + + response := &alteredAccountsForBlockResponse{} + loadBlockGroupResponse( + t, + facade, + fmt.Sprintf("/block/altered-accounts/by-nonce/%d", providedNonce), + "GET", + nil, + response, + ) + require.Equal(t, expectedResponse, response.Data.Accounts) + require.Empty(t, response.Error) + require.Equal(t, string(shared.ReturnCodeSuccess), response.Code) + }) } -func TestGetBlockByNonce_FacadeErrorShouldErr(t *testing.T) { +func TestBlockGroup_getAlteredAccountsByHash(t *testing.T) { t.Parallel() - expectedErr := errors.New("local err") - facade := mock.FacadeStub{ - GetBlockByNonceCalled: func(_ uint64, _ api.BlockQueryOptions) (*api.Block, error) { - return nil, expectedErr - }, - } - - blockGroup, err := groups.NewBlockGroup(&facade) - require.NoError(t, err) - - ws := startWebServer(blockGroup, "block", getBlockRoutesConfig()) + t.Run("empty hash should error", func(t *testing.T) { + t.Parallel() - req, _ := http.NewRequest("GET", "/block/by-nonce/37", nil) - resp := httptest.NewRecorder() - ws.ServeHTTP(resp, req) + 
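// The should-work subtests above assert the exact URL-to-options mapping.
// Illustrative pairs, all taken from assertions elsewhere in this file
// rather than new behavior:
//
//     /block/by-nonce/37?withTxs=true               -> api.BlockQueryOptions{WithTransactions: true}
//     /block/by-round/38?withTxs=true&withLogs=true -> api.BlockQueryOptions{WithTransactions: true, WithLogs: true}
//     /block/by-hash/hash?withLogs=not-bool         -> 400, ErrGetBlock: ErrBadUrlParams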
testBlockGroup(t, &mock.FacadeStub{}, "/block/altered-accounts/by-hash", nil, http.StatusNotFound, "") + }) + t.Run("invalid hash should error", + testBlockGroupErrorScenario("/block/altered-accounts/by-hash/hash", nil, + apiErrors.ErrGetAlteredAccountsForBlock.Error())) + t.Run("facade error should error", func(t *testing.T) { + t.Parallel() + + providedHash := hex.EncodeToString([]byte("hash")) + facade := &mock.FacadeStub{ + GetAlteredAccountsForBlockCalled: func(options api.GetAlteredAccountsForBlockOptions) ([]*outport.AlteredAccount, error) { + return nil, expectedErr + }, + } + + testBlockGroup( + t, + facade, + "/block/altered-accounts/by-hash/"+providedHash, + nil, + http.StatusInternalServerError, + formatExpectedErr(apiErrors.ErrGetAlteredAccountsForBlock, expectedErr), + ) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() - response := blockResponse{} - loadResponse(resp.Body, &response) - assert.Equal(t, http.StatusInternalServerError, resp.Code) + providedHash := hex.EncodeToString([]byte("hash")) + expectedOptions := api.GetAlteredAccountsForBlockOptions{ + GetBlockParameters: api.GetBlockParameters{ + RequestType: api.BlockFetchTypeByHash, + Hash: []byte("hash"), + }, + } + expectedResponse := []*outport.AlteredAccount{ + { + Address: "alice", + Balance: "100000", + }, + } - assert.True(t, strings.Contains(response.Error, expectedErr.Error())) + facade := &mock.FacadeStub{ + GetAlteredAccountsForBlockCalled: func(options api.GetAlteredAccountsForBlockOptions) ([]*outport.AlteredAccount, error) { + require.Equal(t, providedHash, hex.EncodeToString(options.Hash)) + require.Equal(t, expectedOptions, options) + return expectedResponse, nil + }, + } + + response := &alteredAccountsForBlockResponse{} + loadBlockGroupResponse( + t, + facade, + fmt.Sprintf("/block/altered-accounts/by-hash/%s", providedHash), + "GET", + nil, + response, + ) + require.Equal(t, expectedResponse, response.Data.Accounts) + require.Empty(t, response.Error) + require.Equal(t, string(shared.ReturnCodeSuccess), response.Code) + }) } -func TestGetBlockByNonce_ShouldWork(t *testing.T) { +func TestBlockGroup_IsInterfaceNil(t *testing.T) { t.Parallel() - expectedBlock := api.Block{ - Nonce: 37, - Round: 39, - } - facade := mock.FacadeStub{ - GetBlockByNonceCalled: func(_ uint64, _ api.BlockQueryOptions) (*api.Block, error) { - return &expectedBlock, nil - }, - } + blockGroup, _ := groups.NewBlockGroup(nil) + require.True(t, blockGroup.IsInterfaceNil()) - blockGroup, err := groups.NewBlockGroup(&facade) - require.NoError(t, err) + blockGroup, _ = groups.NewBlockGroup(&mock.FacadeStub{}) + require.False(t, blockGroup.IsInterfaceNil()) +} - ws := startWebServer(blockGroup, "block", getBlockRoutesConfig()) +func TestBlockGroup_UpdateFacadeStub(t *testing.T) { + t.Parallel() - req, _ := http.NewRequest("GET", "/block/by-nonce/37", nil) - resp := httptest.NewRecorder() - ws.ServeHTTP(resp, req) + t.Run("nil facade should error", func(t *testing.T) { + t.Parallel() - response := blockResponse{} - loadResponse(resp.Body, &response) - assert.Equal(t, http.StatusOK, resp.Code) + blockGroup, err := groups.NewBlockGroup(&mock.FacadeStub{}) + require.NoError(t, err) - assert.Equal(t, expectedBlock, response.Data.Block) -} + err = blockGroup.UpdateFacade(nil) + require.Equal(t, apiErrors.ErrNilFacadeHandler, err) + }) + t.Run("cast failure should error", func(t *testing.T) { + t.Parallel() -// ---- by hash + blockGroup, err := groups.NewBlockGroup(&mock.FacadeStub{}) + require.NoError(t, err) -func 
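// The by-hash tests above lean on the handler hex-decoding the path segment:
// hex.EncodeToString([]byte("hash")) == "68617368" round-trips back to
// options.Hash == []byte("hash"), while the literal string "hash" is not
// valid hex and is rejected with a 400. A standalone illustration:

package example

import (
    "encoding/hex"
    "fmt"
)

func main() {
    encoded := hex.EncodeToString([]byte("hash"))
    fmt.Println(encoded) // 68617368

    decoded, err := hex.DecodeString(encoded)
    fmt.Println(string(decoded), err) // hash <nil>

    _, err = hex.DecodeString("hash") // 'h' and 's' are not hex digits
    fmt.Println(err != nil)           // true
}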
TestGetBlockByHash_NoHashUrlParameterShouldErr(t *testing.T) { - t.Parallel() + err = blockGroup.UpdateFacade("this is not a facade handler") + require.True(t, errors.Is(err, apiErrors.ErrFacadeWrongTypeAssertion)) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + expectedBlock := api.Block{ + Nonce: 37, + Round: 39, + } + facade := mock.FacadeStub{ + GetBlockByNonceCalled: func(nonce uint64, options api.BlockQueryOptions) (*api.Block, error) { + return &expectedBlock, nil + }, + } - facade := mock.FacadeStub{ - GetBlockByNonceCalled: func(_ uint64, _ api.BlockQueryOptions) (*api.Block, error) { - return &api.Block{}, nil - }, - } + blockGroup, err := groups.NewBlockGroup(&facade) + require.NoError(t, err) - blockGroup, err := groups.NewBlockGroup(&facade) - require.NoError(t, err) + ws := startWebServer(blockGroup, "block", getBlockRoutesConfig()) - ws := startWebServer(blockGroup, "block", getBlockRoutesConfig()) + req, _ := http.NewRequest("GET", "/block/by-nonce/10", nil) + resp := httptest.NewRecorder() + ws.ServeHTTP(resp, req) - req, _ := http.NewRequest("GET", "/block/by-hash", nil) - resp := httptest.NewRecorder() - ws.ServeHTTP(resp, req) + response := blockResponse{} + loadResponse(resp.Body, &response) + assert.Equal(t, http.StatusOK, resp.Code) + assert.Equal(t, expectedBlock, response.Data.Block) - response := blockResponse{} - loadResponse(resp.Body, &response) - assert.Equal(t, http.StatusNotFound, resp.Code) -} + newFacade := mock.FacadeStub{ + GetBlockByNonceCalled: func(nonce uint64, options api.BlockQueryOptions) (*api.Block, error) { + return nil, expectedErr + }, + } + err = blockGroup.UpdateFacade(&newFacade) + require.NoError(t, err) -func TestGetBlockByHash_FacadeErrorShouldErr(t *testing.T) { - t.Parallel() + req, _ = http.NewRequest("GET", "/block/by-nonce/10", nil) + resp = httptest.NewRecorder() + ws.ServeHTTP(resp, req) - expectedErr := errors.New("local err") - facade := mock.FacadeStub{ - GetBlockByHashCalled: func(_ string, _ api.BlockQueryOptions) (*api.Block, error) { - return nil, expectedErr - }, - } + response = blockResponse{} + loadResponse(resp.Body, &response) + assert.Equal(t, http.StatusInternalServerError, resp.Code) + assert.True(t, strings.Contains(response.Error, expectedErr.Error())) + }) +} - blockGroup, err := groups.NewBlockGroup(&facade) +func loadBlockGroupResponse( + t *testing.T, + facade shared.FacadeHandler, + url string, + method string, + body io.Reader, + destination interface{}, +) { + blockGroup, err := groups.NewBlockGroup(facade) require.NoError(t, err) ws := startWebServer(blockGroup, "block", getBlockRoutesConfig()) - req, _ := http.NewRequest("GET", "/block/by-hash/hash", nil) + req, _ := http.NewRequest(method, url, body) resp := httptest.NewRecorder() ws.ServeHTTP(resp, req) - response := blockResponse{} - loadResponse(resp.Body, &response) - assert.Equal(t, http.StatusInternalServerError, resp.Code) + assert.Equal(t, http.StatusOK, resp.Code) - assert.True(t, strings.Contains(response.Error, expectedErr.Error())) + loadResponse(resp.Body, destination) } -func TestGetBlockByHash_ShouldWork(t *testing.T) { - t.Parallel() - - expectedBlock := api.Block{ - Nonce: 37, - Round: 39, - } - facade := mock.FacadeStub{ - GetBlockByHashCalled: func(_ string, _ api.BlockQueryOptions) (*api.Block, error) { - return &expectedBlock, nil - }, +func testBlockGroupErrorScenario(url string, body io.Reader, expectedErr string) func(t *testing.T) { + return func(t *testing.T) { + t.Parallel() + + testBlockGroup( + t, + 
&mock.FacadeStub{}, + url, + body, + http.StatusBadRequest, + expectedErr, + ) } +} - blockGroup, err := groups.NewBlockGroup(&facade) +func testBlockGroup( + t *testing.T, + facade shared.FacadeHandler, + url string, + body io.Reader, + expectedRespCode int, + expectedRespError string, +) { + blockGroup, err := groups.NewBlockGroup(facade) require.NoError(t, err) ws := startWebServer(blockGroup, "block", getBlockRoutesConfig()) - req, _ := http.NewRequest("GET", "/block/by-hash/hash", nil) + req, _ := http.NewRequest("GET", url, body) resp := httptest.NewRecorder() ws.ServeHTTP(resp, req) response := blockResponse{} loadResponse(resp.Body, &response) - assert.Equal(t, http.StatusOK, resp.Code) - - assert.Equal(t, expectedBlock, response.Data.Block) + assert.Equal(t, expectedRespCode, resp.Code) + assert.True(t, strings.Contains(response.Error, expectedRespError)) } func getBlockRoutesConfig() config.ApiRoutesConfig { @@ -252,270 +526,3 @@ func getBlockRoutesConfig() config.ApiRoutesConfig { }, } } - -// ---- by round - -func TestGetBlockByRound_WrongFacadeShouldErr(t *testing.T) { - t.Parallel() - - expectedErr := errors.New("local err") - facade := mock.FacadeStub{ - GetBlockByRoundCalled: func(_ uint64, _ api.BlockQueryOptions) (*api.Block, error) { - return nil, expectedErr - }, - } - - blockGroup, err := groups.NewBlockGroup(&facade) - require.NoError(t, err) - - ws := startWebServer(blockGroup, "block", getBlockRoutesConfig()) - - req, _ := http.NewRequest("GET", "/block/by-round/2", nil) - resp := httptest.NewRecorder() - ws.ServeHTTP(resp, req) - - response := blockResponse{} - loadResponse(resp.Body, &response) - - assert.Equal(t, http.StatusInternalServerError, resp.Code) - assert.True(t, strings.Contains(response.Error, expectedErr.Error())) -} - -func TestGetBlockByRound_EmptyRoundUrlParameterShouldErr(t *testing.T) { - t.Parallel() - - facade := mock.FacadeStub{ - GetBlockByRoundCalled: func(_ uint64, _ api.BlockQueryOptions) (*api.Block, error) { - return &api.Block{}, nil - }, - } - - blockGroup, err := groups.NewBlockGroup(&facade) - require.NoError(t, err) - - ws := startWebServer(blockGroup, "block", getBlockRoutesConfig()) - - req, _ := http.NewRequest("GET", "/block/by-round", nil) - resp := httptest.NewRecorder() - ws.ServeHTTP(resp, req) - - response := blockResponse{} - loadResponse(resp.Body, &response) - assert.Equal(t, http.StatusNotFound, resp.Code) -} - -func TestGetBlockByRound_InvalidRoundShouldErr(t *testing.T) { - t.Parallel() - - facade := mock.FacadeStub{ - GetBlockByNonceCalled: func(_ uint64, _ api.BlockQueryOptions) (*api.Block, error) { - return &api.Block{}, nil - }, - } - - blockGroup, err := groups.NewBlockGroup(&facade) - require.NoError(t, err) - - ws := startWebServer(blockGroup, "block", getBlockRoutesConfig()) - - req, _ := http.NewRequest("GET", "/block/by-round/invalid", nil) - resp := httptest.NewRecorder() - ws.ServeHTTP(resp, req) - - response := blockResponse{} - loadResponse(resp.Body, &response) - - assert.Equal(t, http.StatusBadRequest, resp.Code) - assert.True(t, strings.Contains(response.Error, apiErrors.ErrInvalidBlockRound.Error())) -} - -func TestGetBlockByRound_FacadeErrorShouldErr(t *testing.T) { - t.Parallel() - - expectedErr := errors.New("local err") - facade := mock.FacadeStub{ - GetBlockByRoundCalled: func(_ uint64, _ api.BlockQueryOptions) (*api.Block, error) { - return nil, expectedErr - }, - } - - blockGroup, err := groups.NewBlockGroup(&facade) - require.NoError(t, err) - - ws := startWebServer(blockGroup, "block", 
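// testBlockGroup above is a near-copy of testAddressGroup earlier in this
// diff (and of testInternalGroup later); only the group constructor, the
// mount path, and the routes config differ. A possible consolidation,
// sketched here as a suggestion and not part of this change (it assumes
// startWebServer accepts a shared group-handler interface and that the
// sketch lives in the same groups_test package):

func testGroupEndpoint(
    t *testing.T,
    group shared.GroupHandler, // assumed interface name
    mountPath string,
    routes config.ApiRoutesConfig,
    url string,
    expectedRespCode int,
    expectedRespError string,
) {
    ws := startWebServer(group, mountPath, routes)

    req, _ := http.NewRequest("GET", url, nil)
    resp := httptest.NewRecorder()
    ws.ServeHTTP(resp, req)

    response := shared.GenericAPIResponse{}
    loadResponse(resp.Body, &response)
    assert.Equal(t, expectedRespCode, resp.Code)
    assert.True(t, strings.Contains(response.Error, expectedRespError))
}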
getBlockRoutesConfig()) - - req, _ := http.NewRequest("GET", "/block/by-round/37", nil) - resp := httptest.NewRecorder() - ws.ServeHTTP(resp, req) - - response := blockResponse{} - loadResponse(resp.Body, &response) - - assert.Equal(t, http.StatusInternalServerError, resp.Code) - assert.True(t, strings.Contains(response.Error, expectedErr.Error())) -} - -func TestGetBlockByRound_ShouldWork(t *testing.T) { - t.Parallel() - - expectedBlock := api.Block{ - Nonce: 37, - Round: 39, - } - facade := mock.FacadeStub{ - GetBlockByRoundCalled: func(_ uint64, _ api.BlockQueryOptions) (*api.Block, error) { - return &expectedBlock, nil - }, - } - - blockGroup, err := groups.NewBlockGroup(&facade) - require.NoError(t, err) - - ws := startWebServer(blockGroup, "block", getBlockRoutesConfig()) - - req, _ := http.NewRequest("GET", "/block/by-round/37", nil) - resp := httptest.NewRecorder() - ws.ServeHTTP(resp, req) - - response := blockResponse{} - loadResponse(resp.Body, &response) - - assert.Equal(t, http.StatusOK, resp.Code) - assert.Equal(t, expectedBlock, response.Data.Block) -} - -func TestGetBlockByRound_WithBadBlockQueryOptionsShouldErr(t *testing.T) { - t.Parallel() - - facade := mock.FacadeStub{ - GetBlockByRoundCalled: func(_ uint64, _ api.BlockQueryOptions) (*api.Block, error) { - return &api.Block{}, nil - }, - } - - blockGroup, err := groups.NewBlockGroup(&facade) - require.NoError(t, err) - - ws := startWebServer(blockGroup, "block", getBlockRoutesConfig()) - - response, code := httpGetBlock(ws, "/block/by-round/37?withTxs=bad") - require.Equal(t, http.StatusBadRequest, code) - require.Contains(t, response.Error, apiErrors.ErrBadUrlParams.Error()) - - response, code = httpGetBlock(ws, "/block/by-round/37?withLogs=bad") - require.Equal(t, http.StatusBadRequest, code) - require.Contains(t, response.Error, apiErrors.ErrBadUrlParams.Error()) -} - -func TestGetBlockByRound_WithBlockQueryOptionsShouldWork(t *testing.T) { - t.Parallel() - - var calledWithRound uint64 - var calledWithOptions api.BlockQueryOptions - - facade := mock.FacadeStub{ - GetBlockByRoundCalled: func(round uint64, options api.BlockQueryOptions) (*api.Block, error) { - calledWithRound = round - calledWithOptions = options - return &api.Block{}, nil - }, - } - - blockGroup, err := groups.NewBlockGroup(&facade) - require.NoError(t, err) - - ws := startWebServer(blockGroup, "block", getBlockRoutesConfig()) - - response, code := httpGetBlock(ws, "/block/by-round/37?withTxs=true") - require.Equal(t, http.StatusOK, code) - require.NotNil(t, response) - require.Equal(t, uint64(37), calledWithRound) - require.Equal(t, api.BlockQueryOptions{WithTransactions: true}, calledWithOptions) - - response, code = httpGetBlock(ws, "/block/by-round/38?withTxs=true&withLogs=true") - require.Equal(t, http.StatusOK, code) - require.NotNil(t, response) - require.Equal(t, uint64(38), calledWithRound) - require.Equal(t, api.BlockQueryOptions{WithTransactions: true, WithLogs: true}, calledWithOptions) -} - -func TestGetAlteredAccountsByNonce_ShouldWork(t *testing.T) { - t.Parallel() - - expectedResponse := []*outport.AlteredAccount{ - { - Address: "alice", - Balance: "100000", - }, - } - - facade := mock.FacadeStub{ - GetAlteredAccountsForBlockCalled: func(options api.GetAlteredAccountsForBlockOptions) ([]*outport.AlteredAccount, error) { - require.Equal(t, api.BlockFetchTypeByNonce, options.RequestType) - require.Equal(t, uint64(37), options.Nonce) - - return expectedResponse, nil - }, - } - - blockGroup, err := groups.NewBlockGroup(&facade) - 
require.NoError(t, err) - - ws := startWebServer(blockGroup, "block", getBlockRoutesConfig()) - - response, code := httpGetAlteredAccountsForBlockBlock(ws, "/block/altered-accounts/by-nonce/37") - require.Equal(t, http.StatusOK, code) - require.Equal(t, expectedResponse, response.Data.Accounts) - require.Empty(t, response.Error) - require.Equal(t, string(shared.ReturnCodeSuccess), response.Code) -} - -func TestGetAlteredAccountsByHash_ShouldWork(t *testing.T) { - t.Parallel() - - expectedResponse := []*outport.AlteredAccount{ - { - Address: "alice", - Balance: "100000", - }, - } - facade := mock.FacadeStub{ - GetAlteredAccountsForBlockCalled: func(options api.GetAlteredAccountsForBlockOptions) ([]*outport.AlteredAccount, error) { - require.Equal(t, api.BlockFetchTypeByHash, options.RequestType) - require.Equal(t, "aabb", hex.EncodeToString(options.Hash)) - - return expectedResponse, nil - }, - } - - blockGroup, err := groups.NewBlockGroup(&facade) - require.NoError(t, err) - - ws := startWebServer(blockGroup, "block", getBlockRoutesConfig()) - - response, code := httpGetAlteredAccountsForBlockBlock(ws, "/block/altered-accounts/by-hash/aabb") - require.Equal(t, http.StatusOK, code) - require.Equal(t, expectedResponse, response.Data.Accounts) - require.Empty(t, response.Error) - require.Equal(t, string(shared.ReturnCodeSuccess), response.Code) -} - -func httpGetBlock(ws *gin.Engine, url string) (blockResponse, int) { - httpRequest, _ := http.NewRequest("GET", url, nil) - httpResponse := httptest.NewRecorder() - ws.ServeHTTP(httpResponse, httpRequest) - - blockResponse := blockResponse{} - loadResponse(httpResponse.Body, &blockResponse) - return blockResponse, httpResponse.Code -} - -func httpGetAlteredAccountsForBlockBlock(ws *gin.Engine, url string) (alteredAccountsForBlockResponse, int) { - httpRequest, _ := http.NewRequest("GET", url, nil) - httpResponse := httptest.NewRecorder() - ws.ServeHTTP(httpResponse, httpRequest) - - response := alteredAccountsForBlockResponse{} - loadResponse(httpResponse.Body, &response) - return response, httpResponse.Code -} diff --git a/api/groups/hardforkGroup_test.go b/api/groups/hardforkGroup_test.go index 0c25c71e03f..b5ff8928ccc 100644 --- a/api/groups/hardforkGroup_test.go +++ b/api/groups/hardforkGroup_test.go @@ -6,6 +6,7 @@ import ( "errors" "net/http" "net/http/httptest" + "strings" "sync/atomic" "testing" @@ -18,7 +19,7 @@ import ( "github.com/stretchr/testify/require" ) -type TriggerResponse struct { +type triggerResponse struct { Status string `json:"status"` } @@ -38,10 +39,9 @@ func TestNewHardforkGroup(t *testing.T) { }) } -func TestTrigger_TriggerCanNotExecuteShouldErr(t *testing.T) { +func TestHardforkGroup_TriggerCannotExecuteShouldErr(t *testing.T) { t.Parallel() - expectedErr := errors.New("expected error") hardforkFacade := &mock.HardforkFacade{ TriggerCalled: func(_ uint32, _ bool) error { return expectedErr @@ -69,7 +69,7 @@ func TestTrigger_TriggerCanNotExecuteShouldErr(t *testing.T) { assert.Contains(t, response.Error, expectedErr.Error()) } -func TestTrigger_TriggerWrongRequestTypeShouldErr(t *testing.T) { +func TestHardforkGroup_TriggerWrongRequestTypeShouldErr(t *testing.T) { t.Parallel() hardforkGroup, err := groups.NewHardforkGroup(&mock.HardforkFacade{}) @@ -81,13 +81,13 @@ func TestTrigger_TriggerWrongRequestTypeShouldErr(t *testing.T) { resp := httptest.NewRecorder() ws.ServeHTTP(resp, req) - triggerResponse := TriggerResponse{} - loadResponse(resp.Body, &triggerResponse) + triggerResp := triggerResponse{} + 
loadResponse(resp.Body, &triggerResp) assert.Equal(t, resp.Code, http.StatusBadRequest) } -func TestTrigger_ManualShouldWork(t *testing.T) { +func TestHardforkGroup_ManualShouldWork(t *testing.T) { t.Parallel() recoveredEpoch := uint32(0) @@ -118,17 +118,17 @@ func TestTrigger_ManualShouldWork(t *testing.T) { response := shared.GenericAPIResponse{} loadResponse(resp.Body, &response) - triggerResponse := TriggerResponse{} + triggerResp := triggerResponse{} mapResponseData := response.Data.(map[string]interface{}) mapResponseBytes, _ := json.Marshal(&mapResponseData) - _ = json.Unmarshal(mapResponseBytes, &triggerResponse) + _ = json.Unmarshal(mapResponseBytes, &triggerResp) assert.Equal(t, resp.Code, http.StatusOK) - assert.Equal(t, groups.ExecManualTrigger, triggerResponse.Status) + assert.Equal(t, groups.ExecManualTrigger, triggerResp.Status) assert.Equal(t, hr.Epoch, atomic.LoadUint32(&recoveredEpoch)) } -func TestTrigger_BroadcastShouldWork(t *testing.T) { +func TestHardforkGroup_BroadcastShouldWork(t *testing.T) { t.Parallel() hardforkFacade := &mock.HardforkFacade{ @@ -156,13 +156,99 @@ func TestTrigger_BroadcastShouldWork(t *testing.T) { response := shared.GenericAPIResponse{} loadResponse(resp.Body, &response) - triggerResponse := TriggerResponse{} + triggerResp := triggerResponse{} mapResponseData := response.Data.(map[string]interface{}) mapResponseBytes, _ := json.Marshal(&mapResponseData) - _ = json.Unmarshal(mapResponseBytes, &triggerResponse) + _ = json.Unmarshal(mapResponseBytes, &triggerResp) assert.Equal(t, resp.Code, http.StatusOK) - assert.Equal(t, groups.ExecBroadcastTrigger, triggerResponse.Status) + assert.Equal(t, groups.ExecBroadcastTrigger, triggerResp.Status) +} + +func TestHardforkGroup_IsInterfaceNil(t *testing.T) { + t.Parallel() + + hardforkGroup, _ := groups.NewHardforkGroup(nil) + require.True(t, hardforkGroup.IsInterfaceNil()) + + hardforkGroup, _ = groups.NewHardforkGroup(&mock.FacadeStub{}) + require.False(t, hardforkGroup.IsInterfaceNil()) +} + +func TestHardforkGroup_UpdateFacadeStub(t *testing.T) { + t.Parallel() + + t.Run("nil facade should error", func(t *testing.T) { + t.Parallel() + + hardforkGroup, err := groups.NewHardforkGroup(&mock.FacadeStub{}) + require.NoError(t, err) + + err = hardforkGroup.UpdateFacade(nil) + require.Equal(t, apiErrors.ErrNilFacadeHandler, err) + }) + t.Run("cast failure should error", func(t *testing.T) { + t.Parallel() + + hardforkGroup, err := groups.NewHardforkGroup(&mock.FacadeStub{}) + require.NoError(t, err) + + err = hardforkGroup.UpdateFacade("this is not a facade handler") + require.True(t, errors.Is(err, apiErrors.ErrFacadeWrongTypeAssertion)) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + hardforkFacade := &mock.HardforkFacade{ + TriggerCalled: func(_ uint32, _ bool) error { + return nil + }, + IsSelfTriggerCalled: func() bool { + return true + }, + } + + hardforkGroup, err := groups.NewHardforkGroup(hardforkFacade) + require.NoError(t, err) + + ws := startWebServer(hardforkGroup, "hardfork", getHardforkRoutesConfig()) + + hr := &groups.HardforkRequest{ + Epoch: 4, + } + buffHr, _ := json.Marshal(hr) + req, _ := http.NewRequest("POST", "/hardfork/trigger", bytes.NewBuffer(buffHr)) + resp := httptest.NewRecorder() + ws.ServeHTTP(resp, req) + + response := shared.GenericAPIResponse{} + loadResponse(resp.Body, &response) + + triggerResp := triggerResponse{} + mapResponseData := response.Data.(map[string]interface{}) + mapResponseBytes, _ := json.Marshal(&mapResponseData) + _ = 
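// The marshal/unmarshal dance above is how these tests convert the untyped
// GenericAPIResponse.Data (a map[string]interface{} once JSON-decoded) into
// a typed struct such as triggerResponse. Should it ever need reuse, it
// extracts cleanly into a helper along these lines (a sketch, not part of
// this diff):

func decodeResponseData(t *testing.T, data interface{}, destination interface{}) {
    buff, err := json.Marshal(data) // re-encode the generic map...
    require.NoError(t, err)
    require.NoError(t, json.Unmarshal(buff, destination)) // ...into the typed struct
}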
json.Unmarshal(mapResponseBytes, &triggerResp) + + assert.Equal(t, resp.Code, http.StatusOK) + assert.Equal(t, groups.ExecBroadcastTrigger, triggerResp.Status) + + newFacade := &mock.HardforkFacade{ + TriggerCalled: func(_ uint32, _ bool) error { + return expectedErr + }, + } + err = hardforkGroup.UpdateFacade(newFacade) + require.NoError(t, err) + + req, _ = http.NewRequest("POST", "/hardfork/trigger", bytes.NewBuffer(buffHr)) + resp = httptest.NewRecorder() + ws.ServeHTTP(resp, req) + + response = shared.GenericAPIResponse{} + loadResponse(resp.Body, &response) + assert.Equal(t, http.StatusInternalServerError, resp.Code) + assert.True(t, strings.Contains(response.Error, expectedErr.Error())) + }) } func getHardforkRoutesConfig() config.ApiRoutesConfig { diff --git a/api/groups/internalGroup_test.go b/api/groups/internalGroup_test.go index c36cd07e0c8..92f34ac9320 100644 --- a/api/groups/internalGroup_test.go +++ b/api/groups/internalGroup_test.go @@ -3,6 +3,7 @@ package groups_test import ( "bytes" "errors" + "io" "net/http" "net/http/httptest" "strings" @@ -12,6 +13,7 @@ import ( apiErrors "github.com/multiversx/mx-chain-go/api/errors" "github.com/multiversx/mx-chain-go/api/groups" "github.com/multiversx/mx-chain-go/api/mock" + "github.com/multiversx/mx-chain-go/api/shared" "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/state" @@ -77,6 +79,18 @@ type internalValidatorsInfoResponse struct { Code string `json:"code"` } +var ( + expectedRawBlockOutput = bytes.Repeat([]byte("1"), 10) + expectedMetaBlock = block.MetaBlock{ + Nonce: 15, + Epoch: 15, + } + expectedShardBlock = block.Header{ + Nonce: 15, + Round: 15, + } +) + func TestNewInternalBlockGroup(t *testing.T) { t.Parallel() @@ -93,1467 +107,911 @@ func TestNewInternalBlockGroup(t *testing.T) { }) } -// ---- RAW - -// ---- MetaBlock - by nonce - -func TestGetRawMetaBlockByNonce_EmptyNonceUrlParameterShouldErr(t *testing.T) { - t.Parallel() - - facade := mock.FacadeStub{ - GetInternalMetaBlockByNonceCalled: func(_ common.ApiOutputFormat, _ uint64) (interface{}, error) { - return []byte{}, nil - }, - } - - blockGroup, err := groups.NewInternalBlockGroup(&facade) - require.NoError(t, err) - - ws := startWebServer(blockGroup, "internal", getInternalBlockRoutesConfig()) - - req, _ := http.NewRequest("GET", "/internal/raw/metablock/by-nonce", nil) - resp := httptest.NewRecorder() - ws.ServeHTTP(resp, req) - - response := rawBlockResponse{} - loadResponse(resp.Body, &response) - assert.Equal(t, http.StatusNotFound, resp.Code) -} - -func TestGetRawMetaBlockByNonce_InvalidNonceShouldErr(t *testing.T) { +func TestInternalBlockGroup_getRawMetaBlockByNonce(t *testing.T) { t.Parallel() - facade := mock.FacadeStub{ - GetInternalMetaBlockByNonceCalled: func(_ common.ApiOutputFormat, _ uint64) (interface{}, error) { - return []byte{}, nil - }, - } + t.Run("empty nonce should error", func(t *testing.T) { + t.Parallel() - blockGroup, err := groups.NewInternalBlockGroup(&facade) - require.NoError(t, err) + testInternalGroup(t, &mock.FacadeStub{}, "/internal/raw/metablock/by-nonce", nil, http.StatusNotFound, "") + }) + t.Run("invalid nonce should error", + testInternalGroupErrorScenario("/internal/raw/metablock/by-nonce/invalid", nil, + formatExpectedErr(apiErrors.ErrGetBlock, apiErrors.ErrInvalidBlockNonce))) + t.Run("facade error should error", func(t *testing.T) { + t.Parallel() - ws := startWebServer(blockGroup, "internal", getInternalBlockRoutesConfig()) + facade := 
&mock.FacadeStub{ + GetInternalMetaBlockByNonceCalled: func(_ common.ApiOutputFormat, _ uint64) (interface{}, error) { + return nil, expectedErr + }, + } - req, _ := http.NewRequest("GET", "/internal/raw/metablock/by-nonce/invalid", nil) - resp := httptest.NewRecorder() - ws.ServeHTTP(resp, req) + testInternalGroup( + t, + facade, + "/internal/raw/metablock/by-nonce/15", + nil, + http.StatusInternalServerError, + formatExpectedErr(apiErrors.ErrGetBlock, expectedErr), + ) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() - response := rawBlockResponse{} - loadResponse(resp.Body, &response) + facade := &mock.FacadeStub{ + GetInternalMetaBlockByNonceCalled: func(_ common.ApiOutputFormat, _ uint64) (interface{}, error) { + return expectedRawBlockOutput, nil + }, + } - assert.Equal(t, http.StatusBadRequest, resp.Code) - assert.True(t, strings.Contains(response.Error, apiErrors.ErrInvalidBlockNonce.Error())) + response := &rawBlockResponse{} + loadInternalBlockGroupResponse( + t, + facade, + "/internal/raw/metablock/by-nonce/15", + "GET", + nil, + response, + ) + assert.Equal(t, expectedRawBlockOutput, response.Data.Block) + }) } -func TestGetRawMetaBlockByNonce_FacadeErrorShouldErr(t *testing.T) { +func TestInternalBlockGroup_getRawMetaBlockByRound(t *testing.T) { t.Parallel() - expectedErr := errors.New("local err") - facade := mock.FacadeStub{ - GetInternalMetaBlockByNonceCalled: func(_ common.ApiOutputFormat, _ uint64) (interface{}, error) { - return nil, expectedErr - }, - } + t.Run("empty round should error", func(t *testing.T) { + t.Parallel() - blockGroup, err := groups.NewInternalBlockGroup(&facade) - require.NoError(t, err) + testInternalGroup(t, &mock.FacadeStub{}, "/internal/raw/metablock/by-round", nil, http.StatusNotFound, "") + }) + t.Run("invalid round should error", + testInternalGroupErrorScenario("/internal/raw/metablock/by-round/invalid", nil, + formatExpectedErr(apiErrors.ErrGetBlock, apiErrors.ErrInvalidBlockRound))) + t.Run("facade error should error", func(t *testing.T) { + t.Parallel() - ws := startWebServer(blockGroup, "internal", getInternalBlockRoutesConfig()) + facade := &mock.FacadeStub{ + GetInternalMetaBlockByRoundCalled: func(_ common.ApiOutputFormat, _ uint64) (interface{}, error) { + return nil, expectedErr + }, + } - req, _ := http.NewRequest("GET", "/internal/raw/metablock/by-nonce/15", nil) - resp := httptest.NewRecorder() - ws.ServeHTTP(resp, req) + testInternalGroup( + t, + facade, + "/internal/raw/metablock/by-round/15", + nil, + http.StatusInternalServerError, + formatExpectedErr(apiErrors.ErrGetBlock, expectedErr), + ) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() - response := rawBlockResponse{} - loadResponse(resp.Body, &response) + facade := &mock.FacadeStub{ + GetInternalMetaBlockByRoundCalled: func(_ common.ApiOutputFormat, _ uint64) (interface{}, error) { + return expectedRawBlockOutput, nil + }, + } - assert.Equal(t, http.StatusInternalServerError, resp.Code) - assert.True(t, strings.Contains(response.Error, expectedErr.Error())) + response := &rawBlockResponse{} + loadInternalBlockGroupResponse( + t, + facade, + "/internal/raw/metablock/by-round/15", + "GET", + nil, + response, + ) + assert.Equal(t, expectedRawBlockOutput, response.Data.Block) + }) } -func TestGetRawMetaBlockByNonce_ShouldWork(t *testing.T) { +func TestInternalBlockGroup_getRawMetaBlockByHash(t *testing.T) { t.Parallel() - expectedOutput := bytes.Repeat([]byte("1"), 10) - - facade := mock.FacadeStub{ - GetInternalMetaBlockByNonceCalled: func(_ 
common.ApiOutputFormat, _ uint64) (interface{}, error) { - return expectedOutput, nil - }, - } - - blockGroup, err := groups.NewInternalBlockGroup(&facade) - require.NoError(t, err) - - ws := startWebServer(blockGroup, "internal", getInternalBlockRoutesConfig()) - - req, _ := http.NewRequest("GET", "/internal/raw/metablock/by-nonce/15", nil) - resp := httptest.NewRecorder() - ws.ServeHTTP(resp, req) - - response := rawBlockResponse{} - loadResponse(resp.Body, &response) - assert.Equal(t, http.StatusOK, resp.Code) - - assert.Equal(t, expectedOutput, response.Data.Block) -} - -// ---- MetaBlock - by round - -func TestGetRawMetaBlockByRound_EmptyRoundUrlParameterShouldErr(t *testing.T) { - t.Parallel() + t.Run("empty hash should error", func(t *testing.T) { + t.Parallel() - facade := mock.FacadeStub{ - GetInternalMetaBlockByRoundCalled: func(_ common.ApiOutputFormat, _ uint64) (interface{}, error) { - return []byte{}, nil - }, - } + testInternalGroup(t, &mock.FacadeStub{}, "/internal/raw/metablock/by-hash", nil, http.StatusNotFound, "") + }) + t.Run("facade error should error", func(t *testing.T) { + t.Parallel() - blockGroup, err := groups.NewInternalBlockGroup(&facade) - require.NoError(t, err) + facade := &mock.FacadeStub{ + GetInternalMetaBlockByHashCalled: func(_ common.ApiOutputFormat, _ string) (interface{}, error) { + return nil, expectedErr + }, + } - ws := startWebServer(blockGroup, "internal", getInternalBlockRoutesConfig()) + testInternalGroup( + t, + facade, + "/internal/raw/metablock/by-hash/dummyhash", + nil, + http.StatusInternalServerError, + formatExpectedErr(apiErrors.ErrGetBlock, expectedErr), + ) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() - req, _ := http.NewRequest("GET", "/internal/raw/metablock/by-round", nil) - resp := httptest.NewRecorder() - ws.ServeHTTP(resp, req) + facade := &mock.FacadeStub{ + GetInternalMetaBlockByHashCalled: func(_ common.ApiOutputFormat, _ string) (interface{}, error) { + return expectedRawBlockOutput, nil + }, + } - response := rawBlockResponse{} - loadResponse(resp.Body, &response) - assert.Equal(t, http.StatusNotFound, resp.Code) + response := &rawBlockResponse{} + loadInternalBlockGroupResponse( + t, + facade, + "/internal/raw/metablock/by-hash/d08089f2ab739520598fd7aeed08c427460fe94f286383047f3f61951afc4e00", + "GET", + nil, + response, + ) + assert.Equal(t, expectedRawBlockOutput, response.Data.Block) + }) } -func TestGetRawMetaBlockByRound_InvalidRoundShouldErr(t *testing.T) { +func TestInternalBlockGroup_getRawStartOfEpochMetaBlock(t *testing.T) { t.Parallel() - facade := mock.FacadeStub{ - GetInternalMetaBlockByRoundCalled: func(_ common.ApiOutputFormat, _ uint64) (interface{}, error) { - return []byte{}, nil - }, - } + t.Run("empty epoch should error", func(t *testing.T) { + t.Parallel() - blockGroup, err := groups.NewInternalBlockGroup(&facade) - require.NoError(t, err) + testInternalGroup(t, &mock.FacadeStub{}, "/internal/raw/startofepoch/metablock/by-epoch/", nil, http.StatusNotFound, "") + }) + t.Run("invalid epoch should error", + testInternalGroupErrorScenario("/internal/raw/startofepoch/metablock/by-epoch/invalid", nil, + formatExpectedErr(apiErrors.ErrGetBlock, apiErrors.ErrInvalidEpoch))) + t.Run("facade error should error", func(t *testing.T) { + t.Parallel() - ws := startWebServer(blockGroup, "internal", getInternalBlockRoutesConfig()) + facade := &mock.FacadeStub{ + GetInternalStartOfEpochMetaBlockCalled: func(_ common.ApiOutputFormat, epoch uint32) (interface{}, error) { + return nil, expectedErr + 
}, + } - req, _ := http.NewRequest("GET", "/internal/raw/metablock/by-round/invalid", nil) - resp := httptest.NewRecorder() - ws.ServeHTTP(resp, req) + testInternalGroup( + t, + facade, + "/internal/raw/startofepoch/metablock/by-epoch/1", + nil, + http.StatusInternalServerError, + formatExpectedErr(apiErrors.ErrGetBlock, expectedErr), + ) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() - response := rawBlockResponse{} - loadResponse(resp.Body, &response) + facade := &mock.FacadeStub{ + GetInternalStartOfEpochMetaBlockCalled: func(_ common.ApiOutputFormat, epoch uint32) (interface{}, error) { + return expectedRawBlockOutput, nil + }, + } - assert.Equal(t, http.StatusBadRequest, resp.Code) - assert.True(t, strings.Contains(response.Error, apiErrors.ErrInvalidBlockRound.Error())) + response := &rawBlockResponse{} + loadInternalBlockGroupResponse( + t, + facade, + "/internal/raw/startofepoch/metablock/by-epoch/1", + "GET", + nil, + response, + ) + assert.Equal(t, expectedRawBlockOutput, response.Data.Block) + }) } -func TestGetRawMetaBlockByRound_FacadeErrorShouldErr(t *testing.T) { +func TestInternalBlockGroup_getRawShardBlockByNonce(t *testing.T) { t.Parallel() - expectedErr := errors.New("local err") - facade := mock.FacadeStub{ - GetInternalMetaBlockByRoundCalled: func(_ common.ApiOutputFormat, _ uint64) (interface{}, error) { - return nil, expectedErr - }, - } + t.Run("empty nonce should error", func(t *testing.T) { + t.Parallel() - blockGroup, err := groups.NewInternalBlockGroup(&facade) - require.NoError(t, err) + testInternalGroup(t, &mock.FacadeStub{}, "/internal/raw/shardblock/by-nonce", nil, http.StatusNotFound, "") + }) + t.Run("invalid nonce should error", + testInternalGroupErrorScenario("/internal/raw/shardblock/by-nonce/invalid", nil, + formatExpectedErr(apiErrors.ErrGetBlock, apiErrors.ErrInvalidBlockNonce))) + t.Run("facade error should error", func(t *testing.T) { + t.Parallel() - ws := startWebServer(blockGroup, "internal", getInternalBlockRoutesConfig()) + facade := &mock.FacadeStub{ + GetInternalShardBlockByNonceCalled: func(_ common.ApiOutputFormat, _ uint64) (interface{}, error) { + return nil, expectedErr + }, + } - req, _ := http.NewRequest("GET", "/internal/raw/metablock/by-round/15", nil) - resp := httptest.NewRecorder() - ws.ServeHTTP(resp, req) + testInternalGroup( + t, + facade, + "/internal/raw/shardblock/by-nonce/15", + nil, + http.StatusInternalServerError, + formatExpectedErr(apiErrors.ErrGetBlock, expectedErr), + ) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() - response := rawBlockResponse{} - loadResponse(resp.Body, &response) + facade := &mock.FacadeStub{ + GetInternalShardBlockByNonceCalled: func(_ common.ApiOutputFormat, _ uint64) (interface{}, error) { + return expectedRawBlockOutput, nil + }, + } - assert.Equal(t, http.StatusInternalServerError, resp.Code) - assert.True(t, strings.Contains(response.Error, expectedErr.Error())) + response := &rawBlockResponse{} + loadInternalBlockGroupResponse( + t, + facade, + "/internal/raw/shardblock/by-nonce/15", + "GET", + nil, + response, + ) + assert.Equal(t, expectedRawBlockOutput, response.Data.Block) + }) } -func TestGetRawMetaBlockByRound_ShouldWork(t *testing.T) { +func TestInternalBlockGroup_getRawShardBlockByRound(t *testing.T) { t.Parallel() - expectedOutput := bytes.Repeat([]byte("1"), 10) - - facade := mock.FacadeStub{ - GetInternalMetaBlockByRoundCalled: func(_ common.ApiOutputFormat, _ uint64) (interface{}, error) { - return expectedOutput, nil - }, - } - - 
blockGroup, err := groups.NewInternalBlockGroup(&facade) - require.NoError(t, err) - - ws := startWebServer(blockGroup, "internal", getInternalBlockRoutesConfig()) - - req, _ := http.NewRequest("GET", "/internal/raw/metablock/by-round/15", nil) - resp := httptest.NewRecorder() - ws.ServeHTTP(resp, req) - - response := rawBlockResponse{} - loadResponse(resp.Body, &response) - - assert.Equal(t, http.StatusOK, resp.Code) - assert.Equal(t, expectedOutput, response.Data.Block) -} - -// ---- MetaBlock - by hash - -func TestGetRawMetaBlockByHash_NoHashUrlParameterShouldErr(t *testing.T) { - t.Parallel() + t.Run("empty round should error", func(t *testing.T) { + t.Parallel() - facade := mock.FacadeStub{ - GetInternalMetaBlockByHashCalled: func(_ common.ApiOutputFormat, _ string) (interface{}, error) { - return []byte{}, nil - }, - } + testInternalGroup(t, &mock.FacadeStub{}, "/internal/raw/shardblock/by-round", nil, http.StatusNotFound, "") + }) + t.Run("invalid round should error", + testInternalGroupErrorScenario("/internal/raw/shardblock/by-round/invalid", nil, + formatExpectedErr(apiErrors.ErrGetBlock, apiErrors.ErrInvalidBlockRound))) + t.Run("facade error should error", func(t *testing.T) { + t.Parallel() - blockGroup, err := groups.NewInternalBlockGroup(&facade) - require.NoError(t, err) + facade := &mock.FacadeStub{ + GetInternalShardBlockByRoundCalled: func(_ common.ApiOutputFormat, _ uint64) (interface{}, error) { + return nil, expectedErr + }, + } - ws := startWebServer(blockGroup, "internal", getInternalBlockRoutesConfig()) + testInternalGroup( + t, + facade, + "/internal/raw/shardblock/by-round/15", + nil, + http.StatusInternalServerError, + formatExpectedErr(apiErrors.ErrGetBlock, expectedErr), + ) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() - req, _ := http.NewRequest("GET", "/internal/raw/metablock/by-hash", nil) - resp := httptest.NewRecorder() - ws.ServeHTTP(resp, req) + facade := &mock.FacadeStub{ + GetInternalShardBlockByRoundCalled: func(_ common.ApiOutputFormat, _ uint64) (interface{}, error) { + return expectedRawBlockOutput, nil + }, + } - response := rawBlockResponse{} - loadResponse(resp.Body, &response) - assert.Equal(t, http.StatusNotFound, resp.Code) + response := &rawBlockResponse{} + loadInternalBlockGroupResponse( + t, + facade, + "/internal/raw/shardblock/by-round/15", + "GET", + nil, + response, + ) + assert.Equal(t, expectedRawBlockOutput, response.Data.Block) + }) } -func TestGetRawMetaBlockByHash_FacadeErrorShouldErr(t *testing.T) { +func TestInternalBlockGroup_getRawShardBlockByHash(t *testing.T) { t.Parallel() - expectedErr := errors.New("local err") - facade := mock.FacadeStub{ - GetInternalMetaBlockByHashCalled: func(_ common.ApiOutputFormat, _ string) (interface{}, error) { - return nil, expectedErr - }, - } + t.Run("empty hash should error", func(t *testing.T) { + t.Parallel() - blockGroup, err := groups.NewInternalBlockGroup(&facade) - require.NoError(t, err) + testInternalGroup(t, &mock.FacadeStub{}, "/internal/raw/shardblock/by-hash", nil, http.StatusNotFound, "") + }) + t.Run("facade error should error", func(t *testing.T) { + t.Parallel() - ws := startWebServer(blockGroup, "internal", getInternalBlockRoutesConfig()) + facade := &mock.FacadeStub{ + GetInternalShardBlockByHashCalled: func(_ common.ApiOutputFormat, _ string) (interface{}, error) { + return nil, expectedErr + }, + } - req, _ := http.NewRequest("GET", "/internal/raw/metablock/by-hash/dummyhash", nil) - resp := httptest.NewRecorder() - ws.ServeHTTP(resp, req) + 
testInternalGroup( + t, + facade, + "/internal/raw/shardblock/by-hash/dummyhash", + nil, + http.StatusInternalServerError, + formatExpectedErr(apiErrors.ErrGetBlock, expectedErr), + ) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() - response := rawBlockResponse{} - loadResponse(resp.Body, &response) + facade := &mock.FacadeStub{ + GetInternalShardBlockByHashCalled: func(_ common.ApiOutputFormat, _ string) (interface{}, error) { + return expectedRawBlockOutput, nil + }, + } - assert.Equal(t, http.StatusInternalServerError, resp.Code) - assert.True(t, strings.Contains(response.Error, expectedErr.Error())) + response := &rawBlockResponse{} + loadInternalBlockGroupResponse( + t, + facade, + "/internal/raw/shardblock/by-hash/d08089f2ab739520598fd7aeed08c427460fe94f286383047f3f61951afc4e00", + "GET", + nil, + response, + ) + assert.Equal(t, expectedRawBlockOutput, response.Data.Block) + }) } -func TestGetRawMetaBlockByHash_ShouldWork(t *testing.T) { +func TestInternalBlockGroup_getRawMiniBlockByHash(t *testing.T) { t.Parallel() - expectedOutput := bytes.Repeat([]byte("1"), 10) - - facade := mock.FacadeStub{ - GetInternalMetaBlockByHashCalled: func(_ common.ApiOutputFormat, _ string) (interface{}, error) { - return expectedOutput, nil - }, - } - - blockGroup, err := groups.NewInternalBlockGroup(&facade) - require.NoError(t, err) - - ws := startWebServer(blockGroup, "internal", getInternalBlockRoutesConfig()) - - req, _ := http.NewRequest("GET", "/internal/raw/metablock/by-hash/d08089f2ab739520598fd7aeed08c427460fe94f286383047f3f61951afc4e00", nil) - resp := httptest.NewRecorder() - ws.ServeHTTP(resp, req) - - response := rawBlockResponse{} - loadResponse(resp.Body, &response) - assert.Equal(t, http.StatusOK, resp.Code) - - assert.Equal(t, expectedOutput, response.Data.Block) -} - -// ---- StartOfEpoch MetaBlock - raw + t.Run("empty hash should error", func(t *testing.T) { + t.Parallel() -func TestGetRawStartOfEpochMetaBlock_NoEpochUrlParameterShouldErr(t *testing.T) { - t.Parallel() + testInternalGroup(t, &mock.FacadeStub{}, "/internal/raw/miniblock/by-hash", nil, http.StatusNotFound, "") + }) + t.Run("empty epoch should error", func(t *testing.T) { + t.Parallel() - facade := mock.FacadeStub{ - GetInternalStartOfEpochMetaBlockCalled: func(_ common.ApiOutputFormat, epoch uint32) (interface{}, error) { - return []byte{}, nil - }, - } + testInternalGroup(t, &mock.FacadeStub{}, "/internal/raw/miniblock/by-hash/aaa/epoch", nil, http.StatusNotFound, "") + }) + t.Run("invalid epoch should error", + testInternalGroupErrorScenario("/internal/raw/miniblock/by-hash/aaaa/epoch/not-uint", nil, + formatExpectedErr(apiErrors.ErrGetBlock, apiErrors.ErrInvalidEpoch))) + t.Run("facade error should error", func(t *testing.T) { + t.Parallel() - blockGroup, err := groups.NewInternalBlockGroup(&facade) - require.NoError(t, err) + facade := &mock.FacadeStub{ + GetInternalMiniBlockByHashCalled: func(format common.ApiOutputFormat, txHash string, epoch uint32) (interface{}, error) { + return nil, expectedErr + }, + } - ws := startWebServer(blockGroup, "internal", getInternalBlockRoutesConfig()) + testInternalGroup( + t, + facade, + "/internal/raw/miniblock/by-hash/aaaa/epoch/1", + nil, + http.StatusInternalServerError, + formatExpectedErr(apiErrors.ErrGetBlock, expectedErr), + ) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() - req, _ := http.NewRequest("GET", "/internal/raw/startofepoch/metablock/by-epoch/a", nil) - resp := httptest.NewRecorder() - ws.ServeHTTP(resp, req) + facade := 
&mock.FacadeStub{ + GetInternalMiniBlockByHashCalled: func(format common.ApiOutputFormat, hash string, epoch uint32) (interface{}, error) { + return expectedRawBlockOutput, nil + }, + } - response := rawBlockResponse{} - loadResponse(resp.Body, &response) - assert.Equal(t, http.StatusBadRequest, resp.Code) + response := &rawMiniBlockResponse{} + loadInternalBlockGroupResponse( + t, + facade, + "/internal/raw/miniblock/by-hash/aaaa/epoch/1", + "GET", + nil, + response, + ) + assert.Equal(t, expectedRawBlockOutput, response.Data.Block) + }) } -func TestGetRawStartOfEpochMetaBlock_FacadeErrorShouldErr(t *testing.T) { +func TestInternalBlockGroup_getJSONMetaBlockByNonce(t *testing.T) { t.Parallel() - expectedErr := errors.New("local err") - facade := mock.FacadeStub{ - GetInternalStartOfEpochMetaBlockCalled: func(_ common.ApiOutputFormat, epoch uint32) (interface{}, error) { - return nil, expectedErr - }, - } - - blockGroup, err := groups.NewInternalBlockGroup(&facade) - require.NoError(t, err) - - ws := startWebServer(blockGroup, "internal", getInternalBlockRoutesConfig()) - - req, _ := http.NewRequest("GET", "/internal/raw/startofepoch/metablock/by-epoch/1", nil) - resp := httptest.NewRecorder() - ws.ServeHTTP(resp, req) - - response := rawBlockResponse{} - loadResponse(resp.Body, &response) - - assert.Equal(t, http.StatusInternalServerError, resp.Code) - assert.True(t, strings.Contains(response.Error, expectedErr.Error())) -} - -func TestGetRawStartOfEpochMetaBlock_ShouldWork(t *testing.T) { - t.Parallel() - - expectedOutput := bytes.Repeat([]byte("1"), 10) - - facade := mock.FacadeStub{ - GetInternalStartOfEpochMetaBlockCalled: func(_ common.ApiOutputFormat, epoch uint32) (interface{}, error) { - return expectedOutput, nil - }, - } - - blockGroup, err := groups.NewInternalBlockGroup(&facade) - require.NoError(t, err) - - ws := startWebServer(blockGroup, "internal", getInternalBlockRoutesConfig()) - - req, _ := http.NewRequest("GET", "/internal/raw/startofepoch/metablock/by-epoch/1", nil) - resp := httptest.NewRecorder() - ws.ServeHTTP(resp, req) - - response := rawBlockResponse{} - loadResponse(resp.Body, &response) - assert.Equal(t, http.StatusOK, resp.Code) - - assert.Equal(t, expectedOutput, response.Data.Block) -} - -// ----------------- Shard Block --------------- - -func TestGetRawShardBlockByNonce_EmptyNonceUrlParameterShouldErr(t *testing.T) { - t.Parallel() - - facade := mock.FacadeStub{ - GetInternalMetaBlockByNonceCalled: func(_ common.ApiOutputFormat, _ uint64) (interface{}, error) { - return []byte{}, nil - }, - } - - blockGroup, err := groups.NewInternalBlockGroup(&facade) - require.NoError(t, err) - - ws := startWebServer(blockGroup, "internal", getInternalBlockRoutesConfig()) - - req, _ := http.NewRequest("GET", "/internal/raw/shardblock/by-nonce", nil) - resp := httptest.NewRecorder() - ws.ServeHTTP(resp, req) - - response := rawBlockResponse{} - loadResponse(resp.Body, &response) - assert.Equal(t, http.StatusNotFound, resp.Code) -} - -func TestGetRawShardBlockByNonce_InvalidNonceShouldErr(t *testing.T) { - t.Parallel() - - facade := mock.FacadeStub{ - GetInternalShardBlockByNonceCalled: func(_ common.ApiOutputFormat, _ uint64) (interface{}, error) { - return []byte{}, nil - }, - } - - blockGroup, err := groups.NewInternalBlockGroup(&facade) - require.NoError(t, err) - - ws := startWebServer(blockGroup, "internal", getInternalBlockRoutesConfig()) - - req, _ := http.NewRequest("GET", "/internal/raw/shardblock/by-nonce/invalid", nil) - resp := httptest.NewRecorder() - 
ws.ServeHTTP(resp, req) - - response := rawBlockResponse{} - loadResponse(resp.Body, &response) - assert.Equal(t, http.StatusBadRequest, resp.Code) -} - -func TestGetRawShardBlockByNonce_ShouldWork(t *testing.T) { - t.Parallel() - - expectedOutput := bytes.Repeat([]byte("1"), 10) - - facade := mock.FacadeStub{ - GetInternalShardBlockByNonceCalled: func(_ common.ApiOutputFormat, _ uint64) (interface{}, error) { - return expectedOutput, nil - }, - } - - blockGroup, err := groups.NewInternalBlockGroup(&facade) - require.NoError(t, err) - - ws := startWebServer(blockGroup, "internal", getInternalBlockRoutesConfig()) - - req, _ := http.NewRequest("GET", "/internal/raw/shardblock/by-nonce/15", nil) - resp := httptest.NewRecorder() - ws.ServeHTTP(resp, req) - - response := rawBlockResponse{} - loadResponse(resp.Body, &response) - assert.Equal(t, http.StatusOK, resp.Code) - - assert.Equal(t, expectedOutput, response.Data.Block) -} - -// ---- ShardBlock - by round - -func TestGetRawShardBlockByRound_EmptyRoundUrlParameterShouldErr(t *testing.T) { - t.Parallel() - - facade := mock.FacadeStub{ - GetInternalShardBlockByRoundCalled: func(_ common.ApiOutputFormat, _ uint64) (interface{}, error) { - return []byte{}, nil - }, - } - - blockGroup, err := groups.NewInternalBlockGroup(&facade) - require.NoError(t, err) - - ws := startWebServer(blockGroup, "internal", getInternalBlockRoutesConfig()) - - req, _ := http.NewRequest("GET", "/internal/raw/shardblock/by-round", nil) - resp := httptest.NewRecorder() - ws.ServeHTTP(resp, req) - - response := rawBlockResponse{} - loadResponse(resp.Body, &response) - assert.Equal(t, http.StatusNotFound, resp.Code) -} - -func TestGetRawShardBlockByRound_InvalidRoundShouldErr(t *testing.T) { - t.Parallel() - - facade := mock.FacadeStub{ - GetInternalShardBlockByRoundCalled: func(_ common.ApiOutputFormat, _ uint64) (interface{}, error) { - return []byte{}, nil - }, - } - - blockGroup, err := groups.NewInternalBlockGroup(&facade) - require.NoError(t, err) - - ws := startWebServer(blockGroup, "internal", getInternalBlockRoutesConfig()) - - req, _ := http.NewRequest("GET", "/internal/raw/shardblock/by-round/invalid", nil) - resp := httptest.NewRecorder() - ws.ServeHTTP(resp, req) - - response := rawBlockResponse{} - loadResponse(resp.Body, &response) - - assert.Equal(t, http.StatusBadRequest, resp.Code) - assert.True(t, strings.Contains(response.Error, apiErrors.ErrInvalidBlockRound.Error())) -} - -func TestGetRawShardBlockByRound_FacadeErrorShouldErr(t *testing.T) { - t.Parallel() - - expectedErr := errors.New("local err") - facade := mock.FacadeStub{ - GetInternalShardBlockByRoundCalled: func(_ common.ApiOutputFormat, _ uint64) (interface{}, error) { - return nil, expectedErr - }, - } - - blockGroup, err := groups.NewInternalBlockGroup(&facade) - require.NoError(t, err) - - ws := startWebServer(blockGroup, "internal", getInternalBlockRoutesConfig()) - - req, _ := http.NewRequest("GET", "/internal/raw/shardblock/by-round/15", nil) - resp := httptest.NewRecorder() - ws.ServeHTTP(resp, req) - - response := rawBlockResponse{} - loadResponse(resp.Body, &response) - - assert.Equal(t, http.StatusInternalServerError, resp.Code) - assert.True(t, strings.Contains(response.Error, expectedErr.Error())) -} - -func TestGetRawShardBlockByRound_ShouldWork(t *testing.T) { - t.Parallel() - - expectedOutput := bytes.Repeat([]byte("1"), 10) - - facade := mock.FacadeStub{ - GetInternalShardBlockByRoundCalled: func(_ common.ApiOutputFormat, _ uint64) (interface{}, error) { - return 
expectedOutput, nil - }, - } - - blockGroup, err := groups.NewInternalBlockGroup(&facade) - require.NoError(t, err) - - ws := startWebServer(blockGroup, "internal", getInternalBlockRoutesConfig()) - - req, _ := http.NewRequest("GET", "/internal/raw/shardblock/by-round/15", nil) - resp := httptest.NewRecorder() - ws.ServeHTTP(resp, req) - - response := rawBlockResponse{} - loadResponse(resp.Body, &response) - - assert.Equal(t, http.StatusOK, resp.Code) - assert.Equal(t, expectedOutput, response.Data.Block) -} - -// ---- ShardBlock - by hash - -func TestGetRawShardBlockByHash_NoHashUrlParameterShouldErr(t *testing.T) { - t.Parallel() - - facade := mock.FacadeStub{ - GetInternalShardBlockByHashCalled: func(_ common.ApiOutputFormat, _ string) (interface{}, error) { - return []byte{}, nil - }, - } - - blockGroup, err := groups.NewInternalBlockGroup(&facade) - require.NoError(t, err) - - ws := startWebServer(blockGroup, "internal", getInternalBlockRoutesConfig()) - - req, _ := http.NewRequest("GET", "/internal/raw/shardblock/by-hash", nil) - resp := httptest.NewRecorder() - ws.ServeHTTP(resp, req) - - response := rawBlockResponse{} - loadResponse(resp.Body, &response) - assert.Equal(t, http.StatusNotFound, resp.Code) -} - -func TestGetRawShardBlockByHash_FacadeErrorShouldErr(t *testing.T) { - t.Parallel() - - expectedErr := errors.New("local err") - facade := mock.FacadeStub{ - GetInternalShardBlockByHashCalled: func(_ common.ApiOutputFormat, _ string) (interface{}, error) { - return nil, expectedErr - }, - } - - blockGroup, err := groups.NewInternalBlockGroup(&facade) - require.NoError(t, err) - - ws := startWebServer(blockGroup, "internal", getInternalBlockRoutesConfig()) - - req, _ := http.NewRequest("GET", "/internal/raw/shardblock/by-hash/dummyhash", nil) - resp := httptest.NewRecorder() - ws.ServeHTTP(resp, req) - - response := rawBlockResponse{} - loadResponse(resp.Body, &response) - - assert.Equal(t, http.StatusInternalServerError, resp.Code) - assert.True(t, strings.Contains(response.Error, expectedErr.Error())) -} - -func TestGetRawShardBlockByHash_ShouldWork(t *testing.T) { - t.Parallel() - - expectedOutput := bytes.Repeat([]byte("1"), 10) - - facade := mock.FacadeStub{ - GetInternalShardBlockByHashCalled: func(_ common.ApiOutputFormat, _ string) (interface{}, error) { - return expectedOutput, nil - }, - } - - blockGroup, err := groups.NewInternalBlockGroup(&facade) - require.NoError(t, err) - - ws := startWebServer(blockGroup, "internal", getInternalBlockRoutesConfig()) - - req, _ := http.NewRequest("GET", "/internal/raw/shardblock/by-hash/d08089f2ab739520598fd7aeed08c427460fe94f286383047f3f61951afc4e00", nil) - resp := httptest.NewRecorder() - ws.ServeHTTP(resp, req) - - response := rawBlockResponse{} - loadResponse(resp.Body, &response) - assert.Equal(t, http.StatusOK, resp.Code) - - assert.Equal(t, expectedOutput, response.Data.Block) -} - -// ---- MiniBlock - -func TestGetRawMiniBlockByHash_NoHashUrlParameterShouldErr(t *testing.T) { - t.Parallel() - - facade := mock.FacadeStub{ - GetInternalMiniBlockByHashCalled: func(_ common.ApiOutputFormat, _ string, epoch uint32) (interface{}, error) { - return []byte{}, nil - }, - } - - blockGroup, err := groups.NewInternalBlockGroup(&facade) - require.NoError(t, err) - - ws := startWebServer(blockGroup, "internal", getInternalBlockRoutesConfig()) - - req, _ := http.NewRequest("GET", "/internal/raw/miniblock/by-hash", nil) - resp := httptest.NewRecorder() - ws.ServeHTTP(resp, req) - - response := rawMiniBlockResponse{} - 
loadResponse(resp.Body, &response) - assert.Equal(t, http.StatusNotFound, resp.Code) -} - -func TestGetRawMiniBlockByHash_NoEpochUrlParameterShouldErr(t *testing.T) { - t.Parallel() - - facade := mock.FacadeStub{ - GetInternalMiniBlockByHashCalled: func(_ common.ApiOutputFormat, _ string, epoch uint32) (interface{}, error) { - return []byte{}, nil - }, - } - - blockGroup, err := groups.NewInternalBlockGroup(&facade) - require.NoError(t, err) - - ws := startWebServer(blockGroup, "internal", getInternalBlockRoutesConfig()) - - req, _ := http.NewRequest("GET", "/internal/raw/miniblock/by-hash/aaaa/epoch", nil) - resp := httptest.NewRecorder() - ws.ServeHTTP(resp, req) - - response := rawMiniBlockResponse{} - loadResponse(resp.Body, &response) - assert.Equal(t, http.StatusNotFound, resp.Code) -} - -func TestGetRawMiniBlockByHash_ShouldWork(t *testing.T) { - t.Parallel() - - expectedOutput := bytes.Repeat([]byte("1"), 10) - - facade := mock.FacadeStub{ - GetInternalMiniBlockByHashCalled: func(format common.ApiOutputFormat, hash string, epoch uint32) (interface{}, error) { - return expectedOutput, nil - }, - } - - blockGroup, err := groups.NewInternalBlockGroup(&facade) - require.NoError(t, err) - - ws := startWebServer(blockGroup, "internal", getInternalBlockRoutesConfig()) - - req, _ := http.NewRequest("GET", "/internal/raw/miniblock/by-hash/aaaa/epoch/1", nil) - resp := httptest.NewRecorder() - ws.ServeHTTP(resp, req) - - response := rawMiniBlockResponse{} - loadResponse(resp.Body, &response) - assert.Equal(t, http.StatusOK, resp.Code) - - assert.Equal(t, expectedOutput, response.Data.Block) -} - -// ---- JSON - -// ---- MetaBlock - by nonce - -func TestGetInternalMetaBlockByNonce_EmptyNonceUrlParameterShouldErr(t *testing.T) { - t.Parallel() - - facade := mock.FacadeStub{ - GetInternalMetaBlockByNonceCalled: func(_ common.ApiOutputFormat, _ uint64) (interface{}, error) { - return []byte{}, nil - }, - } - - blockGroup, err := groups.NewInternalBlockGroup(&facade) - require.NoError(t, err) - - ws := startWebServer(blockGroup, "internal", getInternalBlockRoutesConfig()) - - req, _ := http.NewRequest("GET", "/internal/json/metablock/by-nonce", nil) - resp := httptest.NewRecorder() - ws.ServeHTTP(resp, req) - - response := internalMetaBlockResponse{} - loadResponse(resp.Body, &response) - assert.Equal(t, http.StatusNotFound, resp.Code) -} - -func TestGetInternalMetaBlockByNonce_InvalidNonceShouldErr(t *testing.T) { - t.Parallel() - - facade := mock.FacadeStub{ - GetInternalMetaBlockByNonceCalled: func(_ common.ApiOutputFormat, _ uint64) (interface{}, error) { - return []byte{}, nil - }, - } - - blockGroup, err := groups.NewInternalBlockGroup(&facade) - require.NoError(t, err) - - ws := startWebServer(blockGroup, "internal", getInternalBlockRoutesConfig()) - - req, _ := http.NewRequest("GET", "/internal/json/metablock/by-nonce/invalid", nil) - resp := httptest.NewRecorder() - ws.ServeHTTP(resp, req) - - response := internalMetaBlockResponse{} - loadResponse(resp.Body, &response) - - assert.Equal(t, http.StatusBadRequest, resp.Code) - assert.True(t, strings.Contains(response.Error, apiErrors.ErrInvalidBlockNonce.Error())) -} - -func TestGetInternalMetaBlockByNonce_FacadeErrorShouldErr(t *testing.T) { - t.Parallel() - - expectedErr := errors.New("local err") - facade := mock.FacadeStub{ - GetInternalMetaBlockByNonceCalled: func(_ common.ApiOutputFormat, _ uint64) (interface{}, error) { - return nil, expectedErr - }, - } - - blockGroup, err := groups.NewInternalBlockGroup(&facade) - 
require.NoError(t, err) - - ws := startWebServer(blockGroup, "internal", getInternalBlockRoutesConfig()) - - req, _ := http.NewRequest("GET", "/internal/json/metablock/by-nonce/15", nil) - resp := httptest.NewRecorder() - ws.ServeHTTP(resp, req) - - response := internalMetaBlockResponse{} - loadResponse(resp.Body, &response) - - assert.Equal(t, http.StatusInternalServerError, resp.Code) - assert.True(t, strings.Contains(response.Error, expectedErr.Error())) -} - -func TestGetInternalMetaBlockByNonce_ShouldWork(t *testing.T) { - t.Parallel() - - expectedOutput := block.MetaBlock{ - Nonce: 15, - Epoch: 15, - } - - facade := mock.FacadeStub{ - GetInternalMetaBlockByNonceCalled: func(_ common.ApiOutputFormat, _ uint64) (interface{}, error) { - return expectedOutput, nil - }, - } - - blockGroup, err := groups.NewInternalBlockGroup(&facade) - require.NoError(t, err) - - ws := startWebServer(blockGroup, "internal", getInternalBlockRoutesConfig()) - - req, _ := http.NewRequest("GET", "/internal/json/metablock/by-nonce/15", nil) - resp := httptest.NewRecorder() - ws.ServeHTTP(resp, req) - - response := internalMetaBlockResponse{} - loadResponse(resp.Body, &response) - assert.Equal(t, http.StatusOK, resp.Code) - - assert.Equal(t, expectedOutput, response.Data.Block) -} - -// ---- MetaBlock - by round - -func TestGetInternalMetaBlockByRound_EmptyRoundUrlParameterShouldErr(t *testing.T) { - t.Parallel() - - facade := mock.FacadeStub{ - GetInternalMetaBlockByRoundCalled: func(_ common.ApiOutputFormat, _ uint64) (interface{}, error) { - return []byte{}, nil - }, - } - - blockGroup, err := groups.NewInternalBlockGroup(&facade) - require.NoError(t, err) - - ws := startWebServer(blockGroup, "internal", getInternalBlockRoutesConfig()) - - req, _ := http.NewRequest("GET", "/internal/json/metablock/by-round", nil) - resp := httptest.NewRecorder() - ws.ServeHTTP(resp, req) - - response := internalMetaBlockResponse{} - loadResponse(resp.Body, &response) - assert.Equal(t, http.StatusNotFound, resp.Code) -} - -func TestGetInternalMetaBlockByRound_InvalidRoundShouldErr(t *testing.T) { - t.Parallel() - - facade := mock.FacadeStub{ - GetInternalMetaBlockByRoundCalled: func(_ common.ApiOutputFormat, _ uint64) (interface{}, error) { - return []byte{}, nil - }, - } - - blockGroup, err := groups.NewInternalBlockGroup(&facade) - require.NoError(t, err) - - ws := startWebServer(blockGroup, "internal", getInternalBlockRoutesConfig()) - - req, _ := http.NewRequest("GET", "/internal/json/metablock/by-round/invalid", nil) - resp := httptest.NewRecorder() - ws.ServeHTTP(resp, req) - - response := internalMetaBlockResponse{} - loadResponse(resp.Body, &response) - - assert.Equal(t, http.StatusBadRequest, resp.Code) - assert.True(t, strings.Contains(response.Error, apiErrors.ErrInvalidBlockRound.Error())) -} - -func TestGetInternalMetaBlockByRound_FacadeErrorShouldErr(t *testing.T) { - t.Parallel() - - expectedErr := errors.New("local err") - facade := mock.FacadeStub{ - GetInternalMetaBlockByRoundCalled: func(_ common.ApiOutputFormat, _ uint64) (interface{}, error) { - return nil, expectedErr - }, - } - - blockGroup, err := groups.NewInternalBlockGroup(&facade) - require.NoError(t, err) - - ws := startWebServer(blockGroup, "internal", getInternalBlockRoutesConfig()) - - req, _ := http.NewRequest("GET", "/internal/json/metablock/by-round/15", nil) - resp := httptest.NewRecorder() - ws.ServeHTTP(resp, req) - - response := internalMetaBlockResponse{} - loadResponse(resp.Body, &response) - - assert.Equal(t, 
http.StatusInternalServerError, resp.Code) - assert.True(t, strings.Contains(response.Error, expectedErr.Error())) -} - -func TestGetInternalMetaBlockByRound_ShouldWork(t *testing.T) { - t.Parallel() - - expectedOutput := block.MetaBlock{ - Nonce: 15, - Epoch: 15, - } - - facade := mock.FacadeStub{ - GetInternalMetaBlockByRoundCalled: func(_ common.ApiOutputFormat, _ uint64) (interface{}, error) { - return expectedOutput, nil - }, - } - - blockGroup, err := groups.NewInternalBlockGroup(&facade) - require.NoError(t, err) - - ws := startWebServer(blockGroup, "internal", getInternalBlockRoutesConfig()) - - req, _ := http.NewRequest("GET", "/internal/json/metablock/by-round/15", nil) - resp := httptest.NewRecorder() - ws.ServeHTTP(resp, req) - - response := internalMetaBlockResponse{} - loadResponse(resp.Body, &response) - - assert.Equal(t, http.StatusOK, resp.Code) - assert.Equal(t, expectedOutput, response.Data.Block) -} - -// ---- MetaBlock - by hash - -func TestGetInternalMetaBlockByHash_NoHashUrlParameterShouldErr(t *testing.T) { - t.Parallel() - - facade := mock.FacadeStub{ - GetInternalMetaBlockByHashCalled: func(_ common.ApiOutputFormat, _ string) (interface{}, error) { - return []byte{}, nil - }, - } - - blockGroup, err := groups.NewInternalBlockGroup(&facade) - require.NoError(t, err) - - ws := startWebServer(blockGroup, "internal", getInternalBlockRoutesConfig()) - - req, _ := http.NewRequest("GET", "/internal/json/metablock/by-hash", nil) - resp := httptest.NewRecorder() - ws.ServeHTTP(resp, req) - - response := internalMetaBlockResponse{} - loadResponse(resp.Body, &response) - assert.Equal(t, http.StatusNotFound, resp.Code) -} - -func TestGetInternalMetaBlockByHash_FacadeErrorShouldErr(t *testing.T) { - t.Parallel() - - expectedErr := errors.New("local err") - facade := mock.FacadeStub{ - GetInternalMetaBlockByHashCalled: func(_ common.ApiOutputFormat, _ string) (interface{}, error) { - return nil, expectedErr - }, - } - - blockGroup, err := groups.NewInternalBlockGroup(&facade) - require.NoError(t, err) - - ws := startWebServer(blockGroup, "internal", getInternalBlockRoutesConfig()) - - req, _ := http.NewRequest("GET", "/internal/json/metablock/by-hash/dummyhash", nil) - resp := httptest.NewRecorder() - ws.ServeHTTP(resp, req) - - response := internalMetaBlockResponse{} - loadResponse(resp.Body, &response) - - assert.Equal(t, http.StatusInternalServerError, resp.Code) - assert.True(t, strings.Contains(response.Error, expectedErr.Error())) -} - -func TestGetInternalMetaBlockByHash_ShouldWork(t *testing.T) { - t.Parallel() - - expectedOutput := block.MetaBlock{ - Nonce: 15, - Epoch: 15, - } - - facade := mock.FacadeStub{ - GetInternalMetaBlockByHashCalled: func(_ common.ApiOutputFormat, _ string) (interface{}, error) { - return expectedOutput, nil - }, - } - - blockGroup, err := groups.NewInternalBlockGroup(&facade) - require.NoError(t, err) - - ws := startWebServer(blockGroup, "internal", getInternalBlockRoutesConfig()) - - req, _ := http.NewRequest("GET", "/internal/json/metablock/by-hash/d08089f2ab739520598fd7aeed08c427460fe94f286383047f3f61951afc4e00", nil) - resp := httptest.NewRecorder() - ws.ServeHTTP(resp, req) - - response := internalMetaBlockResponse{} - loadResponse(resp.Body, &response) - assert.Equal(t, http.StatusOK, resp.Code) - - assert.Equal(t, expectedOutput, response.Data.Block) -} - -// ---- StartOfEpoch MetaBlock - json - -func TestGetInternalStartOfEpochMetaBlock_NoEpochUrlParameterShouldErr(t *testing.T) { - t.Parallel() - - facade := mock.FacadeStub{ - 
GetInternalStartOfEpochMetaBlockCalled: func(_ common.ApiOutputFormat, epoch uint32) (interface{}, error) { - return []byte{}, nil - }, - } - - blockGroup, err := groups.NewInternalBlockGroup(&facade) - require.NoError(t, err) - - ws := startWebServer(blockGroup, "internal", getInternalBlockRoutesConfig()) - - req, _ := http.NewRequest("GET", "/internal/json/startofepoch/metablock/by-epoch", nil) - resp := httptest.NewRecorder() - ws.ServeHTTP(resp, req) - - response := rawBlockResponse{} - loadResponse(resp.Body, &response) - assert.Equal(t, http.StatusNotFound, resp.Code) -} - -func TestGetInternalStartOfEpochMetaBlock_FacadeErrorShouldErr(t *testing.T) { - t.Parallel() - - expectedErr := errors.New("local err") - facade := mock.FacadeStub{ - GetInternalStartOfEpochMetaBlockCalled: func(_ common.ApiOutputFormat, epoch uint32) (interface{}, error) { - return nil, expectedErr - }, - } - - blockGroup, err := groups.NewInternalBlockGroup(&facade) - require.NoError(t, err) - - ws := startWebServer(blockGroup, "internal", getInternalBlockRoutesConfig()) - - req, _ := http.NewRequest("GET", "/internal/json/startofepoch/metablock/by-epoch/1", nil) - resp := httptest.NewRecorder() - ws.ServeHTTP(resp, req) - - response := rawBlockResponse{} - loadResponse(resp.Body, &response) - - assert.Equal(t, http.StatusInternalServerError, resp.Code) - assert.True(t, strings.Contains(response.Error, expectedErr.Error())) -} - -func TestGetInternalStartOfEpochMetaBlock_ShouldWork(t *testing.T) { - t.Parallel() - - expectedOutput := bytes.Repeat([]byte("1"), 10) - - facade := mock.FacadeStub{ - GetInternalStartOfEpochMetaBlockCalled: func(_ common.ApiOutputFormat, epoch uint32) (interface{}, error) { - return expectedOutput, nil - }, - } - - blockGroup, err := groups.NewInternalBlockGroup(&facade) - require.NoError(t, err) - - ws := startWebServer(blockGroup, "internal", getInternalBlockRoutesConfig()) - - req, _ := http.NewRequest("GET", "/internal/json/startofepoch/metablock/by-epoch/1", nil) - resp := httptest.NewRecorder() - ws.ServeHTTP(resp, req) - - response := rawBlockResponse{} - loadResponse(resp.Body, &response) - assert.Equal(t, http.StatusOK, resp.Code) - - assert.Equal(t, expectedOutput, response.Data.Block) -} - -// ----------------- Shard Block --------------- - -func TestGetInternalShardBlockByNonce_EmptyNonceUrlParameterShouldErr(t *testing.T) { - t.Parallel() - - facade := mock.FacadeStub{ - GetInternalMetaBlockByNonceCalled: func(_ common.ApiOutputFormat, _ uint64) (interface{}, error) { - return []byte{}, nil - }, - } - - blockGroup, err := groups.NewInternalBlockGroup(&facade) - require.NoError(t, err) - - ws := startWebServer(blockGroup, "internal", getInternalBlockRoutesConfig()) - - req, _ := http.NewRequest("GET", "/internal/json/shardblock/by-nonce", nil) - resp := httptest.NewRecorder() - ws.ServeHTTP(resp, req) - - response := internalShardBlockResponse{} - loadResponse(resp.Body, &response) - assert.Equal(t, http.StatusNotFound, resp.Code) -} - -func TestGetInternalShardBlockByNonce_InvalidNonceShouldErr(t *testing.T) { - t.Parallel() - - facade := mock.FacadeStub{ - GetInternalShardBlockByNonceCalled: func(_ common.ApiOutputFormat, _ uint64) (interface{}, error) { - return []byte{}, nil - }, - } - - blockGroup, err := groups.NewInternalBlockGroup(&facade) - require.NoError(t, err) - - ws := startWebServer(blockGroup, "internal", getInternalBlockRoutesConfig()) - - req, _ := http.NewRequest("GET", "/internal/json/shardblock/by-nonce/invalid", nil) - resp := httptest.NewRecorder() 
- ws.ServeHTTP(resp, req) - - response := internalShardBlockResponse{} - loadResponse(resp.Body, &response) - assert.Equal(t, http.StatusBadRequest, resp.Code) -} - -func TestGetInternalShardBlockByNonce_ShouldWork(t *testing.T) { - t.Parallel() - - expectedOutput := block.Header{ - Nonce: 15, - Round: 15, - } - - facade := mock.FacadeStub{ - GetInternalShardBlockByNonceCalled: func(_ common.ApiOutputFormat, _ uint64) (interface{}, error) { - return expectedOutput, nil - }, - } - - blockGroup, err := groups.NewInternalBlockGroup(&facade) - require.NoError(t, err) - - ws := startWebServer(blockGroup, "internal", getInternalBlockRoutesConfig()) - - req, _ := http.NewRequest("GET", "/internal/json/shardblock/by-nonce/15", nil) - resp := httptest.NewRecorder() - ws.ServeHTTP(resp, req) - - response := internalShardBlockResponse{} - loadResponse(resp.Body, &response) - assert.Equal(t, http.StatusOK, resp.Code) - - assert.Equal(t, expectedOutput, response.Data.Block) -} - -// ---- ShardBlock - by round - -func TestGetInternalShardBlockByRound_EmptyRoundUrlParameterShouldErr(t *testing.T) { - t.Parallel() - - facade := mock.FacadeStub{ - GetInternalShardBlockByRoundCalled: func(_ common.ApiOutputFormat, _ uint64) (interface{}, error) { - return []byte{}, nil - }, - } - - blockGroup, err := groups.NewInternalBlockGroup(&facade) - require.NoError(t, err) - - ws := startWebServer(blockGroup, "internal", getInternalBlockRoutesConfig()) - - req, _ := http.NewRequest("GET", "/internal/json/shardblock/by-round", nil) - resp := httptest.NewRecorder() - ws.ServeHTTP(resp, req) - - response := internalShardBlockResponse{} - loadResponse(resp.Body, &response) - assert.Equal(t, http.StatusNotFound, resp.Code) -} - -func TestGetInternalShardBlockByRound_InvalidRoundShouldErr(t *testing.T) { - t.Parallel() - - facade := mock.FacadeStub{ - GetInternalShardBlockByRoundCalled: func(_ common.ApiOutputFormat, _ uint64) (interface{}, error) { - return []byte{}, nil - }, - } - - blockGroup, err := groups.NewInternalBlockGroup(&facade) - require.NoError(t, err) - - ws := startWebServer(blockGroup, "internal", getInternalBlockRoutesConfig()) - - req, _ := http.NewRequest("GET", "/internal/json/shardblock/by-round/invalid", nil) - resp := httptest.NewRecorder() - ws.ServeHTTP(resp, req) - - response := internalShardBlockResponse{} - loadResponse(resp.Body, &response) - - assert.Equal(t, http.StatusBadRequest, resp.Code) - assert.True(t, strings.Contains(response.Error, apiErrors.ErrInvalidBlockRound.Error())) -} - -func TestGetInternalShardBlockByRound_FacadeErrorShouldErr(t *testing.T) { - t.Parallel() - - expectedErr := errors.New("local err") - facade := mock.FacadeStub{ - GetInternalShardBlockByRoundCalled: func(_ common.ApiOutputFormat, _ uint64) (interface{}, error) { - return nil, expectedErr - }, - } + t.Run("empty nonce should error", func(t *testing.T) { + t.Parallel() - blockGroup, err := groups.NewInternalBlockGroup(&facade) - require.NoError(t, err) + testInternalGroup(t, &mock.FacadeStub{}, "/internal/json/metablock/by-nonce", nil, http.StatusNotFound, "") + }) + t.Run("invalid nonce should error", + testInternalGroupErrorScenario("/internal/json/metablock/by-nonce/invalid", nil, + formatExpectedErr(apiErrors.ErrGetBlock, apiErrors.ErrInvalidBlockNonce))) + t.Run("facade error should error", func(t *testing.T) { + t.Parallel() - ws := startWebServer(blockGroup, "internal", getInternalBlockRoutesConfig()) + facade := &mock.FacadeStub{ + GetInternalMetaBlockByNonceCalled: func(_ common.ApiOutputFormat, _ 
uint64) (interface{}, error) { + return nil, expectedErr + }, + } - req, _ := http.NewRequest("GET", "/internal/json/shardblock/by-round/15", nil) - resp := httptest.NewRecorder() - ws.ServeHTTP(resp, req) + testInternalGroup( + t, + facade, + "/internal/json/metablock/by-nonce/15", + nil, + http.StatusInternalServerError, + formatExpectedErr(apiErrors.ErrGetBlock, expectedErr), + ) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() - response := internalShardBlockResponse{} - loadResponse(resp.Body, &response) + facade := &mock.FacadeStub{ + GetInternalMetaBlockByNonceCalled: func(_ common.ApiOutputFormat, _ uint64) (interface{}, error) { + return expectedMetaBlock, nil + }, + } - assert.Equal(t, http.StatusInternalServerError, resp.Code) - assert.True(t, strings.Contains(response.Error, expectedErr.Error())) + response := &internalMetaBlockResponse{} + loadInternalBlockGroupResponse( + t, + facade, + "/internal/json/metablock/by-nonce/15", + "GET", + nil, + response, + ) + assert.Equal(t, expectedMetaBlock, response.Data.Block) + }) } -func TestGetInternalShardBlockByRound_ShouldWork(t *testing.T) { +func TestInternalBlockGroup_getJSONMetaBlockByRound(t *testing.T) { t.Parallel() - expectedOutput := block.Header{ - Nonce: 15, - Round: 15, - } - - facade := mock.FacadeStub{ - GetInternalShardBlockByRoundCalled: func(_ common.ApiOutputFormat, _ uint64) (interface{}, error) { - return expectedOutput, nil - }, - } + t.Run("empty round should error", func(t *testing.T) { + t.Parallel() - blockGroup, err := groups.NewInternalBlockGroup(&facade) - require.NoError(t, err) + testInternalGroup(t, &mock.FacadeStub{}, "/internal/json/metablock/by-round", nil, http.StatusNotFound, "") + }) + t.Run("invalid round should error", + testInternalGroupErrorScenario("/internal/json/metablock/by-round/invalid", nil, + formatExpectedErr(apiErrors.ErrGetBlock, apiErrors.ErrInvalidBlockRound))) + t.Run("facade error should error", func(t *testing.T) { + t.Parallel() - ws := startWebServer(blockGroup, "internal", getInternalBlockRoutesConfig()) + facade := &mock.FacadeStub{ + GetInternalMetaBlockByRoundCalled: func(_ common.ApiOutputFormat, _ uint64) (interface{}, error) { + return nil, expectedErr + }, + } - req, _ := http.NewRequest("GET", "/internal/json/shardblock/by-round/15", nil) - resp := httptest.NewRecorder() - ws.ServeHTTP(resp, req) + testInternalGroup( + t, + facade, + "/internal/json/metablock/by-round/15", + nil, + http.StatusInternalServerError, + formatExpectedErr(apiErrors.ErrGetBlock, expectedErr), + ) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() - response := internalShardBlockResponse{} - loadResponse(resp.Body, &response) + facade := &mock.FacadeStub{ + GetInternalMetaBlockByRoundCalled: func(_ common.ApiOutputFormat, _ uint64) (interface{}, error) { + return expectedMetaBlock, nil + }, + } - assert.Equal(t, http.StatusOK, resp.Code) - assert.Equal(t, expectedOutput, response.Data.Block) + response := &internalMetaBlockResponse{} + loadInternalBlockGroupResponse( + t, + facade, + "/internal/json/metablock/by-round/15", + "GET", + nil, + response, + ) + assert.Equal(t, expectedMetaBlock, response.Data.Block) + }) } -// ---- ShardBlock - by hash - -func TestGetInternalShardBlockByHash_NoHashUrlParameterShouldErr(t *testing.T) { +func TestInternalBlockGroup_getJSONMetaBlockByHash(t *testing.T) { t.Parallel() - facade := mock.FacadeStub{ - GetInternalShardBlockByHashCalled: func(_ common.ApiOutputFormat, _ string) (interface{}, error) { - return []byte{}, nil - 
}, - } + t.Run("empty hash should error", func(t *testing.T) { + t.Parallel() - blockGroup, err := groups.NewInternalBlockGroup(&facade) - require.NoError(t, err) + testInternalGroup(t, &mock.FacadeStub{}, "/internal/json/metablock/by-hash", nil, http.StatusNotFound, "") + }) + t.Run("facade error should error", func(t *testing.T) { + t.Parallel() - ws := startWebServer(blockGroup, "internal", getInternalBlockRoutesConfig()) + facade := &mock.FacadeStub{ + GetInternalMetaBlockByHashCalled: func(_ common.ApiOutputFormat, _ string) (interface{}, error) { + return nil, expectedErr + }, + } - req, _ := http.NewRequest("GET", "/internal/json/shardblock/by-hash", nil) - resp := httptest.NewRecorder() - ws.ServeHTTP(resp, req) + testInternalGroup( + t, + facade, + "/internal/json/metablock/by-hash/dummyhash", + nil, + http.StatusInternalServerError, + formatExpectedErr(apiErrors.ErrGetBlock, expectedErr), + ) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() - response := internalShardBlockResponse{} - loadResponse(resp.Body, &response) - assert.Equal(t, http.StatusNotFound, resp.Code) + facade := &mock.FacadeStub{ + GetInternalMetaBlockByHashCalled: func(_ common.ApiOutputFormat, _ string) (interface{}, error) { + return expectedMetaBlock, nil + }, + } + + response := &internalMetaBlockResponse{} + loadInternalBlockGroupResponse( + t, + facade, + "/internal/json/metablock/by-hash/d08089f2ab739520598fd7aeed08c427460fe94f286383047f3f61951afc4e00", + "GET", + nil, + response, + ) + assert.Equal(t, expectedMetaBlock, response.Data.Block) + }) } -func TestGetInternalShardBlockByHash_FacadeErrorShouldErr(t *testing.T) { +func TestInternalBlockGroup_getJSONStartOfEpochMetaBlock(t *testing.T) { t.Parallel() - expectedErr := errors.New("local err") - facade := mock.FacadeStub{ - GetInternalShardBlockByHashCalled: func(_ common.ApiOutputFormat, _ string) (interface{}, error) { - return nil, expectedErr - }, - } + t.Run("empty epoch should error", func(t *testing.T) { + t.Parallel() - blockGroup, err := groups.NewInternalBlockGroup(&facade) - require.NoError(t, err) + testInternalGroup(t, &mock.FacadeStub{}, "/internal/json/startofepoch/metablock/by-epoch/", nil, http.StatusNotFound, "") + }) + t.Run("invalid epoch should error", + testInternalGroupErrorScenario("/internal/json/startofepoch/metablock/by-epoch/invalid", nil, + formatExpectedErr(apiErrors.ErrGetBlock, apiErrors.ErrInvalidEpoch))) + t.Run("facade error should error", func(t *testing.T) { + t.Parallel() - ws := startWebServer(blockGroup, "internal", getInternalBlockRoutesConfig()) + facade := &mock.FacadeStub{ + GetInternalStartOfEpochMetaBlockCalled: func(_ common.ApiOutputFormat, epoch uint32) (interface{}, error) { + return nil, expectedErr + }, + } - req, _ := http.NewRequest("GET", "/internal/json/shardblock/by-hash/dummyhash", nil) - resp := httptest.NewRecorder() - ws.ServeHTTP(resp, req) + testInternalGroup( + t, + facade, + "/internal/json/startofepoch/metablock/by-epoch/1", + nil, + http.StatusInternalServerError, + formatExpectedErr(apiErrors.ErrGetBlock, expectedErr), + ) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() - response := internalShardBlockResponse{} - loadResponse(resp.Body, &response) + facade := &mock.FacadeStub{ + GetInternalStartOfEpochMetaBlockCalled: func(_ common.ApiOutputFormat, epoch uint32) (interface{}, error) { + return expectedMetaBlock, nil + }, + } - assert.Equal(t, http.StatusInternalServerError, resp.Code) - assert.True(t, strings.Contains(response.Error, 
expectedErr.Error())) + response := &internalMetaBlockResponse{} + loadInternalBlockGroupResponse( + t, + facade, + "/internal/json/startofepoch/metablock/by-epoch/1", + "GET", + nil, + response, + ) + assert.Equal(t, expectedMetaBlock, response.Data.Block) + }) } -func TestGetInternalShardBlockByHash_ShouldWork(t *testing.T) { +func TestInternalBlockGroup_getJSONShardBlockByNonce(t *testing.T) { t.Parallel() - expectedOutput := block.Header{ - Nonce: 15, - Round: 15, - } - - facade := mock.FacadeStub{ - GetInternalShardBlockByHashCalled: func(_ common.ApiOutputFormat, _ string) (interface{}, error) { - return expectedOutput, nil - }, - } + t.Run("empty nonce should error", func(t *testing.T) { + t.Parallel() - blockGroup, err := groups.NewInternalBlockGroup(&facade) - require.NoError(t, err) + testInternalGroup(t, &mock.FacadeStub{}, "/internal/json/shardblock/by-nonce", nil, http.StatusNotFound, "") + }) + t.Run("invalid nonce should error", + testInternalGroupErrorScenario("/internal/json/shardblock/by-nonce/invalid", nil, + formatExpectedErr(apiErrors.ErrGetBlock, apiErrors.ErrInvalidBlockNonce))) + t.Run("facade error should error", func(t *testing.T) { + t.Parallel() - ws := startWebServer(blockGroup, "internal", getInternalBlockRoutesConfig()) + facade := &mock.FacadeStub{ + GetInternalShardBlockByNonceCalled: func(_ common.ApiOutputFormat, _ uint64) (interface{}, error) { + return nil, expectedErr + }, + } - req, _ := http.NewRequest("GET", "/internal/json/shardblock/by-hash/d08089f2ab739520598fd7aeed08c427460fe94f286383047f3f61951afc4e00", nil) - resp := httptest.NewRecorder() - ws.ServeHTTP(resp, req) + testInternalGroup( + t, + facade, + "/internal/json/shardblock/by-nonce/15", + nil, + http.StatusInternalServerError, + formatExpectedErr(apiErrors.ErrGetBlock, expectedErr), + ) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() - response := internalShardBlockResponse{} - loadResponse(resp.Body, &response) - assert.Equal(t, http.StatusOK, resp.Code) + facade := &mock.FacadeStub{ + GetInternalShardBlockByNonceCalled: func(_ common.ApiOutputFormat, _ uint64) (interface{}, error) { + return expectedShardBlock, nil + }, + } - assert.Equal(t, expectedOutput, response.Data.Block) + response := &internalShardBlockResponse{} + loadInternalBlockGroupResponse( + t, + facade, + "/internal/json/shardblock/by-nonce/15", + "GET", + nil, + response, + ) + assert.Equal(t, expectedShardBlock, response.Data.Block) + }) } -// ---- MiniBlock - -func TestGetInternalMiniBlockByHash_EmptyHashUrlParameterShouldErr(t *testing.T) { +func TestInternalBlockGroup_getJSONShardBlockByRound(t *testing.T) { t.Parallel() - facade := mock.FacadeStub{ - GetInternalMiniBlockByHashCalled: func(_ common.ApiOutputFormat, _ string, epoch uint32) (interface{}, error) { - return []byte{}, nil - }, - } + t.Run("empty round should error", func(t *testing.T) { + t.Parallel() - blockGroup, err := groups.NewInternalBlockGroup(&facade) - require.NoError(t, err) + testInternalGroup(t, &mock.FacadeStub{}, "/internal/json/shardblock/by-round", nil, http.StatusNotFound, "") + }) + t.Run("invalid round should error", + testInternalGroupErrorScenario("/internal/json/shardblock/by-round/invalid", nil, + formatExpectedErr(apiErrors.ErrGetBlock, apiErrors.ErrInvalidBlockRound))) + t.Run("facade error should error", func(t *testing.T) { + t.Parallel() - ws := startWebServer(blockGroup, "internal", getInternalBlockRoutesConfig()) + facade := &mock.FacadeStub{ + GetInternalShardBlockByRoundCalled: func(_ 
common.ApiOutputFormat, _ uint64) (interface{}, error) { + return nil, expectedErr + }, + } - req, _ := http.NewRequest("GET", "/internal/json/miniblock/by-hash", nil) - resp := httptest.NewRecorder() - ws.ServeHTTP(resp, req) + testInternalGroup( + t, + facade, + "/internal/json/shardblock/by-round/15", + nil, + http.StatusInternalServerError, + formatExpectedErr(apiErrors.ErrGetBlock, expectedErr), + ) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() - response := internalMiniBlockResponse{} - loadResponse(resp.Body, &response) - assert.Equal(t, http.StatusNotFound, resp.Code) + facade := &mock.FacadeStub{ + GetInternalShardBlockByRoundCalled: func(_ common.ApiOutputFormat, _ uint64) (interface{}, error) { + return expectedShardBlock, nil + }, + } + + response := &internalShardBlockResponse{} + loadInternalBlockGroupResponse( + t, + facade, + "/internal/json/shardblock/by-round/15", + "GET", + nil, + response, + ) + assert.Equal(t, expectedShardBlock, response.Data.Block) + }) } -func TestGetInternalMiniBlockByHash_NoEpochUrlParameterShouldErr(t *testing.T) { +func TestInternalBlockGroup_getJSONShardBlockByHash(t *testing.T) { t.Parallel() - facade := mock.FacadeStub{ - GetInternalMiniBlockByHashCalled: func(_ common.ApiOutputFormat, _ string, epoch uint32) (interface{}, error) { - return []byte{}, nil - }, - } + t.Run("empty hash should error", func(t *testing.T) { + t.Parallel() - blockGroup, err := groups.NewInternalBlockGroup(&facade) - require.NoError(t, err) + testInternalGroup(t, &mock.FacadeStub{}, "/internal/json/shardblock/by-hash", nil, http.StatusNotFound, "") + }) + t.Run("facade error should error", func(t *testing.T) { + t.Parallel() - ws := startWebServer(blockGroup, "internal", getInternalBlockRoutesConfig()) + facade := &mock.FacadeStub{ + GetInternalShardBlockByHashCalled: func(_ common.ApiOutputFormat, _ string) (interface{}, error) { + return nil, expectedErr + }, + } - req, _ := http.NewRequest("GET", "/internal/json/miniblock/by-hash/aaaa/epoch", nil) - resp := httptest.NewRecorder() - ws.ServeHTTP(resp, req) + testInternalGroup( + t, + facade, + "/internal/json/shardblock/by-hash/dummyhash", + nil, + http.StatusInternalServerError, + formatExpectedErr(apiErrors.ErrGetBlock, expectedErr), + ) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() - response := rawMiniBlockResponse{} - loadResponse(resp.Body, &response) - assert.Equal(t, http.StatusNotFound, resp.Code) + facade := &mock.FacadeStub{ + GetInternalShardBlockByHashCalled: func(_ common.ApiOutputFormat, _ string) (interface{}, error) { + return expectedShardBlock, nil + }, + } + + response := &internalShardBlockResponse{} + loadInternalBlockGroupResponse( + t, + facade, + "/internal/json/shardblock/by-hash/d08089f2ab739520598fd7aeed08c427460fe94f286383047f3f61951afc4e00", + "GET", + nil, + response, + ) + assert.Equal(t, expectedShardBlock, response.Data.Block) + }) } -func TestGetInternalMiniBlockByHash_ShouldWork(t *testing.T) { +func TestInternalBlockGroup_getJSONMiniBlockByHash(t *testing.T) { t.Parallel() - expectedOutput := block.MiniBlock{} + t.Run("empty hash should error", func(t *testing.T) { + t.Parallel() - facade := mock.FacadeStub{ - GetInternalMiniBlockByHashCalled: func(_ common.ApiOutputFormat, _ string, epoch uint32) (interface{}, error) { - return expectedOutput, nil - }, - } + testInternalGroup(t, &mock.FacadeStub{}, "/internal/json/miniblock/by-hash", nil, http.StatusNotFound, "") + }) + t.Run("empty epoch should error", func(t *testing.T) { + t.Parallel() - 
blockGroup, err := groups.NewInternalBlockGroup(&facade) - require.NoError(t, err) + testInternalGroup(t, &mock.FacadeStub{}, "/internal/json/miniblock/by-hash/aaa/epoch", nil, http.StatusNotFound, "") + }) + t.Run("invalid epoch should error", + testInternalGroupErrorScenario("/internal/json/miniblock/by-hash/aaaa/epoch/not-uint", nil, + formatExpectedErr(apiErrors.ErrGetBlock, apiErrors.ErrInvalidEpoch))) + t.Run("facade error should error", func(t *testing.T) { + t.Parallel() - ws := startWebServer(blockGroup, "internal", getInternalBlockRoutesConfig()) + facade := &mock.FacadeStub{ + GetInternalMiniBlockByHashCalled: func(format common.ApiOutputFormat, txHash string, epoch uint32) (interface{}, error) { + return nil, expectedErr + }, + } - req, _ := http.NewRequest("GET", "/internal/json/miniblock/by-hash/dummyhash/epoch/1", nil) - resp := httptest.NewRecorder() - ws.ServeHTTP(resp, req) + testInternalGroup( + t, + facade, + "/internal/json/miniblock/by-hash/aaaa/epoch/1", + nil, + http.StatusInternalServerError, + formatExpectedErr(apiErrors.ErrGetBlock, expectedErr), + ) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() - response := internalMiniBlockResponse{} - loadResponse(resp.Body, &response) - assert.Equal(t, http.StatusOK, resp.Code) + facade := &mock.FacadeStub{ + GetInternalMiniBlockByHashCalled: func(format common.ApiOutputFormat, hash string, epoch uint32) (interface{}, error) { + return block.MiniBlock{}, nil + }, + } - assert.Equal(t, expectedOutput, response.Data.Block) + response := &internalMiniBlockResponse{} + loadInternalBlockGroupResponse( + t, + facade, + "/internal/json/miniblock/by-hash/aaaa/epoch/1", + "GET", + nil, + response, + ) + assert.Equal(t, block.MiniBlock{}, response.Data.Block) + }) } -func TestGetInternalStartOfEpochValidatorsInfo(t *testing.T) { +func TestInternalBlockGroup_getJSONStartOfEpochValidatorsInfo(t *testing.T) { t.Parallel() - t.Run("no epoch param should fail", func(t *testing.T) { + t.Run("empty epoch should error", func(t *testing.T) { + t.Parallel() + + testInternalGroup(t, &mock.FacadeStub{}, "/internal/json/startofepoch/validators/by-epoch", nil, http.StatusNotFound, "") + }) + t.Run("invalid epoch should error", + testInternalGroupErrorScenario("/internal/json/startofepoch/validators/by-epoch/not-uint", nil, + formatExpectedErr(apiErrors.ErrGetValidatorsInfo, apiErrors.ErrInvalidEpoch))) + t.Run("facade error should fail", func(t *testing.T) { t.Parallel() - facade := mock.FacadeStub{ + facade := &mock.FacadeStub{ GetInternalStartOfEpochValidatorsInfoCalled: func(epoch uint32) ([]*state.ShardValidatorInfo, error) { - return make([]*state.ShardValidatorInfo, 0), nil + return nil, expectedErr }, } - blockGroup, err := groups.NewInternalBlockGroup(&facade) - require.NoError(t, err) - - ws := startWebServer(blockGroup, "internal", getInternalBlockRoutesConfig()) - - req, _ := http.NewRequest("GET", "/internal/json/startofepoch/validators/by-epoch/aaa", nil) - resp := httptest.NewRecorder() - ws.ServeHTTP(resp, req) - - response := internalValidatorsInfoResponse{} - loadResponse(resp.Body, &response) - assert.Equal(t, http.StatusBadRequest, resp.Code) - assert.True(t, strings.Contains(response.Error, apiErrors.ErrGetValidatorsInfo.Error())) + testInternalGroup( + t, + facade, + "/internal/json/startofepoch/validators/by-epoch/1", + nil, + http.StatusInternalServerError, + formatExpectedErr(apiErrors.ErrGetValidatorsInfo, expectedErr), + ) }) - - t.Run("facade error should fail", func(t *testing.T) { + t.Run("should 
work", func(t *testing.T) { t.Parallel() - expectedErr := errors.New("facade error") - facade := mock.FacadeStub{ + expectedOutput := []*state.ShardValidatorInfo{ + { + PublicKey: []byte("pubkey1"), + ShardId: 0, + Index: 1, + TempRating: 500, + }, + } + + facade := &mock.FacadeStub{ GetInternalStartOfEpochValidatorsInfoCalled: func(epoch uint32) ([]*state.ShardValidatorInfo, error) { - return nil, expectedErr + return expectedOutput, nil }, } - blockGroup, err := groups.NewInternalBlockGroup(&facade) - require.NoError(t, err) + response := &internalValidatorsInfoResponse{} + loadInternalBlockGroupResponse( + t, + facade, + "/internal/json/startofepoch/validators/by-epoch/1", + "GET", + nil, + response, + ) + assert.Equal(t, expectedOutput, response.Data.ValidatorsInfo) + }) +} - ws := startWebServer(blockGroup, "internal", getInternalBlockRoutesConfig()) +func TestInternalBlockGroup_IsInterfaceNil(t *testing.T) { + t.Parallel() - req, _ := http.NewRequest("GET", "/internal/json/startofepoch/validators/by-epoch/1", nil) - resp := httptest.NewRecorder() - ws.ServeHTTP(resp, req) + blockGroup, _ := groups.NewInternalBlockGroup(nil) + require.True(t, blockGroup.IsInterfaceNil()) - response := internalValidatorsInfoResponse{} - loadResponse(resp.Body, &response) + blockGroup, _ = groups.NewInternalBlockGroup(&mock.FacadeStub{}) + require.False(t, blockGroup.IsInterfaceNil()) +} - assert.Equal(t, http.StatusInternalServerError, resp.Code) - assert.True(t, strings.Contains(response.Error, expectedErr.Error())) +func TestInternalBlockGroup_UpdateFacadeStub(t *testing.T) { + t.Parallel() + + t.Run("nil facade should error", func(t *testing.T) { + t.Parallel() + + blockGroup, err := groups.NewInternalBlockGroup(&mock.FacadeStub{}) + require.NoError(t, err) + + err = blockGroup.UpdateFacade(nil) + require.Equal(t, apiErrors.ErrNilFacadeHandler, err) }) + t.Run("cast failure should error", func(t *testing.T) { + t.Parallel() + blockGroup, err := groups.NewInternalBlockGroup(&mock.FacadeStub{}) + require.NoError(t, err) + + err = blockGroup.UpdateFacade("this is not a facade handler") + require.True(t, errors.Is(err, apiErrors.ErrFacadeWrongTypeAssertion)) + }) t.Run("should work", func(t *testing.T) { t.Parallel() @@ -1566,13 +1024,13 @@ func TestGetInternalStartOfEpochValidatorsInfo(t *testing.T) { }, } - facade := mock.FacadeStub{ + facade := &mock.FacadeStub{ GetInternalStartOfEpochValidatorsInfoCalled: func(epoch uint32) ([]*state.ShardValidatorInfo, error) { return expectedOutput, nil }, } - blockGroup, err := groups.NewInternalBlockGroup(&facade) + blockGroup, err := groups.NewInternalBlockGroup(facade) require.NoError(t, err) ws := startWebServer(blockGroup, "internal", getInternalBlockRoutesConfig()) @@ -1586,8 +1044,84 @@ func TestGetInternalStartOfEpochValidatorsInfo(t *testing.T) { assert.Equal(t, http.StatusOK, resp.Code) assert.Equal(t, expectedOutput, response.Data.ValidatorsInfo) + + newFacade := &mock.FacadeStub{ + GetInternalStartOfEpochValidatorsInfoCalled: func(epoch uint32) ([]*state.ShardValidatorInfo, error) { + return nil, expectedErr + }, + } + err = blockGroup.UpdateFacade(newFacade) + require.NoError(t, err) + + req, _ = http.NewRequest("GET", "/internal/json/startofepoch/validators/by-epoch/1", nil) + resp = httptest.NewRecorder() + ws.ServeHTTP(resp, req) + + response = internalValidatorsInfoResponse{} + loadResponse(resp.Body, &response) + assert.Equal(t, http.StatusInternalServerError, resp.Code) + assert.True(t, strings.Contains(response.Error, expectedErr.Error())) }) 
+} + +func loadInternalBlockGroupResponse( + t *testing.T, + facade shared.FacadeHandler, + url string, + method string, + body io.Reader, + destination interface{}, +) { + blockGroup, err := groups.NewInternalBlockGroup(facade) + require.NoError(t, err) + + ws := startWebServer(blockGroup, "internal", getInternalBlockRoutesConfig()) + + req, _ := http.NewRequest(method, url, body) + resp := httptest.NewRecorder() + ws.ServeHTTP(resp, req) + + assert.Equal(t, http.StatusOK, resp.Code) + + loadResponse(resp.Body, destination) +} + +func testInternalGroupErrorScenario(url string, body io.Reader, expectedErr string) func(t *testing.T) { + return func(t *testing.T) { + t.Parallel() + + testInternalGroup( + t, + &mock.FacadeStub{}, + url, + body, + http.StatusBadRequest, + expectedErr, + ) + } +} +func testInternalGroup( + t *testing.T, + facade shared.FacadeHandler, + url string, + body io.Reader, + expectedRespCode int, + expectedRespError string, +) { + internalGroup, err := groups.NewInternalBlockGroup(facade) + require.NoError(t, err) + + ws := startWebServer(internalGroup, "internal", getInternalBlockRoutesConfig()) + + req, _ := http.NewRequest("GET", url, body) + resp := httptest.NewRecorder() + ws.ServeHTTP(resp, req) + + response := rawBlockResponse{} + loadResponse(resp.Body, &response) + assert.Equal(t, expectedRespCode, resp.Code) + assert.True(t, strings.Contains(response.Error, expectedRespError)) } func getInternalBlockRoutesConfig() config.ApiRoutesConfig { diff --git a/api/groups/networkGroup_test.go b/api/groups/networkGroup_test.go index 04a4fca603c..dccf923e1b9 100644 --- a/api/groups/networkGroup_test.go +++ b/api/groups/networkGroup_test.go @@ -4,6 +4,7 @@ import ( "encoding/json" "errors" "fmt" + "io" "io/ioutil" "math/big" "net/http" @@ -180,6 +181,36 @@ func TestGetNetworkStatus_ShouldReturnErrorIfFacadeReturnsError(t *testing.T) { assert.Equal(t, expectedErr.Error(), response.Error) } +func TestNetworkConfigMetrics_GasLimitGuardedTxShouldWork(t *testing.T) { + t.Parallel() + + statusMetricsProvider := statusHandler.NewStatusMetrics() + key := common.MetricExtraGasLimitGuardedTx + value := uint64(37) + statusMetricsProvider.SetUInt64Value(key, value) + + facade := mock.FacadeStub{} + facade.StatusMetricsHandler = func() external.StatusMetricsHandler { + return statusMetricsProvider + } + + networkGroup, err := groups.NewNetworkGroup(&facade) + require.NoError(t, err) + + ws := startWebServer(networkGroup, "network", getNetworkRoutesConfig()) + + req, _ := http.NewRequest("GET", "/network/config", nil) + resp := httptest.NewRecorder() + ws.ServeHTTP(resp, req) + + respBytes, _ := ioutil.ReadAll(resp.Body) + respStr := string(respBytes) + assert.Equal(t, resp.Code, http.StatusOK) + + keyAndValueFoundInResponse := strings.Contains(respStr, key) && strings.Contains(respStr, fmt.Sprintf("%d", value)) + assert.True(t, keyAndValueFoundInResponse) +} + func TestNetworkStatusMetrics_ShouldWork(t *testing.T) { t.Parallel() @@ -705,7 +736,6 @@ func TestGetGenesisNodes(t *testing.T) { t.Run("facade error, should fail", func(t *testing.T) { t.Parallel() - expectedErr := errors.New("expected err") facade := mock.FacadeStub{ GetGenesisNodesPubKeysCalled: func() (map[uint32][]string, map[uint32][]string, error) { return nil, nil, expectedErr @@ -743,26 +773,21 @@ func TestGetGenesisNodes(t *testing.T) { Waiting: waiting, } - facade := mock.FacadeStub{ + facade := &mock.FacadeStub{ GetGenesisNodesPubKeysCalled: func() (map[uint32][]string, map[uint32][]string, error) { return eligible, 
waiting, nil }, } - networkGroup, err := groups.NewNetworkGroup(&facade) - require.NoError(t, err) - - ws := startWebServer(networkGroup, "network", getNetworkRoutesConfig()) - - req, _ := http.NewRequest("GET", "/network/genesis-nodes", nil) - resp := httptest.NewRecorder() - ws.ServeHTTP(resp, req) - - assert.Equal(t, resp.Code, http.StatusOK) - - response := genesisNodesConfigResponse{} - loadResponse(resp.Body, &response) - + response := &genesisNodesConfigResponse{} + loadNetworkGroupResponse( + t, + facade, + "/network/genesis-nodes", + "GET", + nil, + response, + ) assert.Equal(t, expectedOutput, response.Data.Nodes) }) } @@ -773,7 +798,6 @@ func TestGetGenesisBalances(t *testing.T) { t.Run("facade error, should fail", func(t *testing.T) { t.Parallel() - expectedErr := errors.New("expected err") facade := mock.FacadeStub{ GetGenesisBalancesCalled: func() ([]*common.InitialAccountAPI, error) { return nil, expectedErr @@ -810,26 +834,21 @@ func TestGetGenesisBalances(t *testing.T) { }, } - facade := mock.FacadeStub{ + facade := &mock.FacadeStub{ GetGenesisBalancesCalled: func() ([]*common.InitialAccountAPI, error) { return initialAccounts, nil }, } - networkGroup, err := groups.NewNetworkGroup(&facade) - require.NoError(t, err) - - ws := startWebServer(networkGroup, "network", getNetworkRoutesConfig()) - - req, _ := http.NewRequest("GET", "/network/genesis-balances", nil) - resp := httptest.NewRecorder() - ws.ServeHTTP(resp, req) - - assert.Equal(t, resp.Code, http.StatusOK) - - response := genesisBalancesResponse{} - loadResponse(resp.Body, &response) - + response := &genesisBalancesResponse{} + loadNetworkGroupResponse( + t, + facade, + "/network/genesis-balances", + "GET", + nil, + response, + ) assert.Equal(t, initialAccounts, response.Data.Balances) }) } @@ -840,7 +859,6 @@ func TestGetGasConfigs(t *testing.T) { t.Run("facade error, should fail", func(t *testing.T) { t.Parallel() - expectedErr := errors.New("expected err") facade := mock.FacadeStub{ GetGasConfigsCalled: func() (map[string]map[string]uint64, error) { return nil, expectedErr @@ -877,12 +895,61 @@ func TestGetGasConfigs(t *testing.T) { common.MetaChainSystemSCsCost: metaChainSystemSCsCost, } - facade := mock.FacadeStub{ + facade := &mock.FacadeStub{ GetGasConfigsCalled: func() (map[string]map[string]uint64, error) { return expectedMap, nil }, } + response := &gasConfigsResponse{} + loadNetworkGroupResponse( + t, + facade, + "/network/gas-configs", + "GET", + nil, + response, + ) + assert.Equal(t, builtInCost, response.Data.Configs.BuiltInCost) + assert.Equal(t, metaChainSystemSCsCost, response.Data.Configs.MetaChainSystemSCsCost) + }) +} + +func TestNetworkGroup_UpdateFacade(t *testing.T) { + t.Parallel() + + t.Run("nil facade should error", func(t *testing.T) { + t.Parallel() + + networkGroup, err := groups.NewNetworkGroup(&mock.FacadeStub{}) + require.NoError(t, err) + + err = networkGroup.UpdateFacade(nil) + require.Equal(t, apiErrors.ErrNilFacadeHandler, err) + }) + t.Run("cast failure should error", func(t *testing.T) { + t.Parallel() + + networkGroup, err := groups.NewNetworkGroup(&mock.FacadeStub{}) + require.NoError(t, err) + + err = networkGroup.UpdateFacade("this is not a facade handler") + require.True(t, errors.Is(err, apiErrors.ErrFacadeWrongTypeAssertion)) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + builtInCost := map[string]uint64{ + "val1": 1, + } + expectedMap := map[string]map[string]uint64{ + common.BuiltInCost: builtInCost, + } + facade := mock.FacadeStub{ + 
GetGasConfigsCalled: func() (map[string]map[string]uint64, error) { + return expectedMap, nil + }, + } networkGroup, err := groups.NewNetworkGroup(&facade) require.NoError(t, err) @@ -893,15 +960,61 @@ func TestGetGasConfigs(t *testing.T) { ws.ServeHTTP(resp, req) assert.Equal(t, resp.Code, http.StatusOK) - response := gasConfigsResponse{} loadResponse(resp.Body, &response) - assert.Equal(t, builtInCost, response.Data.Configs.BuiltInCost) - assert.Equal(t, metaChainSystemSCsCost, response.Data.Configs.MetaChainSystemSCsCost) + + expectedErr := errors.New("expected error") + newFacade := mock.FacadeStub{ + GetGasConfigsCalled: func() (map[string]map[string]uint64, error) { + return nil, expectedErr + }, + } + err = networkGroup.UpdateFacade(&newFacade) + require.NoError(t, err) + + req, _ = http.NewRequest("GET", "/network/gas-configs", nil) + resp = httptest.NewRecorder() + ws.ServeHTTP(resp, req) + + loadResponse(resp.Body, &response) + assert.Equal(t, http.StatusInternalServerError, resp.Code) + assert.True(t, strings.Contains(response.Error, expectedErr.Error())) }) } +func TestNetworkGroup_IsInterfaceNil(t *testing.T) { + t.Parallel() + + networkGroup, _ := groups.NewNetworkGroup(nil) + require.True(t, networkGroup.IsInterfaceNil()) + + networkGroup, _ = groups.NewNetworkGroup(&mock.FacadeStub{}) + require.False(t, networkGroup.IsInterfaceNil()) +} + +func loadNetworkGroupResponse( + t *testing.T, + facade shared.FacadeHandler, + url string, + method string, + body io.Reader, + destination interface{}, +) { + networkGroup, err := groups.NewNetworkGroup(facade) + require.NoError(t, err) + + ws := startWebServer(networkGroup, "network", getNetworkRoutesConfig()) + + req, _ := http.NewRequest(method, url, body) + resp := httptest.NewRecorder() + ws.ServeHTTP(resp, req) + + assert.Equal(t, http.StatusOK, resp.Code) + + loadResponse(resp.Body, destination) +} + func getNetworkRoutesConfig() config.ApiRoutesConfig { return config.ApiRoutesConfig{ APIPackages: map[string]config.APIPackageConfig{ diff --git a/api/groups/nodeGroup_test.go b/api/groups/nodeGroup_test.go index ba6ade3f068..eb21fe40bd1 100644 --- a/api/groups/nodeGroup_test.go +++ b/api/groups/nodeGroup_test.go @@ -349,10 +349,34 @@ func TestP2PStatusMetrics_ShouldDisplayNonP2pMetrics(t *testing.T) { assert.False(t, strings.Contains(respStr, key)) } +func TestQueryDebug_ShouldBindJSONErrorsShouldErr(t *testing.T) { + t.Parallel() + + facade := mock.FacadeStub{ + GetQueryHandlerCalled: func(name string) (handler debug.QueryHandler, err error) { + return nil, nil + }, + } + + nodeGroup, err := groups.NewNodeGroup(&facade) + require.NoError(t, err) + + ws := startWebServer(nodeGroup, "node", getNodeRoutesConfig()) + + req, _ := http.NewRequest("POST", "/node/debug", bytes.NewBuffer([]byte("invalid data"))) + resp := httptest.NewRecorder() + ws.ServeHTTP(resp, req) + + queryResponse := &generalResponse{} + loadResponse(resp.Body, queryResponse) + + assert.Equal(t, http.StatusBadRequest, resp.Code) + assert.Contains(t, queryResponse.Error, apiErrors.ErrValidation.Error()) +} + func TestQueryDebug_GetQueryErrorsShouldErr(t *testing.T) { t.Parallel() - expectedErr := errors.New("expected error") facade := mock.FacadeStub{ GetQueryHandlerCalled: func(name string) (handler debug.QueryHandler, err error) { return nil, expectedErr @@ -422,7 +446,6 @@ func TestQueryDebug_GetQueryShouldWork(t *testing.T) { func TestPeerInfo_PeerInfoErrorsShouldErr(t *testing.T) { t.Parallel() - expectedErr := errors.New("expected error") facade := 
mock.FacadeStub{ GetPeerInfoCalled: func(pid string) ([]core.QueryP2PPeerInfo, error) { return nil, expectedErr @@ -485,10 +508,35 @@ func TestPeerInfo_PeerInfoShouldWork(t *testing.T) { assert.NotNil(t, responseInfo["info"]) } +func TestEpochStartData_InvalidEpochShouldErr(t *testing.T) { + t.Parallel() + + facade := mock.FacadeStub{ + GetEpochStartDataAPICalled: func(epoch uint32) (*common.EpochStartDataAPI, error) { + return nil, nil + }, + } + + nodeGroup, err := groups.NewNodeGroup(&facade) + require.NoError(t, err) + + ws := startWebServer(nodeGroup, "node", getNodeRoutesConfig()) + + req, _ := http.NewRequest("GET", "/node/epoch-start/invalid", nil) + resp := httptest.NewRecorder() + ws.ServeHTTP(resp, req) + + response := &shared.GenericAPIResponse{} + loadResponse(resp.Body, response) + + assert.Equal(t, http.StatusBadRequest, resp.Code) + assert.True(t, strings.Contains(response.Error, apiErrors.ErrValidation.Error())) + assert.True(t, strings.Contains(response.Error, apiErrors.ErrBadUrlParams.Error())) +} + func TestEpochStartData_FacadeErrorsShouldErr(t *testing.T) { t.Parallel() - expectedErr := errors.New("expected error") facade := mock.FacadeStub{ GetEpochStartDataAPICalled: func(epoch uint32) (*common.EpochStartDataAPI, error) { return nil, expectedErr @@ -603,6 +651,89 @@ func TestPrometheusMetrics_ShouldWork(t *testing.T) { assert.True(t, keyAndValueFoundInResponse) } +func TestNodeGroup_UpdateFacade(t *testing.T) { + t.Parallel() + + t.Run("nil facade should error", func(t *testing.T) { + t.Parallel() + + nodeGroup, err := groups.NewNodeGroup(&mock.FacadeStub{}) + require.NoError(t, err) + + err = nodeGroup.UpdateFacade(nil) + require.Equal(t, apiErrors.ErrNilFacadeHandler, err) + }) + t.Run("cast failure should error", func(t *testing.T) { + t.Parallel() + + nodeGroup, err := groups.NewNodeGroup(&mock.FacadeStub{}) + require.NoError(t, err) + + err = nodeGroup.UpdateFacade("this is not a facade handler") + require.True(t, errors.Is(err, apiErrors.ErrFacadeWrongTypeAssertion)) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + statusMetricsProvider := statusHandler.NewStatusMetrics() + key := "test-key" + value := uint64(37) + statusMetricsProvider.SetUInt64Value(key, value) + + facade := mock.FacadeStub{} + facade.StatusMetricsHandler = func() external.StatusMetricsHandler { + return statusMetricsProvider + } + + nodeGroup, err := groups.NewNodeGroup(&facade) + require.NoError(t, err) + + ws := startWebServer(nodeGroup, "node", getNodeRoutesConfig()) + + req, _ := http.NewRequest("GET", "/node/metrics", nil) + resp := httptest.NewRecorder() + ws.ServeHTTP(resp, req) + + respBytes, _ := ioutil.ReadAll(resp.Body) + respStr := string(respBytes) + assert.Equal(t, resp.Code, http.StatusOK) + keyAndValueFoundInResponse := strings.Contains(respStr, key) && strings.Contains(respStr, fmt.Sprintf("%d", value)) + assert.True(t, keyAndValueFoundInResponse) + + newFacade := mock.FacadeStub{ + StatusMetricsHandler: func() external.StatusMetricsHandler { + return &testscommon.StatusMetricsStub{ + StatusMetricsWithoutP2PPrometheusStringCalled: func() (string, error) { + return "", expectedErr + }, + } + }, + } + + err = nodeGroup.UpdateFacade(&newFacade) + require.NoError(t, err) + + req, _ = http.NewRequest("GET", "/node/metrics", nil) + resp = httptest.NewRecorder() + ws.ServeHTTP(resp, req) + + assert.Equal(t, http.StatusInternalServerError, resp.Code) + response := &shared.GenericAPIResponse{} + loadResponse(resp.Body, response) + assert.Equal(t, 
expectedErr.Error(), response.Error) + }) +} + +func TestNodeGroup_IsInterfaceNil(t *testing.T) { + t.Parallel() + + nodeGroup, _ := groups.NewNodeGroup(nil) + require.True(t, nodeGroup.IsInterfaceNil()) + + nodeGroup, _ = groups.NewNodeGroup(&mock.FacadeStub{}) + require.False(t, nodeGroup.IsInterfaceNil()) +} + func loadResponseAsString(rsp io.Reader, response *statusResponse) { buff, err := ioutil.ReadAll(rsp) if err != nil { diff --git a/api/groups/proofGroup.go b/api/groups/proofGroup.go index 6f6a4b17bb9..6257762c028 100644 --- a/api/groups/proofGroup.go +++ b/api/groups/proofGroup.go @@ -115,56 +115,25 @@ type VerifyProofRequest struct { func (pg *proofGroup) getProof(c *gin.Context) { rootHash := c.Param("roothash") if rootHash == "" { - c.JSON( - http.StatusBadRequest, - shared.GenericAPIResponse{ - Data: nil, - Error: fmt.Sprintf("%s: %s", errors.ErrValidation.Error(), errors.ErrValidationEmptyRootHash.Error()), - Code: shared.ReturnCodeRequestError, - }, - ) + shared.RespondWithValidationError(c, errors.ErrValidation, errors.ErrValidationEmptyRootHash) return } address := c.Param("address") if address == "" { - c.JSON( - http.StatusBadRequest, - shared.GenericAPIResponse{ - Data: nil, - Error: fmt.Sprintf("%s: %s", errors.ErrValidation.Error(), errors.ErrValidationEmptyAddress.Error()), - Code: shared.ReturnCodeRequestError, - }, - ) + shared.RespondWithValidationError(c, errors.ErrValidation, errors.ErrValidationEmptyAddress) return } response, err := pg.getFacade().GetProof(rootHash, address) if err != nil { - c.JSON( - http.StatusInternalServerError, - shared.GenericAPIResponse{ - Data: nil, - Error: fmt.Sprintf("%s: %s", errors.ErrGetProof.Error(), err.Error()), - Code: shared.ReturnCodeInternalError, - }, - ) + shared.RespondWithInternalError(c, errors.ErrGetProof, err) return } hexProof := bytesToHex(response.Proof) - c.JSON( - http.StatusOK, - shared.GenericAPIResponse{ - Data: gin.H{ - "proof": hexProof, - "value": hex.EncodeToString(response.Value), - }, - Error: "", - Code: shared.ReturnCodeSuccess, - }, - ) + shared.RespondWithSuccess(c, gin.H{"proof": hexProof, "value": hex.EncodeToString(response.Value)}) } // getProofDataTrie will receive a rootHash, a key and an address from the client, and it will return the Merkle proofs @@ -172,53 +141,25 @@ func (pg *proofGroup) getProof(c *gin.Context) { func (pg *proofGroup) getProofDataTrie(c *gin.Context) { rootHash := c.Param("roothash") if rootHash == "" { - c.JSON( - http.StatusBadRequest, - shared.GenericAPIResponse{ - Data: nil, - Error: fmt.Sprintf("%s: %s", errors.ErrValidation.Error(), errors.ErrValidationEmptyRootHash.Error()), - Code: shared.ReturnCodeRequestError, - }, - ) + shared.RespondWithValidationError(c, errors.ErrValidation, errors.ErrValidationEmptyRootHash) return } address := c.Param("address") if address == "" { - c.JSON( - http.StatusBadRequest, - shared.GenericAPIResponse{ - Data: nil, - Error: fmt.Sprintf("%s: %s", errors.ErrValidation.Error(), errors.ErrValidationEmptyAddress.Error()), - Code: shared.ReturnCodeRequestError, - }, - ) + shared.RespondWithValidationError(c, errors.ErrValidation, errors.ErrValidationEmptyAddress) return } key := c.Param("key") if key == "" { - c.JSON( - http.StatusBadRequest, - shared.GenericAPIResponse{ - Data: nil, - Error: fmt.Sprintf("%s: %s", errors.ErrValidation.Error(), errors.ErrValidationEmptyKey.Error()), - Code: shared.ReturnCodeRequestError, - }, - ) + shared.RespondWithValidationError(c, errors.ErrValidation, errors.ErrValidationEmptyKey) return } 
mainTrieResponse, dataTrieResponse, err := pg.getFacade().GetProofDataTrie(rootHash, address, key) if err != nil { - c.JSON( - http.StatusInternalServerError, - shared.GenericAPIResponse{ - Data: nil, - Error: fmt.Sprintf("%s: %s", errors.ErrGetProof.Error(), err.Error()), - Code: shared.ReturnCodeInternalError, - }, - ) + shared.RespondWithInternalError(c, errors.ErrGetProof, err) return } @@ -226,18 +167,11 @@ func (pg *proofGroup) getProofDataTrie(c *gin.Context) { proofs["mainProof"] = bytesToHex(mainTrieResponse.Proof) proofs["dataTrieProof"] = bytesToHex(dataTrieResponse.Proof) - c.JSON( - http.StatusOK, - shared.GenericAPIResponse{ - Data: gin.H{ - "proofs": proofs, - "value": hex.EncodeToString(dataTrieResponse.Value), - "dataTrieRootHash": dataTrieResponse.RootHash, - }, - Error: "", - Code: shared.ReturnCodeSuccess, - }, - ) + shared.RespondWithSuccess(c, gin.H{ + "proofs": proofs, + "value": hex.EncodeToString(dataTrieResponse.Value), + "dataTrieRootHash": dataTrieResponse.RootHash, + }) } func bytesToHex(bytesValue [][]byte) []string { @@ -254,44 +188,23 @@ func bytesToHex(bytesValue [][]byte) []string { func (pg *proofGroup) getProofCurrentRootHash(c *gin.Context) { address := c.Param("address") if address == "" { - c.JSON( - http.StatusBadRequest, - shared.GenericAPIResponse{ - Data: nil, - Error: fmt.Sprintf("%s: %s", errors.ErrValidation.Error(), errors.ErrValidationEmptyAddress.Error()), - Code: shared.ReturnCodeRequestError, - }, - ) + shared.RespondWithValidationError(c, errors.ErrValidation, errors.ErrValidationEmptyAddress) return } response, err := pg.getFacade().GetProofCurrentRootHash(address) if err != nil { - c.JSON( - http.StatusInternalServerError, - shared.GenericAPIResponse{ - Data: nil, - Error: fmt.Sprintf("%s: %s", errors.ErrGetProof.Error(), err.Error()), - Code: shared.ReturnCodeInternalError, - }, - ) + shared.RespondWithInternalError(c, errors.ErrGetProof, err) return } hexProof := bytesToHex(response.Proof) - c.JSON( - http.StatusOK, - shared.GenericAPIResponse{ - Data: gin.H{ - "proof": hexProof, - "value": hex.EncodeToString(response.Value), - "rootHash": response.RootHash, - }, - Error: "", - Code: shared.ReturnCodeSuccess, - }, - ) + shared.RespondWithSuccess(c, gin.H{ + "proof": hexProof, + "value": hex.EncodeToString(response.Value), + "rootHash": response.RootHash, + }) } // verifyProof will receive a rootHash, an address and a Merkle proof from the client, @@ -300,14 +213,7 @@ func (pg *proofGroup) verifyProof(c *gin.Context) { var verifyProofParams = &VerifyProofRequest{} err := c.ShouldBindJSON(&verifyProofParams) if err != nil { - c.JSON( - http.StatusBadRequest, - shared.GenericAPIResponse{ - Data: nil, - Error: fmt.Sprintf("%s: %s", errors.ErrValidation.Error(), err.Error()), - Code: shared.ReturnCodeRequestError, - }, - ) + shared.RespondWithValidationError(c, errors.ErrValidation, err) return } @@ -315,14 +221,7 @@ func (pg *proofGroup) verifyProof(c *gin.Context) { for _, hexProof := range verifyProofParams.Proof { bytesProof, err := hex.DecodeString(hexProof) if err != nil { - c.JSON( - http.StatusBadRequest, - shared.GenericAPIResponse{ - Data: nil, - Error: fmt.Sprintf("%s: %s", errors.ErrValidation.Error(), err.Error()), - Code: shared.ReturnCodeRequestError, - }, - ) + shared.RespondWithValidationError(c, errors.ErrValidation, err) return } @@ -332,25 +231,11 @@ func (pg *proofGroup) verifyProof(c *gin.Context) { var proofOk bool proofOk, err = pg.getFacade().VerifyProof(verifyProofParams.RootHash, verifyProofParams.Address, proof) 
if err != nil { - c.JSON( - http.StatusInternalServerError, - shared.GenericAPIResponse{ - Data: nil, - Error: fmt.Sprintf("%s: %s", errors.ErrVerifyProof.Error(), err.Error()), - Code: shared.ReturnCodeInternalError, - }, - ) + shared.RespondWithInternalError(c, errors.ErrVerifyProof, err) return } - c.JSON( - http.StatusOK, - shared.GenericAPIResponse{ - Data: gin.H{"ok": proofOk}, - Error: "", - Code: shared.ReturnCodeSuccess, - }, - ) + shared.RespondWithSuccess(c, gin.H{"ok": proofOk}) } func (pg *proofGroup) getFacade() proofFacadeHandler { diff --git a/api/groups/proofGroup_test.go b/api/groups/proofGroup_test.go index 481070d60ab..4c1ab926c57 100644 --- a/api/groups/proofGroup_test.go +++ b/api/groups/proofGroup_test.go @@ -379,6 +379,94 @@ func TestVerifyProof(t *testing.T) { assert.True(t, isValid) } +func TestProofGroup_UpdateFacade(t *testing.T) { + t.Parallel() + + t.Run("nil facade should error", func(t *testing.T) { + t.Parallel() + + proofGroup, err := groups.NewProofGroup(&mock.FacadeStub{}) + require.NoError(t, err) + + err = proofGroup.UpdateFacade(nil) + require.Equal(t, apiErrors.ErrNilFacadeHandler, err) + }) + t.Run("cast failure should error", func(t *testing.T) { + t.Parallel() + + proofGroup, err := groups.NewProofGroup(&mock.FacadeStub{}) + require.NoError(t, err) + + err = proofGroup.UpdateFacade("this is not a facade handler") + require.True(t, errors.Is(err, apiErrors.ErrFacadeWrongTypeAssertion)) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + rootHash := "rootHash" + address := "address" + validProof := []string{hex.EncodeToString([]byte("valid")), hex.EncodeToString([]byte("proof"))} + verifyProofParams := groups.VerifyProofRequest{ + RootHash: rootHash, + Address: address, + Proof: validProof, + } + verifyProofBytes, _ := json.Marshal(verifyProofParams) + + facade := &mock.FacadeStub{ + VerifyProofCalled: func(rH string, addr string, proof [][]byte) (bool, error) { + return true, nil + }, + } + + proofGroup, err := groups.NewProofGroup(facade) + require.NoError(t, err) + + ws := startWebServer(proofGroup, "proof", getProofRoutesConfig()) + + req, _ := http.NewRequest("POST", "/proof/verify", bytes.NewBuffer(verifyProofBytes)) + resp := httptest.NewRecorder() + ws.ServeHTTP(resp, req) + + response := shared.GenericAPIResponse{} + loadResponse(resp.Body, &response) + assert.Equal(t, shared.ReturnCodeSuccess, response.Code) + responseMap, ok := response.Data.(map[string]interface{}) + assert.True(t, ok) + isValid, ok := responseMap["ok"].(bool) + assert.True(t, ok) + assert.True(t, isValid) + + verifyProofErr := fmt.Errorf("VerifyProof err") + newFacade := &mock.FacadeStub{ + VerifyProofCalled: func(rootHash string, address string, proof [][]byte) (bool, error) { + return false, verifyProofErr + }, + } + + err = proofGroup.UpdateFacade(newFacade) + require.NoError(t, err) + + req, _ = http.NewRequest("POST", "/proof/verify", bytes.NewBuffer(verifyProofBytes)) + resp = httptest.NewRecorder() + ws.ServeHTTP(resp, req) + + loadResponse(resp.Body, &response) + assert.Equal(t, shared.ReturnCodeInternalError, response.Code) + assert.True(t, strings.Contains(response.Error, apiErrors.ErrVerifyProof.Error())) + }) +} + +func TestProofGroup_IsInterfaceNil(t *testing.T) { + t.Parallel() + + proofGroup, _ := groups.NewProofGroup(nil) + require.True(t, proofGroup.IsInterfaceNil()) + + proofGroup, _ = groups.NewProofGroup(&mock.FacadeStub{}) + require.False(t, proofGroup.IsInterfaceNil()) +} + func getProofRoutesConfig() config.ApiRoutesConfig { return
config.ApiRoutesConfig{ APIPackages: map[string]config.APIPackageConfig{ diff --git a/api/groups/transactionGroup.go b/api/groups/transactionGroup.go index a23c6e7962e..70b77cae6b6 100644 --- a/api/groups/transactionGroup.go +++ b/api/groups/transactionGroup.go @@ -18,6 +18,7 @@ import ( "github.com/multiversx/mx-chain-go/api/shared" "github.com/multiversx/mx-chain-go/api/shared/logging" "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/node/external" txSimData "github.com/multiversx/mx-chain-go/process/txsimulator/data" ) @@ -43,8 +44,7 @@ const ( // transactionFacadeHandler defines the methods to be implemented by a facade for transaction requests type transactionFacadeHandler interface { - CreateTransaction(nonce uint64, value string, receiver string, receiverUsername []byte, sender string, senderUsername []byte, gasPrice uint64, - gasLimit uint64, data []byte, signatureHex string, chainID string, version uint32, options uint32) (*transaction.Transaction, []byte, error) + CreateTransaction(txArgs *external.ArgsCreateTransaction) (*transaction.Transaction, []byte, error) ValidateTransaction(tx *transaction.Transaction) error ValidateTransactionForSimulation(tx *transaction.Transaction, checkSignature bool) error SendBulkTransactions([]*transaction.Transaction) (uint64, error) @@ -161,19 +161,21 @@ type MultipleTxRequest struct { // SendTxRequest represents the structure that maps and validates user input for publishing a new transaction type SendTxRequest struct { - Sender string `form:"sender" json:"sender"` - Receiver string `form:"receiver" json:"receiver"` - SenderUsername []byte `json:"senderUsername,omitempty"` - ReceiverUsername []byte `json:"receiverUsername,omitempty"` - Value string `form:"value" json:"value"` - Data []byte `form:"data" json:"data"` - Nonce uint64 `form:"nonce" json:"nonce"` - GasPrice uint64 `form:"gasPrice" json:"gasPrice"` - GasLimit uint64 `form:"gasLimit" json:"gasLimit"` - Signature string `form:"signature" json:"signature"` - ChainID string `form:"chainID" json:"chainID"` - Version uint32 `form:"version" json:"version"` - Options uint32 `json:"options,omitempty"` + Sender string `form:"sender" json:"sender"` + Receiver string `form:"receiver" json:"receiver"` + SenderUsername []byte `json:"senderUsername,omitempty"` + ReceiverUsername []byte `json:"receiverUsername,omitempty"` + Value string `form:"value" json:"value"` + Data []byte `form:"data" json:"data"` + Nonce uint64 `form:"nonce" json:"nonce"` + GasPrice uint64 `form:"gasPrice" json:"gasPrice"` + GasLimit uint64 `form:"gasLimit" json:"gasLimit"` + Signature string `form:"signature" json:"signature"` + ChainID string `form:"chainID" json:"chainID"` + Version uint32 `form:"version" json:"version"` + Options uint32 `json:"options,omitempty"` + GuardianAddr string `json:"guardian,omitempty"` + GuardianSignature string `json:"guardianSignature,omitempty"` } // TxResponse represents the structure on which the response will be validated against @@ -215,23 +217,27 @@ func (tg *transactionGroup) simulateTransaction(c *gin.Context) { return } + txArgs := &external.ArgsCreateTransaction{ + Nonce: gtx.Nonce, + Value: gtx.Value, + Receiver: gtx.Receiver, + ReceiverUsername: gtx.ReceiverUsername, + Sender: gtx.Sender, + SenderUsername: gtx.SenderUsername, + GasPrice: gtx.GasPrice, + GasLimit: gtx.GasLimit, + DataField: gtx.Data, + SignatureHex: gtx.Signature, + ChainID: gtx.ChainID, + Version: gtx.Version, + Options: gtx.Options, + Guardian: gtx.GuardianAddr, + GuardianSigHex: 
gtx.GuardianSignature, + } start := time.Now() - tx, txHash, err := tg.getFacade().CreateTransaction( - gtx.Nonce, - gtx.Value, - gtx.Receiver, - gtx.ReceiverUsername, - gtx.Sender, - gtx.SenderUsername, - gtx.GasPrice, - gtx.GasLimit, - gtx.Data, - gtx.Signature, - gtx.ChainID, - gtx.Version, - gtx.Options, - ) + tx, txHash, err := tg.getFacade().CreateTransaction(txArgs) logging.LogAPIActionDurationIfNeeded(start, "API call: CreateTransaction") + if err != nil { c.JSON( http.StatusBadRequest, @@ -301,22 +307,25 @@ func (tg *transactionGroup) sendTransaction(c *gin.Context) { return } + txArgs := &external.ArgsCreateTransaction{ + Nonce: gtx.Nonce, + Value: gtx.Value, + Receiver: gtx.Receiver, + ReceiverUsername: gtx.ReceiverUsername, + Sender: gtx.Sender, + SenderUsername: gtx.SenderUsername, + GasPrice: gtx.GasPrice, + GasLimit: gtx.GasLimit, + DataField: gtx.Data, + SignatureHex: gtx.Signature, + ChainID: gtx.ChainID, + Version: gtx.Version, + Options: gtx.Options, + Guardian: gtx.GuardianAddr, + GuardianSigHex: gtx.GuardianSignature, + } start := time.Now() - tx, txHash, err := tg.getFacade().CreateTransaction( - gtx.Nonce, - gtx.Value, - gtx.Receiver, - gtx.ReceiverUsername, - gtx.Sender, - gtx.SenderUsername, - gtx.GasPrice, - gtx.GasLimit, - gtx.Data, - gtx.Signature, - gtx.ChainID, - gtx.Version, - gtx.Options, - ) + tx, txHash, err := tg.getFacade().CreateTransaction(txArgs) logging.LogAPIActionDurationIfNeeded(start, "API call: CreateTransaction") if err != nil { c.JSON( @@ -396,22 +405,25 @@ func (tg *transactionGroup) sendMultipleTransactions(c *gin.Context) { var start time.Time txsHashes := make(map[int]string) for idx, receivedTx := range gtx { start = time.Now() - tx, txHash, err = tg.getFacade().CreateTransaction( - receivedTx.Nonce, - receivedTx.Value, - receivedTx.Receiver, - receivedTx.ReceiverUsername, - receivedTx.Sender, - receivedTx.SenderUsername, - receivedTx.GasPrice, - receivedTx.GasLimit, - receivedTx.Data, - receivedTx.Signature, - receivedTx.ChainID, - receivedTx.Version, - receivedTx.Options, - ) + txArgs := &external.ArgsCreateTransaction{ + Nonce: receivedTx.Nonce, + Value: receivedTx.Value, + Receiver: receivedTx.Receiver, + ReceiverUsername: receivedTx.ReceiverUsername, + Sender: receivedTx.Sender, + SenderUsername: receivedTx.SenderUsername, + GasPrice: receivedTx.GasPrice, + GasLimit: receivedTx.GasLimit, + DataField: receivedTx.Data, + SignatureHex: receivedTx.Signature, + ChainID: receivedTx.ChainID, + Version: receivedTx.Version, + Options: receivedTx.Options, + Guardian: receivedTx.GuardianAddr, + GuardianSigHex: receivedTx.GuardianSignature, + } + tx, txHash, err = tg.getFacade().CreateTransaction(txArgs) logging.LogAPIActionDurationIfNeeded(start, "API call: CreateTransaction") if err != nil { continue @@ -523,22 +535,25 @@ func (tg *transactionGroup) computeTransactionGasLimit(c *gin.Context) { return } + txArgs := &external.ArgsCreateTransaction{ + Nonce: gtx.Nonce, + Value: gtx.Value, + Receiver: gtx.Receiver, + ReceiverUsername: gtx.ReceiverUsername, + Sender: gtx.Sender, + SenderUsername: gtx.SenderUsername, + GasPrice: gtx.GasPrice, + GasLimit: gtx.GasLimit, + DataField: gtx.Data, + SignatureHex: gtx.Signature, + ChainID: gtx.ChainID, + Version: gtx.Version, + Options: gtx.Options, + Guardian: gtx.GuardianAddr, + GuardianSigHex: gtx.GuardianSignature, + } start := time.Now() - tx, _, err := tg.getFacade().CreateTransaction( - gtx.Nonce, - gtx.Value, - gtx.Receiver, - gtx.ReceiverUsername, - gtx.Sender, - gtx.SenderUsername, -
gtx.GasPrice, - gtx.GasLimit, - gtx.Data, - gtx.Signature, - gtx.ChainID, - gtx.Version, - gtx.Options, - ) + tx, _, err := tg.getFacade().CreateTransaction(txArgs) logging.LogAPIActionDurationIfNeeded(start, "API call: CreateTransaction") if err != nil { c.JSON( diff --git a/api/groups/transactionGroup_test.go b/api/groups/transactionGroup_test.go index 60cab3fb24a..82d67abf1aa 100644 --- a/api/groups/transactionGroup_test.go +++ b/api/groups/transactionGroup_test.go @@ -6,7 +6,7 @@ import ( "encoding/json" "errors" "fmt" - "math/big" + "io" "net/http" "net/http/httptest" "strings" @@ -20,6 +20,7 @@ import ( "github.com/multiversx/mx-chain-go/api/shared" "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/node/external" txSimData "github.com/multiversx/mx-chain-go/process/txsimulator/data" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -128,1042 +129,983 @@ type txPoolNonceGapsForSenderResponse struct { Code string `json:"code"` } -func TestGetTransaction_WithCorrectHashShouldReturnTransaction(t *testing.T) { - sender := "sender" - receiver := "receiver" - value := "10" - txData := []byte("data") - hash := "hash" - facade := mock.FacadeStub{ - GetTransactionHandler: func(hash string, withEvents bool) (i *dataTx.ApiTransactionResult, e error) { - return &dataTx.ApiTransactionResult{ - Sender: sender, - Receiver: receiver, - Data: txData, - Value: value, - }, nil - }, - } - - transactionGroup, err := groups.NewTransactionGroup(&facade) - require.NoError(t, err) - - ws := startWebServer(transactionGroup, "transaction", getTransactionRoutesConfig()) - - req, _ := http.NewRequest("GET", "/transaction/"+hash, nil) - resp := httptest.NewRecorder() - ws.ServeHTTP(resp, req) - - response := transactionResponse{} - loadResponse(resp.Body, &response) - - txResp := response.Data.TxResp - assert.Equal(t, http.StatusOK, resp.Code) - assert.Equal(t, sender, txResp.Sender) - assert.Equal(t, receiver, txResp.Receiver) - assert.Equal(t, value, txResp.Value) - assert.Equal(t, txData, txResp.Data) -} - -func TestGetTransaction_WithUnknownHashShouldReturnNil(t *testing.T) { - sender := "sender" - receiver := "receiver" - value := "10" - txData := []byte("data") - wrongHash := "wronghash" - facade := mock.FacadeStub{ - GetTransactionHandler: func(hash string, withEvents bool) (*dataTx.ApiTransactionResult, error) { - if hash == wrongHash { - return nil, errors.New("local error") - } - return &dataTx.ApiTransactionResult{ - Sender: sender, - Receiver: receiver, - Data: txData, - Value: value, - }, nil - }, - } - - transactionGroup, err := groups.NewTransactionGroup(&facade) - require.NoError(t, err) - - ws := startWebServer(transactionGroup, "transaction", getTransactionRoutesConfig()) - - req, _ := http.NewRequest("GET", "/transaction/"+wrongHash, nil) - resp := httptest.NewRecorder() - ws.ServeHTTP(resp, req) - - txResp := transactionResponse{} - loadResponse(resp.Body, &txResp) - - assert.Equal(t, http.StatusInternalServerError, resp.Code) - assert.Empty(t, txResp.Data) -} - -func TestGetTransaction_ErrorWithExceededNumGoRoutines(t *testing.T) { - t.Parallel() - - facade := mock.FacadeStub{ - GetThrottlerForEndpointCalled: func(_ string) (core.Throttler, bool) { - return &mock.ThrottlerStub{ - CanProcessCalled: func() bool { return false }, - }, true - }, - } - - transactionGroup, err := groups.NewTransactionGroup(&facade) - require.NoError(t, err) - - ws := startWebServer(transactionGroup, "transaction", 
getTransactionRoutesConfig()) - - req, _ := http.NewRequest("GET", "/transaction/eeee", nil) - - resp := httptest.NewRecorder() - ws.ServeHTTP(resp, req) - - txResp := transactionResponse{} - loadResponse(resp.Body, &txResp) - - assert.Equal(t, http.StatusTooManyRequests, resp.Code) - assert.True(t, strings.Contains(txResp.Error, apiErrors.ErrTooManyRequests.Error())) - assert.Equal(t, string(shared.ReturnCodeSystemBusy), txResp.Code) - assert.Empty(t, txResp.Data) -} - -func TestSendTransaction_ErrorWithExceededNumGoRoutines(t *testing.T) { - t.Parallel() - - facade := mock.FacadeStub{ - GetThrottlerForEndpointCalled: func(_ string) (core.Throttler, bool) { - return &mock.ThrottlerStub{ - CanProcessCalled: func() bool { return false }, - }, true - }, - } - - transactionGroup, err := groups.NewTransactionGroup(&facade) - require.NoError(t, err) - - ws := startWebServer(transactionGroup, "transaction", getTransactionRoutesConfig()) - - tx := groups.SendTxRequest{} - - jsonBytes, _ := json.Marshal(tx) - req, _ := http.NewRequest("POST", "/transaction/send", bytes.NewBuffer(jsonBytes)) - - resp := httptest.NewRecorder() - ws.ServeHTTP(resp, req) - - txResp := sendSingleTxResponse{} - loadResponse(resp.Body, &txResp) - - assert.Equal(t, http.StatusTooManyRequests, resp.Code) - assert.True(t, strings.Contains(txResp.Error, apiErrors.ErrTooManyRequests.Error())) - assert.Equal(t, string(shared.ReturnCodeSystemBusy), txResp.Code) - assert.Empty(t, txResp.Data) -} - -func TestSendTransaction_WrongParametersShouldErrorOnValidation(t *testing.T) { - t.Parallel() - sender := "sender" - receiver := "receiver" - value := "ishouldbeint" - data := "data" - - facade := mock.FacadeStub{} - - transactionGroup, err := groups.NewTransactionGroup(&facade) - require.NoError(t, err) - - ws := startWebServer(transactionGroup, "transaction", getTransactionRoutesConfig()) - - jsonStr := fmt.Sprintf(`{"sender":"%s", "receiver":"%s", "value":%s, "data":"%s"}`, - sender, - receiver, - value, - data, - ) - - req, _ := http.NewRequest("POST", "/transaction/send", bytes.NewBuffer([]byte(jsonStr))) - - resp := httptest.NewRecorder() - ws.ServeHTTP(resp, req) - - txResp := sendSingleTxResponse{} - loadResponse(resp.Body, &txResp) - - assert.Equal(t, http.StatusBadRequest, resp.Code) - assert.Contains(t, txResp.Error, apiErrors.ErrValidation.Error()) - assert.Empty(t, txResp.Data) -} - -func TestSendTransaction_ErrorWhenFacadeSendTransactionError(t *testing.T) { - t.Parallel() - sender := "sender" - receiver := "receiver" - value := big.NewInt(10) - data := "data" - signature := "aabbccdd" - errorString := "send transaction error" - - facade := mock.FacadeStub{ - CreateTransactionHandler: func(nonce uint64, value string, receiver string, receiverUsername []byte, sender string, senderUsername []byte, gasPrice uint64, gasLimit uint64, data []byte, signatureHex string, chainID string, version uint32, options uint32) (*dataTx.Transaction, []byte, error) { - return nil, nil, nil - }, - SendBulkTransactionsHandler: func(txs []*dataTx.Transaction) (u uint64, err error) { - return 0, errors.New(errorString) - }, - ValidateTransactionHandler: func(tx *dataTx.Transaction) error { - return nil - }, - } - - transactionGroup, err := groups.NewTransactionGroup(&facade) - require.NoError(t, err) - - ws := startWebServer(transactionGroup, "transaction", getTransactionRoutesConfig()) - - jsonStr := fmt.Sprintf(`{"sender":"%s", "receiver":"%s", "value":"%s", "signature":"%s", "data":"%s"}`, - sender, - receiver, - value, - signature, - data, 
- ) - - req, _ := http.NewRequest("POST", "/transaction/send", bytes.NewBuffer([]byte(jsonStr))) - - resp := httptest.NewRecorder() - ws.ServeHTTP(resp, req) - - txResp := sendSingleTxResponse{} - loadResponse(resp.Body, &txResp) - - assert.Equal(t, http.StatusInternalServerError, resp.Code) - assert.Contains(t, txResp.Error, errorString) - assert.Empty(t, txResp.Data) -} - -func TestSendTransaction_ReturnsSuccessfully(t *testing.T) { - t.Parallel() - nonce := uint64(1) - sender := "sender" - receiver := "receiver" - value := big.NewInt(10) - data := "data" - signature := "aabbccdd" - hexTxHash := "deadbeef" - - facade := mock.FacadeStub{ - CreateTransactionHandler: func(nonce uint64, value string, receiver string, receiverUsername []byte, sender string, senderUsername []byte, gasPrice uint64, gasLimit uint64, data []byte, signatureHex string, chainID string, version uint32, options uint32) (*dataTx.Transaction, []byte, error) { - txHash, _ := hex.DecodeString(hexTxHash) - return nil, txHash, nil - }, - SendBulkTransactionsHandler: func(txs []*dataTx.Transaction) (u uint64, err error) { - return 1, nil - }, - ValidateTransactionHandler: func(tx *dataTx.Transaction) error { - return nil - }, - } - - transactionGroup, err := groups.NewTransactionGroup(&facade) - require.NoError(t, err) - - ws := startWebServer(transactionGroup, "transaction", getTransactionRoutesConfig()) - - jsonStr := fmt.Sprintf( - `{"nonce": %d, "sender": "%s", "receiver": "%s", "value": "%s", "signature": "%s", "data": "%s"}`, +var ( + sender = "sender" + receiver = "receiver" + value = "10" + txData = []byte("data") + hash = "hash" + guardian = "guardian" + signature = "aabbccdd" + expectedErr = errors.New("expected error") + nonce = uint64(1) + hexTxHash = "deadbeef" + jsonTxStr = fmt.Sprintf(`{"nonce": %d, "sender":"%s", "receiver":"%s", "value":"%s", "signature":"%s", "data":"%s"}`, nonce, sender, receiver, value, signature, - data, + txData, ) +) - req, _ := http.NewRequest("POST", "/transaction/send", bytes.NewBuffer([]byte(jsonStr))) - - resp := httptest.NewRecorder() - ws.ServeHTTP(resp, req) - - response := sendSingleTxResponse{} - loadResponse(resp.Body, &response) - - assert.Equal(t, http.StatusOK, resp.Code) - assert.Empty(t, response.Error) - assert.Equal(t, hexTxHash, response.Data.TxHash) -} - -func TestSendMultipleTransactions_ErrorWithExceededNumGoRoutines(t *testing.T) { - t.Parallel() - - facade := mock.FacadeStub{ - GetThrottlerForEndpointCalled: func(_ string) (core.Throttler, bool) { - return &mock.ThrottlerStub{ - CanProcessCalled: func() bool { return false }, - }, true - }, - } - - transactionGroup, err := groups.NewTransactionGroup(&facade) - require.NoError(t, err) - - ws := startWebServer(transactionGroup, "transaction", getTransactionRoutesConfig()) - - tx0 := groups.SendTxRequest{} - txs := []*groups.SendTxRequest{&tx0} - - jsonBytes, _ := json.Marshal(txs) - req, _ := http.NewRequest("POST", "/transaction/send-multiple", bytes.NewBuffer(jsonBytes)) - - resp := httptest.NewRecorder() - ws.ServeHTTP(resp, req) - - txResp := sendMultipleTxsResponse{} - loadResponse(resp.Body, &txResp) - - assert.Equal(t, http.StatusTooManyRequests, resp.Code) - assert.True(t, strings.Contains(txResp.Error, apiErrors.ErrTooManyRequests.Error())) - assert.Equal(t, string(shared.ReturnCodeSystemBusy), txResp.Code) - assert.Empty(t, txResp.Data) -} - -func TestSendMultipleTransactions_WrongPayloadShouldErrorOnValidation(t *testing.T) { - t.Parallel() - - facade := mock.FacadeStub{} - - transactionGroup, err := 
groups.NewTransactionGroup(&facade) - require.NoError(t, err) - - ws := startWebServer(transactionGroup, "transaction", getTransactionRoutesConfig()) - - jsonStr := `{"wrong": json}` - - req, _ := http.NewRequest("POST", "/transaction/send-multiple", bytes.NewBuffer([]byte(jsonStr))) - - resp := httptest.NewRecorder() - ws.ServeHTTP(resp, req) - - txResp := sendMultipleTxsResponse{} - loadResponse(resp.Body, &txResp) - - assert.Equal(t, http.StatusBadRequest, resp.Code) - assert.Contains(t, txResp.Error, apiErrors.ErrValidation.Error()) - assert.Empty(t, txResp.Data) -} - -func TestSendMultipleTransactions_OkPayloadShouldWork(t *testing.T) { - t.Parallel() - - createTxWasCalled := false - sendBulkTxsWasCalled := false - - facade := mock.FacadeStub{ - CreateTransactionHandler: func(nonce uint64, value string, receiver string, receiverUsername []byte, sender string, senderUsername []byte, gasPrice uint64, gasLimit uint64, data []byte, signatureHex string, chainID string, version uint32, options uint32) (*dataTx.Transaction, []byte, error) { - createTxWasCalled = true - return &dataTx.Transaction{}, make([]byte, 0), nil - }, - SendBulkTransactionsHandler: func(txs []*dataTx.Transaction) (u uint64, e error) { - sendBulkTxsWasCalled = true - return 0, nil - }, - ValidateTransactionHandler: func(tx *dataTx.Transaction) error { - return nil - }, - } - - transactionGroup, err := groups.NewTransactionGroup(&facade) - require.NoError(t, err) - - ws := startWebServer(transactionGroup, "transaction", getTransactionRoutesConfig()) - - tx0 := groups.SendTxRequest{ - Sender: "sender1", - Receiver: "receiver1", - Value: "100", - Data: make([]byte, 0), - Nonce: 0, - GasPrice: 0, - GasLimit: 0, - Signature: "", - } - tx1 := tx0 - tx1.Sender = "sender2" - txs := []*groups.SendTxRequest{&tx0, &tx1} - - jsonBytes, _ := json.Marshal(txs) - - req, _ := http.NewRequest("POST", "/transaction/send-multiple", bytes.NewBuffer(jsonBytes)) - - resp := httptest.NewRecorder() - ws.ServeHTTP(resp, req) - - txCostResp := sendMultipleTxsResponse{} - loadResponse(resp.Body, &txCostResp) - - assert.Equal(t, http.StatusOK, resp.Code) - assert.True(t, createTxWasCalled) - assert.True(t, sendBulkTxsWasCalled) -} - -func TestComputeTransactionGasLimit(t *testing.T) { - t.Parallel() - - expectedGasLimit := uint64(37) - - facade := mock.FacadeStub{ - CreateTransactionHandler: func(nonce uint64, value string, receiver string, receiverUsername []byte, sender string, senderUsername []byte, gasPrice uint64, gasLimit uint64, data []byte, signatureHex string, chainID string, version uint32, options uint32) (*dataTx.Transaction, []byte, error) { - return &dataTx.Transaction{}, nil, nil - }, - ComputeTransactionGasLimitHandler: func(tx *dataTx.Transaction) (*dataTx.CostResponse, error) { - return &dataTx.CostResponse{ - GasUnits: expectedGasLimit, - ReturnMessage: "", - }, nil - }, - } - - transactionGroup, err := groups.NewTransactionGroup(&facade) - require.NoError(t, err) - - ws := startWebServer(transactionGroup, "transaction", getTransactionRoutesConfig()) - - tx0 := groups.SendTxRequest{ - Sender: "sender1", - Receiver: "receiver1", - Value: "100", - Data: make([]byte, 0), - Nonce: 0, - GasPrice: 0, - GasLimit: 0, - Signature: "", - } - - jsonBytes, _ := json.Marshal(tx0) - - req, _ := http.NewRequest("POST", "/transaction/cost", bytes.NewBuffer(jsonBytes)) - - resp := httptest.NewRecorder() - ws.ServeHTTP(resp, req) - - txCostResp := transactionCostResponse{} - loadResponse(resp.Body, &txCostResp) - - assert.Equal(t, http.StatusOK, 
resp.Code) - assert.Equal(t, expectedGasLimit, txCostResp.Data.Cost) -} - -func TestSimulateTransaction_BadRequestShouldErr(t *testing.T) { - t.Parallel() - - facade := mock.FacadeStub{} - - transactionGroup, err := groups.NewTransactionGroup(&facade) - require.NoError(t, err) - - ws := startWebServer(transactionGroup, "transaction", getTransactionRoutesConfig()) - - req, _ := http.NewRequest("POST", "/transaction/simulate", bytes.NewBuffer([]byte("invalid bytes"))) - - resp := httptest.NewRecorder() - ws.ServeHTTP(resp, req) - - simulateResponse := simulateTxResponse{} - loadResponse(resp.Body, &simulateResponse) - - assert.Equal(t, http.StatusBadRequest, resp.Code) -} - -func TestSimulateTransaction_CreateErrorsShouldErr(t *testing.T) { - t.Parallel() - - processTxWasCalled := false - - expectedErr := errors.New("expected error") - facade := mock.FacadeStub{ - SimulateTransactionExecutionHandler: func(tx *dataTx.Transaction) (*txSimData.SimulationResults, error) { - processTxWasCalled = true - return &txSimData.SimulationResults{ - Status: "ok", - FailReason: "no reason", - ScResults: nil, - Receipts: nil, - Hash: "hash", - }, nil - }, - CreateTransactionHandler: func(nonce uint64, value string, receiver string, receiverUsername []byte, sender string, senderUsername []byte, gasPrice uint64, gasLimit uint64, data []byte, signatureHex string, chainID string, version uint32, options uint32) (*dataTx.Transaction, []byte, error) { - return nil, nil, expectedErr - }, - ValidateTransactionForSimulationHandler: func(tx *dataTx.Transaction, bypassSignature bool) error { - return nil - }, - } - - transactionGroup, err := groups.NewTransactionGroup(&facade) - require.NoError(t, err) - - ws := startWebServer(transactionGroup, "transaction", getTransactionRoutesConfig()) - - tx := groups.SendTxRequest{ - Sender: "sender1", - Receiver: "receiver1", - Value: "100", - Data: make([]byte, 0), - Nonce: 0, - GasPrice: 0, - GasLimit: 0, - Signature: "", - } - jsonBytes, _ := json.Marshal(tx) - - req, _ := http.NewRequest("POST", "/transaction/simulate", bytes.NewBuffer(jsonBytes)) - - resp := httptest.NewRecorder() - ws.ServeHTTP(resp, req) - - simulateResponse := simulateTxResponse{} - loadResponse(resp.Body, &simulateResponse) - - assert.Equal(t, http.StatusBadRequest, resp.Code) - assert.False(t, processTxWasCalled) - assert.Contains(t, simulateResponse.Error, expectedErr.Error()) -} - -func TestSimulateTransaction_ValidateErrorsShouldErr(t *testing.T) { - t.Parallel() - - processTxWasCalled := false - - expectedErr := errors.New("expected error") - facade := mock.FacadeStub{ - SimulateTransactionExecutionHandler: func(tx *dataTx.Transaction) (*txSimData.SimulationResults, error) { - processTxWasCalled = true - return &txSimData.SimulationResults{ - Status: "ok", - FailReason: "no reason", - ScResults: nil, - Receipts: nil, - Hash: "hash", - }, nil - }, - CreateTransactionHandler: func(nonce uint64, value string, receiver string, receiverUsername []byte, sender string, senderUsername []byte, gasPrice uint64, gasLimit uint64, data []byte, signatureHex string, chainID string, version uint32, options uint32) (*dataTx.Transaction, []byte, error) { - return &dataTx.Transaction{}, []byte("hash"), nil - }, - ValidateTransactionForSimulationHandler: func(tx *dataTx.Transaction, bypassSignature bool) error { - return expectedErr - }, - } - - transactionGroup, err := groups.NewTransactionGroup(&facade) - require.NoError(t, err) - - ws := startWebServer(transactionGroup, "transaction", getTransactionRoutesConfig()) 
- - tx := groups.SendTxRequest{ - Sender: "sender1", - Receiver: "receiver1", - Value: "100", - Data: make([]byte, 0), - Nonce: 0, - GasPrice: 0, - GasLimit: 0, - Signature: "", - } - jsonBytes, _ := json.Marshal(tx) - - req, _ := http.NewRequest("POST", "/transaction/simulate", bytes.NewBuffer(jsonBytes)) - - resp := httptest.NewRecorder() - ws.ServeHTTP(resp, req) - - simulateResponse := simulateTxResponse{} - loadResponse(resp.Body, &simulateResponse) - - assert.Equal(t, http.StatusBadRequest, resp.Code) - assert.False(t, processTxWasCalled) - assert.Contains(t, simulateResponse.Error, expectedErr.Error()) -} - -func TestSimulateTransaction_CannotParseParameterShouldErr(t *testing.T) { +func TestTransactionsGroup_getTransaction(t *testing.T) { t.Parallel() - facade := mock.FacadeStub{} - - transactionGroup, err := groups.NewTransactionGroup(&facade) - require.NoError(t, err) - - ws := startWebServer(transactionGroup, "transaction", getTransactionRoutesConfig()) - - tx := groups.SendTxRequest{ - Sender: "sender1", - Receiver: "receiver1", - Value: "100", - Data: make([]byte, 0), - Nonce: 0, - GasPrice: 0, - GasLimit: 0, - Signature: "", - } - jsonBytes, _ := json.Marshal(tx) + t.Run("number of go routines exceeded", testExceededNumGoRoutines("/transaction/eeee", nil)) + t.Run("invalid params should error", func(t *testing.T) { + t.Parallel() - req, _ := http.NewRequest("POST", "/transaction/simulate?checkSignature=tttt", bytes.NewBuffer(jsonBytes)) + facade := mock.FacadeStub{ + GetTransactionHandler: func(hash string, withEvents bool) (*dataTx.ApiTransactionResult, error) { + require.Fail(t, "should have not been called") + return &dataTx.ApiTransactionResult{}, nil + }, + } - resp := httptest.NewRecorder() - ws.ServeHTTP(resp, req) + transactionGroup, err := groups.NewTransactionGroup(&facade) + require.NoError(t, err) - simulateResponse := simulateTxResponse{} - loadResponse(resp.Body, &simulateResponse) + ws := startWebServer(transactionGroup, "transaction", getTransactionRoutesConfig()) - assert.Equal(t, http.StatusBadRequest, resp.Code) - assert.Equal(t, apiErrors.ErrValidation.Error(), simulateResponse.Error) -} + req, _ := http.NewRequest("GET", "/transaction/hash?withResults=not-bool", nil) + resp := httptest.NewRecorder() + ws.ServeHTTP(resp, req) -func TestSimulateTransaction_UseQueryParameterShouldWork(t *testing.T) { - t.Parallel() + txResp := transactionResponse{} + loadResponse(resp.Body, &txResp) - facade := mock.FacadeStub{ - ValidateTransactionForSimulationHandler: func(tx *dataTx.Transaction, bypassSignature bool) error { - assert.True(t, bypassSignature) - return nil - }, - CreateTransactionHandler: func(nonce uint64, value string, receiver string, receiverUsername []byte, sender string, senderUsername []byte, gasPrice uint64, gasLimit uint64, data []byte, signatureHex string, chainID string, version uint32, options uint32) (*dataTx.Transaction, []byte, error) { - return &dataTx.Transaction{}, []byte("hash"), nil - }, - SimulateTransactionExecutionHandler: func(tx *dataTx.Transaction) (*txSimData.SimulationResults, error) { - return &txSimData.SimulationResults{}, nil - }, - } + assert.Equal(t, http.StatusBadRequest, resp.Code) + assert.Empty(t, txResp.Data) + }) + t.Run("facade returns error should error", func(t *testing.T) { + t.Parallel() - transactionGroup, err := groups.NewTransactionGroup(&facade) - require.NoError(t, err) + facade := mock.FacadeStub{ + GetTransactionHandler: func(hash string, withEvents bool) (*dataTx.ApiTransactionResult, error) { + return 
nil, expectedErr + }, + } - ws := startWebServer(transactionGroup, "transaction", getTransactionRoutesConfig()) + transactionGroup, err := groups.NewTransactionGroup(&facade) + require.NoError(t, err) - tx := groups.SendTxRequest{ - Sender: "sender1", - Receiver: "receiver1", - Value: "100", - Data: make([]byte, 0), - Nonce: 0, - GasPrice: 0, - GasLimit: 0, - Signature: "", - } - jsonBytes, _ := json.Marshal(tx) + ws := startWebServer(transactionGroup, "transaction", getTransactionRoutesConfig()) - req, _ := http.NewRequest("POST", "/transaction/simulate?bypassSignature=true", bytes.NewBuffer(jsonBytes)) + req, _ := http.NewRequest("GET", "/transaction/hash", nil) + resp := httptest.NewRecorder() + ws.ServeHTTP(resp, req) - resp := httptest.NewRecorder() - ws.ServeHTTP(resp, req) + txResp := transactionResponse{} + loadResponse(resp.Body, &txResp) - simulateResponse := simulateTxResponse{} - loadResponse(resp.Body, &simulateResponse) + assert.Equal(t, http.StatusInternalServerError, resp.Code) + assert.Empty(t, txResp.Data) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() - assert.Equal(t, http.StatusOK, resp.Code) + facade := &mock.FacadeStub{ + GetTransactionHandler: func(hash string, withEvents bool) (i *dataTx.ApiTransactionResult, e error) { + return &dataTx.ApiTransactionResult{ + Sender: sender, + Receiver: receiver, + Data: txData, + Value: value, + GuardianAddr: guardian, + }, nil + }, + } + + response := &transactionResponse{} + loadTransactionGroupResponse( + t, + facade, + "/transaction/"+hash, + "GET", + nil, + response, + ) + txResp := response.Data.TxResp + assert.Equal(t, sender, txResp.Sender) + assert.Equal(t, receiver, txResp.Receiver) + assert.Equal(t, value, txResp.Value) + assert.Equal(t, txData, txResp.Data) + assert.Equal(t, guardian, txResp.GuardianAddr) + }) } -func TestSimulateTransaction_ProcessErrorsShouldErr(t *testing.T) { +func TestTransactionGroup_sendTransaction(t *testing.T) { t.Parallel() - expectedErr := errors.New("expected error") - facade := mock.FacadeStub{ - SimulateTransactionExecutionHandler: func(tx *dataTx.Transaction) (*txSimData.SimulationResults, error) { - return nil, expectedErr - }, - CreateTransactionHandler: func(nonce uint64, value string, receiver string, receiverUsername []byte, sender string, senderUsername []byte, gasPrice uint64, gasLimit uint64, data []byte, signatureHex string, chainID string, version uint32, options uint32) (*dataTx.Transaction, []byte, error) { - return &dataTx.Transaction{}, []byte("hash"), nil - }, - ValidateTransactionForSimulationHandler: func(tx *dataTx.Transaction, bypassSignature bool) error { - return nil - }, - } - - transactionGroup, err := groups.NewTransactionGroup(&facade) - require.NoError(t, err) - - ws := startWebServer(transactionGroup, "transaction", getTransactionRoutesConfig()) - - tx := groups.SendTxRequest{ - Sender: "sender1", - Receiver: "receiver1", - Value: "100", - Data: make([]byte, 0), - Nonce: 0, - GasPrice: 0, - GasLimit: 0, - Signature: "", - } - jsonBytes, _ := json.Marshal(tx) + t.Run("number of go routines exceeded", testExceededNumGoRoutines("/transaction/send", &groups.SendTxRequest{})) + t.Run("invalid params should error", testTransactionGroupErrorScenario("/transaction/send", "POST", jsonTxStr, http.StatusBadRequest, apiErrors.ErrValidation)) + t.Run("CreateTransaction error should error", func(t *testing.T) { + t.Parallel() - req, _ := http.NewRequest("POST", "/transaction/simulate", bytes.NewBuffer(jsonBytes)) + facade := &mock.FacadeStub{ + 
CreateTransactionHandler: func(txArgs *external.ArgsCreateTransaction) (*dataTx.Transaction, []byte, error) { + return nil, nil, expectedErr + }, + ValidateTransactionHandler: func(tx *dataTx.Transaction) error { + require.Fail(t, "should have not been called") + return nil + }, + } + testTransactionsGroup( + t, + facade, + "/transaction/send", + "POST", + &groups.SendTxRequest{}, + http.StatusBadRequest, + expectedErr, + ) + }) + t.Run("ValidateTransaction error should error", func(t *testing.T) { + t.Parallel() - resp := httptest.NewRecorder() - ws.ServeHTTP(resp, req) + facade := &mock.FacadeStub{ + CreateTransactionHandler: func(txArgs *external.ArgsCreateTransaction) (*dataTx.Transaction, []byte, error) { + return nil, nil, nil + }, + ValidateTransactionHandler: func(tx *dataTx.Transaction) error { + return expectedErr + }, + SendBulkTransactionsHandler: func(txs []*dataTx.Transaction) (u uint64, err error) { + require.Fail(t, "should have not been called") + return 0, nil + }, + } + testTransactionsGroup( + t, + facade, + "/transaction/send", + "POST", + &groups.SendTxRequest{}, + http.StatusBadRequest, + expectedErr, + ) + }) + t.Run("SendBulkTransactions error should error", func(t *testing.T) { + t.Parallel() - simulateResponse := simulateTxResponse{} - loadResponse(resp.Body, &simulateResponse) + facade := &mock.FacadeStub{ + CreateTransactionHandler: func(txArgs *external.ArgsCreateTransaction) (*dataTx.Transaction, []byte, error) { + return nil, nil, nil + }, + SendBulkTransactionsHandler: func(txs []*dataTx.Transaction) (u uint64, err error) { + return 0, expectedErr + }, + ValidateTransactionHandler: func(tx *dataTx.Transaction) error { + return nil + }, + } + testTransactionsGroup( + t, + facade, + "/transaction/send", + "POST", + &groups.SendTxRequest{}, + http.StatusInternalServerError, + expectedErr, + ) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() - assert.Equal(t, http.StatusInternalServerError, resp.Code) - assert.Contains(t, simulateResponse.Error, expectedErr.Error()) + facade := &mock.FacadeStub{ + CreateTransactionHandler: func(txArgs *external.ArgsCreateTransaction) (*dataTx.Transaction, []byte, error) { + txHash, _ := hex.DecodeString(hexTxHash) + return nil, txHash, nil + }, + SendBulkTransactionsHandler: func(txs []*dataTx.Transaction) (u uint64, err error) { + return 1, nil + }, + ValidateTransactionHandler: func(tx *dataTx.Transaction) error { + return nil + }, + } + + response := &sendSingleTxResponse{} + loadTransactionGroupResponse( + t, + facade, + "/transaction/send", + "POST", + bytes.NewBuffer([]byte(jsonTxStr)), + response, + ) + assert.Empty(t, response.Error) + assert.Equal(t, hexTxHash, response.Data.TxHash) + }) } -func TestSimulateTransaction(t *testing.T) { +func TestTransactionGroup_sendMultipleTransactions(t *testing.T) { t.Parallel() - processTxWasCalled := false - - facade := mock.FacadeStub{ - SimulateTransactionExecutionHandler: func(tx *dataTx.Transaction) (*txSimData.SimulationResults, error) { - processTxWasCalled = true - return &txSimData.SimulationResults{ - Status: "ok", - FailReason: "no reason", - ScResults: nil, - Receipts: nil, - Hash: "hash", - }, nil - }, - CreateTransactionHandler: func(nonce uint64, value string, receiver string, receiverUsername []byte, sender string, senderUsername []byte, gasPrice uint64, gasLimit uint64, data []byte, signatureHex string, chainID string, version uint32, options uint32) (*dataTx.Transaction, []byte, error) { - return &dataTx.Transaction{}, []byte("hash"), nil - }, - 
ValidateTransactionForSimulationHandler: func(tx *dataTx.Transaction, bypassSignature bool) error { - return nil - }, - } - - transactionGroup, err := groups.NewTransactionGroup(&facade) - require.NoError(t, err) - - ws := startWebServer(transactionGroup, "transaction", getTransactionRoutesConfig()) + t.Run("number of go routines exceeded", testExceededNumGoRoutines("/transaction/send-multiple", &groups.SendTxRequest{})) + t.Run("invalid params should error", testTransactionGroupErrorScenario("/transaction/send-multiple", "POST", jsonTxStr, http.StatusBadRequest, apiErrors.ErrValidation)) + t.Run("CreateTransaction error should continue, error on SendBulkTransactions", func(t *testing.T) { + t.Parallel() - tx := groups.SendTxRequest{ - Sender: "sender1", - Receiver: "receiver1", - Value: "100", - Data: make([]byte, 0), - Nonce: 0, - GasPrice: 0, - GasLimit: 0, - Signature: "", - } - jsonBytes, _ := json.Marshal(tx) + facade := &mock.FacadeStub{ + CreateTransactionHandler: func(txArgs *external.ArgsCreateTransaction) (*dataTx.Transaction, []byte, error) { + return nil, nil, expectedErr + }, + ValidateTransactionHandler: func(tx *dataTx.Transaction) error { + require.Fail(t, "should not have been called") + return nil + }, + SendBulkTransactionsHandler: func(txs []*dataTx.Transaction) (uint64, error) { + require.Zero(t, len(txs)) + return 0, expectedErr + }, + } + testTransactionsGroup( + t, + facade, + "/transaction/send-multiple", + "POST", + []*groups.SendTxRequest{{}}, + http.StatusInternalServerError, + expectedErr, + ) + }) + t.Run("ValidateTransaction error should continue, error on SendBulkTransactions", func(t *testing.T) { + t.Parallel() - req, _ := http.NewRequest("POST", "/transaction/simulate", bytes.NewBuffer(jsonBytes)) + facade := &mock.FacadeStub{ + CreateTransactionHandler: func(txArgs *external.ArgsCreateTransaction) (*dataTx.Transaction, []byte, error) { + return nil, nil, nil + }, + ValidateTransactionHandler: func(tx *dataTx.Transaction) error { + return expectedErr + }, + SendBulkTransactionsHandler: func(txs []*dataTx.Transaction) (uint64, error) { + require.Zero(t, len(txs)) + return 0, expectedErr + }, + } + testTransactionsGroup( + t, + facade, + "/transaction/send-multiple", + "POST", + []*groups.SendTxRequest{{}}, + http.StatusInternalServerError, + expectedErr, + ) + }) + t.Run("SendBulkTransactions error should error", func(t *testing.T) { + t.Parallel() - resp := httptest.NewRecorder() - ws.ServeHTTP(resp, req) + facade := &mock.FacadeStub{ + CreateTransactionHandler: func(txArgs *external.ArgsCreateTransaction) (*dataTx.Transaction, []byte, error) { + return nil, nil, nil + }, + ValidateTransactionHandler: func(tx *dataTx.Transaction) error { + return nil + }, + SendBulkTransactionsHandler: func(txs []*dataTx.Transaction) (uint64, error) { + require.Equal(t, 1, len(txs)) + return 0, expectedErr + }, + } + testTransactionsGroup( + t, + facade, + "/transaction/send-multiple", + "POST", + []*groups.SendTxRequest{{}}, + http.StatusInternalServerError, + expectedErr, + ) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() - simulateResponse := simulateTxResponse{} - loadResponse(resp.Body, &simulateResponse) + createTxWasCalled := false + sendBulkTxsWasCalled := false - assert.Equal(t, http.StatusOK, resp.Code) - assert.True(t, processTxWasCalled) - assert.Equal(t, string(shared.ReturnCodeSuccess), simulateResponse.Code) + facade := &mock.FacadeStub{ + CreateTransactionHandler: func(txArgs *external.ArgsCreateTransaction) (*dataTx.Transaction, []byte, error) 
{ + createTxWasCalled = true + return &dataTx.Transaction{}, make([]byte, 0), nil + }, + SendBulkTransactionsHandler: func(txs []*dataTx.Transaction) (u uint64, e error) { + sendBulkTxsWasCalled = true + return 0, nil + }, + ValidateTransactionHandler: func(tx *dataTx.Transaction) error { + return nil + }, + } + + tx0 := groups.SendTxRequest{ + Sender: "sender1", + Receiver: "receiver1", + Value: "100", + Data: make([]byte, 0), + Nonce: 0, + GasPrice: 0, + GasLimit: 0, + Signature: "", + } + tx1 := tx0 + tx1.Sender = "sender2" + txs := []*groups.SendTxRequest{&tx0, &tx1} + + jsonBytes, _ := json.Marshal(txs) + + response := &sendMultipleTxsResponse{} + loadTransactionGroupResponse( + t, + facade, + "/transaction/send-multiple", + "POST", + bytes.NewBuffer(jsonBytes), + response, + ) + assert.True(t, createTxWasCalled) + assert.True(t, sendBulkTxsWasCalled) + }) } -func TestGetTransactionsPoolShouldError(t *testing.T) { +func TestTransactionGroup_computeTransactionGasLimit(t *testing.T) { t.Parallel() - expectedErr := errors.New("expected error") - facade := mock.FacadeStub{ - GetTransactionsPoolCalled: func(fields string) (*common.TransactionsPoolAPIResponse, error) { - return nil, expectedErr - }, - } - - transactionGroup, err := groups.NewTransactionGroup(&facade) - require.NoError(t, err) - - ws := startWebServer(transactionGroup, "transaction", getTransactionRoutesConfig()) + t.Run("invalid params should error", testTransactionGroupErrorScenario("/transaction/cost", "POST", jsonTxStr, http.StatusBadRequest, apiErrors.ErrValidation)) + t.Run("CreateTransaction error should error", func(t *testing.T) { + t.Parallel() - req, _ := http.NewRequest("GET", "/transaction/pool", nil) + facade := &mock.FacadeStub{ + CreateTransactionHandler: func(txArgs *external.ArgsCreateTransaction) (*dataTx.Transaction, []byte, error) { + return nil, nil, expectedErr + }, + ComputeTransactionGasLimitHandler: func(tx *dataTx.Transaction) (*dataTx.CostResponse, error) { + require.Fail(t, "should not have been called") + return nil, nil + }, + } + testTransactionsGroup( + t, + facade, + "/transaction/cost", + "POST", + &groups.SendTxRequest{}, + http.StatusInternalServerError, + expectedErr, + ) + }) + t.Run("ComputeTransactionGasLimit error should error", func(t *testing.T) { + t.Parallel() - resp := httptest.NewRecorder() - ws.ServeHTTP(resp, req) + facade := &mock.FacadeStub{ + CreateTransactionHandler: func(txArgs *external.ArgsCreateTransaction) (*dataTx.Transaction, []byte, error) { + return nil, nil, nil + }, + ComputeTransactionGasLimitHandler: func(tx *dataTx.Transaction) (*dataTx.CostResponse, error) { + return nil, expectedErr + }, + } + testTransactionsGroup( + t, + facade, + "/transaction/cost", + "POST", + &groups.SendTxRequest{}, + http.StatusInternalServerError, + expectedErr, + ) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() - txsPoolResp := generalResponse{} - loadResponse(resp.Body, &txsPoolResp) + expectedGasLimit := uint64(37) - assert.Equal(t, http.StatusInternalServerError, resp.Code) - assert.True(t, strings.Contains(txsPoolResp.Error, expectedErr.Error())) + facade := &mock.FacadeStub{ + CreateTransactionHandler: func(txArgs *external.ArgsCreateTransaction) (*dataTx.Transaction, []byte, error) { + return &dataTx.Transaction{}, nil, nil + }, + ComputeTransactionGasLimitHandler: func(tx *dataTx.Transaction) (*dataTx.CostResponse, error) { + return &dataTx.CostResponse{ + GasUnits: expectedGasLimit, + ReturnMessage: "", + }, nil + }, + } + + tx0 := groups.SendTxRequest{ + 
Sender: "sender1", + Receiver: "receiver1", + Value: "100", + Data: make([]byte, 0), + Nonce: 0, + GasPrice: 0, + GasLimit: 0, + Signature: "", + } + + jsonBytes, _ := json.Marshal(tx0) + + response := &transactionCostResponse{} + loadTransactionGroupResponse( + t, + facade, + "/transaction/cost", + "POST", + bytes.NewBuffer(jsonBytes), + response, + ) + assert.Equal(t, expectedGasLimit, response.Data.Cost) + }) } -func TestGetTransactionsPoolShouldWork(t *testing.T) { +func TestTransactionGroup_simulateTransaction(t *testing.T) { t.Parallel() - expectedTxPool := &common.TransactionsPoolAPIResponse{ - RegularTransactions: []common.Transaction{ - { - TxFields: map[string]interface{}{ - "hash": "tx", - }, + t.Run("number of go routines exceeded", testExceededNumGoRoutines("/transaction/simulate", &groups.SendTxRequest{})) + t.Run("invalid param transaction should error", testTransactionGroupErrorScenario("/transaction/simulate", "POST", jsonTxStr, http.StatusBadRequest, apiErrors.ErrValidation)) + t.Run("invalid param checkSignature should error", testTransactionGroupErrorScenario("/transaction/simulate?checkSignature=not-bool", "POST", &groups.SendTxRequest{}, http.StatusBadRequest, apiErrors.ErrValidation)) + t.Run("CreateTransaction error should error", func(t *testing.T) { + t.Parallel() + + facade := &mock.FacadeStub{ + CreateTransactionHandler: func(txArgs *external.ArgsCreateTransaction) (*dataTx.Transaction, []byte, error) { + return nil, nil, expectedErr }, - { - TxFields: map[string]interface{}{ - "hash": "tx2", - }, + ValidateTransactionForSimulationHandler: func(tx *dataTx.Transaction, bypassSignature bool) error { + require.Fail(t, "should have not been called") + return nil }, - }, - } - facade := mock.FacadeStub{ - GetTransactionsPoolCalled: func(fields string) (*common.TransactionsPoolAPIResponse, error) { - return expectedTxPool, nil - }, - } - - transactionGroup, err := groups.NewTransactionGroup(&facade) - require.NoError(t, err) - - ws := startWebServer(transactionGroup, "transaction", getTransactionRoutesConfig()) - - req, _ := http.NewRequest("GET", "/transaction/pool", nil) + } + testTransactionsGroup( + t, + facade, + "/transaction/simulate", + "POST", + &groups.SendTxRequest{}, + http.StatusBadRequest, + expectedErr, + ) + }) + t.Run("ValidateTransactionForSimulation error should error", func(t *testing.T) { + t.Parallel() - resp := httptest.NewRecorder() - ws.ServeHTTP(resp, req) + facade := &mock.FacadeStub{ + CreateTransactionHandler: func(txArgs *external.ArgsCreateTransaction) (*dataTx.Transaction, []byte, error) { + return nil, nil, nil + }, + ValidateTransactionForSimulationHandler: func(tx *dataTx.Transaction, bypassSignature bool) error { + return expectedErr + }, + SimulateTransactionExecutionHandler: func(tx *dataTx.Transaction) (*txSimData.SimulationResults, error) { + require.Fail(t, "should have not been called") + return nil, nil + }, + } + testTransactionsGroup( + t, + facade, + "/transaction/simulate", + "POST", + &groups.SendTxRequest{}, + http.StatusBadRequest, + expectedErr, + ) + }) + t.Run("SimulateTransactionExecution error should error", func(t *testing.T) { + t.Parallel() - txsPoolResp := txsPoolResponse{} - loadResponse(resp.Body, &txsPoolResp) + facade := &mock.FacadeStub{ + CreateTransactionHandler: func(txArgs *external.ArgsCreateTransaction) (*dataTx.Transaction, []byte, error) { + return nil, nil, nil + }, + ValidateTransactionForSimulationHandler: func(tx *dataTx.Transaction, bypassSignature bool) error { + return nil + }, + 
SimulateTransactionExecutionHandler: func(tx *dataTx.Transaction) (*txSimData.SimulationResults, error) { + return nil, expectedErr + }, + } + testTransactionsGroup( + t, + facade, + "/transaction/simulate", + "POST", + &groups.SendTxRequest{}, + http.StatusInternalServerError, + expectedErr, + ) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() - assert.Equal(t, http.StatusOK, resp.Code) - assert.Empty(t, txsPoolResp.Error) - assert.Equal(t, *expectedTxPool, txsPoolResp.Data.TxPool) + processTxWasCalled := false + + facade := &mock.FacadeStub{ + SimulateTransactionExecutionHandler: func(tx *dataTx.Transaction) (*txSimData.SimulationResults, error) { + processTxWasCalled = true + return &txSimData.SimulationResults{ + Status: "ok", + FailReason: "no reason", + ScResults: nil, + Receipts: nil, + Hash: "hash", + }, nil + }, + CreateTransactionHandler: func(txArgs *external.ArgsCreateTransaction) (*dataTx.Transaction, []byte, error) { + return &dataTx.Transaction{}, []byte("hash"), nil + }, + ValidateTransactionForSimulationHandler: func(tx *dataTx.Transaction, bypassSignature bool) error { + return nil + }, + } + + tx := groups.SendTxRequest{ + Sender: "sender1", + Receiver: "receiver1", + Value: "100", + Data: make([]byte, 0), + Nonce: 0, + GasPrice: 0, + GasLimit: 0, + Signature: "", + } + jsonBytes, _ := json.Marshal(tx) + + response := &simulateTxResponse{} + loadTransactionGroupResponse( + t, + facade, + "/transaction/simulate", + "POST", + bytes.NewBuffer(jsonBytes), + response, + ) + assert.True(t, processTxWasCalled) + assert.Equal(t, string(shared.ReturnCodeSuccess), response.Code) + }) } -func TestGetTransactionsPoolForSenderShouldError(t *testing.T) { +func TestTransactionGroup_getTransactionsPool(t *testing.T) { t.Parallel() - query := "?by-sender=sender" - expectedErr := errors.New("expected error") - facade := mock.FacadeStub{ - GetTransactionsPoolForSenderCalled: func(sender, fields string) (*common.TransactionsPoolForSenderApiResponse, error) { - return nil, expectedErr - }, - } - - transactionGroup, err := groups.NewTransactionGroup(&facade) - require.NoError(t, err) - - ws := startWebServer(transactionGroup, "transaction", getTransactionRoutesConfig()) + t.Run("number of go routines exceeded", testExceededNumGoRoutines("/transaction/pool", nil)) + t.Run("invalid last-nonce param should error", testTransactionGroupErrorScenario("/transaction/pool?last-nonce=not-bool", "GET", nil, http.StatusBadRequest, apiErrors.ErrValidation)) + t.Run("invalid nonce-gaps param should error", testTransactionGroupErrorScenario("/transaction/pool?nonce-gaps=not-bool", "GET", nil, http.StatusBadRequest, apiErrors.ErrValidation)) + t.Run("empty sender, requesting latest nonce", testTxPoolWithInvalidQuery("?last-nonce=true", apiErrors.ErrEmptySenderToGetLatestNonce)) + t.Run("empty sender, requesting nonce gaps", testTxPoolWithInvalidQuery("?nonce-gaps=true", apiErrors.ErrEmptySenderToGetNonceGaps)) + t.Run("fields + latest nonce", testTxPoolWithInvalidQuery("?fields=sender,receiver&last-nonce=true", apiErrors.ErrFetchingLatestNonceCannotIncludeFields)) + t.Run("fields + nonce gaps", testTxPoolWithInvalidQuery("?fields=sender,receiver&nonce-gaps=true", apiErrors.ErrFetchingNonceGapsCannotIncludeFields)) + t.Run("fields has spaces", testTxPoolWithInvalidQuery("?fields=sender ,receiver", apiErrors.ErrInvalidFields)) + t.Run("fields has numbers", testTxPoolWithInvalidQuery("?fields=sender1", apiErrors.ErrInvalidFields)) + t.Run("GetTransactionsPool error should error", func(t 
*testing.T) { + t.Parallel() - req, _ := http.NewRequest("GET", "/transaction/pool"+query, nil) + facade := &mock.FacadeStub{ + GetTransactionsPoolCalled: func(fields string) (*common.TransactionsPoolAPIResponse, error) { + return nil, expectedErr + }, + } + testTransactionsGroup( + t, + facade, + "/transaction/pool", + "GET", + nil, + http.StatusInternalServerError, + expectedErr, + ) + }) + t.Run("GetTransactionsPoolForSender error should error", func(t *testing.T) { + t.Parallel() - resp := httptest.NewRecorder() - ws.ServeHTTP(resp, req) + facade := &mock.FacadeStub{ + GetTransactionsPoolForSenderCalled: func(sender, fields string) (*common.TransactionsPoolForSenderApiResponse, error) { + return nil, expectedErr + }, + } + testTransactionsGroup( + t, + facade, + "/transaction/pool?by-sender=sender", + "GET", + nil, + http.StatusInternalServerError, + expectedErr, + ) + }) + t.Run("GetLastPoolNonceForSender error should error", func(t *testing.T) { + t.Parallel() - txsForSenderResp := poolForSenderResponse{} - loadResponse(resp.Body, &txsForSenderResp) + facade := &mock.FacadeStub{ + GetLastPoolNonceForSenderCalled: func(sender string) (uint64, error) { + return 0, expectedErr + }, + } + testTransactionsGroup( + t, + facade, + "/transaction/pool?by-sender=sender&last-nonce=true", + "GET", + nil, + http.StatusInternalServerError, + expectedErr, + ) + }) + t.Run("GetTransactionsPoolNonceGapsForSender error should error", func(t *testing.T) { + t.Parallel() - assert.Equal(t, http.StatusInternalServerError, resp.Code) - assert.True(t, strings.Contains(txsForSenderResp.Error, expectedErr.Error())) -} + facade := &mock.FacadeStub{ + GetTransactionsPoolNonceGapsForSenderCalled: func(sender string) (*common.TransactionsPoolNonceGapsForSenderApiResponse, error) { + return nil, expectedErr + }, + } + testTransactionsGroup( + t, + facade, + "/transaction/pool?by-sender=sender&nonce-gaps=true", + "GET", + nil, + http.StatusInternalServerError, + expectedErr, + ) + }) -func TestGetTransactionsPoolForSenderShouldWork(t *testing.T) { - t.Parallel() + t.Run("should work", func(t *testing.T) { + t.Parallel() - expectedSender := "sender" - providedFields := "sender,receiver" - query := "?by-sender=" + expectedSender + "&fields=" + providedFields - expectedResp := &common.TransactionsPoolForSenderApiResponse{ - Transactions: []common.Transaction{ - { - TxFields: map[string]interface{}{ - "hash": "txHash1", - "sender": expectedSender, - "receiver": "receiver1", + expectedTxPool := &common.TransactionsPoolAPIResponse{ + RegularTransactions: []common.Transaction{ + { + TxFields: map[string]interface{}{ + "hash": "tx", + }, }, - }, - { - TxFields: map[string]interface{}{ - "hash": "txHash2", - "sender": expectedSender, - "receiver": "receiver2", + { + TxFields: map[string]interface{}{ + "hash": "tx2", + }, }, }, - }, - } - facade := mock.FacadeStub{ - GetTransactionsPoolForSenderCalled: func(sender, fields string) (*common.TransactionsPoolForSenderApiResponse, error) { - return expectedResp, nil - }, - } - - transactionGroup, err := groups.NewTransactionGroup(&facade) - require.NoError(t, err) - - ws := startWebServer(transactionGroup, "transaction", getTransactionRoutesConfig()) - - req, _ := http.NewRequest("GET", "/transaction/pool"+query, nil) + } + facade := &mock.FacadeStub{ + GetTransactionsPoolCalled: func(fields string) (*common.TransactionsPoolAPIResponse, error) { + return expectedTxPool, nil + }, + } + + response := &txsPoolResponse{} + loadTransactionGroupResponse( + t, + facade, + 
"/transaction/pool", + "GET", + nil, + response, + ) + assert.Empty(t, response.Error) + assert.Equal(t, *expectedTxPool, response.Data.TxPool) + }) + t.Run("should work for sender", func(t *testing.T) { + t.Parallel() - resp := httptest.NewRecorder() - ws.ServeHTTP(resp, req) + expectedSender := "sender" + providedFields := "sender,receiver" + query := "?by-sender=" + expectedSender + "&fields=" + providedFields + expectedResp := &common.TransactionsPoolForSenderApiResponse{ + Transactions: []common.Transaction{ + { + TxFields: map[string]interface{}{ + "hash": "txHash1", + "sender": expectedSender, + "receiver": "receiver1", + }, + }, + { + TxFields: map[string]interface{}{ + "hash": "txHash2", + "sender": expectedSender, + "receiver": "receiver2", + }, + }, + }, + } + facade := &mock.FacadeStub{ + GetTransactionsPoolForSenderCalled: func(sender, fields string) (*common.TransactionsPoolForSenderApiResponse, error) { + return expectedResp, nil + }, + } + + response := &poolForSenderResponse{} + loadTransactionGroupResponse( + t, + facade, + "/transaction/pool"+query, + "GET", + nil, + response, + ) + assert.Empty(t, response.Error) + assert.Equal(t, *expectedResp, response.Data.TxPool) + }) + t.Run("should work for last pool nonce", func(t *testing.T) { + t.Parallel() - txsForSenderResp := poolForSenderResponse{} - loadResponse(resp.Body, &txsForSenderResp) + expectedSender := "sender" + query := "?by-sender=" + expectedSender + "&last-nonce=true" + expectedNonce := uint64(33) + facade := &mock.FacadeStub{ + GetLastPoolNonceForSenderCalled: func(sender string) (uint64, error) { + return expectedNonce, nil + }, + } + + response := &lastPoolNonceForSenderResponse{} + loadTransactionGroupResponse( + t, + facade, + "/transaction/pool"+query, + "GET", + nil, + response, + ) + assert.Empty(t, response.Error) + assert.Equal(t, expectedNonce, response.Data.Nonce) + }) + t.Run("should work for nonce gaps", func(t *testing.T) { + t.Parallel() - assert.Equal(t, http.StatusOK, resp.Code) - assert.Empty(t, txsForSenderResp.Error) - assert.Equal(t, *expectedResp, txsForSenderResp.Data.TxPool) + expectedSender := "sender" + query := "?by-sender=" + expectedSender + "&nonce-gaps=true" + expectedNonceGaps := &common.TransactionsPoolNonceGapsForSenderApiResponse{ + Sender: expectedSender, + Gaps: []common.NonceGapApiResponse{ + { + From: 33, + To: 60, + }, + }, + } + facade := &mock.FacadeStub{ + GetTransactionsPoolNonceGapsForSenderCalled: func(sender string) (*common.TransactionsPoolNonceGapsForSenderApiResponse, error) { + return expectedNonceGaps, nil + }, + } + + response := &txPoolNonceGapsForSenderResponse{} + loadTransactionGroupResponse( + t, + facade, + "/transaction/pool"+query, + "GET", + nil, + response, + ) + assert.Empty(t, response.Error) + assert.Equal(t, *expectedNonceGaps, response.Data.NonceGaps) + }) } -func TestGetLastPoolNonceForSenderShouldError(t *testing.T) { - t.Parallel() - - query := "?by-sender=sender&last-nonce=true" - expectedErr := errors.New("expected error") - facade := mock.FacadeStub{ - GetLastPoolNonceForSenderCalled: func(sender string) (uint64, error) { - return 0, expectedErr - }, - } +func testTxPoolWithInvalidQuery(query string, expectedErr error) func(t *testing.T) { + return func(t *testing.T) { + t.Parallel() - transactionGroup, err := groups.NewTransactionGroup(&facade) - require.NoError(t, err) + transactionGroup, err := groups.NewTransactionGroup(&mock.FacadeStub{}) + require.NoError(t, err) - ws := startWebServer(transactionGroup, "transaction", 
getTransactionRoutesConfig()) + ws := startWebServer(transactionGroup, "transaction", getTransactionRoutesConfig()) - req, _ := http.NewRequest("GET", "/transaction/pool"+query, nil) + req, _ := http.NewRequest("GET", "/transaction/pool"+query, nil) - resp := httptest.NewRecorder() - ws.ServeHTTP(resp, req) + resp := httptest.NewRecorder() + ws.ServeHTTP(resp, req) - lastPoolNonceResp := lastPoolNonceForSenderResponse{} - loadResponse(resp.Body, &lastPoolNonceResp) + txResp := &transactionResponse{} + loadResponse(resp.Body, txResp) - assert.Equal(t, http.StatusInternalServerError, resp.Code) - assert.True(t, strings.Contains(lastPoolNonceResp.Error, expectedErr.Error())) + assert.Equal(t, http.StatusBadRequest, resp.Code) + assert.True(t, strings.Contains(txResp.Error, apiErrors.ErrValidation.Error())) + assert.True(t, strings.Contains(txResp.Error, expectedErr.Error())) + } } -func TestGetLastPoolNonceForSenderShouldWork(t *testing.T) { +func TestTransactionsGroup_UpdateFacade(t *testing.T) { t.Parallel() - expectedSender := "sender" - query := "?by-sender=" + expectedSender + "&last-nonce=true" - expectedNonce := uint64(33) - facade := mock.FacadeStub{ - GetLastPoolNonceForSenderCalled: func(sender string) (uint64, error) { - return expectedNonce, nil - }, - } - - transactionGroup, err := groups.NewTransactionGroup(&facade) - require.NoError(t, err) + t.Run("nil facade should error", func(t *testing.T) { + t.Parallel() - ws := startWebServer(transactionGroup, "transaction", getTransactionRoutesConfig()) + transactionGroup, err := groups.NewTransactionGroup(&mock.FacadeStub{}) + require.NoError(t, err) - req, _ := http.NewRequest("GET", "/transaction/pool"+query, nil) + err = transactionGroup.UpdateFacade(nil) + require.Equal(t, apiErrors.ErrNilFacadeHandler, err) + }) + t.Run("cast failure should error", func(t *testing.T) { + t.Parallel() - resp := httptest.NewRecorder() - ws.ServeHTTP(resp, req) + transactionGroup, err := groups.NewTransactionGroup(&mock.FacadeStub{}) + require.NoError(t, err) - lastPoolNonceResp := lastPoolNonceForSenderResponse{} - loadResponse(resp.Body, &lastPoolNonceResp) + err = transactionGroup.UpdateFacade("this is not a facade handler") + require.True(t, errors.Is(err, apiErrors.ErrFacadeWrongTypeAssertion)) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() - assert.Equal(t, http.StatusOK, resp.Code) - assert.Empty(t, lastPoolNonceResp.Error) - assert.Equal(t, expectedNonce, lastPoolNonceResp.Data.Nonce) -} + expectedTxPool := &common.TransactionsPoolAPIResponse{ + RegularTransactions: []common.Transaction{ + { + TxFields: map[string]interface{}{ + "hash": "tx", + }, + }, + }, + } + facade := mock.FacadeStub{ + GetTransactionsPoolCalled: func(fields string) (*common.TransactionsPoolAPIResponse, error) { + return expectedTxPool, nil + }, + } -func TestGetTransactionsPoolNonceGapsForSenderShouldError(t *testing.T) { - t.Parallel() + transactionGroup, err := groups.NewTransactionGroup(&facade) + require.NoError(t, err) - query := "?by-sender=sender&nonce-gaps=true" - expectedErr := errors.New("expected error") - facade := mock.FacadeStub{ - GetTransactionsPoolNonceGapsForSenderCalled: func(sender string) (*common.TransactionsPoolNonceGapsForSenderApiResponse, error) { - return nil, expectedErr - }, - } + ws := startWebServer(transactionGroup, "transaction", getTransactionRoutesConfig()) - transactionGroup, err := groups.NewTransactionGroup(&facade) - require.NoError(t, err) + req, _ := http.NewRequest("GET", "/transaction/pool", nil) + resp := 
httptest.NewRecorder() + ws.ServeHTTP(resp, req) - ws := startWebServer(transactionGroup, "transaction", getTransactionRoutesConfig()) + txsPoolResp := txsPoolResponse{} + loadResponse(resp.Body, &txsPoolResp) + assert.Equal(t, http.StatusOK, resp.Code) + assert.Empty(t, txsPoolResp.Error) + assert.Equal(t, *expectedTxPool, txsPoolResp.Data.TxPool) - req, _ := http.NewRequest("GET", "/transaction/pool"+query, nil) + newFacade := mock.FacadeStub{ + GetTransactionsPoolCalled: func(fields string) (*common.TransactionsPoolAPIResponse, error) { + return nil, expectedErr + }, + } - resp := httptest.NewRecorder() - ws.ServeHTTP(resp, req) + err = transactionGroup.UpdateFacade(&newFacade) + require.NoError(t, err) - nonceGapsResp := txPoolNonceGapsForSenderResponse{} - loadResponse(resp.Body, &nonceGapsResp) + req, _ = http.NewRequest("GET", "/transaction/pool", nil) + resp = httptest.NewRecorder() + ws.ServeHTTP(resp, req) - assert.Equal(t, http.StatusInternalServerError, resp.Code) - assert.True(t, strings.Contains(nonceGapsResp.Error, expectedErr.Error())) + loadResponse(resp.Body, &txsPoolResp) + assert.Equal(t, http.StatusInternalServerError, resp.Code) + assert.True(t, strings.Contains(txsPoolResp.Error, expectedErr.Error())) + }) } -func TestGetTransactionsPoolNonceGapsForSenderShouldWork(t *testing.T) { +func TestTransactionsGroup_IsInterfaceNil(t *testing.T) { t.Parallel() - expectedSender := "sender" - query := "?by-sender=" + expectedSender + "&nonce-gaps=true" - expectedNonceGaps := &common.TransactionsPoolNonceGapsForSenderApiResponse{ - Sender: expectedSender, - Gaps: []common.NonceGapApiResponse{ - { - From: 33, - To: 60, - }, - }, - } - facade := mock.FacadeStub{ - GetTransactionsPoolNonceGapsForSenderCalled: func(sender string) (*common.TransactionsPoolNonceGapsForSenderApiResponse, error) { - return expectedNonceGaps, nil - }, - } + transactionGroup, _ := groups.NewTransactionGroup(nil) + require.True(t, transactionGroup.IsInterfaceNil()) + + transactionGroup, _ = groups.NewTransactionGroup(&mock.FacadeStub{}) + require.False(t, transactionGroup.IsInterfaceNil()) +} - transactionGroup, err := groups.NewTransactionGroup(&facade) +func loadTransactionGroupResponse( + t *testing.T, + facade shared.FacadeHandler, + url string, + method string, + body io.Reader, + destination interface{}, +) { + transactionGroup, err := groups.NewTransactionGroup(facade) require.NoError(t, err) ws := startWebServer(transactionGroup, "transaction", getTransactionRoutesConfig()) - req, _ := http.NewRequest("GET", "/transaction/pool"+query, nil) - + req, _ := http.NewRequest(method, url, body) resp := httptest.NewRecorder() ws.ServeHTTP(resp, req) - nonceGapsResp := txPoolNonceGapsForSenderResponse{} - loadResponse(resp.Body, &nonceGapsResp) - assert.Equal(t, http.StatusOK, resp.Code) - assert.Empty(t, nonceGapsResp.Error) - assert.Equal(t, *expectedNonceGaps, nonceGapsResp.Data.NonceGaps) -} - -func TestGetTransactionsPoolInvalidQueries(t *testing.T) { - t.Parallel() - t.Run("empty sender, requesting latest nonce", testTxPoolWithInvalidQuery("?last-nonce=true", apiErrors.ErrEmptySenderToGetLatestNonce)) - t.Run("empty sender, requesting nonce gaps", testTxPoolWithInvalidQuery("?nonce-gaps=true", apiErrors.ErrEmptySenderToGetNonceGaps)) - t.Run("fields + latest nonce", testTxPoolWithInvalidQuery("?fields=sender,receiver&last-nonce=true", apiErrors.ErrFetchingLatestNonceCannotIncludeFields)) - t.Run("fields + nonce gaps", testTxPoolWithInvalidQuery("?fields=sender,receiver&nonce-gaps=true", 
apiErrors.ErrFetchingNonceGapsCannotIncludeFields)) - t.Run("fields has spaces", testTxPoolWithInvalidQuery("?fields=sender ,receiver", apiErrors.ErrInvalidFields)) - t.Run("fields has numbers", testTxPoolWithInvalidQuery("?fields=sender1", apiErrors.ErrInvalidFields)) + loadResponse(resp.Body, destination) } -func testTxPoolWithInvalidQuery(query string, expectedErr error) func(t *testing.T) { +func testTransactionGroupErrorScenario( + url string, + method string, + body interface{}, + expectedCode int, + expectedError error, +) func(t *testing.T) { return func(t *testing.T) { t.Parallel() - transactionGroup, err := groups.NewTransactionGroup(&mock.FacadeStub{}) - require.NoError(t, err) + testTransactionsGroup( + t, + &mock.FacadeStub{}, + url, + method, + body, + expectedCode, + expectedError) + } +} - ws := startWebServer(transactionGroup, "transaction", getTransactionRoutesConfig()) +func testExceededNumGoRoutines(url string, body interface{}) func(t *testing.T) { + return func(t *testing.T) { + facade := &mock.FacadeStub{ + GetThrottlerForEndpointCalled: func(_ string) (core.Throttler, bool) { + return &mock.ThrottlerStub{ + CanProcessCalled: func() bool { return false }, + }, true + }, + } + + testTransactionsGroup( + t, + facade, + url, + "GET", + body, + http.StatusTooManyRequests, + apiErrors.ErrTooManyRequests, + ) + } +} - req, _ := http.NewRequest("GET", "/transaction/pool"+query, nil) +func testTransactionsGroup( + t *testing.T, + facade shared.FacadeHandler, + url string, + method string, + body interface{}, + expectedRespCode int, + expectedRespError error, +) { + transactionGroup, err := groups.NewTransactionGroup(facade) + require.NoError(t, err) - resp := httptest.NewRecorder() - ws.ServeHTTP(resp, req) + ws := startWebServer(transactionGroup, "transaction", getTransactionRoutesConfig()) - txResp := &transactionResponse{} - loadResponse(resp.Body, txResp) + jsonBytes, _ := json.Marshal(body) + req, _ := http.NewRequest(method, url, bytes.NewBuffer(jsonBytes)) + resp := httptest.NewRecorder() + ws.ServeHTTP(resp, req) - assert.Equal(t, http.StatusBadRequest, resp.Code) - assert.True(t, strings.Contains(txResp.Error, apiErrors.ErrValidation.Error())) - assert.True(t, strings.Contains(txResp.Error, expectedErr.Error())) - } + txResp := shared.GenericAPIResponse{} + loadResponse(resp.Body, &txResp) + + assert.Equal(t, expectedRespCode, resp.Code) + assert.True(t, strings.Contains(txResp.Error, expectedRespError.Error())) + assert.Empty(t, txResp.Data) } func getTransactionRoutesConfig() config.ApiRoutesConfig { diff --git a/api/groups/validatorGroup_test.go b/api/groups/validatorGroup_test.go index c82a61d2efb..6d64e84c247 100644 --- a/api/groups/validatorGroup_test.go +++ b/api/groups/validatorGroup_test.go @@ -106,6 +106,90 @@ func TestValidatorStatistics_ReturnsSuccessfully(t *testing.T) { assert.Equal(t, validatorStatistics.Result, mapToReturn) } +func TestValidatorGroup_UpdateFacade(t *testing.T) { + t.Parallel() + + t.Run("nil facade should error", func(t *testing.T) { + t.Parallel() + + validatorGroup, err := groups.NewValidatorGroup(&mock.FacadeStub{}) + require.NoError(t, err) + + err = validatorGroup.UpdateFacade(nil) + require.Equal(t, apiErrors.ErrNilFacadeHandler, err) + }) + t.Run("cast failure should error", func(t *testing.T) { + t.Parallel() + + validatorGroup, err := groups.NewValidatorGroup(&mock.FacadeStub{}) + require.NoError(t, err) + + err = validatorGroup.UpdateFacade("this is not a facade handler") + require.True(t, errors.Is(err, 
apiErrors.ErrFacadeWrongTypeAssertion)) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + mapToReturn := make(map[string]*state.ValidatorApiResponse) + mapToReturn["test"] = &state.ValidatorApiResponse{ + NumLeaderSuccess: 5, + NumLeaderFailure: 2, + NumValidatorSuccess: 7, + NumValidatorFailure: 3, + } + facade := mock.FacadeStub{ + ValidatorStatisticsHandler: func() (map[string]*state.ValidatorApiResponse, error) { + return mapToReturn, nil + }, + } + validatorGroup, err := groups.NewValidatorGroup(&facade) + require.NoError(t, err) + + ws := startWebServer(validatorGroup, "validator", getValidatorRoutesConfig()) + + req, _ := http.NewRequest("GET", "/validator/statistics", nil) + resp := httptest.NewRecorder() + ws.ServeHTTP(resp, req) + + response := shared.GenericAPIResponse{} + loadResponse(resp.Body, &response) + validatorStatistics := ValidatorStatisticsResponse{} + mapResponseData := response.Data.(map[string]interface{}) + mapResponseDataBytes, _ := json.Marshal(mapResponseData) + _ = json.Unmarshal(mapResponseDataBytes, &validatorStatistics) + assert.Equal(t, http.StatusOK, resp.Code) + assert.Equal(t, validatorStatistics.Result, mapToReturn) + + expectedErr := errors.New("expected error") + newFacade := mock.FacadeStub{ + ValidatorStatisticsHandler: func() (map[string]*state.ValidatorApiResponse, error) { + return nil, expectedErr + }, + } + + err = validatorGroup.UpdateFacade(&newFacade) + require.NoError(t, err) + + req, _ = http.NewRequest("GET", "/validator/statistics", nil) + resp = httptest.NewRecorder() + ws.ServeHTTP(resp, req) + + loadResponse(resp.Body, &response) + assert.Equal(t, http.StatusBadRequest, resp.Code) + assert.Contains(t, response.Error, expectedErr.Error()) + }) +} + +func TestValidatorGroup_IsInterfaceNil(t *testing.T) { + t.Parallel() + + validatorGroup, _ := groups.NewValidatorGroup(nil) + require.True(t, validatorGroup.IsInterfaceNil()) + + validatorGroup, _ = groups.NewValidatorGroup(&mock.FacadeStub{}) + require.False(t, validatorGroup.IsInterfaceNil()) +} + func getValidatorRoutesConfig() config.ApiRoutesConfig { return config.ApiRoutesConfig{ APIPackages: map[string]config.APIPackageConfig{ diff --git a/api/groups/vmValuesGroup_test.go b/api/groups/vmValuesGroup_test.go index a3b06e1c46e..bd140d07b6f 100644 --- a/api/groups/vmValuesGroup_test.go +++ b/api/groups/vmValuesGroup_test.go @@ -247,6 +247,7 @@ func TestAllRoutes_WhenNoVMReturnDataShouldErr(t *testing.T) { ScAddress: dummyScAddress, FuncName: "function", Args: []string{}, + CallValue: "1", } response := simpleResponse{} @@ -276,6 +277,145 @@ func TestAllRoutes_WhenBadJsonShouldErr(t *testing.T) { requireErrorOnGetSingleValueRoutes(t, &facade, []byte("dummy"), apiErrors.ErrInvalidJSONRequest) } +func TestAllRoutes_DecodeAddressPubkeyFailsShouldErr(t *testing.T) { + t.Parallel() + + expectedErr := errors.New("expected error") + cnt := 0 + facade := mock.FacadeStub{ + DecodeAddressPubkeyCalled: func(pk string) ([]byte, error) { + cnt++ + if cnt > 1 { + return nil, expectedErr + } + return hex.DecodeString(pk) + }, + ExecuteSCQueryHandler: func(query *process.SCQuery) (vmOutput *vm.VMOutputApi, e error) { + return &vm.VMOutputApi{}, nil + }, + } + + request := groups.VMValueRequest{ + ScAddress: dummyScAddress, + FuncName: "function", + Args: []string{}, + CallerAddr: dummyScAddress, + } + requireErrorOnGetSingleValueRoutes(t, &facade, request, expectedErr) +} + +func TestAllRoutes_SetStringFailsShouldErr(t *testing.T) { + t.Parallel() + + facade := mock.FacadeStub{ + 
ExecuteSCQueryHandler: func(query *process.SCQuery) (vmOutput *vm.VMOutputApi, e error) { + return &vm.VMOutputApi{}, nil + }, + } + + request := groups.VMValueRequest{ + ScAddress: dummyScAddress, + FuncName: "function", + Args: []string{}, + CallerAddr: dummyScAddress, // coverage + CallValue: "not an int", + } + requireErrorOnGetSingleValueRoutes(t, &facade, request, errors.New("non numeric call value")) +} + +func TestVMValuesGroup_UpdateFacade(t *testing.T) { + t.Parallel() + + t.Run("nil facade should error", func(t *testing.T) { + t.Parallel() + + group, err := groups.NewVmValuesGroup(&mock.FacadeStub{}) + require.NoError(t, err) + + err = group.UpdateFacade(nil) + require.Equal(t, apiErrors.ErrNilFacadeHandler, err) + }) + t.Run("cast failure should error", func(t *testing.T) { + t.Parallel() + + group, err := groups.NewVmValuesGroup(&mock.FacadeStub{}) + require.NoError(t, err) + + err = group.UpdateFacade("this is not a facade handler") + require.True(t, errors.Is(err, apiErrors.ErrFacadeWrongTypeAssertion)) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + valueBuff, _ := hex.DecodeString("DEADBEEF") + facade := &mock.FacadeStub{ + ExecuteSCQueryHandler: func(query *process.SCQuery) (vmOutput *vm.VMOutputApi, e error) { + + return &vm.VMOutputApi{ + ReturnData: [][]byte{valueBuff}, + ReturnCode: "NOK", // coverage + }, nil + }, + } + + request := groups.VMValueRequest{ + ScAddress: dummyScAddress, + FuncName: "function", + Args: []string{}, + } + requestAsBytes, _ := json.Marshal(request) + group, err := groups.NewVmValuesGroup(facade) + require.NoError(t, err) + + server := startWebServer(group, "vm-values", getVmValuesRoutesConfig()) + + httpRequest, _ := http.NewRequest("POST", "/vm-values/hex", bytes.NewBuffer(requestAsBytes)) + responseRecorder := httptest.NewRecorder() + server.ServeHTTP(responseRecorder, httpRequest) + + responseI := shared.GenericAPIResponse{} + loadResponse(responseRecorder.Body, &responseI) + responseDataMap := responseI.Data.(map[string]interface{}) + responseDataMapBytes, _ := json.Marshal(responseDataMap) + response := &simpleResponse{} + _ = json.Unmarshal(responseDataMapBytes, response) + require.Equal(t, http.StatusOK, responseRecorder.Code) + require.Contains(t, responseI.Error, "NOK") + require.Contains(t, "", response.Error) + require.Equal(t, hex.EncodeToString(valueBuff), response.Data) + + expectedErr := errors.New("expected error") + newFacade := &mock.FacadeStub{ + ExecuteSCQueryHandler: func(query *process.SCQuery) (vmOutput *vm.VMOutputApi, e error) { + + return &vm.VMOutputApi{ + ReturnData: nil, + }, expectedErr + }, + } + + err = group.UpdateFacade(newFacade) + require.NoError(t, err) + + httpRequest, _ = http.NewRequest("POST", "/vm-values/hex", bytes.NewBuffer(requestAsBytes)) + responseRecorder = httptest.NewRecorder() + server.ServeHTTP(responseRecorder, httpRequest) + loadResponse(responseRecorder.Body, &responseI) + require.Equal(t, http.StatusBadRequest, responseRecorder.Code) + require.Contains(t, responseI.Error, expectedErr.Error()) + }) +} + +func TestVMValuesGroup_IsInterfaceNil(t *testing.T) { + t.Parallel() + + group, _ := groups.NewVmValuesGroup(nil) + require.True(t, group.IsInterfaceNil()) + + group, _ = groups.NewVmValuesGroup(&mock.FacadeStub{}) + require.False(t, group.IsInterfaceNil()) +} + func doPost(t *testing.T, facade interface{}, url string, request interface{}, response interface{}) int { // Serialize if not already requestAsBytes, ok := request.([]byte) diff --git 
a/api/mock/facadeStub.go b/api/mock/facadeStub.go index dbb933b7e93..a31b3dab13d 100644 --- a/api/mock/facadeStub.go +++ b/api/mock/facadeStub.go @@ -21,16 +21,15 @@ import ( // FacadeStub is the mock implementation of a node router handler type FacadeStub struct { - ShouldErrorStart bool - ShouldErrorStop bool - GetHeartbeatsHandler func() ([]data.PubKeyHeartbeat, error) - GetBalanceCalled func(address string, options api.AccountQueryOptions) (*big.Int, api.BlockInfo, error) - GetAccountCalled func(address string, options api.AccountQueryOptions) (api.AccountResponse, api.BlockInfo, error) - GetAccountsCalled func(addresses []string, options api.AccountQueryOptions) (map[string]*api.AccountResponse, api.BlockInfo, error) - GenerateTransactionHandler func(sender string, receiver string, value *big.Int, code string) (*transaction.Transaction, error) - GetTransactionHandler func(hash string, withResults bool) (*transaction.ApiTransactionResult, error) - CreateTransactionHandler func(nonce uint64, value string, receiver string, receiverUsername []byte, sender string, senderUsername []byte, gasPrice uint64, - gasLimit uint64, data []byte, signatureHex string, chainID string, version uint32, options uint32) (*transaction.Transaction, []byte, error) + ShouldErrorStart bool + ShouldErrorStop bool + GetHeartbeatsHandler func() ([]data.PubKeyHeartbeat, error) + GetBalanceCalled func(address string, options api.AccountQueryOptions) (*big.Int, api.BlockInfo, error) + GetAccountCalled func(address string, options api.AccountQueryOptions) (api.AccountResponse, api.BlockInfo, error) + GetAccountsCalled func(addresses []string, options api.AccountQueryOptions) (map[string]*api.AccountResponse, api.BlockInfo, error) + GenerateTransactionHandler func(sender string, receiver string, value *big.Int, code string) (*transaction.Transaction, error) + GetTransactionHandler func(hash string, withResults bool) (*transaction.ApiTransactionResult, error) + CreateTransactionHandler func(txArgs *external.ArgsCreateTransaction) (*transaction.Transaction, []byte, error) ValidateTransactionHandler func(tx *transaction.Transaction) error ValidateTransactionForSimulationHandler func(tx *transaction.Transaction, bypassSignature bool) error SendBulkTransactionsHandler func(txs []*transaction.Transaction) (uint64, error) @@ -41,6 +40,7 @@ type FacadeStub struct { NodeConfigCalled func() map[string]interface{} GetQueryHandlerCalled func(name string) (debug.QueryHandler, error) GetValueForKeyCalled func(address string, key string, options api.AccountQueryOptions) (string, api.BlockInfo, error) + GetGuardianDataCalled func(address string, options api.AccountQueryOptions) (api.GuardianData, api.BlockInfo, error) GetPeerInfoCalled func(pid string) ([]core.QueryP2PPeerInfo, error) GetConnectedPeersRatingsCalled func() string GetEpochStartDataAPICalled func(epoch uint32) (*common.EpochStartDataAPI, error) @@ -83,6 +83,10 @@ type FacadeStub struct { GetLastPoolNonceForSenderCalled func(sender string) (uint64, error) GetTransactionsPoolNonceGapsForSenderCalled func(sender string) (*common.TransactionsPoolNonceGapsForSenderApiResponse, error) GetGasConfigsCalled func() (map[string]map[string]uint64, error) + RestApiInterfaceCalled func() string + RestAPIServerDebugModeCalled func() bool + PprofEnabledCalled func() bool + DecodeAddressPubkeyCalled func(pk string) ([]byte, error) } // GetTokenSupply - @@ -159,16 +163,25 @@ func (f *FacadeStub) GetThrottlerForEndpoint(endpoint string) (core.Throttler, b // RestApiInterface - func (f 
*FacadeStub) RestApiInterface() string { + if f.RestApiInterfaceCalled != nil { + return f.RestApiInterfaceCalled() + } return "localhost:8080" } // RestAPIServerDebugMode - func (f *FacadeStub) RestAPIServerDebugMode() bool { + if f.RestAPIServerDebugModeCalled != nil { + return f.RestAPIServerDebugModeCalled() + } return false } // PprofEnabled - func (f *FacadeStub) PprofEnabled() bool { + if f.PprofEnabledCalled != nil { + return f.PprofEnabledCalled() + } return false } @@ -200,6 +213,14 @@ func (f *FacadeStub) GetKeyValuePairs(address string, options api.AccountQueryOp return nil, api.BlockInfo{}, nil } +// GetGuardianData - +func (f *FacadeStub) GetGuardianData(address string, options api.AccountQueryOptions) (api.GuardianData, api.BlockInfo, error) { + if f.GetGuardianDataCalled != nil { + return f.GetGuardianDataCalled(address, options) + } + return api.GuardianData{}, api.BlockInfo{}, nil +} + // GetESDTData - func (f *FacadeStub) GetESDTData(address string, key string, nonce uint64, options api.AccountQueryOptions) (*esdt.ESDigitalToken, api.BlockInfo, error) { if f.GetESDTDataCalled != nil { @@ -269,22 +290,8 @@ func (f *FacadeStub) GetAccounts(addresses []string, options api.AccountQueryOpt } // CreateTransaction is mock implementation of a handler's CreateTransaction method -func (f *FacadeStub) CreateTransaction( - nonce uint64, - value string, - receiver string, - receiverUsername []byte, - sender string, - senderUsername []byte, - gasPrice uint64, - gasLimit uint64, - data []byte, - signatureHex string, - chainID string, - version uint32, - options uint32, -) (*transaction.Transaction, []byte, error) { - return f.CreateTransactionHandler(nonce, value, receiver, receiverUsername, sender, senderUsername, gasPrice, gasLimit, data, signatureHex, chainID, version, options) +func (f *FacadeStub) CreateTransaction(txArgs *external.ArgsCreateTransaction) (*transaction.Transaction, []byte, error) { + return f.CreateTransactionHandler(txArgs) } // GetTransaction is the mock implementation of a handler's GetTransaction method @@ -359,6 +366,9 @@ func (f *FacadeStub) EncodeAddressPubkey(pk []byte) (string, error) { // DecodeAddressPubkey - func (f *FacadeStub) DecodeAddressPubkey(pk string) ([]byte, error) { + if f.DecodeAddressPubkeyCalled != nil { + return f.DecodeAddressPubkeyCalled(pk) + } return hex.DecodeString(pk) } diff --git a/api/shared/interface.go b/api/shared/interface.go index 6f93f308331..0063d64321e 100644 --- a/api/shared/interface.go +++ b/api/shared/interface.go @@ -73,6 +73,7 @@ type FacadeHandler interface { GetESDTsWithRole(address string, role string, options api.AccountQueryOptions) ([]string, api.BlockInfo, error) GetAllESDTTokens(address string, options api.AccountQueryOptions) (map[string]*esdt.ESDigitalToken, api.BlockInfo, error) GetKeyValuePairs(address string, options api.AccountQueryOptions) (map[string]string, api.BlockInfo, error) + GetGuardianData(address string, options api.AccountQueryOptions) (api.GuardianData, api.BlockInfo, error) GetBlockByHash(hash string, options api.BlockQueryOptions) (*api.Block, error) GetBlockByNonce(nonce uint64, options api.BlockQueryOptions) (*api.Block, error) GetBlockByRound(round uint64, options api.BlockQueryOptions) (*api.Block, error) @@ -104,8 +105,7 @@ type FacadeHandler interface { GetProofCurrentRootHash(address string) (*common.GetProofResponse, error) VerifyProof(rootHash string, address string, proof [][]byte) (bool, error) GetThrottlerForEndpoint(endpoint string) (core.Throttler, bool) - 
CreateTransaction(nonce uint64, value string, receiver string, receiverUsername []byte, sender string, senderUsername []byte, gasPrice uint64, - gasLimit uint64, data []byte, signatureHex string, chainID string, version uint32, options uint32) (*transaction.Transaction, []byte, error) + CreateTransaction(txArgs *external.ArgsCreateTransaction) (*transaction.Transaction, []byte, error) ValidateTransaction(tx *transaction.Transaction) error ValidateTransactionForSimulation(tx *transaction.Transaction, checkSignature bool) error SendBulkTransactions([]*transaction.Transaction) (uint64, error) diff --git a/cmd/keygenerator/converter/pidPubkeyConverter.go b/cmd/keygenerator/converter/pidPubkeyConverter.go index 1cff0dfa0d7..41eeea15fa1 100644 --- a/cmd/keygenerator/converter/pidPubkeyConverter.go +++ b/cmd/keygenerator/converter/pidPubkeyConverter.go @@ -6,11 +6,8 @@ import ( "github.com/multiversx/mx-chain-crypto-go/signing/secp256k1" "github.com/multiversx/mx-chain-go/p2p" "github.com/multiversx/mx-chain-go/p2p/factory" - logger "github.com/multiversx/mx-chain-logger-go" ) -var log = logger.GetOrCreate("cmd/keygenerator/converter") - type pidPubkeyConverter struct { keyGen crypto.KeyGenerator p2PKeyConverter p2p.P2PKeyConverter diff --git a/cmd/node/config/api.toml b/cmd/node/config/api.toml index ec9346b782d..87b59649910 100644 --- a/cmd/node/config/api.toml +++ b/cmd/node/config/api.toml @@ -64,6 +64,9 @@ # /address/:address/key/:key will return the value of a key for a given account { Name = "/:address/key/:key", Open = true }, + # /address/:address/guardian-data will return the guardian data for the given account + { Name = "/:address/guardian-data", Open = true }, + # /address/:address/esdt will return the list of esdt tokens for a given account { Name = "/:address/esdt", Open = true }, diff --git a/cmd/node/config/config.toml b/cmd/node/config/config.toml index c6ae563ccc1..c3704f25719 100644 --- a/cmd/node/config/config.toml +++ b/cmd/node/config/config.toml @@ -35,6 +35,11 @@ # SyncProcessTimeInMillis is the value in milliseconds used when processing blocks while synchronizing blocks SyncProcessTimeInMillis = 12000 + # SetGuardianEpochsDelay represents the delay in epochs between the execution time of the SetGuardian transaction and + # the activation of the configured guardian. + # Make sure that this is greater than the unbonding period! 
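+    # Example (illustrative, assuming the delay is counted from the epoch in which the SetGuardian transaction is executed):
+    # with SetGuardianEpochsDelay = 2, a guardian set by a transaction executed in epoch N becomes active starting with epoch N + 2.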
+ SetGuardianEpochsDelay = 2 # TODO: for mainnet should be 20, 2 is just for testing + [Versions] DefaultVersion = "default" VersionsByEpochs = [ diff --git a/cmd/node/config/economics.toml b/cmd/node/config/economics.toml index 99fcb77bbca..f0bac97a055 100644 --- a/cmd/node/config/economics.toml +++ b/cmd/node/config/economics.toml @@ -39,10 +39,12 @@ [FeeSettings] GasLimitSettings = [ - {EnableEpoch = 0, MaxGasLimitPerBlock = "1500000000", MaxGasLimitPerMiniBlock = "1500000000", MaxGasLimitPerMetaBlock = "15000000000", MaxGasLimitPerMetaMiniBlock = "15000000000", MaxGasLimitPerTx = "1500000000", MinGasLimit = "50000"}, - {EnableEpoch = 1, MaxGasLimitPerBlock = "1500000000", MaxGasLimitPerMiniBlock = "250000000", MaxGasLimitPerMetaBlock = "15000000000", MaxGasLimitPerMetaMiniBlock = "250000000", MaxGasLimitPerTx = "600000000", MinGasLimit = "50000"}, + {EnableEpoch = 0, MaxGasLimitPerBlock = "1500000000", MaxGasLimitPerMiniBlock = "1500000000", MaxGasLimitPerMetaBlock = "15000000000", MaxGasLimitPerMetaMiniBlock = "15000000000", MaxGasLimitPerTx = "1500000000", MinGasLimit = "50000", ExtraGasLimitGuardedTx = "50000"}, + {EnableEpoch = 1, MaxGasLimitPerBlock = "1500000000", MaxGasLimitPerMiniBlock = "250000000", MaxGasLimitPerMetaBlock = "15000000000", MaxGasLimitPerMetaMiniBlock = "250000000", MaxGasLimitPerTx = "600000000", MinGasLimit = "50000", ExtraGasLimitGuardedTx = "50000"}, + {EnableEpoch = 2, MaxGasLimitPerBlock = "1500000000", MaxGasLimitPerMiniBlock = "250000000", MaxGasLimitPerMetaBlock = "15000000000", MaxGasLimitPerMetaMiniBlock = "250000000", MaxGasLimitPerTx = "600000000", MinGasLimit = "50000", ExtraGasLimitGuardedTx = "50000"}, ] MinGasPrice = "1000000000" #will yield min tx fee of 0.00005 eGLD GasPriceModifier = 0.01 GasPerDataByte = "1500" DataLimitForBaseCalc = "10000" + MaxGasPriceSetGuardian = "2000000000" diff --git a/cmd/node/config/enableEpochs.toml b/cmd/node/config/enableEpochs.toml index 7bd4ffbcd41..12741cd03aa 100644 --- a/cmd/node/config/enableEpochs.toml +++ b/cmd/node/config/enableEpochs.toml @@ -62,7 +62,7 @@ ESDTEnableEpoch = 1 # GovernanceEnableEpoch represents the epoch when governance is enabled - GovernanceEnableEpoch = 1000000 + GovernanceEnableEpoch = 5 # DelegationManagerEnableEpoch represents the epoch when the delegation manager is enabled # epoch should not be 0 @@ -240,7 +240,13 @@ AlwaysSaveTokenMetaDataEnableEpoch = 1 # RuntimeCodeSizeFixEnableEpoch represents the epoch when the code size fix in the VM is enabled - RuntimeCodeSizeFixEnableEpoch = 2 + RuntimeCodeSizeFixEnableEpoch = 1 + + # KeepExecOrderOnCreatedSCRsEnableEpoch represents the epoch when the execution order of created SCRs is ensured + KeepExecOrderOnCreatedSCRsEnableEpoch = 2 + + # MultiClaimOnDelegationEnableEpoch represents the epoch when the multi claim on delegation is enabled + MultiClaimOnDelegationEnableEpoch = 3 # BLSMultiSignerEnableEpoch represents the activation epoch for different types of BLS multi-signers BLSMultiSignerEnableEpoch = [ @@ -248,6 +254,9 @@ { EnableEpoch = 1, Type = "KOSK"} ] + # SetGuardianEnableEpoch represents the epoch when the guard account feature is enabled in the protocol + SetGuardianEnableEpoch = 2 + # MaxNodesChangeEnableEpoch holds configuration for changing the maximum number of nodes and the enabling epoch MaxNodesChangeEnableEpoch = [ { EpochEnable = 0, MaxNumNodes = 36, NodesToShufflePerShard = 4 }, diff --git a/cmd/node/config/gasSchedules/gasScheduleV1.toml b/cmd/node/config/gasSchedules/gasScheduleV1.toml index 
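The fee settings above introduce ExtraGasLimitGuardedTx, the extra gas required for guarded transactions, and cap the gas price of SetGuardian calls via MaxGasPriceSetGuardian. A back-of-the-envelope sketch of the resulting gas-limit floor, using the epoch-0 values from this file; whether GasPriceModifier also scales the guarded surcharge is not visible in this diff, so the fee figure is only the unmodified upper bound:

    const (
        minGasLimit            = 50_000        // MinGasLimit
        extraGasLimitGuardedTx = 50_000        // ExtraGasLimitGuardedTx
        gasPerDataByte         = 1_500         // GasPerDataByte
        minGasPrice            = 1_000_000_000 // MinGasPrice
    )

    // minimum gas limit a guarded transaction must declare
    func minGuardedTxGasLimit(dataLen uint64) uint64 {
        return minGasLimit + extraGasLimitGuardedTx + gasPerDataByte*dataLen
    }

    // a guarded transfer with empty data needs 100_000 gas; at the minimum
    // gas price that is at most 100_000 * 1_000_000_000 = 1e14 units,
    // i.e. 0.0001 eGLD at 18 decimals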
e2e6005d3f6..66be1a6474b 100644 --- a/cmd/node/config/gasSchedules/gasScheduleV1.toml +++ b/cmd/node/config/gasSchedules/gasScheduleV1.toml @@ -15,6 +15,9 @@ ESDTNFTAddUri = 500000 ESDTNFTUpdateAttributes = 500000 ESDTNFTMultiTransfer = 1000000 + SetGuardian = 250000 + GuardAccount = 250000 + UnGuardAccount = 250000 [MetaChainSystemSCsCost] Stake = 5000000 @@ -36,6 +39,7 @@ UnstakeTokens = 5000000 UnbondTokens = 5000000 DelegationMgrOps = 50000000 + GetActiveFund = 50000 ValidatorToDelegation = 500000000 GetAllNodeStates = 100000000 FixWaitingListSize = 500000000 diff --git a/cmd/node/config/gasSchedules/gasScheduleV2.toml b/cmd/node/config/gasSchedules/gasScheduleV2.toml index 3c8f2f3c871..bfeb9a2595e 100644 --- a/cmd/node/config/gasSchedules/gasScheduleV2.toml +++ b/cmd/node/config/gasSchedules/gasScheduleV2.toml @@ -15,6 +15,9 @@ ESDTNFTAddUri = 500000 ESDTNFTUpdateAttributes = 500000 ESDTNFTMultiTransfer = 1000000 + SetGuardian = 250000 + GuardAccount = 250000 + UnGuardAccount = 250000 [MetaChainSystemSCsCost] Stake = 5000000 @@ -35,6 +38,7 @@ DelegationOps = 1000000 UnstakeTokens = 5000000 UnbondTokens = 5000000 + GetActiveFund = 50000 DelegationMgrOps = 50000000 ValidatorToDelegation = 500000000 GetAllNodeStates = 100000000 diff --git a/cmd/node/config/gasSchedules/gasScheduleV3.toml b/cmd/node/config/gasSchedules/gasScheduleV3.toml index 89b4106eae2..09a29cccbe0 100644 --- a/cmd/node/config/gasSchedules/gasScheduleV3.toml +++ b/cmd/node/config/gasSchedules/gasScheduleV3.toml @@ -15,6 +15,9 @@ ESDTNFTAddUri = 500000 ESDTNFTUpdateAttributes = 500000 ESDTNFTMultiTransfer = 1000000 + SetGuardian = 250000 + GuardAccount = 250000 + UnGuardAccount = 250000 [MetaChainSystemSCsCost] Stake = 5000000 @@ -38,6 +41,7 @@ GetAllNodeStates = 20000000 UnstakeTokens = 5000000 UnbondTokens = 5000000 + GetActiveFund = 50000 FixWaitingListSize = 500000000 [BaseOperationCost] diff --git a/cmd/node/config/gasSchedules/gasScheduleV4.toml b/cmd/node/config/gasSchedules/gasScheduleV4.toml index 56e6c342c1f..6de1f466876 100644 --- a/cmd/node/config/gasSchedules/gasScheduleV4.toml +++ b/cmd/node/config/gasSchedules/gasScheduleV4.toml @@ -15,6 +15,9 @@ ESDTNFTAddUri = 50000 ESDTNFTUpdateAttributes = 50000 ESDTNFTMultiTransfer = 200000 + SetGuardian = 250000 + GuardAccount = 250000 + UnGuardAccount = 250000 [MetaChainSystemSCsCost] Stake = 5000000 @@ -38,6 +41,7 @@ GetAllNodeStates = 20000000 UnstakeTokens = 5000000 UnbondTokens = 5000000 + GetActiveFund = 50000 FixWaitingListSize = 500000000 [BaseOperationCost] diff --git a/cmd/node/config/gasSchedules/gasScheduleV5.toml b/cmd/node/config/gasSchedules/gasScheduleV5.toml index 33f1fdbfd85..634275b1cd9 100644 --- a/cmd/node/config/gasSchedules/gasScheduleV5.toml +++ b/cmd/node/config/gasSchedules/gasScheduleV5.toml @@ -15,6 +15,9 @@ ESDTNFTAddUri = 50000 ESDTNFTUpdateAttributes = 50000 ESDTNFTMultiTransfer = 200000 + SetGuardian = 250000 + GuardAccount = 250000 + UnGuardAccount = 250000 [MetaChainSystemSCsCost] Stake = 5000000 @@ -38,6 +41,7 @@ GetAllNodeStates = 20000000 UnstakeTokens = 5000000 UnbondTokens = 5000000 + GetActiveFund = 50000 FixWaitingListSize = 500000000 [BaseOperationCost] diff --git a/cmd/node/config/gasSchedules/gasScheduleV6.toml b/cmd/node/config/gasSchedules/gasScheduleV6.toml index e14027354a4..09229b8f15f 100644 --- a/cmd/node/config/gasSchedules/gasScheduleV6.toml +++ b/cmd/node/config/gasSchedules/gasScheduleV6.toml @@ -15,6 +15,9 @@ ESDTNFTAddUri = 50000 ESDTNFTUpdateAttributes = 50000 ESDTNFTMultiTransfer = 200000 + SetGuardian = 
250000 + GuardAccount = 250000 + UnGuardAccount = 250000 [MetaChainSystemSCsCost] Stake = 5000000 @@ -38,6 +41,7 @@ GetAllNodeStates = 20000000 UnstakeTokens = 5000000 UnbondTokens = 5000000 + GetActiveFund = 50000 FixWaitingListSize = 500000000 [BaseOperationCost] diff --git a/cmd/node/config/gasSchedules/gasScheduleV7.toml b/cmd/node/config/gasSchedules/gasScheduleV7.toml index 9e5589e9673..3f31ac9969c 100644 --- a/cmd/node/config/gasSchedules/gasScheduleV7.toml +++ b/cmd/node/config/gasSchedules/gasScheduleV7.toml @@ -16,6 +16,9 @@ ESDTNFTUpdateAttributes = 50000 ESDTNFTMultiTransfer = 200000 MultiESDTNFTTransfer = 200000 # should be the same value with the ESDTNFTMultiTransfer + SetGuardian = 250000 + GuardAccount = 250000 + UnGuardAccount = 250000 [MetaChainSystemSCsCost] Stake = 5000000 @@ -32,13 +35,14 @@ ESDTIssue = 50000000 ESDTOperations = 50000000 Proposal = 50000000 - Vote = 50000000 + Vote = 5000000 DelegateVote = 50000000 RevokeVote = 50000000 CloseProposal = 50000000 GetAllNodeStates = 20000000 UnstakeTokens = 5000000 UnbondTokens = 5000000 + GetActiveFund = 50000 FixWaitingListSize = 500000000 [BaseOperationCost] diff --git a/cmd/node/config/systemSmartContractsConfig.toml b/cmd/node/config/systemSmartContractsConfig.toml index c5e418a9749..256dc292171 100644 --- a/cmd/node/config/systemSmartContractsConfig.toml +++ b/cmd/node/config/systemSmartContractsConfig.toml @@ -17,7 +17,7 @@ OwnerAddress = "erd1fpkcgel4gcmh8zqqdt043yfcn5tyx8373kg6q2qmkxzu4dqamc0swts65c" [GovernanceSystemSCConfig] - FirstWhitelistedAddress = "erd1vxy22x0fj4zv6hktmydg8vpfh6euv02cz4yg0aaws6rrad5a5awqgqky80" #should use a multisign contract instead of a wallet address + ChangeConfigAddress = "erd1vxy22x0fj4zv6hktmydg8vpfh6euv02cz4yg0aaws6rrad5a5awqgqky80" #should use a multisign contract instead of a wallet address [GovernanceSystemSCConfig.V1] NumNodes = 500 ProposalCost = "5000000000000000000" #5 eGLD @@ -25,10 +25,10 @@ MinPassThreshold = 300 MinVetoThreshold = 50 [GovernanceSystemSCConfig.Active] - ProposalCost = "5000000000000000000" #5 eGLD - MinQuorum = "500000000000" # MinQuorum is equal to 10*sqrt(NodePrice) - MinPassThreshold = "251000000000" - MinVetoThreshold = "249000000000" + ProposalCost = "1000000000000000000000" #1000 eGLD + MinQuorum = 0.5 #fraction of value 0.5 - 50% + MinPassThreshold = 0.5 #fraction of value 0.5 - 50% + MinVetoThreshold = 0.33 #fraction of value 0.33 - 33% [DelegationManagerSystemSCConfig] MinCreationDeposit = "1250000000000000000000" #1.25K eGLD diff --git a/cmd/termui/view/termuic/termuiRenders/widgetsRender.go b/cmd/termui/view/termuic/termuiRenders/widgetsRender.go index c7fcebec7d5..12d21a9aca6 100644 --- a/cmd/termui/view/termuic/termuiRenders/widgetsRender.go +++ b/cmd/termui/view/termuic/termuiRenders/widgetsRender.go @@ -124,12 +124,16 @@ func (wr *WidgetsRender) prepareInstanceInfo() { shardId := wr.presenter.GetShardId() instanceType := wr.presenter.GetNodeType() peerType := wr.presenter.GetPeerType() + peerSubType := wr.presenter.GetPeerSubType() chainID := wr.presenter.GetChainID() nodeTypeAndListDisplay := instanceType if peerType != string(common.ObserverList) && !strings.Contains(peerType, invalidKey) { nodeTypeAndListDisplay += fmt.Sprintf(" - %s", peerType) } + if peerSubType == core.FullHistoryObserver.String() { + nodeTypeAndListDisplay += " - full archive" + } shardIdStr := fmt.Sprintf("%d", shardId) if shardId == uint64(core.MetachainShardId) { shardIdStr = "meta" diff --git a/common/channels.go b/common/channels.go index 
3d00dcde162..177ac89f5c5 100644 --- a/common/channels.go +++ b/common/channels.go @@ -7,13 +7,3 @@ func GetClosedUnbufferedChannel() chan struct{} { return ch } - -// GetErrorFromChanNonBlocking will get the error from channel -func GetErrorFromChanNonBlocking(errChan chan error) error { - select { - case err := <-errChan: - return err - default: - return nil - } -} diff --git a/common/channels_test.go b/common/channels_test.go index a5fad97d1a4..4e2828e2d6a 100644 --- a/common/channels_test.go +++ b/common/channels_test.go @@ -1,11 +1,8 @@ package common import ( - "errors" "testing" - "time" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -39,57 +36,3 @@ func didTriggerHappen(ch chan struct{}) bool { return false } } - -func TestErrFromChannel(t *testing.T) { - t.Parallel() - - t.Run("empty channel, should return nil", func(t *testing.T) { - t.Parallel() - - t.Run("unbuffered chan", func(t *testing.T) { - t.Parallel() - - errChan := make(chan error) - assert.Nil(t, GetErrorFromChanNonBlocking(errChan)) - }) - - t.Run("buffered chan", func(t *testing.T) { - t.Parallel() - - errChan := make(chan error, 1) - assert.Nil(t, GetErrorFromChanNonBlocking(errChan)) - }) - }) - - t.Run("non empty channel, should return error", func(t *testing.T) { - t.Parallel() - - t.Run("unbuffered chan", func(t *testing.T) { - t.Parallel() - - expectedErr := errors.New("expected error") - errChan := make(chan error) - go func() { - errChan <- expectedErr - }() - - time.Sleep(time.Second) // allow the go routine to start - - assert.Equal(t, expectedErr, GetErrorFromChanNonBlocking(errChan)) - }) - - t.Run("buffered chan", func(t *testing.T) { - t.Parallel() - - for i := 1; i < 10; i++ { - errChan := make(chan error, i) - expectedErr := errors.New("expected error") - for j := 0; j < i; j++ { - errChan <- expectedErr - } - - assert.Equal(t, expectedErr, GetErrorFromChanNonBlocking(errChan)) - } - }) - }) -} diff --git a/common/configParser.go b/common/configParser.go index 77a46161a71..bc814990528 100644 --- a/common/configParser.go +++ b/common/configParser.go @@ -163,3 +163,12 @@ func GetSkBytesFromP2pKey(p2pKeyFilename string) ([]byte, error) { return skBytes, nil } + +// GetNodeProcessingMode returns the node processing mode based on the provided config +func GetNodeProcessingMode(importDbConfig *config.ImportDbConfig) NodeProcessingMode { + if importDbConfig.IsImportDBMode { + return ImportDb + } + + return Normal +} diff --git a/common/constants.go b/common/constants.go index 71306020854..e827811ae5b 100644 --- a/common/constants.go +++ b/common/constants.go @@ -312,6 +312,9 @@ const MetricMinGasPrice = "erd_min_gas_price" // MetricMinGasLimit is the metric that specifies the minimum gas limit const MetricMinGasLimit = "erd_min_gas_limit" +// MetricExtraGasLimitGuardedTx specifies the extra gas limit required for guarded transactions +const MetricExtraGasLimitGuardedTx = "erd_extra_gas_limit_guarded_tx" + // MetricRewardsTopUpGradientPoint is the metric that specifies the rewards top up gradient point const MetricRewardsTopUpGradientPoint = "erd_rewards_top_up_gradient_point" @@ -597,6 +600,9 @@ const ( // MetricRatingsPeerHonestyUnitValue represents the peer honesty unit value MetricRatingsPeerHonestyUnitValue = "erd_ratings_peerhonesty_unit_value" + + // MetricSetGuardianEnableEpoch represents the epoch when the guardian feature is enabled + MetricSetGuardianEnableEpoch = "erd_set_guardian_feature_enable_epoch" ) const ( diff --git a/common/enablers/enableEpochsHandler.go 
b/common/enablers/enableEpochsHandler.go index f24b8fdf952..59cee759e8a 100644 --- a/common/enablers/enableEpochsHandler.go +++ b/common/enablers/enableEpochsHandler.go @@ -118,6 +118,9 @@ func (handler *enableEpochsHandler) EpochConfirmed(epoch uint32, _ uint64) { handler.setFlagValue(epoch >= handler.enableEpochsConfig.MaxBlockchainHookCountersEnableEpoch, handler.maxBlockchainHookCountersFlag, "maxBlockchainHookCountersFlag", epoch, handler.enableEpochsConfig.MaxBlockchainHookCountersEnableEpoch) handler.setFlagValue(epoch >= handler.enableEpochsConfig.WipeSingleNFTLiquidityDecreaseEnableEpoch, handler.wipeSingleNFTLiquidityDecreaseFlag, "wipeSingleNFTLiquidityDecreaseFlag", epoch, handler.enableEpochsConfig.WipeSingleNFTLiquidityDecreaseEnableEpoch) handler.setFlagValue(epoch >= handler.enableEpochsConfig.AlwaysSaveTokenMetaDataEnableEpoch, handler.alwaysSaveTokenMetaDataFlag, "alwaysSaveTokenMetaDataFlag", epoch, handler.enableEpochsConfig.AlwaysSaveTokenMetaDataEnableEpoch) + handler.setFlagValue(epoch >= handler.enableEpochsConfig.MultiClaimOnDelegationEnableEpoch, handler.multiClaimOnDelegationFlag, "multiClaimOnDelegationFlag", epoch, handler.enableEpochsConfig.MultiClaimOnDelegationEnableEpoch) + handler.setFlagValue(epoch >= handler.enableEpochsConfig.SetGuardianEnableEpoch, handler.setGuardianFlag, "setGuardianFlag", epoch, handler.enableEpochsConfig.SetGuardianEnableEpoch) + handler.setFlagValue(epoch >= handler.enableEpochsConfig.KeepExecOrderOnCreatedSCRsEnableEpoch, handler.keepExecOrderOnCreatedSCRsFlag, "keepExecOrderOnCreatedSCRsFlag", epoch, handler.enableEpochsConfig.KeepExecOrderOnCreatedSCRsEnableEpoch) } func (handler *enableEpochsHandler) setFlagValue(value bool, flag *atomic.Flag, flagName string, epoch uint32, flagEpoch uint32) { diff --git a/common/enablers/enableEpochsHandler_test.go b/common/enablers/enableEpochsHandler_test.go index 661d684f010..28e905aae64 100644 --- a/common/enablers/enableEpochsHandler_test.go +++ b/common/enablers/enableEpochsHandler_test.go @@ -91,6 +91,8 @@ func createEnableEpochsConfig() config.EnableEpochs { WipeSingleNFTLiquidityDecreaseEnableEpoch: 75, AlwaysSaveTokenMetaDataEnableEpoch: 76, RuntimeCodeSizeFixEnableEpoch: 77, + MultiClaimOnDelegationEnableEpoch: 78, + KeepExecOrderOnCreatedSCRsEnableEpoch: 79, } } @@ -213,11 +215,13 @@ func TestNewEnableEpochsHandler_EpochConfirmed(t *testing.T) { assert.True(t, handler.IsMaxBlockchainHookCountersFlagEnabled()) assert.True(t, handler.IsAlwaysSaveTokenMetaDataEnabled()) assert.True(t, handler.IsRuntimeCodeSizeFixEnabled()) + assert.False(t, handler.IsKeepExecOrderOnCreatedSCRsEnabled()) + assert.False(t, handler.IsMultiClaimOnDelegationEnabled()) }) t.Run("flags with == condition should be set, along with all >=", func(t *testing.T) { t.Parallel() - epoch := uint32(78) + epoch := uint32(79) cfg := createEnableEpochsConfig() cfg.StakingV2EnableEpoch = epoch cfg.ESDTEnableEpoch = epoch @@ -313,6 +317,7 @@ func TestNewEnableEpochsHandler_EpochConfirmed(t *testing.T) { assert.True(t, handler.IsWipeSingleNFTLiquidityDecreaseEnabled()) assert.True(t, handler.IsAlwaysSaveTokenMetaDataEnabled()) assert.True(t, handler.IsRuntimeCodeSizeFixEnabled()) + assert.True(t, handler.IsKeepExecOrderOnCreatedSCRsEnabled()) }) t.Run("flags with < should be set", func(t *testing.T) { t.Parallel() @@ -408,5 +413,6 @@ func TestNewEnableEpochsHandler_EpochConfirmed(t *testing.T) { assert.False(t, handler.IsWipeSingleNFTLiquidityDecreaseEnabled()) assert.False(t, handler.IsAlwaysSaveTokenMetaDataEnabled()) 
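The three setFlagValue lines added to EpochConfirmed above are the whole activation mechanism: each flag is recomputed against its configured epoch whenever the epoch changes, and consumers only ever read it through the corresponding getter from epochFlags.go. A condensed, single-flag sketch of the pattern (the real handler carries dozens of flags; SetValue is assumed to be what the setFlagValue helper ultimately calls on the core atomic.Flag):

    type guardianFlagHolder struct {
        setGuardianFlag        atomic.Flag // mx-chain-core-go core/atomic
        setGuardianEnableEpoch uint32
    }

    // recomputed on every epoch notification, so the flag also turns
    // off again if the node re-processes an older epoch
    func (h *guardianFlagHolder) EpochConfirmed(epoch uint32) {
        h.setGuardianFlag.SetValue(epoch >= h.setGuardianEnableEpoch)
    }

    func (h *guardianFlagHolder) IsSetGuardianEnabled() bool {
        return h.setGuardianFlag.IsSet()
    }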
assert.False(t, handler.IsRuntimeCodeSizeFixEnabled()) + assert.False(t, handler.IsKeepExecOrderOnCreatedSCRsEnabled()) }) } diff --git a/common/enablers/epochFlags.go b/common/enablers/epochFlags.go index fe11469f4bb..e77279928cb 100644 --- a/common/enablers/epochFlags.go +++ b/common/enablers/epochFlags.go @@ -90,6 +90,9 @@ type epochFlagsHolder struct { maxBlockchainHookCountersFlag *atomic.Flag wipeSingleNFTLiquidityDecreaseFlag *atomic.Flag alwaysSaveTokenMetaDataFlag *atomic.Flag + setGuardianFlag *atomic.Flag + keepExecOrderOnCreatedSCRsFlag *atomic.Flag + multiClaimOnDelegationFlag *atomic.Flag } func newEpochFlagsHolder() *epochFlagsHolder { @@ -179,6 +182,9 @@ func newEpochFlagsHolder() *epochFlagsHolder { maxBlockchainHookCountersFlag: &atomic.Flag{}, wipeSingleNFTLiquidityDecreaseFlag: &atomic.Flag{}, alwaysSaveTokenMetaDataFlag: &atomic.Flag{}, + setGuardianFlag: &atomic.Flag{}, + keepExecOrderOnCreatedSCRsFlag: &atomic.Flag{}, + multiClaimOnDelegationFlag: &atomic.Flag{}, } } @@ -659,3 +665,18 @@ func (holder *epochFlagsHolder) IsWipeSingleNFTLiquidityDecreaseEnabled() bool { func (holder *epochFlagsHolder) IsAlwaysSaveTokenMetaDataEnabled() bool { return holder.alwaysSaveTokenMetaDataFlag.IsSet() } + +// IsSetGuardianEnabled returns true if setGuardianFlag is enabled +func (holder *epochFlagsHolder) IsSetGuardianEnabled() bool { + return holder.setGuardianFlag.IsSet() +} + +// IsKeepExecOrderOnCreatedSCRsEnabled returns true if keepExecOrderOnCreatedSCRsFlag is enabled +func (holder *epochFlagsHolder) IsKeepExecOrderOnCreatedSCRsEnabled() bool { + return holder.keepExecOrderOnCreatedSCRsFlag.IsSet() +} + +// IsMultiClaimOnDelegationEnabled returns true if multi claim on delegation is enabled +func (holder *epochFlagsHolder) IsMultiClaimOnDelegationEnabled() bool { + return holder.multiClaimOnDelegationFlag.IsSet() +} diff --git a/common/errChan/errChan.go b/common/errChan/errChan.go new file mode 100644 index 00000000000..47cf29e320b --- /dev/null +++ b/common/errChan/errChan.go @@ -0,0 +1,69 @@ +package errChan + +import "sync" + +type errChanWrapper struct { + ch chan error + closed bool + closeMutex sync.RWMutex +} + +// NewErrChanWrapper creates a new errChanWrapper +func NewErrChanWrapper() *errChanWrapper { + return &errChanWrapper{ + ch: make(chan error, 1), + closed: false, + } +} + +// WriteInChanNonBlocking will send the given error on the channel if the chan is not blocked +func (ec *errChanWrapper) WriteInChanNonBlocking(err error) { + ec.closeMutex.RLock() + defer ec.closeMutex.RUnlock() + + if ec.closed { + return + } + + select { + case ec.ch <- err: + default: + } +} + +// ReadFromChanNonBlocking will read from the channel, or return nil if no error was sent on the channel +func (ec *errChanWrapper) ReadFromChanNonBlocking() error { + select { + case err := <-ec.ch: + return err + default: + return nil + } +} + +// Close will close the channel +func (ec *errChanWrapper) Close() { + ec.closeMutex.Lock() + defer ec.closeMutex.Unlock() + + if ec.closed { + return + } + + if ec.ch == nil { + return + } + + close(ec.ch) + ec.closed = true +} + +// Len returns the length of the channel +func (ec *errChanWrapper) Len() int { + return len(ec.ch) +} + +// IsInterfaceNil returns true if there is no value under the interface +func (ec *errChanWrapper) IsInterfaceNil() bool { + return ec == nil +} diff --git a/common/errChan/errChan_test.go b/common/errChan/errChan_test.go new file mode 100644 index 00000000000..3d88f358015 --- /dev/null +++ 
b/common/errChan/errChan_test.go @@ -0,0 +1,136 @@ +package errChan + +import ( + "fmt" + "sync" + "testing" + + "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/stretchr/testify/assert" +) + +func TestNewErrChan(t *testing.T) { + t.Parallel() + + ec := NewErrChanWrapper() + assert.False(t, check.IfNil(ec)) + assert.Equal(t, 1, cap(ec.ch)) +} + +func TestErrChan_WriteInChanNonBlocking(t *testing.T) { + t.Parallel() + + t.Run("write in a nil channel", func(t *testing.T) { + t.Parallel() + + ec := NewErrChanWrapper() + ec.ch = nil + ec.WriteInChanNonBlocking(fmt.Errorf("err1")) + + assert.Equal(t, 0, len(ec.ch)) + }) + + t.Run("write in a closed channel", func(t *testing.T) { + t.Parallel() + + ec := NewErrChanWrapper() + ec.Close() + ec.WriteInChanNonBlocking(fmt.Errorf("err1")) + + assert.Equal(t, 0, len(ec.ch)) + }) + + t.Run("should work", func(t *testing.T) { + expectedErr := fmt.Errorf("err1") + ec := NewErrChanWrapper() + ec.WriteInChanNonBlocking(expectedErr) + ec.WriteInChanNonBlocking(fmt.Errorf("err2")) + ec.WriteInChanNonBlocking(fmt.Errorf("err3")) + + assert.Equal(t, 1, len(ec.ch)) + assert.Equal(t, expectedErr, <-ec.ch) + assert.Equal(t, 0, len(ec.ch)) + }) +} + +func TestErrChan_ReadFromChanNonBlocking(t *testing.T) { + t.Parallel() + + expectedErr := fmt.Errorf("err1") + ec := NewErrChanWrapper() + ec.ch <- expectedErr + + assert.Equal(t, 1, len(ec.ch)) + assert.Equal(t, expectedErr, ec.ReadFromChanNonBlocking()) + assert.Equal(t, 0, len(ec.ch)) + assert.Nil(t, ec.ReadFromChanNonBlocking()) +} + +func TestErrChan_Close(t *testing.T) { + t.Parallel() + + t.Run("close an already closed channel", func(t *testing.T) { + t.Parallel() + + ec := NewErrChanWrapper() + ec.Close() + + assert.True(t, ec.closed) + ec.Close() + }) + + t.Run("close a nil channel", func(t *testing.T) { + t.Parallel() + + ec := NewErrChanWrapper() + ec.ch = nil + ec.Close() + + assert.False(t, ec.closed) + }) +} + +func TestErrChan_Len(t *testing.T) { + t.Parallel() + + ec := NewErrChanWrapper() + assert.Equal(t, 0, ec.Len()) + + ec.ch <- fmt.Errorf("err1") + assert.Equal(t, 1, ec.Len()) + + ec.WriteInChanNonBlocking(fmt.Errorf("err2")) + assert.Equal(t, 1, ec.Len()) +} + +func TestErrChan_ConcurrentOperations(t *testing.T) { + t.Parallel() + + ec := NewErrChanWrapper() + numOperations := 1000 + numMethods := 2 + wg := sync.WaitGroup{} + wg.Add(numOperations) + for i := 0; i < numOperations; i++ { + go func(idx int) { + + if idx == numOperations-100 { + ec.Close() + } + + operation := idx % numMethods + switch operation { + case 0: + ec.WriteInChanNonBlocking(fmt.Errorf("err")) + case 1: + _ = ec.ReadFromChanNonBlocking() + default: + assert.Fail(t, "invalid numMethods") + } + + wg.Done() + }(i) + } + + wg.Wait() +} diff --git a/common/interface.go b/common/interface.go index 91da2d7a8f1..dd1d6d96925 100644 --- a/common/interface.go +++ b/common/interface.go @@ -14,7 +14,15 @@ import ( // TrieIteratorChannels defines the channels that are being used when iterating the trie nodes type TrieIteratorChannels struct { LeavesChan chan core.KeyValueHolder - ErrChan chan error + ErrChan BufferedErrChan +} + +// BufferedErrChan is an interface that defines the methods for a buffered error channel +type BufferedErrChan interface { + WriteInChanNonBlocking(err error) + ReadFromChanNonBlocking() error + Close() + IsInterfaceNil() bool } // Trie is an interface for Merkle Trees implementations @@ -335,6 +343,9 @@ type EnableEpochsHandler interface { IsMaxBlockchainHookCountersFlagEnabled() bool 
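BufferedErrChan and the reworked TrieIteratorChannels above replace the raw `chan error` plus the GetErrorFromChanNonBlocking helper deleted from common/channels.go: the wrapper buffers at most one error and tolerates repeated Close calls. A minimal consumer sketch, mirroring the getData rework in debug/process/stateExport.go further below (accountsDB and rootHash are assumed to be in scope):

    iteratorChannels := &common.TrieIteratorChannels{
        LeavesChan: make(chan core.KeyValueHolder),
        ErrChan:    errChan.NewErrChanWrapper(),
    }

    if err := accountsDB.GetAllLeaves(iteratorChannels, context.Background(), rootHash); err != nil {
        return err
    }

    for leaf := range iteratorChannels.LeavesChan {
        _ = leaf.Key() // process each streamed leaf
    }

    // non-blocking: returns nil when the iteration produced no error
    if err := iteratorChannels.ErrChan.ReadFromChanNonBlocking(); err != nil {
        return err
    }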
IsWipeSingleNFTLiquidityDecreaseEnabled() bool IsAlwaysSaveTokenMetaDataEnabled() bool + IsSetGuardianEnabled() bool + IsKeepExecOrderOnCreatedSCRsEnabled() bool + IsMultiClaimOnDelegationEnabled() bool IsInterfaceNil() bool } diff --git a/common/statistics/resourceMonitor_test.go b/common/statistics/resourceMonitor_test.go index c9614d5dca4..738a53275d6 100644 --- a/common/statistics/resourceMonitor_test.go +++ b/common/statistics/resourceMonitor_test.go @@ -5,10 +5,10 @@ import ( "fmt" "testing" - logger "github.com/multiversx/mx-chain-logger-go" stats "github.com/multiversx/mx-chain-go/common/statistics" "github.com/multiversx/mx-chain-go/common/statistics/disabled" "github.com/multiversx/mx-chain-go/config" + logger "github.com/multiversx/mx-chain-logger-go" "github.com/stretchr/testify/assert" ) diff --git a/common/statistics/softwareVersion/factory/softwareVersionFactory.go b/common/statistics/softwareVersion/factory/softwareVersionFactory.go index df30e135bb8..8713db01dc9 100644 --- a/common/statistics/softwareVersion/factory/softwareVersionFactory.go +++ b/common/statistics/softwareVersion/factory/softwareVersionFactory.go @@ -32,7 +32,5 @@ func NewSoftwareVersionFactory( // Create returns a software version checker object func (svf *softwareVersionFactory) Create() (*softwareVersion.SoftwareVersionChecker, error) { stableTagProvider := softwareVersion.NewStableTagProvider(svf.config.StableTagLocation) - softwareVersionChecker, err := softwareVersion.NewSoftwareVersionChecker(svf.statusHandler, stableTagProvider, svf.config.PollingIntervalInMinutes) - - return softwareVersionChecker, err + return softwareVersion.NewSoftwareVersionChecker(svf.statusHandler, stableTagProvider, svf.config.PollingIntervalInMinutes) } diff --git a/config/config.go b/config/config.go index e5b63a75426..63c2d8e38f2 100644 --- a/config/config.go +++ b/config/config.go @@ -279,6 +279,7 @@ type GeneralSettingsConfig struct { GenesisString string GenesisMaxNumberOfShards uint32 SyncProcessTimeInMillis uint32 + SetGuardianEpochsDelay uint32 } // FacadeConfig will hold different configuration option that will be passed to the node facade diff --git a/config/economicsConfig.go b/config/economicsConfig.go index f5fcd3dea43..35c67d92469 100644 --- a/config/economicsConfig.go +++ b/config/economicsConfig.go @@ -40,14 +40,16 @@ type GasLimitSetting struct { MaxGasLimitPerMetaMiniBlock string MaxGasLimitPerTx string MinGasLimit string + ExtraGasLimitGuardedTx string } // FeeSettings will hold economics fee settings type FeeSettings struct { - GasLimitSettings []GasLimitSetting - GasPerDataByte string - MinGasPrice string - GasPriceModifier float64 + GasLimitSettings []GasLimitSetting + GasPerDataByte string + MinGasPrice string + GasPriceModifier float64 + MaxGasPriceSetGuardian string } // EconomicsConfig will hold economics config diff --git a/config/epochConfig.go b/config/epochConfig.go index e729f362d91..deadeb79d11 100644 --- a/config/epochConfig.go +++ b/config/epochConfig.go @@ -93,7 +93,10 @@ type EnableEpochs struct { MaxBlockchainHookCountersEnableEpoch uint32 WipeSingleNFTLiquidityDecreaseEnableEpoch uint32 AlwaysSaveTokenMetaDataEnableEpoch uint32 + KeepExecOrderOnCreatedSCRsEnableEpoch uint32 + MultiClaimOnDelegationEnableEpoch uint32 BLSMultiSignerEnableEpoch []MultiSignerConfig + SetGuardianEnableEpoch uint32 } // GasScheduleByEpochs represents a gas schedule toml entry that will be applied from the provided epoch diff --git a/config/systemSmartContractsConfig.go b/config/systemSmartContractsConfig.go 
index 0d2833df91e..895aea97c8b 100644 --- a/config/systemSmartContractsConfig.go +++ b/config/systemSmartContractsConfig.go @@ -45,16 +45,16 @@ type GovernanceSystemSCConfigV1 struct { // system smart contract once it activates type GovernanceSystemSCConfigActive struct { ProposalCost string - MinQuorum string - MinPassThreshold string - MinVetoThreshold string + MinQuorum float64 + MinPassThreshold float64 + MinVetoThreshold float64 } // GovernanceSystemSCConfig defines the set of constants to initialize the governance system smart contract type GovernanceSystemSCConfig struct { - V1 GovernanceSystemSCConfigV1 - Active GovernanceSystemSCConfigActive - FirstWhitelistedAddress string + V1 GovernanceSystemSCConfigV1 + Active GovernanceSystemSCConfigActive + ChangeConfigAddress string } // DelegationManagerSystemSCConfig defines a set of constants to initialize the delegation manager system smart contract diff --git a/config/tomlConfig_test.go b/config/tomlConfig_test.go index 2c2173c2ee1..d7a3b1c7170 100644 --- a/config/tomlConfig_test.go +++ b/config/tomlConfig_test.go @@ -237,6 +237,8 @@ func TestTomlEconomicsParser(t *testing.T) { maxGasLimitPerBlock := "18446744073709551615" minGasPrice := "18446744073709551615" minGasLimit := "18446744073709551615" + extraGasLimitGuardedTx := "50000" + maxGasPriceSetGuardian := "1234567" protocolSustainabilityAddress := "erd1932eft30w753xyvme8d49qejgkjc09n5e49w4mwdjtm0neld797su0dlxp" denomination := 18 @@ -265,11 +267,13 @@ func TestTomlEconomicsParser(t *testing.T) { FeeSettings: FeeSettings{ GasLimitSettings: []GasLimitSetting{ { - MaxGasLimitPerBlock: maxGasLimitPerBlock, - MinGasLimit: minGasLimit, + MaxGasLimitPerBlock: maxGasLimitPerBlock, + MinGasLimit: minGasLimit, + ExtraGasLimitGuardedTx: extraGasLimitGuardedTx, }, }, - MinGasPrice: minGasPrice, + MinGasPrice: minGasPrice, + MaxGasPriceSetGuardian: maxGasPriceSetGuardian, }, } @@ -292,8 +296,9 @@ func TestTomlEconomicsParser(t *testing.T) { ProtocolSustainabilityAddress = "` + protocolSustainabilityAddress + `" [FeeSettings] - GasLimitSettings = [{EnableEpoch = 0, MaxGasLimitPerBlock = "` + maxGasLimitPerBlock + `", MaxGasLimitPerMiniBlock = "", MaxGasLimitPerMetaBlock = "", MaxGasLimitPerMetaMiniBlock = "", MaxGasLimitPerTx = "", MinGasLimit = "` + minGasLimit + `"}] + GasLimitSettings = [{EnableEpoch = 0, MaxGasLimitPerBlock = "` + maxGasLimitPerBlock + `", MaxGasLimitPerMiniBlock = "", MaxGasLimitPerMetaBlock = "", MaxGasLimitPerMetaMiniBlock = "", MaxGasLimitPerTx = "", MinGasLimit = "` + minGasLimit + `", ExtraGasLimitGuardedTx = "` + extraGasLimitGuardedTx + `"}] MinGasPrice = "` + minGasPrice + `" + MaxGasPriceSetGuardian = "` + maxGasPriceSetGuardian + `" ` cfg := EconomicsConfig{} @@ -684,6 +689,15 @@ func TestEnableEpochConfig(t *testing.T) { # RuntimeMemStoreLimitEnableEpoch represents the epoch when the condition for Runtime MemStore is enabled RuntimeMemStoreLimitEnableEpoch = 63 + # SetGuardianEnableEpoch represents the epoch when guard account feature is enabled + SetGuardianEnableEpoch = 64 + + # KeepExecOrderOnCreatedSCRsEnableEpoch represents the epoch when the execution order of created SCRs is ensured + KeepExecOrderOnCreatedSCRsEnableEpoch = 64 + + # MultiClaimOnDelegationEnableEpoch represents the epoch when the multi claim on delegation function is enabled + MultiClaimOnDelegationEnableEpoch = 65 + # MaxNodesChangeEnableEpoch holds configuration for changing the maximum number of nodes and the enabling epoch MaxNodesChangeEnableEpoch = [ { EpochEnable = 44, MaxNumNodes = 
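GovernanceSystemSCConfigActive now expresses MinQuorum, MinPassThreshold and MinVetoThreshold as float64 fractions of the total voting power (0.5 = 50%) instead of absolute stake strings, matching the systemSmartContractsConfig.toml change earlier in this patch. A hypothetical evaluation sketch; the governance contract's actual big-integer math is not part of this diff:

    // reachedQuorum is illustrative only: votedPower and totalPower are
    // placeholders for whatever stake representation the governance SC uses
    func reachedQuorum(votedPower, totalPower *big.Float, minQuorum float64) bool {
        threshold := new(big.Float).Mul(totalPower, big.NewFloat(minQuorum))
        return votedPower.Cmp(threshold) >= 0
    }

    // e.g. with MinQuorum = 0.5, a network with 1000 eGLD of voting power
    // needs at least 500 eGLD worth of votes for a proposal to count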
2169, NodesToShufflePerShard = 80 }, @@ -779,6 +793,9 @@ func TestEnableEpochConfig(t *testing.T) { AlwaysSaveTokenMetaDataEnableEpoch: 61, RuntimeCodeSizeFixEnableEpoch: 62, RuntimeMemStoreLimitEnableEpoch: 63, + SetGuardianEnableEpoch: 64, + MultiClaimOnDelegationEnableEpoch: 65, + KeepExecOrderOnCreatedSCRsEnableEpoch: 64, BLSMultiSignerEnableEpoch: []MultiSignerConfig{ { EnableEpoch: 0, diff --git a/consensus/mock/mockTestInitializer.go b/consensus/mock/mockTestInitializer.go index 4468c3d338b..6fa62a5a49d 100644 --- a/consensus/mock/mockTestInitializer.go +++ b/consensus/mock/mockTestInitializer.go @@ -205,7 +205,7 @@ func InitConsensusCoreWithMultiSigner(multiSigner crypto.MultiSigner) *Consensus messageSigningHandler := &MessageSigningHandlerStub{} peerBlacklistHandler := &PeerBlacklistHandlerStub{} multiSignerContainer := cryptoMocks.NewMultiSignerContainerMock(multiSigner) - signingHandler := &SigningHandlerStub{} + signingHandler := &consensusMocks.SigningHandlerStub{} container := &ConsensusCoreMock{ blockChain: blockChain, diff --git a/consensus/spos/bls/subroundEndRound_test.go b/consensus/spos/bls/subroundEndRound_test.go index 7e1a7eb83af..1cf43a179c0 100644 --- a/consensus/spos/bls/subroundEndRound_test.go +++ b/consensus/spos/bls/subroundEndRound_test.go @@ -20,6 +20,7 @@ import ( "github.com/multiversx/mx-chain-go/p2p" "github.com/multiversx/mx-chain-go/p2p/factory" "github.com/multiversx/mx-chain-go/testscommon" + consensusMocks "github.com/multiversx/mx-chain-go/testscommon/consensus" "github.com/multiversx/mx-chain-go/testscommon/p2pmocks" "github.com/multiversx/mx-chain-go/testscommon/statusHandler" "github.com/stretchr/testify/assert" @@ -329,7 +330,7 @@ func TestSubroundEndRound_DoEndRoundJobErrAggregatingSigShouldFail(t *testing.T) container := mock.InitConsensusCore() sr := *initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) - signingHandler := &mock.SigningHandlerStub{ + signingHandler := &consensusMocks.SigningHandlerStub{ AggregateSigsCalled: func(bitmap []byte, epoch uint32) ([]byte, error) { return nil, crypto.ErrNilHasher }, @@ -543,7 +544,7 @@ func TestSubroundEndRound_CheckIfSignatureIsFilled(t *testing.T) { expectedSignature := []byte("signature") container := mock.InitConsensusCore() - signingHandler := &mock.SigningHandlerStub{ + signingHandler := &consensusMocks.SigningHandlerStub{ CreateSignatureForPublicKeyCalled: func(publicKeyBytes []byte, msg []byte) ([]byte, error) { var receivedHdr block.Header _ = container.Marshalizer().Unmarshal(&receivedHdr, msg) @@ -957,7 +958,7 @@ func TestVerifyNodesOnAggSigVerificationFail(t *testing.T) { sr := *initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) expectedErr := errors.New("exptected error") - signingHandler := &mock.SigningHandlerStub{ + signingHandler := &consensusMocks.SigningHandlerStub{ SignatureShareCalled: func(index uint16) ([]byte, error) { return nil, expectedErr }, @@ -979,7 +980,7 @@ func TestVerifyNodesOnAggSigVerificationFail(t *testing.T) { sr := *initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) expectedErr := errors.New("exptected error") - signingHandler := &mock.SigningHandlerStub{ + signingHandler := &consensusMocks.SigningHandlerStub{ SignatureShareCalled: func(index uint16) ([]byte, error) { return nil, nil }, @@ -1005,7 +1006,7 @@ func TestVerifyNodesOnAggSigVerificationFail(t *testing.T) { container := mock.InitConsensusCore() sr := *initSubroundEndRoundWithContainer(container, 
&statusHandler.AppStatusHandlerStub{}) - signingHandler := &mock.SigningHandlerStub{ + signingHandler := &consensusMocks.SigningHandlerStub{ SignatureShareCalled: func(index uint16) ([]byte, error) { return nil, nil }, @@ -1050,7 +1051,7 @@ func TestComputeAddSigOnValidNodes(t *testing.T) { sr := *initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) expectedErr := errors.New("exptected error") - signingHandler := &mock.SigningHandlerStub{ + signingHandler := &consensusMocks.SigningHandlerStub{ AggregateSigsCalled: func(bitmap []byte, epoch uint32) ([]byte, error) { return nil, expectedErr }, @@ -1071,7 +1072,7 @@ func TestComputeAddSigOnValidNodes(t *testing.T) { sr := *initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) expectedErr := errors.New("exptected error") - signingHandler := &mock.SigningHandlerStub{ + signingHandler := &consensusMocks.SigningHandlerStub{ SetAggregatedSigCalled: func(_ []byte) error { return expectedErr }, @@ -1110,7 +1111,7 @@ func TestSubroundEndRound_DoEndRoundJobByLeaderVerificationFail(t *testing.T) { verifySigShareNumCalls := 0 verifyFirstCall := true - signingHandler := &mock.SigningHandlerStub{ + signingHandler := &consensusMocks.SigningHandlerStub{ SignatureShareCalled: func(index uint16) ([]byte, error) { return nil, nil }, @@ -1157,7 +1158,7 @@ func TestSubroundEndRound_DoEndRoundJobByLeaderVerificationFail(t *testing.T) { verifySigShareNumCalls := 0 verifyFirstCall := true - signingHandler := &mock.SigningHandlerStub{ + signingHandler := &consensusMocks.SigningHandlerStub{ SignatureShareCalled: func(index uint16) ([]byte, error) { return nil, nil }, @@ -1420,7 +1421,7 @@ func TestVerifyInvalidSigners(t *testing.T) { } wasCalled := false - signingHandler := &mock.SigningHandlerStub{ + signingHandler := &consensusMocks.SigningHandlerStub{ VerifySingleSignatureCalled: func(publicKeyBytes []byte, message []byte, signature []byte) error { wasCalled = true return errors.New("expected err") diff --git a/consensus/spos/bls/subroundSignature_test.go b/consensus/spos/bls/subroundSignature_test.go index 613d1f315e8..d327a6ea206 100644 --- a/consensus/spos/bls/subroundSignature_test.go +++ b/consensus/spos/bls/subroundSignature_test.go @@ -11,6 +11,7 @@ import ( "github.com/multiversx/mx-chain-go/consensus/spos" "github.com/multiversx/mx-chain-go/consensus/spos/bls" "github.com/multiversx/mx-chain-go/testscommon" + consensusMocks "github.com/multiversx/mx-chain-go/testscommon/consensus" "github.com/multiversx/mx-chain-go/testscommon/statusHandler" "github.com/pkg/errors" "github.com/stretchr/testify/assert" @@ -277,7 +278,7 @@ func TestSubroundSignature_DoSignatureJob(t *testing.T) { sr.Data = []byte("X") err := errors.New("create signature share error") - signingHandler := &mock.SigningHandlerStub{ + signingHandler := &consensusMocks.SigningHandlerStub{ CreateSignatureShareForPublicKeyCalled: func(msg []byte, index uint16, epoch uint32, publicKeyBytes []byte) ([]byte, error) { return nil, err }, @@ -287,7 +288,7 @@ func TestSubroundSignature_DoSignatureJob(t *testing.T) { r = sr.DoSignatureJob() assert.False(t, r) - signingHandler = &mock.SigningHandlerStub{ + signingHandler = &consensusMocks.SigningHandlerStub{ CreateSignatureShareForPublicKeyCalled: func(msg []byte, index uint16, epoch uint32, publicKeyBytes []byte) ([]byte, error) { return []byte("SIG"), nil }, @@ -367,7 +368,7 @@ func TestSubroundSignature_ReceivedSignatureStoreShareFailed(t *testing.T) { errStore := errors.New("signature share 
store failed") storeSigShareCalled := false - signingHandler := &mock.SigningHandlerStub{ + signingHandler := &consensusMocks.SigningHandlerStub{ VerifySignatureShareCalled: func(index uint16, sig, msg []byte, epoch uint32) error { return nil }, diff --git a/consensus/spos/consensusCoreValidator_test.go b/consensus/spos/consensusCoreValidator_test.go index 41b965887b1..acdc008cbe8 100644 --- a/consensus/spos/consensusCoreValidator_test.go +++ b/consensus/spos/consensusCoreValidator_test.go @@ -34,7 +34,7 @@ func initConsensusDataContainer() *ConsensusCore { messageSigningHandler := &mock.MessageSigningHandlerStub{} peerBlacklistHandler := &mock.PeerBlacklistHandlerStub{} multiSignerContainer := cryptoMocks.NewMultiSignerContainerMock(multiSignerMock) - signingHandler := &mock.SigningHandlerStub{} + signingHandler := &consensusMocks.SigningHandlerStub{} return &ConsensusCore{ blockChain: blockChain, diff --git a/dataRetriever/txpool/memorytests/memory_test.go b/dataRetriever/txpool/memorytests/memory_test.go index 95861c78839..d2d48fbbcd5 100644 --- a/dataRetriever/txpool/memorytests/memory_test.go +++ b/dataRetriever/txpool/memorytests/memory_test.go @@ -36,25 +36,25 @@ func TestShardedTxPool_MemoryFootprint(t *testing.T) { journals = append(journals, runScenario(t, newScenario(200, 1, core.MegabyteSize, "0"), memoryAssertion{200, 200}, memoryAssertion{0, 1})) journals = append(journals, runScenario(t, newScenario(10, 1000, 20480, "0"), memoryAssertion{190, 205}, memoryAssertion{1, 4})) journals = append(journals, runScenario(t, newScenario(10000, 1, 1024, "0"), memoryAssertion{10, 16}, memoryAssertion{4, 10})) - journals = append(journals, runScenario(t, newScenario(1, 60000, 256, "0"), memoryAssertion{30, 32}, memoryAssertion{10, 16})) - journals = append(journals, runScenario(t, newScenario(10, 10000, 100, "0"), memoryAssertion{36, 40}, memoryAssertion{16, 24})) - journals = append(journals, runScenario(t, newScenario(100000, 1, 1024, "0"), memoryAssertion{120, 128}, memoryAssertion{56, 60})) + journals = append(journals, runScenario(t, newScenario(1, 60000, 256, "0"), memoryAssertion{30, 36}, memoryAssertion{10, 16})) + journals = append(journals, runScenario(t, newScenario(10, 10000, 100, "0"), memoryAssertion{36, 46}, memoryAssertion{16, 24})) + journals = append(journals, runScenario(t, newScenario(100000, 1, 1024, "0"), memoryAssertion{120, 136}, memoryAssertion{56, 60})) // With larger memory footprint - journals = append(journals, runScenario(t, newScenario(100000, 3, 650, "0"), memoryAssertion{290, 310}, memoryAssertion{95, 110})) - journals = append(journals, runScenario(t, newScenario(150000, 2, 650, "0"), memoryAssertion{290, 310}, memoryAssertion{120, 130})) - journals = append(journals, runScenario(t, newScenario(300000, 1, 650, "0"), memoryAssertion{290, 310}, memoryAssertion{170, 190})) - journals = append(journals, runScenario(t, newScenario(30, 10000, 650, "0"), memoryAssertion{290, 310}, memoryAssertion{60, 70})) - journals = append(journals, runScenario(t, newScenario(300, 1000, 650, "0"), memoryAssertion{290, 310}, memoryAssertion{60, 70})) + journals = append(journals, runScenario(t, newScenario(100000, 3, 650, "0"), memoryAssertion{290, 320}, memoryAssertion{95, 120})) + journals = append(journals, runScenario(t, newScenario(150000, 2, 650, "0"), memoryAssertion{290, 320}, memoryAssertion{120, 140})) + journals = append(journals, runScenario(t, newScenario(300000, 1, 650, "0"), memoryAssertion{290, 320}, memoryAssertion{170, 190})) + journals = append(journals, 
runScenario(t, newScenario(30, 10000, 650, "0"), memoryAssertion{290, 320}, memoryAssertion{60, 75})) + journals = append(journals, runScenario(t, newScenario(300, 1000, 650, "0"), memoryAssertion{290, 320}, memoryAssertion{60, 80})) // Scenarios where destination == me journals = append(journals, runScenario(t, newScenario(100, 1, core.MegabyteSize, "1_0"), memoryAssertion{90, 100}, memoryAssertion{0, 1})) journals = append(journals, runScenario(t, newScenario(10000, 1, 10240, "1_0"), memoryAssertion{96, 128}, memoryAssertion{0, 4})) - journals = append(journals, runScenario(t, newScenario(10, 10000, 1000, "1_0"), memoryAssertion{96, 128}, memoryAssertion{16, 24})) - journals = append(journals, runScenario(t, newScenario(150000, 1, 128, "1_0"), memoryAssertion{50, 65}, memoryAssertion{30, 36})) - journals = append(journals, runScenario(t, newScenario(1, 150000, 128, "1_0"), memoryAssertion{50, 65}, memoryAssertion{30, 36})) + journals = append(journals, runScenario(t, newScenario(10, 10000, 1000, "1_0"), memoryAssertion{96, 136}, memoryAssertion{16, 25})) + journals = append(journals, runScenario(t, newScenario(150000, 1, 128, "1_0"), memoryAssertion{50, 75}, memoryAssertion{30, 40})) + journals = append(journals, runScenario(t, newScenario(1, 150000, 128, "1_0"), memoryAssertion{50, 75}, memoryAssertion{30, 40})) for _, journal := range journals { journal.displayFootprintsSummary() diff --git a/debug/process/stateExport.go b/debug/process/stateExport.go index 9fbdd6ce1bc..831aaebfc0e 100644 --- a/debug/process/stateExport.go +++ b/debug/process/stateExport.go @@ -9,6 +9,7 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/common/errChan" "github.com/multiversx/mx-chain-go/state" ) @@ -66,7 +67,7 @@ func getCode(accountsDB state.AccountsAdapter, codeHash []byte) ([]byte, error) func getData(accountsDB state.AccountsAdapter, rootHash []byte, address []byte) ([]string, error) { leavesChannels := &common.TrieIteratorChannels{ LeavesChan: make(chan core.KeyValueHolder), - ErrChan: make(chan error, 1), + ErrChan: errChan.NewErrChanWrapper(), } err := accountsDB.GetAllLeaves(leavesChannels, context.Background(), rootHash) @@ -89,7 +90,7 @@ func getData(accountsDB state.AccountsAdapter, rootHash []byte, address []byte) hex.EncodeToString(valWithoutSuffix))) } - err = <-leavesChannels.ErrChan + err = leavesChannels.ErrChan.ReadFromChanNonBlocking() if err != nil { return nil, fmt.Errorf("%w while trying to export data on hex root hash %s, address %s", err, hex.EncodeToString(rootHash), hex.EncodeToString(address)) diff --git a/epochStart/bootstrap/factory/epochStartInterceptorsContainerFactory.go b/epochStart/bootstrap/factory/epochStartInterceptorsContainerFactory.go index 730d7cd82e8..5ab4c67d1bb 100644 --- a/epochStart/bootstrap/factory/epochStartInterceptorsContainerFactory.go +++ b/epochStart/bootstrap/factory/epochStartInterceptorsContainerFactory.go @@ -50,7 +50,6 @@ func NewEpochStartInterceptorsContainer(args ArgsEpochStartInterceptorContainer) if check.IfNil(args.CryptoComponents) { return nil, epochStart.ErrNilCryptoComponentsHolder } - if check.IfNil(args.CoreComponents.AddressPubKeyConverter()) { return nil, epochStart.ErrNilPubkeyConverter } diff --git a/epochStart/bootstrap/metaStorageHandler.go b/epochStart/bootstrap/metaStorageHandler.go index 6872afbfdb6..0a25fa08b45 100644 --- a/epochStart/bootstrap/metaStorageHandler.go +++ b/epochStart/bootstrap/metaStorageHandler.go @@ -36,6 +36,7 @@ func 
NewMetaStorageHandler( currentEpoch uint32, uint64Converter typeConverters.Uint64ByteSliceConverter, nodeTypeProvider NodeTypeProviderHandler, + nodeProcessingMode common.NodeProcessingMode, snapshotsEnabled bool, managedPeersHolder common.ManagedPeersHolder, ) (*metaStorageHandler, error) { @@ -51,6 +52,7 @@ func NewMetaStorageHandler( CurrentEpoch: currentEpoch, StorageType: factory.BootstrapStorageService, CreateTrieEpochRootHashStorer: false, + NodeProcessingMode: nodeProcessingMode, SnapshotsEnabled: snapshotsEnabled, ManagedPeersHolder: managedPeersHolder, }, diff --git a/epochStart/bootstrap/metaStorageHandler_test.go b/epochStart/bootstrap/metaStorageHandler_test.go index b5e897e3c16..f196823c94d 100644 --- a/epochStart/bootstrap/metaStorageHandler_test.go +++ b/epochStart/bootstrap/metaStorageHandler_test.go @@ -10,6 +10,7 @@ import ( "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-core-go/data/block" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/epochStart/mock" @@ -34,7 +35,20 @@ func TestNewMetaStorageHandler_InvalidConfigErr(t *testing.T) { nodeTypeProvider := &nodeTypeProviderMock.NodeTypeProviderStub{} managedPeersHolder := &testscommon.ManagedPeersHolderStub{} - mtStrHandler, err := NewMetaStorageHandler(gCfg, prefsConfig, coordinator, pathManager, marshalizer, hasher, 1, uit64Cvt, nodeTypeProvider, false, managedPeersHolder) + mtStrHandler, err := NewMetaStorageHandler( + gCfg, + prefsConfig, + coordinator, + pathManager, + marshalizer, + hasher, + 1, + uit64Cvt, + nodeTypeProvider, + common.Normal, + false, + managedPeersHolder, + ) assert.True(t, check.IfNil(mtStrHandler)) assert.NotNil(t, err) } @@ -53,7 +67,20 @@ func TestNewMetaStorageHandler_CreateForMetaErr(t *testing.T) { uit64Cvt := &mock.Uint64ByteSliceConverterMock{} nodeTypeProvider := &nodeTypeProviderMock.NodeTypeProviderStub{} managedPeersHolder := &testscommon.ManagedPeersHolderStub{} - mtStrHandler, err := NewMetaStorageHandler(gCfg, prefsConfig, coordinator, pathManager, marshalizer, hasher, 1, uit64Cvt, nodeTypeProvider, false, managedPeersHolder) + mtStrHandler, err := NewMetaStorageHandler( + gCfg, + prefsConfig, + coordinator, + pathManager, + marshalizer, + hasher, + 1, + uit64Cvt, + nodeTypeProvider, + common.Normal, + false, + managedPeersHolder, + ) assert.False(t, check.IfNil(mtStrHandler)) assert.Nil(t, err) } @@ -73,7 +100,20 @@ func TestMetaStorageHandler_saveLastHeader(t *testing.T) { nodeTypeProvider := &nodeTypeProviderMock.NodeTypeProviderStub{} managedPeersHolder := &testscommon.ManagedPeersHolderStub{} - mtStrHandler, _ := NewMetaStorageHandler(gCfg, prefsConfig, coordinator, pathManager, marshalizer, hasher, 1, uit64Cvt, nodeTypeProvider, false, managedPeersHolder) + mtStrHandler, _ := NewMetaStorageHandler( + gCfg, + prefsConfig, + coordinator, + pathManager, + marshalizer, + hasher, + 1, + uit64Cvt, + nodeTypeProvider, + common.Normal, + false, + managedPeersHolder, + ) header := &block.MetaBlock{Nonce: 0} @@ -102,7 +142,20 @@ func TestMetaStorageHandler_saveLastCrossNotarizedHeaders(t *testing.T) { nodeTypeProvider := &nodeTypeProviderMock.NodeTypeProviderStub{} managedPeersHolder := &testscommon.ManagedPeersHolderStub{} - mtStrHandler, _ := NewMetaStorageHandler(gCfg, prefsConfig, coordinator, pathManager, marshalizer, hasher, 1, uit64Cvt, nodeTypeProvider, false, 
managedPeersHolder) + mtStrHandler, _ := NewMetaStorageHandler( + gCfg, + prefsConfig, + coordinator, + pathManager, + marshalizer, + hasher, + 1, + uit64Cvt, + nodeTypeProvider, + common.Normal, + false, + managedPeersHolder, + ) hdr1 := &block.Header{Nonce: 1} hdr2 := &block.Header{Nonce: 2} @@ -137,7 +190,20 @@ func TestMetaStorageHandler_saveTriggerRegistry(t *testing.T) { nodeTypeProvider := &nodeTypeProviderMock.NodeTypeProviderStub{} managedPeersHolder := &testscommon.ManagedPeersHolderStub{} - mtStrHandler, _ := NewMetaStorageHandler(gCfg, prefsConfig, coordinator, pathManager, marshalizer, hasher, 1, uit64Cvt, nodeTypeProvider, false, managedPeersHolder) + mtStrHandler, _ := NewMetaStorageHandler( + gCfg, + prefsConfig, + coordinator, + pathManager, + marshalizer, + hasher, + 1, + uit64Cvt, + nodeTypeProvider, + common.Normal, + false, + managedPeersHolder, + ) components := &ComponentsNeededForBootstrap{ EpochStartMetaBlock: &block.MetaBlock{Nonce: 3}, @@ -163,7 +229,20 @@ func TestMetaStorageHandler_saveDataToStorage(t *testing.T) { nodeTypeProvider := &nodeTypeProviderMock.NodeTypeProviderStub{} managedPeersHolder := &testscommon.ManagedPeersHolderStub{} - mtStrHandler, _ := NewMetaStorageHandler(gCfg, prefsConfig, coordinator, pathManager, marshalizer, hasher, 1, uit64Cvt, nodeTypeProvider, false, managedPeersHolder) + mtStrHandler, _ := NewMetaStorageHandler( + gCfg, + prefsConfig, + coordinator, + pathManager, + marshalizer, + hasher, + 1, + uit64Cvt, + nodeTypeProvider, + common.Normal, + false, + managedPeersHolder, + ) components := &ComponentsNeededForBootstrap{ EpochStartMetaBlock: &block.MetaBlock{Nonce: 3}, @@ -206,7 +285,20 @@ func testMetaWithMissingStorer(missingUnit dataRetriever.UnitType, atCallNumber nodeTypeProvider := &nodeTypeProviderMock.NodeTypeProviderStub{} managedPeersHolder := &testscommon.ManagedPeersHolderStub{} - mtStrHandler, _ := NewMetaStorageHandler(gCfg, prefsConfig, coordinator, pathManager, marshalizer, hasher, 1, uit64Cvt, nodeTypeProvider, false, managedPeersHolder) + mtStrHandler, _ := NewMetaStorageHandler( + gCfg, + prefsConfig, + coordinator, + pathManager, + marshalizer, + hasher, + 1, + uit64Cvt, + nodeTypeProvider, + common.Normal, + false, + managedPeersHolder, + ) counter := 0 mtStrHandler.storageService = &storageStubs.ChainStorerStub{ GetStorerCalled: func(unitType dataRetriever.UnitType) (storage.Storer, error) { diff --git a/epochStart/bootstrap/process.go b/epochStart/bootstrap/process.go index 724cdb72135..65681c90c26 100644 --- a/epochStart/bootstrap/process.go +++ b/epochStart/bootstrap/process.go @@ -115,6 +115,7 @@ type epochStartBootstrap struct { checkNodesOnDisk bool bootstrapHeartbeatSender update.Closer trieSyncStatisticsProvider common.SizeSyncStatisticsHandler + nodeProcessingMode common.NodeProcessingMode // created components requestHandler process.RequestHandler @@ -178,6 +179,7 @@ type ArgsEpochStartBootstrap struct { DataSyncerCreator types.ScheduledDataSyncerCreator ScheduledSCRsStorer storage.Storer TrieSyncStatisticsProvider common.SizeSyncStatisticsHandler + NodeProcessingMode common.NodeProcessingMode } type dataToSync struct { @@ -224,6 +226,7 @@ func NewEpochStartBootstrap(args ArgsEpochStartBootstrap) (*epochStartBootstrap, storerScheduledSCRs: args.ScheduledSCRsStorer, shardCoordinator: args.GenesisShardCoordinator, trieSyncStatisticsProvider: args.TrieSyncStatisticsProvider, + nodeProcessingMode: args.NodeProcessingMode, } whiteListCache, err := 
storageunit.NewCache(storageFactory.GetCacherFromConfig(epochStartProvider.generalConfig.WhiteListPool)) @@ -763,6 +766,7 @@ func (e *epochStartBootstrap) requestAndProcessForMeta(peerMiniBlocks []*block.M e.epochStartMeta.GetEpoch(), e.coreComponentsHolder.Uint64ByteSliceConverter(), e.coreComponentsHolder.NodeTypeProvider(), + e.nodeProcessingMode, e.flagsConfig.SnapshotsEnabled, e.cryptoComponentsHolder.ManagedPeersHolder(), ) @@ -932,6 +936,7 @@ func (e *epochStartBootstrap) requestAndProcessForShard(peerMiniBlocks []*block. e.baseData.lastEpoch, e.coreComponentsHolder.Uint64ByteSliceConverter(), e.coreComponentsHolder.NodeTypeProvider(), + e.nodeProcessingMode, e.flagsConfig.SnapshotsEnabled, e.cryptoComponentsHolder.ManagedPeersHolder(), ) @@ -1115,6 +1120,7 @@ func (e *epochStartBootstrap) createStorageService( CurrentEpoch: startEpoch, StorageType: storageFactory.BootstrapStorageService, CreateTrieEpochRootHashStorer: createTrieEpochRootHashStorer, + NodeProcessingMode: e.nodeProcessingMode, SnapshotsEnabled: e.flagsConfig.SnapshotsEnabled, ManagedPeersHolder: e.cryptoComponentsHolder.ManagedPeersHolder(), }) diff --git a/epochStart/bootstrap/shardStorageHandler.go b/epochStart/bootstrap/shardStorageHandler.go index 44fedbc8bf8..9ffc5384e31 100644 --- a/epochStart/bootstrap/shardStorageHandler.go +++ b/epochStart/bootstrap/shardStorageHandler.go @@ -40,6 +40,7 @@ func NewShardStorageHandler( currentEpoch uint32, uint64Converter typeConverters.Uint64ByteSliceConverter, nodeTypeProvider core.NodeTypeProviderHandler, + nodeProcessingMode common.NodeProcessingMode, snapshotsEnabled bool, managedPeersHolder common.ManagedPeersHolder, ) (*shardStorageHandler, error) { @@ -55,6 +56,7 @@ func NewShardStorageHandler( CurrentEpoch: currentEpoch, StorageType: factory.BootstrapStorageService, CreateTrieEpochRootHashStorer: false, + NodeProcessingMode: nodeProcessingMode, SnapshotsEnabled: snapshotsEnabled, ManagedPeersHolder: managedPeersHolder, }, diff --git a/epochStart/bootstrap/shardStorageHandler_test.go b/epochStart/bootstrap/shardStorageHandler_test.go index 3405cca7f57..45c1ee48abd 100644 --- a/epochStart/bootstrap/shardStorageHandler_test.go +++ b/epochStart/bootstrap/shardStorageHandler_test.go @@ -40,7 +40,20 @@ func TestNewShardStorageHandler_ShouldWork(t *testing.T) { }() args := createDefaultShardStorageArgs() - shardStorage, err := NewShardStorageHandler(args.generalConfig, args.prefsConfig, args.shardCoordinator, args.pathManagerHandler, args.marshalizer, args.hasher, 1, args.uint64Converter, args.nodeTypeProvider, false, args.managedPeersHolder) + shardStorage, err := NewShardStorageHandler( + args.generalConfig, + args.prefsConfig, + args.shardCoordinator, + args.pathManagerHandler, + args.marshalizer, + args.hasher, + 1, + args.uint64Converter, + args.nodeTypeProvider, + args.nodeProcessingMode, + false, + args.managedPeersHolder, + ) assert.False(t, check.IfNil(shardStorage)) assert.Nil(t, err) @@ -52,7 +65,20 @@ func TestShardStorageHandler_SaveDataToStorageShardDataNotFound(t *testing.T) { }() args := createDefaultShardStorageArgs() - shardStorage, _ := NewShardStorageHandler(args.generalConfig, args.prefsConfig, args.shardCoordinator, args.pathManagerHandler, args.marshalizer, args.hasher, 1, args.uint64Converter, args.nodeTypeProvider, false, args.managedPeersHolder) + shardStorage, _ := NewShardStorageHandler( + args.generalConfig, + args.prefsConfig, + args.shardCoordinator, + args.pathManagerHandler, + args.marshalizer, + args.hasher, + 1, + args.uint64Converter, + 
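The nodeProcessingMode parameter threaded through NewMetaStorageHandler, NewShardStorageHandler and epochStartBootstrap above ultimately comes from the GetNodeProcessingMode helper added in common/configParser.go. A sketch of the wiring at a call site, with every unrelated field elided:

    // common.ImportDb is selected when IsImportDBMode is true,
    // common.Normal otherwise (see common/configParser.go above)
    processingMode := common.GetNodeProcessingMode(&config.ImportDbConfig{
        IsImportDBMode: false,
    })

    args := bootstrap.ArgsEpochStartBootstrap{
        // ... all other mandatory fields elided in this sketch ...
        NodeProcessingMode: processingMode,
    }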
args.nodeTypeProvider, + args.nodeProcessingMode, + false, + args.managedPeersHolder, + ) components := &ComponentsNeededForBootstrap{ EpochStartMetaBlock: &block.MetaBlock{Epoch: 1}, @@ -70,7 +96,20 @@ func TestShardStorageHandler_SaveDataToStorageMissingHeader(t *testing.T) { }() args := createDefaultShardStorageArgs() - shardStorage, _ := NewShardStorageHandler(args.generalConfig, args.prefsConfig, args.shardCoordinator, args.pathManagerHandler, args.marshalizer, args.hasher, 1, args.uint64Converter, args.nodeTypeProvider, false, args.managedPeersHolder) + shardStorage, _ := NewShardStorageHandler( + args.generalConfig, + args.prefsConfig, + args.shardCoordinator, + args.pathManagerHandler, + args.marshalizer, + args.hasher, + 1, + args.uint64Converter, + args.nodeTypeProvider, + args.nodeProcessingMode, + false, + args.managedPeersHolder, + ) components := &ComponentsNeededForBootstrap{ EpochStartMetaBlock: &block.MetaBlock{ @@ -111,7 +150,20 @@ func testShardWithMissingStorer(missingUnit dataRetriever.UnitType, atCallNumber counter := 0 args := createDefaultShardStorageArgs() - shardStorage, _ := NewShardStorageHandler(args.generalConfig, args.prefsConfig, args.shardCoordinator, args.pathManagerHandler, args.marshalizer, args.hasher, 1, args.uint64Converter, args.nodeTypeProvider, false, args.managedPeersHolder) + shardStorage, _ := NewShardStorageHandler( + args.generalConfig, + args.prefsConfig, + args.shardCoordinator, + args.pathManagerHandler, + args.marshalizer, + args.hasher, + 1, + args.uint64Converter, + args.nodeTypeProvider, + args.nodeProcessingMode, + false, + args.managedPeersHolder, + ) shardStorage.storageService = &storageStubs.ChainStorerStub{ GetStorerCalled: func(unitType dataRetriever.UnitType) (storage.Storer, error) { counter++ @@ -153,7 +205,20 @@ func TestShardStorageHandler_SaveDataToStorage(t *testing.T) { }() args := createDefaultShardStorageArgs() - shardStorage, _ := NewShardStorageHandler(args.generalConfig, args.prefsConfig, args.shardCoordinator, args.pathManagerHandler, args.marshalizer, args.hasher, 1, args.uint64Converter, args.nodeTypeProvider, false, args.managedPeersHolder) + shardStorage, _ := NewShardStorageHandler( + args.generalConfig, + args.prefsConfig, + args.shardCoordinator, + args.pathManagerHandler, + args.marshalizer, + args.hasher, + 1, + args.uint64Converter, + args.nodeTypeProvider, + args.nodeProcessingMode, + false, + args.managedPeersHolder, + ) hash1 := []byte("hash1") hdr1 := block.MetaBlock{ @@ -252,7 +317,20 @@ func TestShardStorageHandler_getCrossProcessedMiniBlockHeadersDestMe(t *testing. mbs := append(intraMbs, crossMbs...) 
args := createDefaultShardStorageArgs() - shardStorage, _ := NewShardStorageHandler(args.generalConfig, args.prefsConfig, args.shardCoordinator, args.pathManagerHandler, args.marshalizer, args.hasher, 1, args.uint64Converter, args.nodeTypeProvider, false, args.managedPeersHolder) + shardStorage, _ := NewShardStorageHandler( + args.generalConfig, + args.prefsConfig, + args.shardCoordinator, + args.pathManagerHandler, + args.marshalizer, + args.hasher, + 1, + args.uint64Converter, + args.nodeTypeProvider, + args.nodeProcessingMode, + false, + args.managedPeersHolder, + ) shardHeader := &block.Header{ Nonce: 100, MiniBlockHeaders: mbs, @@ -272,7 +350,20 @@ func TestShardStorageHandler_getProcessedAndPendingMiniBlocksWithScheduledErrorG t.Parallel() args := createDefaultShardStorageArgs() - shardStorage, _ := NewShardStorageHandler(args.generalConfig, args.prefsConfig, args.shardCoordinator, args.pathManagerHandler, args.marshalizer, args.hasher, 1, args.uint64Converter, args.nodeTypeProvider, false, args.managedPeersHolder) + shardStorage, _ := NewShardStorageHandler( + args.generalConfig, + args.prefsConfig, + args.shardCoordinator, + args.pathManagerHandler, + args.marshalizer, + args.hasher, + 1, + args.uint64Converter, + args.nodeTypeProvider, + args.nodeProcessingMode, + false, + args.managedPeersHolder, + ) meta := &block.MetaBlock{ Nonce: 100, EpochStart: block.EpochStart{}, @@ -290,7 +381,20 @@ func TestShardStorageHandler_getProcessedAndPendingMiniBlocksWithScheduledNoSche t.Parallel() args := createDefaultShardStorageArgs() - shardStorage, _ := NewShardStorageHandler(args.generalConfig, args.prefsConfig, args.shardCoordinator, args.pathManagerHandler, args.marshalizer, args.hasher, 1, args.uint64Converter, args.nodeTypeProvider, false, args.managedPeersHolder) + shardStorage, _ := NewShardStorageHandler( + args.generalConfig, + args.prefsConfig, + args.shardCoordinator, + args.pathManagerHandler, + args.marshalizer, + args.hasher, + 1, + args.uint64Converter, + args.nodeTypeProvider, + args.nodeProcessingMode, + false, + args.managedPeersHolder, + ) scenario := createPendingAndProcessedMiniBlocksScenario() processedMiniBlocks, pendingMiniBlocks, err := shardStorage.getProcessedAndPendingMiniBlocksWithScheduled(scenario.metaBlock, scenario.headers, scenario.shardHeader, false) @@ -305,7 +409,20 @@ func TestShardStorageHandler_getProcessedAndPendingMiniBlocksWithScheduledWrongH t.Parallel() args := createDefaultShardStorageArgs() - shardStorage, _ := NewShardStorageHandler(args.generalConfig, args.prefsConfig, args.shardCoordinator, args.pathManagerHandler, args.marshalizer, args.hasher, 1, args.uint64Converter, args.nodeTypeProvider, false, args.managedPeersHolder) + shardStorage, _ := NewShardStorageHandler( + args.generalConfig, + args.prefsConfig, + args.shardCoordinator, + args.pathManagerHandler, + args.marshalizer, + args.hasher, + 1, + args.uint64Converter, + args.nodeTypeProvider, + args.nodeProcessingMode, + false, + args.managedPeersHolder, + ) scenario := createPendingAndProcessedMiniBlocksScenario() wrongShardHeader := &block.MetaBlock{} @@ -327,7 +444,20 @@ func TestShardStorageHandler_getProcessedAndPendingMiniBlocksWithScheduled(t *te t.Parallel() args := createDefaultShardStorageArgs() - shardStorage, _ := NewShardStorageHandler(args.generalConfig, args.prefsConfig, args.shardCoordinator, args.pathManagerHandler, args.marshalizer, args.hasher, 1, args.uint64Converter, args.nodeTypeProvider, false, args.managedPeersHolder) + shardStorage, _ := NewShardStorageHandler( + 
args.generalConfig, + args.prefsConfig, + args.shardCoordinator, + args.pathManagerHandler, + args.marshalizer, + args.hasher, + 1, + args.uint64Converter, + args.nodeTypeProvider, + args.nodeProcessingMode, + false, + args.managedPeersHolder, + ) scenario := createPendingAndProcessedMiniBlocksScenario() processedMiniBlocks, pendingMiniBlocks, err := shardStorage.getProcessedAndPendingMiniBlocksWithScheduled(scenario.metaBlock, scenario.headers, scenario.shardHeader, true) @@ -495,7 +625,20 @@ func TestShardStorageHandler_getProcessedAndPendingMiniBlocksErrorGettingEpochSt }() args := createDefaultShardStorageArgs() - shardStorage, _ := NewShardStorageHandler(args.generalConfig, args.prefsConfig, args.shardCoordinator, args.pathManagerHandler, args.marshalizer, args.hasher, 1, args.uint64Converter, args.nodeTypeProvider, false, args.managedPeersHolder) + shardStorage, _ := NewShardStorageHandler( + args.generalConfig, + args.prefsConfig, + args.shardCoordinator, + args.pathManagerHandler, + args.marshalizer, + args.hasher, + 1, + args.uint64Converter, + args.nodeTypeProvider, + args.nodeProcessingMode, + false, + args.managedPeersHolder, + ) meta := &block.MetaBlock{ Nonce: 100, EpochStart: block.EpochStart{}, @@ -518,7 +661,20 @@ func TestShardStorageHandler_getProcessedAndPendingMiniBlocksMissingHeader(t *te lastFinishedMetaBlock := "last finished meta block" args := createDefaultShardStorageArgs() - shardStorage, _ := NewShardStorageHandler(args.generalConfig, args.prefsConfig, args.shardCoordinator, args.pathManagerHandler, args.marshalizer, args.hasher, 1, args.uint64Converter, args.nodeTypeProvider, false, args.managedPeersHolder) + shardStorage, _ := NewShardStorageHandler( + args.generalConfig, + args.prefsConfig, + args.shardCoordinator, + args.pathManagerHandler, + args.marshalizer, + args.hasher, + 1, + args.uint64Converter, + args.nodeTypeProvider, + args.nodeProcessingMode, + false, + args.managedPeersHolder, + ) meta := &block.MetaBlock{ Nonce: 100, EpochStart: block.EpochStart{ @@ -544,7 +700,20 @@ func TestShardStorageHandler_getProcessedAndPendingMiniBlocksWrongHeader(t *test lastFinishedMetaBlockHash := "last finished meta block" firstPendingMeta := "first pending meta" args := createDefaultShardStorageArgs() - shardStorage, _ := NewShardStorageHandler(args.generalConfig, args.prefsConfig, args.shardCoordinator, args.pathManagerHandler, args.marshalizer, args.hasher, 1, args.uint64Converter, args.nodeTypeProvider, false, args.managedPeersHolder) + shardStorage, _ := NewShardStorageHandler( + args.generalConfig, + args.prefsConfig, + args.shardCoordinator, + args.pathManagerHandler, + args.marshalizer, + args.hasher, + 1, + args.uint64Converter, + args.nodeTypeProvider, + args.nodeProcessingMode, + false, + args.managedPeersHolder, + ) lastFinishedHeaders := createDefaultEpochStartShardData([]byte(lastFinishedMetaBlockHash), []byte("headerHash")) lastFinishedHeaders[0].FirstPendingMetaBlock = []byte(firstPendingMeta) meta := &block.MetaBlock{ @@ -575,7 +744,20 @@ func TestShardStorageHandler_getProcessedAndPendingMiniBlocksNilMetaBlock(t *tes lastFinishedMetaBlockHash := "last finished meta block" firstPendingMeta := "first pending meta" args := createDefaultShardStorageArgs() - shardStorage, _ := NewShardStorageHandler(args.generalConfig, args.prefsConfig, args.shardCoordinator, args.pathManagerHandler, args.marshalizer, args.hasher, 1, args.uint64Converter, args.nodeTypeProvider, false, args.managedPeersHolder) + shardStorage, _ := NewShardStorageHandler( + 
args.generalConfig, + args.prefsConfig, + args.shardCoordinator, + args.pathManagerHandler, + args.marshalizer, + args.hasher, + 1, + args.uint64Converter, + args.nodeTypeProvider, + args.nodeProcessingMode, + false, + args.managedPeersHolder, + ) lastFinishedHeaders := createDefaultEpochStartShardData([]byte(lastFinishedMetaBlockHash), []byte("headerHash")) lastFinishedHeaders[0].FirstPendingMetaBlock = []byte(firstPendingMeta) meta := &block.MetaBlock{ @@ -608,7 +790,20 @@ func TestShardStorageHandler_getProcessedAndPendingMiniBlocksNoProcessedNoPendin lastFinishedMetaBlockHash := "last finished meta block" firstPendingMeta := "first pending meta" args := createDefaultShardStorageArgs() - shardStorage, _ := NewShardStorageHandler(args.generalConfig, args.prefsConfig, args.shardCoordinator, args.pathManagerHandler, args.marshalizer, args.hasher, 1, args.uint64Converter, args.nodeTypeProvider, false, args.managedPeersHolder) + shardStorage, _ := NewShardStorageHandler( + args.generalConfig, + args.prefsConfig, + args.shardCoordinator, + args.pathManagerHandler, + args.marshalizer, + args.hasher, + 1, + args.uint64Converter, + args.nodeTypeProvider, + args.nodeProcessingMode, + false, + args.managedPeersHolder, + ) lastFinishedHeaders := createDefaultEpochStartShardData([]byte(lastFinishedMetaBlockHash), []byte("headerHash")) lastFinishedHeaders[0].FirstPendingMetaBlock = []byte(firstPendingMeta) lastFinishedHeaders[0].PendingMiniBlockHeaders = nil @@ -637,7 +832,20 @@ func TestShardStorageHandler_getProcessedAndPendingMiniBlocksWithProcessedAndPen t.Parallel() args := createDefaultShardStorageArgs() - shardStorage, _ := NewShardStorageHandler(args.generalConfig, args.prefsConfig, args.shardCoordinator, args.pathManagerHandler, args.marshalizer, args.hasher, 1, args.uint64Converter, args.nodeTypeProvider, false, args.managedPeersHolder) + shardStorage, _ := NewShardStorageHandler( + args.generalConfig, + args.prefsConfig, + args.shardCoordinator, + args.pathManagerHandler, + args.marshalizer, + args.hasher, + 1, + args.uint64Converter, + args.nodeTypeProvider, + args.nodeProcessingMode, + false, + args.managedPeersHolder, + ) scenario := createPendingAndProcessedMiniBlocksScenario() processedMiniBlocks, pendingMiniBlocks, firstPendingMetaBlockHash, err := shardStorage.getProcessedAndPendingMiniBlocks(scenario.metaBlock, scenario.headers) @@ -655,7 +863,20 @@ func TestShardStorageHandler_saveLastCrossNotarizedHeadersWithoutScheduledGetSha }() args := createDefaultShardStorageArgs() - shardStorage, _ := NewShardStorageHandler(args.generalConfig, args.prefsConfig, args.shardCoordinator, args.pathManagerHandler, args.marshalizer, args.hasher, 1, args.uint64Converter, args.nodeTypeProvider, false, args.managedPeersHolder) + shardStorage, _ := NewShardStorageHandler( + args.generalConfig, + args.prefsConfig, + args.shardCoordinator, + args.pathManagerHandler, + args.marshalizer, + args.hasher, + 1, + args.uint64Converter, + args.nodeTypeProvider, + args.nodeProcessingMode, + false, + args.managedPeersHolder, + ) headers := map[string]data.HeaderHandler{} meta := &block.MetaBlock{ @@ -676,7 +897,20 @@ func TestShardStorageHandler_saveLastCrossNotarizedHeadersWithoutScheduledMissin }() args := createDefaultShardStorageArgs() - shardStorage, _ := NewShardStorageHandler(args.generalConfig, args.prefsConfig, args.shardCoordinator, args.pathManagerHandler, args.marshalizer, args.hasher, 1, args.uint64Converter, args.nodeTypeProvider, false, args.managedPeersHolder) + shardStorage, _ := 
NewShardStorageHandler( + args.generalConfig, + args.prefsConfig, + args.shardCoordinator, + args.pathManagerHandler, + args.marshalizer, + args.hasher, + 1, + args.uint64Converter, + args.nodeTypeProvider, + args.nodeProcessingMode, + false, + args.managedPeersHolder, + ) shard0HeaderHash := "shard0 header hash" lastFinishedMetaBlock := "last finished meta block" @@ -705,7 +939,20 @@ func TestShardStorageHandler_saveLastCrossNotarizedHeadersWithoutScheduledWrongT }() args := createDefaultShardStorageArgs() - shardStorage, _ := NewShardStorageHandler(args.generalConfig, args.prefsConfig, args.shardCoordinator, args.pathManagerHandler, args.marshalizer, args.hasher, 1, args.uint64Converter, args.nodeTypeProvider, false, args.managedPeersHolder) + shardStorage, _ := NewShardStorageHandler( + args.generalConfig, + args.prefsConfig, + args.shardCoordinator, + args.pathManagerHandler, + args.marshalizer, + args.hasher, + 1, + args.uint64Converter, + args.nodeTypeProvider, + args.nodeProcessingMode, + false, + args.managedPeersHolder, + ) shard0HeaderHash := "shard0 header hash" lastFinishedMetaBlock := "last finished meta block" @@ -741,7 +988,20 @@ func TestShardStorageHandler_saveLastCrossNotarizedHeadersWithoutScheduledErrorW args.marshalizer = &testscommon.MarshalizerStub{MarshalCalled: func(obj interface{}) ([]byte, error) { return nil, expectedErr }} - shardStorage, _ := NewShardStorageHandler(args.generalConfig, args.prefsConfig, args.shardCoordinator, args.pathManagerHandler, args.marshalizer, args.hasher, 1, args.uint64Converter, args.nodeTypeProvider, false, args.managedPeersHolder) + shardStorage, _ := NewShardStorageHandler( + args.generalConfig, + args.prefsConfig, + args.shardCoordinator, + args.pathManagerHandler, + args.marshalizer, + args.hasher, + 1, + args.uint64Converter, + args.nodeTypeProvider, + args.nodeProcessingMode, + false, + args.managedPeersHolder, + ) shard0HeaderHash := "shard0 header hash" lastFinishedMetaBlock := "last finished meta block" @@ -772,7 +1032,20 @@ func TestShardStorageHandler_saveLastCrossNotarizedHeadersWithoutScheduled(t *te }() args := createDefaultShardStorageArgs() - shardStorage, _ := NewShardStorageHandler(args.generalConfig, args.prefsConfig, args.shardCoordinator, args.pathManagerHandler, args.marshalizer, args.hasher, 1, args.uint64Converter, args.nodeTypeProvider, false, args.managedPeersHolder) + shardStorage, _ := NewShardStorageHandler( + args.generalConfig, + args.prefsConfig, + args.shardCoordinator, + args.pathManagerHandler, + args.marshalizer, + args.hasher, + 1, + args.uint64Converter, + args.nodeTypeProvider, + args.nodeProcessingMode, + false, + args.managedPeersHolder, + ) shard0HeaderHash := "shard0 header hash" lastFinishedMetaBlock := "last finished meta block" @@ -808,7 +1081,20 @@ func TestShardStorageHandler_saveLastCrossNotarizedHeadersWithScheduledErrorUpda }() args := createDefaultShardStorageArgs() - shardStorage, _ := NewShardStorageHandler(args.generalConfig, args.prefsConfig, args.shardCoordinator, args.pathManagerHandler, args.marshalizer, args.hasher, 1, args.uint64Converter, args.nodeTypeProvider, false, args.managedPeersHolder) + shardStorage, _ := NewShardStorageHandler( + args.generalConfig, + args.prefsConfig, + args.shardCoordinator, + args.pathManagerHandler, + args.marshalizer, + args.hasher, + 1, + args.uint64Converter, + args.nodeTypeProvider, + args.nodeProcessingMode, + false, + args.managedPeersHolder, + ) shard0HeaderHash := "shard0 header hash" lastFinishedMetaBlock := "last finished meta block" 
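// NOTE (editor): the startInEpochScheduled.go hunk below replaces
// getScheduledIntermediateTxsMap with getScheduledIntermediateTxsMapInOrder.
// Ranging over a Go map visits keys in a randomized order, so building the
// per-block-type transaction slices straight from the intermediateTxs map was
// non-deterministic; driving the iteration by the ordered mini block header list
// makes the result reproducible across nodes. A standalone sketch of the pattern
// with simplified types (illustrative only, not code from this change set):

package main

import "fmt"

// collectInOrder walks an ordered list of hashes and picks the matching entries
// out of an unordered map, skipping hashes with no entry, just as the real
// function skips unknown mini blocks and transactions.
func collectInOrder(orderedHashes []string, txByHash map[string]int) []int {
	collected := make([]int, 0, len(orderedHashes))
	for _, hash := range orderedHashes {
		tx, ok := txByHash[hash]
		if !ok {
			continue
		}
		collected = append(collected, tx)
	}
	return collected
}

func main() {
	txs := map[string]int{"a": 1, "b": 2, "c": 3}
	// ranging over txs directly could visit keys in any order; the
	// header-driven iteration always yields [3 1 2]
	fmt.Println(collectInOrder([]string{"c", "a", "x", "b"}, txs))
}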
@@ -838,7 +1124,20 @@ func TestShardStorageHandler_saveLastCrossNotarizedHeadersWithScheduled(t *testi }() args := createDefaultShardStorageArgs() - shardStorage, _ := NewShardStorageHandler(args.generalConfig, args.prefsConfig, args.shardCoordinator, args.pathManagerHandler, args.marshalizer, args.hasher, 1, args.uint64Converter, args.nodeTypeProvider, false, args.managedPeersHolder) + shardStorage, _ := NewShardStorageHandler( + args.generalConfig, + args.prefsConfig, + args.shardCoordinator, + args.pathManagerHandler, + args.marshalizer, + args.hasher, + 1, + args.uint64Converter, + args.nodeTypeProvider, + args.nodeProcessingMode, + false, + args.managedPeersHolder, + ) shard0HeaderHash := "shard0 header hash" lastFinishedMetaBlock := "last finished meta block" prevMetaHash := "prev metaHlock hash" @@ -1060,6 +1359,7 @@ type shardStorageArgs struct { currentEpoch uint32 uint64Converter typeConverters.Uint64ByteSliceConverter nodeTypeProvider core.NodeTypeProviderHandler + nodeProcessingMode common.NodeProcessingMode managedPeersHolder common.ManagedPeersHolder } @@ -1074,6 +1374,7 @@ func createDefaultShardStorageArgs() shardStorageArgs { currentEpoch: 0, uint64Converter: &mock.Uint64ByteSliceConverterMock{}, nodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, + nodeProcessingMode: common.Normal, managedPeersHolder: &testscommon.ManagedPeersHolderStub{}, } } diff --git a/epochStart/bootstrap/startInEpochScheduled.go b/epochStart/bootstrap/startInEpochScheduled.go index 807d81729f5..265cf4dcc72 100644 --- a/epochStart/bootstrap/startInEpochScheduled.go +++ b/epochStart/bootstrap/startInEpochScheduled.go @@ -254,7 +254,11 @@ func (ses *startInEpochWithScheduledDataSyncer) prepareScheduledIntermediateTxs( additionalData := header.GetAdditionalData() if additionalData != nil { - scheduledIntermediateTxsMap := getScheduledIntermediateTxsMap(miniBlocks, scheduledIntermediateTxs) + scheduledIntermediateTxsMap := getScheduledIntermediateTxsMapInOrder( + header.GetMiniBlockHeaderHandlers(), + miniBlocks, + scheduledIntermediateTxs, + ) gasAndFees := scheduled.GasAndFees{ AccumulatedFees: additionalData.GetScheduledAccumulatedFees(), DeveloperFees: additionalData.GetScheduledDeveloperFees(), @@ -343,16 +347,28 @@ func isScheduledIntermediateTx( return isScheduledIntermediateTransaction && isTxExecutedInSelfShard } -func getScheduledIntermediateTxsMap( +func getScheduledIntermediateTxsMapInOrder( + miniBlockHeaderHandlerList []data.MiniBlockHeaderHandler, miniBlocks map[string]*block.MiniBlock, intermediateTxs map[string]data.TransactionHandler, ) map[block.Type][]data.TransactionHandler { intermediateTxsMap := make(map[block.Type][]data.TransactionHandler) - for txHash, tx := range intermediateTxs { - blockType := getBlockTypeOfTx([]byte(txHash), miniBlocks) - intermediateTxsMap[blockType] = append(intermediateTxsMap[blockType], tx) + for _, mbHeader := range miniBlockHeaderHandlerList { + miniBlock, ok := miniBlocks[string(mbHeader.GetHash())] + if !ok { + continue + } + + for _, hash := range miniBlock.TxHashes { + txHandler, ok := intermediateTxs[string(hash)] + if !ok { + continue + } + + intermediateTxsMap[miniBlock.Type] = append(intermediateTxsMap[miniBlock.Type], txHandler) + } } return intermediateTxsMap diff --git a/epochStart/bootstrap/startInEpochScheduled_test.go b/epochStart/bootstrap/startInEpochScheduled_test.go index fe1e23afcae..7a649b01718 100644 --- a/epochStart/bootstrap/startInEpochScheduled_test.go +++ b/epochStart/bootstrap/startInEpochScheduled_test.go @@ 
-392,12 +392,18 @@ func TestStartInEpochWithScheduledDataSyncer_getScheduledIntermediateTxsMap(t *t tx2 := &smartContractResult.SmartContractResult{Nonce: 1} tx3 := &transaction.Transaction{Nonce: 2} tx4 := &smartContractResult.SmartContractResult{Nonce: 3} + tx5 := &smartContractResult.SmartContractResult{Nonce: 5} + tx6 := &smartContractResult.SmartContractResult{Nonce: 6} + tx7 := &smartContractResult.SmartContractResult{Nonce: 7} intermediateTxs := map[string]data.TransactionHandler{ "hash1": tx1, "hash2": tx2, "hash3": tx3, "hash4": tx4, + "5hash": tx5, + "3hash": tx6, + "hash6": tx7, } miniBlocks := make(map[string]*block.MiniBlock) @@ -415,14 +421,26 @@ func TestStartInEpochWithScheduledDataSyncer_getScheduledIntermediateTxsMap(t *t } miniBlocks["4"] = &block.MiniBlock{ Type: block.SmartContractResultBlock, - TxHashes: [][]byte{[]byte("hash4")}, + TxHashes: [][]byte{[]byte("hash4"), []byte("5hash"), []byte("3hash"), []byte("hash6")}, } - scheduledIntermediateTxsMap := getScheduledIntermediateTxsMap(miniBlocks, intermediateTxs) + miniBlockHeaders := make([]data.MiniBlockHeaderHandler, 4) + miniBlockHeaders[0] = &block.MiniBlockHeader{Hash: []byte("1")} + miniBlockHeaders[1] = &block.MiniBlockHeader{Hash: []byte("2")} + miniBlockHeaders[2] = &block.MiniBlockHeader{Hash: []byte("3")} + miniBlockHeaders[3] = &block.MiniBlockHeader{Hash: []byte("4")} + + scheduledIntermediateTxsMap := getScheduledIntermediateTxsMapInOrder(miniBlockHeaders, miniBlocks, intermediateTxs) require.Equal(t, 2, len(scheduledIntermediateTxsMap)) - require.Equal(t, 3, len(scheduledIntermediateTxsMap[block.SmartContractResultBlock])) + require.Equal(t, 6, len(scheduledIntermediateTxsMap[block.SmartContractResultBlock])) require.Equal(t, 1, len(scheduledIntermediateTxsMap[block.InvalidBlock])) assert.Equal(t, tx3, scheduledIntermediateTxsMap[block.InvalidBlock][0]) + assert.Equal(t, tx1, scheduledIntermediateTxsMap[block.SmartContractResultBlock][0]) + assert.Equal(t, tx2, scheduledIntermediateTxsMap[block.SmartContractResultBlock][1]) + assert.Equal(t, tx4, scheduledIntermediateTxsMap[block.SmartContractResultBlock][2]) + assert.Equal(t, tx5, scheduledIntermediateTxsMap[block.SmartContractResultBlock][3]) + assert.Equal(t, tx6, scheduledIntermediateTxsMap[block.SmartContractResultBlock][4]) + assert.Equal(t, tx7, scheduledIntermediateTxsMap[block.SmartContractResultBlock][5]) } func TestStartInEpochWithScheduledDataSyncer_saveScheduledInfoNoScheduledRootHash(t *testing.T) { @@ -443,7 +461,7 @@ func TestStartInEpochWithScheduledDataSyncer_saveScheduledInfoNoScheduledRootHas }, } - scheduledIntermediateTxsMap := getScheduledIntermediateTxsMap(make(map[string]*block.MiniBlock), scheduledIntermediateTxs) + scheduledIntermediateTxsMap := getScheduledIntermediateTxsMapInOrder(make([]data.MiniBlockHeaderHandler, 0), make(map[string]*block.MiniBlock), scheduledIntermediateTxs) scheduledInfo := &process.ScheduledInfo{ RootHash: nil, IntermediateTxs: scheduledIntermediateTxsMap, @@ -501,7 +519,7 @@ func TestStartInEpochWithScheduledDataSyncer_saveScheduledInfo(t *testing.T) { }, } - scheduledIntermediateTxsMap := getScheduledIntermediateTxsMap(make(map[string]*block.MiniBlock), scheduledIntermediateTxs) + scheduledIntermediateTxsMap := getScheduledIntermediateTxsMapInOrder(make([]data.MiniBlockHeaderHandler, 0), make(map[string]*block.MiniBlock), scheduledIntermediateTxs) scheduledInfo := &process.ScheduledInfo{ RootHash: scheduledRootHash, IntermediateTxs: scheduledIntermediateTxsMap, diff --git 
a/epochStart/metachain/systemSCs.go b/epochStart/metachain/systemSCs.go index 642053ad7d1..645f54ce3ea 100644 --- a/epochStart/metachain/systemSCs.go +++ b/epochStart/metachain/systemSCs.go @@ -16,6 +16,7 @@ import ( "github.com/multiversx/mx-chain-core-go/data/block" "github.com/multiversx/mx-chain-core-go/marshal" "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/common/errChan" vInfo "github.com/multiversx/mx-chain-go/common/validatorInfo" "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/epochStart" @@ -1102,7 +1103,7 @@ func (s *systemSCProcessor) getArgumentsForSetOwnerFunctionality(userValidatorAc leavesChannels := &common.TrieIteratorChannels{ LeavesChan: make(chan core.KeyValueHolder, common.TrieLeavesChannelDefaultCapacity), - ErrChan: make(chan error, 1), + ErrChan: errChan.NewErrChanWrapper(), } err = userValidatorAccount.DataTrie().GetAllLeavesOnChannel(leavesChannels, context.Background(), rootHash, keyBuilder.NewKeyBuilder()) if err != nil { @@ -1125,7 +1126,7 @@ func (s *systemSCProcessor) getArgumentsForSetOwnerFunctionality(userValidatorAc } } - err = common.GetErrorFromChanNonBlocking(leavesChannels.ErrChan) + err = leavesChannels.ErrChan.ReadFromChanNonBlocking() if err != nil { return nil, err } diff --git a/epochStart/metachain/systemSCs_test.go b/epochStart/metachain/systemSCs_test.go index 81096d0697f..55678804a7d 100644 --- a/epochStart/metachain/systemSCs_test.go +++ b/epochStart/metachain/systemSCs_test.go @@ -989,13 +989,16 @@ func createFullArgumentsForSystemSCProcessing(enableEpochsConfig config.EnableEp OwnerAddress: "aaaaaa", }, GovernanceSystemSCConfig: config.GovernanceSystemSCConfig{ + V1: config.GovernanceSystemSCConfigV1{ + ProposalCost: "500", + }, Active: config.GovernanceSystemSCConfigActive{ ProposalCost: "500", - MinQuorum: "50", - MinPassThreshold: "50", - MinVetoThreshold: "50", + MinQuorum: 0.5, + MinPassThreshold: 0.5, + MinVetoThreshold: 0.5, }, - FirstWhitelistedAddress: "3132333435363738393031323334353637383930313233343536373839303234", + ChangeConfigAddress: "3132333435363738393031323334353637383930313233343536373839303234", }, StakingSystemSCConfig: config.StakingSystemSCConfig{ GenesisNodePrice: "1000", @@ -1099,16 +1102,19 @@ func createEconomicsData() process.EconomicsDataHandler { MaxGasLimitPerMetaMiniBlock: maxGasLimitPerBlock, MaxGasLimitPerTx: maxGasLimitPerBlock, MinGasLimit: minGasLimit, + ExtraGasLimitGuardedTx: "50000", }, }, - MinGasPrice: minGasPrice, - GasPerDataByte: "1", - GasPriceModifier: 1.0, + MinGasPrice: minGasPrice, + GasPerDataByte: "1", + GasPriceModifier: 1.0, + MaxGasPriceSetGuardian: "100000", }, }, EpochNotifier: &epochNotifier.EpochNotifierStub{}, EnableEpochsHandler: &testscommon.EnableEpochsHandlerStub{}, BuiltInFunctionsCostHandler: &mock.BuiltInCostHandlerStub{}, + TxVersionChecker: &testscommon.TxVersionCheckerStub{}, } economicsData, _ := economicsHandler.NewEconomicsData(argsNewEconomicsData) return economicsData diff --git a/epochStart/mock/economicsHandlerStub.go b/epochStart/mock/economicsHandlerStub.go deleted file mode 100644 index c0a70ff1736..00000000000 --- a/epochStart/mock/economicsHandlerStub.go +++ /dev/null @@ -1,276 +0,0 @@ -package mock - -import ( - "math/big" - - "github.com/multiversx/mx-chain-core-go/core" - "github.com/multiversx/mx-chain-core-go/data" -) - -// EconomicsHandlerStub - -type EconomicsHandlerStub struct { - MaxGasLimitPerBlockCalled func() uint64 - MaxGasLimitPerMiniBlockCalled func() uint64 - 
MaxGasLimitPerBlockForSafeCrossShardCalled func() uint64 - MaxGasLimitPerMiniBlockForSafeCrossShardCalled func() uint64 - MaxGasLimitPerTxCalled func() uint64 - SetMinGasPriceCalled func(minasPrice uint64) - SetMinGasLimitCalled func(minGasLimit uint64) - ComputeGasLimitCalled func(tx data.TransactionWithFeeHandler) uint64 - ComputeMoveBalanceFeeCalled func(tx data.TransactionWithFeeHandler) *big.Int - ComputeTxFeeCalled func(tx data.TransactionWithFeeHandler) *big.Int - CheckValidityTxValuesCalled func(tx data.TransactionWithFeeHandler) error - DeveloperPercentageCalled func() float64 - MinGasPriceCalled func() uint64 - LeaderPercentageCalled func() float64 - ProtocolSustainabilityPercentageCalled func() float64 - ProtocolSustainabilityAddressCalled func() string - MinInflationRateCalled func() float64 - MaxInflationRateCalled func(year uint32) float64 - ComputeFeeForProcessingCalled func(tx data.TransactionWithFeeHandler, gasToUse uint64) *big.Int - GasPriceModifierCalled func() float64 - SplitTxGasInCategoriesCalled func(tx data.TransactionWithFeeHandler) (uint64, uint64) - GasPriceForProcessingCalled func(tx data.TransactionWithFeeHandler) uint64 - GasPriceForMoveCalled func(tx data.TransactionWithFeeHandler) uint64 - MinGasPriceForProcessingCalled func() uint64 - ComputeGasUsedAndFeeBasedOnRefundValueCalled func(tx data.TransactionWithFeeHandler, refundValue *big.Int) (uint64, *big.Int) - ComputeTxFeeBasedOnGasUsedCalled func(tx data.TransactionWithFeeHandler, gasUsed uint64) *big.Int - RewardsTopUpGradientPointCalled func() *big.Int - RewardsTopUpFactorCalled func() float64 - ComputeGasLimitBasedOnBalanceCalled func(tx data.TransactionWithFeeHandler, balance *big.Int) (uint64, error) - SetStatusHandlerCalled func(statusHandler core.AppStatusHandler) error -} - -// ComputeGasLimitBasedOnBalance - -func (ehs *EconomicsHandlerStub) ComputeGasLimitBasedOnBalance(tx data.TransactionWithFeeHandler, balance *big.Int) (uint64, error) { - if ehs.ComputeGasLimitBasedOnBalanceCalled != nil { - return ehs.ComputeGasLimitBasedOnBalanceCalled(tx, balance) - } - return 0, nil -} - -// MinGasPrice - -func (ehs *EconomicsHandlerStub) MinGasPrice() uint64 { - if ehs.MinGasPriceCalled != nil { - return ehs.MinGasPriceCalled() - } - return 0 -} - -// MinGasLimit will return min gas limit -func (ehs *EconomicsHandlerStub) MinGasLimit() uint64 { - return 0 -} - -// GasPerDataByte - -func (ehs *EconomicsHandlerStub) GasPerDataByte() uint64 { - return 0 -} - -// DeveloperPercentage - -func (ehs *EconomicsHandlerStub) DeveloperPercentage() float64 { - return ehs.DeveloperPercentageCalled() -} - -// GenesisTotalSupply - -func (ehs *EconomicsHandlerStub) GenesisTotalSupply() *big.Int { - return big.NewInt(0) -} - -// MaxGasLimitPerBlock - -func (ehs *EconomicsHandlerStub) MaxGasLimitPerBlock(uint32) uint64 { - if ehs.MaxGasLimitPerBlockCalled != nil { - return ehs.MaxGasLimitPerBlockCalled() - } - return 0 -} - -// MaxGasLimitPerMiniBlock - -func (ehs *EconomicsHandlerStub) MaxGasLimitPerMiniBlock(uint32) uint64 { - if ehs.MaxGasLimitPerMiniBlockCalled != nil { - return ehs.MaxGasLimitPerMiniBlockCalled() - } - return 0 -} - -// MaxGasLimitPerBlockForSafeCrossShard - -func (ehs *EconomicsHandlerStub) MaxGasLimitPerBlockForSafeCrossShard() uint64 { - if ehs.MaxGasLimitPerBlockForSafeCrossShardCalled != nil { - return ehs.MaxGasLimitPerBlockForSafeCrossShardCalled() - } - return 0 -} - -// MaxGasLimitPerMiniBlockForSafeCrossShard - -func (ehs *EconomicsHandlerStub) MaxGasLimitPerMiniBlockForSafeCrossShard() 
uint64 { - if ehs.MaxGasLimitPerMiniBlockForSafeCrossShardCalled != nil { - return ehs.MaxGasLimitPerMiniBlockForSafeCrossShardCalled() - } - return 0 -} - -// MaxGasLimitPerTx - -func (ehs *EconomicsHandlerStub) MaxGasLimitPerTx() uint64 { - if ehs.MaxGasLimitPerTxCalled != nil { - return ehs.MaxGasLimitPerTxCalled() - } - return 0 -} - -// ComputeGasLimit - -func (ehs *EconomicsHandlerStub) ComputeGasLimit(tx data.TransactionWithFeeHandler) uint64 { - return ehs.ComputeGasLimitCalled(tx) -} - -// ComputeMoveBalanceFee - -func (ehs *EconomicsHandlerStub) ComputeMoveBalanceFee(tx data.TransactionWithFeeHandler) *big.Int { - return ehs.ComputeMoveBalanceFeeCalled(tx) -} - -// ComputeTxFee - -func (ehs *EconomicsHandlerStub) ComputeTxFee(tx data.TransactionWithFeeHandler) *big.Int { - return ehs.ComputeTxFeeCalled(tx) -} - -// CheckValidityTxValues - -func (ehs *EconomicsHandlerStub) CheckValidityTxValues(tx data.TransactionWithFeeHandler) error { - return ehs.CheckValidityTxValuesCalled(tx) -} - -// LeaderPercentage - -func (ehs *EconomicsHandlerStub) LeaderPercentage() float64 { - if ehs.LeaderPercentageCalled != nil { - return ehs.LeaderPercentageCalled() - } - - return 1 -} - -// ProtocolSustainabilityPercentage will return the protocol sustainability percentage value -func (ehs *EconomicsHandlerStub) ProtocolSustainabilityPercentage() float64 { - if ehs.ProtocolSustainabilityPercentageCalled != nil { - return ehs.ProtocolSustainabilityPercentageCalled() - } - - return 0.1 -} - -// ProtocolSustainabilityAddress will return the protocol sustainability address -func (ehs *EconomicsHandlerStub) ProtocolSustainabilityAddress() string { - if ehs.ProtocolSustainabilityAddressCalled != nil { - return ehs.ProtocolSustainabilityAddressCalled() - } - - return "1111" -} - -// MinInflationRate - -func (ehs *EconomicsHandlerStub) MinInflationRate() float64 { - if ehs.MinInflationRateCalled != nil { - return ehs.MinInflationRateCalled() - } - - return 1 -} - -// MaxInflationRate - -func (ehs *EconomicsHandlerStub) MaxInflationRate(year uint32) float64 { - if ehs.MaxInflationRateCalled != nil { - return ehs.MaxInflationRateCalled(year) - } - - return 1000000 -} - -// ComputeFeeForProcessing - -func (ehs *EconomicsHandlerStub) ComputeFeeForProcessing(tx data.TransactionWithFeeHandler, gasToUse uint64) *big.Int { - if ehs.ComputeFeeForProcessingCalled != nil { - return ehs.ComputeFeeForProcessingCalled(tx, gasToUse) - } - return big.NewInt(0) -} - -// GasPriceModifier - -func (ehs *EconomicsHandlerStub) GasPriceModifier() float64 { - if ehs.GasPriceModifierCalled != nil { - return ehs.GasPriceModifierCalled() - } - return 0 -} - -// SplitTxGasInCategories - -func (ehs *EconomicsHandlerStub) SplitTxGasInCategories(tx data.TransactionWithFeeHandler) (uint64, uint64) { - if ehs.SplitTxGasInCategoriesCalled != nil { - return ehs.SplitTxGasInCategoriesCalled(tx) - } - return 0, 0 -} - -// GasPriceForProcessing - -func (ehs *EconomicsHandlerStub) GasPriceForProcessing(tx data.TransactionWithFeeHandler) uint64 { - if ehs.GasPriceForProcessingCalled != nil { - return ehs.GasPriceForProcessingCalled(tx) - } - return 0 -} - -// GasPriceForMove - -func (ehs *EconomicsHandlerStub) GasPriceForMove(tx data.TransactionWithFeeHandler) uint64 { - if ehs.GasPriceForMoveCalled != nil { - return ehs.GasPriceForMoveCalled(tx) - } - return 0 -} - -// MinGasPriceForProcessing - -func (ehs *EconomicsHandlerStub) MinGasPriceForProcessing() uint64 { - if ehs.MinGasPriceForProcessingCalled != nil { - return 
ehs.MinGasPriceForProcessingCalled() - } - return 0 -} - -// ComputeGasUsedAndFeeBasedOnRefundValue - -func (ehs *EconomicsHandlerStub) ComputeGasUsedAndFeeBasedOnRefundValue(tx data.TransactionWithFeeHandler, refundValue *big.Int) (uint64, *big.Int) { - if ehs.ComputeGasUsedAndFeeBasedOnRefundValueCalled != nil { - return ehs.ComputeGasUsedAndFeeBasedOnRefundValueCalled(tx, refundValue) - } - return 0, big.NewInt(0) -} - -// ComputeTxFeeBasedOnGasUsed - -func (ehs *EconomicsHandlerStub) ComputeTxFeeBasedOnGasUsed(tx data.TransactionWithFeeHandler, gasUsed uint64) *big.Int { - if ehs.ComputeTxFeeBasedOnGasUsedCalled != nil { - return ehs.ComputeTxFeeBasedOnGasUsedCalled(tx, gasUsed) - } - return big.NewInt(0) -} - -// RewardsTopUpGradientPoint - -func (ehs *EconomicsHandlerStub) RewardsTopUpGradientPoint() *big.Int { - if ehs.RewardsTopUpGradientPointCalled != nil { - return ehs.RewardsTopUpGradientPointCalled() - } - return big.NewInt(0) -} - -// RewardsTopUpFactor - -func (ehs *EconomicsHandlerStub) RewardsTopUpFactor() float64 { - if ehs.RewardsTopUpFactorCalled != nil { - return ehs.RewardsTopUpFactorCalled() - } - return 0 -} - -// SetStatusHandler - -func (ehs *EconomicsHandlerStub) SetStatusHandler(statusHandler core.AppStatusHandler) error { - if ehs.SetStatusHandlerCalled != nil { - return ehs.SetStatusHandlerCalled(statusHandler) - } - return nil -} - -// IsInterfaceNil returns true if there is no value under the interface -func (ehs *EconomicsHandlerStub) IsInterfaceNil() bool { - return ehs == nil -} diff --git a/epochStart/notifier/epochStartSubscriptionHandler.go b/epochStart/notifier/epochStartSubscriptionHandler.go index fbb9a2d2050..1e4141a96dd 100644 --- a/epochStart/notifier/epochStartSubscriptionHandler.go +++ b/epochStart/notifier/epochStartSubscriptionHandler.go @@ -1,9 +1,11 @@ package notifier import ( + "runtime/debug" "sort" "sync" + "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-go/epochStart" ) @@ -39,34 +41,43 @@ func NewEpochStartSubscriptionHandler() *epochStartSubscriptionHandler { // RegisterHandler will subscribe a function, so it will be called when NotifyAll method is called func (essh *epochStartSubscriptionHandler) RegisterHandler(handler epochStart.ActionHandler) { - if handler != nil { - essh.mutEpochStartHandler.Lock() - essh.epochStartHandlers = append(essh.epochStartHandlers, handler) - essh.mutEpochStartHandler.Unlock() + if check.IfNilReflect(handler) { + return + } + + essh.mutEpochStartHandler.Lock() + defer essh.mutEpochStartHandler.Unlock() + + for _, existingHandler := range essh.epochStartHandlers { + if existingHandler == handler { + log.Error("epochStartSubscriptionHandler.RegisterHandler - trying to add a duplicated handler", "stack trace", string(debug.Stack())) + return + } } + + essh.epochStartHandlers = append(essh.epochStartHandlers, handler) + sort.Slice(essh.epochStartHandlers, func(i, j int) bool { + return essh.epochStartHandlers[i].NotifyOrder() < essh.epochStartHandlers[j].NotifyOrder() + }) } // UnregisterHandler will unsubscribe a function from the slice func (essh *epochStartSubscriptionHandler) UnregisterHandler(handlerToUnregister epochStart.ActionHandler) { - if handlerToUnregister != nil { - essh.mutEpochStartHandler.Lock() - for idx, handler := range essh.epochStartHandlers { - if handler == handlerToUnregister { - essh.epochStartHandlers = append(essh.epochStartHandlers[:idx], essh.epochStartHandlers[idx+1:]...) 
- } + if check.IfNilReflect(handlerToUnregister) { + return + } + essh.mutEpochStartHandler.Lock() + for idx, handler := range essh.epochStartHandlers { + if handler == handlerToUnregister { + essh.epochStartHandlers = append(essh.epochStartHandlers[:idx], essh.epochStartHandlers[idx+1:]...) } - essh.mutEpochStartHandler.Unlock() } + essh.mutEpochStartHandler.Unlock() } // NotifyAll will call all the subscribed functions from the internal slice func (essh *epochStartSubscriptionHandler) NotifyAll(hdr data.HeaderHandler) { essh.mutEpochStartHandler.RLock() - - sort.Slice(essh.epochStartHandlers, func(i, j int) bool { - return essh.epochStartHandlers[i].NotifyOrder() < essh.epochStartHandlers[j].NotifyOrder() - }) - for i := 0; i < len(essh.epochStartHandlers); i++ { essh.epochStartHandlers[i].EpochStartAction(hdr) } @@ -77,11 +88,6 @@ func (essh *epochStartSubscriptionHandler) NotifyAll(hdr data.HeaderHandler) { // observed, but not yet confirmed/committed. Some components may need to do some initialisation/preparation func (essh *epochStartSubscriptionHandler) NotifyAllPrepare(metaHdr data.HeaderHandler, body data.BodyHandler) { essh.mutEpochStartHandler.RLock() - - sort.Slice(essh.epochStartHandlers, func(i, j int) bool { - return essh.epochStartHandlers[i].NotifyOrder() < essh.epochStartHandlers[j].NotifyOrder() - }) - for i := 0; i < len(essh.epochStartHandlers); i++ { essh.epochStartHandlers[i].EpochStartPrepare(metaHdr, body) } diff --git a/epochStart/notifier/epochStartSubscriptionHandler_test.go b/epochStart/notifier/epochStartSubscriptionHandler_test.go index fb290574a57..cfd5d2787a9 100644 --- a/epochStart/notifier/epochStartSubscriptionHandler_test.go +++ b/epochStart/notifier/epochStartSubscriptionHandler_test.go @@ -1,6 +1,7 @@ package notifier_test import ( + "sync" "testing" "github.com/multiversx/mx-chain-core-go/data" @@ -43,6 +44,30 @@ func TestEpochStartSubscriptionHandler_RegisterHandlerOkHandlerShouldAdd(t *test mutHandlers.RUnlock() } +func TestEpochStartSubscriptionHandler_RegisterHandlerShouldNotAllowDuplicates(t *testing.T) { + t.Parallel() + + essh := notifier.NewEpochStartSubscriptionHandler() + handler := notifier.NewHandlerForEpochStart(func(hdr data.HeaderHandler) {}, nil, 0) + + essh.RegisterHandler(handler) + essh.RegisterHandler(handler) + + handlers, mutHandlers := essh.RegisteredHandlers() + mutHandlers.RLock() + assert.Len(t, handlers, 1) + mutHandlers.RUnlock() + + // check unregister twice to ensure there is no problem + essh.UnregisterHandler(handler) + essh.UnregisterHandler(handler) + + handlers, mutHandlers = essh.RegisteredHandlers() + mutHandlers.RLock() + assert.Len(t, handlers, 0) + mutHandlers.RUnlock() +} + func TestEpochStartSubscriptionHandler_UnregisterHandlerNilHandlerShouldDoNothing(t *testing.T) { t.Parallel() @@ -61,7 +86,7 @@ func TestEpochStartSubscriptionHandler_UnregisterHandlerNilHandlerShouldDoNothin mutHandlers.RUnlock() } -func TestEpochStartSubscriptionHandler_UnregisterHandlerOklHandlerShouldRemove(t *testing.T) { +func TestEpochStartSubscriptionHandler_UnregisterHandlerOkHandlerShouldRemove(t *testing.T) { t.Parallel() essh := notifier.NewEpochStartSubscriptionHandler() @@ -82,31 +107,64 @@ func TestEpochStartSubscriptionHandler_UnregisterHandlerOklHandlerShouldRemove(t func TestEpochStartSubscriptionHandler_NotifyAll(t *testing.T) { t.Parallel() - firstHandlerWasCalled := false - secondHandlerWasCalled := false - lastCalled := 0 + calledHandlers := make(map[int]struct{}) + calledHandlersIndices := make([]int, 0) essh := 
notifier.NewEpochStartSubscriptionHandler() - // register 2 handlers handler1 := notifier.NewHandlerForEpochStart(func(hdr data.HeaderHandler) { - firstHandlerWasCalled = true - lastCalled = 1 + calledHandlers[1] = struct{}{} + calledHandlersIndices = append(calledHandlersIndices, 1) }, nil, 1) handler2 := notifier.NewHandlerForEpochStart(func(hdr data.HeaderHandler) { - secondHandlerWasCalled = true - lastCalled = 2 + calledHandlers[2] = struct{}{} + calledHandlersIndices = append(calledHandlersIndices, 2) }, nil, 2) + handler3 := notifier.NewHandlerForEpochStart(func(hdr data.HeaderHandler) { + calledHandlers[3] = struct{}{} + calledHandlersIndices = append(calledHandlersIndices, 3) + }, nil, 3) - essh.RegisterHandler(handler1) essh.RegisterHandler(handler2) + essh.RegisterHandler(handler1) + essh.RegisterHandler(handler3) // make sure that the handler were not called yet - assert.False(t, firstHandlerWasCalled) - assert.False(t, secondHandlerWasCalled) + assert.Empty(t, calledHandlers) // now we call the NotifyAll method and all handlers should be called essh.NotifyAll(&block.Header{}) - assert.True(t, firstHandlerWasCalled) - assert.True(t, secondHandlerWasCalled) - assert.Equal(t, lastCalled, 2) + assert.Len(t, calledHandlers, 3) + assert.Equal(t, []int{1, 2, 3}, calledHandlersIndices) +} + +func TestEpochStartSubscriptionHandler_ConcurrentOperations(t *testing.T) { + t.Parallel() + + handler := notifier.NewEpochStartSubscriptionHandler() + + numOperations := 500 + wg := sync.WaitGroup{} + wg.Add(numOperations) + for i := 0; i < numOperations; i++ { + go func(idx int) { + switch idx % 6 { + case 0: + handler.RegisterHandler(notifier.NewHandlerForEpochStart(func(hdr data.HeaderHandler) {}, func(hdr data.HeaderHandler) {}, 0)) + case 1: + handler.UnregisterHandler(notifier.NewHandlerForEpochStart(func(hdr data.HeaderHandler) {}, func(hdr data.HeaderHandler) {}, 0)) + case 2: + handler.NotifyAll(&block.Header{}) + case 3: + handler.NotifyAllPrepare(&block.Header{}, &block.Body{}) + case 4: + handler.NotifyEpochChangeConfirmed(uint32(idx + 1)) + case 5: + handler.RegisterForEpochChangeConfirmed(func(epoch uint32) {}) + } + + wg.Done() + }(i) + } + + wg.Wait() } diff --git a/errors/errors.go b/errors/errors.go index 0f548b9cd60..131f93f2b72 100644 --- a/errors/errors.go +++ b/errors/errors.go @@ -118,6 +118,9 @@ var ErrNilStateComponentsFactory = errors.New("nil state components factory") // ErrNilStatusComponentsFactory signals that the provided status components factory is nil var ErrNilStatusComponentsFactory = errors.New("nil status components factory") +// ErrNilStatusCoreComponentsFactory signals that an operation has been attempted with nil status core components factory +var ErrNilStatusCoreComponentsFactory = errors.New("nil status core components factory provided") + // ErrNilBootstrapParamsHandler signals that the provided bootstrap parameters handler is nil var ErrNilBootstrapParamsHandler = errors.New("nil bootstrap parameters handler") @@ -298,15 +301,9 @@ var ErrNilP2pPublicKey = errors.New("nil p2p public key") // ErrNilRater signals that a nil rater was provided var ErrNilRater = errors.New("nil rater") -// ErrNilRatingData signals that nil rating data were provided -var ErrNilRatingData = errors.New("nil rating data") - // ErrNilRatingsInfoHandler signals that nil ratings data information was provided var ErrNilRatingsInfoHandler = errors.New("nil ratings info handler") -// ErrNilRequestedItemHandler signals that a nil requested items handler was provided -var 
ErrNilRequestedItemHandler = errors.New("nil requested item handler") - // ErrNilRequestHandler signals that a nil request handler was provided var ErrNilRequestHandler = errors.New("nil request handler") @@ -322,9 +319,6 @@ var ErrNilRoundHandler = errors.New("nil roundHandler") // ErrNilShardCoordinator signals that a nil shard coordinator was provided var ErrNilShardCoordinator = errors.New("nil shard coordinator provided") -// ErrNilSmartContractParser signals that a nil smart contract parser was provided -var ErrNilSmartContractParser = errors.New("nil smart contract parser") - // ErrNilSoftwareVersion signals that a nil software version was provided var ErrNilSoftwareVersion = errors.New("nil software version") @@ -400,12 +394,6 @@ var ErrNilVmMarshalizer = errors.New("nil vm marshalizer") // ErrNilWatchdog signals that a nil watchdog was provided var ErrNilWatchdog = errors.New("nil watchdog") -// ErrNilWhiteListHandler signals that a nil whitelist handler was provided -var ErrNilWhiteListHandler = errors.New("nil white list handler") - -// ErrNilWhiteListVerifiedTxs signals that a nil whitelist for verified transactions was prvovided -var ErrNilWhiteListVerifiedTxs = errors.New("nil white list verified txs") - // ErrPollingFunctionRegistration signals an error while registering the polling function registration var ErrPollingFunctionRegistration = errors.New("cannot register handler func for num of connected peers") @@ -553,5 +541,14 @@ var ErrNilPeersRatingHandler = errors.New("nil peers rating handler") // ErrNilPeersRatingMonitor signals that a nil peers rating monitor implementation has been provided var ErrNilPeersRatingMonitor = errors.New("nil peers rating monitor") +// ErrNilLogger signals that a nil logger instance has been provided +var ErrNilLogger = errors.New("nil logger") + +// ErrNilShuffleOutCloser signals that a nil shuffle out closer has been provided +var ErrNilShuffleOutCloser = errors.New("nil shuffle out closer") + +// ErrNilHistoryRepository signals that history processor is nil +var ErrNilHistoryRepository = errors.New("history repository is nil") + // ErrNilMissingTrieNodesNotifier signals that a nil missing trie nodes notifier was provided var ErrNilMissingTrieNodesNotifier = errors.New("nil missing trie nodes notifier") diff --git a/examples/construction_test.go b/examples/construction_test.go index 771c266f494..f5f36d19491 100644 --- a/examples/construction_test.go +++ b/examples/construction_test.go @@ -11,6 +11,7 @@ import ( "github.com/multiversx/mx-chain-core-go/data/block" "github.com/multiversx/mx-chain-core-go/data/transaction" "github.com/multiversx/mx-chain-core-go/hashing/blake2b" + "github.com/multiversx/mx-chain-core-go/hashing/keccak" "github.com/multiversx/mx-chain-core-go/marshal" "github.com/multiversx/mx-chain-crypto-go/signing" "github.com/multiversx/mx-chain-crypto-go/signing/ed25519" @@ -21,6 +22,7 @@ import ( var ( addressEncoder, _ = pubkeyConverter.NewBech32PubkeyConverter(32, "erd") signingMarshalizer = &marshal.JsonMarshalizer{} + txSignHasher = keccak.NewKeccak() signer = &singlesig.Ed25519Signer{} signingCryptoSuite = ed25519.NewEd25519() contentMarshalizer = &marshal.GogoProtoMarshalizer{} @@ -188,7 +190,7 @@ func computeTransactionSignature(t *testing.T, senderSeedHex string, tx *transac privateKey, err := keyGenerator.PrivateKeyFromByteArray(senderSeed) require.Nil(t, err) - dataToSign, err := tx.GetDataForSigning(addressEncoder, signingMarshalizer) + dataToSign, err := tx.GetDataForSigning(addressEncoder, 
signingMarshalizer, txSignHasher) require.Nil(t, err) signature, err := signer.Sign(privateKey, dataToSign) diff --git a/facade/initial/initialNodeFacade.go b/facade/initial/initialNodeFacade.go index 41eb1673405..06c40d1bf74 100644 --- a/facade/initial/initialNodeFacade.go +++ b/facade/initial/initialNodeFacade.go @@ -126,20 +126,7 @@ func (inf *initialNodeFacade) GetESDTsWithRole(_ string, _ string, _ api.Account } // CreateTransaction return nil and error -func (inf *initialNodeFacade) CreateTransaction( - _ uint64, - _ string, - _ string, - _ []byte, - _ string, - _ []byte, - _ uint64, - _ uint64, - _ []byte, - _ string, - _ string, - _ uint32, - _ uint32) (*transaction.Transaction, []byte, error) { +func (inf *initialNodeFacade) CreateTransaction(_ *external.ArgsCreateTransaction) (*transaction.Transaction, []byte, error) { return nil, nil, errNodeStarting } @@ -343,6 +330,11 @@ func (inf *initialNodeFacade) GetKeyValuePairs(_ string, _ api.AccountQueryOptio return nil, api.BlockInfo{}, errNodeStarting } +// GetGuardianData returns error +func (inf *initialNodeFacade) GetGuardianData(_ string, _ api.AccountQueryOptions) (api.GuardianData, api.BlockInfo, error) { + return api.GuardianData{}, api.BlockInfo{}, errNodeStarting +} + // GetDirectStakedList returns empty slice func (inf *initialNodeFacade) GetDirectStakedList() ([]*api.DirectStakedValue, error) { return nil, errNodeStarting diff --git a/facade/initial/initialNodeFacade_test.go b/facade/initial/initialNodeFacade_test.go index 533bf85867a..f157571da48 100644 --- a/facade/initial/initialNodeFacade_test.go +++ b/facade/initial/initialNodeFacade_test.go @@ -7,6 +7,7 @@ import ( "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-core-go/data/api" "github.com/multiversx/mx-chain-go/facade" + "github.com/multiversx/mx-chain-go/node/external" "github.com/multiversx/mx-chain-go/testscommon" "github.com/stretchr/testify/assert" ) @@ -67,8 +68,7 @@ func TestInitialNodeFacade_AllMethodsShouldNotPanic(t *testing.T) { assert.Nil(t, s3) assert.Equal(t, errNodeStarting, err) - n1, n2, err := inf.CreateTransaction(uint64(0), "", "", []byte{0}, "", - []byte{0}, uint64(0), uint64(0), []byte{0}, "", "", uint32(0), uint32(0)) + n1, n2, err := inf.CreateTransaction(&external.ArgsCreateTransaction{}) assert.Nil(t, n1) assert.Nil(t, n2) assert.Equal(t, errNodeStarting, err) @@ -229,5 +229,9 @@ func TestInitialNodeFacade_AllMethodsShouldNotPanic(t *testing.T) { assert.Equal(t, uint64(0), nonce) assert.Equal(t, errNodeStarting, err) + guardianData, _, err := inf.GetGuardianData("", api.AccountQueryOptions{}) + assert.Equal(t, api.GuardianData{}, guardianData) + assert.Equal(t, errNodeStarting, err) + assert.False(t, check.IfNil(inf)) } diff --git a/facade/interface.go b/facade/interface.go index 1973b6b9835..09c93a04368 100644 --- a/facade/interface.go +++ b/facade/interface.go @@ -33,6 +33,9 @@ type NodeHandler interface { // GetValueForKey returns the value of a key from a given account GetValueForKey(address string, key string, options api.AccountQueryOptions) (string, api.BlockInfo, error) + // GetGuardianData returns the guardian data for given account + GetGuardianData(address string, options api.AccountQueryOptions) (api.GuardianData, api.BlockInfo, error) + // GetKeyValuePairs returns the key-value pairs under a given address GetKeyValuePairs(address string, options api.AccountQueryOptions, ctx context.Context) (map[string]string, api.BlockInfo, error) @@ -58,8 +61,7 @@ type NodeHandler interface { 
GetTokenSupply(token string) (*api.ESDTSupply, error) // CreateTransaction will return a transaction from all needed fields - CreateTransaction(nonce uint64, value string, receiver string, receiverUsername []byte, sender string, senderUsername []byte, gasPrice uint64, - gasLimit uint64, data []byte, signatureHex string, chainID string, version uint32, options uint32) (*transaction.Transaction, []byte, error) + CreateTransaction(txArgs *external.ArgsCreateTransaction) (*transaction.Transaction, []byte, error) // ValidateTransaction will validate a transaction ValidateTransaction(tx *transaction.Transaction) error diff --git a/facade/mock/nodeStub.go b/facade/mock/nodeStub.go index 2b5f4087c24..729e2d568d4 100644 --- a/facade/mock/nodeStub.go +++ b/facade/mock/nodeStub.go @@ -12,16 +12,16 @@ import ( "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/debug" "github.com/multiversx/mx-chain-go/heartbeat/data" + "github.com/multiversx/mx-chain-go/node/external" "github.com/multiversx/mx-chain-go/state" ) // NodeStub - type NodeStub struct { - ConnectToAddressesHandler func([]string) error - GetBalanceCalled func(address string, options api.AccountQueryOptions) (*big.Int, api.BlockInfo, error) - GenerateTransactionHandler func(sender string, receiver string, amount string, code string) (*transaction.Transaction, error) - CreateTransactionHandler func(nonce uint64, value string, receiver string, receiverUsername []byte, sender string, senderUsername []byte, gasPrice uint64, - gasLimit uint64, data []byte, signatureHex string, chainID string, version, options uint32) (*transaction.Transaction, []byte, error) + ConnectToAddressesHandler func([]string) error + GetBalanceCalled func(address string, options api.AccountQueryOptions) (*big.Int, api.BlockInfo, error) + GenerateTransactionHandler func(sender string, receiver string, amount string, code string) (*transaction.Transaction, error) + CreateTransactionHandler func(txArgs *external.ArgsCreateTransaction) (*transaction.Transaction, []byte, error) ValidateTransactionHandler func(tx *transaction.Transaction) error ValidateTransactionForSimulationCalled func(tx *transaction.Transaction, bypassSignature bool) error SendBulkTransactionsHandler func(txs []*transaction.Transaction) (uint64, error) @@ -36,6 +36,7 @@ type NodeStub struct { IsSelfTriggerCalled func() bool GetQueryHandlerCalled func(name string) (debug.QueryHandler, error) GetValueForKeyCalled func(address string, key string, options api.AccountQueryOptions) (string, api.BlockInfo, error) + GetGuardianDataCalled func(address string, options api.AccountQueryOptions) (api.GuardianData, api.BlockInfo, error) GetPeerInfoCalled func(pid string) ([]core.QueryP2PPeerInfo, error) GetConnectedPeersRatingsCalled func() string GetEpochStartDataAPICalled func(epoch uint32) (*common.EpochStartDataAPI, error) @@ -116,6 +117,14 @@ func (ns *NodeStub) GetValueForKey(address string, key string, options api.Accou return "", api.BlockInfo{}, nil } +// GetGuardianData - +func (ns *NodeStub) GetGuardianData(address string, options api.AccountQueryOptions) (api.GuardianData, api.BlockInfo, error) { + if ns.GetGuardianDataCalled != nil { + return ns.GetGuardianDataCalled(address, options) + } + return api.GuardianData{}, api.BlockInfo{}, nil +} + // EncodeAddressPubkey - func (ns *NodeStub) EncodeAddressPubkey(pk []byte) (string, error) { return hex.EncodeToString(pk), nil @@ -132,10 +141,9 @@ func (ns *NodeStub) GetBalance(address string, options api.AccountQueryOptions) } // 
CreateTransaction - -func (ns *NodeStub) CreateTransaction(nonce uint64, value string, receiver string, receiverUsername []byte, sender string, senderUsername []byte, gasPrice uint64, - gasLimit uint64, data []byte, signatureHex string, chainID string, version uint32, options uint32) (*transaction.Transaction, []byte, error) { +func (ns *NodeStub) CreateTransaction(txArgs *external.ArgsCreateTransaction) (*transaction.Transaction, []byte, error) { - return ns.CreateTransactionHandler(nonce, value, receiver, receiverUsername, sender, senderUsername, gasPrice, gasLimit, data, signatureHex, chainID, version, options) + return ns.CreateTransactionHandler(txArgs) } //ValidateTransaction - diff --git a/facade/nodeFacade.go b/facade/nodeFacade.go index d350a34c97e..eb2523e08a9 100644 --- a/facade/nodeFacade.go +++ b/facade/nodeFacade.go @@ -237,6 +237,11 @@ func (nf *nodeFacade) GetKeyValuePairs(address string, options apiData.AccountQu return nf.node.GetKeyValuePairs(address, options, ctx) } +// GetGuardianData returns the guardian data for the provided address +func (nf *nodeFacade) GetGuardianData(address string, options apiData.AccountQueryOptions) (apiData.GuardianData, apiData.BlockInfo, error) { + return nf.node.GetGuardianData(address, options) +} + // GetAllESDTTokens returns all the esdt tokens for a given address func (nf *nodeFacade) GetAllESDTTokens(address string, options apiData.AccountQueryOptions) (map[string]*esdt.ESDigitalToken, apiData.BlockInfo, error) { ctx, cancel := nf.getContextForApiTrieRangeOperations() @@ -268,23 +273,8 @@ func (nf *nodeFacade) getContextForApiTrieRangeOperations() (context.Context, co } // CreateTransaction creates a transaction from all needed fields -func (nf *nodeFacade) CreateTransaction( - nonce uint64, - value string, - receiver string, - receiverUsername []byte, - sender string, - senderUsername []byte, - gasPrice uint64, - gasLimit uint64, - txData []byte, - signatureHex string, - chainID string, - version uint32, - options uint32, -) (*transaction.Transaction, []byte, error) { - - return nf.node.CreateTransaction(nonce, value, receiver, receiverUsername, sender, senderUsername, gasPrice, gasLimit, txData, signatureHex, chainID, version, options) +func (nf *nodeFacade) CreateTransaction(txArgs *external.ArgsCreateTransaction) (*transaction.Transaction, []byte, error) { + return nf.node.CreateTransaction(txArgs) } // ValidateTransaction will validate a transaction diff --git a/facade/nodeFacade_test.go b/facade/nodeFacade_test.go index 2001c728aab..a1a39380fbc 100644 --- a/facade/nodeFacade_test.go +++ b/facade/nodeFacade_test.go @@ -571,7 +571,7 @@ func TestNodeFacade_CreateTransaction(t *testing.T) { nodeCreateTxWasCalled := false node := &mock.NodeStub{ - CreateTransactionHandler: func(_ uint64, _ string, _ string, _ []byte, _ string, _ []byte, _ uint64, _ uint64, _ []byte, _ string, _ string, _, _ uint32) (*transaction.Transaction, []byte, error) { + CreateTransactionHandler: func(txArgs *external.ArgsCreateTransaction) (*transaction.Transaction, []byte, error) { nodeCreateTxWasCalled = true return nil, nil, nil }, @@ -580,7 +580,7 @@ func TestNodeFacade_CreateTransaction(t *testing.T) { arg.Node = node nf, _ := NewNodeFacade(arg) - _, _, _ = nf.CreateTransaction(0, "0", "0", nil, "0", nil, 0, 0, []byte("0"), "0", "chainID", 1, 0) + _, _, _ = nf.CreateTransaction(&external.ArgsCreateTransaction{}) assert.True(t, nodeCreateTxWasCalled) } @@ -758,6 +758,48 @@ func TestNodeFacade_GetKeyValuePairs(t *testing.T) { assert.Equal(t, expectedPairs, 
res) } +func TestNodeFacade_GetGuardianData(t *testing.T) { + t.Parallel() + arg := createMockArguments() + + emptyGuardianData := api.GuardianData{} + testAddress := "test address" + expectedErr := errors.New("expected error") + + expectedGuardianData := api.GuardianData{ + ActiveGuardian: &api.Guardian{ + Address: "guardian1", + ActivationEpoch: 0, + }, + PendingGuardian: &api.Guardian{ + Address: "guardian2", + ActivationEpoch: 10, + }, + Guarded: true, + } + arg.Node = &mock.NodeStub{ + GetGuardianDataCalled: func(address string, options api.AccountQueryOptions) (api.GuardianData, api.BlockInfo, error) { + if testAddress == address { + return expectedGuardianData, api.BlockInfo{}, nil + } + return emptyGuardianData, api.BlockInfo{}, expectedErr + }, + } + + t.Run("with error", func(t *testing.T) { + nf, _ := NewNodeFacade(arg) + res, _, err := nf.GetGuardianData("", api.AccountQueryOptions{}) + assert.Equal(t, expectedErr, err) + assert.Equal(t, emptyGuardianData, res) + }) + t.Run("ok", func(t *testing.T) { + nf, _ := NewNodeFacade(arg) + res, _, err := nf.GetGuardianData(testAddress, api.AccountQueryOptions{}) + assert.NoError(t, err) + assert.Equal(t, expectedGuardianData, res) + }) +} + func TestNodeFacade_GetAllESDTTokens(t *testing.T) { t.Parallel() diff --git a/factory/api/apiResolverFactory.go b/factory/api/apiResolverFactory.go index 7033187a800..ca726f4e1fe 100644 --- a/factory/api/apiResolverFactory.go +++ b/factory/api/apiResolverFactory.go @@ -61,35 +61,37 @@ type ApiResolverArgs struct { } type scQueryServiceArgs struct { - generalConfig *config.Config - epochConfig *config.EpochConfig - coreComponents factory.CoreComponentsHolder - stateComponents factory.StateComponentsHolder - dataComponents factory.DataComponentsHolder - processComponents factory.ProcessComponentsHolder - statusCoreComponents factory.StatusCoreComponentsHolder - gasScheduleNotifier core.GasScheduleNotifier - messageSigVerifier vm.MessageSignVerifier - systemSCConfig *config.SystemSmartContractsConfig - bootstrapper process.Bootstrapper - allowVMQueriesChan chan struct{} - workingDir string + generalConfig *config.Config + epochConfig *config.EpochConfig + coreComponents factory.CoreComponentsHolder + stateComponents factory.StateComponentsHolder + dataComponents factory.DataComponentsHolder + processComponents factory.ProcessComponentsHolder + statusCoreComponents factory.StatusCoreComponentsHolder + gasScheduleNotifier core.GasScheduleNotifier + messageSigVerifier vm.MessageSignVerifier + systemSCConfig *config.SystemSmartContractsConfig + bootstrapper process.Bootstrapper + guardedAccountHandler process.GuardedAccountHandler + allowVMQueriesChan chan struct{} + workingDir string } type scQueryElementArgs struct { - generalConfig *config.Config - epochConfig *config.EpochConfig - coreComponents factory.CoreComponentsHolder - stateComponents factory.StateComponentsHolder - dataComponents factory.DataComponentsHolder - processComponents factory.ProcessComponentsHolder - gasScheduleNotifier core.GasScheduleNotifier - messageSigVerifier vm.MessageSignVerifier - systemSCConfig *config.SystemSmartContractsConfig - bootstrapper process.Bootstrapper - allowVMQueriesChan chan struct{} - workingDir string - index int + generalConfig *config.Config + epochConfig *config.EpochConfig + coreComponents factory.CoreComponentsHolder + stateComponents factory.StateComponentsHolder + dataComponents factory.DataComponentsHolder + processComponents factory.ProcessComponentsHolder + gasScheduleNotifier 
core.GasScheduleNotifier + messageSigVerifier vm.MessageSignVerifier + systemSCConfig *config.SystemSmartContractsConfig + bootstrapper process.Bootstrapper + guardedAccountHandler process.GuardedAccountHandler + allowVMQueriesChan chan struct{} + workingDir string + index int } // CreateApiResolver is able to create an ApiResolver instance that will solve the REST API requests through the node facade @@ -103,13 +105,13 @@ func CreateApiResolver(args *ApiResolverArgs) (facade.ApiResolver, error) { dataComponents: args.DataComponents, stateComponents: args.StateComponents, processComponents: args.ProcessComponents, - statusCoreComponents: args.StatusCoreComponents, - gasScheduleNotifier: args.GasScheduleNotifier, - messageSigVerifier: args.CryptoComponents.MessageSignVerifier(), - systemSCConfig: args.Configs.SystemSCConfig, - bootstrapper: args.Bootstrapper, - allowVMQueriesChan: args.AllowVMQueriesChan, - workingDir: apiWorkingDir, + statusCoreComponents: args.StatusCoreComponents, gasScheduleNotifier: args.GasScheduleNotifier, + messageSigVerifier: args.CryptoComponents.MessageSignVerifier(), + systemSCConfig: args.Configs.SystemSCConfig, + bootstrapper: args.Bootstrapper, + guardedAccountHandler: args.BootstrapComponents.GuardedAccountHandler(), + allowVMQueriesChan: args.AllowVMQueriesChan, + workingDir: apiWorkingDir, } scQueryService, err := createScQueryService(argsSCQuery) @@ -131,6 +133,7 @@ func CreateApiResolver(args *ApiResolverArgs) (facade.ApiResolver, error) { args.BootstrapComponents.ShardCoordinator(), args.CoreComponents.EpochNotifier(), args.CoreComponents.EnableEpochsHandler(), + args.BootstrapComponents.GuardedAccountHandler(), convertedAddresses, args.Configs.GeneralConfig.BuiltInFunctions.MaxNumAddressesInTransferRole, ) @@ -206,6 +209,7 @@ func CreateApiResolver(args *ApiResolverArgs) (facade.ApiResolver, error) { BuiltInFunctionsCostHandler: builtInCostHandler, EconomicsConfig: *args.Configs.EconomicsConfig, EnableEpochsConfig: args.Configs.EpochConfig.EnableEpochs, + TxVersionChecker: args.CoreComponents.TxVersionChecker(), }) if err != nil { return nil, err @@ -283,19 +287,20 @@ func createScQueryService( } argsQueryElem := &scQueryElementArgs{ - generalConfig: args.generalConfig, - epochConfig: args.epochConfig, - coreComponents: args.coreComponents, - dataComponents: args.dataComponents, - stateComponents: args.stateComponents, - processComponents: args.processComponents, - gasScheduleNotifier: args.gasScheduleNotifier, - messageSigVerifier: args.messageSigVerifier, - systemSCConfig: args.systemSCConfig, - workingDir: args.workingDir, - bootstrapper: args.bootstrapper, - allowVMQueriesChan: args.allowVMQueriesChan, - index: 0, + generalConfig: args.generalConfig, + epochConfig: args.epochConfig, + coreComponents: args.coreComponents, + dataComponents: args.dataComponents, + stateComponents: args.stateComponents, + processComponents: args.processComponents, + gasScheduleNotifier: args.gasScheduleNotifier, + messageSigVerifier: args.messageSigVerifier, + systemSCConfig: args.systemSCConfig, + workingDir: args.workingDir, + bootstrapper: args.bootstrapper, + guardedAccountHandler: args.guardedAccountHandler, + allowVMQueriesChan: args.allowVMQueriesChan, + index: 0, } var err error @@ -340,6 +345,7 @@ func createScQueryElement( args.processComponents.ShardCoordinator(), args.coreComponents.EpochNotifier(), args.coreComponents.EnableEpochsHandler(), + args.guardedAccountHandler, convertedAddresses, args.generalConfig.BuiltInFunctions.MaxNumAddressesInTransferRole, 
) @@ -410,7 +416,7 @@ func createScQueryElement( queryVirtualMachineConfig := args.generalConfig.VirtualMachine.Querying.VirtualMachineConfig esdtTransferParser, errParser := parsers.NewESDTTransferParser(args.coreComponents.InternalMarshalizer()) if errParser != nil { - return nil, err + return nil, errParser } blockChainHookImpl, errBlockChainHook := hooks.NewBlockChainHookImpl(argsHook) @@ -474,6 +480,7 @@ func createBuiltinFuncs( shardCoordinator sharding.Coordinator, epochNotifier vmcommon.EpochNotifier, enableEpochsHandler vmcommon.EnableEpochsHandler, + guardedAccountHandler vmcommon.GuardedAccountHandler, automaticCrawlerAddresses [][]byte, maxNumAddressesInTransferRole uint32, ) (vmcommon.BuiltInFunctionFactory, error) { @@ -485,6 +492,7 @@ func createBuiltinFuncs( ShardCoordinator: shardCoordinator, EpochNotifier: epochNotifier, EnableEpochsHandler: enableEpochsHandler, + GuardedAccountHandler: guardedAccountHandler, AutomaticCrawlerAddresses: automaticCrawlerAddresses, MaxNumNodesInTransferRole: maxNumAddressesInTransferRole, } diff --git a/factory/api/apiResolverFactory_test.go b/factory/api/apiResolverFactory_test.go index 7462ae0496e..3a8288cdf1d 100644 --- a/factory/api/apiResolverFactory_test.go +++ b/factory/api/apiResolverFactory_test.go @@ -1,30 +1,72 @@ package api_test import ( + "strings" + "sync" "testing" + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-core-go/data/typeConverters" + "github.com/multiversx/mx-chain-core-go/marshal" "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/factory/api" "github.com/multiversx/mx-chain-go/factory/bootstrap" "github.com/multiversx/mx-chain-go/factory/mock" + testsMocks "github.com/multiversx/mx-chain-go/integrationTests/mock" + "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/process/sync/disabled" + "github.com/multiversx/mx-chain-go/state" "github.com/multiversx/mx-chain-go/testscommon" componentsMock "github.com/multiversx/mx-chain-go/testscommon/components" + "github.com/multiversx/mx-chain-go/testscommon/dataRetriever" + "github.com/multiversx/mx-chain-go/testscommon/economicsmocks" + epochNotifierMock "github.com/multiversx/mx-chain-go/testscommon/epochNotifier" + "github.com/multiversx/mx-chain-go/testscommon/factory" + "github.com/multiversx/mx-chain-go/testscommon/genericMocks" + "github.com/multiversx/mx-chain-go/testscommon/guardianMocks" + stateMocks "github.com/multiversx/mx-chain-go/testscommon/state" "github.com/stretchr/testify/require" ) -func TestCreateApiResolver(t *testing.T) { - if testing.Short() { - t.Skip("this is not a short test") - } +const unreachableStep = 10000 + +type failingSteps struct { + marshallerStepCounter int + marshallerFailingStep int + + enableEpochsHandlerStepCounter int + enableEpochsHandlerFailingStep int + + uint64ByteSliceConvStepCounter int + uint64ByteSliceConvFailingStep int + + addressPublicKeyConverterStepCounter int + addressPublicKeyConverterFailingStep int +} + +func (fs *failingSteps) reset() { + fs.marshallerStepCounter = 0 + fs.marshallerFailingStep = unreachableStep + fs.enableEpochsHandlerStepCounter = 0 + fs.enableEpochsHandlerFailingStep = unreachableStep + + fs.uint64ByteSliceConvStepCounter = 0 + fs.uint64ByteSliceConvFailingStep = unreachableStep + + fs.addressPublicKeyConverterStepCounter = 0 + fs.addressPublicKeyConverterFailingStep = unreachableStep +} + +func createMockArgs(t 
*testing.T) *api.ApiResolverArgs { shardCoordinator := mock.NewMultiShardsCoordinatorMock(1) coreComponents := componentsMock.GetCoreComponents() cryptoComponents := componentsMock.GetCryptoComponents(coreComponents) networkComponents := componentsMock.GetNetworkComponents(cryptoComponents) dataComponents := componentsMock.GetDataComponents(coreComponents, shardCoordinator) - stateComponents := componentsMock.GetStateComponents(coreComponents, shardCoordinator) + stateComponents := componentsMock.GetStateComponents(coreComponents) processComponents := componentsMock.GetProcessComponents(shardCoordinator, coreComponents, networkComponents, dataComponents, cryptoComponents, stateComponents) argsB := componentsMock.GetBootStrapFactoryArgs() @@ -37,7 +79,8 @@ func TestCreateApiResolver(t *testing.T) { gasSchedule, _ := common.LoadGasScheduleConfig("../../cmd/node/config/gasSchedules/gasScheduleV1.toml") economicsConfig := testscommon.GetEconomicsConfig() cfg := componentsMock.GetGeneralConfig() - args := &api.ApiResolverArgs{ + + return &api.ApiResolverArgs{ Configs: &config.Configs{ FlagsConfig: &config.ContextFlagsConfig{ WorkingDir: "", @@ -59,8 +102,367 @@ func TestCreateApiResolver(t *testing.T) { Bootstrapper: disabled.NewDisabledBootstrapper(), AllowVMQueriesChan: common.GetClosedUnbufferedChannel(), } +} + +func createFailingMockArgs(t *testing.T, failingSteps *failingSteps) *api.ApiResolverArgs { + args := createMockArgs(t) + coreCompStub := factory.NewCoreComponentsHolderStubFromRealComponent(args.CoreComponents) + + internalMarshaller := args.CoreComponents.InternalMarshalizer() + coreCompStub.InternalMarshalizerCalled = func() marshal.Marshalizer { + failingSteps.marshallerStepCounter++ + if failingSteps.marshallerStepCounter > failingSteps.marshallerFailingStep { + return nil + } + return internalMarshaller + } + + enableEpochsHandler := args.CoreComponents.EnableEpochsHandler() + coreCompStub.EnableEpochsHandlerCalled = func() common.EnableEpochsHandler { + failingSteps.enableEpochsHandlerStepCounter++ + if failingSteps.enableEpochsHandlerStepCounter > failingSteps.enableEpochsHandlerFailingStep { + return nil + } + return enableEpochsHandler + } + + byteSliceConv := args.CoreComponents.Uint64ByteSliceConverter() + coreCompStub.Uint64ByteSliceConverterCalled = func() typeConverters.Uint64ByteSliceConverter { + failingSteps.uint64ByteSliceConvStepCounter++ + if failingSteps.uint64ByteSliceConvStepCounter > failingSteps.uint64ByteSliceConvFailingStep { + return nil + } + return byteSliceConv + } + + pubKeyConv := args.CoreComponents.AddressPubKeyConverter() + coreCompStub.AddressPubKeyConverterCalled = func() core.PubkeyConverter { + failingSteps.addressPublicKeyConverterStepCounter++ + if failingSteps.addressPublicKeyConverterStepCounter > failingSteps.addressPublicKeyConverterFailingStep { + return nil + } + return pubKeyConv + } + + args.CoreComponents = coreCompStub + return args +} + +func TestCreateApiResolver(t *testing.T) { + t.Parallel() + + t.Run("createScQueryService fails should error", func(t *testing.T) { + t.Parallel() + + args := createMockArgs(t) + args.Configs.GeneralConfig.VirtualMachine.Querying.NumConcurrentVMs = 0 + apiResolver, err := api.CreateApiResolver(args) + require.True(t, strings.Contains(err.Error(), "VirtualMachine.Querying.NumConcurrentVms")) + require.True(t, check.IfNil(apiResolver)) + }) + + failingStepsInstance := &failingSteps{} + failingArgs := createFailingMockArgs(t, failingStepsInstance) + // do not run these tests in parallel as they all 
use the same args + t.Run("DecodeAddresses fails causing createScQueryElement error should error", func(t *testing.T) { + failingStepsInstance.reset() + failingStepsInstance.addressPublicKeyConverterFailingStep = 0 + apiResolver, err := api.CreateApiResolver(failingArgs) + require.NotNil(t, err) + require.True(t, strings.Contains(strings.ToLower(err.Error()), "public key converter")) + require.True(t, check.IfNil(apiResolver)) + }) + t.Run("DecodeAddresses fails should error", func(t *testing.T) { + failingStepsInstance.reset() + failingStepsInstance.addressPublicKeyConverterFailingStep = 2 + apiResolver, err := api.CreateApiResolver(failingArgs) + require.NotNil(t, err) + require.True(t, strings.Contains(strings.ToLower(err.Error()), "public key converter")) + require.True(t, check.IfNil(apiResolver)) + }) + t.Run("createBuiltinFuncs fails should error", func(t *testing.T) { + failingStepsInstance.reset() + failingStepsInstance.marshallerFailingStep = 3 + apiResolver, err := api.CreateApiResolver(failingArgs) + require.NotNil(t, err) + require.True(t, strings.Contains(strings.ToLower(err.Error()), "marshalizer")) + require.True(t, check.IfNil(apiResolver)) + }) + t.Run("NewESDTTransferParser fails should error", func(t *testing.T) { + failingStepsInstance.reset() + failingStepsInstance.marshallerFailingStep = 4 + apiResolver, err := api.CreateApiResolver(failingArgs) + require.NotNil(t, err) + require.True(t, strings.Contains(strings.ToLower(err.Error()), "marshaller")) + require.True(t, check.IfNil(apiResolver)) + }) + t.Run("NewTxTypeHandler fails should error", func(t *testing.T) { + failingStepsInstance.reset() + failingStepsInstance.enableEpochsHandlerFailingStep = 4 + apiResolver, err := api.CreateApiResolver(failingArgs) + require.NotNil(t, err) + require.True(t, strings.Contains(strings.ToLower(err.Error()), "enable epochs handler")) + require.True(t, check.IfNil(apiResolver)) + }) + t.Run("NewTransactionCostEstimator fails should error", func(t *testing.T) { + failingStepsInstance.reset() + failingStepsInstance.enableEpochsHandlerFailingStep = 5 + apiResolver, err := api.CreateApiResolver(failingArgs) + require.NotNil(t, err) + require.True(t, strings.Contains(strings.ToLower(err.Error()), "enable epochs handler")) + require.True(t, check.IfNil(apiResolver)) + }) + t.Run("createLogsFacade fails should error", func(t *testing.T) { + failingStepsInstance.reset() + failingStepsInstance.marshallerFailingStep = 5 + apiResolver, err := api.CreateApiResolver(failingArgs) + require.NotNil(t, err) + require.True(t, strings.Contains(strings.ToLower(err.Error()), "marshalizer")) + require.True(t, check.IfNil(apiResolver)) + }) + t.Run("NewOperationDataFieldParser fails should error", func(t *testing.T) { + failingStepsInstance.reset() + failingStepsInstance.marshallerFailingStep = 6 + apiResolver, err := api.CreateApiResolver(failingArgs) + require.NotNil(t, err) + require.True(t, strings.Contains(strings.ToLower(err.Error()), "marshalizer")) + require.True(t, check.IfNil(apiResolver)) + }) + t.Run("NewAPITransactionProcessor fails should error", func(t *testing.T) { + failingStepsInstance.reset() + failingStepsInstance.marshallerFailingStep = 7 + apiResolver, err := api.CreateApiResolver(failingArgs) + require.NotNil(t, err) + require.True(t, strings.Contains(strings.ToLower(err.Error()), "marshalizer")) + require.True(t, check.IfNil(apiResolver)) + }) + t.Run("createAPIBlockProcessor fails because createAPIBlockProcessorArgs fails should error", func(t *testing.T) { 
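// NOTE (editor's illustration, not part of the patch): the failingSteps counters used in
// these subtests implement step-indexed failure injection: each stubbed getter counts its
// calls and starts returning nil only once its configured failing step is exceeded, so a
// single dependency can be failed at one exact construction stage inside CreateApiResolver.
// A minimal sketch of the pattern, with hypothetical names (stub, realMarshaller, failAfter):
//
//	callCount, failAfter := 0, 3
//	stub.InternalMarshalizerCalled = func() marshal.Marshalizer {
//		callCount++
//		if callCount > failAfter {
//			return nil // callers after the failing step receive a nil marshaller
//		}
//		return realMarshaller
//	}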
+ failingStepsInstance.reset() + failingStepsInstance.uint64ByteSliceConvFailingStep = 2 + apiResolver, err := api.CreateApiResolver(failingArgs) + require.NotNil(t, err) + require.True(t, strings.Contains(strings.ToLower(err.Error()), "uint64")) + require.True(t, check.IfNil(apiResolver)) + }) + t.Run("createAPIInternalBlockProcessor fails because createAPIBlockProcessorArgs fails should error", func(t *testing.T) { + failingStepsInstance.reset() + failingStepsInstance.uint64ByteSliceConvFailingStep = 4 + apiResolver, err := api.CreateApiResolver(failingArgs) + require.NotNil(t, err) + require.True(t, strings.Contains(strings.ToLower(err.Error()), "uint64")) + require.True(t, check.IfNil(apiResolver)) + }) + t.Run("createAPIBlockProcessorArgs fails because createLogsFacade fails should error", func(t *testing.T) { + failingStepsInstance.reset() + failingStepsInstance.marshallerFailingStep = 8 + apiResolver, err := api.CreateApiResolver(failingArgs) + require.NotNil(t, err) + require.True(t, strings.Contains(strings.ToLower(err.Error()), "marshalizer")) + require.True(t, check.IfNil(apiResolver)) + }) + t.Run("createAPIBlockProcessorArgs fails because NewAlteredAccountsProvider fails should error", func(t *testing.T) { + failingStepsInstance.reset() + failingStepsInstance.addressPublicKeyConverterFailingStep = 9 + apiResolver, err := api.CreateApiResolver(failingArgs) + require.NotNil(t, err) + require.True(t, strings.Contains(strings.ToLower(err.Error()), "public key converter")) + require.True(t, check.IfNil(apiResolver)) + }) + t.Run("should work", func(t *testing.T) { + failingStepsInstance.reset() // no failure + apiResolver, err := api.CreateApiResolver(failingArgs) + require.Nil(t, err) + require.False(t, check.IfNil(apiResolver)) + }) +} + +func createMockSCQueryElementArgs() api.SCQueryElementArgs { + return api.SCQueryElementArgs{ + GeneralConfig: &config.Config{ + BuiltInFunctions: config.BuiltInFunctionsConfig{ + MaxNumAddressesInTransferRole: 1, + AutomaticCrawlerAddresses: []string{"addr1"}, + }, + SmartContractDataPool: config.CacheConfig{ + Type: "LRU", + Capacity: 100, + }, + }, + EpochConfig: &config.EpochConfig{}, + CoreComponents: &mock.CoreComponentsMock{ + AddrPubKeyConv: &testscommon.PubkeyConverterStub{ + DecodeCalled: func(humanReadable string) ([]byte, error) { + return []byte(humanReadable), nil + }, + }, + IntMarsh: &testscommon.MarshalizerStub{}, + EpochChangeNotifier: &epochNotifierMock.EpochNotifierStub{}, + EnableEpochsHandlerField: &testscommon.EnableEpochsHandlerStub{}, + UInt64ByteSliceConv: &testsMocks.Uint64ByteSliceConverterMock{}, + EconomicsHandler: &economicsmocks.EconomicsHandlerStub{}, + NodesConfig: &testscommon.NodesSetupStub{}, + Hash: &testscommon.HasherStub{}, + RatingHandler: &testscommon.RaterMock{}, + WasmVMChangeLockerInternal: &sync.RWMutex{}, + }, + StateComponents: &mock.StateComponentsHolderStub{ + AccountsAdapterAPICalled: func() state.AccountsAdapter { + return &stateMocks.AccountsStub{} + }, + PeerAccountsCalled: func() state.AccountsAdapter { + return &stateMocks.AccountsStub{} + }, + }, + DataComponents: &mock.DataComponentsMock{ + Storage: &genericMocks.ChainStorerMock{}, + Blkc: &testscommon.ChainHandlerMock{}, + DataPool: &dataRetriever.PoolsHolderMock{}, + }, + ProcessComponents: &mock.ProcessComponentsMock{ + ShardCoord: &testscommon.ShardsCoordinatorMock{}, + }, + GasScheduleNotifier: &testscommon.GasScheduleNotifierMock{ + LatestGasScheduleCalled: func() map[string]map[string]uint64 { + gasSchedule, _ := 
common.LoadGasScheduleConfig("../../cmd/node/config/gasSchedules/gasScheduleV1.toml") + return gasSchedule + }, + }, + MessageSigVerifier: &testscommon.MessageSignVerifierMock{}, + SystemSCConfig: &config.SystemSmartContractsConfig{}, + Bootstrapper: testsMocks.NewTestBootstrapperMock(), + AllowVMQueriesChan: make(chan struct{}, 1), + WorkingDir: "", + Index: 0, + GuardedAccountHandler: &guardianMocks.GuardedAccountHandlerStub{}, + } +} + +func TestCreateApiResolver_createScQueryElement(t *testing.T) { + t.Parallel() + + t.Run("nil guardian handler should error", func(t *testing.T) { + t.Parallel() + + args := createMockSCQueryElementArgs() + args.GuardedAccountHandler = nil + scQueryService, err := api.CreateScQueryElement(args) + require.Equal(t, process.ErrNilGuardedAccountHandler, err) + require.Nil(t, scQueryService) + }) + t.Run("DecodeAddresses fails", func(t *testing.T) { + t.Parallel() + + args := createMockSCQueryElementArgs() + args.CoreComponents = &mock.CoreComponentsMock{ + AddrPubKeyConv: nil, + } + scQueryService, err := api.CreateScQueryElement(args) + require.NotNil(t, err) + require.True(t, strings.Contains(strings.ToLower(err.Error()), "public key converter")) + require.Nil(t, scQueryService) + }) + t.Run("createBuiltinFuncs fails", func(t *testing.T) { + t.Parallel() + + args := createMockSCQueryElementArgs() + coreCompMock := args.CoreComponents.(*mock.CoreComponentsMock) + coreCompMock.IntMarsh = nil + scQueryService, err := api.CreateScQueryElement(args) + require.NotNil(t, err) + require.True(t, strings.Contains(strings.ToLower(err.Error()), "marshalizer")) + require.Nil(t, scQueryService) + }) + t.Run("NewCache fails", func(t *testing.T) { + t.Parallel() + + args := createMockSCQueryElementArgs() + args.GeneralConfig.SmartContractDataPool = config.CacheConfig{ + Type: "LRU", + SizeInBytes: 1, + } + scQueryService, err := api.CreateScQueryElement(args) + require.NotNil(t, err) + require.True(t, strings.Contains(strings.ToLower(err.Error()), "lru")) + require.Nil(t, scQueryService) + }) + t.Run("metachain - NewBlockChainHookImpl fails", func(t *testing.T) { + t.Parallel() + + args := createMockSCQueryElementArgs() + args.ProcessComponents = &mock.ProcessComponentsMock{ + ShardCoord: &testscommon.ShardsCoordinatorMock{ + SelfIDCalled: func() uint32 { + return common.MetachainShardId + }, + }, + } + dataCompMock := args.DataComponents.(*mock.DataComponentsMock) + dataCompMock.Storage = nil + scQueryService, err := api.CreateScQueryElement(args) + require.NotNil(t, err) + require.True(t, strings.Contains(strings.ToLower(err.Error()), "storage")) + require.Nil(t, scQueryService) + }) + t.Run("metachain - NewVMContainerFactory fails", func(t *testing.T) { + t.Parallel() + + args := createMockSCQueryElementArgs() + args.ProcessComponents = &mock.ProcessComponentsMock{ + ShardCoord: &testscommon.ShardsCoordinatorMock{ + SelfIDCalled: func() uint32 { + return common.MetachainShardId + }, + }, + } + coreCompMock := args.CoreComponents.(*mock.CoreComponentsMock) + coreCompMock.Hash = nil + scQueryService, err := api.CreateScQueryElement(args) + require.NotNil(t, err) + require.True(t, strings.Contains(strings.ToLower(err.Error()), "hasher")) + require.Nil(t, scQueryService) + }) + t.Run("shard - NewVMContainerFactory fails", func(t *testing.T) { + t.Parallel() + + args := createMockSCQueryElementArgs() + coreCompStub := factory.NewCoreComponentsHolderStubFromRealComponent(args.CoreComponents) + internalMarshaller := args.CoreComponents.InternalMarshalizer() + counter := 0 + 
coreCompStub.InternalMarshalizerCalled = func() marshal.Marshalizer { + counter++ + if counter > 2 { + return nil + } + return internalMarshaller + } + args.CoreComponents = coreCompStub + scQueryService, err := api.CreateScQueryElement(args) + require.NotNil(t, err) + require.True(t, strings.Contains(strings.ToLower(err.Error()), "marshaller")) + require.Nil(t, scQueryService) + }) + t.Run("shard - NewBlockChainHookImpl fails", func(t *testing.T) { + t.Parallel() + + args := createMockSCQueryElementArgs() + dataCompMock := args.DataComponents.(*mock.DataComponentsMock) + dataCompMock.Storage = nil + scQueryService, err := api.CreateScQueryElement(args) + require.NotNil(t, err) + require.True(t, strings.Contains(strings.ToLower(err.Error()), "storage")) + require.Nil(t, scQueryService) + }) + t.Run("shard - NewVMContainerFactory fails", func(t *testing.T) { + t.Parallel() + + args := createMockSCQueryElementArgs() + coreCompMock := args.CoreComponents.(*mock.CoreComponentsMock) + coreCompMock.Hash = nil + scQueryService, err := api.CreateScQueryElement(args) + require.NotNil(t, err) + require.True(t, strings.Contains(strings.ToLower(err.Error()), "hasher")) + require.Nil(t, scQueryService) + }) - apiResolver, err := api.CreateApiResolver(args) - require.Nil(t, err) - require.NotNil(t, apiResolver) } diff --git a/factory/api/export_test.go b/factory/api/export_test.go new file mode 100644 index 00000000000..85a5c6f08b6 --- /dev/null +++ b/factory/api/export_test.go @@ -0,0 +1,47 @@ +package api + +import ( + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/factory" + "github.com/multiversx/mx-chain-go/process" + "github.com/multiversx/mx-chain-go/vm" +) + +// SCQueryElementArgs - +type SCQueryElementArgs struct { + GeneralConfig *config.Config + EpochConfig *config.EpochConfig + CoreComponents factory.CoreComponentsHolder + StateComponents factory.StateComponentsHolder + DataComponents factory.DataComponentsHolder + ProcessComponents factory.ProcessComponentsHolder + GasScheduleNotifier core.GasScheduleNotifier + MessageSigVerifier vm.MessageSignVerifier + SystemSCConfig *config.SystemSmartContractsConfig + Bootstrapper process.Bootstrapper + AllowVMQueriesChan chan struct{} + WorkingDir string + Index int + GuardedAccountHandler process.GuardedAccountHandler +} + +// CreateScQueryElement - +func CreateScQueryElement(args SCQueryElementArgs) (process.SCQueryService, error) { + return createScQueryElement(&scQueryElementArgs{ + generalConfig: args.GeneralConfig, + epochConfig: args.EpochConfig, + coreComponents: args.CoreComponents, + stateComponents: args.StateComponents, + dataComponents: args.DataComponents, + processComponents: args.ProcessComponents, + gasScheduleNotifier: args.GasScheduleNotifier, + messageSigVerifier: args.MessageSigVerifier, + systemSCConfig: args.SystemSCConfig, + bootstrapper: args.Bootstrapper, + allowVMQueriesChan: args.AllowVMQueriesChan, + workingDir: args.WorkingDir, + index: args.Index, + guardedAccountHandler: args.GuardedAccountHandler, + }) +} diff --git a/factory/bootstrap/bootstrapComponents.go b/factory/bootstrap/bootstrapComponents.go index e0f6ae2b110..9f580416853 100644 --- a/factory/bootstrap/bootstrapComponents.go +++ b/factory/bootstrap/bootstrapComponents.go @@ -13,6 +13,8 @@ import ( "github.com/multiversx/mx-chain-go/errors" "github.com/multiversx/mx-chain-go/factory" "github.com/multiversx/mx-chain-go/factory/block" + "github.com/multiversx/mx-chain-go/process" + 
"github.com/multiversx/mx-chain-go/process/guardian" "github.com/multiversx/mx-chain-go/process/headerCheck" "github.com/multiversx/mx-chain-go/process/smartContract" "github.com/multiversx/mx-chain-go/sharding" @@ -26,7 +28,7 @@ import ( var log = logger.GetOrCreate("factory") -// BootstrapComponentsFactoryArgs holds the arguments needed to create a botstrap components factory +// BootstrapComponentsFactoryArgs holds the arguments needed to create a bootstrap components factory type BootstrapComponentsFactoryArgs struct { Config config.Config RoundConfig config.RoundConfig @@ -60,6 +62,7 @@ type bootstrapComponents struct { headerVersionHandler nodeFactory.HeaderVersionHandler versionedHeaderFactory nodeFactory.VersionedHeaderFactory headerIntegrityVerifier nodeFactory.HeaderIntegrityVerifierHandler + guardedAccountHandler process.GuardedAccountHandler } // NewBootstrapComponentsFactory creates an instance of bootstrapComponentsFactory @@ -82,9 +85,6 @@ func NewBootstrapComponentsFactory(args BootstrapComponentsFactoryArgs) (*bootst if args.WorkingDir == "" { return nil, errors.ErrInvalidWorkingDir } - if check.IfNil(args.StatusCoreComponents) { - return nil, errors.ErrNilStatusCoreComponents - } if check.IfNil(args.StatusCoreComponents.AppStatusHandler()) { return nil, errors.ErrNilAppStatusHandler } @@ -179,6 +179,12 @@ func (bcf *bootstrapComponentsFactory) Create() (*bootstrapComponents, error) { tss := bcf.statusCoreComponents.TrieSyncStatistics() tss.AddNumProcessed(1) + setGuardianEpochsDelay := bcf.config.GeneralSettings.SetGuardianEpochsDelay + guardedAccountHandler, err := guardian.NewGuardedAccount(bcf.coreComponents.InternalMarshalizer(), bcf.coreComponents.EpochNotifier(), setGuardianEpochsDelay) + if err != nil { + return nil, err + } + epochStartBootstrapArgs := bootstrap.ArgsEpochStartBootstrap{ CoreComponentsHolder: bcf.coreComponents, CryptoComponentsHolder: bcf.cryptoComponents, @@ -201,6 +207,7 @@ func (bcf *bootstrapComponentsFactory) Create() (*bootstrapComponents, error) { DataSyncerCreator: dataSyncerFactory, ScheduledSCRsStorer: nil, // will be updated after sync from network TrieSyncStatisticsProvider: tss, + NodeProcessingMode: common.GetNodeProcessingMode(&bcf.importDbConfig), } var epochStartBootstrapper factory.EpochStartBootstrapper @@ -256,6 +263,7 @@ func (bcf *bootstrapComponentsFactory) Create() (*bootstrapComponents, error) { headerVersionHandler: headerVersionHandler, headerIntegrityVerifier: headerIntegrityVerifier, versionedHeaderFactory: versionedHeaderFactory, + guardedAccountHandler: guardedAccountHandler, }, nil } diff --git a/factory/bootstrap/bootstrapComponentsHandler.go b/factory/bootstrap/bootstrapComponentsHandler.go index ad6a3e85b43..bda412e2759 100644 --- a/factory/bootstrap/bootstrapComponentsHandler.go +++ b/factory/bootstrap/bootstrapComponentsHandler.go @@ -7,6 +7,7 @@ import ( "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-go/errors" "github.com/multiversx/mx-chain-go/factory" + "github.com/multiversx/mx-chain-go/process" ) var _ factory.ComponentHandler = (*managedBootstrapComponents)(nil) @@ -93,6 +94,18 @@ func (mbf *managedBootstrapComponents) EpochStartBootstrapper() factory.EpochSta return mbf.bootstrapComponents.epochStartBootstrapper } +// GuardedAccountHandler returns the guarded account handler +func (mbf *managedBootstrapComponents) GuardedAccountHandler() process.GuardedAccountHandler { + mbf.mutBootstrapComponents.RLock() + defer mbf.mutBootstrapComponents.RUnlock() + + if 
mbf.bootstrapComponents == nil { + return nil + } + + return mbf.bootstrapComponents.guardedAccountHandler +} + // EpochBootstrapParams returns the epoch start bootstrap parameters handler func (mbf *managedBootstrapComponents) EpochBootstrapParams() factory.BootstrapParamsHolder { mbf.mutBootstrapComponents.RLock() diff --git a/factory/bootstrap/bootstrapComponentsHandler_test.go b/factory/bootstrap/bootstrapComponentsHandler_test.go index c4934611449..c95896f9959 100644 --- a/factory/bootstrap/bootstrapComponentsHandler_test.go +++ b/factory/bootstrap/bootstrapComponentsHandler_test.go @@ -4,18 +4,17 @@ import ( "errors" "testing" - errorsErd "github.com/multiversx/mx-chain-go/errors" + errorsMx "github.com/multiversx/mx-chain-go/errors" + "github.com/multiversx/mx-chain-go/factory" "github.com/multiversx/mx-chain-go/factory/bootstrap" componentsMock "github.com/multiversx/mx-chain-go/testscommon/components" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) // ------------ Test ManagedBootstrapComponents -------------------- func TestNewManagedBootstrapComponents(t *testing.T) { t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } args := componentsMock.GetBootStrapFactoryArgs() bcf, _ := bootstrap.NewBootstrapComponentsFactory(args) @@ -27,35 +26,30 @@ func TestNewManagedBootstrapComponents(t *testing.T) { func TestNewBootstrapComponentsFactory_NilFactory(t *testing.T) { t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } mbc, err := bootstrap.NewManagedBootstrapComponents(nil) require.Nil(t, mbc) - require.Equal(t, errorsErd.ErrNilBootstrapComponentsFactory, err) + require.Equal(t, errorsMx.ErrNilBootstrapComponentsFactory, err) } -func TestManagedBootstrapComponents_CheckSubcomponentsNoCreate(t *testing.T) { +func TestManagedBootstrapComponents_MethodsNoCreate(t *testing.T) { t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } args := componentsMock.GetBootStrapFactoryArgs() bcf, _ := bootstrap.NewBootstrapComponentsFactory(args) mbc, _ := bootstrap.NewManagedBootstrapComponents(bcf) err := mbc.CheckSubcomponents() + require.Equal(t, errorsMx.ErrNilBootstrapComponentsHolder, err) - require.Equal(t, errorsErd.ErrNilBootstrapComponentsHolder, err) + assert.Nil(t, mbc.EpochStartBootstrapper()) + assert.Nil(t, mbc.EpochBootstrapParams()) + assert.Nil(t, mbc.Close()) + assert.Equal(t, factory.BootstrapComponentsName, mbc.String()) } -func TestManagedBootstrapComponents_Create(t *testing.T) { +func TestManagedBootstrapComponents_MethodsCreate(t *testing.T) { t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } args := componentsMock.GetBootStrapFactoryArgs() bcf, _ := bootstrap.NewBootstrapComponentsFactory(args) @@ -66,13 +60,21 @@ func TestManagedBootstrapComponents_Create(t *testing.T) { err = mbc.CheckSubcomponents() require.Nil(t, err) + + assert.NotNil(t, mbc.EpochStartBootstrapper()) + params := mbc.EpochBootstrapParams() + require.NotNil(t, params) + assert.Equal(t, uint32(0), params.Epoch()) + assert.Equal(t, uint32(0), params.SelfShardID()) + assert.Equal(t, uint32(2), params.NumOfShards()) + assert.Nil(t, params.NodesConfig()) + + assert.Nil(t, mbc.Close()) + assert.Equal(t, factory.BootstrapComponentsName, mbc.String()) } func TestManagedBootstrapComponents_CreateNilInternalMarshalizer(t *testing.T) { t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } args := componentsMock.GetBootStrapFactoryArgs() coreComponents := 
componentsMock.GetDefaultCoreComponents() @@ -82,14 +84,11 @@ func TestManagedBootstrapComponents_CreateNilInternalMarshalizer(t *testing.T) { coreComponents.IntMarsh = nil err := mbc.Create() - require.True(t, errors.Is(err, errorsErd.ErrBootstrapDataComponentsFactoryCreate)) + require.True(t, errors.Is(err, errorsMx.ErrBootstrapDataComponentsFactoryCreate)) } func TestManagedBootstrapComponents_Close(t *testing.T) { t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } args := componentsMock.GetBootStrapFactoryArgs() @@ -102,3 +101,15 @@ func TestManagedBootstrapComponents_Close(t *testing.T) { _ = mbc.Close() require.Nil(t, mbc.EpochBootstrapParams()) } + +func TestManagedBootstrapComponents_IsInterfaceNil(t *testing.T) { + t.Parallel() + + mbc, _ := bootstrap.NewManagedBootstrapComponents(nil) + require.True(t, mbc.IsInterfaceNil()) + + args := componentsMock.GetBootStrapFactoryArgs() + bcf, _ := bootstrap.NewBootstrapComponentsFactory(args) + mbc, _ = bootstrap.NewManagedBootstrapComponents(bcf) + require.False(t, mbc.IsInterfaceNil()) +} diff --git a/factory/bootstrap/bootstrapComponents_test.go b/factory/bootstrap/bootstrapComponents_test.go index dcbb5a0c8c4..85c22017b28 100644 --- a/factory/bootstrap/bootstrapComponents_test.go +++ b/factory/bootstrap/bootstrapComponents_test.go @@ -1,140 +1,247 @@ package bootstrap_test import ( + "bytes" "errors" + "strings" "testing" - errorsErd "github.com/multiversx/mx-chain-go/errors" + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-go/config" + errorsMx "github.com/multiversx/mx-chain-go/errors" "github.com/multiversx/mx-chain-go/factory/bootstrap" + "github.com/multiversx/mx-chain-go/testscommon" componentsMock "github.com/multiversx/mx-chain-go/testscommon/components" + "github.com/multiversx/mx-chain-go/testscommon/factory" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) -// ------------ Test BootstrapComponentsFactory -------------------- -func TestNewBootstrapComponentsFactory_OkValuesShouldWork(t *testing.T) { +func TestNewBootstrapComponentsFactory(t *testing.T) { t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } args := componentsMock.GetBootStrapFactoryArgs() - - bcf, err := bootstrap.NewBootstrapComponentsFactory(args) - - require.NotNil(t, bcf) - require.Nil(t, err) -} - -func TestNewBootstrapComponentsFactory_NilCoreComponents(t *testing.T) { - t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } - - args := componentsMock.GetBootStrapFactoryArgs() - args.CoreComponents = nil - - bcf, err := bootstrap.NewBootstrapComponentsFactory(args) - - require.Nil(t, bcf) - require.Equal(t, errorsErd.ErrNilCoreComponentsHolder, err) -} - -func TestNewBootstrapComponentsFactory_NilCryptoComponents(t *testing.T) { - t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } - - args := componentsMock.GetBootStrapFactoryArgs() - args.CryptoComponents = nil - - bcf, err := bootstrap.NewBootstrapComponentsFactory(args) - - require.Nil(t, bcf) - require.Equal(t, errorsErd.ErrNilCryptoComponentsHolder, err) -} - -func TestNewBootstrapComponentsFactory_NilNetworkComponents(t *testing.T) { - t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } - - args := componentsMock.GetBootStrapFactoryArgs() - args.NetworkComponents = nil - - bcf, err := bootstrap.NewBootstrapComponentsFactory(args) - - require.Nil(t, bcf) - 
require.Equal(t, errorsErd.ErrNilNetworkComponentsHolder, err) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + bcf, err := bootstrap.NewBootstrapComponentsFactory(args) + require.NotNil(t, bcf) + require.Nil(t, err) + }) + t.Run("nil core components should error", func(t *testing.T) { + t.Parallel() + + argsCopy := args + argsCopy.CoreComponents = nil + bcf, err := bootstrap.NewBootstrapComponentsFactory(argsCopy) + require.Nil(t, bcf) + require.Equal(t, errorsMx.ErrNilCoreComponentsHolder, err) + }) + t.Run("nil crypto components should error", func(t *testing.T) { + t.Parallel() + + argsCopy := args + argsCopy.CryptoComponents = nil + bcf, err := bootstrap.NewBootstrapComponentsFactory(argsCopy) + require.Nil(t, bcf) + require.Equal(t, errorsMx.ErrNilCryptoComponentsHolder, err) + }) + t.Run("nil network components should error", func(t *testing.T) { + t.Parallel() + + argsCopy := args + argsCopy.NetworkComponents = nil + bcf, err := bootstrap.NewBootstrapComponentsFactory(argsCopy) + require.Nil(t, bcf) + require.Equal(t, errorsMx.ErrNilNetworkComponentsHolder, err) + }) + t.Run("nil status core components should error", func(t *testing.T) { + t.Parallel() + + argsCopy := args + argsCopy.StatusCoreComponents = nil + bcf, err := bootstrap.NewBootstrapComponentsFactory(argsCopy) + require.Nil(t, bcf) + require.Equal(t, errorsMx.ErrNilStatusCoreComponents, err) + }) + t.Run("nil trie sync statistics should error", func(t *testing.T) { + t.Parallel() + + argsCopy := args + argsCopy.StatusCoreComponents = &factory.StatusCoreComponentsStub{ + TrieSyncStatisticsField: nil, + } + bcf, err := bootstrap.NewBootstrapComponentsFactory(argsCopy) + require.Nil(t, bcf) + require.Equal(t, errorsMx.ErrNilTrieSyncStatistics, err) + }) + t.Run("nil app status handler should error", func(t *testing.T) { + t.Parallel() + + argsCopy := args + argsCopy.StatusCoreComponents = &factory.StatusCoreComponentsStub{ + AppStatusHandlerField: nil, + TrieSyncStatisticsField: &testscommon.SizeSyncStatisticsHandlerStub{}, + } + bcf, err := bootstrap.NewBootstrapComponentsFactory(argsCopy) + require.Nil(t, bcf) + require.Equal(t, errorsMx.ErrNilAppStatusHandler, err) + }) + t.Run("empty working dir should error", func(t *testing.T) { + t.Parallel() + + argsCopy := args + argsCopy.WorkingDir = "" + bcf, err := bootstrap.NewBootstrapComponentsFactory(argsCopy) + require.Nil(t, bcf) + require.Equal(t, errorsMx.ErrInvalidWorkingDir, err) + }) } -func TestNewBootstrapComponentsFactory_NilWorkingDir(t *testing.T) { +func TestBootstrapComponentsFactory_Create(t *testing.T) { t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } - args := componentsMock.GetBootStrapFactoryArgs() - args.WorkingDir = "" - - bcf, err := bootstrap.NewBootstrapComponentsFactory(args) - - require.Nil(t, bcf) - require.Equal(t, errorsErd.ErrInvalidWorkingDir, err) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + args := componentsMock.GetBootStrapFactoryArgs() + bcf, _ := bootstrap.NewBootstrapComponentsFactory(args) + require.NotNil(t, bcf) + + bc, err := bcf.Create() + require.Nil(t, err) + require.NotNil(t, bc) + }) + t.Run("ProcessDestinationShardAsObserver fails should error", func(t *testing.T) { + t.Parallel() + + args := componentsMock.GetBootStrapFactoryArgs() + args.PrefConfig.Preferences.DestinationShardAsObserver = "" + bcf, _ := bootstrap.NewBootstrapComponentsFactory(args) + require.NotNil(t, bcf) + + bc, err := bcf.Create() + require.Nil(t, bc) + require.True(t, 
strings.Contains(err.Error(), "DestinationShardAsObserver")) + }) + t.Run("NewCache fails should error", func(t *testing.T) { + t.Parallel() + + args := componentsMock.GetBootStrapFactoryArgs() + args.Config.Versions.Cache = config.CacheConfig{ + Type: "LRU", + SizeInBytes: 1, + } + bcf, _ := bootstrap.NewBootstrapComponentsFactory(args) + require.NotNil(t, bcf) + + bc, err := bcf.Create() + require.Nil(t, bc) + require.True(t, strings.Contains(err.Error(), "LRU")) + }) + t.Run("NewHeaderVersionHandler fails should error", func(t *testing.T) { + t.Parallel() + + args := componentsMock.GetBootStrapFactoryArgs() + args.Config.Versions.DefaultVersion = string(bytes.Repeat([]byte("a"), 20)) + bcf, _ := bootstrap.NewBootstrapComponentsFactory(args) + require.NotNil(t, bcf) + + bc, err := bcf.Create() + require.Nil(t, bc) + require.NotNil(t, err) + }) + t.Run("NewHeaderIntegrityVerifier fails should error", func(t *testing.T) { + t.Parallel() + + args := componentsMock.GetBootStrapFactoryArgs() + coreComponents := componentsMock.GetDefaultCoreComponents() + coreComponents.ChainIdCalled = func() string { + return "" + } + args.CoreComponents = coreComponents + bcf, _ := bootstrap.NewBootstrapComponentsFactory(args) + require.NotNil(t, bcf) + + bc, err := bcf.Create() + require.Nil(t, bc) + require.NotNil(t, err) + }) + t.Run("CreateShardCoordinator fails should error", func(t *testing.T) { + t.Parallel() + + args := componentsMock.GetBootStrapFactoryArgs() + coreComponents := componentsMock.GetDefaultCoreComponents() + coreComponents.NodesConfig = nil + args.CoreComponents = coreComponents + bcf, _ := bootstrap.NewBootstrapComponentsFactory(args) + require.NotNil(t, bcf) + + bc, err := bcf.Create() + require.Nil(t, bc) + require.NotNil(t, err) + }) + t.Run("NewBootstrapDataProvider fails should error", func(t *testing.T) { + t.Parallel() + + args := componentsMock.GetBootStrapFactoryArgs() + coreComponents := componentsMock.GetDefaultCoreComponents() + args.CoreComponents = coreComponents + coreComponents.IntMarsh = nil + bcf, _ := bootstrap.NewBootstrapComponentsFactory(args) + require.NotNil(t, bcf) + + bc, err := bcf.Create() + require.Nil(t, bc) + require.True(t, errors.Is(err, errorsMx.ErrNewBootstrapDataProvider)) + }) + t.Run("import db mode - NewStorageEpochStartBootstrap fails should error", func(t *testing.T) { + t.Parallel() + + args := componentsMock.GetBootStrapFactoryArgs() + coreComponents := componentsMock.GetDefaultCoreComponents() + args.CoreComponents = coreComponents + coreComponents.RatingHandler = nil + args.ImportDbConfig.IsImportDBMode = true + bcf, _ := bootstrap.NewBootstrapComponentsFactory(args) + require.NotNil(t, bcf) + + bc, err := bcf.Create() + require.Nil(t, bc) + require.True(t, errors.Is(err, errorsMx.ErrNewStorageEpochStartBootstrap)) + }) + t.Run("NewStorageEpochStartBootstrap fails should error", func(t *testing.T) { + t.Parallel() + + args := componentsMock.GetBootStrapFactoryArgs() + coreComponents := componentsMock.GetDefaultCoreComponents() + args.CoreComponents = coreComponents + coreComponents.RatingHandler = nil + bcf, _ := bootstrap.NewBootstrapComponentsFactory(args) + require.NotNil(t, bcf) + + bc, err := bcf.Create() + require.Nil(t, bc) + require.True(t, errors.Is(err, errorsMx.ErrNewEpochStartBootstrap)) + }) } - -func TestBootstrapComponentsFactory_CreateShouldWork(t *testing.T) { +func TestBootstrapComponents(t *testing.T) { t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } args := 
componentsMock.GetBootStrapFactoryArgs() - bcf, _ := bootstrap.NewBootstrapComponentsFactory(args) + require.NotNil(t, bcf) bc, err := bcf.Create() - require.Nil(t, err) require.NotNil(t, bc) -} - -func TestBootstrapComponentsFactory_CreateBootstrapDataProviderCreationFail(t *testing.T) { - t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } - args := componentsMock.GetBootStrapFactoryArgs() - coreComponents := componentsMock.GetDefaultCoreComponents() - args.CoreComponents = coreComponents - - bcf, _ := bootstrap.NewBootstrapComponentsFactory(args) - - coreComponents.IntMarsh = nil - bc, err := bcf.Create() - - require.Nil(t, bc) - require.True(t, errors.Is(err, errorsErd.ErrNewBootstrapDataProvider)) -} - -func TestBootstrapComponentsFactory_CreateEpochStartBootstrapCreationFail(t *testing.T) { - t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } - - args := componentsMock.GetBootStrapFactoryArgs() - coreComponents := componentsMock.GetDefaultCoreComponents() - args.CoreComponents = coreComponents - - bcf, _ := bootstrap.NewBootstrapComponentsFactory(args) - - coreComponents.RatingHandler = nil - bc, err := bcf.Create() + assert.Equal(t, core.NodeTypeObserver, bc.NodeType()) + assert.False(t, check.IfNil(bc.ShardCoordinator())) + assert.False(t, check.IfNil(bc.HeaderVersionHandler())) + assert.False(t, check.IfNil(bc.VersionedHeaderFactory())) + assert.False(t, check.IfNil(bc.HeaderIntegrityVerifier())) - require.Nil(t, bc) - require.True(t, errors.Is(err, errorsErd.ErrNewEpochStartBootstrap)) + require.Nil(t, bc.Close()) } diff --git a/factory/bootstrap/shardingFactory.go b/factory/bootstrap/shardingFactory.go index 262ad7f8d06..32d5504292d 100644 --- a/factory/bootstrap/shardingFactory.go +++ b/factory/bootstrap/shardingFactory.go @@ -1,11 +1,11 @@ package bootstrap import ( - "errors" "fmt" "time" "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-core-go/core/closing" "github.com/multiversx/mx-chain-core-go/data/endProcess" "github.com/multiversx/mx-chain-core-go/hashing" @@ -14,6 +14,7 @@ import ( "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/epochStart" + errErd "github.com/multiversx/mx-chain-go/errors" "github.com/multiversx/mx-chain-go/factory" "github.com/multiversx/mx-chain-go/sharding" "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" @@ -29,6 +30,15 @@ func CreateShardCoordinator( prefsConfig config.PreferencesConfig, log logger.Logger, ) (sharding.Coordinator, core.NodeType, error) { + if check.IfNil(nodesConfig) { + return nil, "", errErd.ErrNilGenesisNodesSetupHandler + } + if check.IfNil(pubKey) { + return nil, "", errErd.ErrNilPublicKey + } + if check.IfNil(log) { + return nil, "", errErd.ErrNilLogger + } selfShardId, err := getShardIdFromNodePubKey(pubKey, nodesConfig) nodeType := core.NodeTypeValidator @@ -71,10 +81,6 @@ func CreateShardCoordinator( } func getShardIdFromNodePubKey(pubKey crypto.PublicKey, nodesConfig sharding.GenesisNodesSetupHandler) (uint32, error) { - if pubKey == nil { - return 0, errors.New("nil public key") - } - publicKey, err := pubKey.ToByteArray() if err != nil { return 0, err @@ -108,6 +114,21 @@ func CreateNodesCoordinator( enableEpochsHandler common.EnableEpochsHandler, validatorInfoCacher epochStart.ValidatorInfoCacher, ) (nodesCoordinator.NodesCoordinator, error) { + if check.IfNil(nodeShufflerOut) { + return nil, 
errErd.ErrNilShuffleOutCloser + } + if check.IfNil(nodesConfig) { + return nil, errErd.ErrNilGenesisNodesSetupHandler + } + if check.IfNil(epochStartNotifier) { + return nil, errErd.ErrNilEpochStartNotifier + } + if check.IfNil(pubKey) { + return nil, errErd.ErrNilPublicKey + } + if check.IfNil(bootstrapParameters) { + return nil, errErd.ErrNilBootstrapParamsHandler + } if chanNodeStop == nil { return nil, nodesCoordinator.ErrNilNodeStopChannel } @@ -219,6 +240,10 @@ func CreateNodesShuffleOut( chanStopNodeProcess chan endProcess.ArgEndProcess, ) (factory.ShuffleOutCloser, error) { + if check.IfNil(nodesConfig) { + return nil, errErd.ErrNilGenesisNodesSetupHandler + } + maxThresholdEpochDuration := epochConfig.MaxShuffledOutRestartThreshold if !(maxThresholdEpochDuration >= 0.0 && maxThresholdEpochDuration <= 1.0) { return nil, fmt.Errorf("invalid max threshold for shuffled out handler") diff --git a/factory/bootstrap/shardingFactory_test.go b/factory/bootstrap/shardingFactory_test.go new file mode 100644 index 00000000000..277589f2e7e --- /dev/null +++ b/factory/bootstrap/shardingFactory_test.go @@ -0,0 +1,661 @@ +package bootstrap + +import ( + "errors" + "strings" + "testing" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-core-go/data/endProcess" + "github.com/multiversx/mx-chain-go/config" + errErd "github.com/multiversx/mx-chain-go/errors" + "github.com/multiversx/mx-chain-go/integrationTests/mock" + "github.com/multiversx/mx-chain-go/sharding" + "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" + "github.com/multiversx/mx-chain-go/testscommon" + "github.com/multiversx/mx-chain-go/testscommon/bootstrapMocks" + "github.com/multiversx/mx-chain-go/testscommon/cryptoMocks" + "github.com/multiversx/mx-chain-go/testscommon/nodeTypeProviderMock" + "github.com/multiversx/mx-chain-go/testscommon/shardingMocks" + "github.com/multiversx/mx-chain-go/testscommon/storage" + validatorInfoCacherMocks "github.com/multiversx/mx-chain-go/testscommon/validatorInfoCacher" + "github.com/stretchr/testify/require" +) + +var expectedErr = errors.New("expected error") + +func TestCreateShardCoordinator(t *testing.T) { + t.Parallel() + + t.Run("nil nodes config should error", func(t *testing.T) { + t.Parallel() + + shardC, nodeType, err := CreateShardCoordinator(nil, nil, config.PreferencesConfig{}, nil) + require.Equal(t, errErd.ErrNilGenesisNodesSetupHandler, err) + require.Empty(t, nodeType) + require.True(t, check.IfNil(shardC)) + }) + t.Run("nil pub key should error", func(t *testing.T) { + t.Parallel() + + shardC, nodeType, err := CreateShardCoordinator(&testscommon.NodesSetupStub{}, nil, config.PreferencesConfig{}, nil) + require.Equal(t, errErd.ErrNilPublicKey, err) + require.Empty(t, nodeType) + require.True(t, check.IfNil(shardC)) + }) + t.Run("nil logger should error", func(t *testing.T) { + t.Parallel() + + shardC, nodeType, err := CreateShardCoordinator(&testscommon.NodesSetupStub{}, &cryptoMocks.PublicKeyStub{}, config.PreferencesConfig{}, nil) + require.Equal(t, errErd.ErrNilLogger, err) + require.Empty(t, nodeType) + require.True(t, check.IfNil(shardC)) + }) + t.Run("getShardIdFromNodePubKey fails should error", func(t *testing.T) { + t.Parallel() + + shardC, nodeType, err := CreateShardCoordinator( + &testscommon.NodesSetupStub{}, + &cryptoMocks.PublicKeyStub{ + ToByteArrayStub: func() ([]byte, error) { + return nil, expectedErr + }, + }, + config.PreferencesConfig{}, + 
&testscommon.LoggerStub{}, + ) + require.Equal(t, expectedErr, err) + require.Empty(t, nodeType) + require.True(t, check.IfNil(shardC)) + }) + t.Run("public key not in genesis - ProcessDestinationShardAsObserver fails should error", func(t *testing.T) { + t.Parallel() + + shardC, nodeType, err := CreateShardCoordinator( + &testscommon.NodesSetupStub{}, + &cryptoMocks.PublicKeyStub{ + ToByteArrayStub: func() ([]byte, error) { + return nil, sharding.ErrPublicKeyNotFoundInGenesis // force this error here + }, + }, + config.PreferencesConfig{ + DestinationShardAsObserver: "", // ProcessDestinationShardAsObserver fails + }, + &testscommon.LoggerStub{}, + ) + require.NotNil(t, err) + require.Empty(t, nodeType) + require.True(t, check.IfNil(shardC)) + }) + t.Run("public key not in genesis, destination shard disabled - ToByteArray fails should error", func(t *testing.T) { + t.Parallel() + + counter := 0 + shardC, nodeType, err := CreateShardCoordinator( + &testscommon.NodesSetupStub{ + GetShardIDForPubKeyCalled: func(pubKey []byte) (uint32, error) { + return 0, sharding.ErrPublicKeyNotFoundInGenesis // force this error + }, + }, + &cryptoMocks.PublicKeyStub{ + ToByteArrayStub: func() ([]byte, error) { + counter++ + if counter > 1 { + return nil, expectedErr + } + return []byte("public key"), nil + }, + }, + config.PreferencesConfig{ + DestinationShardAsObserver: "disabled", // force if branch + }, + &testscommon.LoggerStub{}, + ) + require.NotNil(t, err) + require.True(t, errors.Is(err, expectedErr)) + require.Equal(t, core.NodeTypeObserver, nodeType) + require.True(t, check.IfNil(shardC)) + }) + t.Run("public key not in genesis, destination shard disabled - should work", func(t *testing.T) { + t.Parallel() + + shardC, nodeType, err := CreateShardCoordinator( + &testscommon.NodesSetupStub{ + GetShardIDForPubKeyCalled: func(pubKey []byte) (uint32, error) { + return 0, sharding.ErrPublicKeyNotFoundInGenesis // force this error + }, + NumberOfShardsCalled: func() uint32 { + return 2 + }, + }, + &cryptoMocks.PublicKeyStub{ + ToByteArrayStub: func() ([]byte, error) { + return []byte("public key"), nil + }, + }, + config.PreferencesConfig{ + DestinationShardAsObserver: "disabled", // for coverage + }, + &testscommon.LoggerStub{}, + ) + require.Nil(t, err) + require.Equal(t, core.NodeTypeObserver, nodeType) + require.False(t, check.IfNil(shardC)) + }) + t.Run("metachain but 0 shards should error", func(t *testing.T) { + t.Parallel() + + shardC, nodeType, err := CreateShardCoordinator( + &testscommon.NodesSetupStub{ + GetShardIDForPubKeyCalled: func(pubKey []byte) (uint32, error) { + return core.MetachainShardId, nil + }, + NumberOfShardsCalled: func() uint32 { + return 0 + }, + }, + &cryptoMocks.PublicKeyStub{}, + config.PreferencesConfig{}, + &testscommon.LoggerStub{}, + ) + require.NotNil(t, err) + require.Empty(t, nodeType) + require.True(t, check.IfNil(shardC)) + }) + t.Run("metachain should work", func(t *testing.T) { + t.Parallel() + + shardC, nodeType, err := CreateShardCoordinator( + &testscommon.NodesSetupStub{ + GetShardIDForPubKeyCalled: func(pubKey []byte) (uint32, error) { + return core.MetachainShardId, nil + }, + }, + &cryptoMocks.PublicKeyStub{}, + config.PreferencesConfig{}, + &testscommon.LoggerStub{}, + ) + require.Nil(t, err) + require.Equal(t, core.NodeTypeValidator, nodeType) + require.False(t, check.IfNil(shardC)) + }) +} + +func TestCreateNodesCoordinator(t *testing.T) { + t.Parallel() + + t.Run("nil nodes shuffler out closer should error", func(t *testing.T) { + t.Parallel() + + 
nodesC, err := CreateNodesCoordinator( + nil, + &testscommon.NodesSetupStub{}, + config.PreferencesConfig{}, + &mock.EpochStartNotifierStub{}, + &cryptoMocks.PublicKeyStub{}, + &testscommon.MarshalizerStub{}, + &testscommon.HasherStub{}, + &testscommon.RaterMock{}, + &storage.StorerStub{}, + &shardingMocks.NodeShufflerMock{}, + 0, + &bootstrapMocks.BootstrapParamsHandlerMock{}, + 0, + make(chan endProcess.ArgEndProcess, 1), + &nodeTypeProviderMock.NodeTypeProviderStub{}, + &testscommon.EnableEpochsHandlerStub{}, + &validatorInfoCacherMocks.ValidatorInfoCacherStub{}, + ) + require.Equal(t, errErd.ErrNilShuffleOutCloser, err) + require.True(t, check.IfNil(nodesC)) + }) + t.Run("nil nodes config should error", func(t *testing.T) { + t.Parallel() + + nodesC, err := CreateNodesCoordinator( + &testscommon.ShuffleOutCloserStub{}, + nil, + config.PreferencesConfig{}, + &mock.EpochStartNotifierStub{}, + &cryptoMocks.PublicKeyStub{}, + &testscommon.MarshalizerStub{}, + &testscommon.HasherStub{}, + &testscommon.RaterMock{}, + &storage.StorerStub{}, + &shardingMocks.NodeShufflerMock{}, + 0, + &bootstrapMocks.BootstrapParamsHandlerMock{}, + 0, + make(chan endProcess.ArgEndProcess, 1), + &nodeTypeProviderMock.NodeTypeProviderStub{}, + &testscommon.EnableEpochsHandlerStub{}, + &validatorInfoCacherMocks.ValidatorInfoCacherStub{}, + ) + require.Equal(t, errErd.ErrNilGenesisNodesSetupHandler, err) + require.True(t, check.IfNil(nodesC)) + }) + t.Run("nil epoch start notifier should error", func(t *testing.T) { + t.Parallel() + + nodesC, err := CreateNodesCoordinator( + &testscommon.ShuffleOutCloserStub{}, + &testscommon.NodesSetupStub{}, + config.PreferencesConfig{}, + nil, + &cryptoMocks.PublicKeyStub{}, + &testscommon.MarshalizerStub{}, + &testscommon.HasherStub{}, + &testscommon.RaterMock{}, + &storage.StorerStub{}, + &shardingMocks.NodeShufflerMock{}, + 0, + &bootstrapMocks.BootstrapParamsHandlerMock{}, + 0, + make(chan endProcess.ArgEndProcess, 1), + &nodeTypeProviderMock.NodeTypeProviderStub{}, + &testscommon.EnableEpochsHandlerStub{}, + &validatorInfoCacherMocks.ValidatorInfoCacherStub{}, + ) + require.Equal(t, errErd.ErrNilEpochStartNotifier, err) + require.True(t, check.IfNil(nodesC)) + }) + t.Run("nil pub key should error", func(t *testing.T) { + t.Parallel() + + nodesC, err := CreateNodesCoordinator( + &testscommon.ShuffleOutCloserStub{}, + &testscommon.NodesSetupStub{}, + config.PreferencesConfig{}, + &mock.EpochStartNotifierStub{}, + nil, + &testscommon.MarshalizerStub{}, + &testscommon.HasherStub{}, + &testscommon.RaterMock{}, + &storage.StorerStub{}, + &shardingMocks.NodeShufflerMock{}, + 0, + &bootstrapMocks.BootstrapParamsHandlerMock{}, + 0, + make(chan endProcess.ArgEndProcess, 1), + &nodeTypeProviderMock.NodeTypeProviderStub{}, + &testscommon.EnableEpochsHandlerStub{}, + &validatorInfoCacherMocks.ValidatorInfoCacherStub{}, + ) + require.Equal(t, errErd.ErrNilPublicKey, err) + require.True(t, check.IfNil(nodesC)) + }) + t.Run("nil bootstrap params should error", func(t *testing.T) { + t.Parallel() + + nodesC, err := CreateNodesCoordinator( + &testscommon.ShuffleOutCloserStub{}, + &testscommon.NodesSetupStub{}, + config.PreferencesConfig{}, + &mock.EpochStartNotifierStub{}, + &cryptoMocks.PublicKeyStub{}, + &testscommon.MarshalizerStub{}, + &testscommon.HasherStub{}, + &testscommon.RaterMock{}, + &storage.StorerStub{}, + &shardingMocks.NodeShufflerMock{}, + 0, + nil, + 0, + make(chan endProcess.ArgEndProcess, 1), + &nodeTypeProviderMock.NodeTypeProviderStub{}, + 
&testscommon.EnableEpochsHandlerStub{}, + &validatorInfoCacherMocks.ValidatorInfoCacherStub{}, + ) + require.Equal(t, errErd.ErrNilBootstrapParamsHandler, err) + require.True(t, check.IfNil(nodesC)) + }) + t.Run("nil chan should error", func(t *testing.T) { + t.Parallel() + + nodesC, err := CreateNodesCoordinator( + &testscommon.ShuffleOutCloserStub{}, + &testscommon.NodesSetupStub{}, + config.PreferencesConfig{}, + &mock.EpochStartNotifierStub{}, + &cryptoMocks.PublicKeyStub{}, + &testscommon.MarshalizerStub{}, + &testscommon.HasherStub{}, + &testscommon.RaterMock{}, + &storage.StorerStub{}, + &shardingMocks.NodeShufflerMock{}, + 0, + &bootstrapMocks.BootstrapParamsHandlerMock{}, + 0, + nil, + &nodeTypeProviderMock.NodeTypeProviderStub{}, + &testscommon.EnableEpochsHandlerStub{}, + &validatorInfoCacherMocks.ValidatorInfoCacherStub{}, + ) + require.Equal(t, nodesCoordinator.ErrNilNodeStopChannel, err) + require.True(t, check.IfNil(nodesC)) + }) + t.Run("invalid shard should error", func(t *testing.T) { + t.Parallel() + + nodesC, err := CreateNodesCoordinator( + &testscommon.ShuffleOutCloserStub{}, + &testscommon.NodesSetupStub{}, + config.PreferencesConfig{ + DestinationShardAsObserver: "", + }, + &mock.EpochStartNotifierStub{}, + &cryptoMocks.PublicKeyStub{}, + &testscommon.MarshalizerStub{}, + &testscommon.HasherStub{}, + &testscommon.RaterMock{}, + &storage.StorerStub{}, + &shardingMocks.NodeShufflerMock{}, + 0, + &bootstrapMocks.BootstrapParamsHandlerMock{}, + 0, + make(chan endProcess.ArgEndProcess, 1), + &nodeTypeProviderMock.NodeTypeProviderStub{}, + &testscommon.EnableEpochsHandlerStub{}, + &validatorInfoCacherMocks.ValidatorInfoCacherStub{}, + ) + require.NotNil(t, err) + require.True(t, check.IfNil(nodesC)) + }) + t.Run("destination shard disabled - ToByteArray fails should error", func(t *testing.T) { + t.Parallel() + + nodesC, err := CreateNodesCoordinator( + &testscommon.ShuffleOutCloserStub{}, + &testscommon.NodesSetupStub{}, + config.PreferencesConfig{ + DestinationShardAsObserver: "disabled", + }, + &mock.EpochStartNotifierStub{}, + &cryptoMocks.PublicKeyStub{ + ToByteArrayStub: func() ([]byte, error) { + return nil, expectedErr + }, + }, + &testscommon.MarshalizerStub{}, + &testscommon.HasherStub{}, + &testscommon.RaterMock{}, + &storage.StorerStub{}, + &shardingMocks.NodeShufflerMock{}, + 0, + &bootstrapMocks.BootstrapParamsHandlerMock{}, + 0, + make(chan endProcess.ArgEndProcess, 1), + &nodeTypeProviderMock.NodeTypeProviderStub{}, + &testscommon.EnableEpochsHandlerStub{}, + &validatorInfoCacherMocks.ValidatorInfoCacherStub{}, + ) + require.True(t, errors.Is(err, expectedErr)) + require.True(t, check.IfNil(nodesC)) + }) + t.Run("ToByteArray fails should error", func(t *testing.T) { + t.Parallel() + + nodesC, err := CreateNodesCoordinator( + &testscommon.ShuffleOutCloserStub{}, + &testscommon.NodesSetupStub{}, + config.PreferencesConfig{ + DestinationShardAsObserver: "0", + }, + &mock.EpochStartNotifierStub{}, + &cryptoMocks.PublicKeyStub{ + ToByteArrayStub: func() ([]byte, error) { + return nil, expectedErr + }, + }, + &testscommon.MarshalizerStub{}, + &testscommon.HasherStub{}, + &testscommon.RaterMock{}, + &storage.StorerStub{}, + &shardingMocks.NodeShufflerMock{}, + 0, + &bootstrapMocks.BootstrapParamsHandlerMock{}, + 0, + make(chan endProcess.ArgEndProcess, 1), + &nodeTypeProviderMock.NodeTypeProviderStub{}, + &testscommon.EnableEpochsHandlerStub{}, + &validatorInfoCacherMocks.ValidatorInfoCacherStub{}, + ) + require.True(t, errors.Is(err, expectedErr)) + 
require.True(t, check.IfNil(nodesC)) + }) + t.Run("NewShuffledOutTrigger fails should error", func(t *testing.T) { + t.Parallel() + + nodesC, err := CreateNodesCoordinator( + &testscommon.ShuffleOutCloserStub{}, + &testscommon.NodesSetupStub{}, + config.PreferencesConfig{ + DestinationShardAsObserver: "0", + }, + &mock.EpochStartNotifierStub{}, + &cryptoMocks.PublicKeyStub{ + ToByteArrayStub: func() ([]byte, error) { + return nil, nil // no error but nil pub key to force NewShuffledOutTrigger to fail + }, + }, + &testscommon.MarshalizerStub{}, + &testscommon.HasherStub{}, + &testscommon.RaterMock{}, + &storage.StorerStub{}, + &shardingMocks.NodeShufflerMock{}, + 0, + &bootstrapMocks.BootstrapParamsHandlerMock{}, + 0, + make(chan endProcess.ArgEndProcess, 1), + &nodeTypeProviderMock.NodeTypeProviderStub{}, + &testscommon.EnableEpochsHandlerStub{}, + &validatorInfoCacherMocks.ValidatorInfoCacherStub{}, + ) + require.NotNil(t, err) + require.True(t, check.IfNil(nodesC)) + }) + t.Run("NewIndexHashedNodesCoordinator fails should error", func(t *testing.T) { + t.Parallel() + + nodesC, err := CreateNodesCoordinator( + &testscommon.ShuffleOutCloserStub{}, + &testscommon.NodesSetupStub{}, + config.PreferencesConfig{ + DestinationShardAsObserver: "0", + }, + &mock.EpochStartNotifierStub{}, + &cryptoMocks.PublicKeyStub{ + ToByteArrayStub: func() ([]byte, error) { + return []byte("public key"), nil + }, + }, + nil, // force NewIndexHashedNodesCoordinator to fail + &testscommon.HasherStub{}, + &testscommon.RaterMock{}, + &storage.StorerStub{}, + &shardingMocks.NodeShufflerMock{}, + 0, + &bootstrapMocks.BootstrapParamsHandlerMock{}, + 0, + make(chan endProcess.ArgEndProcess, 1), + &nodeTypeProviderMock.NodeTypeProviderStub{}, + &testscommon.EnableEpochsHandlerStub{}, + &validatorInfoCacherMocks.ValidatorInfoCacherStub{}, + ) + require.NotNil(t, err) + require.True(t, check.IfNil(nodesC)) + }) + t.Run("NewIndexHashedNodesCoordinatorWithRater fails should error", func(t *testing.T) { + t.Parallel() + + nodesC, err := CreateNodesCoordinator( + &testscommon.ShuffleOutCloserStub{}, + &testscommon.NodesSetupStub{}, + config.PreferencesConfig{ + DestinationShardAsObserver: "0", + }, + &mock.EpochStartNotifierStub{}, + &cryptoMocks.PublicKeyStub{ + ToByteArrayStub: func() ([]byte, error) { + return []byte("public key"), nil + }, + }, + &testscommon.MarshalizerStub{}, + &testscommon.HasherStub{}, + nil, // force NewIndexHashedNodesCoordinatorWithRater to fail + &storage.StorerStub{}, + &shardingMocks.NodeShufflerMock{}, + 0, + &bootstrapMocks.BootstrapParamsHandlerMock{ + NodesConfigCalled: func() *nodesCoordinator.NodesCoordinatorRegistry { + return &nodesCoordinator.NodesCoordinatorRegistry{ + EpochsConfig: map[string]*nodesCoordinator.EpochValidators{ + "0": { + EligibleValidators: map[string][]*nodesCoordinator.SerializableValidator{ + "4294967295": { + { + PubKey: []byte("pk1"), + Chances: 1, + Index: 0, + }, + }, + }, + WaitingValidators: map[string][]*nodesCoordinator.SerializableValidator{}, + LeavingValidators: map[string][]*nodesCoordinator.SerializableValidator{}, + }, + }, + CurrentEpoch: 0, + } + }, + }, + 0, + make(chan endProcess.ArgEndProcess, 1), + &nodeTypeProviderMock.NodeTypeProviderStub{}, + &testscommon.EnableEpochsHandlerStub{}, + &validatorInfoCacherMocks.ValidatorInfoCacherStub{}, + ) + require.NotNil(t, err) + require.True(t, check.IfNil(nodesC)) + }) + t.Run("should work with nodes config", func(t *testing.T) { + t.Parallel() + + nodesC, err := CreateNodesCoordinator( + 
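+ // happy path: every dependency is stubbed and the bootstrap params below carry a minimal one-validator registry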
&testscommon.ShuffleOutCloserStub{}, + &testscommon.NodesSetupStub{}, + config.PreferencesConfig{ + DestinationShardAsObserver: "disabled", + }, + &mock.EpochStartNotifierStub{}, + &cryptoMocks.PublicKeyStub{ + ToByteArrayStub: func() ([]byte, error) { + return []byte("public key"), nil + }, + }, + &testscommon.MarshalizerStub{}, + &testscommon.HasherStub{}, + &testscommon.RaterMock{}, + &storage.StorerStub{}, + &shardingMocks.NodeShufflerMock{}, + 0, + &bootstrapMocks.BootstrapParamsHandlerMock{ + NodesConfigCalled: func() *nodesCoordinator.NodesCoordinatorRegistry { + return &nodesCoordinator.NodesCoordinatorRegistry{ + EpochsConfig: map[string]*nodesCoordinator.EpochValidators{ + "0": { + EligibleValidators: map[string][]*nodesCoordinator.SerializableValidator{ + "4294967295": { + { + PubKey: []byte("pk1"), + Chances: 1, + Index: 0, + }, + }, + }, + WaitingValidators: map[string][]*nodesCoordinator.SerializableValidator{}, + LeavingValidators: map[string][]*nodesCoordinator.SerializableValidator{}, + }, + }, + CurrentEpoch: 0, + } + }, + }, + 0, + make(chan endProcess.ArgEndProcess, 1), + &nodeTypeProviderMock.NodeTypeProviderStub{}, + &testscommon.EnableEpochsHandlerStub{}, + &validatorInfoCacherMocks.ValidatorInfoCacherStub{}, + ) + require.Nil(t, err) + require.False(t, check.IfNil(nodesC)) + }) +} + +func TestCreateNodesShuffleOut(t *testing.T) { + t.Parallel() + + t.Run("nil nodes config should error", func(t *testing.T) { + t.Parallel() + + shuffler, err := CreateNodesShuffleOut(nil, config.EpochStartConfig{}, make(chan endProcess.ArgEndProcess, 1)) + require.Equal(t, errErd.ErrNilGenesisNodesSetupHandler, err) + require.True(t, check.IfNil(shuffler)) + }) + t.Run("invalid MaxShuffledOutRestartThreshold should error", func(t *testing.T) { + t.Parallel() + + shuffler, err := CreateNodesShuffleOut( + &testscommon.NodesSetupStub{}, + config.EpochStartConfig{ + MaxShuffledOutRestartThreshold: 5.0, + }, + make(chan endProcess.ArgEndProcess, 1), + ) + require.True(t, strings.Contains(err.Error(), "invalid max threshold for shuffled out handler")) + require.True(t, check.IfNil(shuffler)) + }) + t.Run("invalid MinShuffledOutRestartThreshold should error", func(t *testing.T) { + t.Parallel() + + shuffler, err := CreateNodesShuffleOut( + &testscommon.NodesSetupStub{}, + config.EpochStartConfig{ + MinShuffledOutRestartThreshold: 5.0, + }, + make(chan endProcess.ArgEndProcess, 1), + ) + require.True(t, strings.Contains(err.Error(), "invalid min threshold for shuffled out handler")) + require.True(t, check.IfNil(shuffler)) + }) + t.Run("NewShuffleOutCloser fails should error", func(t *testing.T) { + t.Parallel() + + shuffler, err := CreateNodesShuffleOut( + &testscommon.NodesSetupStub{}, + config.EpochStartConfig{}, + nil, // force NewShuffleOutCloser to fail + ) + require.NotNil(t, err) + require.True(t, check.IfNil(shuffler)) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + shuffler, err := CreateNodesShuffleOut( + &testscommon.NodesSetupStub{ + GetRoundDurationCalled: func() uint64 { + return 4000 + }, + }, + config.EpochStartConfig{ + RoundsPerEpoch: 200, + MinShuffledOutRestartThreshold: 0.05, + MaxShuffledOutRestartThreshold: 0.25, + }, + make(chan endProcess.ArgEndProcess, 1), + ) + require.Nil(t, err) + require.False(t, check.IfNil(shuffler)) + }) +} diff --git a/factory/consensus/consensusComponents.go b/factory/consensus/consensusComponents.go index 418868d4108..a872b80f433 100644 --- a/factory/consensus/consensusComponents.go +++
b/factory/consensus/consensusComponents.go @@ -81,32 +81,9 @@ type consensusComponents struct { // NewConsensusComponentsFactory creates an instance of consensusComponentsFactory func NewConsensusComponentsFactory(args ConsensusComponentsFactoryArgs) (*consensusComponentsFactory, error) { - if check.IfNil(args.CoreComponents) { - return nil, errors.ErrNilCoreComponentsHolder - } - if check.IfNil(args.DataComponents) { - return nil, errors.ErrNilDataComponentsHolder - } - if check.IfNil(args.CryptoComponents) { - return nil, errors.ErrNilCryptoComponentsHolder - } - if check.IfNil(args.NetworkComponents) { - return nil, errors.ErrNilNetworkComponentsHolder - } - if check.IfNil(args.ProcessComponents) { - return nil, errors.ErrNilProcessComponentsHolder - } - if check.IfNil(args.StateComponents) { - return nil, errors.ErrNilStateComponentsHolder - } - if check.IfNil(args.StatusComponents) { - return nil, errors.ErrNilStatusComponentsHolder - } - if check.IfNil(args.ScheduledProcessor) { - return nil, errors.ErrNilScheduledProcessor - } - if check.IfNil(args.StatusCoreComponents) { - return nil, errors.ErrNilStatusCoreComponents + err := checkArgs(args) + if err != nil { + return nil, err } return &consensusComponentsFactory{ @@ -130,10 +107,6 @@ func NewConsensusComponentsFactory(args ConsensusComponentsFactoryArgs) (*consen func (ccf *consensusComponentsFactory) Create() (*consensusComponents, error) { var err error - err = ccf.checkArgs() - if err != nil { - return nil, err - } cc := &consensusComponents{} consensusGroupSize, err := getConsensusGroupSize(ccf.coreComponents.GenesisNodesSetup(), ccf.processComponents.ShardCoordinator()) @@ -358,12 +331,7 @@ func (ccf *consensusComponentsFactory) createChronology() (consensus.ChronologyH Watchdog: wd, AppStatusHandler: ccf.statusCoreComponents.AppStatusHandler(), } - chronologyHandler, err := chronology.NewChronology(chronologyArg) - if err != nil { - return nil, err - } - - return chronologyHandler, nil + return chronology.NewChronology(chronologyArg) } func (ccf *consensusComponentsFactory) getEpoch() uint32 { @@ -518,12 +486,7 @@ func (ccf *consensusComponentsFactory) createShardBootstrapper() (process.Bootst ArgBaseBootstrapper: argsBaseBootstrapper, } - bootstrap, err := sync.NewShardBootstrap(argsShardBootstrapper) - if err != nil { - return nil, err - } - - return bootstrap, nil + return sync.NewShardBootstrap(argsShardBootstrapper) } func (ccf *consensusComponentsFactory) createArgsBaseAccountsSyncer(trieStorageManager common.StorageManager) syncer.ArgsNewBaseAccountsSyncer { @@ -654,12 +617,7 @@ func (ccf *consensusComponentsFactory) createMetaChainBootstrapper() (process.Bo ValidatorStatisticsDBSyncer: validatorAccountsDBSyncer, } - bootstrap, err := sync.NewMetaBootstrap(argsMetaBootstrapper) - if err != nil { - return nil, err - } - - return bootstrap, nil + return sync.NewMetaBootstrap(argsMetaBootstrapper) } func (ccf *consensusComponentsFactory) createConsensusTopic(cc *consensusComponents) error { @@ -718,33 +676,66 @@ func (ccf *consensusComponentsFactory) addCloserInstances(closers ...update.Clos return nil } -func (ccf *consensusComponentsFactory) checkArgs() error { - blockchain := ccf.dataComponents.Blockchain() - if check.IfNil(blockchain) { - return errors.ErrNilBlockChainHandler +func checkArgs(args ConsensusComponentsFactoryArgs) error { + if check.IfNil(args.CoreComponents) { + return errors.ErrNilCoreComponentsHolder } - marshalizer := ccf.coreComponents.InternalMarshalizer() - if check.IfNil(marshalizer) { - return 
errors.ErrNilMarshalizer + if check.IfNil(args.CoreComponents.GenesisNodesSetup()) { + return errors.ErrNilGenesisNodesSetupHandler + } + if check.IfNil(args.DataComponents) { + return errors.ErrNilDataComponentsHolder } - dataPool := ccf.dataComponents.Datapool() - if check.IfNil(dataPool) { + if check.IfNil(args.DataComponents.Datapool()) { return errors.ErrNilDataPoolsHolder } - shardCoordinator := ccf.processComponents.ShardCoordinator() - if check.IfNil(shardCoordinator) { - return errors.ErrNilShardCoordinator + if check.IfNil(args.DataComponents.Blockchain()) { + return errors.ErrNilBlockChainHandler + } + if check.IfNil(args.CryptoComponents) { + return errors.ErrNilCryptoComponentsHolder + } + if check.IfNil(args.CryptoComponents.PublicKey()) { + return errors.ErrNilPublicKey + } + if check.IfNil(args.CryptoComponents.PrivateKey()) { + return errors.ErrNilPrivateKey + } + if check.IfNil(args.NetworkComponents) { + return errors.ErrNilNetworkComponentsHolder } - netMessenger := ccf.networkComponents.NetworkMessenger() - if check.IfNil(netMessenger) { + if check.IfNil(args.NetworkComponents.NetworkMessenger()) { return errors.ErrNilMessenger } - hardforkTrigger := ccf.processComponents.HardforkTrigger() - if check.IfNil(hardforkTrigger) { + if check.IfNil(args.ProcessComponents) { + return errors.ErrNilProcessComponentsHolder + } + if check.IfNil(args.ProcessComponents.NodesCoordinator()) { + return errors.ErrNilNodesCoordinator + } + if check.IfNil(args.ProcessComponents.ShardCoordinator()) { + return errors.ErrNilShardCoordinator + } + if check.IfNil(args.ProcessComponents.RoundHandler()) { + return errors.ErrNilRoundHandler + } + if check.IfNil(args.ProcessComponents.HardforkTrigger()) { return errors.ErrNilHardforkTrigger } - if check.IfNil(ccf.statusCoreComponents.AppStatusHandler()) { - return errors.ErrNilAppStatusHandler + if check.IfNil(args.StateComponents) { + return errors.ErrNilStateComponentsHolder + } + if check.IfNil(args.StatusComponents) { + return errors.ErrNilStatusComponentsHolder + } + if check.IfNil(args.StatusComponents.OutportHandler()) { + return errors.ErrNilOutportHandler + } + if check.IfNil(args.ScheduledProcessor) { + return errors.ErrNilScheduledProcessor + } + if check.IfNil(args.StatusCoreComponents) { + return errors.ErrNilStatusCoreComponents } return nil diff --git a/factory/consensus/consensusComponentsHandler_test.go b/factory/consensus/consensusComponentsHandler_test.go index 20b0e5b4e3a..c0a89f8a08e 100644 --- a/factory/consensus/consensusComponentsHandler_test.go +++ b/factory/consensus/consensusComponentsHandler_test.go @@ -3,76 +3,133 @@ package consensus_test import ( "testing" + errorsMx "github.com/multiversx/mx-chain-go/errors" + "github.com/multiversx/mx-chain-go/factory" consensusComp "github.com/multiversx/mx-chain-go/factory/consensus" - "github.com/multiversx/mx-chain-go/factory/mock" + "github.com/multiversx/mx-chain-go/testscommon" componentsMock "github.com/multiversx/mx-chain-go/testscommon/components" - "github.com/multiversx/mx-chain-go/testscommon/factory" - "github.com/multiversx/mx-chain-go/testscommon/statusHandler" + factoryMocks "github.com/multiversx/mx-chain-go/testscommon/factory" "github.com/stretchr/testify/require" ) -// ------------ Test ManagedConsensusComponentsFactory -------------------- -func TestManagedConsensusComponents_CreateWithInvalidArgsShouldErr(t *testing.T) { +func TestNewManagedConsensusComponents(t *testing.T) { t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } - - 
shardCoordinator := mock.NewMultiShardsCoordinatorMock(2) - args := componentsMock.GetConsensusArgs(shardCoordinator) - statusCoreComponents := &factory.StatusCoreComponentsStub{ - AppStatusHandlerField: &statusHandler.AppStatusHandlerStub{}, - } - args.StatusCoreComponents = statusCoreComponents - consensusComponentsFactory, _ := consensusComp.NewConsensusComponentsFactory(args) - managedConsensusComponents, err := consensusComp.NewManagedConsensusComponents(consensusComponentsFactory) - require.NoError(t, err) - statusCoreComponents.AppStatusHandlerField = nil - err = managedConsensusComponents.Create() - require.Error(t, err) - require.NotNil(t, managedConsensusComponents.CheckSubcomponents()) + t.Run("nil factory should error", func(t *testing.T) { + t.Parallel() + + managedConsensusComponents, err := consensusComp.NewManagedConsensusComponents(nil) + require.Equal(t, errorsMx.ErrNilConsensusComponentsFactory, err) + require.Nil(t, managedConsensusComponents) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + consensusComponentsFactory, _ := consensusComp.NewConsensusComponentsFactory(createMockConsensusComponentsFactoryArgs()) + managedConsensusComponents, err := consensusComp.NewManagedConsensusComponents(consensusComponentsFactory) + require.NoError(t, err) + require.NotNil(t, managedConsensusComponents) + }) } -func TestManagedConsensusComponents_CreateShouldWork(t *testing.T) { +func TestManagedConsensusComponents_Create(t *testing.T) { t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } - shardCoordinator := mock.NewMultiShardsCoordinatorMock(2) - args := componentsMock.GetConsensusArgs(shardCoordinator) + t.Run("invalid params should error", func(t *testing.T) { + t.Parallel() - consensusComponentsFactory, _ := consensusComp.NewConsensusComponentsFactory(args) - managedConsensusComponents, err := consensusComp.NewManagedConsensusComponents(consensusComponentsFactory) + args := createMockConsensusComponentsFactoryArgs() + statusCoreCompStub, ok := args.StatusCoreComponents.(*factoryMocks.StatusCoreComponentsStub) + require.True(t, ok) + statusCoreCompStub.AppStatusHandlerField = nil + consensusComponentsFactory, _ := consensusComp.NewConsensusComponentsFactory(args) + managedConsensusComponents, _ := consensusComp.NewManagedConsensusComponents(consensusComponentsFactory) + require.NotNil(t, managedConsensusComponents) - require.NoError(t, err) - require.Nil(t, managedConsensusComponents.BroadcastMessenger()) - require.Nil(t, managedConsensusComponents.Chronology()) - require.Nil(t, managedConsensusComponents.ConsensusWorker()) - require.Error(t, managedConsensusComponents.CheckSubcomponents()) + err := managedConsensusComponents.Create() + require.Error(t, err) + }) + t.Run("should work with getters", func(t *testing.T) { + t.Parallel() + + shardCoordinator := testscommon.NewMultiShardsCoordinatorMock(2) + args := componentsMock.GetConsensusArgs(shardCoordinator) + consensusComponentsFactory, _ := consensusComp.NewConsensusComponentsFactory(args) + managedConsensusComponents, _ := consensusComp.NewManagedConsensusComponents(consensusComponentsFactory) + require.NotNil(t, managedConsensusComponents) + + require.Nil(t, managedConsensusComponents.BroadcastMessenger()) + require.Nil(t, managedConsensusComponents.Chronology()) + require.Nil(t, managedConsensusComponents.ConsensusWorker()) + require.Nil(t, managedConsensusComponents.Bootstrapper()) + + err := managedConsensusComponents.Create() + require.NoError(t, err) + require.NotNil(t, 
managedConsensusComponents.BroadcastMessenger()) + require.NotNil(t, managedConsensusComponents.Chronology()) + require.NotNil(t, managedConsensusComponents.ConsensusWorker()) + require.NotNil(t, managedConsensusComponents.Bootstrapper()) + + require.Equal(t, factory.ConsensusComponentsName, managedConsensusComponents.String()) + }) +} + +func TestManagedConsensusComponents_ConsensusGroupSize(t *testing.T) { + t.Parallel() + + consensusComponentsFactory, _ := consensusComp.NewConsensusComponentsFactory(createMockConsensusComponentsFactoryArgs()) + managedConsensusComponents, _ := consensusComp.NewManagedConsensusComponents(consensusComponentsFactory) + require.NotNil(t, managedConsensusComponents) + + size, err := managedConsensusComponents.ConsensusGroupSize() + require.Equal(t, errorsMx.ErrNilConsensusComponentsHolder, err) + require.Zero(t, size) err = managedConsensusComponents.Create() require.NoError(t, err) - require.NotNil(t, managedConsensusComponents.BroadcastMessenger()) - require.NotNil(t, managedConsensusComponents.Chronology()) - require.NotNil(t, managedConsensusComponents.ConsensusWorker()) - require.NoError(t, managedConsensusComponents.CheckSubcomponents()) + size, err = managedConsensusComponents.ConsensusGroupSize() + require.NoError(t, err) + require.Equal(t, 2, size) } -func TestManagedConsensusComponents_Close(t *testing.T) { +func TestManagedConsensusComponents_CheckSubcomponents(t *testing.T) { t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } - shardCoordinator := mock.NewMultiShardsCoordinatorMock(2) - consensusArgs := componentsMock.GetConsensusArgs(shardCoordinator) - consensusComponentsFactory, _ := consensusComp.NewConsensusComponentsFactory(consensusArgs) + consensusComponentsFactory, _ := consensusComp.NewConsensusComponentsFactory(createMockConsensusComponentsFactoryArgs()) managedConsensusComponents, _ := consensusComp.NewManagedConsensusComponents(consensusComponentsFactory) + require.NotNil(t, managedConsensusComponents) + + require.Equal(t, errorsMx.ErrNilConsensusComponentsHolder, managedConsensusComponents.CheckSubcomponents()) + err := managedConsensusComponents.Create() require.NoError(t, err) + require.Nil(t, managedConsensusComponents.CheckSubcomponents()) +} + +func TestManagedConsensusComponents_IsInterfaceNil(t *testing.T) { + t.Parallel() + + managedConsensusComponents, _ := consensusComp.NewManagedConsensusComponents(nil) + require.True(t, managedConsensusComponents.IsInterfaceNil()) + + consensusComponentsFactory, _ := consensusComp.NewConsensusComponentsFactory(createMockConsensusComponentsFactoryArgs()) + managedConsensusComponents, _ = consensusComp.NewManagedConsensusComponents(consensusComponentsFactory) + require.False(t, managedConsensusComponents.IsInterfaceNil()) +} + +func TestManagedConsensusComponents_Close(t *testing.T) { + t.Parallel() + + consensusComponentsFactory, _ := consensusComp.NewConsensusComponentsFactory(createMockConsensusComponentsFactoryArgs()) + managedConsensusComponents, _ := consensusComp.NewManagedConsensusComponents(consensusComponentsFactory) + + err := managedConsensusComponents.Close() + require.NoError(t, err) + + err = managedConsensusComponents.Create() + require.NoError(t, err) + err = managedConsensusComponents.Close() require.NoError(t, err) require.Nil(t, managedConsensusComponents.BroadcastMessenger()) diff --git a/factory/consensus/consensusComponents_test.go b/factory/consensus/consensusComponents_test.go index 0fb97ab3d4f..184cb8d3d11 100644 --- 
a/factory/consensus/consensusComponents_test.go +++ b/factory/consensus/consensusComponents_test.go @@ -4,418 +4,947 @@ import ( "errors" "strings" "testing" + "time" "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/data" - "github.com/multiversx/mx-chain-go/consensus/chronology" - "github.com/multiversx/mx-chain-go/consensus/spos/sposFactory" - errorsErd "github.com/multiversx/mx-chain-go/errors" - "github.com/multiversx/mx-chain-go/factory" + "github.com/multiversx/mx-chain-crypto-go" + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/consensus" + retriever "github.com/multiversx/mx-chain-go/dataRetriever" + errorsMx "github.com/multiversx/mx-chain-go/errors" consensusComp "github.com/multiversx/mx-chain-go/factory/consensus" "github.com/multiversx/mx-chain-go/factory/mock" - "github.com/multiversx/mx-chain-go/process" + testsMocks "github.com/multiversx/mx-chain-go/integrationTests/mock" + "github.com/multiversx/mx-chain-go/p2p" "github.com/multiversx/mx-chain-go/sharding" + "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" + "github.com/multiversx/mx-chain-go/storage" "github.com/multiversx/mx-chain-go/testscommon" - componentsMock "github.com/multiversx/mx-chain-go/testscommon/components" + consensusMocks "github.com/multiversx/mx-chain-go/testscommon/consensus" + "github.com/multiversx/mx-chain-go/testscommon/cryptoMocks" + "github.com/multiversx/mx-chain-go/testscommon/dataRetriever" + "github.com/multiversx/mx-chain-go/testscommon/dblookupext" + "github.com/multiversx/mx-chain-go/testscommon/epochNotifier" + factoryMocks "github.com/multiversx/mx-chain-go/testscommon/factory" + "github.com/multiversx/mx-chain-go/testscommon/genericMocks" + outportMocks "github.com/multiversx/mx-chain-go/testscommon/outport" "github.com/multiversx/mx-chain-go/testscommon/p2pmocks" + "github.com/multiversx/mx-chain-go/testscommon/shardingMocks" + stateMocks "github.com/multiversx/mx-chain-go/testscommon/state" + "github.com/multiversx/mx-chain-go/testscommon/statusHandler" + "github.com/multiversx/mx-chain-go/testscommon/storageManager" + "github.com/multiversx/mx-chain-go/update" "github.com/stretchr/testify/require" ) -// ------------ Test ConsensusComponentsFactory -------------------- -func TestNewConsensusComponentsFactory_OkValuesShouldWork(t *testing.T) { - t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") +func createMockConsensusComponentsFactoryArgs() consensusComp.ConsensusComponentsFactoryArgs { + return consensusComp.ConsensusComponentsFactoryArgs{ + Config: testscommon.GetGeneralConfig(), + BootstrapRoundIndex: 0, + CoreComponents: &mock.CoreComponentsMock{ + IntMarsh: &testscommon.MarshalizerStub{}, + Hash: &testscommon.HasherStub{ + SizeCalled: func() int { + return 1 + }, + }, + UInt64ByteSliceConv: &testsMocks.Uint64ByteSliceConverterMock{}, + AddrPubKeyConv: &testscommon.PubkeyConverterStub{}, + WatchdogTimer: &testscommon.WatchdogMock{}, + AlarmSch: &testscommon.AlarmSchedulerStub{}, + NtpSyncTimer: &testscommon.SyncTimerStub{}, + GenesisBlockTime: time.Time{}, + NodesConfig: &testscommon.NodesSetupStub{ + GetShardConsensusGroupSizeCalled: func() uint32 { + return 2 + }, + GetMetaConsensusGroupSizeCalled: func() uint32 { + return 2 + }, + }, + EpochChangeNotifier: &epochNotifier.EpochNotifierStub{}, + StartTime: time.Time{}, + }, + NetworkComponents: &testsMocks.NetworkComponentsStub{ + Messenger: &p2pmocks.MessengerStub{}, + InputAntiFlood: &testsMocks.P2PAntifloodHandlerStub{}, 
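+ // individual subtests below swap these stubs for failing variants where an error path is exercised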
+ PeerHonesty: &testscommon.PeerHonestyHandlerStub{}, + }, + CryptoComponents: &testsMocks.CryptoComponentsStub{ + PrivKey: &cryptoMocks.PrivateKeyStub{}, + PubKey: &cryptoMocks.PublicKeyStub{}, + PubKeyString: "pub key string", + PeerSignHandler: &testsMocks.PeerSignatureHandler{}, + MultiSigContainer: &cryptoMocks.MultiSignerContainerMock{ + MultiSigner: &cryptoMocks.MultisignerMock{}, + }, + BlKeyGen: &cryptoMocks.KeyGenStub{}, + BlockSig: &cryptoMocks.SingleSignerStub{}, + KeysHandlerField: &testscommon.KeysHandlerStub{}, + SigHandler: &consensusMocks.SigningHandlerStub{}, + }, + DataComponents: &testsMocks.DataComponentsStub{ + DataPool: &dataRetriever.PoolsHolderStub{ + MiniBlocksCalled: func() storage.Cacher { + return &testscommon.CacherStub{} + }, + TrieNodesCalled: func() storage.Cacher { + return &testscommon.CacherStub{} + }, + HeadersCalled: func() retriever.HeadersPool { + return &testsMocks.HeadersCacherStub{} + }, + }, + BlockChain: &testscommon.ChainHandlerStub{ + GetGenesisHeaderHashCalled: func() []byte { + return []byte("genesis hash") + }, + GetGenesisHeaderCalled: func() data.HeaderHandler { + return &testscommon.HeaderHandlerStub{} + }, + }, + MbProvider: &testsMocks.MiniBlocksProviderStub{}, + Store: &genericMocks.ChainStorerMock{}, + }, + ProcessComponents: &testsMocks.ProcessComponentsStub{ + EpochTrigger: &testsMocks.EpochStartTriggerStub{}, + EpochNotifier: &testsMocks.EpochStartNotifierStub{}, + NodesCoord: &shardingMocks.NodesCoordinatorStub{}, + NodeRedundancyHandlerInternal: &testsMocks.RedundancyHandlerStub{}, + HardforkTriggerField: &testscommon.HardforkTriggerStub{}, + ReqHandler: &testscommon.RequestHandlerStub{}, + PeerMapper: &testsMocks.PeerShardMapperStub{}, + ShardCoord: testscommon.NewMultiShardsCoordinatorMock(2), + RoundHandlerField: &testscommon.RoundHandlerMock{ + TimeDurationCalled: func() time.Duration { + return time.Second + }, + }, + BootSore: &mock.BootstrapStorerMock{}, + ForkDetect: &mock.ForkDetectorMock{}, + BlockProcess: &testscommon.BlockProcessorStub{}, + BlockTrack: &mock.BlockTrackerStub{}, + ScheduledTxsExecutionHandlerInternal: &testscommon.ScheduledTxsExecutionStub{}, + ProcessedMiniBlocksTrackerInternal: &testscommon.ProcessedMiniBlocksTrackerStub{}, + PendingMiniBlocksHdl: &mock.PendingMiniBlocksHandlerStub{}, + BlackListHdl: &testscommon.TimeCacheStub{}, + CurrentEpochProviderInternal: &testsMocks.CurrentNetworkEpochProviderStub{}, + HistoryRepositoryInternal: &dblookupext.HistoryRepositoryStub{}, + IntContainer: &testscommon.InterceptorsContainerStub{}, + HeaderSigVerif: &testsMocks.HeaderSigVerifierStub{}, + HeaderIntegrVerif: &mock.HeaderIntegrityVerifierStub{}, + FallbackHdrValidator: &testscommon.FallBackHeaderValidatorStub{}, + }, + StateComponents: &factoryMocks.StateComponentsMock{ + StorageManagers: map[string]common.StorageManager{ + retriever.UserAccountsUnit.String(): &storageManager.StorageManagerStub{}, + retriever.PeerAccountsUnit.String(): &storageManager.StorageManagerStub{}, + }, + Accounts: &stateMocks.AccountsStub{}, + PeersAcc: &stateMocks.AccountsStub{}, + MissingNodesNotifier: &testscommon.MissingTrieNodesNotifierStub{}, + }, + StatusComponents: &testsMocks.StatusComponentsStub{ + Outport: &outportMocks.OutportStub{}, + }, + StatusCoreComponents: &factoryMocks.StatusCoreComponentsStub{ + AppStatusHandlerField: &statusHandler.AppStatusHandlerStub{}, + }, + ScheduledProcessor: &consensusMocks.ScheduledProcessorStub{}, + IsInImportMode: false, + ShouldDisableWatchdog: false, } +} - shardCoordinator := 
mock.NewMultiShardsCoordinatorMock(2) - args := componentsMock.GetConsensusArgs(shardCoordinator) +func TestNewConsensusComponentsFactory(t *testing.T) { + t.Parallel() - bcf, err := consensusComp.NewConsensusComponentsFactory(args) + t.Run("should work", func(t *testing.T) { + t.Parallel() - require.NotNil(t, bcf) - require.Nil(t, err) -} + ccf, err := consensusComp.NewConsensusComponentsFactory(createMockConsensusComponentsFactoryArgs()) -func TestNewConsensusComponentsFactory_NilCoreComponents(t *testing.T) { - t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } + require.NotNil(t, ccf) + require.Nil(t, err) + }) + t.Run("nil CoreComponents should error", func(t *testing.T) { + t.Parallel() - shardCoordinator := mock.NewMultiShardsCoordinatorMock(2) - args := componentsMock.GetConsensusArgs(shardCoordinator) - args.CoreComponents = nil + args := createMockConsensusComponentsFactoryArgs() + args.CoreComponents = nil + ccf, err := consensusComp.NewConsensusComponentsFactory(args) - bcf, err := consensusComp.NewConsensusComponentsFactory(args) + require.Nil(t, ccf) + require.Equal(t, errorsMx.ErrNilCoreComponentsHolder, err) + }) + t.Run("nil GenesisNodesSetup should error", func(t *testing.T) { + t.Parallel() - require.Nil(t, bcf) - require.Equal(t, errorsErd.ErrNilCoreComponentsHolder, err) -} + args := createMockConsensusComponentsFactoryArgs() + args.CoreComponents = &mock.CoreComponentsMock{ + NodesConfig: nil, + } + ccf, err := consensusComp.NewConsensusComponentsFactory(args) -func TestNewConsensusComponentsFactory_NilDataComponents(t *testing.T) { - t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } + require.Nil(t, ccf) + require.Equal(t, errorsMx.ErrNilGenesisNodesSetupHandler, err) + }) + t.Run("nil DataComponents should error", func(t *testing.T) { + t.Parallel() - shardCoordinator := mock.NewMultiShardsCoordinatorMock(2) - args := componentsMock.GetConsensusArgs(shardCoordinator) - args.DataComponents = nil + args := createMockConsensusComponentsFactoryArgs() + args.DataComponents = nil + ccf, err := consensusComp.NewConsensusComponentsFactory(args) - bcf, err := consensusComp.NewConsensusComponentsFactory(args) + require.Nil(t, ccf) + require.Equal(t, errorsMx.ErrNilDataComponentsHolder, err) + }) + t.Run("nil Datapool should error", func(t *testing.T) { + t.Parallel() - require.Nil(t, bcf) - require.Equal(t, errorsErd.ErrNilDataComponentsHolder, err) -} + args := createMockConsensusComponentsFactoryArgs() + args.DataComponents = &testsMocks.DataComponentsStub{ + DataPool: nil, + } + ccf, err := consensusComp.NewConsensusComponentsFactory(args) -func TestNewConsensusComponentsFactory_NilCryptoComponents(t *testing.T) { - t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } + require.Nil(t, ccf) + require.Equal(t, errorsMx.ErrNilDataPoolsHolder, err) + }) + t.Run("nil BlockChain should error", func(t *testing.T) { + t.Parallel() - shardCoordinator := mock.NewMultiShardsCoordinatorMock(2) - args := componentsMock.GetConsensusArgs(shardCoordinator) - args.CryptoComponents = nil + args := createMockConsensusComponentsFactoryArgs() + args.DataComponents = &testsMocks.DataComponentsStub{ + DataPool: &dataRetriever.PoolsHolderStub{}, + BlockChain: nil, + } + ccf, err := consensusComp.NewConsensusComponentsFactory(args) - bcf, err := consensusComp.NewConsensusComponentsFactory(args) + require.Nil(t, ccf) + require.Equal(t, errorsMx.ErrNilBlockChainHandler, err) + }) + t.Run("nil CryptoComponents should error", 
func(t *testing.T) { + t.Parallel() - require.Nil(t, bcf) - require.Equal(t, errorsErd.ErrNilCryptoComponentsHolder, err) -} + args := createMockConsensusComponentsFactoryArgs() + args.CryptoComponents = nil + ccf, err := consensusComp.NewConsensusComponentsFactory(args) -func TestNewConsensusComponentsFactory_NilNetworkComponents(t *testing.T) { - t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } + require.Nil(t, ccf) + require.Equal(t, errorsMx.ErrNilCryptoComponentsHolder, err) + }) + t.Run("nil PublicKey should error", func(t *testing.T) { + t.Parallel() - shardCoordinator := mock.NewMultiShardsCoordinatorMock(2) - args := componentsMock.GetConsensusArgs(shardCoordinator) - args.NetworkComponents = nil + args := createMockConsensusComponentsFactoryArgs() + args.CryptoComponents = &testsMocks.CryptoComponentsStub{ + PubKey: nil, + } + ccf, err := consensusComp.NewConsensusComponentsFactory(args) - bcf, err := consensusComp.NewConsensusComponentsFactory(args) + require.Nil(t, ccf) + require.Equal(t, errorsMx.ErrNilPublicKey, err) + }) + t.Run("nil PrivateKey should error", func(t *testing.T) { + t.Parallel() - require.Nil(t, bcf) - require.Equal(t, errorsErd.ErrNilNetworkComponentsHolder, err) -} + args := createMockConsensusComponentsFactoryArgs() + args.CryptoComponents = &testsMocks.CryptoComponentsStub{ + PubKey: &cryptoMocks.PublicKeyStub{}, + PrivKey: nil, + } + ccf, err := consensusComp.NewConsensusComponentsFactory(args) -func TestNewConsensusComponentsFactory_NilProcessComponents(t *testing.T) { - t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } + require.Nil(t, ccf) + require.Equal(t, errorsMx.ErrNilPrivateKey, err) + }) + t.Run("nil NetworkComponents should error", func(t *testing.T) { + t.Parallel() - shardCoordinator := mock.NewMultiShardsCoordinatorMock(2) - args := componentsMock.GetConsensusArgs(shardCoordinator) - args.ProcessComponents = nil + args := createMockConsensusComponentsFactoryArgs() + args.NetworkComponents = nil + ccf, err := consensusComp.NewConsensusComponentsFactory(args) - bcf, err := consensusComp.NewConsensusComponentsFactory(args) + require.Nil(t, ccf) + require.Equal(t, errorsMx.ErrNilNetworkComponentsHolder, err) + }) + t.Run("nil Messenger should error", func(t *testing.T) { + t.Parallel() - require.Nil(t, bcf) - require.Equal(t, errorsErd.ErrNilProcessComponentsHolder, err) -} + args := createMockConsensusComponentsFactoryArgs() + args.NetworkComponents = &testsMocks.NetworkComponentsStub{ + Messenger: nil, + } + ccf, err := consensusComp.NewConsensusComponentsFactory(args) -func TestNewConsensusComponentsFactory_NilStateComponents(t *testing.T) { - t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } + require.Nil(t, ccf) + require.Equal(t, errorsMx.ErrNilMessenger, err) + }) + t.Run("nil ProcessComponents should error", func(t *testing.T) { + t.Parallel() - shardCoordinator := mock.NewMultiShardsCoordinatorMock(2) - args := componentsMock.GetConsensusArgs(shardCoordinator) - args.StateComponents = nil + args := createMockConsensusComponentsFactoryArgs() + args.ProcessComponents = nil + ccf, err := consensusComp.NewConsensusComponentsFactory(args) - bcf, err := consensusComp.NewConsensusComponentsFactory(args) + require.Nil(t, ccf) + require.Equal(t, errorsMx.ErrNilProcessComponentsHolder, err) + }) + t.Run("nil NodesCoordinator should error", func(t *testing.T) { + t.Parallel() - require.Nil(t, bcf) - require.Equal(t, errorsErd.ErrNilStateComponentsHolder, err) -} + 
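+ // a ProcessComponentsStub with all fields zero is enough here: checkArgs hits the nil NodesCoordinator before any other process sub-component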
args := createMockConsensusComponentsFactoryArgs() + args.ProcessComponents = &testsMocks.ProcessComponentsStub{ + NodesCoord: nil, + } + ccf, err := consensusComp.NewConsensusComponentsFactory(args) -// ------------ Test Old Use Cases -------------------- -func TestConsensusComponentsFactory_CreateGenesisBlockNotInitializedShouldErr(t *testing.T) { - t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } + require.Nil(t, ccf) + require.Equal(t, errorsMx.ErrNilNodesCoordinator, err) + }) + t.Run("nil ShardCoordinator should error", func(t *testing.T) { + t.Parallel() - shardCoordinator := mock.NewMultiShardsCoordinatorMock(2) - consensusArgs := componentsMock.GetConsensusArgs(shardCoordinator) - consensusComponentsFactory, _ := consensusComp.NewConsensusComponentsFactory(consensusArgs) - managedConsensusComponents, _ := consensusComp.NewManagedConsensusComponents(consensusComponentsFactory) + args := createMockConsensusComponentsFactoryArgs() + args.ProcessComponents = &testsMocks.ProcessComponentsStub{ + NodesCoord: &shardingMocks.NodesCoordinatorStub{}, + ShardCoord: nil, + } + ccf, err := consensusComp.NewConsensusComponentsFactory(args) - dataComponents := consensusArgs.DataComponents + require.Nil(t, ccf) + require.Equal(t, errorsMx.ErrNilShardCoordinator, err) + }) + t.Run("nil RoundHandler should error", func(t *testing.T) { + t.Parallel() + + args := createMockConsensusComponentsFactoryArgs() + args.ProcessComponents = &testsMocks.ProcessComponentsStub{ + NodesCoord: &shardingMocks.NodesCoordinatorStub{}, + ShardCoord: &testscommon.ShardsCoordinatorMock{}, + RoundHandlerField: nil, + } + ccf, err := consensusComp.NewConsensusComponentsFactory(args) - dataComponents.SetBlockchain(&testscommon.ChainHandlerStub{ - GetGenesisHeaderHashCalled: func() []byte { - return nil - }, - GetGenesisHeaderCalled: func() data.HeaderHandler { - return nil - }, + require.Nil(t, ccf) + require.Equal(t, errorsMx.ErrNilRoundHandler, err) }) + t.Run("nil HardforkTrigger should error", func(t *testing.T) { + t.Parallel() + + args := createMockConsensusComponentsFactoryArgs() + args.ProcessComponents = &testsMocks.ProcessComponentsStub{ + NodesCoord: &shardingMocks.NodesCoordinatorStub{}, + ShardCoord: &testscommon.ShardsCoordinatorMock{}, + RoundHandlerField: &testscommon.RoundHandlerMock{}, + HardforkTriggerField: nil, + } + ccf, err := consensusComp.NewConsensusComponentsFactory(args) - err := managedConsensusComponents.Create() - require.True(t, errors.Is(err, errorsErd.ErrConsensusComponentsFactoryCreate)) - require.True(t, strings.Contains(err.Error(), errorsErd.ErrGenesisBlockNotInitialized.Error())) -} + require.Nil(t, ccf) + require.Equal(t, errorsMx.ErrNilHardforkTrigger, err) + }) + t.Run("nil StateComponents should error", func(t *testing.T) { + t.Parallel() -func TestConsensusComponentsFactory_CreateForShard(t *testing.T) { - t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } + args := createMockConsensusComponentsFactoryArgs() + args.StateComponents = nil + ccf, err := consensusComp.NewConsensusComponentsFactory(args) - shardCoordinator := mock.NewMultiShardsCoordinatorMock(2) - args := componentsMock.GetConsensusArgs(shardCoordinator) - ccf, _ := consensusComp.NewConsensusComponentsFactory(args) - require.NotNil(t, ccf) + require.Nil(t, ccf) + require.Equal(t, errorsMx.ErrNilStateComponentsHolder, err) + }) + t.Run("nil StatusComponents should error", func(t *testing.T) { + t.Parallel() - cc, err := ccf.Create() - require.NoError(t, err) - 
require.NotNil(t, cc) -} + args := createMockConsensusComponentsFactoryArgs() + args.StatusComponents = nil + ccf, err := consensusComp.NewConsensusComponentsFactory(args) -type wrappedProcessComponents struct { - factory.ProcessComponentsHolder -} + require.Nil(t, ccf) + require.Equal(t, errorsMx.ErrNilStatusComponentsHolder, err) + }) + t.Run("nil OutportHandler should error", func(t *testing.T) { + t.Parallel() -func (wp *wrappedProcessComponents) ShardCoordinator() sharding.Coordinator { - shC := mock.NewMultiShardsCoordinatorMock(2) - shC.SelfIDCalled = func() uint32 { - return core.MetachainShardId - } + args := createMockConsensusComponentsFactoryArgs() + args.StatusComponents = &testsMocks.StatusComponentsStub{ + Outport: nil, + } + ccf, err := consensusComp.NewConsensusComponentsFactory(args) - return shC -} + require.Nil(t, ccf) + require.Equal(t, errorsMx.ErrNilOutportHandler, err) + }) + t.Run("nil ScheduledProcessor should error", func(t *testing.T) { + t.Parallel() -func TestConsensusComponentsFactory_CreateForMeta(t *testing.T) { - t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } + args := createMockConsensusComponentsFactoryArgs() + args.ScheduledProcessor = nil + ccf, err := consensusComp.NewConsensusComponentsFactory(args) - shardCoordinator := mock.NewMultiShardsCoordinatorMock(2) - args := componentsMock.GetConsensusArgs(shardCoordinator) + require.Nil(t, ccf) + require.Equal(t, errorsMx.ErrNilScheduledProcessor, err) + }) + t.Run("nil StatusCoreComponents should error", func(t *testing.T) { + t.Parallel() - args.ProcessComponents = &wrappedProcessComponents{ - ProcessComponentsHolder: args.ProcessComponents, - } - ccf, _ := consensusComp.NewConsensusComponentsFactory(args) - require.NotNil(t, ccf) + args := createMockConsensusComponentsFactoryArgs() + args.StatusCoreComponents = nil + ccf, err := consensusComp.NewConsensusComponentsFactory(args) - cc, err := ccf.Create() - require.NoError(t, err) - require.NotNil(t, cc) + require.Nil(t, ccf) + require.Equal(t, errorsMx.ErrNilStatusCoreComponents, err) + }) } -func TestConsensusComponentsFactory_CreateNilShardCoordinator(t *testing.T) { +func TestConsensusComponentsFactory_Create(t *testing.T) { t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } - shardCoordinator := mock.NewMultiShardsCoordinatorMock(2) - consensusArgs := componentsMock.GetConsensusArgs(shardCoordinator) - processComponents := &mock.ProcessComponentsMock{} - consensusArgs.ProcessComponents = processComponents - consensusComponentsFactory, _ := consensusComp.NewConsensusComponentsFactory(consensusArgs) + expectedErr := errors.New("expected error") + t.Run("invalid shard id should error", func(t *testing.T) { + t.Parallel() + + args := createMockConsensusComponentsFactoryArgs() + processCompStub, ok := args.ProcessComponents.(*testsMocks.ProcessComponentsStub) + require.True(t, ok) + processCompStub.ShardCoord = &testscommon.ShardsCoordinatorMock{ + SelfIDCalled: func() uint32 { + return 5 + }, + NoShards: 2, + } + ccf, _ := consensusComp.NewConsensusComponentsFactory(args) + require.NotNil(t, ccf) - cc, err := consensusComponentsFactory.Create() + cc, err := ccf.Create() + require.Equal(t, sharding.ErrShardIdOutOfRange, err) + require.Nil(t, cc) + }) + t.Run("genesis block not initialized should error", func(t *testing.T) { + t.Parallel() + + args := createMockConsensusComponentsFactoryArgs() + processCompStub, ok := args.ProcessComponents.(*testsMocks.ProcessComponentsStub) + require.True(t, ok) + 
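+ // run from the metachain shard for extra coverage; the empty genesis header hash set below triggers the error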
processCompStub.ShardCoord = &testscommon.ShardsCoordinatorMock{ + SelfIDCalled: func() uint32 { + return core.MetachainShardId // coverage + }, + NoShards: 2, + } - require.Nil(t, cc) - require.Equal(t, errorsErd.ErrNilShardCoordinator, err) -} + dataCompStub, ok := args.DataComponents.(*testsMocks.DataComponentsStub) + require.True(t, ok) + dataCompStub.BlockChain = &testscommon.ChainHandlerStub{ + GetGenesisHeaderHashCalled: func() []byte { + return []byte("") + }, + } + ccf, _ := consensusComp.NewConsensusComponentsFactory(args) + require.NotNil(t, ccf) -func TestConsensusComponentsFactory_CreateConsensusTopicCreateTopicError(t *testing.T) { - t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } + cc, err := ccf.Create() + require.Equal(t, errorsMx.ErrGenesisBlockNotInitialized, err) + require.Nil(t, cc) + }) + t.Run("createChronology fails should error", func(t *testing.T) { + t.Parallel() + + args := createMockConsensusComponentsFactoryArgs() + processCompStub, ok := args.ProcessComponents.(*testsMocks.ProcessComponentsStub) + require.True(t, ok) + cnt := 0 + processCompStub.RoundHandlerCalled = func() consensus.RoundHandler { + cnt++ + if cnt > 1 { + return nil + } + return &testscommon.RoundHandlerMock{} + } - localError := errors.New("error") - shardCoordinator := mock.NewMultiShardsCoordinatorMock(2) - args := componentsMock.GetConsensusArgs(shardCoordinator) - networkComponents := componentsMock.GetDefaultNetworkComponents() - networkComponents.Messenger = &p2pmocks.MessengerStub{ - HasTopicValidatorCalled: func(name string) bool { - return false - }, - HasTopicCalled: func(name string) bool { - return false - }, - CreateTopicCalled: func(name string, createChannelForTopic bool) error { - return localError - }, - } - args.NetworkComponents = networkComponents + args.IsInImportMode = true // coverage + args.ShouldDisableWatchdog = true // coverage + statusCompStub, ok := args.StatusComponents.(*testsMocks.StatusComponentsStub) + require.True(t, ok) + statusCompStub.Outport = &outportMocks.OutportStub{ + HasDriversCalled: func() bool { + return true // coverage + }, + } + ccf, _ := consensusComp.NewConsensusComponentsFactory(args) + require.NotNil(t, ccf) - bcf, _ := consensusComp.NewConsensusComponentsFactory(args) - cc, err := bcf.Create() + cc, err := ccf.Create() + require.Error(t, err) + require.True(t, strings.Contains(err.Error(), "roundHandler")) + require.Nil(t, cc) + }) + t.Run("createBootstrapper fails due to nil shard coordinator should error", func(t *testing.T) { + t.Parallel() + + args := createMockConsensusComponentsFactoryArgs() + processCompStub, ok := args.ProcessComponents.(*testsMocks.ProcessComponentsStub) + require.True(t, ok) + cnt := 0 + processCompStub.ShardCoordinatorCalled = func() sharding.Coordinator { + cnt++ + if cnt > 2 { + return nil // createBootstrapper fails + } + return testscommon.NewMultiShardsCoordinatorMock(2) + } + ccf, _ := consensusComp.NewConsensusComponentsFactory(args) + require.NotNil(t, ccf) - require.Nil(t, cc) - require.Equal(t, localError, err) -} + cc, err := ccf.Create() + require.Equal(t, errorsMx.ErrNilShardCoordinator, err) + require.Nil(t, cc) + }) + t.Run("createBootstrapper fails due to invalid shard coordinator should error", func(t *testing.T) { + t.Parallel() + + args := createMockConsensusComponentsFactoryArgs() + processCompStub, ok := args.ProcessComponents.(*testsMocks.ProcessComponentsStub) + require.True(t, ok) + cnt := 0 + shardC := testscommon.NewMultiShardsCoordinatorMock(2) + 
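+ // early ShardCoordinator() calls get a valid coordinator; from the third call on, SelfID is pushed out of range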
processCompStub.ShardCoordinatorCalled = func() sharding.Coordinator { + cnt++ + if cnt > 2 { + shardC.SelfIDCalled = func() uint32 { + return shardC.NoShards + 1 // createBootstrapper returns ErrShardIdOutOfRange + } + return shardC + } + return shardC + } + ccf, _ := consensusComp.NewConsensusComponentsFactory(args) + require.NotNil(t, ccf) + + cc, err := ccf.Create() + require.Equal(t, sharding.ErrShardIdOutOfRange, err) + require.Nil(t, cc) + }) + t.Run("createShardBootstrapper fails due to NewShardStorageBootstrapper failure should error", func(t *testing.T) { + t.Parallel() + + args := createMockConsensusComponentsFactoryArgs() + processCompStub, ok := args.ProcessComponents.(*testsMocks.ProcessComponentsStub) + require.True(t, ok) + cnt := 0 + processCompStub.ShardCoordinatorCalled = func() sharding.Coordinator { + cnt++ + if cnt > 3 { + return nil // NewShardStorageBootstrapper fails + } + return testscommon.NewMultiShardsCoordinatorMock(2) + } + ccf, _ := consensusComp.NewConsensusComponentsFactory(args) + require.NotNil(t, ccf) + + cc, err := ccf.Create() + require.Error(t, err) + require.True(t, strings.Contains(err.Error(), "shard coordinator")) + require.Nil(t, cc) + }) + t.Run("createUserAccountsSyncer fails due to missing UserAccountTrie should error", func(t *testing.T) { + t.Parallel() + + args := createMockConsensusComponentsFactoryArgs() + stateCompStub, ok := args.StateComponents.(*factoryMocks.StateComponentsMock) + require.True(t, ok) + stateCompStub.StorageManagers = make(map[string]common.StorageManager) // missing UserAccountTrie + ccf, _ := consensusComp.NewConsensusComponentsFactory(args) + require.NotNil(t, ccf) + + cc, err := ccf.Create() + require.Equal(t, errorsMx.ErrNilTrieStorageManager, err) + require.Nil(t, cc) + }) + t.Run("createUserAccountsSyncer fails due to invalid NumConcurrentTrieSyncers should error", func(t *testing.T) { + t.Parallel() + + args := createMockConsensusComponentsFactoryArgs() + args.Config.TrieSync.NumConcurrentTrieSyncers = 0 + ccf, _ := consensusComp.NewConsensusComponentsFactory(args) + require.NotNil(t, ccf) + + cc, err := ccf.Create() + require.Error(t, err) + require.True(t, strings.Contains(err.Error(), "value is not positive")) + require.Nil(t, cc) + }) + t.Run("createMetaChainBootstrapper fails due to NewMetaStorageBootstrapper failure should error", func(t *testing.T) { + t.Parallel() + + args := createMockConsensusComponentsFactoryArgs() + processCompStub, ok := args.ProcessComponents.(*testsMocks.ProcessComponentsStub) + require.True(t, ok) + cnt := 0 + processCompStub.ShardCoordinatorCalled = func() sharding.Coordinator { + cnt++ + if cnt > 3 { + return nil // NewMetaStorageBootstrapper fails + } + shardC := testscommon.NewMultiShardsCoordinatorMock(2) + shardC.CurrentShard = core.MetachainShardId + return shardC + } + ccf, _ := consensusComp.NewConsensusComponentsFactory(args) + require.NotNil(t, ccf) + + cc, err := ccf.Create() + require.Error(t, err) + require.True(t, strings.Contains(err.Error(),
"shard coordinator")) + require.Nil(t, cc) + }) + t.Run("createUserAccountsSyncer fails due to missing UserAccountTrie should error", func(t *testing.T) { + t.Parallel() + + args := createMockConsensusComponentsFactoryArgs() + stateCompStub, ok := args.StateComponents.(*factoryMocks.StateComponentsMock) + require.True(t, ok) + stateCompStub.StorageManagers = make(map[string]common.StorageManager) // missing UserAccountTrie + processCompStub, ok := args.ProcessComponents.(*testsMocks.ProcessComponentsStub) + require.True(t, ok) + processCompStub.ShardCoordinatorCalled = func() sharding.Coordinator { + shardC := testscommon.NewMultiShardsCoordinatorMock(2) + shardC.CurrentShard = core.MetachainShardId + return shardC + } + ccf, _ := consensusComp.NewConsensusComponentsFactory(args) + require.NotNil(t, ccf) - require.Nil(t, cc) - require.Equal(t, errorsErd.ErrNilMessenger, err) -} + cc, err := ccf.Create() + require.Equal(t, errorsMx.ErrNilTrieStorageManager, err) + require.Nil(t, cc) + }) + t.Run("createValidatorAccountsSyncer fails due to missing PeerAccountTrie should error", func(t *testing.T) { + t.Parallel() + + args := createMockConsensusComponentsFactoryArgs() + stateCompStub, ok := args.StateComponents.(*factoryMocks.StateComponentsMock) + require.True(t, ok) + stateCompStub.StorageManagers = map[string]common.StorageManager{ + retriever.UserAccountsUnit.String(): &storageManager.StorageManagerStub{}, + } // missing PeerAccountTrie + processCompStub, ok := args.ProcessComponents.(*testsMocks.ProcessComponentsStub) + require.True(t, ok) + processCompStub.ShardCoordinatorCalled = func() sharding.Coordinator { + shardC := testscommon.NewMultiShardsCoordinatorMock(2) + shardC.CurrentShard = core.MetachainShardId + return shardC + } + ccf, _ := consensusComp.NewConsensusComponentsFactory(args) + require.NotNil(t, ccf) -func TestConsensusComponentsFactory_CreateNilSyncTimer(t *testing.T) { - t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } + cc, err := ccf.Create() + require.Equal(t, errorsMx.ErrNilTrieStorageManager, err) + require.Nil(t, cc) + }) + t.Run("createConsensusState fails due to nil public key should error", func(t *testing.T) { + t.Parallel() + + args := createMockConsensusComponentsFactoryArgs() + cryptoCompStub, ok := args.CryptoComponents.(*testsMocks.CryptoComponentsStub) + require.True(t, ok) + cnt := 0 + cryptoCompStub.PublicKeyCalled = func() crypto.PublicKey { + cnt++ + if cnt > 1 { + return nil + } + return &cryptoMocks.PublicKeyStub{} + } + processCompStub, ok := args.ProcessComponents.(*testsMocks.ProcessComponentsStub) + require.True(t, ok) + processCompStub.ShardCoordinatorCalled = func() sharding.Coordinator { + shardC := testscommon.NewMultiShardsCoordinatorMock(2) + shardC.CurrentShard = core.MetachainShardId // coverage + return shardC + } + ccf, _ := consensusComp.NewConsensusComponentsFactory(args) + require.NotNil(t, ccf) - shardCoordinator := mock.NewMultiShardsCoordinatorMock(2) - args := componentsMock.GetConsensusArgs(shardCoordinator) - coreComponents := componentsMock.GetDefaultCoreComponents() - coreComponents.NtpSyncTimer = nil - args.CoreComponents = coreComponents - bcf, _ := consensusComp.NewConsensusComponentsFactory(args) - cc, err := bcf.Create() + cc, err := ccf.Create() + require.Equal(t, errorsMx.ErrNilPublicKey, err) + require.Nil(t, cc) + }) + t.Run("createConsensusState fails due to ToByteArray failure should error", func(t *testing.T) { + t.Parallel() + + args := createMockConsensusComponentsFactoryArgs() + 
cryptoCompStub, ok := args.CryptoComponents.(*testsMocks.CryptoComponentsStub) + require.True(t, ok) + cryptoCompStub.PubKey = &cryptoMocks.PublicKeyStub{ + ToByteArrayStub: func() ([]byte, error) { + return nil, expectedErr + }, + } + ccf, _ := consensusComp.NewConsensusComponentsFactory(args) + require.NotNil(t, ccf) - require.Nil(t, cc) - require.Equal(t, chronology.ErrNilSyncTimer, err) -} + cc, err := ccf.Create() + require.Equal(t, expectedErr, err) + require.Nil(t, cc) + }) + t.Run("createConsensusState fails due to nil nodes coordinator should error", func(t *testing.T) { + t.Parallel() + + args := createMockConsensusComponentsFactoryArgs() + processCompStub, ok := args.ProcessComponents.(*testsMocks.ProcessComponentsStub) + require.True(t, ok) + cnt := 0 + processCompStub.NodesCoordinatorCalled = func() nodesCoordinator.NodesCoordinator { + cnt++ + if cnt > 2 { + return nil + } + return &shardingMocks.NodesCoordinatorStub{} + } + ccf, _ := consensusComp.NewConsensusComponentsFactory(args) + require.NotNil(t, ccf) -func TestStartConsensus_ShardBootstrapperNilAccounts(t *testing.T) { - t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } + cc, err := ccf.Create() + require.Equal(t, errorsMx.ErrNilNodesCoordinator, err) + require.Nil(t, cc) + }) + t.Run("createConsensusState fails due to GetConsensusWhitelistedNodes failure should error", func(t *testing.T) { + t.Parallel() + + args := createMockConsensusComponentsFactoryArgs() + processCompStub, ok := args.ProcessComponents.(*testsMocks.ProcessComponentsStub) + require.True(t, ok) + processCompStub.NodesCoordinatorCalled = func() nodesCoordinator.NodesCoordinator { + return &shardingMocks.NodesCoordinatorStub{ + GetConsensusWhitelistedNodesCalled: func(epoch uint32) (map[string]struct{}, error) { + return nil, expectedErr + }, + } + } + ccf, _ := consensusComp.NewConsensusComponentsFactory(args) + require.NotNil(t, ccf) - shardCoordinator := mock.NewMultiShardsCoordinatorMock(2) - args := componentsMock.GetConsensusArgs(shardCoordinator) - stateComponents := componentsMock.GetDefaultStateComponents() - stateComponents.Accounts = nil - args.StateComponents = stateComponents - bcf, _ := consensusComp.NewConsensusComponentsFactory(args) - cc, err := bcf.Create() + cc, err := ccf.Create() + require.Equal(t, expectedErr, err) + require.Nil(t, cc) + }) + t.Run("GetConsensusCoreFactory failure should error", func(t *testing.T) { + t.Parallel() - require.Nil(t, cc) - require.Equal(t, process.ErrNilAccountsAdapter, err) -} + args := createMockConsensusComponentsFactoryArgs() + args.Config.Consensus.Type = "invalid" // GetConsensusCoreFactory fails + ccf, _ := consensusComp.NewConsensusComponentsFactory(args) + require.NotNil(t, ccf) -func TestStartConsensus_ShardBootstrapperNilPoolHolder(t *testing.T) { - t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } + cc, err := ccf.Create() + require.Error(t, err) + require.Nil(t, cc) + }) + t.Run("GetBroadcastMessenger failure should error", func(t *testing.T) { + t.Parallel() + + args := createMockConsensusComponentsFactoryArgs() + processCompStub, ok := args.ProcessComponents.(*testsMocks.ProcessComponentsStub) + require.True(t, ok) + cnt := 0 + processCompStub.ShardCoordinatorCalled = func() sharding.Coordinator { + cnt++ + if cnt > 6 { + return nil // GetBroadcastMessenger fails + } + return testscommon.NewMultiShardsCoordinatorMock(2) + } + dataCompStub, ok := args.DataComponents.(*testsMocks.DataComponentsStub) + require.True(t, ok) + 
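+ // the chain handler below also returns a non-nil current header, exercising the non-genesis startup path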
dataCompStub.BlockChain = &testscommon.ChainHandlerStub{ + GetGenesisHeaderHashCalled: func() []byte { + return []byte("genesis hash") + }, + GetGenesisHeaderCalled: func() data.HeaderHandler { + return &testscommon.HeaderHandlerStub{} + }, + GetCurrentBlockHeaderCalled: func() data.HeaderHandler { + return &testscommon.HeaderHandlerStub{} // coverage + }, + } + ccf, _ := consensusComp.NewConsensusComponentsFactory(args) + require.NotNil(t, ccf) - shardCoordinator := mock.NewMultiShardsCoordinatorMock(1) - shardCoordinator.CurrentShard = 0 - args := componentsMock.GetConsensusArgs(shardCoordinator) - dataComponents := componentsMock.GetDefaultDataComponents() - dataComponents.DataPool = nil - args.DataComponents = dataComponents - processComponents := componentsMock.GetDefaultProcessComponents(shardCoordinator) - args.ProcessComponents = processComponents - bcf, _ := consensusComp.NewConsensusComponentsFactory(args) - cc, err := bcf.Create() - - require.Nil(t, cc) - require.Equal(t, errorsErd.ErrNilDataPoolsHolder, err) -} + cc, err := ccf.Create() + require.Error(t, err) + require.True(t, strings.Contains(err.Error(), "shard coordinator")) + require.Nil(t, cc) + }) + t.Run("NewWorker failure should error", func(t *testing.T) { + t.Parallel() + + args := createMockConsensusComponentsFactoryArgs() + args.Config.Marshalizer.SizeCheckDelta = 1 // coverage + processCompStub, ok := args.ProcessComponents.(*testsMocks.ProcessComponentsStub) + require.True(t, ok) + processCompStub.HeaderIntegrVerif = nil + ccf, _ := consensusComp.NewConsensusComponentsFactory(args) + require.NotNil(t, ccf) + + cc, err := ccf.Create() + require.Error(t, err) + require.True(t, strings.Contains(err.Error(), "header integrity verifier")) + require.Nil(t, cc) + }) + t.Run("createConsensusTopic fails due to nil shard coordinator should error", func(t *testing.T) { + t.Parallel() + + args := createMockConsensusComponentsFactoryArgs() + processCompStub, ok := args.ProcessComponents.(*testsMocks.ProcessComponentsStub) + require.True(t, ok) + cnt := 0 + processCompStub.ShardCoordinatorCalled = func() sharding.Coordinator { + cnt++ + if cnt > 9 { + return nil // createConsensusTopic fails + } + return testscommon.NewMultiShardsCoordinatorMock(2) + } + ccf, _ := consensusComp.NewConsensusComponentsFactory(args) + require.NotNil(t, ccf) -func TestStartConsensus_MetaBootstrapperNilPoolHolder(t *testing.T) { - t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } + cc, err := ccf.Create() + require.Equal(t, errorsMx.ErrNilShardCoordinator, err) + require.Nil(t, cc) + }) + t.Run("createConsensusTopic fails due to nil messenger should error", func(t *testing.T) { + t.Parallel() + + args := createMockConsensusComponentsFactoryArgs() + netwCompStub, ok := args.NetworkComponents.(*testsMocks.NetworkComponentsStub) + require.True(t, ok) + cnt := 0 + netwCompStub.MessengerCalled = func() p2p.Messenger { + cnt++ + if cnt > 3 { + return nil + } + return &p2pmocks.MessengerStub{} + } + ccf, _ := consensusComp.NewConsensusComponentsFactory(args) + require.NotNil(t, ccf) - shardCoordinator := mock.NewMultiShardsCoordinatorMock(1) - shardCoordinator.CurrentShard = core.MetachainShardId - shardCoordinator.ComputeIdCalled = func(address []byte) uint32 { - if core.IsSmartContractOnMetachain(address[len(address)-1:], address) { - return core.MetachainShardId + cc, err := ccf.Create() + require.Equal(t, errorsMx.ErrNilMessenger, err) + require.Nil(t, cc) + }) + t.Run("createConsensusTopic fails due to CreateTopic failure should
error", func(t *testing.T) { + t.Parallel() + + args := createMockConsensusComponentsFactoryArgs() + netwCompStub, ok := args.NetworkComponents.(*testsMocks.NetworkComponentsStub) + require.True(t, ok) + netwCompStub.Messenger = &p2pmocks.MessengerStub{ + HasTopicCalled: func(name string) bool { + return false + }, + CreateTopicCalled: func(name string, createChannelForTopic bool) error { + return expectedErr + }, } + ccf, _ := consensusComp.NewConsensusComponentsFactory(args) + require.NotNil(t, ccf) - return 0 - } - args := componentsMock.GetConsensusArgs(shardCoordinator) - dataComponents := componentsMock.GetDefaultDataComponents() - dataComponents.DataPool = nil - args.DataComponents = dataComponents - args.ProcessComponents = componentsMock.GetDefaultProcessComponents(shardCoordinator) - bcf, err := consensusComp.NewConsensusComponentsFactory(args) - require.Nil(t, err) - require.NotNil(t, bcf) - cc, err := bcf.Create() - - require.Nil(t, cc) - require.Equal(t, errorsErd.ErrNilDataPoolsHolder, err) -} + cc, err := ccf.Create() + require.Equal(t, expectedErr, err) + require.Nil(t, cc) + }) + t.Run("createConsensusState fails due to nil KeysHandler should error", func(t *testing.T) { + t.Parallel() + + args := createMockConsensusComponentsFactoryArgs() + cryptoCompStub, ok := args.CryptoComponents.(*testsMocks.CryptoComponentsStub) + require.True(t, ok) + cnt := 0 + cryptoCompStub.KeysHandlerCalled = func() consensus.KeysHandler { + cnt++ + if cnt > 0 { + return nil + } + return &testscommon.KeysHandlerStub{} + } + ccf, _ := consensusComp.NewConsensusComponentsFactory(args) + require.NotNil(t, ccf) -func TestStartConsensus_MetaBootstrapperWrongNumberShards(t *testing.T) { - t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } + cc, err := ccf.Create() + require.Error(t, err) + require.Contains(t, err.Error(), "keys handler") + require.Nil(t, cc) + }) + t.Run("NewConsensusCore failure should error", func(t *testing.T) { + t.Parallel() + + args := createMockConsensusComponentsFactoryArgs() + cryptoCompStub, ok := args.CryptoComponents.(*testsMocks.CryptoComponentsStub) + require.True(t, ok) + cryptoCompStub.SigHandler = nil + ccf, _ := consensusComp.NewConsensusComponentsFactory(args) + require.NotNil(t, ccf) + + cc, err := ccf.Create() + require.Error(t, err) + require.True(t, strings.Contains(err.Error(), "signing handler")) + require.Nil(t, cc) + }) + t.Run("GetSubroundsFactory failure should error", func(t *testing.T) { + t.Parallel() + + args := createMockConsensusComponentsFactoryArgs() + statusCoreCompStub, ok := args.StatusCoreComponents.(*factoryMocks.StatusCoreComponentsStub) + require.True(t, ok) + cnt := 0 + statusCoreCompStub.AppStatusHandlerCalled = func() core.AppStatusHandler { + cnt++ + if cnt > 4 { + return nil + } + return &statusHandler.AppStatusHandlerStub{} + } + ccf, _ := consensusComp.NewConsensusComponentsFactory(args) + require.NotNil(t, ccf) - shardCoordinator := mock.NewMultiShardsCoordinatorMock(1) - args := componentsMock.GetConsensusArgs(shardCoordinator) - processComponents := componentsMock.GetDefaultProcessComponents(shardCoordinator) - args.ProcessComponents = processComponents - bcf, err := consensusComp.NewConsensusComponentsFactory(args) - require.Nil(t, err) - shardCoordinator.CurrentShard = 2 - cc, err := bcf.Create() - - require.Nil(t, cc) - require.Equal(t, sharding.ErrShardIdOutOfRange, err) -} + cc, err := ccf.Create() + require.Error(t, err) + require.True(t, strings.Contains(err.Error(), "AppStatusHandler")) + 
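+ // note: the app status handler stub above turns nil from its fifth call on, so the earlier factory steps succeed and the failure surfaces only inside GetSubroundsFactory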
require.Nil(t, cc) + }) + t.Run("addCloserInstances failure should error", func(t *testing.T) { + t.Parallel() + + args := createMockConsensusComponentsFactoryArgs() + processCompStub, ok := args.ProcessComponents.(*testsMocks.ProcessComponentsStub) + require.True(t, ok) + processCompStub.HardforkTriggerField = &testscommon.HardforkTriggerStub{ + AddCloserCalled: func(closer update.Closer) error { + return expectedErr + }, + } + ccf, _ := consensusComp.NewConsensusComponentsFactory(args) + require.NotNil(t, ccf) -func TestStartConsensus_ShardBootstrapperPubKeyToByteArrayError(t *testing.T) { - t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } + cc, err := ccf.Create() + require.Equal(t, expectedErr, err) + require.Nil(t, cc) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() - localErr := errors.New("err") - shardCoordinator := mock.NewMultiShardsCoordinatorMock(2) - args := componentsMock.GetConsensusArgs(shardCoordinator) - cryptoParams := componentsMock.GetDefaultCryptoComponents() - cryptoParams.PubKey = &mock.PublicKeyMock{ - ToByteArrayHandler: func() (i []byte, err error) { - return []byte("nil"), localErr - }, - } - args.CryptoComponents = cryptoParams - bcf, _ := consensusComp.NewConsensusComponentsFactory(args) - cc, err := bcf.Create() - require.Nil(t, cc) - require.Equal(t, localErr, err) -} + ccf, _ := consensusComp.NewConsensusComponentsFactory(createMockConsensusComponentsFactoryArgs()) + require.NotNil(t, ccf) -func TestStartConsensus_ShardBootstrapperInvalidConsensusType(t *testing.T) { - t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } + cc, err := ccf.Create() + require.NoError(t, err) + require.NotNil(t, cc) - shardCoordinator := mock.NewMultiShardsCoordinatorMock(2) - args := componentsMock.GetConsensusArgs(shardCoordinator) - args.Config.Consensus.Type = "invalid" - bcf, err := consensusComp.NewConsensusComponentsFactory(args) - require.Nil(t, err) - cc, err := bcf.Create() - require.Nil(t, cc) - require.Equal(t, sposFactory.ErrInvalidConsensusType, err) + require.Nil(t, cc.Close()) + }) } diff --git a/factory/constants.go b/factory/constants.go index 58a37c56d00..5b2eb2a16f6 100644 --- a/factory/constants.go +++ b/factory/constants.go @@ -13,8 +13,6 @@ const ( CryptoComponentsName = "managedCryptoComponents" // DataComponentsName is the data components identifier DataComponentsName = "managedDataComponents" - // HeartbeatComponentsName is the heartbeat components identifier - HeartbeatComponentsName = "managedHeartbeatComponents" // HeartbeatV2ComponentsName is the heartbeat V2 components identifier HeartbeatV2ComponentsName = "managedHeartbeatV2Components" // NetworkComponentsName is the network components identifier diff --git a/factory/core/coreComponents.go b/factory/core/coreComponents.go index 46b5a50585b..33310be51b4 100644 --- a/factory/core/coreComponents.go +++ b/factory/core/coreComponents.go @@ -261,12 +261,15 @@ func (ccf *coreComponentsFactory) Create() (*coreComponents, error) { return nil, err } + txVersionChecker := versioning.NewTxVersionChecker(ccf.config.GeneralSettings.MinTransactionVersion) + log.Trace("creating economics data components") argsNewEconomicsData := economics.ArgsNewEconomicsData{ Economics: &ccf.economicsConfig, EpochNotifier: epochNotifier, EnableEpochsHandler: enableEpochsHandler, BuiltInFunctionsCostHandler: builtInCostHandler, + TxVersionChecker: txVersionChecker, } economicsData, err := economics.NewEconomicsData(argsNewEconomicsData) if err != nil { @@ 
-312,8 +315,6 @@ func (ccf *coreComponentsFactory) Create() (*coreComponents, error) { return nil, err } - txVersionChecker := versioning.NewTxVersionChecker(ccf.config.GeneralSettings.MinTransactionVersion) - // set as observer at first - it will be updated when creating the nodes coordinator nodeTypeProvider := nodetype.NewNodeTypeProvider(core.NodeTypeObserver) diff --git a/factory/core/coreComponentsHandler_test.go b/factory/core/coreComponentsHandler_test.go index c271963a8ea..9c22a9a2f22 100644 --- a/factory/core/coreComponentsHandler_test.go +++ b/factory/core/coreComponentsHandler_test.go @@ -2,89 +2,162 @@ package core_test import ( "testing" + "time" "github.com/multiversx/mx-chain-go/config" + errorsMx "github.com/multiversx/mx-chain-go/errors" + "github.com/multiversx/mx-chain-go/factory" coreComp "github.com/multiversx/mx-chain-go/factory/core" + "github.com/multiversx/mx-chain-go/testscommon" componentsMock "github.com/multiversx/mx-chain-go/testscommon/components" "github.com/stretchr/testify/require" ) -// ------------ Test ManagedCoreComponents -------------------- -func TestManagedCoreComponents_CreateWithInvalidArgsShouldErr(t *testing.T) { +func TestManagedCoreComponents(t *testing.T) { t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } - coreArgs := componentsMock.GetCoreArgs() - coreArgs.Config.Marshalizer = config.MarshalizerConfig{ - Type: "invalid_marshalizer_type", - SizeCheckDelta: 0, - } - coreComponentsFactory, _ := coreComp.NewCoreComponentsFactory(coreArgs) - managedCoreComponents, err := coreComp.NewManagedCoreComponents(coreComponentsFactory) - require.NoError(t, err) - err = managedCoreComponents.Create() - require.Error(t, err) - require.Nil(t, managedCoreComponents.InternalMarshalizer()) + t.Run("nil factory should error", func(t *testing.T) { + t.Parallel() + + managedCoreComponents, err := coreComp.NewManagedCoreComponents(nil) + require.Equal(t, errorsMx.ErrNilCoreComponentsFactory, err) + require.Nil(t, managedCoreComponents) + }) + t.Run("invalid args should error", func(t *testing.T) { + t.Parallel() + + coreArgs := componentsMock.GetCoreArgs() + coreArgs.Config.Marshalizer = config.MarshalizerConfig{ + Type: "invalid_marshalizer_type", + SizeCheckDelta: 0, + } + coreComponentsFactory, _ := coreComp.NewCoreComponentsFactory(coreArgs) + managedCoreComponents, err := coreComp.NewManagedCoreComponents(coreComponentsFactory) + require.NoError(t, err) + err = managedCoreComponents.Create() + require.Error(t, err) + require.Nil(t, managedCoreComponents.InternalMarshalizer()) + }) + t.Run("should work with getters", func(t *testing.T) { + t.Parallel() + + coreArgs := componentsMock.GetCoreArgs() + coreComponentsFactory, _ := coreComp.NewCoreComponentsFactory(coreArgs) + managedCoreComponents, err := coreComp.NewManagedCoreComponents(coreComponentsFactory) + require.NoError(t, err) + require.Nil(t, managedCoreComponents.Hasher()) + require.Nil(t, managedCoreComponents.InternalMarshalizer()) + require.Nil(t, managedCoreComponents.VmMarshalizer()) + require.Nil(t, managedCoreComponents.TxMarshalizer()) + require.Nil(t, managedCoreComponents.Uint64ByteSliceConverter()) + require.Nil(t, managedCoreComponents.AddressPubKeyConverter()) + require.Nil(t, managedCoreComponents.ValidatorPubKeyConverter()) + require.Nil(t, managedCoreComponents.PathHandler()) + require.Equal(t, "", managedCoreComponents.ChainID()) + require.Nil(t, managedCoreComponents.AddressPubKeyConverter()) + require.Nil(t, managedCoreComponents.EnableRoundsHandler()) + 
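+ // all of these getters are expected to return nil or zero values until Create() is called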
require.Nil(t, managedCoreComponents.WasmVMChangeLocker()) + require.Nil(t, managedCoreComponents.ProcessStatusHandler()) + require.True(t, len(managedCoreComponents.HardforkTriggerPubKey()) == 0) + require.Nil(t, managedCoreComponents.TxSignHasher()) + require.Zero(t, managedCoreComponents.MinTransactionVersion()) + require.Nil(t, managedCoreComponents.TxVersionChecker()) + require.Zero(t, managedCoreComponents.EncodedAddressLen()) + require.Nil(t, managedCoreComponents.AlarmScheduler()) + require.Nil(t, managedCoreComponents.SyncTimer()) + require.Equal(t, time.Time{}, managedCoreComponents.GenesisTime()) + require.Nil(t, managedCoreComponents.Watchdog()) + require.Nil(t, managedCoreComponents.EconomicsData()) + require.Nil(t, managedCoreComponents.APIEconomicsData()) + require.Nil(t, managedCoreComponents.RatingsData()) + require.Nil(t, managedCoreComponents.Rater()) + require.Nil(t, managedCoreComponents.GenesisNodesSetup()) + require.Nil(t, managedCoreComponents.RoundHandler()) + require.Nil(t, managedCoreComponents.NodesShuffler()) + require.Nil(t, managedCoreComponents.EpochNotifier()) + require.Nil(t, managedCoreComponents.EpochStartNotifierWithConfirm()) + require.Nil(t, managedCoreComponents.ChanStopNodeProcess()) + require.Nil(t, managedCoreComponents.NodeTypeProvider()) + require.Nil(t, managedCoreComponents.EnableEpochsHandler()) + + err = managedCoreComponents.Create() + require.NoError(t, err) + require.NotNil(t, managedCoreComponents.Hasher()) + require.NotNil(t, managedCoreComponents.InternalMarshalizer()) + require.NotNil(t, managedCoreComponents.VmMarshalizer()) + require.NotNil(t, managedCoreComponents.TxMarshalizer()) + require.NotNil(t, managedCoreComponents.Uint64ByteSliceConverter()) + require.NotNil(t, managedCoreComponents.AddressPubKeyConverter()) + require.NotNil(t, managedCoreComponents.ValidatorPubKeyConverter()) + require.NotNil(t, managedCoreComponents.PathHandler()) + require.NotEqual(t, "", managedCoreComponents.ChainID()) + require.NotNil(t, managedCoreComponents.AddressPubKeyConverter()) + require.NotNil(t, managedCoreComponents.EnableRoundsHandler()) + require.NotNil(t, managedCoreComponents.WasmVMChangeLocker()) + require.NotNil(t, managedCoreComponents.ProcessStatusHandler()) + expectedBytes, _ := managedCoreComponents.ValidatorPubKeyConverter().Decode(componentsMock.DummyPk) + require.Equal(t, expectedBytes, managedCoreComponents.HardforkTriggerPubKey()) + require.NotNil(t, managedCoreComponents.TxSignHasher()) + require.NotZero(t, managedCoreComponents.MinTransactionVersion()) + require.NotNil(t, managedCoreComponents.TxVersionChecker()) + require.NotZero(t, managedCoreComponents.EncodedAddressLen()) + require.NotNil(t, managedCoreComponents.AlarmScheduler()) + require.NotNil(t, managedCoreComponents.SyncTimer()) + require.NotNil(t, managedCoreComponents.GenesisTime()) + require.NotNil(t, managedCoreComponents.Watchdog()) + require.NotNil(t, managedCoreComponents.EconomicsData()) + require.NotNil(t, managedCoreComponents.APIEconomicsData()) + require.NotNil(t, managedCoreComponents.RatingsData()) + require.NotNil(t, managedCoreComponents.Rater()) + require.NotNil(t, managedCoreComponents.GenesisNodesSetup()) + require.NotNil(t, managedCoreComponents.RoundHandler()) + require.NotNil(t, managedCoreComponents.NodesShuffler()) + require.NotNil(t, managedCoreComponents.EpochNotifier()) + require.NotNil(t, managedCoreComponents.EpochStartNotifierWithConfirm()) + require.NotNil(t, managedCoreComponents.ChanStopNodeProcess()) + require.NotNil(t, 
managedCoreComponents.NodeTypeProvider()) + require.NotNil(t, managedCoreComponents.EnableEpochsHandler()) + require.Nil(t, managedCoreComponents.SetInternalMarshalizer(&testscommon.MarshalizerStub{})) + + require.Equal(t, factory.CoreComponentsName, managedCoreComponents.String()) + }) } -func TestManagedCoreComponents_CreateShouldWork(t *testing.T) { +func TestManagedCoreComponents_CheckSubcomponents(t *testing.T) { t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } coreArgs := componentsMock.GetCoreArgs() coreComponentsFactory, _ := coreComp.NewCoreComponentsFactory(coreArgs) managedCoreComponents, err := coreComp.NewManagedCoreComponents(coreComponentsFactory) require.NoError(t, err) - require.Nil(t, managedCoreComponents.Hasher()) - require.Nil(t, managedCoreComponents.InternalMarshalizer()) - require.Nil(t, managedCoreComponents.VmMarshalizer()) - require.Nil(t, managedCoreComponents.TxMarshalizer()) - require.Nil(t, managedCoreComponents.Uint64ByteSliceConverter()) - require.Nil(t, managedCoreComponents.AddressPubKeyConverter()) - require.Nil(t, managedCoreComponents.ValidatorPubKeyConverter()) - require.Nil(t, managedCoreComponents.PathHandler()) - require.Equal(t, "", managedCoreComponents.ChainID()) - require.Nil(t, managedCoreComponents.AddressPubKeyConverter()) - require.Nil(t, managedCoreComponents.EnableRoundsHandler()) - require.Nil(t, managedCoreComponents.WasmVMChangeLocker()) - require.Nil(t, managedCoreComponents.ProcessStatusHandler()) - require.True(t, len(managedCoreComponents.HardforkTriggerPubKey()) == 0) + require.Equal(t, errorsMx.ErrNilCoreComponents, managedCoreComponents.CheckSubcomponents()) err = managedCoreComponents.Create() require.NoError(t, err) - require.NotNil(t, managedCoreComponents.Hasher()) - require.NotNil(t, managedCoreComponents.InternalMarshalizer()) - require.NotNil(t, managedCoreComponents.VmMarshalizer()) - require.NotNil(t, managedCoreComponents.TxMarshalizer()) - require.NotNil(t, managedCoreComponents.Uint64ByteSliceConverter()) - require.NotNil(t, managedCoreComponents.AddressPubKeyConverter()) - require.NotNil(t, managedCoreComponents.ValidatorPubKeyConverter()) - require.NotNil(t, managedCoreComponents.PathHandler()) - require.NotEqual(t, "", managedCoreComponents.ChainID()) - require.NotNil(t, managedCoreComponents.AddressPubKeyConverter()) - require.NotNil(t, managedCoreComponents.EnableRoundsHandler()) - require.NotNil(t, managedCoreComponents.WasmVMChangeLocker()) - require.NotNil(t, managedCoreComponents.ProcessStatusHandler()) - expectedBytes, _ := managedCoreComponents.ValidatorPubKeyConverter().Decode(componentsMock.DummyPk) - require.Equal(t, expectedBytes, managedCoreComponents.HardforkTriggerPubKey()) + require.Nil(t, managedCoreComponents.CheckSubcomponents()) } func TestManagedCoreComponents_Close(t *testing.T) { t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } - coreArgs := componentsMock.GetCoreArgs() - coreComponentsFactory, _ := coreComp.NewCoreComponentsFactory(coreArgs) + coreComponentsFactory, _ := coreComp.NewCoreComponentsFactory(componentsMock.GetCoreArgs()) managedCoreComponents, _ := coreComp.NewManagedCoreComponents(coreComponentsFactory) err := managedCoreComponents.Close() require.NoError(t, err) err = managedCoreComponents.Create() require.NoError(t, err) + err = managedCoreComponents.Close() + require.NoError(t, err) +} +func TestManagedCoreComponents_IsInterfaceNil(t *testing.T) { + t.Parallel() + + managedCoreComponents, _ := 
coreComp.NewManagedCoreComponents(nil) + require.True(t, managedCoreComponents.IsInterfaceNil()) + + coreArgs := componentsMock.GetCoreArgs() + coreComponentsFactory, _ := coreComp.NewCoreComponentsFactory(coreArgs) + managedCoreComponents, _ = coreComp.NewManagedCoreComponents(coreComponentsFactory) + require.False(t, managedCoreComponents.IsInterfaceNil()) } diff --git a/factory/core/coreComponents_test.go b/factory/core/coreComponents_test.go index 6c8981e0812..79aba4a2532 100644 --- a/factory/core/coreComponents_test.go +++ b/factory/core/coreComponents_test.go @@ -5,7 +5,7 @@ import ( "testing" "github.com/multiversx/mx-chain-go/config" - errorsErd "github.com/multiversx/mx-chain-go/errors" + errorsMx "github.com/multiversx/mx-chain-go/errors" coreComp "github.com/multiversx/mx-chain-go/factory/core" "github.com/multiversx/mx-chain-go/state" componentsMock "github.com/multiversx/mx-chain-go/testscommon/components" @@ -14,9 +14,6 @@ import ( func TestNewCoreComponentsFactory_OkValuesShouldWork(t *testing.T) { t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } args := componentsMock.GetCoreArgs() ccf, _ := coreComp.NewCoreComponentsFactory(args) @@ -26,9 +23,6 @@ func TestNewCoreComponentsFactory_OkValuesShouldWork(t *testing.T) { func TestCoreComponentsFactory_CreateCoreComponentsNoHasherConfigShouldErr(t *testing.T) { t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } args := componentsMock.GetCoreArgs() args.Config = config.Config{ @@ -41,14 +35,11 @@ func TestCoreComponentsFactory_CreateCoreComponentsNoHasherConfigShouldErr(t *te cc, err := ccf.Create() require.Nil(t, cc) - require.True(t, errors.Is(err, errorsErd.ErrHasherCreation)) + require.True(t, errors.Is(err, errorsMx.ErrHasherCreation)) } func TestCoreComponentsFactory_CreateCoreComponentsInvalidHasherConfigShouldErr(t *testing.T) { t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } args := componentsMock.GetCoreArgs() args.Config = config.Config{ @@ -64,14 +55,11 @@ func TestCoreComponentsFactory_CreateCoreComponentsInvalidHasherConfigShouldErr( cc, err := ccf.Create() require.Nil(t, cc) - require.True(t, errors.Is(err, errorsErd.ErrHasherCreation)) + require.True(t, errors.Is(err, errorsMx.ErrHasherCreation)) } func TestCoreComponentsFactory_CreateCoreComponentsNoInternalMarshallerConfigShouldErr(t *testing.T) { t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } args := componentsMock.GetCoreArgs() args.Config = config.Config{ @@ -83,14 +71,11 @@ func TestCoreComponentsFactory_CreateCoreComponentsNoInternalMarshallerConfigSho cc, err := ccf.Create() require.Nil(t, cc) - require.True(t, errors.Is(err, errorsErd.ErrMarshalizerCreation)) + require.True(t, errors.Is(err, errorsMx.ErrMarshalizerCreation)) } func TestCoreComponentsFactory_CreateCoreComponentsInvalidInternalMarshallerConfigShouldErr(t *testing.T) { t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } args := componentsMock.GetCoreArgs() args.Config = config.Config{ @@ -106,14 +91,11 @@ func TestCoreComponentsFactory_CreateCoreComponentsInvalidInternalMarshallerConf cc, err := ccf.Create() require.Nil(t, cc) - require.True(t, errors.Is(err, errorsErd.ErrMarshalizerCreation)) + require.True(t, errors.Is(err, errorsMx.ErrMarshalizerCreation)) } func TestCoreComponentsFactory_CreateCoreComponentsNoVmMarshallerConfigShouldErr(t *testing.T) { t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } args := 
componentsMock.GetCoreArgs() args.Config = config.Config{ @@ -129,14 +111,11 @@ func TestCoreComponentsFactory_CreateCoreComponentsNoVmMarshallerConfigShouldErr cc, err := ccf.Create() require.Nil(t, cc) - require.True(t, errors.Is(err, errorsErd.ErrMarshalizerCreation)) + require.True(t, errors.Is(err, errorsMx.ErrMarshalizerCreation)) } func TestCoreComponentsFactory_CreateCoreComponentsInvalidVmMarshallerConfigShouldErr(t *testing.T) { t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } args := componentsMock.GetCoreArgs() args.Config = config.Config{ @@ -155,14 +134,11 @@ func TestCoreComponentsFactory_CreateCoreComponentsInvalidVmMarshallerConfigShou cc, err := ccf.Create() require.Nil(t, cc) - require.True(t, errors.Is(err, errorsErd.ErrMarshalizerCreation)) + require.True(t, errors.Is(err, errorsMx.ErrMarshalizerCreation)) } func TestCoreComponentsFactory_CreateCoreComponentsNoTxSignMarshallerConfigShouldErr(t *testing.T) { t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } args := componentsMock.GetCoreArgs() args.Config = config.Config{ @@ -181,14 +157,11 @@ func TestCoreComponentsFactory_CreateCoreComponentsNoTxSignMarshallerConfigShoul cc, err := ccf.Create() require.Nil(t, cc) - require.True(t, errors.Is(err, errorsErd.ErrMarshalizerCreation)) + require.True(t, errors.Is(err, errorsMx.ErrMarshalizerCreation)) } func TestCoreComponentsFactory_CreateCoreComponentsInvalidTxSignMarshallerConfigShouldErr(t *testing.T) { t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } args := componentsMock.GetCoreArgs() args.Config = config.Config{ @@ -210,14 +183,25 @@ func TestCoreComponentsFactory_CreateCoreComponentsInvalidTxSignMarshallerConfig cc, err := ccf.Create() require.Nil(t, cc) - require.True(t, errors.Is(err, errorsErd.ErrMarshalizerCreation)) + require.True(t, errors.Is(err, errorsMx.ErrMarshalizerCreation)) } -func TestCoreComponentsFactory_CreateCoreComponentsInvalidValPubKeyConverterShouldErr(t *testing.T) { +func TestCoreComponentsFactory_CreateCoreComponentsInvalidTxSignHasherConfigShouldErr(t *testing.T) { t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") + + args := componentsMock.GetCoreArgs() + args.Config.TxSignHasher = config.TypeConfig{ + Type: "invalid", } + ccf, _ := coreComp.NewCoreComponentsFactory(args) + + cc, err := ccf.Create() + require.Nil(t, cc) + require.True(t, errors.Is(err, errorsMx.ErrHasherCreation)) +} + +func TestCoreComponentsFactory_CreateCoreComponentsInvalidValPubKeyConverterShouldErr(t *testing.T) { + t.Parallel() args := componentsMock.GetCoreArgs() args.Config.ValidatorPubkeyConverter.Type = "invalid" @@ -230,9 +214,6 @@ func TestCoreComponentsFactory_CreateCoreComponentsInvalidValPubKeyConverterShou func TestCoreComponentsFactory_CreateCoreComponentsInvalidAddrPubKeyConverterShouldErr(t *testing.T) { t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } args := componentsMock.GetCoreArgs() args.Config.AddressPubkeyConverter.Type = "invalid" @@ -243,11 +224,92 @@ func TestCoreComponentsFactory_CreateCoreComponentsInvalidAddrPubKeyConverterSho require.True(t, errors.Is(err, state.ErrInvalidPubkeyConverterType)) } +func TestCoreComponentsFactory_CreateCoreComponentsNilChanStopNodeProcessShouldErr(t *testing.T) { + t.Parallel() + + args := componentsMock.GetCoreArgs() + args.ChanStopNodeProcess = nil + ccf, _ := coreComp.NewCoreComponentsFactory(args) + + cc, err := ccf.Create() + require.Nil(t, cc) + require.NotNil(t, err) 
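+ // a generic error check is enough here; the test only cares that Create() rejects a nil stop-node process channel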
+} + +func TestCoreComponentsFactory_CreateCoreComponentsInvalidRoundConfigShouldErr(t *testing.T) { + t.Parallel() + + args := componentsMock.GetCoreArgs() + args.RoundConfig = config.RoundConfig{} + ccf, _ := coreComp.NewCoreComponentsFactory(args) + + cc, err := ccf.Create() + require.Nil(t, cc) + require.NotNil(t, err) +} + +func TestCoreComponentsFactory_CreateCoreComponentsInvalidEpochConfigShouldErr(t *testing.T) { + t.Parallel() + + args := componentsMock.GetCoreArgs() + args.EpochConfig = config.EpochConfig{} + ccf, _ := coreComp.NewCoreComponentsFactory(args) + + cc, err := ccf.Create() + require.Nil(t, cc) + require.NotNil(t, err) +} + +func TestCoreComponentsFactory_CreateCoreComponentsInvalidGenesisMaxNumberOfShardsShouldErr(t *testing.T) { + t.Parallel() + + args := componentsMock.GetCoreArgs() + args.Config.GeneralSettings.GenesisMaxNumberOfShards = 0 + ccf, _ := coreComp.NewCoreComponentsFactory(args) + + cc, err := ccf.Create() + require.Nil(t, cc) + require.NotNil(t, err) +} + +func TestCoreComponentsFactory_CreateCoreComponentsInvalidEconomicsConfigShouldErr(t *testing.T) { + t.Parallel() + + args := componentsMock.GetCoreArgs() + args.EconomicsConfig = config.EconomicsConfig{} + ccf, _ := coreComp.NewCoreComponentsFactory(args) + + cc, err := ccf.Create() + require.Nil(t, cc) + require.NotNil(t, err) +} + +func TestCoreComponentsFactory_CreateCoreComponentsInvalidRatingsConfigShouldErr(t *testing.T) { + t.Parallel() + + args := componentsMock.GetCoreArgs() + args.RatingsConfig = config.RatingsConfig{} + ccf, _ := coreComp.NewCoreComponentsFactory(args) + + cc, err := ccf.Create() + require.Nil(t, cc) + require.NotNil(t, err) +} + +func TestCoreComponentsFactory_CreateCoreComponentsInvalidHardforkPubKeyShouldErr(t *testing.T) { + t.Parallel() + + args := componentsMock.GetCoreArgs() + args.Config.Hardfork.PublicKeyToListenFrom = "invalid" + ccf, _ := coreComp.NewCoreComponentsFactory(args) + + cc, err := ccf.Create() + require.Nil(t, cc) + require.NotNil(t, err) +} + func TestCoreComponentsFactory_CreateCoreComponentsShouldWork(t *testing.T) { t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } args := componentsMock.GetCoreArgs() ccf, _ := coreComp.NewCoreComponentsFactory(args) @@ -257,12 +319,21 @@ func TestCoreComponentsFactory_CreateCoreComponentsShouldWork(t *testing.T) { require.NotNil(t, cc) } +func TestCoreComponentsFactory_CreateCoreComponentsShouldWorkAfterHardfork(t *testing.T) { + t.Parallel() + + args := componentsMock.GetCoreArgs() + args.Config.Hardfork.AfterHardFork = true + ccf, _ := coreComp.NewCoreComponentsFactory(args) + + cc, err := ccf.Create() + require.NoError(t, err) + require.NotNil(t, cc) +} + // ------------ Test CoreComponents -------------------- func TestCoreComponents_CloseShouldWork(t *testing.T) { t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } args := componentsMock.GetCoreArgs() ccf, _ := coreComp.NewCoreComponentsFactory(args) diff --git a/factory/crypto/cryptoComponents.go b/factory/crypto/cryptoComponents.go index 6191ed2ab32..153f5486bd4 100644 --- a/factory/crypto/cryptoComponents.go +++ b/factory/crypto/cryptoComponents.go @@ -79,7 +79,6 @@ type cryptoParams struct { privateKey crypto.PrivateKey publicKeyString string publicKeyBytes []byte - privateKeyBytes []byte handledPrivateKeys [][]byte } @@ -357,11 +356,6 @@ func (ccf *cryptoComponentsFactory) readCryptoParams(keygen crypto.KeyGenerator) return nil, err } - cp.privateKeyBytes, err = cp.privateKey.ToByteArray() - if err 
!= nil { - return nil, err - } - cp.publicKey = cp.privateKey.GeneratePublic() if len(readPk) > 0 { cp.publicKeyBytes, err = cp.publicKey.ToByteArray() @@ -397,11 +391,6 @@ func (ccf *cryptoComponentsFactory) generateCryptoParams( return nil, err } - cp.privateKeyBytes, err = cp.privateKey.ToByteArray() - if err != nil { - return nil, err - } - cp.publicKeyString, err = ccf.validatorPubKeyConverter.Encode(cp.publicKeyBytes) if err != nil { return nil, err diff --git a/factory/crypto/cryptoComponentsHandler.go b/factory/crypto/cryptoComponentsHandler.go index 7238e2153c4..cca697174fa 100644 --- a/factory/crypto/cryptoComponentsHandler.go +++ b/factory/crypto/cryptoComponentsHandler.go @@ -199,18 +199,6 @@ func (mcc *managedCryptoComponents) PublicKeyBytes() []byte { return mcc.cryptoParams.publicKeyBytes } -// PrivateKeyBytes returns the configured validator private key bytes -func (mcc *managedCryptoComponents) PrivateKeyBytes() []byte { - mcc.mutCryptoComponents.RLock() - defer mcc.mutCryptoComponents.RUnlock() - - if mcc.cryptoComponents == nil { - return nil - } - - return mcc.cryptoParams.privateKeyBytes -} - // TxSingleSigner returns the transaction signer func (mcc *managedCryptoComponents) TxSingleSigner() crypto.SingleSigner { mcc.mutCryptoComponents.RLock() @@ -260,6 +248,10 @@ func (mcc *managedCryptoComponents) MultiSignerContainer() cryptoCommon.MultiSig // SetMultiSignerContainer sets the multiSigner container in the crypto components func (mcc *managedCryptoComponents) SetMultiSignerContainer(ms cryptoCommon.MultiSignerContainer) error { + if check.IfNil(ms) { + return errors.ErrNilMultiSignerContainer + } + mcc.mutCryptoComponents.Lock() mcc.multiSignerContainer = ms mcc.mutCryptoComponents.Unlock() diff --git a/factory/crypto/cryptoComponentsHandler_test.go b/factory/crypto/cryptoComponentsHandler_test.go index 45aed193e93..3a3ee7b2743 100644 --- a/factory/crypto/cryptoComponentsHandler_test.go +++ b/factory/crypto/cryptoComponentsHandler_test.go @@ -1,111 +1,155 @@ package crypto_test import ( + "strings" "testing" - "github.com/multiversx/mx-chain-go/errors" + errorsMx "github.com/multiversx/mx-chain-go/errors" "github.com/multiversx/mx-chain-go/factory" cryptoComp "github.com/multiversx/mx-chain-go/factory/crypto" + "github.com/multiversx/mx-chain-go/integrationTests/mock" componentsMock "github.com/multiversx/mx-chain-go/testscommon/components" "github.com/stretchr/testify/require" ) -// ------------ Test ManagedCryptoComponents -------------------- -func TestManagedCryptoComponents_CreateWithInvalidArgsShouldErr(t *testing.T) { +func TestManagedCryptoComponents(t *testing.T) { t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } - coreComponents := componentsMock.GetCoreComponents() - args := componentsMock.GetCryptoArgs(coreComponents) - args.Config.Consensus.Type = "invalid" - cryptoComponentsFactory, _ := cryptoComp.NewCryptoComponentsFactory(args) - managedCryptoComponents, err := cryptoComp.NewManagedCryptoComponents(cryptoComponentsFactory) - require.NoError(t, err) - err = managedCryptoComponents.Create() - require.Error(t, err) - require.Nil(t, managedCryptoComponents.BlockSignKeyGen()) + t.Run("nil factory should error", func(t *testing.T) { + t.Parallel() + + managedCryptoComponents, err := cryptoComp.NewManagedCryptoComponents(nil) + require.Equal(t, errorsMx.ErrNilCryptoComponentsFactory, err) + require.Nil(t, managedCryptoComponents) + }) + t.Run("invalid args should error", func(t *testing.T) { + t.Parallel() + + coreComponents := 
componentsMock.GetCoreComponents() + args := componentsMock.GetCryptoArgs(coreComponents) + args.Config.Consensus.Type = "invalid" + cryptoComponentsFactory, _ := cryptoComp.NewCryptoComponentsFactory(args) + managedCryptoComponents, err := cryptoComp.NewManagedCryptoComponents(cryptoComponentsFactory) + require.NoError(t, err) + err = managedCryptoComponents.Create() + require.Error(t, err) + require.Nil(t, managedCryptoComponents.BlockSignKeyGen()) + }) + t.Run("pub key mismatch", func(t *testing.T) { + t.Parallel() + + coreComponents := componentsMock.GetCoreComponents() + args := componentsMock.GetCryptoArgs(coreComponents) + args.Config.Consensus.Type = "disabled" + cryptoComponentsFactory, _ := cryptoComp.NewCryptoComponentsFactory(args) + managedCryptoComponents, err := cryptoComp.NewManagedCryptoComponents(cryptoComponentsFactory) + require.NoError(t, err) + err = managedCryptoComponents.Create() + require.True(t, strings.Contains(err.Error(), errorsMx.ErrPublicKeyMismatch.Error())) + }) + t.Run("should work with activateBLSPubKeyMessageVerification", func(t *testing.T) { + t.Parallel() + + coreComponents := componentsMock.GetCoreComponents() + args := componentsMock.GetCryptoArgs(coreComponents) + args.ActivateBLSPubKeyMessageVerification = true + cryptoComponentsFactory, _ := cryptoComp.NewCryptoComponentsFactory(args) + managedCryptoComponents, err := cryptoComp.NewManagedCryptoComponents(cryptoComponentsFactory) + require.NoError(t, err) + err = managedCryptoComponents.Create() + require.NoError(t, err) + }) + t.Run("should work with getters", func(t *testing.T) { + t.Parallel() + + coreComponents := componentsMock.GetCoreComponents() + args := componentsMock.GetCryptoArgs(coreComponents) + cryptoComponentsFactory, _ := cryptoComp.NewCryptoComponentsFactory(args) + managedCryptoComponents, err := cryptoComp.NewManagedCryptoComponents(cryptoComponentsFactory) + require.NoError(t, err) + require.Nil(t, managedCryptoComponents.TxSingleSigner()) + require.Nil(t, managedCryptoComponents.BlockSigner()) + require.Nil(t, managedCryptoComponents.MultiSignerContainer()) + require.Nil(t, managedCryptoComponents.BlockSignKeyGen()) + require.Nil(t, managedCryptoComponents.TxSignKeyGen()) + require.Nil(t, managedCryptoComponents.MessageSignVerifier()) + require.Nil(t, managedCryptoComponents.PublicKey()) + require.Nil(t, managedCryptoComponents.PrivateKey()) + require.Nil(t, managedCryptoComponents.P2pPrivateKey()) + require.Nil(t, managedCryptoComponents.P2pPublicKey()) + require.Empty(t, managedCryptoComponents.PublicKeyString()) + require.Nil(t, managedCryptoComponents.PublicKeyBytes()) + require.Nil(t, managedCryptoComponents.P2pPrivateKey()) + require.Nil(t, managedCryptoComponents.P2pSingleSigner()) + require.Nil(t, managedCryptoComponents.PeerSignatureHandler()) + require.Nil(t, managedCryptoComponents.P2pKeyGen()) + require.Nil(t, managedCryptoComponents.ManagedPeersHolder()) + multiSigner, errGet := managedCryptoComponents.GetMultiSigner(0) + require.Nil(t, multiSigner) + require.Equal(t, errorsMx.ErrNilCryptoComponentsHolder, errGet) + + err = managedCryptoComponents.Create() + require.NoError(t, err) + require.NotNil(t, managedCryptoComponents.TxSingleSigner()) + require.NotNil(t, managedCryptoComponents.BlockSigner()) + require.NotNil(t, managedCryptoComponents.MultiSignerContainer()) + multiSigner, errGet = managedCryptoComponents.GetMultiSigner(0) + require.NotNil(t, multiSigner) + require.Nil(t, errGet) + require.NotNil(t, managedCryptoComponents.BlockSignKeyGen()) + 
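+ // once Create() succeeds, the same getters must return non-nil subcomponents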
require.NotNil(t, managedCryptoComponents.TxSignKeyGen()) + require.NotNil(t, managedCryptoComponents.MessageSignVerifier()) + require.NotNil(t, managedCryptoComponents.PublicKey()) + require.NotNil(t, managedCryptoComponents.PrivateKey()) + require.NotNil(t, managedCryptoComponents.P2pPrivateKey()) + require.NotNil(t, managedCryptoComponents.P2pPublicKey()) + require.NotEmpty(t, managedCryptoComponents.PublicKeyString()) + require.NotNil(t, managedCryptoComponents.PublicKeyBytes()) + require.NotNil(t, managedCryptoComponents.P2pSingleSigner()) + require.NotNil(t, managedCryptoComponents.PeerSignatureHandler()) + require.NotNil(t, managedCryptoComponents.P2pKeyGen()) + require.NotNil(t, managedCryptoComponents.ManagedPeersHolder()) + + require.Equal(t, factory.CryptoComponentsName, managedCryptoComponents.String()) + + err = managedCryptoComponents.Close() + require.NoError(t, err) + + err = managedCryptoComponents.Close() + require.NoError(t, err) + }) } -func TestManagedCryptoComponents_CreateShouldWork(t *testing.T) { +func TestNewManagedCryptoComponents_CheckSubcomponents(t *testing.T) { t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } coreComponents := componentsMock.GetCoreComponents() args := componentsMock.GetCryptoArgs(coreComponents) cryptoComponentsFactory, _ := cryptoComp.NewCryptoComponentsFactory(args) managedCryptoComponents, err := cryptoComp.NewManagedCryptoComponents(cryptoComponentsFactory) require.NoError(t, err) - require.Nil(t, managedCryptoComponents.TxSingleSigner()) - require.Nil(t, managedCryptoComponents.BlockSigner()) - require.Nil(t, managedCryptoComponents.MultiSignerContainer()) - require.Nil(t, managedCryptoComponents.BlockSignKeyGen()) - require.Nil(t, managedCryptoComponents.TxSignKeyGen()) - require.Nil(t, managedCryptoComponents.MessageSignVerifier()) - require.Nil(t, managedCryptoComponents.ManagedPeersHolder()) + require.Equal(t, errorsMx.ErrNilCryptoComponents, managedCryptoComponents.CheckSubcomponents()) err = managedCryptoComponents.Create() require.NoError(t, err) - require.NotNil(t, managedCryptoComponents.TxSingleSigner()) - require.NotNil(t, managedCryptoComponents.BlockSigner()) - require.NotNil(t, managedCryptoComponents.MultiSignerContainer()) - multiSigner, errGet := managedCryptoComponents.MultiSignerContainer().GetMultiSigner(0) - require.NotNil(t, multiSigner) - require.Nil(t, errGet) - require.NotNil(t, managedCryptoComponents.BlockSignKeyGen()) - require.NotNil(t, managedCryptoComponents.TxSignKeyGen()) - require.NotNil(t, managedCryptoComponents.MessageSignVerifier()) - require.NotNil(t, managedCryptoComponents.ManagedPeersHolder()) -} - -func TestManagedCryptoComponents_CheckSubcomponents(t *testing.T) { - t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } - - managedCryptoComponents := getManagedCryptoComponents(t) - - err := managedCryptoComponents.CheckSubcomponents() - require.NoError(t, err) + require.Nil(t, managedCryptoComponents.CheckSubcomponents()) } -func TestManagedCryptoComponents_Close(t *testing.T) { +func TestNewManagedCryptoComponents_SetMultiSignerContainer(t *testing.T) { t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } - - managedCryptoComponents := getManagedCryptoComponents(t) - - err := managedCryptoComponents.Close() - require.NoError(t, err) - multiSigner, errGet := managedCryptoComponents.GetMultiSigner(0) - require.Nil(t, multiSigner) - require.Equal(t, errors.ErrNilCryptoComponentsHolder, errGet) -} -func 
getManagedCryptoComponents(t *testing.T) factory.CryptoComponentsHandler { coreComponents := componentsMock.GetCoreComponents() args := componentsMock.GetCryptoArgs(coreComponents) cryptoComponentsFactory, _ := cryptoComp.NewCryptoComponentsFactory(args) - require.NotNil(t, cryptoComponentsFactory) managedCryptoComponents, _ := cryptoComp.NewManagedCryptoComponents(cryptoComponentsFactory) - require.NotNil(t, managedCryptoComponents) - err := managedCryptoComponents.Create() - require.NoError(t, err) + _ = managedCryptoComponents.Create() - return managedCryptoComponents + require.Equal(t, errorsMx.ErrNilMultiSignerContainer, managedCryptoComponents.SetMultiSignerContainer(nil)) + require.Nil(t, managedCryptoComponents.SetMultiSignerContainer(&mock.CryptoComponentsStub{})) } func TestManagedCryptoComponents_Clone(t *testing.T) { t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } coreComponents := componentsMock.GetCoreComponents() args := componentsMock.GetCryptoArgs(coreComponents) @@ -125,3 +169,18 @@ func TestManagedCryptoComponents_Clone(t *testing.T) { clonedAfterClose := managedCryptoComponents.Clone() require.Equal(t, managedCryptoComponents, clonedAfterClose) } + +func TestNewManagedCryptoComponents_IsInterfaceNil(t *testing.T) { + t.Parallel() + + managedCryptoComponents, err := cryptoComp.NewManagedCryptoComponents(nil) + require.Equal(t, errorsMx.ErrNilCryptoComponentsFactory, err) + require.True(t, managedCryptoComponents.IsInterfaceNil()) + + coreComponents := componentsMock.GetCoreComponents() + args := componentsMock.GetCryptoArgs(coreComponents) + cryptoComponentsFactory, _ := cryptoComp.NewCryptoComponentsFactory(args) + managedCryptoComponents, err = cryptoComp.NewManagedCryptoComponents(cryptoComponentsFactory) + require.NoError(t, err) + require.False(t, managedCryptoComponents.IsInterfaceNil()) +} diff --git a/factory/crypto/cryptoComponents_test.go b/factory/crypto/cryptoComponents_test.go index dc3b1541a79..1593cd3f234 100644 --- a/factory/crypto/cryptoComponents_test.go +++ b/factory/crypto/cryptoComponents_test.go @@ -19,9 +19,6 @@ import ( func TestNewCryptoComponentsFactory_NilCoreComponentsHandlerShouldErr(t *testing.T) { t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } args := componentsMock.GetCryptoArgs(nil) ccf, err := cryptoComp.NewCryptoComponentsFactory(args) @@ -47,9 +44,6 @@ func TestNewCryptoComponentsFactory_NilValidatorPublicKeyConverterShouldErr(t *t func TestNewCryptoComponentsFactory_NilPemFileShouldErr(t *testing.T) { t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } coreComponents := componentsMock.GetCoreComponents() args := componentsMock.GetCryptoArgs(coreComponents) @@ -61,9 +55,6 @@ func TestNewCryptoComponentsFactory_NilPemFileShouldErr(t *testing.T) { func TestCryptoComponentsFactory_CreateCryptoParamsNilKeyLoaderShouldErr(t *testing.T) { t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } coreComponents := componentsMock.GetCoreComponents() args := componentsMock.GetCryptoArgs(coreComponents) @@ -76,9 +67,6 @@ func TestCryptoComponentsFactory_CreateCryptoParamsNilKeyLoaderShouldErr(t *test func TestNewCryptoComponentsFactory_OkValsShouldWork(t *testing.T) { t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } coreComponents := componentsMock.GetCoreComponents() args := componentsMock.GetCryptoArgs(coreComponents) @@ -89,9 +77,6 @@ func TestNewCryptoComponentsFactory_OkValsShouldWork(t *testing.T) { func 
TestNewCryptoComponentsFactory_DisabledSigShouldWork(t *testing.T) { t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } coreComponents := componentsMock.GetCoreComponents() args := componentsMock.GetCryptoArgs(coreComponents) @@ -103,9 +88,6 @@ func TestNewCryptoComponentsFactory_DisabledSigShouldWork(t *testing.T) { func TestNewCryptoComponentsFactory_CreateInvalidConsensusTypeShouldErr(t *testing.T) { t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } coreComponents := componentsMock.GetCoreComponents() args := componentsMock.GetCryptoArgs(coreComponents) @@ -119,9 +101,6 @@ func TestNewCryptoComponentsFactory_CreateInvalidConsensusTypeShouldErr(t *testi func TestCryptoComponentsFactory_CreateShouldErrDueToMissingConfig(t *testing.T) { t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } coreComponents := componentsMock.GetCoreComponents() args := componentsMock.GetCryptoArgs(coreComponents) @@ -142,9 +121,6 @@ func TestCryptoComponentsFactory_CreateShouldErrDueToMissingConfig(t *testing.T) func TestCryptoComponentsFactory_CreateInvalidMultiSigHasherShouldErr(t *testing.T) { t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } coreComponents := componentsMock.GetCoreComponents() args := componentsMock.GetCryptoArgs(coreComponents) @@ -159,9 +135,6 @@ func TestCryptoComponentsFactory_CreateInvalidMultiSigHasherShouldErr(t *testing func TestCryptoComponentsFactory_CreateOK(t *testing.T) { t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } coreComponents := componentsMock.GetCoreComponents() args := componentsMock.GetCryptoArgs(coreComponents) @@ -171,13 +144,11 @@ func TestCryptoComponentsFactory_CreateOK(t *testing.T) { require.NoError(t, err) require.NotNil(t, cc) assert.Equal(t, 0, len(cc.GetManagedPeersHolder().GetManagedKeysByCurrentNode())) + assert.Nil(t, cc.Close()) } func TestCryptoComponentsFactory_CreateWithDisabledSig(t *testing.T) { t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } coreComponents := componentsMock.GetCoreComponents() args := componentsMock.GetCryptoArgs(coreComponents) @@ -192,9 +163,6 @@ func TestCryptoComponentsFactory_CreateWithDisabledSig(t *testing.T) { func TestCryptoComponentsFactory_CreateWithAutoGenerateKey(t *testing.T) { t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } coreComponents := componentsMock.GetCoreComponents() args := componentsMock.GetCryptoArgs(coreComponents) @@ -209,9 +177,6 @@ func TestCryptoComponentsFactory_CreateWithAutoGenerateKey(t *testing.T) { func TestCryptoComponentsFactory_CreateSingleSignerInvalidConsensusTypeShouldErr(t *testing.T) { t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } coreComponents := componentsMock.GetCoreComponents() args := componentsMock.GetCryptoArgs(coreComponents) @@ -227,9 +192,6 @@ func TestCryptoComponentsFactory_CreateSingleSignerInvalidConsensusTypeShouldErr func TestCryptoComponentsFactory_CreateSingleSignerOK(t *testing.T) { t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } coreComponents := componentsMock.GetCoreComponents() args := componentsMock.GetCryptoArgs(coreComponents) @@ -244,9 +206,6 @@ func TestCryptoComponentsFactory_CreateSingleSignerOK(t *testing.T) { func TestCryptoComponentsFactory_CreateMultiSignerInvalidConsensusTypeShouldErr(t *testing.T) { t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } coreComponents := 
componentsMock.GetCoreComponents() args := componentsMock.GetCryptoArgs(coreComponents) @@ -262,9 +221,6 @@ func TestCryptoComponentsFactory_CreateMultiSignerInvalidConsensusTypeShouldErr( func TestCryptoComponentsFactory_CreateMultiSignerOK(t *testing.T) { t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } coreComponents := componentsMock.GetCoreComponents() args := componentsMock.GetCryptoArgs(coreComponents) @@ -282,9 +238,6 @@ func TestCryptoComponentsFactory_CreateMultiSignerOK(t *testing.T) { func TestCryptoComponentsFactory_GetSuiteInvalidConsensusTypeShouldErr(t *testing.T) { t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } coreComponents := componentsMock.GetCoreComponents() args := componentsMock.GetCryptoArgs(coreComponents) @@ -300,9 +253,6 @@ func TestCryptoComponentsFactory_GetSuiteInvalidConsensusTypeShouldErr(t *testin func TestCryptoComponentsFactory_GetSuiteOK(t *testing.T) { t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } coreComponents := componentsMock.GetCoreComponents() args := componentsMock.GetCryptoArgs(coreComponents) @@ -318,9 +268,6 @@ func TestCryptoComponentsFactory_GetSuiteOK(t *testing.T) { func TestCryptoComponentsFactory_CreateCryptoParamsInvalidPrivateKeyByteArrayShouldErr(t *testing.T) { t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } coreComponents := componentsMock.GetCoreComponents() args := componentsMock.GetCryptoArgs(coreComponents) @@ -337,9 +284,6 @@ func TestCryptoComponentsFactory_CreateCryptoParamsInvalidPrivateKeyByteArraySho func TestCryptoComponentsFactory_CreateCryptoParamsLoadKeysFailShouldErr(t *testing.T) { t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } expectedError := errors.New("expected error") @@ -358,9 +302,6 @@ func TestCryptoComponentsFactory_CreateCryptoParamsLoadKeysFailShouldErr(t *test func TestCryptoComponentsFactory_CreateCryptoParamsOK(t *testing.T) { t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } coreComponents := componentsMock.GetCoreComponents() args := componentsMock.GetCryptoArgs(coreComponents) @@ -376,9 +317,6 @@ func TestCryptoComponentsFactory_CreateCryptoParamsOK(t *testing.T) { func TestCryptoComponentsFactory_GetSkPkInvalidSkBytesShouldErr(t *testing.T) { t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } setSk := []byte("zxwY") setPk := []byte(componentsMock.DummyPk) @@ -395,9 +333,6 @@ func TestCryptoComponentsFactory_GetSkPkInvalidSkBytesShouldErr(t *testing.T) { func TestCryptoComponentsFactory_GetSkPkInvalidPkBytesShouldErr(t *testing.T) { t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } setSk := []byte(componentsMock.DummySk) setPk := "0" @@ -415,9 +350,6 @@ func TestCryptoComponentsFactory_GetSkPkInvalidPkBytesShouldErr(t *testing.T) { func TestCryptoComponentsFactory_GetSkPkOK(t *testing.T) { t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } coreComponents := componentsMock.GetCoreComponents() args := componentsMock.GetCryptoArgs(coreComponents) diff --git a/factory/crypto/multiSignerContainer_test.go b/factory/crypto/multiSignerContainer_test.go index 82ed4b0d1a7..6a392b46d72 100644 --- a/factory/crypto/multiSignerContainer_test.go +++ b/factory/crypto/multiSignerContainer_test.go @@ -269,9 +269,6 @@ func TestContainer_sortMultiSignerConfig(t *testing.T) { func Test_getMultiSigHasherFromConfigInvalidHasherShouldErr(t *testing.T) { t.Parallel() - if 
testing.Short() { - t.Skip("this is not a short test") - } args := createDefaultMultiSignerArgs() args.ConsensusType = "" @@ -284,9 +281,6 @@ func Test_getMultiSigHasherFromConfigInvalidHasherShouldErr(t *testing.T) { func Test_getMultiSigHasherFromConfigMismatchConsensusTypeMultiSigHasher(t *testing.T) { t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } args := createDefaultMultiSignerArgs() args.MultiSigHasherType = "sha256" @@ -298,9 +292,6 @@ func Test_getMultiSigHasherFromConfigMismatchConsensusTypeMultiSigHasher(t *test func Test_getMultiSigHasherFromConfigOK(t *testing.T) { t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } args := createDefaultMultiSignerArgs() args.ConsensusType = "bls" diff --git a/factory/data/dataComponents.go b/factory/data/dataComponents.go index 8179e5db715..0585a6c4dad 100644 --- a/factory/data/dataComponents.go +++ b/factory/data/dataComponents.go @@ -6,6 +6,7 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/dataRetriever/blockchain" @@ -28,6 +29,7 @@ type DataComponentsFactoryArgs struct { Crypto factory.CryptoComponentsHolder CurrentEpoch uint32 CreateTrieEpochRootHashStorer bool + NodeProcessingMode common.NodeProcessingMode SnapshotsEnabled bool } @@ -40,6 +42,7 @@ type dataComponentsFactory struct { crypto factory.CryptoComponentsHolder currentEpoch uint32 createTrieEpochRootHashStorer bool + nodeProcessingMode common.NodeProcessingMode snapshotsEnabled bool } @@ -61,27 +64,12 @@ func NewDataComponentsFactory(args DataComponentsFactoryArgs) (*dataComponentsFa if check.IfNil(args.Core) { return nil, errors.ErrNilCoreComponents } - if check.IfNil(args.Core.PathHandler()) { - return nil, errors.ErrNilPathHandler - } - if check.IfNil(args.Core.EpochStartNotifierWithConfirm()) { - return nil, errors.ErrNilEpochStartNotifier - } - if check.IfNil(args.Core.EconomicsData()) { - return nil, errors.ErrNilEconomicsHandler - } if check.IfNil(args.StatusCore) { return nil, errors.ErrNilStatusCoreComponents } - if check.IfNil(args.StatusCore.AppStatusHandler()) { - return nil, errors.ErrNilAppStatusHandler - } if check.IfNil(args.Crypto) { return nil, errors.ErrNilCryptoComponents } - if check.IfNil(args.Crypto.ManagedPeersHolder()) { - return nil, errors.ErrNilManagedPeersHolder - } return &dataComponentsFactory{ config: args.Config, @@ -91,6 +79,7 @@ func NewDataComponentsFactory(args DataComponentsFactoryArgs) (*dataComponentsFa statusCore: args.StatusCore, currentEpoch: args.CurrentEpoch, createTrieEpochRootHashStorer: args.CreateTrieEpochRootHashStorer, + nodeProcessingMode: args.NodeProcessingMode, snapshotsEnabled: args.SnapshotsEnabled, crypto: args.Crypto, }, nil @@ -181,6 +170,7 @@ func (dcf *dataComponentsFactory) createDataStoreFromConfig() (dataRetriever.Sto CurrentEpoch: dcf.currentEpoch, StorageType: storageFactory.ProcessStorageService, CreateTrieEpochRootHashStorer: dcf.createTrieEpochRootHashStorer, + NodeProcessingMode: dcf.nodeProcessingMode, SnapshotsEnabled: dcf.snapshotsEnabled, ManagedPeersHolder: dcf.crypto.ManagedPeersHolder(), }) diff --git a/factory/data/dataComponentsHandler.go b/factory/data/dataComponentsHandler.go index 019633511e1..0c7c3501893 100644 --- a/factory/data/dataComponentsHandler.go +++ 
b/factory/data/dataComponentsHandler.go @@ -103,10 +103,16 @@ func (mdc *managedDataComponents) Blockchain() data.ChainHandler { } // SetBlockchain sets the blockchain subcomponent -func (mdc *managedDataComponents) SetBlockchain(chain data.ChainHandler) { +func (mdc *managedDataComponents) SetBlockchain(chain data.ChainHandler) error { + if check.IfNil(chain) { + return errors.ErrNilBlockChainHandler + } + mdc.mutDataComponents.Lock() mdc.blkc = chain mdc.mutDataComponents.Unlock() + + return nil } // StorageService returns the storage service diff --git a/factory/data/dataComponentsHandler_test.go b/factory/data/dataComponentsHandler_test.go index f59f99e6948..7c1e2fef913 100644 --- a/factory/data/dataComponentsHandler_test.go +++ b/factory/data/dataComponentsHandler_test.go @@ -4,59 +4,113 @@ import ( "testing" "github.com/multiversx/mx-chain-go/config" + errorsMx "github.com/multiversx/mx-chain-go/errors" + "github.com/multiversx/mx-chain-go/factory" dataComp "github.com/multiversx/mx-chain-go/factory/data" "github.com/multiversx/mx-chain-go/factory/mock" + "github.com/multiversx/mx-chain-go/testscommon" componentsMock "github.com/multiversx/mx-chain-go/testscommon/components" "github.com/stretchr/testify/require" ) -// ------------ Test ManagedDataComponents -------------------- -func TestManagedDataComponents_CreateWithInvalidArgsShouldErr(t *testing.T) { +func TestNewManagedDataComponents(t *testing.T) { + t.Parallel() + + t.Run("nil factory should error", func(t *testing.T) { + t.Parallel() + + managedDataComponents, err := dataComp.NewManagedDataComponents(nil) + require.Equal(t, errorsMx.ErrNilDataComponentsFactory, err) + require.Nil(t, managedDataComponents) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + coreComponents := componentsMock.GetCoreComponents() + shardCoordinator := mock.NewMultiShardsCoordinatorMock(2) + args := componentsMock.GetDataArgs(coreComponents, shardCoordinator) + dataComponentsFactory, _ := dataComp.NewDataComponentsFactory(args) + managedDataComponents, err := dataComp.NewManagedDataComponents(dataComponentsFactory) + require.Nil(t, err) + require.NotNil(t, managedDataComponents) + }) +} + +func TestManagedDataComponents_Create(t *testing.T) { + t.Parallel() + + t.Run("invalid config should error", func(t *testing.T) { + t.Parallel() + + coreComponents := componentsMock.GetCoreComponents() + shardCoordinator := mock.NewMultiShardsCoordinatorMock(2) + args := componentsMock.GetDataArgs(coreComponents, shardCoordinator) + args.Config.ShardHdrNonceHashStorage = config.StorageConfig{} + dataComponentsFactory, _ := dataComp.NewDataComponentsFactory(args) + managedDataComponents, err := dataComp.NewManagedDataComponents(dataComponentsFactory) + require.NoError(t, err) + err = managedDataComponents.Create() + require.Error(t, err) + require.Nil(t, managedDataComponents.Blockchain()) + }) + t.Run("should work with getters", func(t *testing.T) { + t.Parallel() + + coreComponents := componentsMock.GetCoreComponents() + shardCoordinator := mock.NewMultiShardsCoordinatorMock(2) + args := componentsMock.GetDataArgs(coreComponents, shardCoordinator) + dataComponentsFactory, _ := dataComp.NewDataComponentsFactory(args) + managedDataComponents, err := dataComp.NewManagedDataComponents(dataComponentsFactory) + require.NoError(t, err) + require.Nil(t, managedDataComponents.Blockchain()) + require.Nil(t, managedDataComponents.StorageService()) + require.Nil(t, managedDataComponents.Datapool()) + require.Nil(t, 
managedDataComponents.MiniBlocksProvider()) + + err = managedDataComponents.Create() + require.NoError(t, err) + require.NotNil(t, managedDataComponents.Blockchain()) + require.NotNil(t, managedDataComponents.StorageService()) + require.NotNil(t, managedDataComponents.Datapool()) + require.NotNil(t, managedDataComponents.MiniBlocksProvider()) + + require.Equal(t, factory.DataComponentsName, managedDataComponents.String()) + }) +} + +func TestManagedDataComponents_CheckSubcomponents(t *testing.T) { t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } coreComponents := componentsMock.GetCoreComponents() shardCoordinator := mock.NewMultiShardsCoordinatorMock(2) args := componentsMock.GetDataArgs(coreComponents, shardCoordinator) - args.Config.ShardHdrNonceHashStorage = config.StorageConfig{} dataComponentsFactory, _ := dataComp.NewDataComponentsFactory(args) managedDataComponents, err := dataComp.NewManagedDataComponents(dataComponentsFactory) require.NoError(t, err) + require.Equal(t, errorsMx.ErrNilDataComponents, managedDataComponents.CheckSubcomponents()) + err = managedDataComponents.Create() - require.Error(t, err) - require.Nil(t, managedDataComponents.Blockchain()) + require.NoError(t, err) + require.Nil(t, managedDataComponents.CheckSubcomponents()) } -func TestManagedDataComponents_CreateShouldWork(t *testing.T) { +func TestManagedDataComponents_SetBlockchain(t *testing.T) { t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } coreComponents := componentsMock.GetCoreComponents() shardCoordinator := mock.NewMultiShardsCoordinatorMock(2) args := componentsMock.GetDataArgs(coreComponents, shardCoordinator) dataComponentsFactory, _ := dataComp.NewDataComponentsFactory(args) - managedDataComponents, err := dataComp.NewManagedDataComponents(dataComponentsFactory) - require.NoError(t, err) - require.Nil(t, managedDataComponents.Blockchain()) - require.Nil(t, managedDataComponents.StorageService()) - require.Nil(t, managedDataComponents.Datapool()) + managedDataComponents, _ := dataComp.NewManagedDataComponents(dataComponentsFactory) - err = managedDataComponents.Create() - require.NoError(t, err) - require.NotNil(t, managedDataComponents.Blockchain()) - require.NotNil(t, managedDataComponents.StorageService()) - require.NotNil(t, managedDataComponents.Datapool()) + _ = managedDataComponents.Create() + + require.Equal(t, errorsMx.ErrNilBlockChainHandler, managedDataComponents.SetBlockchain(nil)) + require.Nil(t, managedDataComponents.SetBlockchain(&testscommon.ChainHandlerMock{})) } func TestManagedDataComponents_Close(t *testing.T) { t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } coreComponents := componentsMock.GetCoreComponents() shardCoordinator := mock.NewMultiShardsCoordinatorMock(2) @@ -68,14 +122,13 @@ func TestManagedDataComponents_Close(t *testing.T) { err = managedDataComponents.Close() require.NoError(t, err) - require.Nil(t, managedDataComponents.Blockchain()) + + err = managedDataComponents.Close() + require.NoError(t, err) } func TestManagedDataComponents_Clone(t *testing.T) { t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } coreComponents := componentsMock.GetCoreComponents() shardCoordinator := mock.NewMultiShardsCoordinatorMock(2) @@ -94,3 +147,17 @@ func TestManagedDataComponents_Clone(t *testing.T) { clonedAfterClose := managedDataComponents.Clone() require.Equal(t, managedDataComponents, clonedAfterClose) } + +func TestManagedDataComponents_IsInterfaceNil(t 
*testing.T) { + t.Parallel() + + managedDataComponents, _ := dataComp.NewManagedDataComponents(nil) + require.True(t, managedDataComponents.IsInterfaceNil()) + + coreComponents := componentsMock.GetCoreComponents() + shardCoordinator := mock.NewMultiShardsCoordinatorMock(2) + args := componentsMock.GetDataArgs(coreComponents, shardCoordinator) + dataComponentsFactory, _ := dataComp.NewDataComponentsFactory(args) + managedDataComponents, _ = dataComp.NewManagedDataComponents(dataComponentsFactory) + require.False(t, managedDataComponents.IsInterfaceNil()) +} diff --git a/factory/data/dataComponents_test.go b/factory/data/dataComponents_test.go index 2eac8430020..2be2d501378 100644 --- a/factory/data/dataComponents_test.go +++ b/factory/data/dataComponents_test.go @@ -1,191 +1,229 @@ package data_test import ( + "errors" "testing" "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-go/config" - "github.com/multiversx/mx-chain-go/errors" + errorsMx "github.com/multiversx/mx-chain-go/errors" dataComp "github.com/multiversx/mx-chain-go/factory/data" "github.com/multiversx/mx-chain-go/factory/mock" - "github.com/multiversx/mx-chain-go/testscommon" componentsMock "github.com/multiversx/mx-chain-go/testscommon/components" + "github.com/multiversx/mx-chain-go/testscommon/factory" "github.com/stretchr/testify/require" ) -func TestNewDataComponentsFactory_NilShardCoordinatorShouldErr(t *testing.T) { +func TestNewDataComponentsFactory(t *testing.T) { t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } - shardCoordinator := mock.NewMultiShardsCoordinatorMock(2) - coreComponents := componentsMock.GetCoreComponents() - args := componentsMock.GetDataArgs(coreComponents, shardCoordinator) - args.ShardCoordinator = nil - - dcf, err := dataComp.NewDataComponentsFactory(args) - require.Nil(t, dcf) - require.Equal(t, errors.ErrNilShardCoordinator, err) -} - -func TestNewDataComponentsFactory_NilCoreComponentsShouldErr(t *testing.T) { - t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } - - shardCoordinator := mock.NewMultiShardsCoordinatorMock(2) - args := componentsMock.GetDataArgs(nil, shardCoordinator) - args.Core = nil - - dcf, err := dataComp.NewDataComponentsFactory(args) - require.Nil(t, dcf) - require.Equal(t, errors.ErrNilCoreComponents, err) -} - -func TestNewDataComponentsFactory_NilCryptoComponentsShouldErr(t *testing.T) { - t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } - - shardCoordinator := mock.NewMultiShardsCoordinatorMock(2) - args := componentsMock.GetDataArgs(componentsMock.GetCoreComponents(), shardCoordinator) - args.Crypto = nil - - dcf, err := dataComp.NewDataComponentsFactory(args) - require.Nil(t, dcf) - require.Equal(t, errors.ErrNilCryptoComponents, err) -} - -func TestNewDataComponentsFactory_NilManagedPeersHolderShouldErr(t *testing.T) { - t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } - - shardCoordinator := mock.NewMultiShardsCoordinatorMock(2) - args := componentsMock.GetDataArgs(componentsMock.GetCoreComponents(), shardCoordinator) - args.Crypto = &mock.CryptoComponentsMock{ - ManagedPeersHolderField: nil, - } - - dcf, err := dataComp.NewDataComponentsFactory(args) - require.Nil(t, dcf) - require.Equal(t, errors.ErrNilManagedPeersHolder, err) -} - -func TestNewDataComponentsFactory_NilPathHandlerShouldErr(t *testing.T) { - t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } - - shardCoordinator := 
mock.NewMultiShardsCoordinatorMock(2) - coreComponents := &mock.CoreComponentsMock{ - PathHdl: nil, - } - - args := componentsMock.GetDataArgs(coreComponents, shardCoordinator) - - dcf, err := dataComp.NewDataComponentsFactory(args) - require.Nil(t, dcf) - require.Equal(t, errors.ErrNilPathHandler, err) -} - -func TestNewDataComponentsFactory_NilEpochStartNotifierShouldErr(t *testing.T) { - t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } - - shardCoordinator := mock.NewMultiShardsCoordinatorMock(2) - coreComponents := &mock.CoreComponentsMock{ - PathHdl: &testscommon.PathManagerStub{}, - EpochNotifierWithConfirm: nil, - } - - args := componentsMock.GetDataArgs(coreComponents, shardCoordinator) - - dcf, err := dataComp.NewDataComponentsFactory(args) - require.Nil(t, dcf) - require.Equal(t, errors.ErrNilEpochStartNotifier, err) -} - -func TestNewDataComponentsFactory_OkValsShouldWork(t *testing.T) { - t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } - - shardCoordinator := mock.NewMultiShardsCoordinatorMock(2) - coreComponents := componentsMock.GetCoreComponents() - args := componentsMock.GetDataArgs(coreComponents, shardCoordinator) - dcf, err := dataComp.NewDataComponentsFactory(args) - require.NoError(t, err) - require.NotNil(t, dcf) + t.Run("nil shard coordinator should error", func(t *testing.T) { + t.Parallel() + + shardCoordinator := mock.NewMultiShardsCoordinatorMock(2) + coreComponents := componentsMock.GetCoreComponents() + args := componentsMock.GetDataArgs(coreComponents, shardCoordinator) + args.ShardCoordinator = nil + + dcf, err := dataComp.NewDataComponentsFactory(args) + require.Nil(t, dcf) + require.Equal(t, errorsMx.ErrNilShardCoordinator, err) + }) + t.Run("nil core components should error", func(t *testing.T) { + t.Parallel() + + shardCoordinator := mock.NewMultiShardsCoordinatorMock(2) + args := componentsMock.GetDataArgs(nil, shardCoordinator) + args.Core = nil + + dcf, err := dataComp.NewDataComponentsFactory(args) + require.Nil(t, dcf) + require.Equal(t, errorsMx.ErrNilCoreComponents, err) + }) + t.Run("nil status core components should error", func(t *testing.T) { + t.Parallel() + + shardCoordinator := mock.NewMultiShardsCoordinatorMock(2) + coreComponents := componentsMock.GetCoreComponents() + args := componentsMock.GetDataArgs(coreComponents, shardCoordinator) + args.StatusCore = nil + + dcf, err := dataComp.NewDataComponentsFactory(args) + require.Nil(t, dcf) + require.Equal(t, errorsMx.ErrNilStatusCoreComponents, err) + }) + t.Run("nil crypto components should error", func(t *testing.T) { + t.Parallel() + + shardCoordinator := mock.NewMultiShardsCoordinatorMock(2) + coreComponents := componentsMock.GetCoreComponents() + args := componentsMock.GetDataArgs(coreComponents, shardCoordinator) + args.Crypto = nil + + dcf, err := dataComp.NewDataComponentsFactory(args) + require.Nil(t, dcf) + require.Equal(t, errorsMx.ErrNilCryptoComponents, err) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + shardCoordinator := mock.NewMultiShardsCoordinatorMock(2) + coreComponents := componentsMock.GetCoreComponents() + args := componentsMock.GetDataArgs(coreComponents, shardCoordinator) + dcf, err := dataComp.NewDataComponentsFactory(args) + require.NoError(t, err) + require.NotNil(t, dcf) + }) } -func TestDataComponentsFactory_CreateShouldErrDueBadConfig(t *testing.T) { +func TestDataComponentsFactory_Create(t *testing.T) { t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } - 
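
The IsInterfaceNil tests above guard against Go's typed-nil pitfall: a nil *T stored in an interface variable does not compare equal to nil, so a plain nil comparison misses it. A minimal, self-contained sketch of the convention (names are illustrative, not the repository's types):

package main

import "fmt"

// checkable mirrors the IsInterfaceNil convention used by the components
// in this diff; this is an illustrative sketch, not the repo's code.
type checkable interface {
	IsInterfaceNil() bool
}

type component struct{}

func (c *component) IsInterfaceNil() bool {
	return c == nil // nil-receiver calls are legal on pointer methods
}

func main() {
	var c *component        // typed nil pointer
	var iface checkable = c // stored in an interface

	fmt.Println(iface == nil)           // false: the interface still carries a type
	fmt.Println(iface.IsInterfaceNil()) // true: the nil-receiver check catches it
}

This is why callers go through check.IfNil (or the component's IsInterfaceNil) instead of comparing the interface value to nil directly.
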
shardCoordinator := mock.NewMultiShardsCoordinatorMock(2) - coreComponents := componentsMock.GetCoreComponents() - args := componentsMock.GetDataArgs(coreComponents, shardCoordinator) - args.Config.ShardHdrNonceHashStorage = config.StorageConfig{} - dcf, err := dataComp.NewDataComponentsFactory(args) - require.NoError(t, err) - - dc, err := dcf.Create() - require.Error(t, err) - require.Nil(t, dc) -} - -func TestDataComponentsFactory_CreateForShardShouldWork(t *testing.T) { - t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } - - coreComponents := componentsMock.GetCoreComponents() - shardCoordinator := mock.NewMultiShardsCoordinatorMock(2) - args := componentsMock.GetDataArgs(coreComponents, shardCoordinator) - dcf, err := dataComp.NewDataComponentsFactory(args) - - require.NoError(t, err) - dc, err := dcf.Create() - require.NoError(t, err) - require.NotNil(t, dc) -} - -func TestDataComponentsFactory_CreateForMetaShouldWork(t *testing.T) { - t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } - - coreComponents := componentsMock.GetCoreComponents() - shardCoordinator := mock.NewMultiShardsCoordinatorMock(2) - shardCoordinator.CurrentShard = core.MetachainShardId - args := componentsMock.GetDataArgs(coreComponents, shardCoordinator) - - dcf, err := dataComp.NewDataComponentsFactory(args) - require.NoError(t, err) - dc, err := dcf.Create() - require.NoError(t, err) - require.NotNil(t, dc) + t.Run("NewBlockChain returns error for shard", func(t *testing.T) { + t.Parallel() + + shardCoordinator := mock.NewMultiShardsCoordinatorMock(2) + coreComponents := componentsMock.GetCoreComponents() + args := componentsMock.GetDataArgs(coreComponents, shardCoordinator) + args.StatusCore = &factory.StatusCoreComponentsStub{ + AppStatusHandlerField: nil, + } + args.Config.ShardHdrNonceHashStorage = config.StorageConfig{} + dcf, err := dataComp.NewDataComponentsFactory(args) + require.NoError(t, err) + + dc, err := dcf.Create() + require.Error(t, err) + require.Nil(t, dc) + }) + t.Run("NewBlockChain returns error for meta", func(t *testing.T) { + t.Parallel() + + shardCoordinator := mock.NewMultiShardsCoordinatorMock(2) + shardCoordinator.CurrentShard = core.MetachainShardId + coreComponents := componentsMock.GetCoreComponents() + args := componentsMock.GetDataArgs(coreComponents, shardCoordinator) + args.StatusCore = &factory.StatusCoreComponentsStub{ + AppStatusHandlerField: nil, + } + args.Config.ShardHdrNonceHashStorage = config.StorageConfig{} + dcf, err := dataComp.NewDataComponentsFactory(args) + require.NoError(t, err) + + dc, err := dcf.Create() + require.Error(t, err) + require.Nil(t, dc) + }) + t.Run("createBlockChainFromConfig returns error", func(t *testing.T) { + t.Parallel() + + shardCoordinator := mock.NewMultiShardsCoordinatorMock(2) + shardCoordinator.CurrentShard = 12345 + coreComponents := componentsMock.GetCoreComponents() + args := componentsMock.GetDataArgs(coreComponents, shardCoordinator) + args.Config.ShardHdrNonceHashStorage = config.StorageConfig{} + dcf, err := dataComp.NewDataComponentsFactory(args) + require.NoError(t, err) + + dc, err := dcf.Create() + require.Equal(t, errorsMx.ErrBlockchainCreation, err) + require.Nil(t, dc) + }) + t.Run("NewStorageServiceFactory returns error", func(t *testing.T) { + t.Parallel() + + shardCoordinator := mock.NewMultiShardsCoordinatorMock(2) + coreComponents := componentsMock.GetCoreComponents() + args := componentsMock.GetDataArgs(coreComponents, shardCoordinator) + 
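
The rewritten tests follow one layout throughout: a single top-level Test function that fans out into parallel t.Run subtests, one per scenario. A minimal sketch of that layout, with a hypothetical newThing constructor standing in for the real factories:

package thing

import (
	"errors"
	"testing"
)

var errNilDependency = errors.New("nil dependency")

// newThing is a hypothetical constructor used only to demonstrate the
// test layout; it is not part of the repository.
func newThing(dep *int) (*int, error) {
	if dep == nil {
		return nil, errNilDependency
	}
	return dep, nil
}

func TestNewThing(t *testing.T) {
	t.Parallel()

	t.Run("nil dependency should error", func(t *testing.T) {
		t.Parallel()

		created, err := newThing(nil)
		if !errors.Is(err, errNilDependency) || created != nil {
			t.Fatalf("expected %v, got %v", errNilDependency, err)
		}
	})
	t.Run("should work", func(t *testing.T) {
		t.Parallel()

		dep := 42
		created, err := newThing(&dep)
		if err != nil || created == nil {
			t.Fatalf("expected success, got %v", err)
		}
	})
}
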
args.Config.StoragePruning.NumActivePersisters = 0 + dcf, err := dataComp.NewDataComponentsFactory(args) + require.NoError(t, err) + + dc, err := dcf.Create() + require.Error(t, err) + require.Nil(t, dc) + }) + t.Run("createDataStoreFromConfig fails for shard due to bad config", func(t *testing.T) { + t.Parallel() + + shardCoordinator := mock.NewMultiShardsCoordinatorMock(2) + coreComponents := componentsMock.GetCoreComponents() + args := componentsMock.GetDataArgs(coreComponents, shardCoordinator) + args.Config.ShardHdrNonceHashStorage = config.StorageConfig{} + dcf, err := dataComp.NewDataComponentsFactory(args) + require.NoError(t, err) + + dc, err := dcf.Create() + require.Error(t, err) + require.Nil(t, dc) + }) + t.Run("createDataStoreFromConfig fails, invalid shard", func(t *testing.T) { + t.Parallel() + + shardCoordinator := mock.NewMultiShardsCoordinatorMock(2) + cnt := 0 + shardCoordinator.SelfIDCalled = func() uint32 { + cnt++ + if cnt > 1 { + return 12345 + } + return 0 + } + coreComponents := componentsMock.GetCoreComponents() + args := componentsMock.GetDataArgs(coreComponents, shardCoordinator) + args.Config.ShardHdrNonceHashStorage = config.StorageConfig{} + dcf, err := dataComp.NewDataComponentsFactory(args) + require.NoError(t, err) + + dc, err := dcf.Create() + require.Equal(t, errorsMx.ErrDataStoreCreation, err) + require.Nil(t, dc) + }) + t.Run("NewDataPoolFromConfig fails should error", func(t *testing.T) { + t.Parallel() + + shardCoordinator := mock.NewMultiShardsCoordinatorMock(2) + coreComponents := componentsMock.GetCoreComponents() + args := componentsMock.GetDataArgs(coreComponents, shardCoordinator) + args.Config.TxBlockBodyDataPool.Type = "invalid" + dcf, err := dataComp.NewDataComponentsFactory(args) + require.NoError(t, err) + + dc, err := dcf.Create() + require.True(t, errors.Is(err, errorsMx.ErrDataPoolCreation)) + require.Nil(t, dc) + }) + t.Run("should work for shard", func(t *testing.T) { + t.Parallel() + + coreComponents := componentsMock.GetCoreComponents() + shardCoordinator := mock.NewMultiShardsCoordinatorMock(2) + args := componentsMock.GetDataArgs(coreComponents, shardCoordinator) + dcf, err := dataComp.NewDataComponentsFactory(args) + + require.NoError(t, err) + dc, err := dcf.Create() + require.NoError(t, err) + require.NotNil(t, dc) + }) + t.Run("should work for meta", func(t *testing.T) { + t.Parallel() + + coreComponents := componentsMock.GetCoreComponents() + shardCoordinator := mock.NewMultiShardsCoordinatorMock(2) + shardCoordinator.CurrentShard = core.MetachainShardId + args := componentsMock.GetDataArgs(coreComponents, shardCoordinator) + + dcf, err := dataComp.NewDataComponentsFactory(args) + require.NoError(t, err) + dc, err := dcf.Create() + require.NoError(t, err) + require.NotNil(t, dc) + }) } -// ------------ Test DataComponents -------------------- func TestManagedDataComponents_CloseShouldWork(t *testing.T) { t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } coreComponents := componentsMock.GetCoreComponents() shardCoordinator := mock.NewMultiShardsCoordinatorMock(2) diff --git a/factory/heartbeat/heartbeatV2Components.go b/factory/heartbeat/heartbeatV2Components.go index c8f62080c9b..08b0e65bd58 100644 --- a/factory/heartbeat/heartbeatV2Components.go +++ b/factory/heartbeat/heartbeatV2Components.go @@ -93,25 +93,27 @@ func checkHeartbeatV2FactoryArgs(args ArgHeartbeatV2ComponentsFactory) error { if check.IfNil(args.DataComponents) { return errors.ErrNilDataComponentsHolder } + if 
check.IfNil(args.DataComponents.Datapool()) { + return errors.ErrNilDataPoolsHolder + } if check.IfNil(args.NetworkComponents) { return errors.ErrNilNetworkComponentsHolder } + if check.IfNil(args.NetworkComponents.NetworkMessenger()) { + return errors.ErrNilMessenger + } if check.IfNil(args.CryptoComponents) { return errors.ErrNilCryptoComponentsHolder } if check.IfNil(args.ProcessComponents) { return errors.ErrNilProcessComponentsHolder } + if check.IfNil(args.ProcessComponents.EpochStartTrigger()) { + return errors.ErrNilEpochStartTrigger + } if check.IfNil(args.StatusCoreComponents) { return errors.ErrNilStatusCoreComponents } - if check.IfNil(args.StatusCoreComponents.AppStatusHandler()) { - return errors.ErrNilAppStatusHandler - } - hardforkTrigger := args.ProcessComponents.HardforkTrigger() - if check.IfNil(hardforkTrigger) { - return errors.ErrNilHardforkTrigger - } return nil } @@ -238,7 +240,7 @@ func (hcf *heartbeatV2ComponentsFactory) Create() (*heartbeatV2Components, error HeartbeatMonitor: heartbeatsMonitor, HeartbeatSenderInfoProvider: heartbeatV2Sender, AppStatusHandler: hcf.statusCoreComponents.AppStatusHandler(), - TimeBetweenConnectionsMetricsUpdate: time.Second * time.Duration(hcf.config.HeartbeatV2.TimeBetweenConnectionsMetricsUpdateInSec), + TimeBetweenConnectionsMetricsUpdate: time.Second * time.Duration(cfg.TimeBetweenConnectionsMetricsUpdateInSec), } statusHandler, err := status.NewMetricsUpdater(argsMetricsUpdater) if err != nil { @@ -246,7 +248,7 @@ func (hcf *heartbeatV2ComponentsFactory) Create() (*heartbeatV2Components, error } argsDirectConnectionProcessor := processor.ArgsDirectConnectionProcessor{ - TimeToReadDirectConnections: time.Second * time.Duration(hcf.config.HeartbeatV2.TimeToReadDirectConnectionsInSec), + TimeToReadDirectConnections: time.Second * time.Duration(cfg.TimeToReadDirectConnectionsInSec), Messenger: hcf.networkComponents.NetworkMessenger(), PeerShardMapper: hcf.processComponents.PeerShardMapper(), ShardCoordinator: hcf.processComponents.ShardCoordinator(), diff --git a/factory/heartbeat/heartbeatV2ComponentsHandler.go b/factory/heartbeat/heartbeatV2ComponentsHandler.go index 0fa1deeee20..6ff35e7047d 100644 --- a/factory/heartbeat/heartbeatV2ComponentsHandler.go +++ b/factory/heartbeat/heartbeatV2ComponentsHandler.go @@ -65,6 +65,10 @@ func (mhc *managedHeartbeatV2Components) Monitor() factory.HeartbeatV2Monitor { mhc.mutHeartbeatV2Components.Lock() defer mhc.mutHeartbeatV2Components.Unlock() + if mhc.heartbeatV2Components == nil { + return nil + } + return mhc.monitor } diff --git a/factory/heartbeat/heartbeatV2ComponentsHandler_test.go b/factory/heartbeat/heartbeatV2ComponentsHandler_test.go index 5826af05a00..d3653ef37e1 100644 --- a/factory/heartbeat/heartbeatV2ComponentsHandler_test.go +++ b/factory/heartbeat/heartbeatV2ComponentsHandler_test.go @@ -3,13 +3,13 @@ package heartbeat_test import ( "testing" - "github.com/multiversx/mx-chain-core-go/core/check" - "github.com/multiversx/mx-chain-go/errors" + errorsMx "github.com/multiversx/mx-chain-go/errors" + "github.com/multiversx/mx-chain-go/factory" heartbeatComp "github.com/multiversx/mx-chain-go/factory/heartbeat" "github.com/stretchr/testify/assert" ) -func TestManagedHeartbeatV2Components(t *testing.T) { +func TestNewManagedHeartbeatV2Components(t *testing.T) { t.Parallel() defer func() { @@ -20,23 +20,87 @@ func TestManagedHeartbeatV2Components(t *testing.T) { }() mhc, err := heartbeatComp.NewManagedHeartbeatV2Components(nil) - assert.True(t, check.IfNil(mhc)) - assert.Equal(t, 
errors.ErrNilHeartbeatV2ComponentsFactory, err) + assert.Nil(t, mhc) + assert.Equal(t, errorsMx.ErrNilHeartbeatV2ComponentsFactory, err) args := createMockHeartbeatV2ComponentsFactoryArgs() hcf, _ := heartbeatComp.NewHeartbeatV2ComponentsFactory(args) mhc, err = heartbeatComp.NewManagedHeartbeatV2Components(hcf) - assert.False(t, check.IfNil(mhc)) - assert.Nil(t, err) + assert.NotNil(t, mhc) + assert.NoError(t, err) +} + +func TestManagedHeartbeatV2Components_Create(t *testing.T) { + t.Parallel() + + t.Run("invalid config should error", func(t *testing.T) { + t.Parallel() + + args := createMockHeartbeatV2ComponentsFactoryArgs() + args.Config.HeartbeatV2.PeerAuthenticationTimeBetweenSendsInSec = 0 // Create will fail + hcf, _ := heartbeatComp.NewHeartbeatV2ComponentsFactory(args) + mhc, _ := heartbeatComp.NewManagedHeartbeatV2Components(hcf) + assert.NotNil(t, mhc) + err := mhc.Create() + assert.Error(t, err) + }) + t.Run("should work with getters", func(t *testing.T) { + t.Parallel() + + args := createMockHeartbeatV2ComponentsFactoryArgs() + hcf, _ := heartbeatComp.NewHeartbeatV2ComponentsFactory(args) + mhc, _ := heartbeatComp.NewManagedHeartbeatV2Components(hcf) + assert.NotNil(t, mhc) + assert.Nil(t, mhc.Monitor()) + + err := mhc.Create() + assert.NoError(t, err) + assert.NotNil(t, mhc.Monitor()) - err = mhc.Create() - assert.Nil(t, err) + assert.Equal(t, factory.HeartbeatV2ComponentsName, mhc.String()) - err = mhc.CheckSubcomponents() - assert.Nil(t, err) + assert.NoError(t, mhc.Close()) + }) +} - assert.Equal(t, "managedHeartbeatV2Components", mhc.String()) +func TestManagedHeartbeatV2Components_CheckSubcomponents(t *testing.T) { + t.Parallel() - err = mhc.Close() - assert.Nil(t, err) + args := createMockHeartbeatV2ComponentsFactoryArgs() + hcf, _ := heartbeatComp.NewHeartbeatV2ComponentsFactory(args) + mhc, _ := heartbeatComp.NewManagedHeartbeatV2Components(hcf) + assert.NotNil(t, mhc) + assert.Equal(t, errorsMx.ErrNilHeartbeatV2Components, mhc.CheckSubcomponents()) + + err := mhc.Create() + assert.NoError(t, err) + assert.Nil(t, mhc.CheckSubcomponents()) + + assert.NoError(t, mhc.Close()) +} + +func TestManagedHeartbeatV2Components_Close(t *testing.T) { + t.Parallel() + + args := createMockHeartbeatV2ComponentsFactoryArgs() + hcf, _ := heartbeatComp.NewHeartbeatV2ComponentsFactory(args) + mhc, _ := heartbeatComp.NewManagedHeartbeatV2Components(hcf) + assert.NotNil(t, mhc) + assert.NoError(t, mhc.Close()) + + err := mhc.Create() + assert.NoError(t, err) + assert.NoError(t, mhc.Close()) +} + +func TestManagedHeartbeatV2Components_IsInterfaceNil(t *testing.T) { + t.Parallel() + + mhc, _ := heartbeatComp.NewManagedHeartbeatV2Components(nil) + assert.True(t, mhc.IsInterfaceNil()) + + args := createMockHeartbeatV2ComponentsFactoryArgs() + hcf, _ := heartbeatComp.NewHeartbeatV2ComponentsFactory(args) + mhc, _ = heartbeatComp.NewManagedHeartbeatV2Components(hcf) + assert.False(t, mhc.IsInterfaceNil()) } diff --git a/factory/heartbeat/heartbeatV2Components_test.go b/factory/heartbeat/heartbeatV2Components_test.go index b6102a67c05..06ff8958b40 100644 --- a/factory/heartbeat/heartbeatV2Components_test.go +++ b/factory/heartbeat/heartbeatV2Components_test.go @@ -4,98 +4,448 @@ import ( "errors" "testing" - "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/marshal" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/config" - errErd "github.com/multiversx/mx-chain-go/errors" - 
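
The nil check added to Monitor() above reflects a convention all the managed wrappers in this diff share: getters take the lock and return nil until Create() has populated the inner components, and again after Close() tears them down, which is exactly what the Nil-before / NotNil-after assertions verify. A sketch of the pattern with stand-in types (illustrative only):

package heartbeatsketch

import "sync"

// monitor stands in for the real heartbeat monitor (illustrative).
type monitor struct{}

// heartbeatComponents stands in for the inner components built by Create.
type heartbeatComponents struct {
	monitor *monitor
}

type managedComponents struct {
	mut        sync.RWMutex
	components *heartbeatComponents
}

func (mc *managedComponents) Create() error {
	mc.mut.Lock()
	defer mc.mut.Unlock()

	mc.components = &heartbeatComponents{monitor: &monitor{}}
	return nil
}

func (mc *managedComponents) Monitor() *monitor {
	mc.mut.RLock()
	defer mc.mut.RUnlock()

	if mc.components == nil {
		return nil // not created yet, or already closed
	}
	return mc.components.monitor
}
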
bootstrapComp "github.com/multiversx/mx-chain-go/factory/bootstrap" + errorsMx "github.com/multiversx/mx-chain-go/errors" heartbeatComp "github.com/multiversx/mx-chain-go/factory/heartbeat" - "github.com/multiversx/mx-chain-go/factory/mock" + testsMocks "github.com/multiversx/mx-chain-go/integrationTests/mock" + "github.com/multiversx/mx-chain-go/p2p" + "github.com/multiversx/mx-chain-go/sharding" + "github.com/multiversx/mx-chain-go/storage" + "github.com/multiversx/mx-chain-go/testscommon" + "github.com/multiversx/mx-chain-go/testscommon/bootstrapMocks" componentsMock "github.com/multiversx/mx-chain-go/testscommon/components" + "github.com/multiversx/mx-chain-go/testscommon/cryptoMocks" + "github.com/multiversx/mx-chain-go/testscommon/dataRetriever" + "github.com/multiversx/mx-chain-go/testscommon/factory" + "github.com/multiversx/mx-chain-go/testscommon/mainFactoryMocks" + "github.com/multiversx/mx-chain-go/testscommon/p2pmocks" + "github.com/multiversx/mx-chain-go/testscommon/shardingMocks" + "github.com/multiversx/mx-chain-go/testscommon/statusHandler" "github.com/stretchr/testify/assert" ) func createMockHeartbeatV2ComponentsFactoryArgs() heartbeatComp.ArgHeartbeatV2ComponentsFactory { - shardCoordinator := mock.NewMultiShardsCoordinatorMock(2) - bootStrapArgs := componentsMock.GetBootStrapFactoryArgs() - bootstrapComponentsFactory, _ := bootstrapComp.NewBootstrapComponentsFactory(bootStrapArgs) - bootstrapC, _ := bootstrapComp.NewTestManagedBootstrapComponents(bootstrapComponentsFactory) - _ = bootstrapC.Create() - - _ = bootstrapC.SetShardCoordinator(shardCoordinator) - - statusCoreC := componentsMock.GetStatusCoreComponents() - coreC := componentsMock.GetCoreComponents() - cryptoC := componentsMock.GetCryptoComponents(coreC) - networkC := componentsMock.GetNetworkComponents(cryptoC) - dataC := componentsMock.GetDataComponents(coreC, shardCoordinator) - stateC := componentsMock.GetStateComponents(coreC, shardCoordinator) - processC := componentsMock.GetProcessComponents(shardCoordinator, coreC, networkC, dataC, cryptoC, stateC) return heartbeatComp.ArgHeartbeatV2ComponentsFactory{ - Config: config.Config{ - HeartbeatV2: config.HeartbeatV2Config{ - PeerAuthenticationTimeBetweenSendsInSec: 1, - PeerAuthenticationTimeBetweenSendsWhenErrorInSec: 1, - PeerAuthenticationTimeThresholdBetweenSends: 0.1, - HeartbeatTimeBetweenSendsInSec: 1, - HeartbeatTimeBetweenSendsDuringBootstrapInSec: 1, - HeartbeatTimeBetweenSendsWhenErrorInSec: 1, - HeartbeatTimeThresholdBetweenSends: 0.1, - HeartbeatExpiryTimespanInSec: 30, - MinPeersThreshold: 0.8, - DelayBetweenPeerAuthenticationRequestsInSec: 10, - PeerAuthenticationMaxTimeoutForRequestsInSec: 60, - PeerShardTimeBetweenSendsInSec: 5, - PeerShardTimeThresholdBetweenSends: 0.1, - MaxMissingKeysInRequest: 100, - MaxDurationPeerUnresponsiveInSec: 10, - HideInactiveValidatorIntervalInSec: 60, - HardforkTimeBetweenSendsInSec: 5, - TimeBetweenConnectionsMetricsUpdateInSec: 10, - TimeToReadDirectConnectionsInSec: 15, - PeerAuthenticationTimeBetweenChecksInSec: 6, - HeartbeatPool: config.CacheConfig{ - Type: "LRU", - Capacity: 1000, - Shards: 1, - }, - }, - Hardfork: config.HardforkConfig{ - PublicKeyToListenFrom: componentsMock.DummyPk, - }, - }, + Config: createMockConfig(), Prefs: config.Preferences{ Preferences: config.PreferencesConfig{ NodeDisplayName: "node", Identity: "identity", }, }, - BaseVersion: "test-base", - AppVersion: "test", - BootstrapComponents: bootstrapC, - CoreComponents: coreC, - DataComponents: dataC, - NetworkComponents: networkC, 
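
The argument validation exercised by the nil-subcomponent tests below stays panic-free only because each outer holder is nil-checked before any of its getters are dereferenced. A self-contained sketch of that ordering, with illustrative stand-ins for the holder interfaces and for check.IfNil:

package argsketch

import "errors"

var (
	errNilDataComponents = errors.New("nil data components holder")
	errNilDataPool       = errors.New("nil data pool")
)

// minimal stand-ins for the real holder interfaces (illustrative only)
type poolsHolder interface{ IsInterfaceNil() bool }

type dataComponentsHolder interface {
	Datapool() poolsHolder
	IsInterfaceNil() bool
}

// ifNil plays the role of check.IfNil in this sketch.
func ifNil(c interface{ IsInterfaceNil() bool }) bool {
	return c == nil || c.IsInterfaceNil()
}

// checkArgs mirrors the ordering used in checkHeartbeatV2FactoryArgs:
// validate the outer holder first, so that dereferencing its getters
// afterwards cannot panic on a nil holder.
func checkArgs(data dataComponentsHolder) error {
	if ifNil(data) {
		return errNilDataComponents
	}
	if ifNil(data.Datapool()) {
		return errNilDataPool // safe: data is known non-nil here
	}
	return nil
}
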
- CryptoComponents: cryptoC, - ProcessComponents: processC, - StatusCoreComponents: statusCoreC, + AppVersion: "test", + BootstrapComponents: &mainFactoryMocks.BootstrapComponentsStub{ + ShCoordinator: &testscommon.ShardsCoordinatorMock{}, + BootstrapParams: &bootstrapMocks.BootstrapParamsHandlerMock{}, + }, + CoreComponents: &factory.CoreComponentsHolderStub{ + InternalMarshalizerCalled: func() marshal.Marshalizer { + return &testscommon.MarshalizerStub{} + }, + HardforkTriggerPubKeyCalled: func() []byte { + return []byte("hardfork pub key") + }, + ValidatorPubKeyConverterCalled: func() core.PubkeyConverter { + return &testscommon.PubkeyConverterStub{} + }, + }, + DataComponents: &testsMocks.DataComponentsStub{ + DataPool: &dataRetriever.PoolsHolderStub{ + PeerAuthenticationsCalled: func() storage.Cacher { + return &testscommon.CacherStub{} + }, + HeartbeatsCalled: func() storage.Cacher { + return &testscommon.CacherStub{} + }, + }, + BlockChain: &testscommon.ChainHandlerStub{}, + }, + NetworkComponents: &testsMocks.NetworkComponentsStub{ + Messenger: &p2pmocks.MessengerStub{}, + }, + CryptoComponents: &testsMocks.CryptoComponentsStub{ + PrivKey: &cryptoMocks.PrivateKeyStub{}, + PeerSignHandler: &testsMocks.PeerSignatureHandler{}, + ManagedPeersHolderField: &testscommon.ManagedPeersHolderStub{}, + }, + ProcessComponents: &testsMocks.ProcessComponentsStub{ + EpochTrigger: &testsMocks.EpochStartTriggerStub{}, + EpochNotifier: &testsMocks.EpochStartNotifierStub{}, + NodesCoord: &shardingMocks.NodesCoordinatorStub{}, + NodeRedundancyHandlerInternal: &testsMocks.RedundancyHandlerStub{}, + HardforkTriggerField: &testscommon.HardforkTriggerStub{}, + ReqHandler: &testscommon.RequestHandlerStub{}, + PeerMapper: &testsMocks.PeerShardMapperStub{}, + ShardCoord: &testscommon.ShardsCoordinatorMock{}, + }, + StatusCoreComponents: &factory.StatusCoreComponentsStub{ + AppStatusHandlerField: &statusHandler.AppStatusHandlerStub{}, + }, + } +} + +func createMockConfig() config.Config { + return config.Config{ + HeartbeatV2: config.HeartbeatV2Config{ + PeerAuthenticationTimeBetweenSendsInSec: 1, + PeerAuthenticationTimeBetweenSendsWhenErrorInSec: 1, + PeerAuthenticationTimeThresholdBetweenSends: 0.1, + HeartbeatTimeBetweenSendsInSec: 1, + HeartbeatTimeBetweenSendsDuringBootstrapInSec: 1, + HeartbeatTimeBetweenSendsWhenErrorInSec: 1, + HeartbeatTimeThresholdBetweenSends: 0.1, + HeartbeatExpiryTimespanInSec: 30, + MinPeersThreshold: 0.8, + DelayBetweenPeerAuthenticationRequestsInSec: 10, + PeerAuthenticationMaxTimeoutForRequestsInSec: 60, + PeerAuthenticationTimeBetweenChecksInSec: 1, + PeerShardTimeBetweenSendsInSec: 5, + PeerShardTimeThresholdBetweenSends: 0.1, + MaxMissingKeysInRequest: 100, + MaxDurationPeerUnresponsiveInSec: 10, + HideInactiveValidatorIntervalInSec: 60, + HardforkTimeBetweenSendsInSec: 5, + TimeBetweenConnectionsMetricsUpdateInSec: 10, + TimeToReadDirectConnectionsInSec: 15, + HeartbeatPool: config.CacheConfig{ + Type: "LRU", + Capacity: 1000, + Shards: 1, + }, + }, + Hardfork: config.HardforkConfig{ + PublicKeyToListenFrom: componentsMock.DummyPk, + }, } } -func Test_heartbeatV2Components_Create(t *testing.T) { +func TestNewHeartbeatV2ComponentsFactory(t *testing.T) { + t.Parallel() + + t.Run("should work", func(t *testing.T) { + t.Parallel() + + hcf, err := heartbeatComp.NewHeartbeatV2ComponentsFactory(createMockHeartbeatV2ComponentsFactoryArgs()) + assert.NotNil(t, hcf) + assert.NoError(t, err) + }) + t.Run("nil BootstrapComponents should error", func(t *testing.T) { + t.Parallel() + + 
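
Several assertions in these tests use errors.Is rather than strict equality because the factories wrap sentinel errors with fmt.Errorf and the %w verb. A small runnable sketch of why that distinction matters (the sentinel name is illustrative):

package main

import (
	"errors"
	"fmt"
)

// sentinel error, analogous to errorsMx.ErrDataPoolCreation (illustrative)
var errDataPoolCreation = errors.New("data pool creation error")

func createDataPool() error {
	cause := errors.New("invalid cache type")
	// wrapping with %w keeps the sentinel reachable for errors.Is
	return fmt.Errorf("%w: %s", errDataPoolCreation, cause.Error())
}

func main() {
	err := createDataPool()
	fmt.Println(errors.Is(err, errDataPoolCreation)) // true: unwraps to the sentinel
	fmt.Println(err == errDataPoolCreation)          // false: the wrapper is a new value
}

Hence require.Equal fits constructors that return the sentinel itself, while errors.Is is needed once the error has been wrapped with context.
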
args := createMockHeartbeatV2ComponentsFactoryArgs() + args.BootstrapComponents = nil + hcf, err := heartbeatComp.NewHeartbeatV2ComponentsFactory(args) + assert.Nil(t, hcf) + assert.Equal(t, errorsMx.ErrNilBootstrapComponentsHolder, err) + }) + t.Run("nil CoreComponents should error", func(t *testing.T) { + t.Parallel() + + args := createMockHeartbeatV2ComponentsFactoryArgs() + args.CoreComponents = nil + hcf, err := heartbeatComp.NewHeartbeatV2ComponentsFactory(args) + assert.Nil(t, hcf) + assert.Equal(t, errorsMx.ErrNilCoreComponentsHolder, err) + }) + t.Run("nil DataComponents should error", func(t *testing.T) { + t.Parallel() + + args := createMockHeartbeatV2ComponentsFactoryArgs() + args.DataComponents = nil + hcf, err := heartbeatComp.NewHeartbeatV2ComponentsFactory(args) + assert.Nil(t, hcf) + assert.Equal(t, errorsMx.ErrNilDataComponentsHolder, err) + }) + t.Run("nil DataPool should error", func(t *testing.T) { + t.Parallel() + + args := createMockHeartbeatV2ComponentsFactoryArgs() + args.DataComponents = &testsMocks.DataComponentsStub{ + DataPool: nil, + } + hcf, err := heartbeatComp.NewHeartbeatV2ComponentsFactory(args) + assert.Nil(t, hcf) + assert.Equal(t, errorsMx.ErrNilDataPoolsHolder, err) + }) + t.Run("nil NetworkComponents should error", func(t *testing.T) { + t.Parallel() + + args := createMockHeartbeatV2ComponentsFactoryArgs() + args.NetworkComponents = nil + hcf, err := heartbeatComp.NewHeartbeatV2ComponentsFactory(args) + assert.Nil(t, hcf) + assert.Equal(t, errorsMx.ErrNilNetworkComponentsHolder, err) + }) + t.Run("nil NetworkMessenger should error", func(t *testing.T) { + t.Parallel() + + args := createMockHeartbeatV2ComponentsFactoryArgs() + args.NetworkComponents = &testsMocks.NetworkComponentsStub{ + Messenger: nil, + } + hcf, err := heartbeatComp.NewHeartbeatV2ComponentsFactory(args) + assert.Nil(t, hcf) + assert.Equal(t, errorsMx.ErrNilMessenger, err) + }) + t.Run("nil CryptoComponents should error", func(t *testing.T) { + t.Parallel() + + args := createMockHeartbeatV2ComponentsFactoryArgs() + args.CryptoComponents = nil + hcf, err := heartbeatComp.NewHeartbeatV2ComponentsFactory(args) + assert.Nil(t, hcf) + assert.Equal(t, errorsMx.ErrNilCryptoComponentsHolder, err) + }) + t.Run("nil ProcessComponents should error", func(t *testing.T) { + t.Parallel() + + args := createMockHeartbeatV2ComponentsFactoryArgs() + args.ProcessComponents = nil + hcf, err := heartbeatComp.NewHeartbeatV2ComponentsFactory(args) + assert.Nil(t, hcf) + assert.Equal(t, errorsMx.ErrNilProcessComponentsHolder, err) + }) + t.Run("nil EpochStartTrigger should error", func(t *testing.T) { + t.Parallel() + + args := createMockHeartbeatV2ComponentsFactoryArgs() + args.ProcessComponents = &testsMocks.ProcessComponentsStub{ + EpochTrigger: nil, + } + hcf, err := heartbeatComp.NewHeartbeatV2ComponentsFactory(args) + assert.Nil(t, hcf) + assert.Equal(t, errorsMx.ErrNilEpochStartTrigger, err) + }) + t.Run("nil StatusCoreComponents should error", func(t *testing.T) { + t.Parallel() + + args := createMockHeartbeatV2ComponentsFactoryArgs() + args.StatusCoreComponents = nil + hcf, err := heartbeatComp.NewHeartbeatV2ComponentsFactory(args) + assert.Nil(t, hcf) + assert.Equal(t, errorsMx.ErrNilStatusCoreComponents, err) + }) +} + +func TestHeartbeatV2Components_Create(t *testing.T) { t.Parallel() + expectedErr := errors.New("expected error") + t.Run("messenger does not have PeerAuthenticationTopic and fails to create it", func(t *testing.T) { + t.Parallel() + + args := 
createMockHeartbeatV2ComponentsFactoryArgs() + args.NetworkComponents = &testsMocks.NetworkComponentsStub{ + Messenger: &p2pmocks.MessengerStub{ + HasTopicCalled: func(name string) bool { + if name == common.PeerAuthenticationTopic { + return false + } + assert.Fail(t, "should not have been called") + return true + }, + CreateTopicCalled: func(name string, createChannelForTopic bool) error { + if name == common.PeerAuthenticationTopic { + return expectedErr + } + assert.Fail(t, "should not have been called") + return nil + }, + }, + } + hcf, err := heartbeatComp.NewHeartbeatV2ComponentsFactory(args) + assert.NotNil(t, hcf) + assert.NoError(t, err) + + hc, err := hcf.Create() + assert.Nil(t, hc) + assert.Equal(t, expectedErr, err) + }) + t.Run("messenger does not have HeartbeatV2Topic and fails to create it", func(t *testing.T) { + t.Parallel() + + args := createMockHeartbeatV2ComponentsFactoryArgs() + args.NetworkComponents = &testsMocks.NetworkComponentsStub{ + Messenger: &p2pmocks.MessengerStub{ + HasTopicCalled: func(name string) bool { + return name != common.HeartbeatV2Topic + }, + CreateTopicCalled: func(name string, createChannelForTopic bool) error { + if name == common.HeartbeatV2Topic { + return expectedErr + } + assert.Fail(t, "should not have been called") + return nil + }, + }, + } + hcf, err := heartbeatComp.NewHeartbeatV2ComponentsFactory(args) + assert.NotNil(t, hcf) + assert.NoError(t, err) + + hc, err := hcf.Create() + assert.Nil(t, hc) + assert.Equal(t, expectedErr, err) + }) t.Run("invalid config should error", func(t *testing.T) { t.Parallel() args := createMockHeartbeatV2ComponentsFactoryArgs() args.Config.HeartbeatV2.HeartbeatExpiryTimespanInSec = args.Config.HeartbeatV2.PeerAuthenticationTimeBetweenSendsInSec hcf, err := heartbeatComp.NewHeartbeatV2ComponentsFactory(args) - assert.False(t, check.IfNil(hcf)) - assert.Nil(t, err) + assert.NotNil(t, hcf) + assert.NoError(t, err) + + hc, err := hcf.Create() + assert.Nil(t, hc) + assert.True(t, errors.Is(err, errorsMx.ErrInvalidHeartbeatV2Config)) + }) + t.Run("NewPeerTypeProvider fails should error", func(t *testing.T) { + t.Parallel() + + args := createMockHeartbeatV2ComponentsFactoryArgs() + processComp := args.ProcessComponents + args.ProcessComponents = &testsMocks.ProcessComponentsStub{ + NodesCoord: nil, + EpochTrigger: processComp.EpochStartTrigger(), + EpochNotifier: processComp.EpochStartNotifier(), + } + hcf, err := heartbeatComp.NewHeartbeatV2ComponentsFactory(args) + assert.NotNil(t, hcf) + assert.NoError(t, err) + + hc, err := hcf.Create() + assert.Nil(t, hc) + assert.Error(t, err) + }) + t.Run("NewSender fails should error", func(t *testing.T) { + t.Parallel() + + args := createMockHeartbeatV2ComponentsFactoryArgs() + args.Config.HeartbeatV2.PeerAuthenticationTimeBetweenSendsInSec = 0 + hcf, err := heartbeatComp.NewHeartbeatV2ComponentsFactory(args) + assert.NotNil(t, hcf) + assert.NoError(t, err) + + hc, err := hcf.Create() + assert.Nil(t, hc) + assert.Error(t, err) + }) + t.Run("NewPeerAuthenticationRequestsProcessor fails should error", func(t *testing.T) { + t.Parallel() + + args := createMockHeartbeatV2ComponentsFactoryArgs() + args.Config.HeartbeatV2.DelayBetweenPeerAuthenticationRequestsInSec = 0 + hcf, err := heartbeatComp.NewHeartbeatV2ComponentsFactory(args) + assert.NotNil(t, hcf) + assert.NoError(t, err) + + hc, err := hcf.Create() + assert.Nil(t, hc) + assert.Error(t, err) + }) + t.Run("NewPeerShardSender fails should error", func(t *testing.T) { + t.Parallel() + + args := 
createMockHeartbeatV2ComponentsFactoryArgs() + args.Config.HeartbeatV2.PeerShardTimeBetweenSendsInSec = 0 + hcf, err := heartbeatComp.NewHeartbeatV2ComponentsFactory(args) + assert.NotNil(t, hcf) + assert.NoError(t, err) + + hc, err := hcf.Create() + assert.Nil(t, hc) + assert.Error(t, err) + }) + t.Run("NewHeartbeatV2Monitor fails should error", func(t *testing.T) { + t.Parallel() + + args := createMockHeartbeatV2ComponentsFactoryArgs() + args.Config.HeartbeatV2.MaxDurationPeerUnresponsiveInSec = 0 + hcf, err := heartbeatComp.NewHeartbeatV2ComponentsFactory(args) + assert.NotNil(t, hcf) + assert.NoError(t, err) + + hc, err := hcf.Create() + assert.Nil(t, hc) + assert.Error(t, err) + }) + t.Run("NewMetricsUpdater fails should error", func(t *testing.T) { + t.Parallel() + + args := createMockHeartbeatV2ComponentsFactoryArgs() + args.Config.HeartbeatV2.TimeBetweenConnectionsMetricsUpdateInSec = 0 + hcf, err := heartbeatComp.NewHeartbeatV2ComponentsFactory(args) + assert.NotNil(t, hcf) + assert.NoError(t, err) + + hc, err := hcf.Create() + assert.Nil(t, hc) + assert.Error(t, err) + }) + t.Run("NewDirectConnectionProcessor fails should error", func(t *testing.T) { + t.Parallel() + + args := createMockHeartbeatV2ComponentsFactoryArgs() + args.Config.HeartbeatV2.TimeToReadDirectConnectionsInSec = 0 + hcf, err := heartbeatComp.NewHeartbeatV2ComponentsFactory(args) + assert.NotNil(t, hcf) + assert.NoError(t, err) + + hc, err := hcf.Create() + assert.Nil(t, hc) + assert.Error(t, err) + }) + t.Run("NewCrossShardPeerTopicNotifier fails should error", func(t *testing.T) { + t.Parallel() + + args := createMockHeartbeatV2ComponentsFactoryArgs() + processComp := args.ProcessComponents + cnt := 0 + args.ProcessComponents = &testsMocks.ProcessComponentsStub{ + NodesCoord: processComp.NodesCoordinator(), + EpochTrigger: processComp.EpochStartTrigger(), + EpochNotifier: processComp.EpochStartNotifier(), + NodeRedundancyHandlerInternal: processComp.NodeRedundancyHandler(), + HardforkTriggerField: processComp.HardforkTrigger(), + PeerMapper: processComp.PeerShardMapper(), + ShardCoordinatorCalled: func() sharding.Coordinator { + cnt++ + if cnt > 3 { + return nil + } + return processComp.ShardCoordinator() + }, + } + hcf, err := heartbeatComp.NewHeartbeatV2ComponentsFactory(args) + assert.NotNil(t, hcf) + assert.NoError(t, err) hc, err := hcf.Create() assert.Nil(t, hc) - assert.True(t, errors.Is(err, errErd.ErrInvalidHeartbeatV2Config)) + assert.Error(t, err) + }) + t.Run("AddPeerTopicNotifier fails should error", func(t *testing.T) { + t.Parallel() + + args := createMockHeartbeatV2ComponentsFactoryArgs() + args.NetworkComponents = &testsMocks.NetworkComponentsStub{ + Messenger: &p2pmocks.MessengerStub{ + AddPeerTopicNotifierCalled: func(notifier p2p.PeerTopicNotifier) error { + return expectedErr + }, + }, + } + hcf, err := heartbeatComp.NewHeartbeatV2ComponentsFactory(args) + assert.NotNil(t, hcf) + assert.NoError(t, err) + + hc, err := hcf.Create() + assert.Nil(t, hc) + assert.Equal(t, expectedErr, err) }) t.Run("should work", func(t *testing.T) { t.Parallel() @@ -108,15 +458,26 @@ func Test_heartbeatV2Components_Create(t *testing.T) { }() args := createMockHeartbeatV2ComponentsFactoryArgs() + args.Prefs.Preferences.FullArchive = true // coverage only hcf, err := heartbeatComp.NewHeartbeatV2ComponentsFactory(args) - assert.False(t, check.IfNil(hcf)) - assert.Nil(t, err) + assert.NotNil(t, hcf) + assert.NoError(t, err) hc, err := hcf.Create() assert.NotNil(t, hc) - assert.Nil(t, err) - - err = hc.Close() - 
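
The NewCrossShardPeerTopicNotifier case above fails only a later call to ShardCoordinator() by counting invocations inside the stub, which drives a constructor deep inside Create() into its error path without disturbing the earlier call sites. The same trick in isolation, as a hypothetical helper (not part of the repository, and not safe for concurrent use):

package stubsketch

// coordinator is a stand-in for sharding.Coordinator (illustrative).
type coordinator struct{}

// failFromNthCall returns a stub getter that succeeds for the first n-1
// calls and returns nil from the n-th call onward, mirroring the counter
// in the ShardCoordinatorCalled stub above.
func failFromNthCall(n int, good *coordinator) func() *coordinator {
	cnt := 0
	return func() *coordinator {
		cnt++
		if cnt >= n {
			return nil // simulate the dependency disappearing mid-Create
		}
		return good
	}
}
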
assert.Nil(t, err) + assert.NoError(t, err) + assert.NoError(t, hc.Close()) }) } + +func TestHeartbeatV2ComponentsFactory_IsInterfaceNil(t *testing.T) { + t.Parallel() + + args := createMockHeartbeatV2ComponentsFactoryArgs() + args.CoreComponents = nil + hcf, _ := heartbeatComp.NewHeartbeatV2ComponentsFactory(args) + assert.True(t, hcf.IsInterfaceNil()) + + hcf, _ = heartbeatComp.NewHeartbeatV2ComponentsFactory(createMockHeartbeatV2ComponentsFactoryArgs()) + assert.False(t, hcf.IsInterfaceNil()) +} diff --git a/factory/interface.go b/factory/interface.go index 2f7aa233296..2d82d5ab86a 100644 --- a/factory/interface.go +++ b/factory/interface.go @@ -165,7 +165,6 @@ type CryptoParamsHolder interface { PrivateKey() crypto.PrivateKey PublicKeyString() string PublicKeyBytes() []byte - PrivateKeyBytes() []byte } // CryptoComponentsHolder holds the crypto components @@ -215,7 +214,7 @@ type MiniBlockProvider interface { // DataComponentsHolder holds the data components type DataComponentsHolder interface { Blockchain() data.ChainHandler - SetBlockchain(chain data.ChainHandler) + SetBlockchain(chain data.ChainHandler) error StorageService() dataRetriever.StorageService Datapool() dataRetriever.PoolsHolder MiniBlocksProvider() MiniBlockProvider @@ -327,6 +326,7 @@ type StateComponentsHolder interface { TriesContainer() common.TriesHolder TrieStorageManagers() map[string]common.StorageManager MissingTrieNodesNotifier() common.MissingTrieNodesNotifier + Close() error IsInterfaceNil() bool } @@ -342,7 +342,7 @@ type StatusComponentsHandler interface { ComponentHandler StatusComponentsHolder // SetForkDetector should be set before starting Polling for updates - SetForkDetector(forkDetector process.ForkDetector) + SetForkDetector(forkDetector process.ForkDetector) error StartPolling() error } @@ -446,6 +446,7 @@ type BootstrapComponentsHolder interface { VersionedHeaderFactory() factory.VersionedHeaderFactory HeaderVersionHandler() factory.HeaderVersionHandler HeaderIntegrityVerifier() factory.HeaderIntegrityVerifierHandler + GuardedAccountHandler() process.GuardedAccountHandler IsInterfaceNil() bool } diff --git a/factory/mock/cryptoComponentsMock.go b/factory/mock/cryptoComponentsMock.go index fce869adbd7..5d33c918369 100644 --- a/factory/mock/cryptoComponentsMock.go +++ b/factory/mock/cryptoComponentsMock.go @@ -19,7 +19,6 @@ type CryptoComponentsMock struct { P2pPrivKey crypto.PrivateKey P2pSig crypto.SingleSigner PubKeyString string - PrivKeyBytes []byte PubKeyBytes []byte BlockSig crypto.SingleSigner TxSig crypto.SingleSigner @@ -70,11 +69,6 @@ func (ccm *CryptoComponentsMock) PublicKeyBytes() []byte { return ccm.PubKeyBytes } -// PrivateKeyBytes - -func (ccm *CryptoComponentsMock) PrivateKeyBytes() []byte { - return ccm.PrivKeyBytes -} - // BlockSigner - func (ccm *CryptoComponentsMock) BlockSigner() crypto.SingleSigner { return ccm.BlockSig @@ -163,7 +157,6 @@ func (ccm *CryptoComponentsMock) Clone() interface{} { PubKey: ccm.PubKey, PrivKey: ccm.PrivKey, PubKeyString: ccm.PubKeyString, - PrivKeyBytes: ccm.PrivKeyBytes, PubKeyBytes: ccm.PubKeyBytes, BlockSig: ccm.BlockSig, TxSig: ccm.TxSig, diff --git a/factory/mock/dataComponentsMock.go b/factory/mock/dataComponentsMock.go index e39f62c3fbe..aeac7aa6823 100644 --- a/factory/mock/dataComponentsMock.go +++ b/factory/mock/dataComponentsMock.go @@ -50,8 +50,9 @@ func (dcm *DataComponentsMock) EconomicsHandler() factory.EconomicsHandler { } // SetBlockchain - -func (dcm *DataComponentsMock) SetBlockchain(chain data.ChainHandler) { +func (dcm 
*DataComponentsMock) SetBlockchain(chain data.ChainHandler) error { dcm.Blkc = chain + return nil } // IsInterfaceNil - diff --git a/factory/mock/forkDetectorMock.go b/factory/mock/forkDetectorMock.go index d681b976d7d..4a041bc814a 100644 --- a/factory/mock/forkDetectorMock.go +++ b/factory/mock/forkDetectorMock.go @@ -23,37 +23,56 @@ type ForkDetectorMock struct { // RestoreToGenesis - func (fdm *ForkDetectorMock) RestoreToGenesis() { - fdm.RestoreToGenesisCalled() + if fdm.RestoreToGenesisCalled != nil { + fdm.RestoreToGenesisCalled() + } } // AddHeader is a mock implementation for AddHeader func (fdm *ForkDetectorMock) AddHeader(header data.HeaderHandler, hash []byte, state process.BlockHeaderState, selfNotarizedHeaders []data.HeaderHandler, selfNotarizedHeadersHashes [][]byte) error { - return fdm.AddHeaderCalled(header, hash, state, selfNotarizedHeaders, selfNotarizedHeadersHashes) + if fdm.AddHeaderCalled != nil { + return fdm.AddHeaderCalled(header, hash, state, selfNotarizedHeaders, selfNotarizedHeadersHashes) + } + return nil } // RemoveHeader is a mock implementation for RemoveHeader func (fdm *ForkDetectorMock) RemoveHeader(nonce uint64, hash []byte) { - fdm.RemoveHeaderCalled(nonce, hash) + if fdm.RemoveHeaderCalled != nil { + fdm.RemoveHeaderCalled(nonce, hash) + } } // CheckFork is a mock implementation for CheckFork func (fdm *ForkDetectorMock) CheckFork() *process.ForkInfo { - return fdm.CheckForkCalled() + if fdm.CheckForkCalled != nil { + return fdm.CheckForkCalled() + } + return &process.ForkInfo{} } // GetHighestFinalBlockNonce is a mock implementation for GetHighestFinalBlockNonce func (fdm *ForkDetectorMock) GetHighestFinalBlockNonce() uint64 { - return fdm.GetHighestFinalBlockNonceCalled() + if fdm.GetHighestFinalBlockNonceCalled != nil { + return fdm.GetHighestFinalBlockNonceCalled() + } + return 0 } // GetHighestFinalBlockHash - func (fdm *ForkDetectorMock) GetHighestFinalBlockHash() []byte { - return fdm.GetHighestFinalBlockHashCalled() + if fdm.GetHighestFinalBlockHashCalled != nil { + return fdm.GetHighestFinalBlockHashCalled() + } + return nil } // ProbableHighestNonce is a mock implementation for GetProbableHighestNonce func (fdm *ForkDetectorMock) ProbableHighestNonce() uint64 { - return fdm.ProbableHighestNonceCalled() + if fdm.ProbableHighestNonceCalled != nil { + return fdm.ProbableHighestNonceCalled() + } + return 0 } // SetRollBackNonce - @@ -65,12 +84,17 @@ func (fdm *ForkDetectorMock) SetRollBackNonce(nonce uint64) { // ResetFork - func (fdm *ForkDetectorMock) ResetFork() { - fdm.ResetForkCalled() + if fdm.ResetForkCalled != nil { + fdm.ResetForkCalled() + } } // GetNotarizedHeaderHash - func (fdm *ForkDetectorMock) GetNotarizedHeaderHash(nonce uint64) []byte { - return fdm.GetNotarizedHeaderHashCalled(nonce) + if fdm.GetNotarizedHeaderHashCalled != nil { + return fdm.GetNotarizedHeaderHashCalled(nonce) + } + return nil } // ResetProbableHighestNonce - diff --git a/factory/mock/stateComponentsHolderStub.go b/factory/mock/stateComponentsHolderStub.go index 010eb9e9168..c851fdc6dac 100644 --- a/factory/mock/stateComponentsHolderStub.go +++ b/factory/mock/stateComponentsHolderStub.go @@ -79,6 +79,11 @@ func (s *StateComponentsHolderStub) MissingTrieNodesNotifier() common.MissingTri return nil } +// Close - +func (s *StateComponentsHolderStub) Close() error { + return nil +} + // IsInterfaceNil - func (s *StateComponentsHolderStub) IsInterfaceNil() bool { return s == nil diff --git a/factory/network/networkComponentsHandler_test.go 
b/factory/network/networkComponentsHandler_test.go index e40ca1dcee8..51bfe86372c 100644 --- a/factory/network/networkComponentsHandler_test.go +++ b/factory/network/networkComponentsHandler_test.go @@ -3,64 +3,82 @@ package network_test import ( "testing" - "github.com/multiversx/mx-chain-core-go/core/check" + errorsMx "github.com/multiversx/mx-chain-go/errors" + "github.com/multiversx/mx-chain-go/factory" networkComp "github.com/multiversx/mx-chain-go/factory/network" componentsMock "github.com/multiversx/mx-chain-go/testscommon/components" "github.com/stretchr/testify/require" ) -// ------------ Test ManagedNetworkComponents -------------------- -func TestManagedNetworkComponents_CreateWithInvalidArgsShouldErr(t *testing.T) { +func TestNewManagedNetworkComponents(t *testing.T) { t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } - networkArgs := componentsMock.GetNetworkFactoryArgs() - networkArgs.P2pConfig.Node.Port = "invalid" - networkComponentsFactory, _ := networkComp.NewNetworkComponentsFactory(networkArgs) - managedNetworkComponents, err := networkComp.NewManagedNetworkComponents(networkComponentsFactory) - require.NoError(t, err) - err = managedNetworkComponents.Create() - require.Error(t, err) - require.Nil(t, managedNetworkComponents.NetworkMessenger()) + t.Run("nil factory should error", func(t *testing.T) { + t.Parallel() + + managedNetworkComponents, err := networkComp.NewManagedNetworkComponents(nil) + require.Equal(t, errorsMx.ErrNilNetworkComponentsFactory, err) + require.Nil(t, managedNetworkComponents) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + networkComponentsFactory, _ := networkComp.NewNetworkComponentsFactory(componentsMock.GetNetworkFactoryArgs()) + managedNetworkComponents, err := networkComp.NewManagedNetworkComponents(networkComponentsFactory) + require.NoError(t, err) + require.NotNil(t, managedNetworkComponents) + }) } -func TestManagedNetworkComponents_CreateShouldWork(t *testing.T) { +func TestManagedNetworkComponents_Create(t *testing.T) { t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } - networkArgs := componentsMock.GetNetworkFactoryArgs() - networkComponentsFactory, _ := networkComp.NewNetworkComponentsFactory(networkArgs) - managedNetworkComponents, err := networkComp.NewManagedNetworkComponents(networkComponentsFactory) - require.NoError(t, err) - require.False(t, check.IfNil(managedNetworkComponents)) - require.Nil(t, managedNetworkComponents.NetworkMessenger()) - require.Nil(t, managedNetworkComponents.InputAntiFloodHandler()) - require.Nil(t, managedNetworkComponents.OutputAntiFloodHandler()) - require.Nil(t, managedNetworkComponents.PeerBlackListHandler()) - require.Nil(t, managedNetworkComponents.PubKeyCacher()) - require.Nil(t, managedNetworkComponents.PreferredPeersHolderHandler()) - require.Nil(t, managedNetworkComponents.PeerHonestyHandler()) + t.Run("invalid config should error", func(t *testing.T) { + t.Parallel() - err = managedNetworkComponents.Create() - require.NoError(t, err) - require.NotNil(t, managedNetworkComponents.NetworkMessenger()) - require.NotNil(t, managedNetworkComponents.InputAntiFloodHandler()) - require.NotNil(t, managedNetworkComponents.OutputAntiFloodHandler()) - require.NotNil(t, managedNetworkComponents.PeerBlackListHandler()) - require.NotNil(t, managedNetworkComponents.PubKeyCacher()) - require.NotNil(t, managedNetworkComponents.PreferredPeersHolderHandler()) - require.NotNil(t, managedNetworkComponents.PeerHonestyHandler()) + 
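
The fork detector mock earlier in this diff was reworked so every method tolerates an unset callback. That optional-callback pattern, reduced to a self-contained sketch with stand-in types, nil-checks each hook and falls back to a harmless zero value, so tests only wire up the calls they actually care about:

package mocksketch

// ForkInfo is a stand-in for process.ForkInfo (illustrative only).
type ForkInfo struct{ IsDetected bool }

// ForkDetectorStub shows the optional-callback pattern: each method
// guards its callback and returns a safe default when it is nil.
type ForkDetectorStub struct {
	CheckForkCalled                 func() *ForkInfo
	GetHighestFinalBlockNonceCalled func() uint64
}

func (f *ForkDetectorStub) CheckFork() *ForkInfo {
	if f.CheckForkCalled != nil {
		return f.CheckForkCalled()
	}
	return &ForkInfo{} // default: no fork detected
}

func (f *ForkDetectorStub) GetHighestFinalBlockNonce() uint64 {
	if f.GetHighestFinalBlockNonceCalled != nil {
		return f.GetHighestFinalBlockNonceCalled()
	}
	return 0
}
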
networkArgs := componentsMock.GetNetworkFactoryArgs() + networkArgs.P2pConfig.Node.Port = "invalid" + networkComponentsFactory, _ := networkComp.NewNetworkComponentsFactory(networkArgs) + managedNetworkComponents, err := networkComp.NewManagedNetworkComponents(networkComponentsFactory) + require.NoError(t, err) + err = managedNetworkComponents.Create() + require.Error(t, err) + require.Nil(t, managedNetworkComponents.NetworkMessenger()) + }) + t.Run("should work with getters", func(t *testing.T) { + t.Parallel() + + networkArgs := componentsMock.GetNetworkFactoryArgs() + networkComponentsFactory, _ := networkComp.NewNetworkComponentsFactory(networkArgs) + managedNetworkComponents, err := networkComp.NewManagedNetworkComponents(networkComponentsFactory) + require.NoError(t, err) + require.NotNil(t, managedNetworkComponents) + require.Nil(t, managedNetworkComponents.NetworkMessenger()) + require.Nil(t, managedNetworkComponents.InputAntiFloodHandler()) + require.Nil(t, managedNetworkComponents.OutputAntiFloodHandler()) + require.Nil(t, managedNetworkComponents.PeerBlackListHandler()) + require.Nil(t, managedNetworkComponents.PubKeyCacher()) + require.Nil(t, managedNetworkComponents.PreferredPeersHolderHandler()) + require.Nil(t, managedNetworkComponents.PeerHonestyHandler()) + require.Nil(t, managedNetworkComponents.PeersRatingHandler()) + + err = managedNetworkComponents.Create() + require.NoError(t, err) + require.NotNil(t, managedNetworkComponents.NetworkMessenger()) + require.NotNil(t, managedNetworkComponents.InputAntiFloodHandler()) + require.NotNil(t, managedNetworkComponents.OutputAntiFloodHandler()) + require.NotNil(t, managedNetworkComponents.PeerBlackListHandler()) + require.NotNil(t, managedNetworkComponents.PubKeyCacher()) + require.NotNil(t, managedNetworkComponents.PreferredPeersHolderHandler()) + require.NotNil(t, managedNetworkComponents.PeerHonestyHandler()) + require.NotNil(t, managedNetworkComponents.PeersRatingHandler()) + + require.Equal(t, factory.NetworkComponentsName, managedNetworkComponents.String()) + }) } func TestManagedNetworkComponents_CheckSubcomponents(t *testing.T) { t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } networkArgs := componentsMock.GetNetworkFactoryArgs() networkComponentsFactory, _ := networkComp.NewNetworkComponentsFactory(networkArgs) @@ -83,10 +101,25 @@ func TestManagedNetworkComponents_Close(t *testing.T) { networkArgs := componentsMock.GetNetworkFactoryArgs() networkComponentsFactory, _ := networkComp.NewNetworkComponentsFactory(networkArgs) managedNetworkComponents, _ := networkComp.NewManagedNetworkComponents(networkComponentsFactory) - err := managedNetworkComponents.Create() + err := managedNetworkComponents.Close() + require.NoError(t, err) + + err = managedNetworkComponents.Create() require.NoError(t, err) err = managedNetworkComponents.Close() require.NoError(t, err) require.Nil(t, managedNetworkComponents.NetworkMessenger()) } + +func TestManagedNetworkComponents_IsInterfaceNil(t *testing.T) { + t.Parallel() + + managedNetworkComponents, _ := networkComp.NewManagedNetworkComponents(nil) + require.True(t, managedNetworkComponents.IsInterfaceNil()) + + networkArgs := componentsMock.GetNetworkFactoryArgs() + networkComponentsFactory, _ := networkComp.NewNetworkComponentsFactory(networkArgs) + managedNetworkComponents, _ = networkComp.NewManagedNetworkComponents(networkComponentsFactory) + require.False(t, managedNetworkComponents.IsInterfaceNil()) +} diff --git a/factory/network/networkComponents_test.go 
b/factory/network/networkComponents_test.go index 205d3ed5249..1fe95107b6f 100644 --- a/factory/network/networkComponents_test.go +++ b/factory/network/networkComponents_test.go @@ -4,91 +4,155 @@ import ( "errors" "testing" - "github.com/multiversx/mx-chain-go/config" - errErd "github.com/multiversx/mx-chain-go/errors" + errorsMx "github.com/multiversx/mx-chain-go/errors" networkComp "github.com/multiversx/mx-chain-go/factory/network" "github.com/multiversx/mx-chain-go/p2p" - p2pConfig "github.com/multiversx/mx-chain-go/p2p/config" componentsMock "github.com/multiversx/mx-chain-go/testscommon/components" "github.com/stretchr/testify/require" ) -func TestNewNetworkComponentsFactory_NilStatusHandlerShouldErr(t *testing.T) { +func TestNewNetworkComponentsFactory(t *testing.T) { t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } - args := componentsMock.GetNetworkFactoryArgs() - args.StatusHandler = nil - ncf, err := networkComp.NewNetworkComponentsFactory(args) - require.Nil(t, ncf) - require.Equal(t, errErd.ErrNilStatusHandler, err) + t.Run("nil StatusHandler should error", func(t *testing.T) { + t.Parallel() + + args := componentsMock.GetNetworkFactoryArgs() + args.StatusHandler = nil + ncf, err := networkComp.NewNetworkComponentsFactory(args) + require.Nil(t, ncf) + require.Equal(t, errorsMx.ErrNilStatusHandler, err) + }) + t.Run("nil Marshalizer should error", func(t *testing.T) { + t.Parallel() + + args := componentsMock.GetNetworkFactoryArgs() + args.Marshalizer = nil + ncf, err := networkComp.NewNetworkComponentsFactory(args) + require.Nil(t, ncf) + require.True(t, errors.Is(err, errorsMx.ErrNilMarshalizer)) + }) + t.Run("nil Syncer should error", func(t *testing.T) { + t.Parallel() + + args := componentsMock.GetNetworkFactoryArgs() + args.Syncer = nil + ncf, err := networkComp.NewNetworkComponentsFactory(args) + require.Nil(t, ncf) + require.Equal(t, errorsMx.ErrNilSyncTimer, err) + }) + t.Run("nil CryptoComponents should error", func(t *testing.T) { + t.Parallel() + + args := componentsMock.GetNetworkFactoryArgs() + args.CryptoComponents = nil + ncf, err := networkComp.NewNetworkComponentsFactory(args) + require.Nil(t, ncf) + require.Equal(t, errorsMx.ErrNilCryptoComponentsHolder, err) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + args := componentsMock.GetNetworkFactoryArgs() + ncf, err := networkComp.NewNetworkComponentsFactory(args) + require.NoError(t, err) + require.NotNil(t, ncf) + }) } -func TestNewNetworkComponentsFactory_NilMarshalizerShouldErr(t *testing.T) { +func TestNetworkComponentsFactory_Create(t *testing.T) { t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } - args := componentsMock.GetNetworkFactoryArgs() - args.Marshalizer = nil - ncf, err := networkComp.NewNetworkComponentsFactory(args) - require.Nil(t, ncf) - require.True(t, errors.Is(err, errErd.ErrNilMarshalizer)) -} + t.Run("NewPeersHolder fails should error", func(t *testing.T) { + t.Parallel() -func TestNewNetworkComponentsFactory_OkValsShouldWork(t *testing.T) { - t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } + args := componentsMock.GetNetworkFactoryArgs() + args.PreferredPeersSlices = []string{"invalid peer"} - args := componentsMock.GetNetworkFactoryArgs() - ncf, err := networkComp.NewNetworkComponentsFactory(args) - require.NoError(t, err) - require.NotNil(t, ncf) -} + ncf, _ := networkComp.NewNetworkComponentsFactory(args) -func TestNetworkComponentsFactory_CreateShouldErrDueToBadConfig(t 
*testing.T) { - t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } + nc, err := ncf.Create() + require.Error(t, err) + require.Nil(t, nc) + }) + t.Run("first NewLRUCache fails should error", func(t *testing.T) { + t.Parallel() - args := componentsMock.GetNetworkFactoryArgs() - args.MainConfig = config.Config{} - args.P2pConfig = p2pConfig.P2PConfig{} + args := componentsMock.GetNetworkFactoryArgs() + args.MainConfig.PeersRatingConfig.BadRatedCacheCapacity = 0 - ncf, _ := networkComp.NewNetworkComponentsFactory(args) + ncf, _ := networkComp.NewNetworkComponentsFactory(args) - nc, err := ncf.Create() - require.Error(t, err) - require.Nil(t, nc) -} + nc, err := ncf.Create() + require.Error(t, err) + require.Nil(t, nc) + }) + t.Run("second NewLRUCache fails should error", func(t *testing.T) { + t.Parallel() -func TestNetworkComponentsFactory_CreateShouldWork(t *testing.T) { - t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } + args := componentsMock.GetNetworkFactoryArgs() + args.MainConfig.PeersRatingConfig.TopRatedCacheCapacity = 0 - args := componentsMock.GetNetworkFactoryArgs() - ncf, _ := networkComp.NewNetworkComponentsFactory(args) - ncf.SetListenAddress(p2p.ListenLocalhostAddrWithIp4AndTcp) + ncf, _ := networkComp.NewNetworkComponentsFactory(args) - nc, err := ncf.Create() - require.NoError(t, err) - require.NotNil(t, nc) + nc, err := ncf.Create() + require.Error(t, err) + require.Nil(t, nc) + }) + t.Run("NewP2PAntiFloodComponents fails should error", func(t *testing.T) { + t.Parallel() + + args := componentsMock.GetNetworkFactoryArgs() + args.MainConfig.Antiflood.Enabled = true + args.MainConfig.Antiflood.SlowReacting.BlackList.NumFloodingRounds = 0 // NewP2PAntiFloodComponents fails + + ncf, _ := networkComp.NewNetworkComponentsFactory(args) + + nc, err := ncf.Create() + require.Error(t, err) + require.Nil(t, nc) + }) + t.Run("NewAntifloodDebugger fails should error", func(t *testing.T) { + t.Parallel() + + args := componentsMock.GetNetworkFactoryArgs() + args.MainConfig.Antiflood.Enabled = true + args.MainConfig.Debug.Antiflood.CacheSize = 0 // NewAntifloodDebugger fails + + ncf, _ := networkComp.NewNetworkComponentsFactory(args) + + nc, err := ncf.Create() + require.Error(t, err) + require.Nil(t, nc) + }) + t.Run("createPeerHonestyHandler fails should error", func(t *testing.T) { + t.Parallel() + + args := componentsMock.GetNetworkFactoryArgs() + args.MainConfig.PeerHonesty.Type = "invalid" // createPeerHonestyHandler fails + + ncf, _ := networkComp.NewNetworkComponentsFactory(args) + + nc, err := ncf.Create() + require.Error(t, err) + require.Nil(t, nc) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + args := componentsMock.GetNetworkFactoryArgs() + ncf, _ := networkComp.NewNetworkComponentsFactory(args) + ncf.SetListenAddress(p2p.ListenLocalhostAddrWithIp4AndTcp) + + nc, err := ncf.Create() + require.NoError(t, err) + require.NotNil(t, nc) + require.NoError(t, nc.Close()) + }) } -// ------------ Test NetworkComponents -------------------- -func TestNetworkComponents_CloseShouldWork(t *testing.T) { +func TestNetworkComponents_Close(t *testing.T) { t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } args := componentsMock.GetNetworkFactoryArgs() ncf, _ := networkComp.NewNetworkComponentsFactory(args) diff --git a/factory/processing/blockProcessorCreator.go b/factory/processing/blockProcessorCreator.go index e1cde3353e7..17010f72516 100644 --- 
a/factory/processing/blockProcessorCreator.go +++ b/factory/processing/blockProcessorCreator.go @@ -67,7 +67,8 @@ func (pcf *processComponentsFactory) newBlockProcessor( receiptsRepository mainFactory.ReceiptsRepository, missingTrieNodesNotifier common.MissingTrieNodesNotifier, ) (*blockProcessorAndVmFactories, error) { - if pcf.bootstrapComponents.ShardCoordinator().SelfId() < pcf.bootstrapComponents.ShardCoordinator().NumberOfShards() { + shardCoordinator := pcf.bootstrapComponents.ShardCoordinator() + if shardCoordinator.SelfId() < shardCoordinator.NumberOfShards() { return pcf.newShardBlockProcessor( requestHandler, forkDetector, @@ -84,7 +85,7 @@ func (pcf *processComponentsFactory) newBlockProcessor( missingTrieNodesNotifier, ) } - if pcf.bootstrapComponents.ShardCoordinator().SelfId() == core.MetachainShardId { + if shardCoordinator.SelfId() == core.MetachainShardId { return pcf.newMetaBlockProcessor( requestHandler, forkDetector, @@ -167,15 +168,18 @@ func (pcf *processComponentsFactory) newShardBlockProcessor( return nil, err } - interimProcFactory, err := shard.NewIntermediateProcessorsContainerFactory( - pcf.bootstrapComponents.ShardCoordinator(), - pcf.coreData.InternalMarshalizer(), - pcf.coreData.Hasher(), - pcf.coreData.AddressPubKeyConverter(), - pcf.data.StorageService(), - pcf.data.Datapool(), - pcf.coreData.EconomicsData(), - ) + argsFactory := shard.ArgsNewIntermediateProcessorsContainerFactory{ + ShardCoordinator: pcf.bootstrapComponents.ShardCoordinator(), + Marshalizer: pcf.coreData.InternalMarshalizer(), + Hasher: pcf.coreData.Hasher(), + PubkeyConverter: pcf.coreData.AddressPubKeyConverter(), + Store: pcf.data.StorageService(), + PoolsHolder: pcf.data.Datapool(), + EconomicsFee: pcf.coreData.EconomicsData(), + EnableEpochsHandler: pcf.coreData.EnableEpochsHandler(), + } + + interimProcFactory, err := shard.NewIntermediateProcessorsContainerFactory(argsFactory) if err != nil { return nil, err } @@ -280,6 +284,8 @@ func (pcf *processComponentsFactory) newShardBlockProcessor( ArgsParser: argsParser, ScrForwarder: scForwarder, EnableEpochsHandler: pcf.coreData.EnableEpochsHandler(), + GuardianChecker: pcf.bootstrapComponents.GuardedAccountHandler(), + TxVersionChecker: pcf.coreData.TxVersionChecker(), } transactionProcessor, err := transaction.NewTxProcessor(argsNewTxProcessor) if err != nil { @@ -438,13 +444,11 @@ func (pcf *processComponentsFactory) newShardBlockProcessor( return nil, err } - blockProcessorComponents := &blockProcessorAndVmFactories{ + return &blockProcessorAndVmFactories{ blockProcessor: blockProcessor, vmFactoryForTxSimulate: vmFactoryTxSimulator, vmFactoryForProcessing: vmFactory, - } - - return blockProcessorComponents, nil + }, nil } func (pcf *processComponentsFactory) newMetaBlockProcessor( @@ -485,15 +489,18 @@ func (pcf *processComponentsFactory) newMetaBlockProcessor( return nil, err } - interimProcFactory, err := metachain.NewIntermediateProcessorsContainerFactory( - pcf.bootstrapComponents.ShardCoordinator(), - pcf.coreData.InternalMarshalizer(), - pcf.coreData.Hasher(), - pcf.coreData.AddressPubKeyConverter(), - pcf.data.StorageService(), - pcf.data.Datapool(), - pcf.coreData.EconomicsData(), - ) + argsFactory := metachain.ArgsNewIntermediateProcessorsContainerFactory{ + ShardCoordinator: pcf.bootstrapComponents.ShardCoordinator(), + Marshalizer: pcf.coreData.InternalMarshalizer(), + Hasher: pcf.coreData.Hasher(), + PubkeyConverter: pcf.coreData.AddressPubKeyConverter(), + Store: pcf.data.StorageService(), + PoolsHolder: 
pcf.data.Datapool(), + EconomicsFee: pcf.coreData.EconomicsData(), + EnableEpochsHandler: pcf.coreData.EnableEpochsHandler(), + } + + interimProcFactory, err := metachain.NewIntermediateProcessorsContainerFactory(argsFactory) if err != nil { return nil, err } @@ -584,6 +591,8 @@ func (pcf *processComponentsFactory) newMetaBlockProcessor( TxTypeHandler: txTypeHandler, EconomicsFee: pcf.coreData.EconomicsData(), EnableEpochsHandler: pcf.coreData.EnableEpochsHandler(), + GuardianChecker: pcf.bootstrapComponents.GuardedAccountHandler(), + TxVersionChecker: pcf.coreData.TxVersionChecker(), } transactionProcessor, err := transaction.NewMetaTxProcessor(argsNewMetaTxProcessor) @@ -958,15 +967,18 @@ func (pcf *processComponentsFactory) createShardTxSimulatorProcessor( return nil, err } - interimProcFactory, err := shard.NewIntermediateProcessorsContainerFactory( - pcf.bootstrapComponents.ShardCoordinator(), - pcf.coreData.InternalMarshalizer(), - pcf.coreData.Hasher(), - pcf.coreData.AddressPubKeyConverter(), - disabled.NewChainStorer(), - pcf.data.Datapool(), - &processDisabled.FeeHandler{}, - ) + argsFactory := shard.ArgsNewIntermediateProcessorsContainerFactory{ + ShardCoordinator: pcf.bootstrapComponents.ShardCoordinator(), + Marshalizer: pcf.coreData.InternalMarshalizer(), + Hasher: pcf.coreData.Hasher(), + PubkeyConverter: pcf.coreData.AddressPubKeyConverter(), + Store: disabled.NewChainStorer(), + PoolsHolder: pcf.data.Datapool(), + EconomicsFee: &processDisabled.FeeHandler{}, + EnableEpochsHandler: pcf.coreData.EnableEpochsHandler(), + } + + interimProcFactory, err := shard.NewIntermediateProcessorsContainerFactory(argsFactory) if err != nil { return nil, err } @@ -1051,15 +1063,18 @@ func (pcf *processComponentsFactory) createMetaTxSimulatorProcessor( scProcArgs smartContract.ArgsNewSmartContractProcessor, txTypeHandler process.TxTypeHandler, ) (process.VirtualMachinesContainerFactory, error) { - interimProcFactory, err := shard.NewIntermediateProcessorsContainerFactory( - pcf.bootstrapComponents.ShardCoordinator(), - pcf.coreData.InternalMarshalizer(), - pcf.coreData.Hasher(), - pcf.coreData.AddressPubKeyConverter(), - disabled.NewChainStorer(), - pcf.data.Datapool(), - &processDisabled.FeeHandler{}, - ) + argsFactory := metachain.ArgsNewIntermediateProcessorsContainerFactory{ + ShardCoordinator: pcf.bootstrapComponents.ShardCoordinator(), + Marshalizer: pcf.coreData.InternalMarshalizer(), + Hasher: pcf.coreData.Hasher(), + PubkeyConverter: pcf.coreData.AddressPubKeyConverter(), + Store: disabled.NewChainStorer(), + PoolsHolder: pcf.data.Datapool(), + EconomicsFee: &processDisabled.FeeHandler{}, + EnableEpochsHandler: pcf.coreData.EnableEpochsHandler(), + } + + interimProcFactory, err := metachain.NewIntermediateProcessorsContainerFactory(argsFactory) if err != nil { return nil, err } @@ -1130,6 +1145,8 @@ func (pcf *processComponentsFactory) createMetaTxSimulatorProcessor( TxTypeHandler: txTypeHandler, EconomicsFee: &processDisabled.FeeHandler{}, EnableEpochsHandler: pcf.coreData.EnableEpochsHandler(), + TxVersionChecker: pcf.coreData.TxVersionChecker(), + GuardianChecker: pcf.bootstrapComponents.GuardedAccountHandler(), } txSimulatorProcessorArgs.TransactionProcessor, err = transaction.NewMetaTxProcessor(argsNewMetaTx) @@ -1274,6 +1291,7 @@ func (pcf *processComponentsFactory) createBuiltInFunctionContainer( ShardCoordinator: pcf.bootstrapComponents.ShardCoordinator(), EpochNotifier: pcf.coreData.EpochNotifier(), EnableEpochsHandler: pcf.coreData.EnableEpochsHandler(), + 
GuardedAccountHandler: pcf.bootstrapComponents.GuardedAccountHandler(), AutomaticCrawlerAddresses: convertedAddresses, MaxNumNodesInTransferRole: pcf.config.BuiltInFunctions.MaxNumAddressesInTransferRole, } diff --git a/factory/processing/blockProcessorCreator_test.go b/factory/processing/blockProcessorCreator_test.go index 774962cf943..9de2631ac17 100644 --- a/factory/processing/blockProcessorCreator_test.go +++ b/factory/processing/blockProcessorCreator_test.go @@ -31,9 +31,6 @@ import ( func Test_newBlockProcessorCreatorForShard(t *testing.T) { t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } shardCoordinator := mock.NewMultiShardsCoordinatorMock(2) pcf, err := processComp.NewProcessComponentsFactory(componentsMock.GetProcessComponentsFactoryArgs(shardCoordinator)) @@ -69,9 +66,6 @@ func Test_newBlockProcessorCreatorForShard(t *testing.T) { func Test_newBlockProcessorCreatorForMeta(t *testing.T) { t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } coreComponents := componentsMock.GetCoreComponents() shardC := mock.NewMultiShardsCoordinatorMock(1) diff --git a/factory/processing/export_test.go b/factory/processing/export_test.go index 94cb332031b..a4dde223b19 100644 --- a/factory/processing/export_test.go +++ b/factory/processing/export_test.go @@ -1,12 +1,9 @@ package processing import ( - "github.com/multiversx/mx-chain-core-go/data" - "github.com/multiversx/mx-chain-core-go/data/outport" "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/epochStart" "github.com/multiversx/mx-chain-go/factory" - "github.com/multiversx/mx-chain-go/genesis" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/process/txsimulator" ) @@ -50,8 +47,3 @@ func (pcf *processComponentsFactory) NewBlockProcessor( return blockProcessorComponents.blockProcessor, blockProcessorComponents.vmFactoryForTxSimulate, nil } - -// IndexGenesisBlocks - -func (pcf *processComponentsFactory) IndexGenesisBlocks(genesisBlocks map[uint32]data.HeaderHandler, indexingData map[uint32]*genesis.IndexingData) error { - return pcf.indexGenesisBlocks(genesisBlocks, indexingData, map[string]*outport.AlteredAccount{}) -} diff --git a/factory/processing/processComponents.go b/factory/processing/processComponents.go index 250aadb0621..52a92173118 100644 --- a/factory/processing/processComponents.go +++ b/factory/processing/processComponents.go @@ -16,6 +16,7 @@ import ( "github.com/multiversx/mx-chain-core-go/data/outport" nodeFactory "github.com/multiversx/mx-chain-go/cmd/node/factory" "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/common/errChan" "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/consensus" "github.com/multiversx/mx-chain-go/dataRetriever" @@ -31,7 +32,7 @@ import ( "github.com/multiversx/mx-chain-go/epochStart/metachain" "github.com/multiversx/mx-chain-go/epochStart/notifier" "github.com/multiversx/mx-chain-go/epochStart/shardchain" - errErd "github.com/multiversx/mx-chain-go/errors" + errorsMx "github.com/multiversx/mx-chain-go/errors" "github.com/multiversx/mx-chain-go/factory" mainFactory "github.com/multiversx/mx-chain-go/factory" "github.com/multiversx/mx-chain-go/factory/disabled" @@ -739,6 +740,12 @@ func (pcf *processComponentsFactory) newValidatorStatisticsProcessor() (process. 
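Note on the change below: the chained pcf.data.Blockchain().GetGenesisHeader().GetNonce() lookup is replaced by fetching the genesis header once and guarding it with check.IfNil, returning errorsMx.ErrGenesisBlockNotInitialized when the header is missing. The guard matters because a Go interface wrapping a typed nil pointer does not compare equal to nil, so a plain == nil test would pass and the later GetNonce() call would panic. A minimal, self-contained sketch of those semantics (HeaderHandler here is a hypothetical stand-in, not the real coreData interface):

package main

import "fmt"

// HeaderHandler is a hypothetical stand-in for the header interface
// guarded in the change below.
type HeaderHandler interface {
	GetNonce() uint64
	IsInterfaceNil() bool
}

type header struct{ nonce uint64 }

func (h *header) GetNonce() uint64     { return h.nonce }
func (h *header) IsInterfaceNil() bool { return h == nil }

// ifNil mirrors the behaviour of core/check.IfNil: it also treats an
// interface holding a typed nil pointer as nil.
func ifNil(h HeaderHandler) bool {
	return h == nil || h.IsInterfaceNil()
}

func main() {
	var typedNil *header
	var genesisHeader HeaderHandler = typedNil

	fmt.Println(genesisHeader == nil) // false: non-nil type, nil value
	fmt.Println(ifNil(genesisHeader)) // true: caught before GetNonce panics
}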
if hardforkConfig.AfterHardFork { ratingEnabledEpoch = hardforkConfig.StartEpoch + hardforkConfig.ValidatorGracePeriodInEpochs } + + genesisHeader := pcf.data.Blockchain().GetGenesisHeader() + if check.IfNil(genesisHeader) { + return nil, errorsMx.ErrGenesisBlockNotInitialized + } + arguments := peer.ArgValidatorStatisticsProcessor{ PeerAdapter: pcf.state.PeerAccounts(), PubkeyConv: pcf.coreData.ValidatorPubKeyConverter(), @@ -753,20 +760,16 @@ func (pcf *processComponentsFactory) newValidatorStatisticsProcessor() (process. RewardsHandler: pcf.coreData.EconomicsData(), NodesSetup: pcf.coreData.GenesisNodesSetup(), RatingEnableEpoch: ratingEnabledEpoch, - GenesisNonce: pcf.data.Blockchain().GetGenesisHeader().GetNonce(), + GenesisNonce: genesisHeader.GetNonce(), EnableEpochsHandler: pcf.coreData.EnableEpochsHandler(), } - validatorStatisticsProcessor, err := peer.NewValidatorStatisticsProcessor(arguments) - if err != nil { - return nil, err - } - - return validatorStatisticsProcessor, nil + return peer.NewValidatorStatisticsProcessor(arguments) } func (pcf *processComponentsFactory) newEpochStartTrigger(requestHandler epochStart.RequestHandler) (epochStart.TriggerHandler, error) { - if pcf.bootstrapComponents.ShardCoordinator().SelfId() < pcf.bootstrapComponents.ShardCoordinator().NumberOfShards() { + shardCoordinator := pcf.bootstrapComponents.ShardCoordinator() + if shardCoordinator.SelfId() < shardCoordinator.NumberOfShards() { argsHeaderValidator := block.ArgsHeaderValidator{ Hasher: pcf.coreData.Hasher(), Marshalizer: pcf.coreData.InternalMarshalizer(), @@ -804,20 +807,20 @@ func (pcf *processComponentsFactory) newEpochStartTrigger(requestHandler epochSt AppStatusHandler: pcf.statusCoreComponents.AppStatusHandler(), EnableEpochsHandler: pcf.coreData.EnableEpochsHandler(), } - epochStartTrigger, err := shardchain.NewEpochStartTrigger(argEpochStart) - if err != nil { - return nil, errors.New("error creating new start of epoch trigger" + err.Error()) - } - - return epochStartTrigger, nil + return shardchain.NewEpochStartTrigger(argEpochStart) } - if pcf.bootstrapComponents.ShardCoordinator().SelfId() == core.MetachainShardId { + if shardCoordinator.SelfId() == core.MetachainShardId { + genesisHeader := pcf.data.Blockchain().GetGenesisHeader() + if check.IfNil(genesisHeader) { + return nil, errorsMx.ErrGenesisBlockNotInitialized + } + argEpochStart := &metachain.ArgsNewMetaEpochStartTrigger{ GenesisTime: time.Unix(pcf.coreData.GenesisNodesSetup().GetStartTime(), 0), Settings: &pcf.config.EpochStartConfig, Epoch: pcf.bootstrapComponents.EpochBootstrapParams().Epoch(), - EpochStartRound: pcf.data.Blockchain().GetGenesisHeader().GetRound(), + EpochStartRound: genesisHeader.GetRound(), EpochStartNotifier: pcf.coreData.EpochStartNotifierWithConfirm(), Storage: pcf.data.StorageService(), Marshalizer: pcf.coreData.InternalMarshalizer(), @@ -825,12 +828,8 @@ func (pcf *processComponentsFactory) newEpochStartTrigger(requestHandler epochSt AppStatusHandler: pcf.statusCoreComponents.AppStatusHandler(), DataPool: pcf.data.Datapool(), } - epochStartTrigger, err := metachain.NewEpochStartTrigger(argEpochStart) - if err != nil { - return nil, errors.New("error creating new start of epoch trigger" + err.Error()) - } - return epochStartTrigger, nil + return metachain.NewEpochStartTrigger(argEpochStart) } return nil, errors.New("error creating new start of epoch trigger because of invalid shard id") @@ -892,16 +891,16 @@ func (pcf *processComponentsFactory) indexAndReturnGenesisAccounts() (map[string 
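Note on the change below: the trie-iterator error channel moves from a raw make(chan error, 1) to the errChan wrapper, read via ReadFromChanNonBlocking once GetAllLeaves has run; the error paths also now return an empty (non-nil) map instead of nil. A sketch of the wrapper pattern, assuming write-once buffered-channel semantics (the internals below are illustrative, not the actual common/errChan implementation):

package main

import "fmt"

// errChanWrapper sketches the pattern behind errChan.NewErrChanWrapper:
// a buffered channel that keeps at most one error and never blocks the
// goroutines that report into it. Internals are assumed, not copied.
type errChanWrapper struct {
	ch chan error
}

func newErrChanWrapper() *errChanWrapper {
	return &errChanWrapper{ch: make(chan error, 1)}
}

// WriteInChanNonBlocking stores the first error and silently drops any
// later ones, so trie-iteration workers never stall on a full channel.
func (e *errChanWrapper) WriteInChanNonBlocking(err error) {
	select {
	case e.ch <- err:
	default:
	}
}

// ReadFromChanNonBlocking returns the stored error, or nil when the
// channel is empty, without ever blocking the caller.
func (e *errChanWrapper) ReadFromChanNonBlocking() error {
	select {
	case err := <-e.ch:
		return err
	default:
		return nil
	}
}

func main() {
	ec := newErrChanWrapper()
	ec.WriteInChanNonBlocking(fmt.Errorf("leaf decode failed"))
	ec.WriteInChanNonBlocking(fmt.Errorf("second error, dropped"))

	fmt.Println(ec.ReadFromChanNonBlocking()) // leaf decode failed
	fmt.Println(ec.ReadFromChanNonBlocking()) // <nil>
}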
rootHash, err := pcf.state.AccountsAdapter().RootHash() if err != nil { - return nil, err + return map[string]*outport.AlteredAccount{}, err } leavesChannels := &common.TrieIteratorChannels{ LeavesChan: make(chan core.KeyValueHolder, common.TrieLeavesChannelDefaultCapacity), - ErrChan: make(chan error, 1), + ErrChan: errChan.NewErrChanWrapper(), } err = pcf.state.AccountsAdapter().GetAllLeaves(leavesChannels, context.Background(), rootHash) if err != nil { - return nil, err + return map[string]*outport.AlteredAccount{}, err } genesisAccounts := make(map[string]*outport.AlteredAccount, 0) @@ -914,7 +913,7 @@ func (pcf *processComponentsFactory) indexAndReturnGenesisAccounts() (map[string encodedAddress, err := pcf.coreData.AddressPubKeyConverter().Encode(userAccount.AddressBytes()) if err != nil { - return nil, err + return map[string]*outport.AlteredAccount{}, err } genesisAccounts[encodedAddress] = &outport.AlteredAccount{ @@ -928,9 +927,9 @@ func (pcf *processComponentsFactory) indexAndReturnGenesisAccounts() (map[string } } - err = common.GetErrorFromChanNonBlocking(leavesChannels.ErrChan) + err = leavesChannels.ErrChan.ReadFromChanNonBlocking() if err != nil { - return nil, err + return map[string]*outport.AlteredAccount{}, err } shardID := pcf.bootstrapComponents.ShardCoordinator().SelfId() @@ -957,12 +956,7 @@ func (pcf *processComponentsFactory) setGenesisHeader(genesisBlocks map[uint32]d return errors.New("genesis block does not exist") } - err := pcf.data.Blockchain().SetGenesisHeader(genesisBlock) - if err != nil { - return err - } - - return nil + return pcf.data.Blockchain().SetGenesisHeader(genesisBlock) } func (pcf *processComponentsFactory) prepareGenesisBlock( @@ -986,12 +980,7 @@ func (pcf *processComponentsFactory) prepareGenesisBlock( pcf.data.Blockchain().SetGenesisHeaderHash(genesisBlockHash) nonceToByteSlice := pcf.coreData.Uint64ByteSliceConverter().ToByteSlice(genesisBlock.GetNonce()) - err = pcf.saveGenesisHeaderToStorage(genesisBlock, genesisBlockHash, nonceToByteSlice) - if err != nil { - return err - } - - return nil + return pcf.saveGenesisHeaderToStorage(genesisBlock, genesisBlockHash, nonceToByteSlice) } func (pcf *processComponentsFactory) saveGenesisHeaderToStorage( @@ -1185,17 +1174,12 @@ func (pcf *processComponentsFactory) indexGenesisBlocks( return err } - err = pcf.saveAlteredGenesisHeaderToStorage( + return pcf.saveAlteredGenesisHeaderToStorage( genesisBlockHeader, genesisBlockHash, genesisBody, intraShardMiniBlocks, txsPoolPerShard) - if err != nil { - return err - } - - return nil } func (pcf *processComponentsFactory) saveAlteredGenesisHeaderToStorage( @@ -1284,13 +1268,14 @@ func (pcf *processComponentsFactory) newBlockTracker( requestHandler process.RequestHandler, genesisBlocks map[uint32]data.HeaderHandler, ) (process.BlockTracker, error) { + shardCoordinator := pcf.bootstrapComponents.ShardCoordinator() argBaseTracker := track.ArgBaseTracker{ Hasher: pcf.coreData.Hasher(), HeaderValidator: headerValidator, Marshalizer: pcf.coreData.InternalMarshalizer(), RequestHandler: requestHandler, RoundHandler: pcf.coreData.RoundHandler(), - ShardCoordinator: pcf.bootstrapComponents.ShardCoordinator(), + ShardCoordinator: shardCoordinator, Store: pcf.data.StorageService(), StartHeaders: genesisBlocks, PoolsHolder: pcf.data.Datapool(), @@ -1298,7 +1283,7 @@ func (pcf *processComponentsFactory) newBlockTracker( FeeHandler: pcf.coreData.EconomicsData(), } - if pcf.bootstrapComponents.ShardCoordinator().SelfId() < 
pcf.bootstrapComponents.ShardCoordinator().NumberOfShards() { + if shardCoordinator.SelfId() < shardCoordinator.NumberOfShards() { arguments := track.ArgShardTracker{ ArgBaseTracker: argBaseTracker, } @@ -1306,7 +1291,7 @@ func (pcf *processComponentsFactory) newBlockTracker( return track.NewShardBlockTrack(arguments) } - if pcf.bootstrapComponents.ShardCoordinator().SelfId() == core.MetachainShardId { + if shardCoordinator.SelfId() == core.MetachainShardId { arguments := track.ArgMetaTracker{ ArgBaseTracker: argBaseTracker, } @@ -1400,11 +1385,8 @@ func (pcf *processComponentsFactory) newMetaResolverContainerFactory( PreferredPeersHolder: pcf.network.PreferredPeersHolderHandler(), PayloadValidator: payloadValidator, } - resolversContainerFactory, err := resolverscontainer.NewMetaResolversContainerFactory(resolversContainerFactoryArgs) - if err != nil { - return nil, err - } - return resolversContainerFactory, nil + + return resolverscontainer.NewMetaResolversContainerFactory(resolversContainerFactoryArgs) } func (pcf *processComponentsFactory) newRequestersContainerFactory( @@ -1416,9 +1398,10 @@ func (pcf *processComponentsFactory) newRequestersContainerFactory( return pcf.newStorageRequesters() } + shardCoordinator := pcf.bootstrapComponents.ShardCoordinator() requestersContainerFactoryArgs := requesterscontainer.FactoryArgs{ RequesterConfig: pcf.config.Requesters, - ShardCoordinator: pcf.bootstrapComponents.ShardCoordinator(), + ShardCoordinator: shardCoordinator, Messenger: pcf.network.NetworkMessenger(), Marshaller: pcf.coreData.InternalMarshalizer(), Uint64ByteSliceConverter: pcf.coreData.Uint64ByteSliceConverter(), @@ -1429,10 +1412,10 @@ func (pcf *processComponentsFactory) newRequestersContainerFactory( SizeCheckDelta: pcf.config.Marshalizer.SizeCheckDelta, } - if pcf.bootstrapComponents.ShardCoordinator().SelfId() < pcf.bootstrapComponents.ShardCoordinator().NumberOfShards() { + if shardCoordinator.SelfId() < shardCoordinator.NumberOfShards() { return requesterscontainer.NewShardRequestersContainerFactory(requestersContainerFactoryArgs) } - if pcf.bootstrapComponents.ShardCoordinator().SelfId() == core.MetachainShardId { + if shardCoordinator.SelfId() == core.MetachainShardId { return requesterscontainer.NewMetaRequestersContainerFactory(requestersContainerFactoryArgs) } @@ -1448,7 +1431,8 @@ func (pcf *processComponentsFactory) newInterceptorContainerFactory( peerShardMapper *networksharding.PeerShardMapper, hardforkTrigger factory.HardforkTrigger, ) (process.InterceptorsContainerFactory, process.TimeCacher, error) { - if pcf.bootstrapComponents.ShardCoordinator().SelfId() < pcf.bootstrapComponents.ShardCoordinator().NumberOfShards() { + shardCoordinator := pcf.bootstrapComponents.ShardCoordinator() + if shardCoordinator.SelfId() < shardCoordinator.NumberOfShards() { return pcf.newShardInterceptorContainerFactory( headerSigVerifier, headerIntegrityVerifier, @@ -1459,7 +1443,7 @@ func (pcf *processComponentsFactory) newInterceptorContainerFactory( hardforkTrigger, ) } - if pcf.bootstrapComponents.ShardCoordinator().SelfId() == core.MetachainShardId { + if shardCoordinator.SelfId() == core.MetachainShardId { return pcf.newMetaInterceptorContainerFactory( headerSigVerifier, headerIntegrityVerifier, @@ -1504,6 +1488,7 @@ func (pcf *processComponentsFactory) newStorageRequesters() (dataRetriever.Reque CurrentEpoch: pcf.bootstrapComponents.EpochBootstrapParams().Epoch(), StorageType: storageFactory.ProcessStorageService, CreateTrieEpochRootHashStorer: false, + NodeProcessingMode: 
common.GetNodeProcessingMode(&pcf.importDBConfig), SnapshotsEnabled: pcf.snapshotsEnabled, ManagedPeersHolder: pcf.crypto.ManagedPeersHolder(), }, @@ -1560,12 +1545,8 @@ func (pcf *processComponentsFactory) createStorageRequestersForMeta( ChanGracefullyClose: pcf.coreData.ChanStopNodeProcess(), SnapshotsEnabled: pcf.snapshotsEnabled, } - requestersContainerFactory, err := storagerequesterscontainer.NewMetaRequestersContainerFactory(requestersContainerFactoryArgs) - if err != nil { - return nil, err - } - return requestersContainerFactory, nil + return storagerequesterscontainer.NewMetaRequestersContainerFactory(requestersContainerFactoryArgs) } func (pcf *processComponentsFactory) createStorageRequestersForShard( @@ -1593,12 +1574,8 @@ func (pcf *processComponentsFactory) createStorageRequestersForShard( ChanGracefullyClose: pcf.coreData.ChanStopNodeProcess(), SnapshotsEnabled: pcf.snapshotsEnabled, } - requestersContainerFactory, err := storagerequesterscontainer.NewShardRequestersContainerFactory(requestersContainerFactoryArgs) - if err != nil { - return nil, err - } - return requestersContainerFactory, nil + return storagerequesterscontainer.NewShardRequestersContainerFactory(requestersContainerFactoryArgs) } func (pcf *processComponentsFactory) newShardInterceptorContainerFactory( @@ -1701,10 +1678,11 @@ func (pcf *processComponentsFactory) newForkDetector( headerBlackList process.TimeCacher, blockTracker process.BlockTracker, ) (process.ForkDetector, error) { - if pcf.bootstrapComponents.ShardCoordinator().SelfId() < pcf.bootstrapComponents.ShardCoordinator().NumberOfShards() { + shardCoordinator := pcf.bootstrapComponents.ShardCoordinator() + if shardCoordinator.SelfId() < shardCoordinator.NumberOfShards() { return sync.NewShardForkDetector(pcf.coreData.RoundHandler(), headerBlackList, blockTracker, pcf.coreData.GenesisNodesSetup().GetStartTime()) } - if pcf.bootstrapComponents.ShardCoordinator().SelfId() == core.MetachainShardId { + if shardCoordinator.SelfId() == core.MetachainShardId { return sync.NewMetaForkDetector(pcf.coreData.RoundHandler(), headerBlackList, blockTracker, pcf.coreData.GenesisNodesSetup().GetStartTime()) } @@ -1847,12 +1825,7 @@ func createNetworkShardingCollector( NodesCoordinator: nodesCoordinator, PreferredPeersHolder: preferredPeersHolder, } - psm, err := networksharding.NewPeerShardMapper(arg) - if err != nil { - return nil, err - } - - return psm, nil + return networksharding.NewPeerShardMapper(arg) } func createCache(cacheConfig config.CacheConfig) (storage.Cacher, error) { @@ -1862,85 +1835,91 @@ func createCache(cacheConfig config.CacheConfig) (storage.Cacher, error) { func checkProcessComponentsArgs(args ProcessComponentsFactoryArgs) error { baseErrMessage := "error creating process components" if check.IfNil(args.AccountsParser) { - return fmt.Errorf("%s: %w", baseErrMessage, errErd.ErrNilAccountsParser) + return fmt.Errorf("%s: %w", baseErrMessage, errorsMx.ErrNilAccountsParser) } - if check.IfNil(args.SmartContractParser) { - return fmt.Errorf("%s: %w", baseErrMessage, errErd.ErrNilSmartContractParser) + if check.IfNil(args.GasSchedule) { + return fmt.Errorf("%s: %w", baseErrMessage, errorsMx.ErrNilGasSchedule) } - if args.GasSchedule == nil { - return fmt.Errorf("%s: %w", baseErrMessage, errErd.ErrNilGasSchedule) + if check.IfNil(args.Data) { + return fmt.Errorf("%s: %w", baseErrMessage, errorsMx.ErrNilDataComponentsHolder) } - if check.IfNil(args.NodesCoordinator) { - return fmt.Errorf("%s: %w", baseErrMessage, errErd.ErrNilNodesCoordinator) + if 
check.IfNil(args.Data.Blockchain()) { + return fmt.Errorf("%s: %w", baseErrMessage, errorsMx.ErrNilBlockChainHandler) } - if check.IfNil(args.Data) { - return fmt.Errorf("%s: %w", baseErrMessage, errErd.ErrNilDataComponentsHolder) + if check.IfNil(args.Data.Datapool()) { + return fmt.Errorf("%s: %w", baseErrMessage, errorsMx.ErrNilDataPoolsHolder) + } + if check.IfNil(args.Data.StorageService()) { + return fmt.Errorf("%s: %w", baseErrMessage, errorsMx.ErrNilStorageService) } if check.IfNil(args.CoreData) { - return fmt.Errorf("%s: %w", baseErrMessage, errErd.ErrNilCoreComponentsHolder) + return fmt.Errorf("%s: %w", baseErrMessage, errorsMx.ErrNilCoreComponentsHolder) } - if args.CoreData.EconomicsData() == nil { - return fmt.Errorf("%s: %w", baseErrMessage, errErd.ErrNilEconomicsData) + if check.IfNil(args.CoreData.EconomicsData()) { + return fmt.Errorf("%s: %w", baseErrMessage, errorsMx.ErrNilEconomicsData) } - if check.IfNil(args.CoreData.RoundHandler()) { - return fmt.Errorf("%s: %w", baseErrMessage, errErd.ErrNilRoundHandler) + if check.IfNil(args.CoreData.GenesisNodesSetup()) { + return fmt.Errorf("%s: %w", baseErrMessage, errorsMx.ErrNilGenesisNodesSetupHandler) } - if check.IfNil(args.Crypto) { - return fmt.Errorf("%s: %w", baseErrMessage, errErd.ErrNilCryptoComponentsHolder) + if check.IfNil(args.CoreData.AddressPubKeyConverter()) { + return fmt.Errorf("%s: %w", baseErrMessage, errorsMx.ErrNilAddressPublicKeyConverter) } - if check.IfNil(args.State) { - return fmt.Errorf("%s: %w", baseErrMessage, errErd.ErrNilStateComponentsHolder) + if check.IfNil(args.CoreData.EpochNotifier()) { + return fmt.Errorf("%s: %w", baseErrMessage, errorsMx.ErrNilEpochNotifier) } - if check.IfNil(args.Network) { - return fmt.Errorf("%s: %w", baseErrMessage, errErd.ErrNilNetworkComponentsHolder) + if check.IfNil(args.CoreData.ValidatorPubKeyConverter()) { + return fmt.Errorf("%s: %w", baseErrMessage, errorsMx.ErrNilPubKeyConverter) } - if check.IfNil(args.RequestedItemsHandler) { - return fmt.Errorf("%s: %w", baseErrMessage, errErd.ErrNilRequestedItemHandler) + if check.IfNil(args.CoreData.InternalMarshalizer()) { + return fmt.Errorf("%s: %w", baseErrMessage, errorsMx.ErrNilInternalMarshalizer) } - if check.IfNil(args.WhiteListHandler) { - return fmt.Errorf("%s: %w", baseErrMessage, errErd.ErrNilWhiteListHandler) + if check.IfNil(args.CoreData.Uint64ByteSliceConverter()) { + return fmt.Errorf("%s: %w", baseErrMessage, errorsMx.ErrNilUint64ByteSliceConverter) } - if check.IfNil(args.WhiteListerVerifiedTxs) { - return fmt.Errorf("%s: %w", baseErrMessage, errErd.ErrNilWhiteListVerifiedTxs) + if check.IfNil(args.Crypto) { + return fmt.Errorf("%s: %w", baseErrMessage, errorsMx.ErrNilCryptoComponentsHolder) } - if check.IfNil(args.CoreData.EpochStartNotifierWithConfirm()) { - return fmt.Errorf("%s: %w", baseErrMessage, errErd.ErrNilEpochStartNotifier) + if check.IfNil(args.Crypto.BlockSignKeyGen()) { + return fmt.Errorf("%s: %w", baseErrMessage, errorsMx.ErrNilBlockSignKeyGen) } - if check.IfNil(args.CoreData.Rater()) { - return fmt.Errorf("%s: %w", baseErrMessage, errErd.ErrNilRater) + if check.IfNil(args.State) { + return fmt.Errorf("%s: %w", baseErrMessage, errorsMx.ErrNilStateComponentsHolder) } - if check.IfNil(args.CoreData.RatingsData()) { - return fmt.Errorf("%s: %w", baseErrMessage, errErd.ErrNilRatingData) + if check.IfNil(args.State.AccountsAdapter()) { + return fmt.Errorf("%s: %w", baseErrMessage, errorsMx.ErrNilAccountsAdapter) } - if check.IfNil(args.CoreData.ValidatorPubKeyConverter()) { - return 
fmt.Errorf("%s: %w", baseErrMessage, errErd.ErrNilPubKeyConverter) + if check.IfNil(args.Network) { + return fmt.Errorf("%s: %w", baseErrMessage, errorsMx.ErrNilNetworkComponentsHolder) } - if args.SystemSCConfig == nil { - return fmt.Errorf("%s: %w", baseErrMessage, errErd.ErrNilSystemSCConfig) + if check.IfNil(args.Network.NetworkMessenger()) { + return fmt.Errorf("%s: %w", baseErrMessage, errorsMx.ErrNilMessenger) } - if check.IfNil(args.CoreData.EpochNotifier()) { - return fmt.Errorf("%s: %w", baseErrMessage, errErd.ErrNilEpochNotifier) + if check.IfNil(args.Network.InputAntiFloodHandler()) { + return fmt.Errorf("%s: %w", baseErrMessage, errorsMx.ErrNilInputAntiFloodHandler) } - if check.IfNil(args.CoreData.EnableEpochsHandler()) { - return fmt.Errorf("%s: %w", baseErrMessage, errErd.ErrNilEnableEpochsHandler) + if args.SystemSCConfig == nil { + return fmt.Errorf("%s: %w", baseErrMessage, errorsMx.ErrNilSystemSCConfig) } if check.IfNil(args.BootstrapComponents) { - return fmt.Errorf("%s: %w", baseErrMessage, errErd.ErrNilBootstrapComponentsHolder) + return fmt.Errorf("%s: %w", baseErrMessage, errorsMx.ErrNilBootstrapComponentsHolder) } if check.IfNil(args.BootstrapComponents.ShardCoordinator()) { - return fmt.Errorf("%s: %w", baseErrMessage, errErd.ErrNilShardCoordinator) + return fmt.Errorf("%s: %w", baseErrMessage, errorsMx.ErrNilShardCoordinator) + } + if check.IfNil(args.BootstrapComponents.EpochBootstrapParams()) { + return fmt.Errorf("%s: %w", baseErrMessage, errorsMx.ErrNilBootstrapParamsHandler) } if check.IfNil(args.StatusComponents) { - return fmt.Errorf("%s: %w", baseErrMessage, errErd.ErrNilStatusComponentsHolder) + return fmt.Errorf("%s: %w", baseErrMessage, errorsMx.ErrNilStatusComponentsHolder) } - if check.IfNil(args.StatusCoreComponents) { - return fmt.Errorf("%s: %w", baseErrMessage, errErd.ErrNilStatusCoreComponents) + if check.IfNil(args.StatusComponents.OutportHandler()) { + return fmt.Errorf("%s: %w", baseErrMessage, errorsMx.ErrNilOutportHandler) } - if check.IfNil(args.StatusCoreComponents.AppStatusHandler()) { - return fmt.Errorf("%s: %w", baseErrMessage, errErd.ErrNilAppStatusHandler) + if check.IfNil(args.HistoryRepo) { + return fmt.Errorf("%s: %w", baseErrMessage, errorsMx.ErrNilHistoryRepository) } - if check.IfNil(args.Crypto.ManagedPeersHolder()) { - return fmt.Errorf("%s: %w", baseErrMessage, errErd.ErrNilManagedPeersHolder) + if check.IfNil(args.StatusCoreComponents) { + return fmt.Errorf("%s: %w", baseErrMessage, errorsMx.ErrNilStatusCoreComponents) } return nil diff --git a/factory/processing/processComponentsHandler_test.go b/factory/processing/processComponentsHandler_test.go index 92f28bd3d38..534fef02ec8 100644 --- a/factory/processing/processComponentsHandler_test.go +++ b/factory/processing/processComponentsHandler_test.go @@ -3,164 +3,175 @@ package processing_test import ( "testing" - "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" - "github.com/multiversx/mx-chain-go/factory/mock" + errorsMx "github.com/multiversx/mx-chain-go/errors" + "github.com/multiversx/mx-chain-go/factory" processComp "github.com/multiversx/mx-chain-go/factory/processing" - componentsMock "github.com/multiversx/mx-chain-go/testscommon/components" "github.com/stretchr/testify/require" ) -// ------------ Test TestManagedProcessComponents -------------------- -func TestManagedProcessComponents_CreateWithInvalidArgsShouldErr(t *testing.T) { +func TestNewManagedProcessComponents(t *testing.T) { t.Parallel() - if 
testing.Short() { - t.Skip("this is not a short test") - } - - shardCoordinator := mock.NewMultiShardsCoordinatorMock(2) - processArgs := componentsMock.GetProcessComponentsFactoryArgs(shardCoordinator) - _ = processArgs.CoreData.SetInternalMarshalizer(nil) - processComponentsFactory, _ := processComp.NewProcessComponentsFactory(processArgs) - managedProcessComponents, err := processComp.NewManagedProcessComponents(processComponentsFactory) - require.NoError(t, err) - err = managedProcessComponents.Create() - require.Error(t, err) - require.Nil(t, managedProcessComponents.NodesCoordinator()) + + t.Run("nil factory should error", func(t *testing.T) { + t.Parallel() + + managedProcessComponents, err := processComp.NewManagedProcessComponents(nil) + require.Equal(t, errorsMx.ErrNilProcessComponentsFactory, err) + require.Nil(t, managedProcessComponents) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + processComponentsFactory, _ := processComp.NewProcessComponentsFactory(createMockProcessComponentsFactoryArgs()) + managedProcessComponents, err := processComp.NewManagedProcessComponents(processComponentsFactory) + require.NoError(t, err) + require.NotNil(t, managedProcessComponents) + }) } -func TestManagedProcessComponents_CreateShouldWork(t *testing.T) { +func TestManagedProcessComponents_Create(t *testing.T) { t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } - - coreComponents := componentsMock.GetCoreComponents() - shardCoordinator := mock.NewMultiShardsCoordinatorMock(1) - shardCoordinator.SelfIDCalled = func() uint32 { - return core.MetachainShardId - } - shardCoordinator.ComputeIdCalled = func(address []byte) uint32 { - if core.IsSmartContractOnMetachain(address[len(address)-1:], address) { - return core.MetachainShardId - } - - return 0 - } - - shardCoordinator.CurrentShard = core.MetachainShardId - dataComponents := componentsMock.GetDataComponents(coreComponents, shardCoordinator) - cryptoComponents := componentsMock.GetCryptoComponents(coreComponents) - networkComponents := componentsMock.GetNetworkComponents(cryptoComponents) - stateComponents := componentsMock.GetStateComponents(coreComponents, shardCoordinator) - processArgs := componentsMock.GetProcessArgs( - shardCoordinator, - coreComponents, - dataComponents, - cryptoComponents, - stateComponents, - networkComponents, - ) - - componentsMock.SetShardCoordinator(t, processArgs.BootstrapComponents, shardCoordinator) - - processComponentsFactory, err := processComp.NewProcessComponentsFactory(processArgs) - require.Nil(t, err) - managedProcessComponents, err := processComp.NewManagedProcessComponents(processComponentsFactory) - require.NoError(t, err) - require.True(t, check.IfNil(managedProcessComponents.NodesCoordinator())) - require.True(t, check.IfNil(managedProcessComponents.InterceptorsContainer())) - require.True(t, check.IfNil(managedProcessComponents.ResolversContainer())) - require.True(t, check.IfNil(managedProcessComponents.RequestersFinder())) - require.True(t, check.IfNil(managedProcessComponents.RoundHandler())) - require.True(t, check.IfNil(managedProcessComponents.ForkDetector())) - require.True(t, check.IfNil(managedProcessComponents.BlockProcessor())) - require.True(t, check.IfNil(managedProcessComponents.EpochStartTrigger())) - require.True(t, check.IfNil(managedProcessComponents.EpochStartNotifier())) - require.True(t, check.IfNil(managedProcessComponents.BlackListHandler())) - require.True(t, check.IfNil(managedProcessComponents.BootStorer())) - require.True(t, 
check.IfNil(managedProcessComponents.HeaderSigVerifier())) - require.True(t, check.IfNil(managedProcessComponents.ValidatorsStatistics())) - require.True(t, check.IfNil(managedProcessComponents.ValidatorsProvider())) - require.True(t, check.IfNil(managedProcessComponents.BlockTracker())) - require.True(t, check.IfNil(managedProcessComponents.PendingMiniBlocksHandler())) - require.True(t, check.IfNil(managedProcessComponents.RequestHandler())) - require.True(t, check.IfNil(managedProcessComponents.TxLogsProcessor())) - require.True(t, check.IfNil(managedProcessComponents.HeaderConstructionValidator())) - require.True(t, check.IfNil(managedProcessComponents.HeaderIntegrityVerifier())) - require.True(t, check.IfNil(managedProcessComponents.CurrentEpochProvider())) - require.True(t, check.IfNil(managedProcessComponents.NodeRedundancyHandler())) - require.True(t, check.IfNil(managedProcessComponents.WhiteListHandler())) - require.True(t, check.IfNil(managedProcessComponents.WhiteListerVerifiedTxs())) - require.True(t, check.IfNil(managedProcessComponents.RequestedItemsHandler())) - require.True(t, check.IfNil(managedProcessComponents.ImportStartHandler())) - require.True(t, check.IfNil(managedProcessComponents.HistoryRepository())) - require.True(t, check.IfNil(managedProcessComponents.TransactionSimulatorProcessor())) - require.True(t, check.IfNil(managedProcessComponents.FallbackHeaderValidator())) - require.True(t, check.IfNil(managedProcessComponents.PeerShardMapper())) - require.True(t, check.IfNil(managedProcessComponents.ShardCoordinator())) - require.True(t, check.IfNil(managedProcessComponents.TxsSenderHandler())) - require.True(t, check.IfNil(managedProcessComponents.HardforkTrigger())) - require.True(t, check.IfNil(managedProcessComponents.ProcessedMiniBlocksTracker())) - - err = managedProcessComponents.Create() + + t.Run("invalid params should error", func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + args.Config.PublicKeyPeerId.Type = "invalid" + processComponentsFactory, _ := processComp.NewProcessComponentsFactory(args) + managedProcessComponents, _ := processComp.NewManagedProcessComponents(processComponentsFactory) + require.NotNil(t, managedProcessComponents) + + err := managedProcessComponents.Create() + require.Error(t, err) + }) + t.Run("should work with getters", func(t *testing.T) { + t.Parallel() + + processComponentsFactory, _ := processComp.NewProcessComponentsFactory(createMockProcessComponentsFactoryArgs()) + managedProcessComponents, _ := processComp.NewManagedProcessComponents(processComponentsFactory) + require.NotNil(t, managedProcessComponents) + + require.True(t, check.IfNil(managedProcessComponents.NodesCoordinator())) + require.True(t, check.IfNil(managedProcessComponents.InterceptorsContainer())) + require.True(t, check.IfNil(managedProcessComponents.ResolversContainer())) + require.True(t, check.IfNil(managedProcessComponents.RequestersFinder())) + require.True(t, check.IfNil(managedProcessComponents.RoundHandler())) + require.True(t, check.IfNil(managedProcessComponents.ForkDetector())) + require.True(t, check.IfNil(managedProcessComponents.BlockProcessor())) + require.True(t, check.IfNil(managedProcessComponents.EpochStartTrigger())) + require.True(t, check.IfNil(managedProcessComponents.EpochStartNotifier())) + require.True(t, check.IfNil(managedProcessComponents.BlackListHandler())) + require.True(t, check.IfNil(managedProcessComponents.BootStorer())) + require.True(t, 
check.IfNil(managedProcessComponents.HeaderSigVerifier())) + require.True(t, check.IfNil(managedProcessComponents.ValidatorsStatistics())) + require.True(t, check.IfNil(managedProcessComponents.ValidatorsProvider())) + require.True(t, check.IfNil(managedProcessComponents.BlockTracker())) + require.True(t, check.IfNil(managedProcessComponents.PendingMiniBlocksHandler())) + require.True(t, check.IfNil(managedProcessComponents.RequestHandler())) + require.True(t, check.IfNil(managedProcessComponents.TxLogsProcessor())) + require.True(t, check.IfNil(managedProcessComponents.HeaderConstructionValidator())) + require.True(t, check.IfNil(managedProcessComponents.HeaderIntegrityVerifier())) + require.True(t, check.IfNil(managedProcessComponents.CurrentEpochProvider())) + require.True(t, check.IfNil(managedProcessComponents.NodeRedundancyHandler())) + require.True(t, check.IfNil(managedProcessComponents.WhiteListHandler())) + require.True(t, check.IfNil(managedProcessComponents.WhiteListerVerifiedTxs())) + require.True(t, check.IfNil(managedProcessComponents.RequestedItemsHandler())) + require.True(t, check.IfNil(managedProcessComponents.ImportStartHandler())) + require.True(t, check.IfNil(managedProcessComponents.HistoryRepository())) + require.True(t, check.IfNil(managedProcessComponents.TransactionSimulatorProcessor())) + require.True(t, check.IfNil(managedProcessComponents.FallbackHeaderValidator())) + require.True(t, check.IfNil(managedProcessComponents.PeerShardMapper())) + require.True(t, check.IfNil(managedProcessComponents.ShardCoordinator())) + require.True(t, check.IfNil(managedProcessComponents.TxsSenderHandler())) + require.True(t, check.IfNil(managedProcessComponents.HardforkTrigger())) + require.True(t, check.IfNil(managedProcessComponents.ProcessedMiniBlocksTracker())) + require.True(t, check.IfNil(managedProcessComponents.AccountsParser())) + require.True(t, check.IfNil(managedProcessComponents.ScheduledTxsExecutionHandler())) + require.True(t, check.IfNil(managedProcessComponents.ESDTDataStorageHandlerForAPI())) + require.True(t, check.IfNil(managedProcessComponents.ReceiptsRepository())) + + err := managedProcessComponents.Create() + require.NoError(t, err) + require.False(t, check.IfNil(managedProcessComponents.NodesCoordinator())) + require.False(t, check.IfNil(managedProcessComponents.InterceptorsContainer())) + require.False(t, check.IfNil(managedProcessComponents.ResolversContainer())) + require.False(t, check.IfNil(managedProcessComponents.RequestersFinder())) + require.False(t, check.IfNil(managedProcessComponents.RoundHandler())) + require.False(t, check.IfNil(managedProcessComponents.ForkDetector())) + require.False(t, check.IfNil(managedProcessComponents.BlockProcessor())) + require.False(t, check.IfNil(managedProcessComponents.EpochStartTrigger())) + require.False(t, check.IfNil(managedProcessComponents.EpochStartNotifier())) + require.False(t, check.IfNil(managedProcessComponents.BlackListHandler())) + require.False(t, check.IfNil(managedProcessComponents.BootStorer())) + require.False(t, check.IfNil(managedProcessComponents.HeaderSigVerifier())) + require.False(t, check.IfNil(managedProcessComponents.ValidatorsStatistics())) + require.False(t, check.IfNil(managedProcessComponents.ValidatorsProvider())) + require.False(t, check.IfNil(managedProcessComponents.BlockTracker())) + require.False(t, check.IfNil(managedProcessComponents.PendingMiniBlocksHandler())) + require.False(t, check.IfNil(managedProcessComponents.RequestHandler())) + require.False(t, 
check.IfNil(managedProcessComponents.TxLogsProcessor())) + require.False(t, check.IfNil(managedProcessComponents.HeaderConstructionValidator())) + require.False(t, check.IfNil(managedProcessComponents.HeaderIntegrityVerifier())) + require.False(t, check.IfNil(managedProcessComponents.CurrentEpochProvider())) + require.False(t, check.IfNil(managedProcessComponents.NodeRedundancyHandler())) + require.False(t, check.IfNil(managedProcessComponents.WhiteListHandler())) + require.False(t, check.IfNil(managedProcessComponents.WhiteListerVerifiedTxs())) + require.False(t, check.IfNil(managedProcessComponents.RequestedItemsHandler())) + require.False(t, check.IfNil(managedProcessComponents.ImportStartHandler())) + require.False(t, check.IfNil(managedProcessComponents.HistoryRepository())) + require.False(t, check.IfNil(managedProcessComponents.TransactionSimulatorProcessor())) + require.False(t, check.IfNil(managedProcessComponents.FallbackHeaderValidator())) + require.False(t, check.IfNil(managedProcessComponents.PeerShardMapper())) + require.False(t, check.IfNil(managedProcessComponents.ShardCoordinator())) + require.False(t, check.IfNil(managedProcessComponents.TxsSenderHandler())) + require.False(t, check.IfNil(managedProcessComponents.HardforkTrigger())) + require.False(t, check.IfNil(managedProcessComponents.ProcessedMiniBlocksTracker())) + require.False(t, check.IfNil(managedProcessComponents.AccountsParser())) + require.False(t, check.IfNil(managedProcessComponents.ScheduledTxsExecutionHandler())) + require.False(t, check.IfNil(managedProcessComponents.ESDTDataStorageHandlerForAPI())) + require.False(t, check.IfNil(managedProcessComponents.ReceiptsRepository())) + + require.Equal(t, factory.ProcessComponentsName, managedProcessComponents.String()) + }) +} + +func TestManagedProcessComponents_CheckSubcomponents(t *testing.T) { + t.Parallel() + + processComponentsFactory, _ := processComp.NewProcessComponentsFactory(createMockProcessComponentsFactoryArgs()) + managedProcessComponents, _ := processComp.NewManagedProcessComponents(processComponentsFactory) + require.NotNil(t, managedProcessComponents) + require.Equal(t, errorsMx.ErrNilProcessComponents, managedProcessComponents.CheckSubcomponents()) + + err := managedProcessComponents.Create() require.NoError(t, err) - require.False(t, check.IfNil(managedProcessComponents.NodesCoordinator())) - require.False(t, check.IfNil(managedProcessComponents.InterceptorsContainer())) - require.False(t, check.IfNil(managedProcessComponents.ResolversContainer())) - require.False(t, check.IfNil(managedProcessComponents.RequestersFinder())) - require.False(t, check.IfNil(managedProcessComponents.RoundHandler())) - require.False(t, check.IfNil(managedProcessComponents.ForkDetector())) - require.False(t, check.IfNil(managedProcessComponents.BlockProcessor())) - require.False(t, check.IfNil(managedProcessComponents.EpochStartTrigger())) - require.False(t, check.IfNil(managedProcessComponents.EpochStartNotifier())) - require.False(t, check.IfNil(managedProcessComponents.BlackListHandler())) - require.False(t, check.IfNil(managedProcessComponents.BootStorer())) - require.False(t, check.IfNil(managedProcessComponents.HeaderSigVerifier())) - require.False(t, check.IfNil(managedProcessComponents.ValidatorsStatistics())) - require.False(t, check.IfNil(managedProcessComponents.ValidatorsProvider())) - require.False(t, check.IfNil(managedProcessComponents.BlockTracker())) - require.False(t, check.IfNil(managedProcessComponents.PendingMiniBlocksHandler())) - 
require.False(t, check.IfNil(managedProcessComponents.RequestHandler())) - require.False(t, check.IfNil(managedProcessComponents.TxLogsProcessor())) - require.False(t, check.IfNil(managedProcessComponents.HeaderConstructionValidator())) - require.False(t, check.IfNil(managedProcessComponents.HeaderIntegrityVerifier())) - require.False(t, check.IfNil(managedProcessComponents.CurrentEpochProvider())) - require.False(t, check.IfNil(managedProcessComponents.NodeRedundancyHandler())) - require.False(t, check.IfNil(managedProcessComponents.WhiteListHandler())) - require.False(t, check.IfNil(managedProcessComponents.WhiteListerVerifiedTxs())) - require.False(t, check.IfNil(managedProcessComponents.RequestedItemsHandler())) - require.False(t, check.IfNil(managedProcessComponents.ImportStartHandler())) - require.False(t, check.IfNil(managedProcessComponents.HistoryRepository())) - require.False(t, check.IfNil(managedProcessComponents.TransactionSimulatorProcessor())) - require.False(t, check.IfNil(managedProcessComponents.FallbackHeaderValidator())) - require.False(t, check.IfNil(managedProcessComponents.PeerShardMapper())) - require.False(t, check.IfNil(managedProcessComponents.ShardCoordinator())) - require.False(t, check.IfNil(managedProcessComponents.TxsSenderHandler())) - require.False(t, check.IfNil(managedProcessComponents.HardforkTrigger())) - require.False(t, check.IfNil(managedProcessComponents.ProcessedMiniBlocksTracker())) - - nodeSkBytes, err := cryptoComponents.PrivateKey().ToByteArray() - require.Nil(t, err) - observerSkBytes, err := managedProcessComponents.NodeRedundancyHandler().ObserverPrivateKey().ToByteArray() - require.Nil(t, err) - require.NotEqual(t, nodeSkBytes, observerSkBytes) + + require.Nil(t, managedProcessComponents.CheckSubcomponents()) } func TestManagedProcessComponents_Close(t *testing.T) { t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } - shardCoordinator := mock.NewMultiShardsCoordinatorMock(2) - processArgs := componentsMock.GetProcessComponentsFactoryArgs(shardCoordinator) - processComponentsFactory, _ := processComp.NewProcessComponentsFactory(processArgs) + processComponentsFactory, _ := processComp.NewProcessComponentsFactory(createMockProcessComponentsFactoryArgs()) managedProcessComponents, _ := processComp.NewManagedProcessComponents(processComponentsFactory) err := managedProcessComponents.Create() require.NoError(t, err) err = managedProcessComponents.Close() require.NoError(t, err) - require.Nil(t, managedProcessComponents.NodesCoordinator()) + + err = managedProcessComponents.Close() + require.NoError(t, err) +} + +func TestManagedProcessComponents_IsInterfaceNil(t *testing.T) { + t.Parallel() + + managedProcessComponents, _ := processComp.NewManagedProcessComponents(nil) + require.True(t, managedProcessComponents.IsInterfaceNil()) + + processComponentsFactory, _ := processComp.NewProcessComponentsFactory(createMockProcessComponentsFactoryArgs()) + managedProcessComponents, _ = processComp.NewManagedProcessComponents(processComponentsFactory) + require.False(t, managedProcessComponents.IsInterfaceNil()) } diff --git a/factory/processing/processComponents_test.go b/factory/processing/processComponents_test.go index d4b3d99c030..391e164712c 100644 --- a/factory/processing/processComponents_test.go +++ b/factory/processing/processComponents_test.go @@ -1,111 +1,1025 @@ package processing_test import ( + "bytes" + "context" + "errors" + "math/big" "strings" "sync" "testing" + 
"github.com/multiversx/mx-chain-core-go/core/keyValStorage" coreData "github.com/multiversx/mx-chain-core-go/data" - "github.com/multiversx/mx-chain-core-go/data/block" dataBlock "github.com/multiversx/mx-chain-core-go/data/block" + "github.com/multiversx/mx-chain-core-go/data/endProcess" outportCore "github.com/multiversx/mx-chain-core-go/data/outport" + "github.com/multiversx/mx-chain-core-go/hashing/blake2b" + "github.com/multiversx/mx-chain-core-go/hashing/keccak" + "github.com/multiversx/mx-chain-core-go/marshal" + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/common/factory" + "github.com/multiversx/mx-chain-go/config" + errorsMx "github.com/multiversx/mx-chain-go/errors" "github.com/multiversx/mx-chain-go/factory/mock" processComp "github.com/multiversx/mx-chain-go/factory/processing" "github.com/multiversx/mx-chain-go/genesis" + genesisMocks "github.com/multiversx/mx-chain-go/genesis/mock" + testsMocks "github.com/multiversx/mx-chain-go/integrationTests/mock" + "github.com/multiversx/mx-chain-go/p2p" "github.com/multiversx/mx-chain-go/process" - componentsMock "github.com/multiversx/mx-chain-go/testscommon/components" + "github.com/multiversx/mx-chain-go/sharding" + "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" + mxState "github.com/multiversx/mx-chain-go/state" + "github.com/multiversx/mx-chain-go/testscommon" + "github.com/multiversx/mx-chain-go/testscommon/bootstrapMocks" + "github.com/multiversx/mx-chain-go/testscommon/components" + "github.com/multiversx/mx-chain-go/testscommon/cryptoMocks" + "github.com/multiversx/mx-chain-go/testscommon/dataRetriever" + "github.com/multiversx/mx-chain-go/testscommon/dblookupext" + "github.com/multiversx/mx-chain-go/testscommon/economicsmocks" + "github.com/multiversx/mx-chain-go/testscommon/epochNotifier" + factoryMocks "github.com/multiversx/mx-chain-go/testscommon/factory" + "github.com/multiversx/mx-chain-go/testscommon/genericMocks" + "github.com/multiversx/mx-chain-go/testscommon/guardianMocks" "github.com/multiversx/mx-chain-go/testscommon/mainFactoryMocks" + "github.com/multiversx/mx-chain-go/testscommon/nodeTypeProviderMock" "github.com/multiversx/mx-chain-go/testscommon/outport" - storageStubs "github.com/multiversx/mx-chain-go/testscommon/storage" - "github.com/stretchr/testify/assert" + "github.com/multiversx/mx-chain-go/testscommon/p2pmocks" + "github.com/multiversx/mx-chain-go/testscommon/shardingMocks" + "github.com/multiversx/mx-chain-go/testscommon/state" + "github.com/multiversx/mx-chain-go/testscommon/statusHandler" + updateMocks "github.com/multiversx/mx-chain-go/update/mock" "github.com/stretchr/testify/require" ) -// ------------ Test TestProcessComponents -------------------- -func TestProcessComponents_CloseShouldWork(t *testing.T) { - t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } +const ( + testingProtocolSustainabilityAddress = "erd1932eft30w753xyvme8d49qejgkjc09n5e49w4mwdjtm0neld797su0dlxp" +) - shardCoordinator := mock.NewMultiShardsCoordinatorMock(2) - processArgs := componentsMock.GetProcessComponentsFactoryArgs(shardCoordinator) - pcf, err := processComp.NewProcessComponentsFactory(processArgs) - require.Nil(t, err) +var ( + gasSchedule, _ = common.LoadGasScheduleConfig("../../cmd/node/config/gasSchedules/gasScheduleV1.toml") + addrPubKeyConv, _ = factory.NewPubkeyConverter(config.PubkeyConfig{ + Length: 32, + Type: "bech32", + SignatureLength: 0, + Hrp: "erd", + }) + valPubKeyConv, _ = 
factory.NewPubkeyConverter(config.PubkeyConfig{ + Length: 96, + Type: "hex", + SignatureLength: 48, + }) +) + +func createMockProcessComponentsFactoryArgs() processComp.ProcessComponentsFactoryArgs { + + args := processComp.ProcessComponentsFactoryArgs{ + Config: testscommon.GetGeneralConfig(), + EpochConfig: config.EpochConfig{}, + PrefConfigs: config.PreferencesConfig{}, + ImportDBConfig: config.ImportDbConfig{}, + AccountsParser: &mock.AccountsParserStub{ + GenerateInitialTransactionsCalled: func(shardCoordinator sharding.Coordinator, initialIndexingData map[uint32]*genesis.IndexingData) ([]*dataBlock.MiniBlock, map[uint32]*outportCore.Pool, error) { + return []*dataBlock.MiniBlock{ + {}, + }, + map[uint32]*outportCore.Pool{ + 0: {}, + }, nil + }, + }, + SmartContractParser: &mock.SmartContractParserStub{}, + GasSchedule: &testscommon.GasScheduleNotifierMock{ + GasSchedule: gasSchedule, + }, + NodesCoordinator: &shardingMocks.NodesCoordinatorStub{}, + RequestedItemsHandler: &testscommon.RequestedItemsHandlerStub{}, + WhiteListHandler: &testscommon.WhiteListHandlerStub{}, + WhiteListerVerifiedTxs: &testscommon.WhiteListHandlerStub{}, + MaxRating: 100, + SystemSCConfig: &config.SystemSmartContractsConfig{ + ESDTSystemSCConfig: config.ESDTSystemSCConfig{ + BaseIssuingCost: "1000", + OwnerAddress: "erd1fpkcgel4gcmh8zqqdt043yfcn5tyx8373kg6q2qmkxzu4dqamc0swts65c", + }, + GovernanceSystemSCConfig: config.GovernanceSystemSCConfig{ + V1: config.GovernanceSystemSCConfigV1{ + ProposalCost: "500", + NumNodes: 100, + MinQuorum: 50, + MinPassThreshold: 50, + MinVetoThreshold: 50, + }, + Active: config.GovernanceSystemSCConfigActive{ + ProposalCost: "500", + MinQuorum: 0.5, + MinPassThreshold: 0.5, + MinVetoThreshold: 0.5, + }, + ChangeConfigAddress: "erd1vxy22x0fj4zv6hktmydg8vpfh6euv02cz4yg0aaws6rrad5a5awqgqky80", + }, + StakingSystemSCConfig: config.StakingSystemSCConfig{ + GenesisNodePrice: "2500000000000000000000", + MinStakeValue: "1", + UnJailValue: "1", + MinStepValue: "1", + UnBondPeriod: 0, + NumRoundsWithoutBleed: 0, + MaximumPercentageToBleed: 0, + BleedPercentagePerRound: 0, + MaxNumberOfNodesForStake: 10, + ActivateBLSPubKeyMessageVerification: false, + MinUnstakeTokensValue: "1", + }, + DelegationManagerSystemSCConfig: config.DelegationManagerSystemSCConfig{ + MinCreationDeposit: "100", + MinStakeAmount: "100", + ConfigChangeAddress: "erd1vxy22x0fj4zv6hktmydg8vpfh6euv02cz4yg0aaws6rrad5a5awqgqky80", + }, + DelegationSystemSCConfig: config.DelegationSystemSCConfig{ + MinServiceFee: 0, + MaxServiceFee: 100, + }, + }, + Version: "v1.0.0", + ImportStartHandler: &testscommon.ImportStartHandlerStub{}, + HistoryRepo: &dblookupext.HistoryRepositoryStub{}, + Data: &testsMocks.DataComponentsStub{ + DataPool: dataRetriever.NewPoolsHolderMock(), + BlockChain: &testscommon.ChainHandlerStub{ + GetGenesisHeaderHashCalled: func() []byte { + return []byte("genesis hash") + }, + GetGenesisHeaderCalled: func() coreData.HeaderHandler { + return &testscommon.HeaderHandlerStub{} + }, + }, + MbProvider: &testsMocks.MiniBlocksProviderStub{}, + Store: genericMocks.NewChainStorerMock(0), + }, + CoreData: &mock.CoreComponentsMock{ + IntMarsh: &marshal.GogoProtoMarshalizer{}, + TxMarsh: &marshal.JsonMarshalizer{}, + UInt64ByteSliceConv: &testsMocks.Uint64ByteSliceConverterMock{}, + AddrPubKeyConv: addrPubKeyConv, + ValPubKeyConv: valPubKeyConv, + NodesConfig: &testscommon.NodesSetupStub{ + GetShardConsensusGroupSizeCalled: func() uint32 { + return 2 + }, + GetMetaConsensusGroupSizeCalled: func() uint32 { + return 2 
+ }, + }, + EpochChangeNotifier: &epochNotifier.EpochNotifierStub{}, + EconomicsHandler: &economicsmocks.EconomicsHandlerStub{ + ProtocolSustainabilityAddressCalled: func() string { + return testingProtocolSustainabilityAddress + }, + }, + Hash: blake2b.NewBlake2b(), + TxVersionCheckHandler: &testscommon.TxVersionCheckerStub{}, + RatingHandler: &testscommon.RaterMock{}, + EnableEpochsHandlerField: &testscommon.EnableEpochsHandlerStub{}, + EnableRoundsHandlerField: &testscommon.EnableRoundsHandlerStub{}, + EpochNotifierWithConfirm: &updateMocks.EpochStartNotifierStub{}, + RoundHandlerField: &testscommon.RoundHandlerMock{}, + ChanStopProcess: make(chan endProcess.ArgEndProcess, 1), + TxSignHasherField: keccak.NewKeccak(), + HardforkTriggerPubKeyField: []byte("hardfork pub key"), + WasmVMChangeLockerInternal: &sync.RWMutex{}, + NodeTypeProviderField: &nodeTypeProviderMock.NodeTypeProviderStub{}, + RatingsConfig: &testscommon.RatingsInfoMock{}, + PathHdl: &testscommon.PathManagerStub{}, + ProcessStatusHandlerInternal: &testscommon.ProcessStatusHandlerStub{}, + }, + Crypto: &testsMocks.CryptoComponentsStub{ + BlKeyGen: &cryptoMocks.KeyGenStub{}, + BlockSig: &cryptoMocks.SingleSignerStub{}, + MultiSigContainer: &cryptoMocks.MultiSignerContainerMock{ + MultiSigner: &cryptoMocks.MultisignerMock{}, + }, + PrivKey: &cryptoMocks.PrivateKeyStub{}, + PubKey: &cryptoMocks.PublicKeyStub{}, + PubKeyString: "pub key string", + PubKeyBytes: []byte("pub key bytes"), + TxKeyGen: &cryptoMocks.KeyGenStub{}, + TxSig: &cryptoMocks.SingleSignerStub{}, + PeerSignHandler: &cryptoMocks.PeerSignatureHandlerStub{}, + MsgSigVerifier: &testscommon.MessageSignVerifierMock{}, + ManagedPeersHolderField: &testscommon.ManagedPeersHolderStub{}, + }, + Network: &testsMocks.NetworkComponentsStub{ + Messenger: &p2pmocks.MessengerStub{}, + InputAntiFlood: &testsMocks.P2PAntifloodHandlerStub{}, + OutputAntiFlood: &testsMocks.P2PAntifloodHandlerStub{}, + PreferredPeersHolder: &p2pmocks.PeersHolderStub{}, + PeersRatingHandlerField: &p2pmocks.PeersRatingHandlerStub{}, + }, + BootstrapComponents: &mainFactoryMocks.BootstrapComponentsStub{ + ShCoordinator: mock.NewMultiShardsCoordinatorMock(2), + BootstrapParams: &bootstrapMocks.BootstrapParamsHandlerMock{}, + HdrIntegrityVerifier: &mock.HeaderIntegrityVerifierStub{}, + GuardedAccountHandlerField: &guardianMocks.GuardedAccountHandlerStub{}, + VersionedHdrFactory: &testscommon.VersionedHeaderFactoryStub{}, + }, + StatusComponents: &testsMocks.StatusComponentsStub{ + Outport: &outport.OutportStub{}, + }, + StatusCoreComponents: &factoryMocks.StatusCoreComponentsStub{ + AppStatusHandlerField: &statusHandler.AppStatusHandlerStub{}, + }, + } - pc, err := pcf.Create() - require.Nil(t, err) + args.State = components.GetStateComponents(args.CoreData) - err = pc.Close() - require.NoError(t, err) + return args } -func TestProcessComponentsFactory_CreateWithInvalidTxAccumulatorTimeExpectError(t *testing.T) { +func TestNewProcessComponentsFactory(t *testing.T) { t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } - shardCoordinator := mock.NewMultiShardsCoordinatorMock(2) - processArgs := componentsMock.GetProcessComponentsFactoryArgs(shardCoordinator) - processArgs.Config.Antiflood.TxAccumulator.MaxAllowedTimeInMilliseconds = 0 - pcf, err := processComp.NewProcessComponentsFactory(processArgs) - require.Nil(t, err) + t.Run("nil AccountsParser should error", func(t *testing.T) { + t.Parallel() - instance, err := pcf.Create() - require.Nil(t, instance) - require.Error(t, 
err) - require.True(t, strings.Contains(err.Error(), process.ErrInvalidValue.Error())) + args := createMockProcessComponentsFactoryArgs() + args.AccountsParser = nil + pcf, err := processComp.NewProcessComponentsFactory(args) + require.True(t, errors.Is(err, errorsMx.ErrNilAccountsParser)) + require.Nil(t, pcf) + }) + t.Run("nil GasSchedule should error", func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + args.GasSchedule = nil + pcf, err := processComp.NewProcessComponentsFactory(args) + require.True(t, errors.Is(err, errorsMx.ErrNilGasSchedule)) + require.Nil(t, pcf) + }) + t.Run("nil Data should error", func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + args.Data = nil + pcf, err := processComp.NewProcessComponentsFactory(args) + require.True(t, errors.Is(err, errorsMx.ErrNilDataComponentsHolder)) + require.Nil(t, pcf) + }) + t.Run("nil BlockChain should error", func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + args.Data = &testsMocks.DataComponentsStub{ + BlockChain: nil, + } + pcf, err := processComp.NewProcessComponentsFactory(args) + require.True(t, errors.Is(err, errorsMx.ErrNilBlockChainHandler)) + require.Nil(t, pcf) + }) + t.Run("nil DataPool should error", func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + args.Data = &testsMocks.DataComponentsStub{ + BlockChain: &testscommon.ChainHandlerStub{}, + DataPool: nil, + } + pcf, err := processComp.NewProcessComponentsFactory(args) + require.True(t, errors.Is(err, errorsMx.ErrNilDataPoolsHolder)) + require.Nil(t, pcf) + }) + t.Run("nil StorageService should error", func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + args.Data = &testsMocks.DataComponentsStub{ + BlockChain: &testscommon.ChainHandlerStub{}, + DataPool: &dataRetriever.PoolsHolderStub{}, + Store: nil, + } + pcf, err := processComp.NewProcessComponentsFactory(args) + require.True(t, errors.Is(err, errorsMx.ErrNilStorageService)) + require.Nil(t, pcf) + }) + t.Run("nil CoreData should error", func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + args.CoreData = nil + pcf, err := processComp.NewProcessComponentsFactory(args) + require.True(t, errors.Is(err, errorsMx.ErrNilCoreComponentsHolder)) + require.Nil(t, pcf) + }) + t.Run("nil EconomicsData should error", func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + args.CoreData = &mock.CoreComponentsMock{ + EconomicsHandler: nil, + } + pcf, err := processComp.NewProcessComponentsFactory(args) + require.True(t, errors.Is(err, errorsMx.ErrNilEconomicsData)) + require.Nil(t, pcf) + }) + t.Run("nil GenesisNodesSetup should error", func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + args.CoreData = &mock.CoreComponentsMock{ + EconomicsHandler: &economicsmocks.EconomicsHandlerStub{}, + NodesConfig: nil, + } + pcf, err := processComp.NewProcessComponentsFactory(args) + require.True(t, errors.Is(err, errorsMx.ErrNilGenesisNodesSetupHandler)) + require.Nil(t, pcf) + }) + t.Run("nil AddressPubKeyConverter should error", func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + args.CoreData = &mock.CoreComponentsMock{ + EconomicsHandler: &economicsmocks.EconomicsHandlerStub{}, + NodesConfig: &testscommon.NodesSetupStub{}, + AddrPubKeyConv: nil, + } + pcf, err := 
processComp.NewProcessComponentsFactory(args) + require.True(t, errors.Is(err, errorsMx.ErrNilAddressPublicKeyConverter)) + require.Nil(t, pcf) + }) + t.Run("nil EpochNotifier should error", func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + args.CoreData = &mock.CoreComponentsMock{ + EconomicsHandler: &economicsmocks.EconomicsHandlerStub{}, + NodesConfig: &testscommon.NodesSetupStub{}, + AddrPubKeyConv: &testscommon.PubkeyConverterStub{}, + EpochChangeNotifier: nil, + } + pcf, err := processComp.NewProcessComponentsFactory(args) + require.True(t, errors.Is(err, errorsMx.ErrNilEpochNotifier)) + require.Nil(t, pcf) + }) + t.Run("nil ValidatorPubKeyConverter should error", func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + args.CoreData = &mock.CoreComponentsMock{ + EconomicsHandler: &economicsmocks.EconomicsHandlerStub{}, + NodesConfig: &testscommon.NodesSetupStub{}, + AddrPubKeyConv: &testscommon.PubkeyConverterStub{}, + EpochChangeNotifier: &epochNotifier.EpochNotifierStub{}, + ValPubKeyConv: nil, + } + pcf, err := processComp.NewProcessComponentsFactory(args) + require.True(t, errors.Is(err, errorsMx.ErrNilPubKeyConverter)) + require.Nil(t, pcf) + }) + t.Run("nil InternalMarshalizer should error", func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + args.CoreData = &mock.CoreComponentsMock{ + EconomicsHandler: &economicsmocks.EconomicsHandlerStub{}, + NodesConfig: &testscommon.NodesSetupStub{}, + AddrPubKeyConv: &testscommon.PubkeyConverterStub{}, + EpochChangeNotifier: &epochNotifier.EpochNotifierStub{}, + ValPubKeyConv: &testscommon.PubkeyConverterStub{}, + IntMarsh: nil, + } + pcf, err := processComp.NewProcessComponentsFactory(args) + require.True(t, errors.Is(err, errorsMx.ErrNilInternalMarshalizer)) + require.Nil(t, pcf) + }) + t.Run("nil Uint64ByteSliceConverter should error", func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + args.CoreData = &mock.CoreComponentsMock{ + EconomicsHandler: &economicsmocks.EconomicsHandlerStub{}, + NodesConfig: &testscommon.NodesSetupStub{}, + AddrPubKeyConv: &testscommon.PubkeyConverterStub{}, + EpochChangeNotifier: &epochNotifier.EpochNotifierStub{}, + ValPubKeyConv: &testscommon.PubkeyConverterStub{}, + IntMarsh: &testscommon.MarshalizerStub{}, + UInt64ByteSliceConv: nil, + } + pcf, err := processComp.NewProcessComponentsFactory(args) + require.True(t, errors.Is(err, errorsMx.ErrNilUint64ByteSliceConverter)) + require.Nil(t, pcf) + }) + t.Run("nil Crypto should error", func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + args.Crypto = nil + pcf, err := processComp.NewProcessComponentsFactory(args) + require.True(t, errors.Is(err, errorsMx.ErrNilCryptoComponentsHolder)) + require.Nil(t, pcf) + }) + t.Run("nil BlockSignKeyGen should error", func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + args.Crypto = &testsMocks.CryptoComponentsStub{ + BlKeyGen: nil, + } + pcf, err := processComp.NewProcessComponentsFactory(args) + require.True(t, errors.Is(err, errorsMx.ErrNilBlockSignKeyGen)) + require.Nil(t, pcf) + }) + t.Run("nil State should error", func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + args.State = nil + pcf, err := processComp.NewProcessComponentsFactory(args) + require.True(t, errors.Is(err, errorsMx.ErrNilStateComponentsHolder)) + require.Nil(t, pcf) + }) + t.Run("nil 
AccountsAdapter should error", func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + args.State = &factoryMocks.StateComponentsMock{ + Accounts: nil, + } + pcf, err := processComp.NewProcessComponentsFactory(args) + require.True(t, errors.Is(err, errorsMx.ErrNilAccountsAdapter)) + require.Nil(t, pcf) + }) + t.Run("nil Network should error", func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + args.Network = nil + pcf, err := processComp.NewProcessComponentsFactory(args) + require.True(t, errors.Is(err, errorsMx.ErrNilNetworkComponentsHolder)) + require.Nil(t, pcf) + }) + t.Run("nil NetworkMessenger should error", func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + args.Network = &testsMocks.NetworkComponentsStub{ + Messenger: nil, + } + pcf, err := processComp.NewProcessComponentsFactory(args) + require.True(t, errors.Is(err, errorsMx.ErrNilMessenger)) + require.Nil(t, pcf) + }) + t.Run("nil InputAntiFloodHandler should error", func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + args.Network = &testsMocks.NetworkComponentsStub{ + Messenger: &p2pmocks.MessengerStub{}, + InputAntiFlood: nil, + } + pcf, err := processComp.NewProcessComponentsFactory(args) + require.True(t, errors.Is(err, errorsMx.ErrNilInputAntiFloodHandler)) + require.Nil(t, pcf) + }) + t.Run("nil SystemSCConfig should error", func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + args.SystemSCConfig = nil + pcf, err := processComp.NewProcessComponentsFactory(args) + require.True(t, errors.Is(err, errorsMx.ErrNilSystemSCConfig)) + require.Nil(t, pcf) + }) + t.Run("nil BootstrapComponents should error", func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + args.BootstrapComponents = nil + pcf, err := processComp.NewProcessComponentsFactory(args) + require.True(t, errors.Is(err, errorsMx.ErrNilBootstrapComponentsHolder)) + require.Nil(t, pcf) + }) + t.Run("nil ShardCoordinator should error", func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + args.BootstrapComponents = &mainFactoryMocks.BootstrapComponentsStub{ + ShCoordinator: nil, + } + pcf, err := processComp.NewProcessComponentsFactory(args) + require.True(t, errors.Is(err, errorsMx.ErrNilShardCoordinator)) + require.Nil(t, pcf) + }) + t.Run("nil EpochBootstrapParams should error", func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + args.BootstrapComponents = &mainFactoryMocks.BootstrapComponentsStub{ + ShCoordinator: &testscommon.ShardsCoordinatorMock{}, + BootstrapParams: nil, + } + pcf, err := processComp.NewProcessComponentsFactory(args) + require.True(t, errors.Is(err, errorsMx.ErrNilBootstrapParamsHandler)) + require.Nil(t, pcf) + }) + t.Run("nil StatusComponents should error", func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + args.StatusComponents = nil + pcf, err := processComp.NewProcessComponentsFactory(args) + require.True(t, errors.Is(err, errorsMx.ErrNilStatusComponentsHolder)) + require.Nil(t, pcf) + }) + t.Run("nil OutportHandler should error", func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + args.StatusComponents = &testsMocks.StatusComponentsStub{ + Outport: nil, + } + pcf, err := processComp.NewProcessComponentsFactory(args) + require.True(t, errors.Is(err, 
errorsMx.ErrNilOutportHandler)) + require.Nil(t, pcf) + }) + t.Run("nil HistoryRepo should error", func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + args.HistoryRepo = nil + pcf, err := processComp.NewProcessComponentsFactory(args) + require.True(t, errors.Is(err, errorsMx.ErrNilHistoryRepository)) + require.Nil(t, pcf) + }) + t.Run("nil StatusCoreComponents should error", func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + args.StatusCoreComponents = nil + pcf, err := processComp.NewProcessComponentsFactory(args) + require.True(t, errors.Is(err, errorsMx.ErrNilStatusCoreComponents)) + require.Nil(t, pcf) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + pcf, err := processComp.NewProcessComponentsFactory(createMockProcessComponentsFactoryArgs()) + require.NoError(t, err) + require.NotNil(t, pcf) + }) } -func TestProcessComponents_IndexGenesisBlocks(t *testing.T) { +func TestProcessComponentsFactory_Create(t *testing.T) { t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } - shardCoordinator := mock.NewMultiShardsCoordinatorMock(1) - processArgs := componentsMock.GetProcessComponentsFactoryArgs(shardCoordinator) - processArgs.Data = &mock.DataComponentsMock{ - Storage: &storageStubs.ChainStorerStub{}, - } + expectedErr := errors.New("expected error") + t.Run("CreateCurrentEpochProvider fails should error", func(t *testing.T) { + t.Parallel() - saveBlockCalledMutex := sync.Mutex{} + args := createMockProcessComponentsFactoryArgs() + args.Config.EpochStartConfig.RoundsPerEpoch = 0 + args.PrefConfigs.FullArchive = true + testCreateWithArgs(t, args, "rounds per epoch") + }) + t.Run("createNetworkShardingCollector fails due to invalid PublicKeyPeerId config should error", func(t *testing.T) { + t.Parallel() - outportHandler := &outport.OutportStub{ - HasDriversCalled: func() bool { - return true - }, - SaveBlockCalled: func(args *outportCore.ArgsSaveBlockData) { - saveBlockCalledMutex.Lock() - require.NotNil(t, args) + args := createMockProcessComponentsFactoryArgs() + args.Config.PublicKeyPeerId.Type = "invalid" + testCreateWithArgs(t, args, "cache type") + }) + t.Run("createNetworkShardingCollector fails due to invalid PublicKeyShardId config should error", func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + args.Config.PublicKeyShardId.Type = "invalid" + testCreateWithArgs(t, args, "cache type") + }) + t.Run("createNetworkShardingCollector fails due to invalid PeerIdShardId config should error", func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + args.Config.PeerIdShardId.Type = "invalid" + testCreateWithArgs(t, args, "cache type") + }) + t.Run("prepareNetworkShardingCollector fails due to SetPeerShardResolver failure should error", func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + netwCompStub, ok := args.Network.(*testsMocks.NetworkComponentsStub) + require.True(t, ok) + netwCompStub.Messenger = &p2pmocks.MessengerStub{ + SetPeerShardResolverCalled: func(peerShardResolver p2p.PeerShardResolver) error { + return expectedErr + }, + } + testCreateWithArgs(t, args, expectedErr.Error()) + }) + t.Run("prepareNetworkShardingCollector fails due to SetPeerValidatorMapper failure should error", func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + netwCompStub, ok := args.Network.(*testsMocks.NetworkComponentsStub) + 
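+ // the mock args always wire Network as *testsMocks.NetworkComponentsStub, so the ok check below is a safety assertion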
require.True(t, ok) + netwCompStub.InputAntiFlood = &testsMocks.P2PAntifloodHandlerStub{ + SetPeerValidatorMapperCalled: func(validatorMapper process.PeerValidatorMapper) error { + return expectedErr + }, + } + testCreateWithArgs(t, args, expectedErr.Error()) + }) + t.Run("newStorageRequester fails due to NewStorageServiceFactory failure should error", func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + args.ImportDBConfig.IsImportDBMode = true + args.Config.StoragePruning.NumActivePersisters = 0 + testCreateWithArgs(t, args, "active persisters") + }) + t.Run("newResolverContainerFactory fails due to NewPeerAuthenticationPayloadValidator failure should error", func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + args.Config.HeartbeatV2.HeartbeatExpiryTimespanInSec = 0 + testCreateWithArgs(t, args, "expiry timespan") + }) + t.Run("generateGenesisHeadersAndApplyInitialBalances fails due to invalid GenesisNodePrice should error", func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + args.Config.LogsAndEvents.SaveInStorageEnabled = false // coverage + args.Config.DbLookupExtensions.Enabled = true // coverage + args.SystemSCConfig.StakingSystemSCConfig.GenesisNodePrice = "invalid" + testCreateWithArgs(t, args, "invalid genesis node price") + }) + t.Run("newValidatorStatisticsProcessor fails due to nil genesis header should error", func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + args.ImportDBConfig.IsImportDBMode = true // coverage + dataCompStub, ok := args.Data.(*testsMocks.DataComponentsStub) + require.True(t, ok) + blockChainStub, ok := dataCompStub.BlockChain.(*testscommon.ChainHandlerStub) + require.True(t, ok) + blockChainStub.GetGenesisHeaderCalled = func() coreData.HeaderHandler { + return nil + } + testCreateWithArgs(t, args, errorsMx.ErrGenesisBlockNotInitialized.Error()) + }) + t.Run("indexGenesisBlocks fails due to GenerateInitialTransactions failure should error", func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + args.AccountsParser = &mock.AccountsParserStub{ + GenerateInitialTransactionsCalled: func(shardCoordinator sharding.Coordinator, initialIndexingData map[uint32]*genesis.IndexingData) ([]*dataBlock.MiniBlock, map[uint32]*outportCore.Pool, error) { + return nil, nil, expectedErr + }, + } + testCreateWithArgs(t, args, expectedErr.Error()) + }) + t.Run("NewMiniBlocksPoolsCleaner fails should error", func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + args.Config.PoolsCleanersConfig.MaxRoundsToKeepUnprocessedMiniBlocks = 0 + testCreateWithArgs(t, args, "MaxRoundsToKeepUnprocessedData") + }) + t.Run("NewTxsPoolsCleaner fails should error", func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + args.Config.PoolsCleanersConfig.MaxRoundsToKeepUnprocessedTransactions = 0 + testCreateWithArgs(t, args, "MaxRoundsToKeepUnprocessedData") + }) + t.Run("createHardforkTrigger fails due to Decode failure should error", func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + args.Config.Hardfork.PublicKeyToListenFrom = "invalid key" + testCreateWithArgs(t, args, "PublicKeyToListenFrom") + }) + t.Run("NewCache fails for vmOutput should error", func(t *testing.T) { + t.Parallel() - bodyRequired := &dataBlock.Body{ - MiniBlocks: make([]*block.MiniBlock, 4), + args := 
createMockProcessComponentsFactoryArgs() + args.Config.VMOutputCacher.Type = "invalid" + testCreateWithArgs(t, args, "cache type") + }) + t.Run("newShardBlockProcessor: attachProcessDebugger fails should error", func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + args.Config.Debug.Process.Enabled = true + args.Config.Debug.Process.PollingTimeInSeconds = 0 + testCreateWithArgs(t, args, "PollingTimeInSeconds") + }) + t.Run("nodesSetupChecker.Check fails should error", func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + coreCompStub := factoryMocks.NewCoreComponentsHolderStubFromRealComponent(args.CoreData) + coreCompStub.GenesisNodesSetupCalled = func() sharding.GenesisNodesSetupHandler { + return &testscommon.NodesSetupStub{ + AllInitialNodesCalled: func() []nodesCoordinator.GenesisNodeInfoHandler { + return []nodesCoordinator.GenesisNodeInfoHandler{ + &genesisMocks.GenesisNodeInfoHandlerMock{ + PubKeyBytesValue: []byte("no stake"), + }, + } + }, + GetShardConsensusGroupSizeCalled: func() uint32 { + return 2 + }, + GetMetaConsensusGroupSizeCalled: func() uint32 { + return 2 + }, } + } + args.CoreData = coreCompStub + testCreateWithArgs(t, args, "no one staked") + }) + t.Run("should work with indexAndReturnGenesisAccounts failing due to RootHash failure", func(t *testing.T) { + t.Parallel() - txsPoolRequired := &outportCore.Pool{} + args := createMockProcessComponentsFactoryArgs() + statusCompStub, ok := args.StatusComponents.(*testsMocks.StatusComponentsStub) + require.True(t, ok) + statusCompStub.Outport = &outport.OutportStub{ + HasDriversCalled: func() bool { + return true + }, + } + stateCompMock := factoryMocks.NewStateComponentsMockFromRealComponent(args.State) + realAccounts := stateCompMock.AccountsAdapter() + stateCompMock.Accounts = &state.AccountsStub{ + GetAllLeavesCalled: realAccounts.GetAllLeaves, + RootHashCalled: func() ([]byte, error) { + return nil, expectedErr + }, + CommitCalled: realAccounts.Commit, + } + args.State = stateCompMock - assert.Equal(t, txsPoolRequired, args.TransactionsPool) - assert.Equal(t, bodyRequired, args.Body) - saveBlockCalledMutex.Unlock() - }, - } + pcf, _ := processComp.NewProcessComponentsFactory(args) + require.NotNil(t, pcf) - processArgs.StatusComponents = &mainFactoryMocks.StatusComponentsStub{ - Outport: outportHandler, - } + instance, err := pcf.Create() + require.Nil(t, err) + require.NotNil(t, instance) + + err = instance.Close() + require.NoError(t, err) + _ = args.State.Close() + }) + t.Run("should work with indexAndReturnGenesisAccounts failing due to GetAllLeaves failure", func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + statusCompStub, ok := args.StatusComponents.(*testsMocks.StatusComponentsStub) + require.True(t, ok) + statusCompStub.Outport = &outport.OutportStub{ + HasDriversCalled: func() bool { + return true + }, + } + stateCompMock := factoryMocks.NewStateComponentsMockFromRealComponent(args.State) + realAccounts := stateCompMock.AccountsAdapter() + stateCompMock.Accounts = &state.AccountsStub{ + GetAllLeavesCalled: func(leavesChannels *common.TrieIteratorChannels, ctx context.Context, rootHash []byte) error { + close(leavesChannels.LeavesChan) + leavesChannels.ErrChan.Close() + return expectedErr + }, + RootHashCalled: realAccounts.RootHash, + CommitCalled: realAccounts.Commit, + } + args.State = stateCompMock + + pcf, _ := processComp.NewProcessComponentsFactory(args) + require.NotNil(t, pcf) + + 
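+ // the RootHash error only aborts genesis accounts indexing; it is logged rather than propagated, so Create is still expected to succeed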
instance, err := pcf.Create() + require.Nil(t, err) + require.NotNil(t, instance) + + err = instance.Close() + require.NoError(t, err) + _ = args.State.Close() + }) + t.Run("should work with indexAndReturnGenesisAccounts failing due to Unmarshal failure", func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + statusCompStub, ok := args.StatusComponents.(*testsMocks.StatusComponentsStub) + require.True(t, ok) + statusCompStub.Outport = &outport.OutportStub{ + HasDriversCalled: func() bool { + return true + }, + } + stateCompMock := factoryMocks.NewStateComponentsMockFromRealComponent(args.State) + realAccounts := stateCompMock.AccountsAdapter() + stateCompMock.Accounts = &state.AccountsStub{ + GetAllLeavesCalled: func(leavesChannels *common.TrieIteratorChannels, ctx context.Context, rootHash []byte) error { + addrOk, _ := addrPubKeyConv.Decode("erd17c4fs6mz2aa2hcvva2jfxdsrdknu4220496jmswer9njznt22eds0rxlr4") + addrNOK, _ := addrPubKeyConv.Decode("erd1ulhw20j7jvgfgak5p05kv667k5k9f320sgef5ayxkt9784ql0zssrzyhjp") + leavesChannels.LeavesChan <- keyValStorage.NewKeyValStorage(addrOk, []byte("value")) // coverage + leavesChannels.LeavesChan <- keyValStorage.NewKeyValStorage(addrNOK, []byte("value")) + close(leavesChannels.LeavesChan) + leavesChannels.ErrChan.Close() + return nil + }, + RootHashCalled: realAccounts.RootHash, + CommitCalled: realAccounts.Commit, + } + args.State = stateCompMock + + coreCompStub := factoryMocks.NewCoreComponentsHolderStubFromRealComponent(args.CoreData) + cnt := 0 + coreCompStub.InternalMarshalizerCalled = func() marshal.Marshalizer { + return &testscommon.MarshalizerStub{ + UnmarshalCalled: func(obj interface{}, buff []byte) error { + cnt++ + if cnt == 1 { + return nil // coverage, key_ok + } + return expectedErr + }, + } + } + args.CoreData = coreCompStub + pcf, _ := processComp.NewProcessComponentsFactory(args) + require.NotNil(t, pcf) - pcf, err := processComp.NewProcessComponentsFactory(processArgs) - require.Nil(t, err) + instance, err := pcf.Create() + require.Nil(t, err) + require.NotNil(t, instance) - genesisBlocks := make(map[uint32]coreData.HeaderHandler) - indexingData := make(map[uint32]*genesis.IndexingData) + err = instance.Close() + require.NoError(t, err) + _ = args.State.Close() + }) + t.Run("should work with indexAndReturnGenesisAccounts failing due to error on GetAllLeaves", func(t *testing.T) { + t.Parallel() - for i := uint32(0); i < shardCoordinator.NumberOfShards(); i++ { - genesisBlocks[i] = &block.Header{} + args := createMockProcessComponentsFactoryArgs() + statusCompStub, ok := args.StatusComponents.(*testsMocks.StatusComponentsStub) + require.True(t, ok) + statusCompStub.Outport = &outport.OutportStub{ + HasDriversCalled: func() bool { + return true + }, + } + realStateComp := args.State + args.State = &factoryMocks.StateComponentsMock{ + Accounts: &state.AccountsStub{ + GetAllLeavesCalled: func(leavesChannels *common.TrieIteratorChannels, ctx context.Context, rootHash []byte) error { + close(leavesChannels.LeavesChan) + leavesChannels.ErrChan.WriteInChanNonBlocking(expectedErr) + leavesChannels.ErrChan.Close() + return nil + }, + CommitCalled: realStateComp.AccountsAdapter().Commit, + RootHashCalled: realStateComp.AccountsAdapter().RootHash, + }, + PeersAcc: realStateComp.PeerAccounts(), + Tries: realStateComp.TriesContainer(), + AccountsAPI: realStateComp.AccountsAdapterAPI(), + StorageManagers: realStateComp.TrieStorageManagers(), + MissingNodesNotifier: realStateComp.MissingTrieNodesNotifier(), + } 
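+ // same expectation here: an error written on the trie iterator's ErrChan is consumed while indexing genesis accounts and must not fail Create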
+ + pcf, _ := processComp.NewProcessComponentsFactory(args) + require.NotNil(t, pcf) + + instance, err := pcf.Create() + require.Nil(t, err) + require.NotNil(t, instance) + + err = instance.Close() + require.NoError(t, err) + _ = args.State.Close() + }) + t.Run("should work with indexAndReturnGenesisAccounts failing due to error on Encode", func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + statusCompStub, ok := args.StatusComponents.(*testsMocks.StatusComponentsStub) + require.True(t, ok) + statusCompStub.Outport = &outport.OutportStub{ + HasDriversCalled: func() bool { + return true + }, + } + realStateComp := args.State + args.State = &factoryMocks.StateComponentsMock{ + Accounts: &state.AccountsStub{ + GetAllLeavesCalled: func(leavesChannels *common.TrieIteratorChannels, ctx context.Context, rootHash []byte) error { + leavesChannels.LeavesChan <- keyValStorage.NewKeyValStorage([]byte("invalid addr"), []byte("value")) + close(leavesChannels.LeavesChan) + leavesChannels.ErrChan.Close() + return nil + }, + CommitCalled: realStateComp.AccountsAdapter().Commit, + RootHashCalled: realStateComp.AccountsAdapter().RootHash, + }, + PeersAcc: realStateComp.PeerAccounts(), + Tries: realStateComp.TriesContainer(), + AccountsAPI: realStateComp.AccountsAdapterAPI(), + StorageManagers: realStateComp.TrieStorageManagers(), + MissingNodesNotifier: realStateComp.MissingTrieNodesNotifier(), + } + coreCompStub := factoryMocks.NewCoreComponentsHolderStubFromRealComponent(args.CoreData) + coreCompStub.InternalMarshalizerCalled = func() marshal.Marshalizer { + return &testscommon.MarshalizerStub{ + UnmarshalCalled: func(obj interface{}, buff []byte) error { + return nil + }, + } + } + args.CoreData = coreCompStub + + pcf, _ := processComp.NewProcessComponentsFactory(args) + require.NotNil(t, pcf) + + instance, err := pcf.Create() + require.Nil(t, err) + require.NotNil(t, instance) + + err = instance.Close() + require.NoError(t, err) + _ = args.State.Close() + }) + t.Run("should work - shard", func(t *testing.T) { + shardCoordinator := mock.NewMultiShardsCoordinatorMock(2) + processArgs := components.GetProcessComponentsFactoryArgs(shardCoordinator) + pcf, _ := processComp.NewProcessComponentsFactory(processArgs) + require.NotNil(t, pcf) + + instance, err := pcf.Create() + require.NoError(t, err) + require.NotNil(t, instance) + + err = instance.Close() + require.NoError(t, err) + _ = processArgs.State.Close() + }) + t.Run("should work - meta", func(t *testing.T) { + shardCoordinator := mock.NewMultiShardsCoordinatorMock(2) + shardCoordinator.CurrentShard = common.MetachainShardId + processArgs := components.GetProcessComponentsFactoryArgs(shardCoordinator) + + shardCoordinator.ComputeIdCalled = func(address []byte) uint32 { + protocolSustainabilityAddr, err := processArgs.CoreData.AddressPubKeyConverter().Decode(testingProtocolSustainabilityAddress) + require.NoError(t, err) + if bytes.Equal(protocolSustainabilityAddr, address) { + return 0 + } + return shardCoordinator.CurrentShard + } + fundGenesisWallets(t, processArgs) + + pcf, _ := processComp.NewProcessComponentsFactory(processArgs) + require.NotNil(t, pcf) + + instance, err := pcf.Create() + require.NoError(t, err) + require.NotNil(t, instance) + + err = instance.Close() + require.NoError(t, err) + _ = processArgs.State.Close() + }) +} + +func fundGenesisWallets(t *testing.T, args processComp.ProcessComponentsFactoryArgs) { + accounts := args.State.AccountsAdapter() + initialNodes := 
args.CoreData.GenesisNodesSetup().AllInitialNodes() + nodePrice, ok := big.NewInt(0).SetString(args.SystemSCConfig.StakingSystemSCConfig.GenesisNodePrice, 10) + require.True(t, ok) + for _, node := range initialNodes { + account, err := accounts.LoadAccount(node.AddressBytes()) + require.NoError(t, err) + + userAccount := account.(mxState.UserAccountHandler) + err = userAccount.AddToBalance(nodePrice) + require.NoError(t, err) + + require.NoError(t, accounts.SaveAccount(userAccount)) + _, err = accounts.Commit() + require.NoError(t, err) } +} + +func testCreateWithArgs(t *testing.T, args processComp.ProcessComponentsFactoryArgs, expectedErrSubstr string) { + pcf, _ := processComp.NewProcessComponentsFactory(args) + require.NotNil(t, pcf) + + instance, err := pcf.Create() + require.Error(t, err) + require.True(t, strings.Contains(err.Error(), expectedErrSubstr)) + require.Nil(t, instance) - err = pcf.IndexGenesisBlocks(genesisBlocks, indexingData) - require.Nil(t, err) + _ = args.State.Close() } diff --git a/factory/state/stateComponents.go b/factory/state/stateComponents.go index a66990515cd..1778f0e103c 100644 --- a/factory/state/stateComponents.go +++ b/factory/state/stateComponents.go @@ -10,7 +10,6 @@ import ( "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/errors" "github.com/multiversx/mx-chain-go/factory" - "github.com/multiversx/mx-chain-go/sharding" "github.com/multiversx/mx-chain-go/state" factoryState "github.com/multiversx/mx-chain-go/state/factory" "github.com/multiversx/mx-chain-go/state/storagePruningManager" @@ -24,7 +23,6 @@ import ( // StateComponentsFactoryArgs holds the arguments needed for creating a state components factory type StateComponentsFactoryArgs struct { Config config.Config - ShardCoordinator sharding.Coordinator Core factory.CoreComponentsHolder StatusCore factory.StatusCoreComponentsHolder StorageService dataRetriever.StorageService @@ -36,7 +34,6 @@ type StateComponentsFactoryArgs struct { type stateComponentsFactory struct { config config.Config - shardCoordinator sharding.Coordinator core factory.CoreComponentsHolder statusCore factory.StatusCoreComponentsHolder storageService dataRetriever.StorageService @@ -59,37 +56,18 @@ type stateComponents struct { // NewStateComponentsFactory will return a new instance of stateComponentsFactory func NewStateComponentsFactory(args StateComponentsFactoryArgs) (*stateComponentsFactory, error) { - if args.Core == nil { + if check.IfNil(args.Core) { return nil, errors.ErrNilCoreComponents } - if check.IfNil(args.Core.Hasher()) { - return nil, errors.ErrNilHasher - } - if check.IfNil(args.Core.InternalMarshalizer()) { - return nil, errors.ErrNilMarshalizer - } - if check.IfNil(args.Core.PathHandler()) { - return nil, errors.ErrNilPathHandler - } - if check.IfNil(args.ShardCoordinator) { - return nil, errors.ErrNilShardCoordinator - } if check.IfNil(args.StorageService) { return nil, errors.ErrNilStorageService } - if check.IfNil(args.ChainHandler) { - return nil, errors.ErrNilBlockChainHandler - } if check.IfNil(args.StatusCore) { return nil, errors.ErrNilStatusCoreComponents } - if check.IfNil(args.StatusCore.AppStatusHandler()) { - return nil, errors.ErrNilAppStatusHandler - } return &stateComponentsFactory{ config: args.Config, - shardCoordinator: args.ShardCoordinator, core: args.Core, statusCore: args.StatusCore, storageService: args.StorageService, diff --git a/factory/state/stateComponentsHandler_test.go b/factory/state/stateComponentsHandler_test.go index 
2abaecdfba0..ba552ed416a 100644 --- a/factory/state/stateComponentsHandler_test.go +++ b/factory/state/stateComponentsHandler_test.go @@ -4,7 +4,8 @@ import ( "testing" "github.com/multiversx/mx-chain-go/common" - "github.com/multiversx/mx-chain-go/factory/mock" + errorsMx "github.com/multiversx/mx-chain-go/errors" + "github.com/multiversx/mx-chain-go/factory" stateComp "github.com/multiversx/mx-chain-go/factory/state" componentsMock "github.com/multiversx/mx-chain-go/testscommon/components" "github.com/multiversx/mx-chain-go/testscommon/storageManager" @@ -12,98 +13,115 @@ import ( "github.com/stretchr/testify/require" ) -// ------------ Test ManagedStateComponents -------------------- -func TestManagedStateComponents_CreateWithInvalidArgsShouldErr(t *testing.T) { +func TestNewManagedStateComponents(t *testing.T) { t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } - coreComponents := componentsMock.GetCoreComponents() - shardCoordinator := mock.NewMultiShardsCoordinatorMock(2) - args := componentsMock.GetStateFactoryArgs(coreComponents, shardCoordinator) - stateComponentsFactory, _ := stateComp.NewStateComponentsFactory(args) - managedStateComponents, err := stateComp.NewManagedStateComponents(stateComponentsFactory) - require.NoError(t, err) - _ = args.Core.SetInternalMarshalizer(nil) - err = managedStateComponents.Create() - require.Error(t, err) - require.Nil(t, managedStateComponents.AccountsAdapter()) + t.Run("nil factory should error", func(t *testing.T) { + t.Parallel() + + managedStateComponents, err := stateComp.NewManagedStateComponents(nil) + require.Equal(t, errorsMx.ErrNilStateComponentsFactory, err) + require.Nil(t, managedStateComponents) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + coreComponents := componentsMock.GetCoreComponents() + args := componentsMock.GetStateFactoryArgs(coreComponents) + stateComponentsFactory, _ := stateComp.NewStateComponentsFactory(args) + managedStateComponents, err := stateComp.NewManagedStateComponents(stateComponentsFactory) + require.NoError(t, err) + require.NotNil(t, managedStateComponents) + }) } -func TestManagedStateComponents_CreateShouldWork(t *testing.T) { +func TestManagedStateComponents_Create(t *testing.T) { t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } - coreComponents := componentsMock.GetCoreComponents() - shardCoordinator := mock.NewMultiShardsCoordinatorMock(2) - args := componentsMock.GetStateFactoryArgs(coreComponents, shardCoordinator) - stateComponentsFactory, _ := stateComp.NewStateComponentsFactory(args) - managedStateComponents, err := stateComp.NewManagedStateComponents(stateComponentsFactory) - require.NoError(t, err) - require.Nil(t, managedStateComponents.AccountsAdapter()) - require.Nil(t, managedStateComponents.PeerAccounts()) - require.Nil(t, managedStateComponents.TriesContainer()) - require.Nil(t, managedStateComponents.TrieStorageManagers()) - require.Nil(t, managedStateComponents.MissingTrieNodesNotifier()) - - err = managedStateComponents.Create() - require.NoError(t, err) - require.NotNil(t, managedStateComponents.AccountsAdapter()) - require.NotNil(t, managedStateComponents.PeerAccounts()) - require.NotNil(t, managedStateComponents.TriesContainer()) - require.NotNil(t, managedStateComponents.TrieStorageManagers()) - require.NotNil(t, managedStateComponents.MissingTrieNodesNotifier()) + t.Run("invalid config should error", func(t *testing.T) { + t.Parallel() + + coreComponents := componentsMock.GetCoreComponents() + args := 
componentsMock.GetStateFactoryArgs(coreComponents) + stateComponentsFactory, _ := stateComp.NewStateComponentsFactory(args) + managedStateComponents, err := stateComp.NewManagedStateComponents(stateComponentsFactory) + require.NoError(t, err) + _ = args.Core.SetInternalMarshalizer(nil) + err = managedStateComponents.Create() + require.Error(t, err) + require.Nil(t, managedStateComponents.AccountsAdapter()) + require.NoError(t, managedStateComponents.Close()) + }) + t.Run("should work with getters", func(t *testing.T) { + t.Parallel() + + coreComponents := componentsMock.GetCoreComponents() + args := componentsMock.GetStateFactoryArgs(coreComponents) + stateComponentsFactory, _ := stateComp.NewStateComponentsFactory(args) + managedStateComponents, err := stateComp.NewManagedStateComponents(stateComponentsFactory) + require.NoError(t, err) + require.Nil(t, managedStateComponents.AccountsAdapter()) + require.Nil(t, managedStateComponents.PeerAccounts()) + require.Nil(t, managedStateComponents.TriesContainer()) + require.Nil(t, managedStateComponents.TrieStorageManagers()) + require.Nil(t, managedStateComponents.AccountsAdapterAPI()) + require.Nil(t, managedStateComponents.AccountsRepository()) + require.Nil(t, managedStateComponents.MissingTrieNodesNotifier()) + + err = managedStateComponents.Create() + require.NoError(t, err) + require.NotNil(t, managedStateComponents.AccountsAdapter()) + require.NotNil(t, managedStateComponents.PeerAccounts()) + require.NotNil(t, managedStateComponents.TriesContainer()) + require.NotNil(t, managedStateComponents.TrieStorageManagers()) + require.NotNil(t, managedStateComponents.AccountsAdapterAPI()) + require.NotNil(t, managedStateComponents.AccountsRepository()) + require.NotNil(t, managedStateComponents.MissingTrieNodesNotifier()) + + require.Equal(t, factory.StateComponentsName, managedStateComponents.String()) + require.NoError(t, managedStateComponents.Close()) + }) } func TestManagedStateComponents_Close(t *testing.T) { t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } coreComponents := componentsMock.GetCoreComponents() - shardCoordinator := mock.NewMultiShardsCoordinatorMock(2) - args := componentsMock.GetStateFactoryArgs(coreComponents, shardCoordinator) + args := componentsMock.GetStateFactoryArgs(coreComponents) stateComponentsFactory, _ := stateComp.NewStateComponentsFactory(args) managedStateComponents, _ := stateComp.NewManagedStateComponents(stateComponentsFactory) + require.NoError(t, managedStateComponents.Close()) err := managedStateComponents.Create() require.NoError(t, err) - err = managedStateComponents.Close() - require.NoError(t, err) + require.NoError(t, managedStateComponents.Close()) require.Nil(t, managedStateComponents.AccountsAdapter()) } func TestManagedStateComponents_CheckSubcomponents(t *testing.T) { t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } coreComponents := componentsMock.GetCoreComponents() - shardCoordinator := mock.NewMultiShardsCoordinatorMock(2) - args := componentsMock.GetStateFactoryArgs(coreComponents, shardCoordinator) + args := componentsMock.GetStateFactoryArgs(coreComponents) stateComponentsFactory, _ := stateComp.NewStateComponentsFactory(args) managedStateComponents, _ := stateComp.NewManagedStateComponents(stateComponentsFactory) - err := managedStateComponents.Create() + err := managedStateComponents.CheckSubcomponents() + require.Equal(t, errorsMx.ErrNilStateComponents, err) + + err = managedStateComponents.Create() require.NoError(t, err) err = 
managedStateComponents.CheckSubcomponents() require.NoError(t, err) + + require.NoError(t, managedStateComponents.Close()) } func TestManagedStateComponents_Setters(t *testing.T) { t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } coreComponents := componentsMock.GetCoreComponents() - shardCoordinator := mock.NewMultiShardsCoordinatorMock(2) - args := componentsMock.GetStateFactoryArgs(coreComponents, shardCoordinator) + args := componentsMock.GetStateFactoryArgs(coreComponents) stateComponentsFactory, _ := stateComp.NewStateComponentsFactory(args) managedStateComponents, _ := stateComp.NewManagedStateComponents(stateComponentsFactory) err := managedStateComponents.Create() @@ -112,12 +130,31 @@ func TestManagedStateComponents_Setters(t *testing.T) { triesContainer := &trieMock.TriesHolderStub{} triesStorageManagers := map[string]common.StorageManager{"a": &storageManager.StorageManagerStub{}} + err = managedStateComponents.SetTriesContainer(nil) + require.Equal(t, errorsMx.ErrNilTriesContainer, err) err = managedStateComponents.SetTriesContainer(triesContainer) require.NoError(t, err) + err = managedStateComponents.SetTriesStorageManagers(nil) + require.Equal(t, errorsMx.ErrNilTriesStorageManagers, err) err = managedStateComponents.SetTriesStorageManagers(triesStorageManagers) require.NoError(t, err) require.Equal(t, triesContainer, managedStateComponents.TriesContainer()) require.Equal(t, triesStorageManagers, managedStateComponents.TrieStorageManagers()) + + require.NoError(t, managedStateComponents.Close()) +} + +func TestManagedStateComponents_IsInterfaceNil(t *testing.T) { + t.Parallel() + + managedStateComponents, _ := stateComp.NewManagedStateComponents(nil) + require.True(t, managedStateComponents.IsInterfaceNil()) + + coreComponents := componentsMock.GetCoreComponents() + args := componentsMock.GetStateFactoryArgs(coreComponents) + stateComponentsFactory, _ := stateComp.NewStateComponentsFactory(args) + managedStateComponents, _ = stateComp.NewManagedStateComponents(stateComponentsFactory) + require.False(t, managedStateComponents.IsInterfaceNil()) } diff --git a/factory/state/stateComponents_test.go b/factory/state/stateComponents_test.go index c45259758b7..177407226d8 100644 --- a/factory/state/stateComponents_test.go +++ b/factory/state/stateComponents_test.go @@ -3,91 +3,152 @@ package state_test import ( "testing" + "github.com/multiversx/mx-chain-core-go/hashing" + "github.com/multiversx/mx-chain-core-go/marshal" "github.com/multiversx/mx-chain-go/errors" - "github.com/multiversx/mx-chain-go/factory/mock" stateComp "github.com/multiversx/mx-chain-go/factory/state" + "github.com/multiversx/mx-chain-go/testscommon" componentsMock "github.com/multiversx/mx-chain-go/testscommon/components" + "github.com/multiversx/mx-chain-go/testscommon/factory" "github.com/stretchr/testify/require" ) -func TestNewStateComponentsFactory_NilShardCoordinatorShouldErr(t *testing.T) { +func TestNewStateComponentsFactory(t *testing.T) { t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } - coreComponents := componentsMock.GetCoreComponents() - shardCoordinator := mock.NewMultiShardsCoordinatorMock(2) - args := componentsMock.GetStateFactoryArgs(coreComponents, shardCoordinator) - args.ShardCoordinator = nil - - scf, err := stateComp.NewStateComponentsFactory(args) - require.Nil(t, scf) - require.Equal(t, errors.ErrNilShardCoordinator, err) -} - -func TestNewStateComponentsFactory_NilCoreComponents(t *testing.T) { - t.Parallel() - if 
testing.Short() { - t.Skip("this is not a short test") - } - - coreComponents := componentsMock.GetCoreComponents() - shardCoordinator := mock.NewMultiShardsCoordinatorMock(2) - args := componentsMock.GetStateFactoryArgs(coreComponents, shardCoordinator) - args.Core = nil - - scf, err := stateComp.NewStateComponentsFactory(args) - require.Nil(t, scf) - require.Equal(t, errors.ErrNilCoreComponents, err) + t.Run("nil Core should error", func(t *testing.T) { + t.Parallel() + + coreComponents := componentsMock.GetCoreComponents() + args := componentsMock.GetStateFactoryArgs(coreComponents) + args.Core = nil + + scf, err := stateComp.NewStateComponentsFactory(args) + require.Nil(t, scf) + require.Equal(t, errors.ErrNilCoreComponents, err) + }) + t.Run("nil StatusCore should error", func(t *testing.T) { + t.Parallel() + + coreComponents := componentsMock.GetCoreComponents() + args := componentsMock.GetStateFactoryArgs(coreComponents) + args.StatusCore = nil + + scf, err := stateComp.NewStateComponentsFactory(args) + require.Nil(t, scf) + require.Equal(t, errors.ErrNilStatusCoreComponents, err) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + coreComponents := componentsMock.GetCoreComponents() + args := componentsMock.GetStateFactoryArgs(coreComponents) + + scf, err := stateComp.NewStateComponentsFactory(args) + require.NoError(t, err) + require.NotNil(t, scf) + }) } -func TestNewStateComponentsFactory_ShouldWork(t *testing.T) { +func TestStateComponentsFactory_Create(t *testing.T) { t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } - - coreComponents := componentsMock.GetCoreComponents() - shardCoordinator := mock.NewMultiShardsCoordinatorMock(2) - args := componentsMock.GetStateFactoryArgs(coreComponents, shardCoordinator) - scf, err := stateComp.NewStateComponentsFactory(args) - require.NoError(t, err) - require.NotNil(t, scf) + t.Run("CreateTriesComponentsForShardId fails should error", func(t *testing.T) { + t.Parallel() + + coreComponents := componentsMock.GetCoreComponents() + args := componentsMock.GetStateFactoryArgs(coreComponents) + coreCompStub := factory.NewCoreComponentsHolderStubFromRealComponent(args.Core) + coreCompStub.InternalMarshalizerCalled = func() marshal.Marshalizer { + return nil + } + args.Core = coreCompStub + scf, _ := stateComp.NewStateComponentsFactory(args) + + sc, err := scf.Create() + require.Error(t, err) + require.Nil(t, sc) + }) + t.Run("NewMemoryEvictionWaitingList fails should error", func(t *testing.T) { + t.Parallel() + + coreComponents := componentsMock.GetCoreComponents() + args := componentsMock.GetStateFactoryArgs(coreComponents) + args.Config.EvictionWaitingList.RootHashesSize = 0 + scf, _ := stateComp.NewStateComponentsFactory(args) + + sc, err := scf.Create() + require.Error(t, err) + require.Nil(t, sc) + }) + t.Run("NewAccountsDB fails should error", func(t *testing.T) { + t.Parallel() + + coreComponents := componentsMock.GetCoreComponents() + args := componentsMock.GetStateFactoryArgs(coreComponents) + + coreCompStub := factory.NewCoreComponentsHolderStubFromRealComponent(args.Core) + cnt := 0 + coreCompStub.HasherCalled = func() hashing.Hasher { + cnt++ + if cnt > 1 { + return nil + } + return &testscommon.HasherStub{} + } + args.Core = coreCompStub + scf, _ := stateComp.NewStateComponentsFactory(args) + + sc, err := scf.Create() + require.Error(t, err) + require.Nil(t, sc) + }) + t.Run("CreateAccountsAdapterAPIOnFinal fails should error", func(t *testing.T) { + t.Parallel() + + coreComponents := 
componentsMock.GetCoreComponents() + args := componentsMock.GetStateFactoryArgs(coreComponents) + + coreCompStub := factory.NewCoreComponentsHolderStubFromRealComponent(args.Core) + cnt := 0 + coreCompStub.HasherCalled = func() hashing.Hasher { + cnt++ + if cnt > 2 { + return nil + } + return &testscommon.HasherStub{} + } + args.Core = coreCompStub + scf, _ := stateComp.NewStateComponentsFactory(args) + + sc, err := scf.Create() + require.Error(t, err) + require.Nil(t, sc) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + coreComponents := componentsMock.GetCoreComponents() + args := componentsMock.GetStateFactoryArgs(coreComponents) + scf, _ := stateComp.NewStateComponentsFactory(args) + + sc, err := scf.Create() + require.NoError(t, err) + require.NotNil(t, sc) + require.NoError(t, sc.Close()) + }) } -func TestStateComponentsFactory_CreateShouldWork(t *testing.T) { +func TestStateComponents_Close(t *testing.T) { t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } coreComponents := componentsMock.GetCoreComponents() - shardCoordinator := mock.NewMultiShardsCoordinatorMock(2) - args := componentsMock.GetStateFactoryArgs(coreComponents, shardCoordinator) - + args := componentsMock.GetStateFactoryArgs(coreComponents) scf, _ := stateComp.NewStateComponentsFactory(args) - res, err := scf.Create() + sc, err := scf.Create() require.NoError(t, err) - require.NotNil(t, res) -} - -// ------------ Test StateComponents -------------------- -func TestStateComponents_CloseShouldWork(t *testing.T) { - t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } + require.NotNil(t, sc) - coreComponents := componentsMock.GetCoreComponents() - shardCoordinator := mock.NewMultiShardsCoordinatorMock(2) - args := componentsMock.GetStateFactoryArgs(coreComponents, shardCoordinator) - scf, _ := stateComp.NewStateComponentsFactory(args) - - sc, _ := scf.Create() - - err := sc.Close() - require.NoError(t, err) + require.NoError(t, sc.Close()) } diff --git a/factory/status/export_test.go b/factory/status/export_test.go new file mode 100644 index 00000000000..09f8e9c76bf --- /dev/null +++ b/factory/status/export_test.go @@ -0,0 +1,28 @@ +package status + +import ( + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-go/epochStart" + "github.com/multiversx/mx-chain-go/p2p" +) + +// EpochStartEventHandler - +func (pc *statusComponents) EpochStartEventHandler() epochStart.ActionHandler { + return pc.epochStartEventHandler() +} + +// ComputeNumConnectedPeers - +func ComputeNumConnectedPeers( + appStatusHandler core.AppStatusHandler, + netMessenger p2p.Messenger, +) { + computeNumConnectedPeers(appStatusHandler, netMessenger) +} + +// ComputeConnectedPeers - +func ComputeConnectedPeers( + appStatusHandler core.AppStatusHandler, + netMessenger p2p.Messenger, +) { + computeConnectedPeers(appStatusHandler, netMessenger) +} diff --git a/factory/status/statusComponents.go b/factory/status/statusComponents.go index ed66739b3c7..c2898767ef3 100644 --- a/factory/status/statusComponents.go +++ b/factory/status/statusComponents.go @@ -2,7 +2,6 @@ package status import ( "context" - "fmt" "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" @@ -45,7 +44,6 @@ type StatusComponentsFactoryArgs struct { EpochStartNotifier factory.EpochStartNotifier CoreComponents factory.CoreComponentsHolder StatusCoreComponents factory.StatusCoreComponentsHolder - DataComponents factory.DataComponentsHolder 
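+ // DataComponents is no longer consumed by this factory, hence the field, the struct member and their nil checks are removed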
NetworkComponents factory.NetworkComponentsHolder StateComponents factory.StateComponentsHolder IsInImportMode bool @@ -61,7 +59,6 @@ type statusComponentsFactory struct { forkDetector process.ForkDetector coreComponents factory.CoreComponentsHolder statusCoreComponents factory.StatusCoreComponentsHolder - dataComponents factory.DataComponentsHolder networkComponents factory.NetworkComponentsHolder stateComponents factory.StateComponentsHolder isInImportMode bool @@ -74,18 +71,12 @@ func NewStatusComponentsFactory(args StatusComponentsFactoryArgs) (*statusCompon if check.IfNil(args.CoreComponents) { return nil, errors.ErrNilCoreComponentsHolder } - if check.IfNil(args.DataComponents) { - return nil, errors.ErrNilDataComponentsHolder + if check.IfNil(args.CoreComponents.GenesisNodesSetup()) { + return nil, errors.ErrNilGenesisNodesSetupHandler } if check.IfNil(args.NetworkComponents) { return nil, errors.ErrNilNetworkComponentsHolder } - if check.IfNil(args.CoreComponents.AddressPubKeyConverter()) { - return nil, fmt.Errorf("%w for address", errors.ErrNilPubKeyConverter) - } - if check.IfNil(args.CoreComponents.ValidatorPubKeyConverter()) { - return nil, fmt.Errorf("%w for validator", errors.ErrNilPubKeyConverter) - } if check.IfNil(args.ShardCoordinator) { return nil, errors.ErrNilShardCoordinator } @@ -98,9 +89,6 @@ func NewStatusComponentsFactory(args StatusComponentsFactoryArgs) (*statusCompon if check.IfNil(args.StatusCoreComponents) { return nil, errors.ErrNilStatusCoreComponents } - if check.IfNil(args.StatusCoreComponents.AppStatusHandler()) { - return nil, errors.ErrNilAppStatusHandler - } return &statusComponentsFactory{ config: args.Config, @@ -111,7 +99,6 @@ func NewStatusComponentsFactory(args StatusComponentsFactoryArgs) (*statusCompon epochStartNotifier: args.EpochStartNotifier, coreComponents: args.CoreComponents, statusCoreComponents: args.StatusCoreComponents, - dataComponents: args.DataComponents, networkComponents: args.NetworkComponents, stateComponents: args.StateComponents, isInImportMode: args.IsInImportMode, diff --git a/factory/status/statusComponentsHandler.go b/factory/status/statusComponentsHandler.go index 1eeb6c2754c..71f69b2a325 100644 --- a/factory/status/statusComponentsHandler.go +++ b/factory/status/statusComponentsHandler.go @@ -107,10 +107,16 @@ func (msc *managedStatusComponents) CheckSubcomponents() error { } // SetForkDetector sets the fork detector -func (msc *managedStatusComponents) SetForkDetector(forkDetector process.ForkDetector) { +func (msc *managedStatusComponents) SetForkDetector(forkDetector process.ForkDetector) error { + if check.IfNil(forkDetector) { + return errors.ErrNilForkDetector + } + msc.mutStatusComponents.Lock() msc.statusComponentsFactory.forkDetector = forkDetector msc.mutStatusComponents.Unlock() + + return nil } // StartPolling starts polling for the updated status diff --git a/factory/status/statusComponentsHandler_test.go b/factory/status/statusComponentsHandler_test.go index 702d911842a..f84cc07ce20 100644 --- a/factory/status/statusComponentsHandler_test.go +++ b/factory/status/statusComponentsHandler_test.go @@ -3,68 +3,90 @@ package status_test import ( "testing" + "github.com/multiversx/mx-chain-go/common" + errorsMx "github.com/multiversx/mx-chain-go/errors" + "github.com/multiversx/mx-chain-go/factory" "github.com/multiversx/mx-chain-go/factory/mock" statusComp "github.com/multiversx/mx-chain-go/factory/status" - componentsMock "github.com/multiversx/mx-chain-go/testscommon/components" - 
"github.com/multiversx/mx-chain-go/testscommon/factory" + "github.com/multiversx/mx-chain-go/p2p" + factoryMocks "github.com/multiversx/mx-chain-go/testscommon/factory" + "github.com/multiversx/mx-chain-go/testscommon/p2pmocks" "github.com/multiversx/mx-chain-go/testscommon/statusHandler" "github.com/stretchr/testify/require" ) -// ------------ Test ManagedStatusComponents -------------------- -func TestManagedStatusComponents_CreateWithInvalidArgsShouldErr(t *testing.T) { +func TestNewManagedStatusComponents(t *testing.T) { t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } - shardCoordinator := mock.NewMultiShardsCoordinatorMock(2) - statusArgs, _ := componentsMock.GetStatusComponentsFactoryArgsAndProcessComponents(shardCoordinator) - statusCoreComponents := &factory.StatusCoreComponentsStub{ - AppStatusHandlerField: &statusHandler.AppStatusHandlerStub{}, - } - statusArgs.StatusCoreComponents = statusCoreComponents + t.Run("nil factory should error", func(t *testing.T) { + t.Parallel() - statusComponentsFactory, _ := statusComp.NewStatusComponentsFactory(statusArgs) - managedStatusComponents, err := statusComp.NewManagedStatusComponents(statusComponentsFactory) - require.NoError(t, err) + managedStatusComponents, err := statusComp.NewManagedStatusComponents(nil) + require.Equal(t, errorsMx.ErrNilStatusComponentsFactory, err) + require.Nil(t, managedStatusComponents) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() - statusCoreComponents.AppStatusHandlerField = nil - err = managedStatusComponents.Create() - require.Error(t, err) + scf, err := statusComp.NewStatusComponentsFactory(createMockStatusComponentsFactoryArgs()) + require.Nil(t, err) + managedStatusComponents, err := statusComp.NewManagedStatusComponents(scf) + require.Nil(t, err) + require.NotNil(t, managedStatusComponents) + }) } -func TestManagedStatusComponents_CreateShouldWork(t *testing.T) { +func TestManagedStatusComponents_Create(t *testing.T) { t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } - shardCoordinator := mock.NewMultiShardsCoordinatorMock(2) - statusArgs, _ := componentsMock.GetStatusComponentsFactoryArgsAndProcessComponents(shardCoordinator) - statusComponentsFactory, _ := statusComp.NewStatusComponentsFactory(statusArgs) - managedStatusComponents, err := statusComp.NewManagedStatusComponents(statusComponentsFactory) - require.NoError(t, err) - require.Nil(t, managedStatusComponents.OutportHandler()) - require.Nil(t, managedStatusComponents.SoftwareVersionChecker()) + t.Run("invalid params should error", func(t *testing.T) { + t.Parallel() - err = managedStatusComponents.Create() - require.NoError(t, err) - require.NotNil(t, managedStatusComponents.OutportHandler()) - require.NotNil(t, managedStatusComponents.SoftwareVersionChecker()) + args := createMockStatusComponentsFactoryArgs() + args.StatusCoreComponents = &factoryMocks.StatusCoreComponentsStub{ + AppStatusHandlerField: nil, + } + scf, err := statusComp.NewStatusComponentsFactory(args) + require.Nil(t, err) + managedStatusComponents, err := statusComp.NewManagedStatusComponents(scf) + require.Nil(t, err) + require.NotNil(t, managedStatusComponents) + + err = managedStatusComponents.Create() + require.Error(t, err) + }) + t.Run("should work with getters", func(t *testing.T) { + t.Parallel() + + scf, err := statusComp.NewStatusComponentsFactory(createMockStatusComponentsFactoryArgs()) + require.Nil(t, err) + managedStatusComponents, err := statusComp.NewManagedStatusComponents(scf) + 
require.Nil(t, err) + require.NotNil(t, managedStatusComponents) + require.Nil(t, managedStatusComponents.OutportHandler()) + require.Nil(t, managedStatusComponents.SoftwareVersionChecker()) + + err = managedStatusComponents.Create() + require.NoError(t, err) + require.NotNil(t, managedStatusComponents.OutportHandler()) + require.NotNil(t, managedStatusComponents.SoftwareVersionChecker()) + + require.Equal(t, factory.StatusComponentsName, managedStatusComponents.String()) + }) } func TestManagedStatusComponents_Close(t *testing.T) { t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } - shardCoordinator := mock.NewMultiShardsCoordinatorMock(2) - statusArgs, _ := componentsMock.GetStatusComponentsFactoryArgsAndProcessComponents(shardCoordinator) - statusComponentsFactory, _ := statusComp.NewStatusComponentsFactory(statusArgs) - managedStatusComponents, _ := statusComp.NewManagedStatusComponents(statusComponentsFactory) - err := managedStatusComponents.Create() + scf, _ := statusComp.NewStatusComponentsFactory(createMockStatusComponentsFactoryArgs()) + managedStatusComponents, _ := statusComp.NewManagedStatusComponents(scf) + err := managedStatusComponents.Close() + require.NoError(t, err) + + err = managedStatusComponents.Create() + require.NoError(t, err) + + err = managedStatusComponents.StartPolling() // coverage require.NoError(t, err) err = managedStatusComponents.Close() @@ -73,17 +95,199 @@ func TestManagedStatusComponents_Close(t *testing.T) { func TestManagedStatusComponents_CheckSubcomponents(t *testing.T) { t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } - shardCoordinator := mock.NewMultiShardsCoordinatorMock(2) - statusArgs, _ := componentsMock.GetStatusComponentsFactoryArgsAndProcessComponents(shardCoordinator) - statusComponentsFactory, _ := statusComp.NewStatusComponentsFactory(statusArgs) - managedStatusComponents, _ := statusComp.NewManagedStatusComponents(statusComponentsFactory) - err := managedStatusComponents.Create() + scf, _ := statusComp.NewStatusComponentsFactory(createMockStatusComponentsFactoryArgs()) + managedStatusComponents, _ := statusComp.NewManagedStatusComponents(scf) + + err := managedStatusComponents.CheckSubcomponents() + require.Equal(t, errorsMx.ErrNilStatusComponents, err) + + err = managedStatusComponents.Create() require.NoError(t, err) err = managedStatusComponents.CheckSubcomponents() require.NoError(t, err) } + +func TestManagedStatusComponents_SetForkDetector(t *testing.T) { + t.Parallel() + + scf, _ := statusComp.NewStatusComponentsFactory(createMockStatusComponentsFactoryArgs()) + managedStatusComponents, _ := statusComp.NewManagedStatusComponents(scf) + err := managedStatusComponents.Create() + require.NoError(t, err) + + err = managedStatusComponents.SetForkDetector(nil) + require.Equal(t, errorsMx.ErrNilForkDetector, err) + err = managedStatusComponents.SetForkDetector(&mock.ForkDetectorMock{}) + require.NoError(t, err) +} + +func TestManagedStatusComponents_StartPolling(t *testing.T) { + t.Parallel() + + t.Run("NewAppStatusPolling fails should error", func(t *testing.T) { + t.Parallel() + + args := createMockStatusComponentsFactoryArgs() + args.Config.GeneralSettings.StatusPollingIntervalSec = 0 + scf, _ := statusComp.NewStatusComponentsFactory(args) + managedStatusComponents, _ := statusComp.NewManagedStatusComponents(scf) + err := managedStatusComponents.Create() + require.NoError(t, err) + + err = managedStatusComponents.StartPolling() + require.Equal(t, 
errorsMx.ErrStatusPollingInit, err) + }) + t.Run("RegisterPollingFunc fails should error", func(t *testing.T) { + t.Parallel() + + args := createMockStatusComponentsFactoryArgs() + args.Config.GeneralSettings.StatusPollingIntervalSec = 0 + scf, _ := statusComp.NewStatusComponentsFactory(args) + managedStatusComponents, _ := statusComp.NewManagedStatusComponents(scf) + err := managedStatusComponents.Create() + require.NoError(t, err) + + err = managedStatusComponents.StartPolling() + require.Equal(t, errorsMx.ErrStatusPollingInit, err) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + scf, _ := statusComp.NewStatusComponentsFactory(createMockStatusComponentsFactoryArgs()) + managedStatusComponents, _ := statusComp.NewManagedStatusComponents(scf) + err := managedStatusComponents.Create() + require.NoError(t, err) + + err = managedStatusComponents.StartPolling() + require.NoError(t, err) + }) +} + +func TestComputeNumConnectedPeers(t *testing.T) { + t.Parallel() + + netMes := &p2pmocks.MessengerStub{ + ConnectedAddressesCalled: func() []string { + return []string{"addr1", "addr2", "addr3"} + }, + } + appStatusHandler := &statusHandler.AppStatusHandlerStub{ + SetUInt64ValueHandler: func(key string, value uint64) { + require.Equal(t, common.MetricNumConnectedPeers, key) + require.Equal(t, uint64(3), value) + }, + } + + statusComp.ComputeNumConnectedPeers(appStatusHandler, netMes) +} + +func TestComputeConnectedPeers(t *testing.T) { + t.Parallel() + + netMes := &p2pmocks.MessengerStub{ + GetConnectedPeersInfoCalled: func() *p2p.ConnectedPeersInfo { + return &p2p.ConnectedPeersInfo{ + SelfShardID: 0, + UnknownPeers: []string{"unknown"}, + Seeders: []string{"seeder"}, + IntraShardValidators: map[uint32][]string{ + 0: {"intra-v-0"}, + 1: {"intra-v-1"}, + }, + IntraShardObservers: map[uint32][]string{ + 0: {"intra-o-0"}, + 1: {"intra-o-1"}, + }, + CrossShardValidators: map[uint32][]string{ + 0: {"cross-v-0"}, + 1: {"cross-v-1"}, + }, + CrossShardObservers: map[uint32][]string{ + 0: {"cross-o-0"}, + 1: {"cross-o-1"}, + }, + FullHistoryObservers: map[uint32][]string{ + 0: {"fh-0"}, + 1: {"fh-1"}, + }, + NumValidatorsOnShard: map[uint32]int{ + 0: 1, + 1: 1, + }, + NumObserversOnShard: map[uint32]int{ + 0: 1, + 1: 1, + }, + NumPreferredPeersOnShard: map[uint32]int{ + 0: 0, + 1: 0, + }, + NumIntraShardValidators: 2, + NumIntraShardObservers: 2, + NumCrossShardValidators: 2, + NumCrossShardObservers: 2, + NumFullHistoryObservers: 2, + } + }, + AddressesCalled: func() []string { + return []string{"intra-v-0", "intra-v-1", "intra-o-0", "intra-o-1", "cross-v-0", "cross-v-1"} + }, + } + expectedPeerClassification := "intraVal:2,crossVal:2,intraObs:2,crossObs:2,fullObs:2,unknown:1," + cnt := 0 + appStatusHandler := &statusHandler.AppStatusHandlerStub{ + SetStringValueHandler: func(key string, value string) { + cnt++ + switch cnt { + case 1: + require.Equal(t, common.MetricNumConnectedPeersClassification, key) + require.Equal(t, expectedPeerClassification, value) + case 2: + require.Equal(t, common.MetricP2PNumConnectedPeersClassification, key) + require.Equal(t, expectedPeerClassification, value) + case 3: + require.Equal(t, common.MetricP2PUnknownPeers, key) + require.Equal(t, "unknown", value) + case 4: + require.Equal(t, common.MetricP2PIntraShardValidators, key) + require.Equal(t, "intra-v-0,intra-v-1", value) + case 5: + require.Equal(t, common.MetricP2PIntraShardObservers, key) + require.Equal(t, "intra-o-0,intra-o-1", value) + case 6: + require.Equal(t, 
common.MetricP2PCrossShardValidators, key) + require.Equal(t, "cross-v-0,cross-v-1", value) + case 7: + require.Equal(t, common.MetricP2PCrossShardObservers, key) + require.Equal(t, "cross-o-0,cross-o-1", value) + case 8: + require.Equal(t, common.MetricP2PFullHistoryObservers, key) + require.Equal(t, "fh-0,fh-1", value) + case 9: + require.Equal(t, common.MetricP2PPeerInfo, key) + require.Equal(t, "intra-v-0,intra-v-1,intra-o-0,intra-o-1,cross-v-0,cross-v-1", value) + default: + require.Fail(t, "should not have been called") + } + }, + SetUInt64ValueHandler: func(key string, value uint64) { + require.Equal(t, common.MetricNumConnectedPeers, key) + require.Equal(t, 3, key) + }, + } + + statusComp.ComputeConnectedPeers(appStatusHandler, netMes) +} + +func TestManagedStatusComponents_IsInterfaceNil(t *testing.T) { + t.Parallel() + + managedStatusComponents, _ := statusComp.NewManagedStatusComponents(nil) + require.True(t, managedStatusComponents.IsInterfaceNil()) + + scf, _ := statusComp.NewStatusComponentsFactory(createMockStatusComponentsFactoryArgs()) + managedStatusComponents, _ = statusComp.NewManagedStatusComponents(scf) + require.False(t, managedStatusComponents.IsInterfaceNil()) +} diff --git a/factory/status/statusComponents_test.go b/factory/status/statusComponents_test.go index 7a1f0ee83ad..5240fc11ba7 100644 --- a/factory/status/statusComponents_test.go +++ b/factory/status/statusComponents_test.go @@ -1,172 +1,254 @@ package status_test import ( + "errors" "testing" - "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-go/config" - "github.com/multiversx/mx-chain-go/errors" - coreComp "github.com/multiversx/mx-chain-go/factory/core" + errorsMx "github.com/multiversx/mx-chain-go/errors" "github.com/multiversx/mx-chain-go/factory/mock" statusComp "github.com/multiversx/mx-chain-go/factory/status" + testsMocks "github.com/multiversx/mx-chain-go/integrationTests/mock" "github.com/multiversx/mx-chain-go/testscommon" componentsMock "github.com/multiversx/mx-chain-go/testscommon/components" + "github.com/multiversx/mx-chain-go/testscommon/epochNotifier" + "github.com/multiversx/mx-chain-go/testscommon/factory" "github.com/multiversx/mx-chain-go/testscommon/shardingMocks" - "github.com/stretchr/testify/assert" + "github.com/multiversx/mx-chain-go/testscommon/statusHandler" "github.com/stretchr/testify/require" ) -func TestNewStatusComponentsFactory_NilCoreComponentsShouldErr(t *testing.T) { - t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") +func createMockStatusComponentsFactoryArgs() statusComp.StatusComponentsFactoryArgs { + return statusComp.StatusComponentsFactoryArgs{ + Config: testscommon.GetGeneralConfig(), + ExternalConfig: config.ExternalConfig{ + ElasticSearchConnector: config.ElasticSearchConfig{ + Enabled: false, + URL: "url", + Username: "user", + Password: "pass", + EnabledIndexes: []string{"transactions", "blocks"}, + }, + WebSocketConnector: config.WebSocketDriverConfig{ + MarshallerType: "json", + }, + }, + EconomicsConfig: config.EconomicsConfig{}, + ShardCoordinator: &testscommon.ShardsCoordinatorMock{}, + NodesCoordinator: &shardingMocks.NodesCoordinatorMock{}, + EpochStartNotifier: &mock.EpochStartNotifierStub{}, + CoreComponents: &mock.CoreComponentsMock{ + NodesConfig: &testscommon.NodesSetupStub{ + GetRoundDurationCalled: func() uint64 { + return 1000 + }, + }, + EpochChangeNotifier: &epochNotifier.EpochNotifierStub{}, + }, + StatusCoreComponents: 
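Note on the classification metric asserted in TestComputeConnectedPeers above: the status component publishes the peer breakdown as a single comma-terminated "key:value," string. A minimal, self-contained sketch of that layout (the helper name and the fmt.Sprintf construction are illustrative assumptions, not the production code):

    package main

    import "fmt"

    // buildPeerClassification is a hypothetical helper mirroring the
    // "intraVal:2,crossVal:2,...," layout asserted in the test above.
    func buildPeerClassification(intraVal, crossVal, intraObs, crossObs, fullObs, unknown int) string {
        return fmt.Sprintf("intraVal:%d,crossVal:%d,intraObs:%d,crossObs:%d,fullObs:%d,unknown:%d,",
            intraVal, crossVal, intraObs, crossObs, fullObs, unknown)
    }

    func main() {
        // Matches the expected string in the test: 2 of each class, 1 unknown.
        fmt.Println(buildPeerClassification(2, 2, 2, 2, 2, 1))
    }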
&factory.StatusCoreComponentsStub{ + AppStatusHandlerField: &statusHandler.AppStatusHandlerStub{}, + NetworkStatisticsField: &testscommon.NetworkStatisticsProviderStub{}, + }, + NetworkComponents: &testsMocks.NetworkComponentsStub{}, + StateComponents: &mock.StateComponentsHolderStub{}, + IsInImportMode: false, } - - shardCoordinator := mock.NewMultiShardsCoordinatorMock(2) - args, _ := componentsMock.GetStatusComponentsFactoryArgsAndProcessComponents(shardCoordinator) - args.CoreComponents = nil - scf, err := statusComp.NewStatusComponentsFactory(args) - assert.True(t, check.IfNil(scf)) - assert.Equal(t, errors.ErrNilCoreComponentsHolder, err) } -func TestNewStatusComponentsFactory_NilNodesCoordinatorShouldErr(t *testing.T) { +func TestNewStatusComponentsFactory(t *testing.T) { t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } - shardCoordinator := mock.NewMultiShardsCoordinatorMock(2) - args, _ := componentsMock.GetStatusComponentsFactoryArgsAndProcessComponents(shardCoordinator) - args.NodesCoordinator = nil - scf, err := statusComp.NewStatusComponentsFactory(args) - assert.True(t, check.IfNil(scf)) - assert.Equal(t, errors.ErrNilNodesCoordinator, err) -} + t.Run("nil CoreComponents should error", func(t *testing.T) { + t.Parallel() -func TestNewStatusComponentsFactory_NilEpochStartNotifierShouldErr(t *testing.T) { - t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } + args := createMockStatusComponentsFactoryArgs() + args.CoreComponents = nil + scf, err := statusComp.NewStatusComponentsFactory(args) + require.Nil(t, scf) + require.Equal(t, errorsMx.ErrNilCoreComponentsHolder, err) + }) + t.Run("CoreComponents with nil GenesisNodesSetup should error", func(t *testing.T) { + t.Parallel() - shardCoordinator := mock.NewMultiShardsCoordinatorMock(2) - args, _ := componentsMock.GetStatusComponentsFactoryArgsAndProcessComponents(shardCoordinator) - args.EpochStartNotifier = nil - scf, err := statusComp.NewStatusComponentsFactory(args) - assert.True(t, check.IfNil(scf)) - assert.Equal(t, errors.ErrNilEpochStartNotifier, err) -} + args := createMockStatusComponentsFactoryArgs() + args.CoreComponents = &mock.CoreComponentsMock{ + NodesConfig: nil, + } + scf, err := statusComp.NewStatusComponentsFactory(args) + require.Nil(t, scf) + require.Equal(t, errorsMx.ErrNilGenesisNodesSetupHandler, err) + }) + t.Run("nil NetworkComponents should error", func(t *testing.T) { + t.Parallel() -func TestNewStatusComponentsFactory_NilNetworkComponentsShouldErr(t *testing.T) { - t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } + args := createMockStatusComponentsFactoryArgs() + args.NetworkComponents = nil + scf, err := statusComp.NewStatusComponentsFactory(args) + require.Nil(t, scf) + require.Equal(t, errorsMx.ErrNilNetworkComponentsHolder, err) + }) + t.Run("nil ShardCoordinator should error", func(t *testing.T) { + t.Parallel() - shardCoordinator := mock.NewMultiShardsCoordinatorMock(2) - args, _ := componentsMock.GetStatusComponentsFactoryArgsAndProcessComponents(shardCoordinator) - args.NetworkComponents = nil - scf, err := statusComp.NewStatusComponentsFactory(args) - assert.True(t, check.IfNil(scf)) - assert.Equal(t, errors.ErrNilNetworkComponentsHolder, err) -} + args := createMockStatusComponentsFactoryArgs() + args.ShardCoordinator = nil + scf, err := statusComp.NewStatusComponentsFactory(args) + require.Nil(t, scf) + require.Equal(t, errorsMx.ErrNilShardCoordinator, err) + }) + t.Run("nil NodesCoordinator should error", 
func(t *testing.T) { + t.Parallel() -func TestNewStatusComponentsFactory_NilShardCoordinatorShouldErr(t *testing.T) { - t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } + args := createMockStatusComponentsFactoryArgs() + args.NodesCoordinator = nil + scf, err := statusComp.NewStatusComponentsFactory(args) + require.Nil(t, scf) + require.Equal(t, errorsMx.ErrNilNodesCoordinator, err) + }) + t.Run("nil EpochStartNotifier should error", func(t *testing.T) { + t.Parallel() + + args := createMockStatusComponentsFactoryArgs() + args.EpochStartNotifier = nil + scf, err := statusComp.NewStatusComponentsFactory(args) + require.Nil(t, scf) + require.Equal(t, errorsMx.ErrNilEpochStartNotifier, err) + }) + t.Run("nil StatusCoreComponents should error", func(t *testing.T) { + t.Parallel() + + args := createMockStatusComponentsFactoryArgs() + args.StatusCoreComponents = nil + scf, err := statusComp.NewStatusComponentsFactory(args) + require.Nil(t, scf) + require.Equal(t, errorsMx.ErrNilStatusCoreComponents, err) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() - shardCoordinator := mock.NewMultiShardsCoordinatorMock(2) - args, _ := componentsMock.GetStatusComponentsFactoryArgsAndProcessComponents(shardCoordinator) - args.ShardCoordinator = nil - scf, err := statusComp.NewStatusComponentsFactory(args) - assert.True(t, check.IfNil(scf)) - assert.Equal(t, errors.ErrNilShardCoordinator, err) + scf, err := statusComp.NewStatusComponentsFactory(createMockStatusComponentsFactoryArgs()) + require.NotNil(t, scf) + require.NoError(t, err) + }) } -func TestNewStatusComponents_InvalidRoundDurationShouldErr(t *testing.T) { +func TestStatusComponentsFactory_Create(t *testing.T) { t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } - shardCoordinator := mock.NewMultiShardsCoordinatorMock(2) - coreArgs := componentsMock.GetCoreArgs() - coreArgs.NodesFilename = "../mock/testdata/nodesSetupMockInvalidRound.json" - coreComponentsFactory, _ := coreComp.NewCoreComponentsFactory(coreArgs) - coreComponents, err := coreComp.NewManagedCoreComponents(coreComponentsFactory) - require.Nil(t, err) - require.NotNil(t, coreComponents) - err = coreComponents.Create() - require.Nil(t, err) - networkComponents := componentsMock.GetNetworkComponents(componentsMock.GetCryptoComponents(coreComponents)) - dataComponents := componentsMock.GetDataComponents(coreComponents, shardCoordinator) - stateComponents := componentsMock.GetStateComponents(coreComponents, shardCoordinator) - - statusArgs := statusComp.StatusComponentsFactoryArgs{ - Config: testscommon.GetGeneralConfig(), - ExternalConfig: config.ExternalConfig{}, - ShardCoordinator: shardCoordinator, - NodesCoordinator: &shardingMocks.NodesCoordinatorMock{}, - EpochStartNotifier: &mock.EpochStartNotifierStub{}, - CoreComponents: coreComponents, - DataComponents: dataComponents, - NetworkComponents: networkComponents, - StateComponents: stateComponents, - IsInImportMode: false, - EconomicsConfig: config.EconomicsConfig{}, - StatusCoreComponents: componentsMock.GetStatusCoreComponents(), - } - scf, err := statusComp.NewStatusComponentsFactory(statusArgs) - assert.Nil(t, err) - assert.NotNil(t, scf) + t.Run("NewSoftwareVersionFactory fails should return error", func(t *testing.T) { + t.Parallel() - statusComponents, err := scf.Create() - assert.Nil(t, statusComponents) - assert.Equal(t, errors.ErrInvalidRoundDuration, err) + args := createMockStatusComponentsFactoryArgs() + args.StatusCoreComponents = 
&factory.StatusCoreComponentsStub{ + AppStatusHandlerField: nil, // make NewSoftwareVersionFactory fail + } + scf, _ := statusComp.NewStatusComponentsFactory(args) + require.NotNil(t, scf) + + sc, err := scf.Create() + require.Error(t, err) + require.Nil(t, sc) + }) + t.Run("softwareVersionCheckerFactory.Create fails should return error", func(t *testing.T) { + t.Parallel() + + args := createMockStatusComponentsFactoryArgs() + args.Config.SoftwareVersionConfig.PollingIntervalInMinutes = 0 + scf, _ := statusComp.NewStatusComponentsFactory(args) + require.NotNil(t, scf) + + sc, err := scf.Create() + require.Error(t, err) + require.Nil(t, sc) + }) + t.Run("invalid round duration should error", func(t *testing.T) { + t.Parallel() + + args := createMockStatusComponentsFactoryArgs() + args.CoreComponents = &mock.CoreComponentsMock{ + NodesConfig: &testscommon.NodesSetupStub{ + GetRoundDurationCalled: func() uint64 { + return 0 + }, + }, + } + scf, _ := statusComp.NewStatusComponentsFactory(args) + require.NotNil(t, scf) + + sc, err := scf.Create() + require.Equal(t, errorsMx.ErrInvalidRoundDuration, err) + require.Nil(t, sc) + }) + t.Run("makeWebSocketDriverArgs fails due to invalid marshaller type should error", func(t *testing.T) { + t.Parallel() + + args := createMockStatusComponentsFactoryArgs() + args.ExternalConfig.WebSocketConnector.Enabled = true + args.ExternalConfig.WebSocketConnector.MarshallerType = "invalid type" + scf, _ := statusComp.NewStatusComponentsFactory(args) + require.NotNil(t, scf) + + sc, err := scf.Create() + require.Error(t, err) + require.Nil(t, sc) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + shardCoordinator := mock.NewMultiShardsCoordinatorMock(2) + shardCoordinator.SelfIDCalled = func() uint32 { + return core.MetachainShardId // coverage + } + args, _ := componentsMock.GetStatusComponentsFactoryArgsAndProcessComponents(shardCoordinator) + args.ExternalConfig.WebSocketConnector.Enabled = true // coverage + scf, err := statusComp.NewStatusComponentsFactory(args) + require.Nil(t, err) + + sc, err := scf.Create() + require.NoError(t, err) + require.NotNil(t, sc) + + require.NoError(t, sc.Close()) + }) } -func TestNewStatusComponentsFactory_ShouldWork(t *testing.T) { +func TestStatusComponentsFactory_epochStartEventHandler(t *testing.T) { t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") + + args := createMockStatusComponentsFactoryArgs() + args.NodesCoordinator = &shardingMocks.NodesCoordinatorStub{ + GetAllEligibleValidatorsPublicKeysCalled: func(epoch uint32) (map[uint32][][]byte, error) { + return make(map[uint32][][]byte), errors.New("fail for coverage") + }, } + scf, _ := statusComp.NewStatusComponentsFactory(args) + require.NotNil(t, scf) - shardCoordinator := mock.NewMultiShardsCoordinatorMock(2) - args, _ := componentsMock.GetStatusComponentsFactoryArgsAndProcessComponents(shardCoordinator) - scf, err := statusComp.NewStatusComponentsFactory(args) - require.NoError(t, err) - require.False(t, check.IfNil(scf)) + sc, _ := scf.Create() + require.NotNil(t, sc) + + handler := sc.EpochStartEventHandler() + require.NotNil(t, handler) + handler.EpochStartAction(&testscommon.HeaderHandlerStub{}) } -func TestStatusComponentsFactory_Create(t *testing.T) { +func TestStatusComponentsFactory_IsInterfaceNil(t *testing.T) { t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } - shardCoordinator := mock.NewMultiShardsCoordinatorMock(2) - args, _ := 
componentsMock.GetStatusComponentsFactoryArgsAndProcessComponents(shardCoordinator) - scf, err := statusComp.NewStatusComponentsFactory(args) - require.Nil(t, err) + args := createMockStatusComponentsFactoryArgs() + args.CoreComponents = nil + scf, _ := statusComp.NewStatusComponentsFactory(args) + require.True(t, scf.IsInterfaceNil()) - res, err := scf.Create() - require.NoError(t, err) - require.NotNil(t, res) + scf, _ = statusComp.NewStatusComponentsFactory(createMockStatusComponentsFactoryArgs()) + require.False(t, scf.IsInterfaceNil()) } -// ------------ Test StatusComponents -------------------- -func TestStatusComponents_CloseShouldWork(t *testing.T) { +func TestStatusComponents_Close(t *testing.T) { t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } - shardCoordinator := mock.NewMultiShardsCoordinatorMock(2) - statusArgs, _ := componentsMock.GetStatusComponentsFactoryArgsAndProcessComponents(shardCoordinator) - scf, _ := statusComp.NewStatusComponentsFactory(statusArgs) + scf, _ := statusComp.NewStatusComponentsFactory(createMockStatusComponentsFactoryArgs()) cc, err := scf.Create() require.Nil(t, err) diff --git a/factory/statusCore/statusCoreComponents.go b/factory/statusCore/statusCoreComponents.go index 600ea96ea7e..f256f051611 100644 --- a/factory/statusCore/statusCoreComponents.go +++ b/factory/statusCore/statusCoreComponents.go @@ -6,7 +6,7 @@ import ( "github.com/multiversx/mx-chain-go/common/statistics" "github.com/multiversx/mx-chain-go/common/statistics/machine" "github.com/multiversx/mx-chain-go/config" - errErd "github.com/multiversx/mx-chain-go/errors" + errorsMx "github.com/multiversx/mx-chain-go/errors" "github.com/multiversx/mx-chain-go/factory" "github.com/multiversx/mx-chain-go/node/external" "github.com/multiversx/mx-chain-go/node/metrics" @@ -67,19 +67,10 @@ func NewStatusCoreComponentsFactory(args StatusCoreComponentsFactoryArgs) (*stat func checkArgs(args StatusCoreComponentsFactoryArgs) error { if check.IfNil(args.CoreComp) { - return errErd.ErrNilCoreComponents + return errorsMx.ErrNilCoreComponents } if check.IfNil(args.CoreComp.EconomicsData()) { - return errErd.ErrNilEconomicsData - } - if check.IfNil(args.CoreComp.GenesisNodesSetup()) { - return errErd.ErrNilGenesisNodesSetupHandler - } - if check.IfNil(args.CoreComp.InternalMarshalizer()) { - return errErd.ErrNilMarshalizer - } - if check.IfNil(args.CoreComp.Uint64ByteSliceConverter()) { - return errErd.ErrNilUint64ByteSliceConverter + return errorsMx.ErrNilEconomicsData } return nil diff --git a/factory/statusCore/statusCoreComponentsHandler.go b/factory/statusCore/statusCoreComponentsHandler.go index 72bae620e49..89d6f6ad063 100644 --- a/factory/statusCore/statusCoreComponentsHandler.go +++ b/factory/statusCore/statusCoreComponentsHandler.go @@ -25,7 +25,7 @@ type managedStatusCoreComponents struct { // NewManagedStatusCoreComponents creates a new status core components handler implementation func NewManagedStatusCoreComponents(sccf *statusCoreComponentsFactory) (*managedStatusCoreComponents, error) { if sccf == nil { - return nil, errors.ErrNilCoreComponentsFactory + return nil, errors.ErrNilStatusCoreComponentsFactory } mcc := &managedStatusCoreComponents{ diff --git a/factory/statusCore/statusCoreComponentsHandler_test.go b/factory/statusCore/statusCoreComponentsHandler_test.go index d6cd676833d..83a6e94ec5d 100644 --- a/factory/statusCore/statusCoreComponentsHandler_test.go +++ b/factory/statusCore/statusCoreComponentsHandler_test.go @@ -3,57 +3,101 @@ package 
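The test rewrites above and below consolidate the older one-function-per-error tests into t.Run subtests that share a mock-args builder and run in parallel. A minimal, runnable sketch of the pattern (names are illustrative):

    package example

    import "testing"

    func TestComponent(t *testing.T) {
        t.Parallel()

        t.Run("nil dependency should error", func(t *testing.T) {
            t.Parallel()
            // arrange an invalid argument set, construct, assert the sentinel error
        })
        t.Run("should work", func(t *testing.T) {
            t.Parallel()
            // construct with valid mocks and assert a non-nil component
        })
    }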
diff --git a/factory/statusCore/statusCoreComponents.go b/factory/statusCore/statusCoreComponents.go
index 600ea96ea7e..f256f051611 100644
--- a/factory/statusCore/statusCoreComponents.go
+++ b/factory/statusCore/statusCoreComponents.go
@@ -6,7 +6,7 @@ import (
 	"github.com/multiversx/mx-chain-go/common/statistics"
 	"github.com/multiversx/mx-chain-go/common/statistics/machine"
 	"github.com/multiversx/mx-chain-go/config"
-	errErd "github.com/multiversx/mx-chain-go/errors"
+	errorsMx "github.com/multiversx/mx-chain-go/errors"
 	"github.com/multiversx/mx-chain-go/factory"
 	"github.com/multiversx/mx-chain-go/node/external"
 	"github.com/multiversx/mx-chain-go/node/metrics"
@@ -67,19 +67,10 @@ func NewStatusCoreComponentsFactory(args StatusCoreComponentsFactoryArgs) (*stat
 
 func checkArgs(args StatusCoreComponentsFactoryArgs) error {
 	if check.IfNil(args.CoreComp) {
-		return errErd.ErrNilCoreComponents
+		return errorsMx.ErrNilCoreComponents
 	}
 	if check.IfNil(args.CoreComp.EconomicsData()) {
-		return errErd.ErrNilEconomicsData
-	}
-	if check.IfNil(args.CoreComp.GenesisNodesSetup()) {
-		return errErd.ErrNilGenesisNodesSetupHandler
-	}
-	if check.IfNil(args.CoreComp.InternalMarshalizer()) {
-		return errErd.ErrNilMarshalizer
-	}
-	if check.IfNil(args.CoreComp.Uint64ByteSliceConverter()) {
-		return errErd.ErrNilUint64ByteSliceConverter
+		return errorsMx.ErrNilEconomicsData
 	}
 
 	return nil
diff --git a/factory/statusCore/statusCoreComponentsHandler.go b/factory/statusCore/statusCoreComponentsHandler.go
index 72bae620e49..89d6f6ad063 100644
--- a/factory/statusCore/statusCoreComponentsHandler.go
+++ b/factory/statusCore/statusCoreComponentsHandler.go
@@ -25,7 +25,7 @@ type managedStatusCoreComponents struct {
 // NewManagedStatusCoreComponents creates a new status core components handler implementation
 func NewManagedStatusCoreComponents(sccf *statusCoreComponentsFactory) (*managedStatusCoreComponents, error) {
 	if sccf == nil {
-		return nil, errors.ErrNilCoreComponentsFactory
+		return nil, errors.ErrNilStatusCoreComponentsFactory
 	}
 
 	mcc := &managedStatusCoreComponents{
diff --git a/factory/statusCore/statusCoreComponentsHandler_test.go b/factory/statusCore/statusCoreComponentsHandler_test.go
index d6cd676833d..83a6e94ec5d 100644
--- a/factory/statusCore/statusCoreComponentsHandler_test.go
+++ b/factory/statusCore/statusCoreComponentsHandler_test.go
@@ -3,57 +3,101 @@ package statusCore_test
 import (
 	"testing"
 
+	errorsMx "github.com/multiversx/mx-chain-go/errors"
+	"github.com/multiversx/mx-chain-go/factory"
 	"github.com/multiversx/mx-chain-go/factory/statusCore"
 	componentsMock "github.com/multiversx/mx-chain-go/testscommon/components"
 	"github.com/stretchr/testify/require"
 )
 
-func TestManagedStatusCoreComponents_CreateWithInvalidArgsShouldErr(t *testing.T) {
+func TestNewManagedStatusCoreComponents(t *testing.T) {
 	t.Parallel()
-	if testing.Short() {
-		t.Skip("this is not a short test")
-	}
 
-	args := componentsMock.GetStatusCoreArgs(componentsMock.GetDefaultCoreComponents())
-	args.Config.ResourceStats.RefreshIntervalInSec = 0
+	t.Run("nil factory should error", func(t *testing.T) {
+		t.Parallel()
 
-	statusCoreComponentsFactory, err := statusCore.NewStatusCoreComponentsFactory(args)
-	require.NoError(t, err)
-	managedStatusCoreComponents, err := statusCore.NewManagedStatusCoreComponents(statusCoreComponentsFactory)
-	require.NoError(t, err)
+		managedStatusCoreComponents, err := statusCore.NewManagedStatusCoreComponents(nil)
+		require.Equal(t, errorsMx.ErrNilStatusCoreComponentsFactory, err)
+		require.Nil(t, managedStatusCoreComponents)
+	})
+	t.Run("should work", func(t *testing.T) {
+		t.Parallel()
 
-	err = managedStatusCoreComponents.Create()
-	require.Error(t, err)
-	require.Nil(t, managedStatusCoreComponents.ResourceMonitor())
+		args := componentsMock.GetStatusCoreArgs(componentsMock.GetDefaultCoreComponents())
+		statusCoreComponentsFactory, err := statusCore.NewStatusCoreComponentsFactory(args)
+		require.NoError(t, err)
+		managedStatusCoreComponents, err := statusCore.NewManagedStatusCoreComponents(statusCoreComponentsFactory)
+		require.NoError(t, err)
+		require.NotNil(t, managedStatusCoreComponents)
+	})
 }
 
-func TestManagedStatusCoreComponents_CreateShouldWork(t *testing.T) {
+func TestManagedStatusCoreComponents_Create(t *testing.T) {
+	t.Parallel()
+
+	t.Run("invalid params should error", func(t *testing.T) {
+		t.Parallel()
+
+		args := componentsMock.GetStatusCoreArgs(componentsMock.GetDefaultCoreComponents())
+		args.Config.ResourceStats.RefreshIntervalInSec = 0
+
+		statusCoreComponentsFactory, err := statusCore.NewStatusCoreComponentsFactory(args)
+		require.NoError(t, err)
+		managedStatusCoreComponents, err := statusCore.NewManagedStatusCoreComponents(statusCoreComponentsFactory)
+		require.NoError(t, err)
+
+		err = managedStatusCoreComponents.Create()
+		require.Error(t, err)
+	})
+	t.Run("should work with getters", func(t *testing.T) {
+		t.Parallel()
+
+		args := componentsMock.GetStatusCoreArgs(componentsMock.GetCoreComponents())
+		statusCoreComponentsFactory, err := statusCore.NewStatusCoreComponentsFactory(args)
+		require.NoError(t, err)
+		managedStatusCoreComponents, err := statusCore.NewManagedStatusCoreComponents(statusCoreComponentsFactory)
+		require.NoError(t, err)
+
+		require.Nil(t, managedStatusCoreComponents.ResourceMonitor())
+		require.Nil(t, managedStatusCoreComponents.NetworkStatistics())
+		require.Nil(t, managedStatusCoreComponents.TrieSyncStatistics())
+		require.Nil(t, managedStatusCoreComponents.AppStatusHandler())
+		require.Nil(t, managedStatusCoreComponents.StatusMetrics())
+		require.Nil(t, managedStatusCoreComponents.PersistentStatusHandler())
+
+		err = managedStatusCoreComponents.Create()
+		require.NoError(t, err)
+
+		require.NotNil(t, managedStatusCoreComponents.ResourceMonitor())
+		require.NotNil(t, managedStatusCoreComponents.NetworkStatistics())
+		require.NotNil(t, managedStatusCoreComponents.TrieSyncStatistics())
+		require.NotNil(t, managedStatusCoreComponents.AppStatusHandler())
+		require.NotNil(t, managedStatusCoreComponents.StatusMetrics())
+		require.NotNil(t, managedStatusCoreComponents.PersistentStatusHandler())
+
+		require.Equal(t, factory.StatusCoreComponentsName, managedStatusCoreComponents.String())
+	})
+}
+
+func TestManagedStatusCoreComponents_CheckSubcomponents(t *testing.T) {
 	t.Parallel()
-	if testing.Short() {
-		t.Skip("this is not a short test")
-	}
 
 	args := componentsMock.GetStatusCoreArgs(componentsMock.GetCoreComponents())
-	statusCoreComponentsFactory, err := statusCore.NewStatusCoreComponentsFactory(args)
-	require.NoError(t, err)
-	managedStatusCoreComponents, err := statusCore.NewManagedStatusCoreComponents(statusCoreComponentsFactory)
-	require.NoError(t, err)
+	statusCoreComponentsFactory, _ := statusCore.NewStatusCoreComponentsFactory(args)
+	managedStatusCoreComponents, _ := statusCore.NewManagedStatusCoreComponents(statusCoreComponentsFactory)
 
-	require.Nil(t, managedStatusCoreComponents.ResourceMonitor())
-	require.Nil(t, managedStatusCoreComponents.NetworkStatistics())
+	err := managedStatusCoreComponents.CheckSubcomponents()
+	require.Equal(t, errorsMx.ErrNilStatusCoreComponents, err)
 
 	err = managedStatusCoreComponents.Create()
 	require.NoError(t, err)
 
-	require.NotNil(t, managedStatusCoreComponents.ResourceMonitor())
-	require.NotNil(t, managedStatusCoreComponents.NetworkStatistics())
+	err = managedStatusCoreComponents.CheckSubcomponents()
+	require.NoError(t, err)
 }
 
-func TestManagedCoreComponents_Close(t *testing.T) {
+func TestManagedStatusCoreComponents_Close(t *testing.T) {
 	t.Parallel()
-	if testing.Short() {
-		t.Skip("this is not a short test")
-	}
 
 	args := componentsMock.GetStatusCoreArgs(componentsMock.GetCoreComponents())
 	statusCoreComponentsFactory, err := statusCore.NewStatusCoreComponentsFactory(args)
@@ -61,10 +105,24 @@ func TestManagedCoreComponents_Close(t *testing.T) {
 	managedStatusCoreComponents, err := statusCore.NewManagedStatusCoreComponents(statusCoreComponentsFactory)
 	require.NoError(t, err)
 
+	err = managedStatusCoreComponents.Close()
+	require.NoError(t, err)
+
 	err = managedStatusCoreComponents.Create()
 	require.NoError(t, err)
 
 	err = managedStatusCoreComponents.Close()
 	require.NoError(t, err)
-	require.Nil(t, managedStatusCoreComponents.ResourceMonitor())
+}
+
+func TestManagedStatusCoreComponents_IsInterfaceNil(t *testing.T) {
+	t.Parallel()
+
+	managedStatusCoreComponents, _ := statusCore.NewManagedStatusCoreComponents(nil)
+	require.True(t, managedStatusCoreComponents.IsInterfaceNil())
+
+	args := componentsMock.GetStatusCoreArgs(componentsMock.GetCoreComponents())
+	statusCoreComponentsFactory, _ := statusCore.NewStatusCoreComponentsFactory(args)
+	managedStatusCoreComponents, _ = statusCore.NewManagedStatusCoreComponents(statusCoreComponentsFactory)
+	require.False(t, managedStatusCoreComponents.IsInterfaceNil())
 }
diff --git a/factory/statusCore/statusCoreComponents_test.go b/factory/statusCore/statusCoreComponents_test.go
index 66c5e6c07ea..bd85752faeb 100644
--- a/factory/statusCore/statusCoreComponents_test.go
+++ b/factory/statusCore/statusCoreComponents_test.go
@@ -4,30 +4,30 @@ import (
 	"errors"
 	"testing"
 
+	"github.com/multiversx/mx-chain-core-go/core"
+	"github.com/multiversx/mx-chain-core-go/marshal"
 	"github.com/multiversx/mx-chain-go/common/statistics"
 	"github.com/multiversx/mx-chain-go/config"
-	errErd "github.com/multiversx/mx-chain-go/errors"
+	errorsMx "github.com/multiversx/mx-chain-go/errors"
 	"github.com/multiversx/mx-chain-go/factory/statusCore"
 	"github.com/multiversx/mx-chain-go/integrationTests/mock"
-	"github.com/multiversx/mx-chain-go/testscommon"
+	"github.com/multiversx/mx-chain-go/process"
 	componentsMock "github.com/multiversx/mx-chain-go/testscommon/components"
 	"github.com/multiversx/mx-chain-go/testscommon/economicsmocks"
+	"github.com/multiversx/mx-chain-go/testscommon/factory"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 )
 
 func TestNewStatusCoreComponentsFactory(t *testing.T) {
 	t.Parallel()
-	if testing.Short() {
-		t.Skip("this is not a short test")
-	}
 
 	t.Run("nil core components should error", func(t *testing.T) {
 		t.Parallel()
 
 		args := componentsMock.GetStatusCoreArgs(nil)
 		sccf, err := statusCore.NewStatusCoreComponentsFactory(args)
-		assert.Equal(t, errErd.ErrNilCoreComponents, err)
+		assert.Equal(t, errorsMx.ErrNilCoreComponents, err)
 		require.Nil(t, sccf)
 	})
 	t.Run("nil economics data should error", func(t *testing.T) {
@@ -39,102 +39,91 @@ func TestNewStatusCoreComponentsFactory(t *testing.T) {
 
 		args := componentsMock.GetStatusCoreArgs(coreComp)
 		sccf, err := statusCore.NewStatusCoreComponentsFactory(args)
-		assert.Equal(t, errErd.ErrNilEconomicsData, err)
+		assert.Equal(t, errorsMx.ErrNilEconomicsData, err)
 		require.Nil(t, sccf)
 	})
-	t.Run("nil genesis node setup should error", func(t *testing.T) {
+	t.Run("should work", func(t *testing.T) {
 		t.Parallel()
 
-		coreComp := &mock.CoreComponentsStub{
-			EconomicsDataField:     &economicsmocks.EconomicsHandlerStub{},
-			GenesisNodesSetupField: nil,
-		}
-
-		args := componentsMock.GetStatusCoreArgs(coreComp)
+		args := componentsMock.GetStatusCoreArgs(componentsMock.GetCoreComponents())
 		sccf, err := statusCore.NewStatusCoreComponentsFactory(args)
-		assert.Equal(t, errErd.ErrNilGenesisNodesSetupHandler, err)
-		require.Nil(t, sccf)
+		assert.Nil(t, err)
+		require.NotNil(t, sccf)
 	})
-	t.Run("nil marshaller should error", func(t *testing.T) {
+}
+
+func TestStatusCoreComponentsFactory_Create(t *testing.T) {
+	t.Parallel()
+
+	t.Run("NewResourceMonitor fails should error", func(t *testing.T) {
 		t.Parallel()
 
-		coreComp := &mock.CoreComponentsStub{
-			EconomicsDataField:       &economicsmocks.EconomicsHandlerStub{},
-			GenesisNodesSetupField:   &testscommon.NodesSetupStub{},
-			InternalMarshalizerField: nil,
+		args := componentsMock.GetStatusCoreArgs(componentsMock.GetCoreComponents())
+		args.Config = config.Config{
+			ResourceStats: config.ResourceStatsConfig{
+				RefreshIntervalInSec: 0,
+			},
 		}
-
-		args := componentsMock.GetStatusCoreArgs(coreComp)
 		sccf, err := statusCore.NewStatusCoreComponentsFactory(args)
-		assert.Equal(t, errErd.ErrNilMarshalizer, err)
-		require.Nil(t, sccf)
+		require.Nil(t, err)
+
+		cc, err := sccf.Create()
+		require.Nil(t, cc)
+		require.True(t, errors.Is(err, statistics.ErrInvalidRefreshIntervalValue))
 	})
-	t.Run("nil slice converter should error", func(t *testing.T) {
+	t.Run("NewPersistentStatusHandler fails should error", func(t *testing.T) {
 		t.Parallel()
 
-		coreComp := &mock.CoreComponentsStub{
-			EconomicsDataField:            &economicsmocks.EconomicsHandlerStub{},
-			GenesisNodesSetupField:        &testscommon.NodesSetupStub{},
-			InternalMarshalizerField:      &testscommon.MarshalizerStub{},
-			Uint64ByteSliceConverterField: nil,
+		coreCompStub := factory.NewCoreComponentsHolderStubFromRealComponent(componentsMock.GetCoreComponents())
+		coreCompStub.InternalMarshalizerCalled = func() marshal.Marshalizer {
+			return nil
 		}
+		args := componentsMock.GetStatusCoreArgs(coreCompStub)
+		sccf, err := statusCore.NewStatusCoreComponentsFactory(args)
+		require.Nil(t, err)
 
-		args := componentsMock.GetStatusCoreArgs(coreComp)
+		cc, err := sccf.Create()
+		require.Error(t, err)
+		require.Nil(t, cc)
+	})
+	t.Run("SetStatusHandler fails should error", func(t *testing.T) {
+		t.Parallel()
+
+		expectedErr := errors.New("expected error")
+		coreCompStub := factory.NewCoreComponentsHolderStubFromRealComponent(componentsMock.GetCoreComponents())
+		coreCompStub.EconomicsDataCalled = func() process.EconomicsDataHandler {
+			return &economicsmocks.EconomicsHandlerStub{
+				SetStatusHandlerCalled: func(statusHandler core.AppStatusHandler) error {
+					return expectedErr
+				},
+			}
+		}
+		args := componentsMock.GetStatusCoreArgs(coreCompStub)
 		sccf, err := statusCore.NewStatusCoreComponentsFactory(args)
-		assert.Equal(t, errErd.ErrNilUint64ByteSliceConverter, err)
-		require.Nil(t, sccf)
+		require.Nil(t, err)
+
+		cc, err := sccf.Create()
+		require.Equal(t, expectedErr, err)
+		require.Nil(t, cc)
 	})
 	t.Run("should work", func(t *testing.T) {
 		t.Parallel()
 
 		args := componentsMock.GetStatusCoreArgs(componentsMock.GetCoreComponents())
+		args.Config.ResourceStats.Enabled = true // coverage
 		sccf, err := statusCore.NewStatusCoreComponentsFactory(args)
-		assert.Nil(t, err)
-		require.NotNil(t, sccf)
-	})
-}
-
-func TestStatusCoreComponentsFactory_InvalidValueShouldErr(t *testing.T) {
-	t.Parallel()
-	if testing.Short() {
-		t.Skip("this is not a short test")
-	}
-
-	args := componentsMock.GetStatusCoreArgs(componentsMock.GetCoreComponents())
-	args.Config = config.Config{
-		ResourceStats: config.ResourceStatsConfig{
-			RefreshIntervalInSec: 0,
-		},
-	}
-	sccf, err := statusCore.NewStatusCoreComponentsFactory(args)
-	require.Nil(t, err)
+		require.Nil(t, err)
 
-	cc, err := sccf.Create()
-	require.Nil(t, cc)
-	require.True(t, errors.Is(err, statistics.ErrInvalidRefreshIntervalValue))
-}
-
-func TestStatusCoreComponentsFactory_CreateStatusCoreComponentsShouldWork(t *testing.T) {
-	t.Parallel()
-	if testing.Short() {
-		t.Skip("this is not a short test")
-	}
-
-	args := componentsMock.GetStatusCoreArgs(componentsMock.GetCoreComponents())
-	sccf, err := statusCore.NewStatusCoreComponentsFactory(args)
-	require.Nil(t, err)
-
-	cc, err := sccf.Create()
-	require.NoError(t, err)
-	require.NotNil(t, cc)
+		cc, err := sccf.Create()
+		require.NoError(t, err)
+		require.NotNil(t, cc)
+		require.NoError(t, cc.Close())
+	})
 }
 
 // ------------ Test CoreComponents --------------------
 func TestStatusCoreComponents_CloseShouldWork(t *testing.T) {
 	t.Parallel()
-	if testing.Short() {
-		t.Skip("this is not a short test")
-	}
 
 	args := componentsMock.GetStatusCoreArgs(componentsMock.GetCoreComponents())
 	sccf, err := statusCore.NewStatusCoreComponentsFactory(args)
diff --git a/genesis/errors.go b/genesis/errors.go
index 2553b9650aa..1c0330e4cad 100644
--- a/genesis/errors.go
+++ b/genesis/errors.go
@@ -167,8 +167,8 @@ var ErrBLSKeyNotStaked = errors.New("bls key not staked")
 // ErrMissingDeployedSC signals that a delegation referenced an un-deployed contract
 var ErrMissingDeployedSC = errors.New("missing deployed SC")
 
-// ErrNilGeneralSettingsConfig signals that a nil general settings config was provided
-var ErrNilGeneralSettingsConfig = errors.New("nil general settings config")
-
 // ErrNilEpochConfig signals that a nil epoch config was provided
 var ErrNilEpochConfig = errors.New("nil epoch config")
+
+// ErrNilGasSchedule signals that an operation has been attempted with a nil gas schedule
+var ErrNilGasSchedule = errors.New("nil gas schedule")
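genesis/errors.go above defines plain sentinel errors; call sites elsewhere in this patch wrap them with fmt.Errorf("'%w' ...") so callers can still match them with errors.Is. A self-contained sketch using a local stand-in sentinel (not the real genesis package):

    package main

    import (
        "errors"
        "fmt"
    )

    // local stand-in for a sentinel like genesis.ErrNilGasSchedule
    var errNilGasSchedule = errors.New("nil gas schedule")

    func main() {
        wrapped := fmt.Errorf("'%w' while creating the genesis block", errNilGasSchedule)
        fmt.Println(errors.Is(wrapped, errNilGasSchedule)) // true: wrapping preserves the match
    }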
diff --git a/genesis/mock/coreComponentsMock.go b/genesis/mock/coreComponentsMock.go
index fe1bedd07e1..fb0907ef8a0 100644
--- a/genesis/mock/coreComponentsMock.go
+++ b/genesis/mock/coreComponentsMock.go
@@ -6,6 +6,7 @@ import (
 	"github.com/multiversx/mx-chain-core-go/hashing"
 	"github.com/multiversx/mx-chain-core-go/marshal"
 	"github.com/multiversx/mx-chain-go/common"
+	"github.com/multiversx/mx-chain-go/process"
 )
 
 // CoreComponentsMock -
@@ -20,6 +21,7 @@ type CoreComponentsMock struct {
 	MinTxVersion             uint32
 	StatHandler              core.AppStatusHandler
 	EnableEpochsHandlerField common.EnableEpochsHandler
+	TxVersionCheck           process.TxVersionCheckerHandler
 }
 
 // InternalMarshalizer -
@@ -67,6 +69,11 @@ func (ccm *CoreComponentsMock) EnableEpochsHandler() common.EnableEpochsHandler
 	return ccm.EnableEpochsHandlerField
 }
 
+// TxVersionChecker -
+func (ccm *CoreComponentsMock) TxVersionChecker() process.TxVersionCheckerHandler {
+	return ccm.TxVersionCheck
+}
+
 // IsInterfaceNil -
 func (ccm *CoreComponentsMock) IsInterfaceNil() bool {
 	return ccm == nil
diff --git a/genesis/mock/dataComponentsMock.go b/genesis/mock/dataComponentsMock.go
index f4e24bd4420..be38cf0e884 100644
--- a/genesis/mock/dataComponentsMock.go
+++ b/genesis/mock/dataComponentsMock.go
@@ -52,8 +52,9 @@ func (dcm *DataComponentsMock) MiniBlocksProvider() MiniBlockProvider {
 }
 
 // SetBlockchain -
-func (dcm *DataComponentsMock) SetBlockchain(chain data.ChainHandler) {
+func (dcm *DataComponentsMock) SetBlockchain(chain data.ChainHandler) error {
 	dcm.Blkc = chain
+	return nil
 }
 
 // IsInterfaceNil -
diff --git a/genesis/mock/userAccountMock.go b/genesis/mock/userAccountMock.go
index 88de0e5e47e..f2ae6ecf136 100644
--- a/genesis/mock/userAccountMock.go
+++ b/genesis/mock/userAccountMock.go
@@ -143,3 +143,8 @@ func (uam *UserAccountMock) GetUserName() []byte {
 func (uam *UserAccountMock) SaveDirtyData(_ common.Trie) (map[string][]byte, error) {
 	return nil, nil
 }
+
+// IsGuarded -
+func (uam *UserAccountMock) IsGuarded() bool {
+	return false
+}
diff --git a/genesis/process/argGenesisBlockCreator.go b/genesis/process/argGenesisBlockCreator.go
index e6166457673..27ec8682a01 100644
--- a/genesis/process/argGenesisBlockCreator.go
+++ b/genesis/process/argGenesisBlockCreator.go
@@ -25,6 +25,7 @@ type coreComponentsHandler interface {
 	Hasher() hashing.Hasher
 	AddressPubKeyConverter() core.PubkeyConverter
 	Uint64ByteSliceConverter() typeConverters.Uint64ByteSliceConverter
+	TxVersionChecker() process.TxVersionCheckerHandler
 	ChainID() string
 	EnableEpochsHandler() common.EnableEpochsHandler
 	IsInterfaceNil() bool
@@ -34,7 +35,7 @@ type dataComponentsHandler interface {
 	StorageService() dataRetriever.StorageService
 	Blockchain() data.ChainHandler
 	Datapool() dataRetriever.PoolsHolder
-	SetBlockchain(chain data.ChainHandler)
+	SetBlockchain(chain data.ChainHandler) error
 	Clone() interface{}
 	IsInterfaceNil() bool
 }
diff --git a/genesis/process/disabled/feeHandler.go b/genesis/process/disabled/feeHandler.go
index 7dbb2d8ad8c..2cd4170f3bb 100644
--- a/genesis/process/disabled/feeHandler.go
+++ b/genesis/process/disabled/feeHandler.go
@@ -42,6 +42,16 @@ func (fh *FeeHandler) MinGasLimit() uint64 {
 	return 0
 }
 
+// ExtraGasLimitGuardedTx returns 0
+func (fh *FeeHandler) ExtraGasLimitGuardedTx() uint64 {
+	return 0
+}
+
+// MaxGasPriceSetGuardian returns 0
+func (fh *FeeHandler) MaxGasPriceSetGuardian() uint64 {
+	return 0
+}
+
 // MaxGasLimitPerBlock returns max uint64
 func (fh *FeeHandler) MaxGasLimitPerBlock(uint32) uint64 {
 	return math.MaxUint64
diff --git a/genesis/process/disabled/txVersionChecker.go b/genesis/process/disabled/txVersionChecker.go
new file mode 100644
index 00000000000..f8089a9a211
--- /dev/null
+++ b/genesis/process/disabled/txVersionChecker.go
@@ -0,0 +1,31 @@
+package disabled
+
+import "github.com/multiversx/mx-chain-core-go/data/transaction"
+
+// TxVersionChecker implements the TxVersionChecker interface, it does nothing as it is a disabled component
+type TxVersionChecker struct{}
+
+// NewDisabledTxVersionChecker is the constructor for the disabled tx version checker
+func NewDisabledTxVersionChecker() *TxVersionChecker {
+	return &TxVersionChecker{}
+}
+
+// IsGuardedTransaction returns false as this is a disabled component
+func (tvc *TxVersionChecker) IsGuardedTransaction(_ *transaction.Transaction) bool {
+	return false
+}
+
+// IsSignedWithHash returns false as this is a disabled component
+func (tvc *TxVersionChecker) IsSignedWithHash(_ *transaction.Transaction) bool {
+	return false
+}
+
+// CheckTxVersion returns nil as this is a disabled component
+func (tvc *TxVersionChecker) CheckTxVersion(_ *transaction.Transaction) error {
+	return nil
+}
+
+// IsInterfaceNil does the nil check for the receiver
+func (tvc *TxVersionChecker) IsInterfaceNil() bool {
+	return tvc == nil
+}
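The disabled TxVersionChecker added above is a null-object: every check passes, so genesis processing is never rejected on version or guardian grounds. A minimal sketch of the pattern with a hypothetical one-method interface (the real interface is process.TxVersionCheckerHandler):

    package main

    import "fmt"

    // checker is a hypothetical stand-in for the tx version checker contract.
    type checker interface {
        CheckTxVersion(version uint32) error
    }

    // disabledChecker accepts everything, mirroring the disabled component above.
    type disabledChecker struct{}

    func (d disabledChecker) CheckTxVersion(_ uint32) error { return nil }

    func main() {
        var c checker = disabledChecker{}
        fmt.Println(c.CheckTxVersion(0)) // <nil> — always accepts
    }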
diff --git a/genesis/process/genesisBlockCreator.go b/genesis/process/genesisBlockCreator.go
index c4724f04ff0..2fde795be1f 100644
--- a/genesis/process/genesisBlockCreator.go
+++ b/genesis/process/genesisBlockCreator.go
@@ -204,6 +204,9 @@ func checkArgumentsForBlockCreator(arg ArgsGenesisBlockCreator) error {
 	if arg.EpochConfig == nil {
 		return genesis.ErrNilEpochConfig
 	}
+	if arg.GasSchedule == nil {
+		return genesis.ErrNilGasSchedule
+	}
 
 	return nil
 }
@@ -357,7 +360,10 @@ func (gbc *genesisBlockCreator) createHeaders(
 		return fmt.Errorf("'%w' while generating genesis block for metachain", err)
 	}
 
-	metaArgsGenesisBlockCreator.Data.SetBlockchain(chain)
+	err = metaArgsGenesisBlockCreator.Data.SetBlockchain(chain)
+	if err != nil {
+		return fmt.Errorf("'%w' while setting blockchain for metachain", err)
+	}
 	genesisBlock, scResults, gbc.initialIndexingData[shardID], err = CreateMetaGenesisBlock(
 		metaArgsGenesisBlockCreator,
 		mapBodies[core.MetachainShardId],
diff --git a/genesis/process/genesisBlockCreator_test.go b/genesis/process/genesisBlockCreator_test.go
index 63861f718fd..3e0a38493a4 100644
--- a/genesis/process/genesisBlockCreator_test.go
+++ b/genesis/process/genesisBlockCreator_test.go
@@ -1,3 +1,8 @@
+//go:build !race
+// +build !race
+
+// TODO reinstate test after Wasm VM pointer fix
+
 package process
 
 import (
@@ -64,6 +69,7 @@ func createMockArgument(
 			UInt64ByteSliceConv:      &mock.Uint64ByteSliceConverterMock{},
 			AddrPubKeyConv:           testscommon.NewPubkeyConverterMock(32),
 			Chain:                    "chainID",
+			TxVersionCheck:           &testscommon.TxVersionCheckerStub{},
 			MinTxVersion:             1,
 			EnableEpochsHandlerField: &testscommon.EnableEpochsHandlerStub{},
 		},
@@ -90,13 +96,16 @@ func createMockArgument(
 				OwnerAddress: "erd1932eft30w753xyvme8d49qejgkjc09n5e49w4mwdjtm0neld797su0dlxp",
 			},
 			GovernanceSystemSCConfig: config.GovernanceSystemSCConfig{
+				V1: config.GovernanceSystemSCConfigV1{
+					ProposalCost: "500",
+				},
 				Active: config.GovernanceSystemSCConfigActive{
 					ProposalCost:     "500",
-					MinQuorum:        "50",
-					MinPassThreshold: "50",
-					MinVetoThreshold: "50",
+					MinQuorum:        0.5,
+					MinPassThreshold: 0.5,
+					MinVetoThreshold: 0.5,
 				},
-				FirstWhitelistedAddress: "3132333435363738393031323334353637383930313233343536373839303234",
+				ChangeConfigAddress: "3132333435363738393031323334353637383930313233343536373839303234",
 			},
 			StakingSystemSCConfig: config.StakingSystemSCConfig{
 				GenesisNodePrice: nodePrice.Text(10),
@@ -204,11 +213,6 @@ func createMockArgument(
 }
 
 func TestGenesisBlockCreator_CreateGenesisBlockAfterHardForkShouldCreateSCResultingAddresses(t *testing.T) {
-	// TODO reinstate test after Wasm VM pointer fix
-	if testing.Short() {
-		t.Skip("cannot run with -race -short; requires Wasm VM fix")
-	}
-
 	scAddressBytes, _ := hex.DecodeString("00000000000000000500761b8c4a25d3979359223208b412285f635e71300102")
 	initialNodesSetup := &mock.InitialNodesHandlerStub{
 		InitialNodesInfoCalled: func() (map[uint32][]nodesCoordinator.GenesisNodeInfoHandler, map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) {
@@ -269,11 +273,6 @@ func TestGenesisBlockCreator_CreateGenesisBlockAfterHardForkShouldCreateSCResult
 }
 
 func TestGenesisBlockCreator_CreateGenesisBlocksJustDelegationShouldWorkAndDNS(t *testing.T) {
-	// TODO reinstate test after Wasm VM pointer fix
-	if testing.Short() {
-		t.Skip("cannot run with -race -short; requires Wasm VM fix")
-	}
-
 	scAddressBytes, _ := hex.DecodeString("00000000000000000500761b8c4a25d3979359223208b412285f635e71300102")
 	stakedAddr, _ := hex.DecodeString("b00102030405060708090001020304050607080900010203040506070809000b")
 	initialNodesSetup := &mock.InitialNodesHandlerStub{
@@ -318,11 +317,6 @@ func TestGenesisBlockCreator_CreateGenesisBlocksJustDelegationShouldWorkAndDNS(t
 }
 
 func TestGenesisBlockCreator_CreateGenesisBlocksStakingAndDelegationShouldWorkAndDNS(t *testing.T) {
-	// TODO reinstate test after Wasm VM pointer fix
-	if testing.Short() {
-		t.Skip("cannot run with -race -short; requires Wasm VM fix")
-	}
-
 	scAddressBytes, _ := hex.DecodeString("00000000000000000500761b8c4a25d3979359223208b412285f635e71300102")
 	stakedAddr, _ := hex.DecodeString("b00102030405060708090001020304050607080900010203040506070809000b")
 	stakedAddr2, _ := hex.DecodeString("d00102030405060708090001020304050607080900010203040506070809000d")
@@ -398,11 +392,6 @@ func TestGenesisBlockCreator_CreateGenesisBlocksStakingAndDelegationShouldWorkAn
 }
 
 func TestGenesisBlockCreator_GetIndexingDataShouldWork(t *testing.T) {
-	// TODO reinstate test after Wasm VM pointer fix
-	if testing.Short() {
-		t.Skip("cannot run with -race -short; requires Wasm VM fix")
-	}
-
 	scAddressBytes, _ := hex.DecodeString("00000000000000000500761b8c4a25d3979359223208b412285f635e71300102")
 	stakedAddr, _ := hex.DecodeString("b00102030405060708090001020304050607080900010203040506070809000b")
 	stakedAddr2, _ := hex.DecodeString("d00102030405060708090001020304050607080900010203040506070809000d")
diff --git a/genesis/process/metaGenesisBlockCreator.go b/genesis/process/metaGenesisBlockCreator.go
index 472ae9de959..26a7aa18b45 100644
--- a/genesis/process/metaGenesisBlockCreator.go
+++ b/genesis/process/metaGenesisBlockCreator.go
@@ -27,6 +27,7 @@ import (
 	"github.com/multiversx/mx-chain-go/process/coordinator"
 	"github.com/multiversx/mx-chain-go/process/factory"
 	"github.com/multiversx/mx-chain-go/process/factory/metachain"
+	disabledGuardian "github.com/multiversx/mx-chain-go/process/guardian/disabled"
 	"github.com/multiversx/mx-chain-go/process/receipts"
 	"github.com/multiversx/mx-chain-go/process/smartContract"
 	"github.com/multiversx/mx-chain-go/process/smartContract/hooks"
@@ -359,15 +360,17 @@ func createProcessorsForMetaGenesisBlock(arg ArgsGenesisBlockCreator, enableEpoc
 	}
 
 	genesisFeeHandler := &disabled.FeeHandler{}
-	interimProcFactory, err := metachain.NewIntermediateProcessorsContainerFactory(
-		arg.ShardCoordinator,
-		arg.Core.InternalMarshalizer(),
-		arg.Core.Hasher(),
-		arg.Core.AddressPubKeyConverter(),
-		arg.Data.StorageService(),
-		arg.Data.Datapool(),
-		genesisFeeHandler,
-	)
+	argsFactory := metachain.ArgsNewIntermediateProcessorsContainerFactory{
+		ShardCoordinator:    arg.ShardCoordinator,
+		Marshalizer:         arg.Core.InternalMarshalizer(),
+		Hasher:              arg.Core.Hasher(),
+		PubkeyConverter:     arg.Core.AddressPubKeyConverter(),
+		Store:               arg.Data.StorageService(),
+		PoolsHolder:         arg.Data.Datapool(),
+		EconomicsFee:        genesisFeeHandler,
+		EnableEpochsHandler: enableEpochsHandler,
+	}
+	interimProcFactory, err := metachain.NewIntermediateProcessorsContainerFactory(argsFactory)
 	if err != nil {
 		return nil, err
 	}
@@ -449,6 +452,8 @@ func createProcessorsForMetaGenesisBlock(arg ArgsGenesisBlockCreator, enableEpoc
 		TxTypeHandler:       txTypeHandler,
 		EconomicsFee:        genesisFeeHandler,
 		EnableEpochsHandler: enableEpochsHandler,
+		TxVersionChecker:    disabled.NewDisabledTxVersionChecker(),
+		GuardianChecker:     disabledGuardian.NewDisabledGuardedAccountHandler(),
 	}
 	txProcessor, err := processTransaction.NewMetaTxProcessor(argsNewMetaTxProcessor)
 	if err != nil {
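The shard genesis creator below applies the same args-struct constructor refactor as the metachain one above, and keeps the new guardian feature dormant at genesis by pushing its enable epoch out of reach. A small, self-contained sketch of that deactivation idiom (the struct and values here are illustrative stand-ins for config.EnableEpochs):

    package main

    import (
        "fmt"
        "math"
    )

    // enableEpochs is a stand-in for the node's config.EnableEpochs; setting a
    // feature's activation epoch to an unreachable value keeps it off at genesis.
    type enableEpochs struct {
        SetGuardianEnableEpoch uint32
    }

    func main() {
        unreachableEpoch := uint32(math.MaxUint32)
        cfg := enableEpochs{SetGuardianEnableEpoch: unreachableEpoch}
        fmt.Println(cfg.SetGuardianEnableEpoch) // 4294967295: never reached
    }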
diff --git a/genesis/process/shardGenesisBlockCreator.go b/genesis/process/shardGenesisBlockCreator.go
index 0fb9f77a0f0..58fa13af0bf 100644
--- a/genesis/process/shardGenesisBlockCreator.go
+++ b/genesis/process/shardGenesisBlockCreator.go
@@ -22,6 +22,7 @@ import (
 	"github.com/multiversx/mx-chain-go/process/block/preprocess"
 	"github.com/multiversx/mx-chain-go/process/coordinator"
 	"github.com/multiversx/mx-chain-go/process/factory/shard"
+	disabledGuardian "github.com/multiversx/mx-chain-go/process/guardian/disabled"
 	"github.com/multiversx/mx-chain-go/process/receipts"
 	"github.com/multiversx/mx-chain-go/process/rewardTransaction"
 	"github.com/multiversx/mx-chain-go/process/smartContract"
@@ -140,6 +141,7 @@ func createGenesisConfig() config.EnableEpochs {
 		DoNotReturnOldBlockInBlockchainHookEnableEpoch: unreachableEpoch,
 		MaxBlockchainHookCountersEnableEpoch:           unreachableEpoch,
 		BLSMultiSignerEnableEpoch:                      blsMultiSignerEnableEpoch,
+		SetGuardianEnableEpoch:                         unreachableEpoch,
 	}
 }
 
@@ -399,6 +401,7 @@ func createProcessorsForShardGenesisBlock(arg ArgsGenesisBlockCreator, enableEpo
 		EnableEpochsHandler:       enableEpochsHandler,
 		AutomaticCrawlerAddresses: [][]byte{make([]byte, 32)},
 		MaxNumNodesInTransferRole: math.MaxUint32,
+		GuardedAccountHandler:     disabledGuardian.NewDisabledGuardedAccountHandler(),
 	}
 	builtInFuncFactory, err := builtInFunctions.CreateBuiltInFunctionsFactory(argsBuiltIn)
 	if err != nil {
@@ -463,15 +466,17 @@ func createProcessorsForShardGenesisBlock(arg ArgsGenesisBlockCreator, enableEpo
 	}
 
 	genesisFeeHandler := &disabled.FeeHandler{}
-	interimProcFactory, err := shard.NewIntermediateProcessorsContainerFactory(
-		arg.ShardCoordinator,
-		arg.Core.InternalMarshalizer(),
-		arg.Core.Hasher(),
-		arg.Core.AddressPubKeyConverter(),
-		arg.Data.StorageService(),
-		arg.Data.Datapool(),
-		genesisFeeHandler,
-	)
+	argsFactory := shard.ArgsNewIntermediateProcessorsContainerFactory{
+		ShardCoordinator:    arg.ShardCoordinator,
+		Marshalizer:         arg.Core.InternalMarshalizer(),
+		Hasher:              arg.Core.Hasher(),
+		PubkeyConverter:     arg.Core.AddressPubKeyConverter(),
+		Store:               arg.Data.StorageService(),
+		PoolsHolder:         arg.Data.Datapool(),
+		EconomicsFee:        genesisFeeHandler,
+		EnableEpochsHandler: enableEpochsHandler,
+	}
+	interimProcFactory, err := shard.NewIntermediateProcessorsContainerFactory(argsFactory)
 	if err != nil {
 		return nil, err
 	}
@@ -574,6 +579,8 @@ func createProcessorsForShardGenesisBlock(arg ArgsGenesisBlockCreator, enableEpo
 		ArgsParser:          smartContract.NewArgumentParser(),
 		ScrForwarder:        scForwarder,
 		EnableEpochsHandler: enableEpochsHandler,
+		TxVersionChecker:    arg.Core.TxVersionChecker(),
+		GuardianChecker:     disabledGuardian.NewDisabledGuardedAccountHandler(),
 	}
 	transactionProcessor, err := transaction.NewTxProcessor(argsNewTxProcessor)
 	if err != nil {
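The dependency changes below bump the MultiversX core, crypto, p2p, storage, VM and indexer modules in lockstep, along with their transitive dependencies. As general Go practice (not something this patch documents), running go mod tidy after editing go.mod is the usual way to bring go.sum back in sync.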
diff --git a/go.mod b/go.mod
index c22f4c55a22..57dd47bbefc 100644
--- a/go.mod
+++ b/go.mod
@@ -13,22 +13,22 @@ require (
 	github.com/google/gops v0.3.18
 	github.com/gorilla/websocket v1.5.0
 	github.com/mitchellh/mapstructure v1.5.0
-	github.com/multiversx/mx-chain-core-go v1.2.1-0.20230504075947-f67a2083a86f
-	github.com/multiversx/mx-chain-crypto-go v1.2.5
-	github.com/multiversx/mx-chain-es-indexer-go v1.4.0
+	github.com/multiversx/mx-chain-core-go v1.2.3-0.20230512130104-2a2e00c016b2
+	github.com/multiversx/mx-chain-crypto-go v1.2.6
+	github.com/multiversx/mx-chain-es-indexer-go v1.4.1
 	github.com/multiversx/mx-chain-logger-go v1.0.11
-	github.com/multiversx/mx-chain-p2p-go v1.0.15
-	github.com/multiversx/mx-chain-storage-go v1.0.8-0.20230403115027-9139fce478e0
-	github.com/multiversx/mx-chain-vm-common-go v1.4.1-0.20230403123953-7fc57accc0c6
-	github.com/multiversx/mx-chain-vm-v1_2-go v1.2.50
-	github.com/multiversx/mx-chain-vm-v1_3-go v1.3.51
-	github.com/multiversx/mx-chain-vm-v1_4-go v1.4.77
+	github.com/multiversx/mx-chain-p2p-go v1.0.16
+	github.com/multiversx/mx-chain-storage-go v1.0.9-0.20230512130346-04e711f3d064
+	github.com/multiversx/mx-chain-vm-common-go v1.4.2-0.20230512130259-7b26a55bcd8a
+	github.com/multiversx/mx-chain-vm-v1_2-go v1.2.53
+	github.com/multiversx/mx-chain-vm-v1_3-go v1.3.54
+	github.com/multiversx/mx-chain-vm-v1_4-go v1.4.80
 	github.com/pelletier/go-toml v1.9.3
 	github.com/pkg/errors v0.9.1
 	github.com/shirou/gopsutil v3.21.11+incompatible
 	github.com/stretchr/testify v1.8.1
 	github.com/urfave/cli v1.22.10
-	golang.org/x/crypto v0.5.0
+	golang.org/x/crypto v0.7.0
 	gopkg.in/go-playground/validator.v8 v8.18.2
 )
 
@@ -72,17 +72,18 @@ require (
 	github.com/hashicorp/golang-lru v0.6.0 // indirect
 	github.com/herumi/bls-go-binary v1.0.0 // indirect
 	github.com/huin/goupnp v1.0.3 // indirect
-	github.com/ipfs/go-cid v0.2.0 // indirect
+	github.com/ipfs/go-cid v0.3.2 // indirect
 	github.com/ipfs/go-datastore v0.5.1 // indirect
 	github.com/ipfs/go-ipfs-util v0.0.2 // indirect
 	github.com/ipfs/go-ipns v0.2.0 // indirect
 	github.com/ipfs/go-log v1.0.5 // indirect
 	github.com/ipfs/go-log/v2 v2.5.1 // indirect
-	github.com/ipld/go-ipld-prime v0.9.0 // indirect
+	github.com/ipld/go-ipld-prime v0.19.0 // indirect
 	github.com/jackpal/go-nat-pmp v1.0.2 // indirect
 	github.com/jbenet/go-temp-err-catcher v0.1.0 // indirect
 	github.com/jbenet/goprocess v0.1.4 // indirect
 	github.com/json-iterator/go v1.1.12 // indirect
+	github.com/jtolds/gls v4.20.0+incompatible // indirect
 	github.com/klauspost/compress v1.15.1 // indirect
 	github.com/klauspost/cpuid/v2 v2.1.0 // indirect
 	github.com/koron/go-ssdp v0.0.3 // indirect
@@ -127,7 +128,7 @@ require (
 	github.com/multiformats/go-multiaddr-dns v0.3.1 // indirect
 	github.com/multiformats/go-multiaddr-fmt v0.1.0 // indirect
 	github.com/multiformats/go-multibase v0.1.1 // indirect
-	github.com/multiformats/go-multicodec v0.5.0 // indirect
+	github.com/multiformats/go-multicodec v0.6.0 // indirect
 	github.com/multiformats/go-multihash v0.2.1 // indirect
 	github.com/multiformats/go-multistream v0.3.3 // indirect
 	github.com/multiformats/go-varint v0.0.6 // indirect
@@ -141,7 +142,7 @@ require (
 	github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect
 	github.com/pelletier/go-toml/v2 v2.0.6 // indirect
 	github.com/pmezard/go-difflib v1.0.0 // indirect
-	github.com/polydawn/refmt v0.0.0-20190807091052-3d65705ee9f1 // indirect
+	github.com/polydawn/refmt v0.0.0-20201211092308-30ac6d18308e // indirect
 	github.com/prometheus/client_golang v1.12.1 // indirect
 	github.com/prometheus/client_model v0.2.0 // indirect
 	github.com/prometheus/common v0.37.0 // indirect
@@ -149,6 +150,7 @@ require (
 	github.com/raulk/go-watchdog v1.3.0 // indirect
 	github.com/russross/blackfriday/v2 v2.0.1 // indirect
 	github.com/shurcooL/sanitized_anchor_name v1.0.0 // indirect
+	github.com/smartystreets/assertions v1.13.0 // indirect
 	github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572 // indirect
 	github.com/spaolacci/murmur3 v1.1.0 // indirect
 	github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 // indirect
@@ -167,12 +169,12 @@ require (
 	go.uber.org/multierr v1.8.0 // indirect
 	go.uber.org/zap v1.22.0 // indirect
 	golang.org/x/arch v0.0.0-20210923205945-b76863e36670 // indirect
-	golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 // indirect
-	golang.org/x/net v0.7.0 // indirect
-	golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 // indirect
-	golang.org/x/sys v0.5.0 // indirect
-	golang.org/x/text v0.7.0 // indirect
-	golang.org/x/tools v0.1.12 // indirect
+	golang.org/x/mod v0.8.0 // indirect
+	golang.org/x/net v0.8.0 // indirect
+	golang.org/x/sync v0.1.0 // indirect
+	golang.org/x/sys v0.6.0 // indirect
+	golang.org/x/text v0.8.0 // indirect
+	golang.org/x/tools v0.6.0 // indirect
 	google.golang.org/protobuf v1.28.1 // indirect
 	gopkg.in/go-playground/assert.v1 v1.2.1 // indirect
 	gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
diff --git a/go.sum b/go.sum
index 936bffeb71a..e7e38c73066 100644
--- a/go.sum
+++ b/go.sum
@@ -37,6 +37,7 @@ dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7
 dmitri.shuralyov.com/html/belt v0.0.0-20180602232347-f7d459c86be0/go.mod h1:JLBrvjyP0v+ecvNYvCpyZgu5/xkfAUhi6wJj28eUfSU=
 dmitri.shuralyov.com/service/change v0.0.0-20181023043359-a85b471d5412/go.mod h1:a1inKt/atXimZ4Mv927x+r7UpyzRUf4emIoiiSC2TN4=
 dmitri.shuralyov.com/state v0.0.0-20180228185332-28bcc343414c/go.mod h1:0PRwlb0D6DFvNNtx+9ybjezNCa8XF0xaYcETyp6rHWU=
+filippo.io/edwards25519 v1.0.0/go.mod h1:N1IkdkCkiLB6tki+MYJoSx2JTY9NUlxZE7eHn5EwJns=
 git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg=
 github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8=
 github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8=
@@ -167,6 +168,8 @@ github.com/flynn/noise v1.0.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwU
 github.com/francoispqt/gojay v1.2.13 h1:d2m3sFjloqoIUQU3TsHBgj6qg/BVGlTBeHDUmyJnXKk=
 github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiDsoyrBGkyDY=
 github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k=
+github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE=
+github.com/frankban/quicktest v1.14.3/go.mod h1:mgiwOwqx65TmIk1wJ6Q7wvnVMocbUorkibMOrVTHZps=
 github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
 github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
 github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwVZI=
@@ -274,8 +277,9 @@ github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
 github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE=
-github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg=
 github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
+github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
 github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ=
 github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
 github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
@@ -337,8 +341,9 @@ github.com/ipfs/go-cid v0.0.3/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUP
 github.com/ipfs/go-cid v0.0.4/go.mod h1:4LLaPOQwmk5z9LBgQnpkivrx8BJjUyGwTXCd5Xfj6+M=
 github.com/ipfs/go-cid v0.0.5/go.mod h1:plgt+Y5MnOey4vO4UlUazGqdbEXuFYitED67FexhXog=
 github.com/ipfs/go-cid v0.0.7/go.mod h1:6Ux9z5e+HpkQdckYoX1PG/6xqKspzlEIR5SDmgqgC/I=
-github.com/ipfs/go-cid v0.2.0 h1:01JTiihFq9en9Vz0lc0VDWvZe/uBonGpzo4THP0vcQ0=
 github.com/ipfs/go-cid v0.2.0/go.mod h1:P+HXFDF4CVhaVayiEb4wkAy7zBHxBwsJyt0Y5U6MLro=
+github.com/ipfs/go-cid v0.3.2 h1:OGgOd+JCFM+y1DjWPmVH+2/4POtpDzwcr7VgnB7mZXc=
+github.com/ipfs/go-cid v0.3.2/go.mod h1:gQ8pKqT/sUxGY+tIwy1RPpAojYu7jAyCp5Tz1svoupw=
 github.com/ipfs/go-datastore v0.1.0/go.mod h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAKBGh803qeE=
 github.com/ipfs/go-datastore v0.1.1/go.mod h1:w38XXW9kVFNp57Zj5knbKWM2T+KOZCGDRVNdgPHtbHw=
 github.com/ipfs/go-datastore v0.4.1/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA=
@@ -371,8 +376,9 @@ github.com/ipfs/go-log/v2 v2.1.3/go.mod h1:/8d0SH3Su5Ooc31QlL1WysJhvyOTDCjcCZ9Ax
 github.com/ipfs/go-log/v2 v2.5.0/go.mod h1:prSpmC1Gpllc9UYWxDiZDreBYw7zp4Iqp1kOLU9U5UI=
 github.com/ipfs/go-log/v2 v2.5.1 h1:1XdUzF7048prq4aBjDQQ4SL5RxftpRGdXhNRwKSAlcY=
 github.com/ipfs/go-log/v2 v2.5.1/go.mod h1:prSpmC1Gpllc9UYWxDiZDreBYw7zp4Iqp1kOLU9U5UI=
-github.com/ipld/go-ipld-prime v0.9.0 h1:N2OjJMb+fhyFPwPnVvJcWU/NsumP8etal+d2v3G4eww=
 github.com/ipld/go-ipld-prime v0.9.0/go.mod h1:KvBLMr4PX1gWptgkzRjVZCrLmSGcZCb/jioOQwCqZN8=
+github.com/ipld/go-ipld-prime v0.19.0 h1:5axC7rJmPc17Emw6TelxGwnzALk0PdupZ2oj2roDj04=
+github.com/ipld/go-ipld-prime v0.19.0/go.mod h1:Q9j3BaVXwaA3o5JUDNvptDDr/x8+F7FG6XJ8WI3ILg4=
 github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus=
 github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc=
 github.com/jbenet/go-cienv v0.1.0/go.mod h1:TqNnHUmJgXau0nCzC7kXWeotg3J9W34CUv5Djy1+FlA=
@@ -589,8 +595,9 @@ github.com/multiformats/go-multibase v0.0.3/go.mod h1:5+1R4eQrT3PkYZ24C3W2Ue2tPw
 github.com/multiformats/go-multibase v0.1.1 h1:3ASCDsuLX8+j4kx58qnJ4YFq/JWTJpCyDW27ztsVTOI=
 github.com/multiformats/go-multibase v0.1.1/go.mod h1:ZEjHE+IsUrgp5mhlEAYjMtZwK1k4haNkcaPg9aoe1a8=
 github.com/multiformats/go-multicodec v0.4.1/go.mod h1:1Hj/eHRaVWSXiSNNfcEPcwZleTmdNP81xlxDLnWU9GQ=
-github.com/multiformats/go-multicodec v0.5.0 h1:EgU6cBe/D7WRwQb1KmnBvU7lrcFGMggZVTPtOW9dDHs=
 github.com/multiformats/go-multicodec v0.5.0/go.mod h1:DiY2HFaEp5EhEXb/iYzVAunmyX/aSFMxq2KMKfWEues=
+github.com/multiformats/go-multicodec v0.6.0 h1:KhH2kSuCARyuJraYMFxrNO3DqIaYhOdS039kbhgVwpE=
+github.com/multiformats/go-multicodec v0.6.0/go.mod h1:GUC8upxSBE4oG+q3kWZRw/+6yC1BqO550bjhWsJbZlw=
 github.com/multiformats/go-multihash v0.0.1/go.mod h1:w/5tugSrLEbWqlcgJabL3oHFKTwfvkofsjW2Qa1ct4U=
 github.com/multiformats/go-multihash v0.0.8/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew=
 github.com/multiformats/go-multihash v0.0.9/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew=
@@ -610,34 +617,30 @@ github.com/multiformats/go-varint v0.0.6/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXS
 github.com/multiversx/concurrent-map v0.1.4 h1:hdnbM8VE4b0KYJaGY5yJS2aNIW9TFFsUYwbO0993uPI=
 github.com/multiversx/concurrent-map v0.1.4/go.mod h1:8cWFRJDOrWHOTNSqgYCUvwT7c7eFQ4U2vKMOp4A/9+o=
 github.com/multiversx/mx-chain-core-go v1.1.30/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk=
-github.com/multiversx/mx-chain-core-go v1.1.31/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk=
-github.com/multiversx/mx-chain-core-go v1.1.34/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk=
-github.com/multiversx/mx-chain-core-go v1.2.0/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk=
 github.com/multiversx/mx-chain-core-go v1.2.1-0.20230403113932-916b16d18978/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk=
-github.com/multiversx/mx-chain-core-go v1.2.1-0.20230504075947-f67a2083a86f h1:I1MpzgdJIWovjvJqdifvLepuG1Tg1vbQXchjPKtlqy8=
-github.com/multiversx/mx-chain-core-go v1.2.1-0.20230504075947-f67a2083a86f/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk=
-github.com/multiversx/mx-chain-crypto-go v1.2.5 h1:tuq3BUNMhKud5DQbZi9DiVAAHUXypizy8zPH0NpTGZk=
-github.com/multiversx/mx-chain-crypto-go v1.2.5/go.mod h1:teqhNyWEqfMPgNn8sgWXlgtJ1a36jGCnhs/tRpXW6r4=
-github.com/multiversx/mx-chain-es-indexer-go v1.4.0 h1:t2UCfbLRbFPBWK1IC1/qOVg+2D6y189xZZ1BoV83gq8=
-github.com/multiversx/mx-chain-es-indexer-go v1.4.0/go.mod h1:3glMXvE42VvLlUdiMMtQoDr6uKYS6RGb0icRgyAGXIY=
+github.com/multiversx/mx-chain-core-go v1.2.1/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk=
+github.com/multiversx/mx-chain-core-go v1.2.3-0.20230512130104-2a2e00c016b2 h1:j0kxDUOtqUPey78uWW39ScDT8S0V7G/L6kcl+JpGmq4=
+github.com/multiversx/mx-chain-core-go v1.2.3-0.20230512130104-2a2e00c016b2/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk=
+github.com/multiversx/mx-chain-crypto-go v1.2.6 h1:yxsjAQGh62los+iYmORMfh3w9qen0xbYlmwU0juNSeg=
+github.com/multiversx/mx-chain-crypto-go v1.2.6/go.mod h1:rOj0Rr19HTOYt9YTeym7RKxlHt91NXln3LVKjHKVmA0=
+github.com/multiversx/mx-chain-es-indexer-go v1.4.1 h1:gD/D7xZP7OL8L/ZZ3SoOfKjVHrU0iUxIG2AbidHFTUc=
+github.com/multiversx/mx-chain-es-indexer-go v1.4.1/go.mod h1:o+LWvL+UEKx1lrFhkV2SfxoaFybKro3ZLN4HOMGXDos=
 github.com/multiversx/mx-chain-logger-go v1.0.11 h1:DFsHa+sc5fKwhDR50I8uBM99RTDTEW68ESyr5ALRDwE=
 github.com/multiversx/mx-chain-logger-go v1.0.11/go.mod h1:1srDkP0DQucWQ+rYfaq0BX2qLnULsUdRPADpYUTM6dA=
-github.com/multiversx/mx-chain-p2p-go v1.0.15 h1:H7273huZG/zAR6MPvWuXwBEVBsJWH1MeSIDshYV0nh0=
-github.com/multiversx/mx-chain-p2p-go v1.0.15/go.mod h1:hUE4H8kGJk3u9gTqeetF3uhjJpnfdV/hALKsJ6bMI+8=
-github.com/multiversx/mx-chain-storage-go v1.0.7/go.mod h1:gtKoV32Cg2Uy8deHzF8Ud0qAl0zv92FvWgPSYIP0Zmg=
-github.com/multiversx/mx-chain-storage-go v1.0.8-0.20230403115027-9139fce478e0 h1:jTGuq0IAQdghGLoNx2BgkxWvkcZV9ZmJ0qB8/oU4MNQ=
-github.com/multiversx/mx-chain-storage-go v1.0.8-0.20230403115027-9139fce478e0/go.mod h1:FGhaeTNIcLZOPqsJZQ1TdcMaPVLhj642OzRNmt6+RQs=
-github.com/multiversx/mx-chain-vm-common-go v1.3.36/go.mod h1:sZ2COLCxvf2GxAAJHGmGqWybObLtFuk2tZUyGqnMXE8=
-github.com/multiversx/mx-chain-vm-common-go v1.3.37/go.mod h1:sZ2COLCxvf2GxAAJHGmGqWybObLtFuk2tZUyGqnMXE8=
-github.com/multiversx/mx-chain-vm-common-go v1.4.0/go.mod h1:odBJC92ANA8zLtPh/wwajUUGJOaS88F5QYGf0t8Wgzw=
-github.com/multiversx/mx-chain-vm-common-go v1.4.1-0.20230403123953-7fc57accc0c6 h1:3G8BHyVfz1DkeZcds4iME5vDHzg8Yg2++wet0DDYZ3c=
-github.com/multiversx/mx-chain-vm-common-go v1.4.1-0.20230403123953-7fc57accc0c6/go.mod h1:rxb8laeh06wayB/dZPpN5LT3qcwv4SgpNHiSvPsNjuw=
-github.com/multiversx/mx-chain-vm-v1_2-go v1.2.50 h1:ScUq7/wq78vthMTQ6v5Ux1DvSMQMHxQ2Sl7aPP26q1w=
-github.com/multiversx/mx-chain-vm-v1_2-go v1.2.50/go.mod h1:e3uYdgoKzs3puaznbmSjDcRisJc5Do4tpg7VqyYwoek=
-github.com/multiversx/mx-chain-vm-v1_3-go v1.3.51 h1:axtp5/mpA+xYJ1cu4KtAGETV4t6v6/tNfQh0HCclBYY=
-github.com/multiversx/mx-chain-vm-v1_3-go v1.3.51/go.mod h1:oKj32V2nkd+KGNOL6emnwVkDRPpciwHHDqBmeorcL8k=
-github.com/multiversx/mx-chain-vm-v1_4-go v1.4.77 h1:3Yh4brS5/Jye24l5AKy+Q6Yci6Rv55pHyj9/GR3AYos=
-github.com/multiversx/mx-chain-vm-v1_4-go v1.4.77/go.mod h1:3IaAOHc1JfxL5ywQZIrcaHQu5+CVdZNDaoY64NGOtUE=
+github.com/multiversx/mx-chain-p2p-go v1.0.16 h1:iMK8KUi006/avVcmecnk7lqbDCRL0BN04vgepoVLlyY=
+github.com/multiversx/mx-chain-p2p-go v1.0.16/go.mod h1:7piVUb5Z7UHK6n3JW8yIc32RdDFZu7GivRY29q0r2gI=
+github.com/multiversx/mx-chain-storage-go v1.0.8/go.mod h1:lEkFYFe6taKYxqO1einNgT1esY3K9Pj6vPnoCwV9C3U=
+github.com/multiversx/mx-chain-storage-go v1.0.9-0.20230512130346-04e711f3d064 h1:by2niUwKPvCONvTLUrhONwo+yl3Lin770A7uJAfEsaU=
+github.com/multiversx/mx-chain-storage-go v1.0.9-0.20230512130346-04e711f3d064/go.mod h1:FGhaeTNIcLZOPqsJZQ1TdcMaPVLhj642OzRNmt6+RQs=
+github.com/multiversx/mx-chain-vm-common-go v1.4.1/go.mod h1:K6yCdro8VohzYI6GwjGzTO+fJiPgO5coo2sgQb+zA24=
+github.com/multiversx/mx-chain-vm-common-go v1.4.2-0.20230512130259-7b26a55bcd8a h1:m0cQrhe2zet657jWjrE2nvba6DqM5I5bNSqbJcpwfEM=
+github.com/multiversx/mx-chain-vm-common-go v1.4.2-0.20230512130259-7b26a55bcd8a/go.mod h1:eBUoLYceIutumF+MZYrHhI+Fq/piUueuaR3vH8Pia8A=
+github.com/multiversx/mx-chain-vm-v1_2-go v1.2.53 h1:HGbatzgIhVPJASN3ADnygH4MrKypAidOVtZkHkY/jKw=
+github.com/multiversx/mx-chain-vm-v1_2-go v1.2.53/go.mod h1:STVJW9m/TUJ9Q64W/T4P/KuhP5fPb+cCb6Q4gSehWJg=
+github.com/multiversx/mx-chain-vm-v1_3-go v1.3.54 h1:bl2essObOEDwVWci71hJ2QO5AYTsKk6IlzRK0i8y63w=
+github.com/multiversx/mx-chain-vm-v1_3-go v1.3.54/go.mod h1:1rgU8wXdn76S7rZx+4YS6ObK+M1XiSdPoPmXVq8fuZE=
+github.com/multiversx/mx-chain-vm-v1_4-go v1.4.80 h1:iiOXTcwvfjQXlchlVnSdNeqHYKVn/k7s/MsHfk/wrr0=
+github.com/multiversx/mx-chain-vm-v1_4-go v1.4.80/go.mod h1:Be8y+QBPSKacW2TJaaQSeKYNGtCenFt4dpBOAnICAcc=
 github.com/multiversx/mx-components-big-int v0.1.1 h1:695mYPKYOrmGEGgRH4/pZruDoe3CPP1LHrBxKfvj5l4=
 github.com/multiversx/mx-components-big-int v0.1.1/go.mod h1:0QrcFdfeLgJ/am10HGBeH0G0DNF+0Qx1E4DS/iozQls=
 github.com/multiversx/protobuf v1.3.2 h1:RaNkxvGTGbA0lMcnHAN24qE1G1i+Xs5yHA6MDvQ4mSM=
@@ -687,8 +690,9 @@ github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
 github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
 github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/polydawn/refmt v0.0.0-20190807091052-3d65705ee9f1 
h1:CskT+S6Ay54OwxBGB0R3Rsx4Muto6UnEYTyKJbyRIAI= github.com/polydawn/refmt v0.0.0-20190807091052-3d65705ee9f1/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= +github.com/polydawn/refmt v0.0.0-20201211092308-30ac6d18308e h1:ZOcivgkkFRnjfoTcGsDq3UQYiBmekwLA+qg0OjyB/ls= +github.com/polydawn/refmt v0.0.0-20201211092308-30ac6d18308e/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= @@ -759,8 +763,9 @@ github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6Mwd github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= -github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/assertions v1.13.0 h1:Dx1kYM01xsSqKPno3aqLnrwac2LetPvN23diwyr69Qs= +github.com/smartystreets/assertions v1.13.0/go.mod h1:wDmR7qL282YbGsPy6H/yAsesrxfxaaSlJazyFLYVFx8= github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/smola/gocompat v0.2.0/go.mod h1:1B0MlxbmoZNo3h8guHp8HztB3BSYR5itql9qtVc0ypY= @@ -821,6 +826,7 @@ github.com/urfave/cli v1.22.10/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60Nt github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU= github.com/viant/toolbox v0.24.0/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMILuUhzM= github.com/wangjia184/sortedset v0.0.0-20160527075905-f5d03557ba30/go.mod h1:YkocrP2K2tcw938x9gCOmT5G5eCD6jsTz0SZuyAqwIE= +github.com/warpfork/go-testmark v0.10.0/go.mod h1:jhEf8FVxd+F17juRubpmut64NEG6I2rgkUhlcqqXwE0= github.com/warpfork/go-wish v0.0.0-20200122115046-b9ea61034e4a h1:G++j5e0OC488te356JvdhaM8YS6nMsjLAYF7JxCv07w= github.com/warpfork/go-wish v0.0.0-20200122115046-b9ea61034e4a/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw= github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 h1:EKhdznlJHPMoKr0XTrX+IlJs1LH3lyx2nfr1dOlZ79k= @@ -900,8 +906,9 @@ golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= -golang.org/x/crypto v0.5.0 h1:U/0M97KRkSFvyD/3FSmdP5W5swImpNgle/EHFhOsQPE= golang.org/x/crypto v0.5.0/go.mod h1:NK/OQwhpMQP3MwtdjgLlYHnH9ebylxKWv3e0fK+mkQU= +golang.org/x/crypto v0.7.0 h1:AvwMYaRytfdeVt3u6mLaxYtErKYjxA2OXjJ1HHq6t3A= +golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= 
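Note: the golang.org/x/* bumps recorded in this go.mod and go.sum move in lockstep (mod v0.8.0, net v0.8.0, sync v0.1.0, sys v0.6.0, text v0.8.0, tools v0.6.0), since the x/ modules depend on one another. A quick way to sanity-check such a coordinated bump, sketched here with the golang.org/x/mod module this PR itself upgrades (illustrative only, not part of the change):

package main

import (
	"fmt"

	"golang.org/x/mod/semver"
)

func main() {
	// The x/tools bump recorded in this go.mod: v0.1.12 -> v0.6.0.
	before, after := "v0.1.12", "v0.6.0"
	fmt.Println(semver.IsValid(before), semver.IsValid(after)) // true true
	fmt.Println(semver.Compare(before, after))                 // -1, i.e. before < after
}

go mod tidy regenerates the go.sum pairs shown here; entries that keep only a /go.mod hash belong to modules that remain in the dependency graph but whose source is no longer needed.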
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -937,8 +944,9 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= golang.org/x/mod v0.6.0-dev.0.20211013180041-c96bc1413d57/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= -golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 h1:6zppjxzCulZykYSLyVDYbneBfbaBIQPYMevg0bEwv2s= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.8.0 h1:LUYupSeNrTNCGzR/hVBk2NHZO4hXcVaW1k4Qx7rjPx8= +golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20180719180050-a680a1efc54d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -995,8 +1003,10 @@ golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug golang.org/x/net v0.0.0-20220812174116-3211cb980234/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= -golang.org/x/net v0.7.0 h1:rJrUqqhjsgNp7KqAIc25s9pZnjU7TUcSY7HcVZjdn1g= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ= +golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -1019,8 +1029,9 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 h1:uVc8UZUe6tr40fFVnUP5Oj+veunVezqYl9z7DYw9xzw= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180810173357-98c5dad5d1a0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1098,14 +1109,16 @@ golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1116,8 +1129,9 @@ golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68= +golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -1175,8 +1189,9 @@ golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.6-0.20210726203631-07bc1bf47fb2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.8-0.20211029000441-d6a9af8af023/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= -golang.org/x/tools v0.1.12 h1:VveCTK38A2rkS8ZqFY25HIDFscX5X9OoEhJd3quQmXU= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.6.0 h1:BOw41kyTf3PuCW1pVQf8+Cyg8pMlkYB1oo9iJ6D/lKM= +golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/heartbeat/processor/export_test.go 
b/heartbeat/processor/export_test.go
new file mode 100644
index 00000000000..8f0b1230151
--- /dev/null
+++ b/heartbeat/processor/export_test.go
@@ -0,0 +1,6 @@
+package processor
+
+// NewPeerAuthenticationRequestsProcessorWithoutGoRoutine -
+func NewPeerAuthenticationRequestsProcessorWithoutGoRoutine(args ArgPeerAuthenticationRequestsProcessor) (*peerAuthenticationRequestsProcessor, error) {
+	return newPeerAuthenticationRequestsProcessor(args)
+}
diff --git a/heartbeat/processor/peerAuthenticationRequestsProcessor.go b/heartbeat/processor/peerAuthenticationRequestsProcessor.go
index 640f5d96493..6daa24edf02 100644
--- a/heartbeat/processor/peerAuthenticationRequestsProcessor.go
+++ b/heartbeat/processor/peerAuthenticationRequestsProcessor.go
@@ -55,12 +55,26 @@ type peerAuthenticationRequestsProcessor struct {
 
 // NewPeerAuthenticationRequestsProcessor creates a new instance of peerAuthenticationRequestsProcessor
 func NewPeerAuthenticationRequestsProcessor(args ArgPeerAuthenticationRequestsProcessor) (*peerAuthenticationRequestsProcessor, error) {
+	processor, err := newPeerAuthenticationRequestsProcessor(args)
+	if err != nil {
+		return nil, err
+	}
+
+	var ctx context.Context
+	ctx, processor.cancel = context.WithTimeout(context.Background(), args.MaxTimeoutForRequests)
+
+	go processor.startRequestingMessages(ctx)
+
+	return processor, nil
+}
+
+func newPeerAuthenticationRequestsProcessor(args ArgPeerAuthenticationRequestsProcessor) (*peerAuthenticationRequestsProcessor, error) {
 	err := checkArgs(args)
 	if err != nil {
 		return nil, err
 	}
 
-	processor := &peerAuthenticationRequestsProcessor{
+	return &peerAuthenticationRequestsProcessor{
 		requestHandler:          args.RequestHandler,
 		nodesCoordinator:        args.NodesCoordinator,
 		peerAuthenticationPool:  args.PeerAuthenticationPool,
@@ -70,14 +84,7 @@ func NewPeerAuthenticationRequestsProcessor(args ArgPeerAuthenticationRequestsPr
 		delayBetweenRequests:    args.DelayBetweenRequests,
 		maxMissingKeysInRequest: args.MaxMissingKeysInRequest,
 		randomizer:              args.Randomizer,
-	}
-
-	var ctx context.Context
-	ctx, processor.cancel = context.WithTimeout(context.Background(), args.MaxTimeoutForRequests)
-
-	go processor.startRequestingMessages(ctx)
-
-	return processor, nil
+	}, nil
 }
 
 func checkArgs(args ArgPeerAuthenticationRequestsProcessor) error {
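The refactor above separates validation and construction (newPeerAuthenticationRequestsProcessor) from the exported constructor that also spawns the requesting go routine, which is what lets the export_test.go constructor hand tests an instance with no background activity. A minimal sketch of the same pattern under assumed names (worker, NewWorker, errInvalidInterval and the work callback are illustrative, not from this repo):

package worker

import (
	"context"
	"errors"
	"time"
)

var errInvalidInterval = errors.New("interval must be positive")

type worker struct {
	interval time.Duration
	work     func()
	cancel   context.CancelFunc
}

// newWorker only validates arguments and builds the struct; no goroutine
// starts here, so tests can construct instances deterministically.
func newWorker(interval time.Duration, work func()) (*worker, error) {
	if interval <= 0 {
		return nil, errInvalidInterval
	}
	if work == nil {
		return nil, errors.New("nil work func")
	}
	return &worker{interval: interval, work: work}, nil
}

// NewWorker is the production constructor: build first, then spawn the loop.
func NewWorker(interval time.Duration, work func()) (*worker, error) {
	w, err := newWorker(interval, work)
	if err != nil {
		return nil, err
	}

	var ctx context.Context
	ctx, w.cancel = context.WithCancel(context.Background())
	go w.run(ctx)

	return w, nil
}

func (w *worker) run(ctx context.Context) {
	ticker := time.NewTicker(w.interval)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return
		case <-ticker.C:
			w.work()
		}
	}
}

// Close stops the background loop; calling it twice is safe because a
// context.CancelFunc may be invoked multiple times.
func (w *worker) Close() error {
	w.cancel()
	return nil
}

Production callers keep the old single-call behaviour; only tests reach for the goroutine-free constructor.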
diff --git a/heartbeat/processor/peerAuthenticationRequestsProcessor_test.go b/heartbeat/processor/peerAuthenticationRequestsProcessor_test.go
index 6f407a2a2e0..39e21d9eb80 100644
--- a/heartbeat/processor/peerAuthenticationRequestsProcessor_test.go
+++ b/heartbeat/processor/peerAuthenticationRequestsProcessor_test.go
@@ -3,6 +3,7 @@ package processor
 import (
 	"bytes"
 	"errors"
+	"fmt"
 	"sort"
 	"strings"
 	"sync"
@@ -10,12 +11,14 @@ import (
 	"testing"
 	"time"
 
+	mxAtomic "github.com/multiversx/mx-chain-core-go/core/atomic"
 	"github.com/multiversx/mx-chain-core-go/core/check"
 	"github.com/multiversx/mx-chain-core-go/core/random"
 	"github.com/multiversx/mx-chain-go/heartbeat"
 	"github.com/multiversx/mx-chain-go/testscommon"
 	"github.com/multiversx/mx-chain-go/testscommon/shardingMocks"
 	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
 )
 
 func createMockArgPeerAuthenticationRequestsProcessor() ArgPeerAuthenticationRequestsProcessor {
@@ -252,7 +255,7 @@ func TestPeerAuthenticationRequestsProcessor_isThresholdReached(t *testing.T) {
 		},
 	}
 
-	processor, err := NewPeerAuthenticationRequestsProcessor(args)
+	processor, err := NewPeerAuthenticationRequestsProcessorWithoutGoRoutine(args)
 	assert.Nil(t, err)
 	assert.False(t, check.IfNil(processor))
@@ -276,7 +279,7 @@ func TestPeerAuthenticationRequestsProcessor_requestMissingKeys(t *testing.T) {
 		},
 	}
 
-	processor, err := NewPeerAuthenticationRequestsProcessor(args)
+	processor, err := NewPeerAuthenticationRequestsProcessorWithoutGoRoutine(args)
 	assert.Nil(t, err)
 	assert.False(t, check.IfNil(processor))
@@ -293,7 +296,7 @@ func TestPeerAuthenticationRequestsProcessor_getRandMaxMissingKeys(t *testing.T)
 	args := createMockArgPeerAuthenticationRequestsProcessor()
 	args.MaxMissingKeysInRequest = 3
 
-	processor, err := NewPeerAuthenticationRequestsProcessor(args)
+	processor, err := NewPeerAuthenticationRequestsProcessorWithoutGoRoutine(args)
 	assert.Nil(t, err)
 	assert.False(t, check.IfNil(processor))
@@ -307,3 +310,58 @@ func TestPeerAuthenticationRequestsProcessor_getRandMaxMissingKeys(t *testing.T)
 		}
 	}
 }
+
+func TestPeerAuthenticationRequestsProcessor_goRoutineIsWorkingAndCloseShouldStopIt(t *testing.T) {
+	t.Parallel()
+
+	args := createMockArgPeerAuthenticationRequestsProcessor()
+	args.NodesCoordinator = &shardingMocks.NodesCoordinatorStub{
+		GetAllEligibleValidatorsPublicKeysCalled: func(epoch uint32) (map[uint32][][]byte, error) {
+			return map[uint32][][]byte{
+				0: {[]byte("pk0")},
+			}, nil
+		},
+	}
+	keysCalled := &mxAtomic.Flag{}
+	args.PeerAuthenticationPool = &testscommon.CacherStub{
+		KeysCalled: func() [][]byte {
+			keysCalled.SetValue(true)
+			return make([][]byte, 0)
+		},
+	}
+
+	processor, _ := NewPeerAuthenticationRequestsProcessor(args)
+	time.Sleep(args.DelayBetweenRequests*2 + time.Millisecond*300) // wait for the go routine to start and execute at least once
+	assert.True(t, keysCalled.IsSet())
+
+	err := processor.Close()
+	assert.Nil(t, err)
+
+	time.Sleep(time.Second) // wait for the go routine to stop
+	keysCalled.SetValue(false)
+
+	time.Sleep(args.DelayBetweenRequests*2 + time.Millisecond*300) // if the go routine did not stop, it will set the flag again
+	assert.False(t, keysCalled.IsSet())
+}
+
+func TestPeerAuthenticationRequestsProcessor_CloseCalledTwiceShouldNotPanicNorError(t *testing.T) {
+	t.Parallel()
+
+	defer func() {
+		r := recover()
+		if r != nil {
+			require.Fail(t, fmt.Sprintf("should not have panicked: %v", r))
+		}
+	}()
+
+	args := createMockArgPeerAuthenticationRequestsProcessor()
+	processor, _ := NewPeerAuthenticationRequestsProcessor(args)
+
+	time.Sleep(args.DelayBetweenRequests*2 + time.Millisecond*300) // wait for the go routine to start and execute at least once
+
+	err := processor.Close()
+	assert.Nil(t, err)
+
+	err = processor.Close()
+	assert.Nil(t, err)
+}
diff --git a/integrationTests/factory/consensusComponents/consensusComponents_test.go b/integrationTests/factory/consensusComponents/consensusComponents_test.go
index 96b7afec65d..3ccb3f38a96 100644
--- a/integrationTests/factory/consensusComponents/consensusComponents_test.go
+++ b/integrationTests/factory/consensusComponents/consensusComponents_test.go
@@ -43,7 +43,7 @@ func TestConsensusComponents_Close_ShouldWork(t *testing.T) {
 	require.Nil(t, err)
 	managedDataComponents, err := nr.CreateManagedDataComponents(managedStatusCoreComponents, managedCoreComponents, managedBootstrapComponents, managedCryptoComponents)
 	require.Nil(t, err)
-	managedStateComponents, err := nr.CreateManagedStateComponents(managedCoreComponents, managedBootstrapComponents, managedDataComponents, managedStatusCoreComponents)
+	managedStateComponents, err := nr.CreateManagedStateComponents(managedCoreComponents, managedDataComponents, managedStatusCoreComponents)
 	require.Nil(t, err)
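The goRoutineIsWorkingAndCloseShouldStopIt test above proves both liveness and shutdown with a shared flag and generous sleeps: the loop sets it, Close is called, the flag is cleared, and a still-running loop would set it again. The same technique against the hypothetical worker from the previous sketch, with the standard library's sync/atomic.Bool (Go 1.19+) standing in for mx-chain-core-go's atomic.Flag:

package worker

import (
	"sync/atomic"
	"testing"
	"time"
)

func TestWorker_CloseStopsTheLoop(t *testing.T) {
	ticked := &atomic.Bool{}
	w, err := NewWorker(10*time.Millisecond, func() { ticked.Store(true) })
	if err != nil {
		t.Fatal(err)
	}

	time.Sleep(50 * time.Millisecond) // generous margin so the loop runs at least once
	if !ticked.Load() {
		t.Fatal("worker loop never executed")
	}

	if err = w.Close(); err != nil {
		t.Fatal(err)
	}

	time.Sleep(50 * time.Millisecond) // let the loop observe the cancelled context
	ticked.Store(false)

	time.Sleep(50 * time.Millisecond) // a loop that survived Close would set the flag again
	if ticked.Load() {
		t.Fatal("worker loop kept running after Close")
	}
}

The sleep-then-reset dance trades wall-clock time for freedom from races between Close and an in-flight tick; the production tests above accept the same trade-off.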
nodesShufflerOut, err := bootstrapComp.CreateNodesShuffleOut(managedCoreComponents.GenesisNodesSetup(), configs.GeneralConfig.EpochStartConfig, managedCoreComponents.ChanStopNodeProcess()) require.Nil(t, err) @@ -74,7 +74,6 @@ func TestConsensusComponents_Close_ShouldWork(t *testing.T) { managedCoreComponents, managedNetworkComponents, managedBootstrapComponents, - managedDataComponents, managedStateComponents, nodesCoordinator, false, @@ -105,7 +104,8 @@ func TestConsensusComponents_Close_ShouldWork(t *testing.T) { require.Nil(t, err) time.Sleep(2 * time.Second) - managedStatusComponents.SetForkDetector(managedProcessComponents.ForkDetector()) + err = managedStatusComponents.SetForkDetector(managedProcessComponents.ForkDetector()) + require.Nil(t, err) err = managedStatusComponents.StartPolling() require.Nil(t, err) diff --git a/integrationTests/factory/heartbeatComponents/heartbeatComponents_test.go b/integrationTests/factory/heartbeatComponents/heartbeatComponents_test.go index bf9ce94dda0..9125b8609f1 100644 --- a/integrationTests/factory/heartbeatComponents/heartbeatComponents_test.go +++ b/integrationTests/factory/heartbeatComponents/heartbeatComponents_test.go @@ -43,7 +43,7 @@ func TestHeartbeatComponents_Close_ShouldWork(t *testing.T) { require.Nil(t, err) managedDataComponents, err := nr.CreateManagedDataComponents(managedStatusCoreComponents, managedCoreComponents, managedBootstrapComponents, managedCryptoComponents) require.Nil(t, err) - managedStateComponents, err := nr.CreateManagedStateComponents(managedCoreComponents, managedBootstrapComponents, managedDataComponents, managedStatusCoreComponents) + managedStateComponents, err := nr.CreateManagedStateComponents(managedCoreComponents, managedDataComponents, managedStatusCoreComponents) require.Nil(t, err) nodesShufflerOut, err := bootstrapComp.CreateNodesShuffleOut(managedCoreComponents.GenesisNodesSetup(), configs.GeneralConfig.EpochStartConfig, managedCoreComponents.ChanStopNodeProcess()) require.Nil(t, err) @@ -74,7 +74,6 @@ func TestHeartbeatComponents_Close_ShouldWork(t *testing.T) { managedCoreComponents, managedNetworkComponents, managedBootstrapComponents, - managedDataComponents, managedStateComponents, nodesCoordinator, false, @@ -105,7 +104,8 @@ func TestHeartbeatComponents_Close_ShouldWork(t *testing.T) { require.Nil(t, err) time.Sleep(2 * time.Second) - managedStatusComponents.SetForkDetector(managedProcessComponents.ForkDetector()) + err = managedStatusComponents.SetForkDetector(managedProcessComponents.ForkDetector()) + require.Nil(t, err) err = managedStatusComponents.StartPolling() require.Nil(t, err) diff --git a/integrationTests/factory/processComponents/processComponents_test.go b/integrationTests/factory/processComponents/processComponents_test.go index 87ddd17f644..c44b347ae5c 100644 --- a/integrationTests/factory/processComponents/processComponents_test.go +++ b/integrationTests/factory/processComponents/processComponents_test.go @@ -44,7 +44,7 @@ func TestProcessComponents_Close_ShouldWork(t *testing.T) { require.Nil(t, err) managedDataComponents, err := nr.CreateManagedDataComponents(managedStatusCoreComponents, managedCoreComponents, managedBootstrapComponents, managedCryptoComponents) require.Nil(t, err) - managedStateComponents, err := nr.CreateManagedStateComponents(managedCoreComponents, managedBootstrapComponents, managedDataComponents, managedStatusCoreComponents) + managedStateComponents, err := nr.CreateManagedStateComponents(managedCoreComponents, managedDataComponents, 
managedStatusCoreComponents) require.Nil(t, err) nodesShufflerOut, err := bootstrapComp.CreateNodesShuffleOut(managedCoreComponents.GenesisNodesSetup(), configs.GeneralConfig.EpochStartConfig, managedCoreComponents.ChanStopNodeProcess()) require.Nil(t, err) @@ -75,7 +75,6 @@ func TestProcessComponents_Close_ShouldWork(t *testing.T) { managedCoreComponents, managedNetworkComponents, managedBootstrapComponents, - managedDataComponents, managedStateComponents, nodesCoordinator, false, @@ -106,7 +105,8 @@ func TestProcessComponents_Close_ShouldWork(t *testing.T) { time.Sleep(2 * time.Second) - managedStatusComponents.SetForkDetector(managedProcessComponents.ForkDetector()) + err = managedStatusComponents.SetForkDetector(managedProcessComponents.ForkDetector()) + require.Nil(t, err) err = managedStatusComponents.StartPolling() require.Nil(t, err) diff --git a/integrationTests/factory/stateComponents/stateComponents_test.go b/integrationTests/factory/stateComponents/stateComponents_test.go index 6056fcc2126..3c942f54e53 100644 --- a/integrationTests/factory/stateComponents/stateComponents_test.go +++ b/integrationTests/factory/stateComponents/stateComponents_test.go @@ -40,7 +40,7 @@ func TestStateComponents_Create_Close_ShouldWork(t *testing.T) { require.Nil(t, err) managedDataComponents, err := nr.CreateManagedDataComponents(managedStatusCoreComponents, managedCoreComponents, managedBootstrapComponents, managedCryptoComponents) require.Nil(t, err) - managedStateComponents, err := nr.CreateManagedStateComponents(managedCoreComponents, managedBootstrapComponents, managedDataComponents, managedStatusCoreComponents) + managedStateComponents, err := nr.CreateManagedStateComponents(managedCoreComponents, managedDataComponents, managedStatusCoreComponents) require.Nil(t, err) require.NotNil(t, managedStateComponents) diff --git a/integrationTests/factory/statusComponents/statusComponents_test.go b/integrationTests/factory/statusComponents/statusComponents_test.go index df8c26ef00a..a7a5fbca56d 100644 --- a/integrationTests/factory/statusComponents/statusComponents_test.go +++ b/integrationTests/factory/statusComponents/statusComponents_test.go @@ -44,7 +44,7 @@ func TestStatusComponents_Create_Close_ShouldWork(t *testing.T) { require.Nil(t, err) managedDataComponents, err := nr.CreateManagedDataComponents(managedStatusCoreComponents, managedCoreComponents, managedBootstrapComponents, managedCryptoComponents) require.Nil(t, err) - managedStateComponents, err := nr.CreateManagedStateComponents(managedCoreComponents, managedBootstrapComponents, managedDataComponents, managedStatusCoreComponents) + managedStateComponents, err := nr.CreateManagedStateComponents(managedCoreComponents, managedDataComponents, managedStatusCoreComponents) require.Nil(t, err) nodesShufflerOut, err := bootstrapComp.CreateNodesShuffleOut(managedCoreComponents.GenesisNodesSetup(), configs.GeneralConfig.EpochStartConfig, managedCoreComponents.ChanStopNodeProcess()) require.Nil(t, err) @@ -75,7 +75,6 @@ func TestStatusComponents_Create_Close_ShouldWork(t *testing.T) { managedCoreComponents, managedNetworkComponents, managedBootstrapComponents, - managedDataComponents, managedStateComponents, nodesCoordinator, false, @@ -106,7 +105,8 @@ func TestStatusComponents_Create_Close_ShouldWork(t *testing.T) { require.Nil(t, err) time.Sleep(2 * time.Second) - managedStatusComponents.SetForkDetector(managedProcessComponents.ForkDetector()) + err = managedStatusComponents.SetForkDetector(managedProcessComponents.ForkDetector()) + 
require.Nil(t, err) err = managedStatusComponents.StartPolling() require.Nil(t, err) diff --git a/integrationTests/interface.go b/integrationTests/interface.go index 0502b9d0f69..fb61587b644 100644 --- a/integrationTests/interface.go +++ b/integrationTests/interface.go @@ -69,6 +69,7 @@ type Facade interface { GetAllESDTTokens(address string, options api.AccountQueryOptions) (map[string]*esdt.ESDigitalToken, api.BlockInfo, error) GetESDTsRoles(address string, options api.AccountQueryOptions) (map[string][]string, api.BlockInfo, error) GetKeyValuePairs(address string, options api.AccountQueryOptions) (map[string]string, api.BlockInfo, error) + GetGuardianData(address string, options api.AccountQueryOptions) (api.GuardianData, api.BlockInfo, error) GetBlockByHash(hash string, options api.BlockQueryOptions) (*dataApi.Block, error) GetBlockByNonce(nonce uint64, options api.BlockQueryOptions) (*dataApi.Block, error) GetBlockByRound(round uint64, options api.BlockQueryOptions) (*dataApi.Block, error) @@ -85,8 +86,7 @@ type Facade interface { GetEpochStartDataAPI(epoch uint32) (*common.EpochStartDataAPI, error) GetPeerInfo(pid string) ([]core.QueryP2PPeerInfo, error) GetConnectedPeersRatings() string - CreateTransaction(nonce uint64, value string, receiver string, receiverUsername []byte, sender string, senderUsername []byte, gasPrice uint64, - gasLimit uint64, data []byte, signatureHex string, chainID string, version uint32, options uint32) (*transaction.Transaction, []byte, error) + CreateTransaction(txArgs *external.ArgsCreateTransaction) (*transaction.Transaction, []byte, error) ValidateTransaction(tx *transaction.Transaction) error ValidateTransactionForSimulation(tx *transaction.Transaction, bypassSignature bool) error SendBulkTransactions([]*transaction.Transaction) (uint64, error) diff --git a/integrationTests/mock/cryptoComponentsStub.go b/integrationTests/mock/cryptoComponentsStub.go index 9d927d8d33a..53abe25fe9b 100644 --- a/integrationTests/mock/cryptoComponentsStub.go +++ b/integrationTests/mock/cryptoComponentsStub.go @@ -14,10 +14,10 @@ import ( // CryptoComponentsStub - type CryptoComponentsStub struct { PubKey crypto.PublicKey + PublicKeyCalled func() crypto.PublicKey PrivKey crypto.PrivateKey P2pPubKey crypto.PublicKey P2pPrivKey crypto.PrivateKey - PrivKeyBytes []byte PubKeyBytes []byte PubKeyString string BlockSig crypto.SingleSigner @@ -31,6 +31,7 @@ type CryptoComponentsStub struct { MsgSigVerifier vm.MessageSignVerifier ManagedPeersHolderField common.ManagedPeersHolder KeysHandlerField consensus.KeysHandler + KeysHandlerCalled func() consensus.KeysHandler SigHandler consensus.SigningHandler mutMultiSig sync.RWMutex } @@ -52,6 +53,9 @@ func (ccs *CryptoComponentsStub) CheckSubcomponents() error { // PublicKey - func (ccs *CryptoComponentsStub) PublicKey() crypto.PublicKey { + if ccs.PublicKeyCalled != nil { + return ccs.PublicKeyCalled() + } return ccs.PubKey } @@ -80,11 +84,6 @@ func (ccs *CryptoComponentsStub) PublicKeyBytes() []byte { return ccs.PubKeyBytes } -// PrivateKeyBytes - -func (ccs *CryptoComponentsStub) PrivateKeyBytes() []byte { - return ccs.PrivKeyBytes -} - // BlockSigner - func (ccs *CryptoComponentsStub) BlockSigner() crypto.SingleSigner { return ccs.BlockSig @@ -169,6 +168,9 @@ func (ccs *CryptoComponentsStub) ManagedPeersHolder() common.ManagedPeersHolder // KeysHandler - func (ccs *CryptoComponentsStub) KeysHandler() consensus.KeysHandler { + if ccs.KeysHandlerCalled != nil { + return ccs.KeysHandlerCalled() + } return ccs.KeysHandlerField } @@ -180,7 
+182,6 @@ func (ccs *CryptoComponentsStub) Clone() interface{} { PrivKey: ccs.PrivKey, P2pPrivKey: ccs.P2pPrivKey, PubKeyString: ccs.PubKeyString, - PrivKeyBytes: ccs.PrivKeyBytes, PubKeyBytes: ccs.PubKeyBytes, BlockSig: ccs.BlockSig, TxSig: ccs.TxSig, diff --git a/integrationTests/mock/dataComponentsStub.go b/integrationTests/mock/dataComponentsStub.go index cd06d64ddb2..d23f6b9080b 100644 --- a/integrationTests/mock/dataComponentsStub.go +++ b/integrationTests/mock/dataComponentsStub.go @@ -42,10 +42,11 @@ func (dcs *DataComponentsStub) Blockchain() data.ChainHandler { } // SetBlockchain - -func (dcs *DataComponentsStub) SetBlockchain(chain data.ChainHandler) { +func (dcs *DataComponentsStub) SetBlockchain(chain data.ChainHandler) error { dcs.mutDcm.Lock() dcs.BlockChain = chain dcs.mutDcm.Unlock() + return nil } // StorageService - diff --git a/integrationTests/mock/feeHandlerStub.go b/integrationTests/mock/feeHandlerStub.go deleted file mode 100644 index 88d0ea66f93..00000000000 --- a/integrationTests/mock/feeHandlerStub.go +++ /dev/null @@ -1,235 +0,0 @@ -package mock - -import ( - "math/big" - - "github.com/multiversx/mx-chain-core-go/data" -) - -// FeeHandlerStub - -type FeeHandlerStub struct { - SetMaxGasLimitPerBlockCalled func(maxGasLimitPerBlock uint64) - SetMinGasPriceCalled func(minGasPrice uint64) - SetMinGasLimitCalled func(minGasLimit uint64) - MaxGasLimitPerBlockCalled func() uint64 - MaxGasLimitPerMiniBlockCalled func() uint64 - MaxGasLimitPerBlockForSafeCrossShardCalled func() uint64 - MaxGasLimitPerMiniBlockForSafeCrossShardCalled func() uint64 - MaxGasLimitPerTxCalled func() uint64 - ComputeGasLimitCalled func(tx data.TransactionWithFeeHandler) uint64 - ComputeMoveBalanceFeeCalled func(tx data.TransactionWithFeeHandler) *big.Int - ComputeTxFeeCalled func(tx data.TransactionWithFeeHandler) *big.Int - CheckValidityTxValuesCalled func(tx data.TransactionWithFeeHandler) error - DeveloperPercentageCalled func() float64 - MinGasPriceCalled func() uint64 - GasPriceModifierCalled func() float64 - ComputeFeeForProcessingCalled func(tx data.TransactionWithFeeHandler, gasToUse uint64) *big.Int - GenesisTotalSupplyCalled func() *big.Int - SplitTxGasInCategoriesCalled func(tx data.TransactionWithFeeHandler) (uint64, uint64) - GasPriceForProcessingCalled func(tx data.TransactionWithFeeHandler) uint64 - GasPriceForMoveCalled func(tx data.TransactionWithFeeHandler) uint64 - MinGasPriceForProcessingCalled func() uint64 - ComputeGasUsedAndFeeBasedOnRefundValueCalled func(tx data.TransactionWithFeeHandler, refundValue *big.Int) (uint64, *big.Int) - ComputeTxFeeBasedOnGasUsedCalled func(tx data.TransactionWithFeeHandler, gasUsed uint64) *big.Int - ComputeGasLimitBasedOnBalanceCalled func(tx data.TransactionWithFeeHandler, balance *big.Int) (uint64, error) -} - -// ComputeGasLimitBasedOnBalance - -func (fhs *FeeHandlerStub) ComputeGasLimitBasedOnBalance(tx data.TransactionWithFeeHandler, balance *big.Int) (uint64, error) { - if fhs.ComputeGasLimitBasedOnBalanceCalled != nil { - return fhs.ComputeGasLimitBasedOnBalanceCalled(tx, balance) - } - return 0, nil -} - -// ComputeFeeForProcessing - -func (fhs *FeeHandlerStub) ComputeFeeForProcessing(tx data.TransactionWithFeeHandler, gasToUse uint64) *big.Int { - if fhs.ComputeFeeForProcessingCalled != nil { - return fhs.ComputeFeeForProcessingCalled(tx, gasToUse) - } - return big.NewInt(0) -} - -// GasPriceModifier - -func (fhs *FeeHandlerStub) GasPriceModifier() float64 { - if fhs.GasPriceModifierCalled != nil { - return 
fhs.GasPriceModifierCalled() - } - return 1.0 -} - -// MinGasPrice - -func (fhs *FeeHandlerStub) MinGasPrice() uint64 { - if fhs.MinGasPriceCalled != nil { - return fhs.MinGasPriceCalled() - } - return 0 -} - -// MinGasLimit will return min gas limit -func (fhs *FeeHandlerStub) MinGasLimit() uint64 { - return 0 -} - -// DeveloperPercentage - -func (fhs *FeeHandlerStub) DeveloperPercentage() float64 { - if fhs.DeveloperPercentageCalled != nil { - return fhs.DeveloperPercentageCalled() - } - - return 0.0 -} - -// GasPerDataByte - -func (fhs *FeeHandlerStub) GasPerDataByte() uint64 { - return 0 -} - -// SetMaxGasLimitPerBlock - -func (fhs *FeeHandlerStub) SetMaxGasLimitPerBlock(maxGasLimitPerBlock uint64) { - fhs.SetMaxGasLimitPerBlockCalled(maxGasLimitPerBlock) -} - -// SetMinGasPrice - -func (fhs *FeeHandlerStub) SetMinGasPrice(minGasPrice uint64) { - fhs.SetMinGasPriceCalled(minGasPrice) -} - -// SetMinGasLimit - -func (fhs *FeeHandlerStub) SetMinGasLimit(minGasLimit uint64) { - fhs.SetMinGasLimitCalled(minGasLimit) -} - -// MaxGasLimitPerBlock - -func (fhs *FeeHandlerStub) MaxGasLimitPerBlock(uint32) uint64 { - if fhs.MaxGasLimitPerBlockCalled != nil { - return fhs.MaxGasLimitPerBlockCalled() - } - return 0 -} - -// MaxGasLimitPerMiniBlock - -func (fhs *FeeHandlerStub) MaxGasLimitPerMiniBlock(uint32) uint64 { - if fhs.MaxGasLimitPerMiniBlockCalled != nil { - return fhs.MaxGasLimitPerMiniBlockCalled() - } - return 0 -} - -// MaxGasLimitPerBlockForSafeCrossShard - -func (fhs *FeeHandlerStub) MaxGasLimitPerBlockForSafeCrossShard() uint64 { - if fhs.MaxGasLimitPerBlockForSafeCrossShardCalled != nil { - return fhs.MaxGasLimitPerBlockForSafeCrossShardCalled() - } - return 0 -} - -// MaxGasLimitPerMiniBlockForSafeCrossShard - -func (fhs *FeeHandlerStub) MaxGasLimitPerMiniBlockForSafeCrossShard() uint64 { - if fhs.MaxGasLimitPerMiniBlockForSafeCrossShardCalled != nil { - return fhs.MaxGasLimitPerMiniBlockForSafeCrossShardCalled() - } - return 0 -} - -// MaxGasLimitPerTx - -func (fhs *FeeHandlerStub) MaxGasLimitPerTx() uint64 { - if fhs.MaxGasLimitPerTxCalled != nil { - return fhs.MaxGasLimitPerTxCalled() - } - return 0 -} - -// ComputeGasLimit - -func (fhs *FeeHandlerStub) ComputeGasLimit(tx data.TransactionWithFeeHandler) uint64 { - if fhs.ComputeGasLimitCalled != nil { - return fhs.ComputeGasLimitCalled(tx) - } - return 0 -} - -// ComputeMoveBalanceFee - -func (fhs *FeeHandlerStub) ComputeMoveBalanceFee(tx data.TransactionWithFeeHandler) *big.Int { - if fhs.ComputeMoveBalanceFeeCalled != nil { - return fhs.ComputeMoveBalanceFeeCalled(tx) - } - return big.NewInt(0) -} - -// ComputeTxFee - -func (fhs *FeeHandlerStub) ComputeTxFee(tx data.TransactionWithFeeHandler) *big.Int { - if fhs.ComputeTxFeeCalled != nil { - return fhs.ComputeTxFeeCalled(tx) - } - return big.NewInt(0) -} - -// GenesisTotalSupply - -func (fhs *FeeHandlerStub) GenesisTotalSupply() *big.Int { - if fhs.GenesisTotalSupplyCalled != nil { - return fhs.GenesisTotalSupplyCalled() - } - - return big.NewInt(0) -} - -// CheckValidityTxValues - -func (fhs *FeeHandlerStub) CheckValidityTxValues(tx data.TransactionWithFeeHandler) error { - if fhs.CheckValidityTxValuesCalled != nil { - return fhs.CheckValidityTxValuesCalled(tx) - } - return nil -} - -// SplitTxGasInCategories - -func (fhs *FeeHandlerStub) SplitTxGasInCategories(tx data.TransactionWithFeeHandler) (uint64, uint64) { - if fhs.SplitTxGasInCategoriesCalled != nil { - return fhs.SplitTxGasInCategoriesCalled(tx) - } - return 0, 0 -} - -// GasPriceForProcessing - -func (fhs 
*FeeHandlerStub) GasPriceForProcessing(tx data.TransactionWithFeeHandler) uint64 { - if fhs.GasPriceForProcessingCalled != nil { - return fhs.GasPriceForProcessingCalled(tx) - } - return 0 -} - -// GasPriceForMove - -func (fhs *FeeHandlerStub) GasPriceForMove(tx data.TransactionWithFeeHandler) uint64 { - if fhs.GasPriceForMoveCalled != nil { - return fhs.GasPriceForMoveCalled(tx) - } - return 0 -} - -// MinGasPriceForProcessing - -func (fhs *FeeHandlerStub) MinGasPriceForProcessing() uint64 { - if fhs.MinGasPriceForProcessingCalled != nil { - return fhs.MinGasPriceForProcessingCalled() - } - return 0 -} - -// ComputeGasUsedAndFeeBasedOnRefundValue - -func (fhs *FeeHandlerStub) ComputeGasUsedAndFeeBasedOnRefundValue(tx data.TransactionWithFeeHandler, refundValue *big.Int) (uint64, *big.Int) { - if fhs.ComputeGasUsedAndFeeBasedOnRefundValueCalled != nil { - return fhs.ComputeGasUsedAndFeeBasedOnRefundValueCalled(tx, refundValue) - } - return 0, big.NewInt(0) -} - -// ComputeTxFeeBasedOnGasUsed - -func (fhs *FeeHandlerStub) ComputeTxFeeBasedOnGasUsed(tx data.TransactionWithFeeHandler, gasUsed uint64) *big.Int { - if fhs.ComputeTxFeeBasedOnGasUsedCalled != nil { - return fhs.ComputeTxFeeBasedOnGasUsedCalled(tx, gasUsed) - } - return big.NewInt(0) -} - -// IsInterfaceNil returns true if there is no value under the interface -func (fhs *FeeHandlerStub) IsInterfaceNil() bool { - return fhs == nil -} diff --git a/integrationTests/mock/headersCacherStub.go b/integrationTests/mock/headersCacherStub.go index 391db3c1c1f..153988a6b13 100644 --- a/integrationTests/mock/headersCacherStub.go +++ b/integrationTests/mock/headersCacherStub.go @@ -14,10 +14,35 @@ type HeadersCacherStub struct { GetHeaderByNonceAndShardIdCalled func(hdrNonce uint64, shardId uint32) ([]data.HeaderHandler, [][]byte, error) GetHeaderByHashCalled func(hash []byte) (data.HeaderHandler, error) ClearCalled func() - RegisterHandlerCalled func(handler func(key []byte, value interface{})) KeysCalled func(shardId uint32) []uint64 LenCalled func() int MaxSizeCalled func() int + RegisterHandlerCalled func(handler func(headerHandler data.HeaderHandler, headerHash []byte)) + GetNumHeadersCalled func(shardId uint32) int + NoncesCalled func(shardId uint32) []uint64 +} + +// RegisterHandler - +func (hcs *HeadersCacherStub) RegisterHandler(handler func(headerHandler data.HeaderHandler, headerHash []byte)) { + if hcs.RegisterHandlerCalled != nil { + hcs.RegisterHandlerCalled(handler) + } +} + +// Nonces - +func (hcs *HeadersCacherStub) Nonces(shardId uint32) []uint64 { + if hcs.NoncesCalled != nil { + return hcs.NoncesCalled(shardId) + } + return nil +} + +// GetNumHeaders - +func (hcs *HeadersCacherStub) GetNumHeaders(shardId uint32) int { + if hcs.GetNumHeadersCalled != nil { + return hcs.GetNumHeadersCalled(shardId) + } + return 0 } // AddHeader - @@ -64,13 +89,6 @@ func (hcs *HeadersCacherStub) Clear() { } } -// RegisterHandler - -func (hcs *HeadersCacherStub) RegisterHandler(handler func(key []byte, value interface{})) { - if hcs.RegisterHandlerCalled != nil { - hcs.RegisterHandlerCalled(handler) - } -} - // Keys - func (hcs *HeadersCacherStub) Keys(shardId uint32) []uint64 { if hcs.KeysCalled != nil { diff --git a/integrationTests/mock/networkComponentsMock.go b/integrationTests/mock/networkComponentsMock.go index c8d27c4523b..573a4ae7f66 100644 --- a/integrationTests/mock/networkComponentsMock.go +++ b/integrationTests/mock/networkComponentsMock.go @@ -9,6 +9,7 @@ import ( // NetworkComponentsStub - type NetworkComponentsStub struct { 
Messenger p2p.Messenger + MessengerCalled func() p2p.Messenger InputAntiFlood factory.P2PAntifloodHandler OutputAntiFlood factory.P2PAntifloodHandler PeerBlackList process.PeerBlackListCacher @@ -45,6 +46,9 @@ func (ncs *NetworkComponentsStub) CheckSubcomponents() error { // NetworkMessenger - func (ncs *NetworkComponentsStub) NetworkMessenger() p2p.Messenger { + if ncs.MessengerCalled != nil { + return ncs.MessengerCalled() + } return ncs.Messenger } diff --git a/integrationTests/mock/p2pAntifloodHandlerStub.go b/integrationTests/mock/p2pAntifloodHandlerStub.go index bda3da406d5..c181d10909d 100644 --- a/integrationTests/mock/p2pAntifloodHandlerStub.go +++ b/integrationTests/mock/p2pAntifloodHandlerStub.go @@ -16,81 +16,85 @@ type P2PAntifloodHandlerStub struct { SetDebuggerCalled func(debugger process.AntifloodDebugger) error BlacklistPeerCalled func(peer core.PeerID, reason string, duration time.Duration) IsOriginatorEligibleForTopicCalled func(pid core.PeerID, topic string) error + SetPeerValidatorMapperCalled func(validatorMapper process.PeerValidatorMapper) error } // CanProcessMessage - -func (p2pahs *P2PAntifloodHandlerStub) CanProcessMessage(message p2p.MessageP2P, fromConnectedPeer core.PeerID) error { - if p2pahs.CanProcessMessageCalled == nil { +func (stub *P2PAntifloodHandlerStub) CanProcessMessage(message p2p.MessageP2P, fromConnectedPeer core.PeerID) error { + if stub.CanProcessMessageCalled == nil { return nil } - return p2pahs.CanProcessMessageCalled(message, fromConnectedPeer) + return stub.CanProcessMessageCalled(message, fromConnectedPeer) } // IsOriginatorEligibleForTopic - -func (p2pahs *P2PAntifloodHandlerStub) IsOriginatorEligibleForTopic(pid core.PeerID, topic string) error { - if p2pahs.IsOriginatorEligibleForTopicCalled != nil { - return p2pahs.IsOriginatorEligibleForTopicCalled(pid, topic) +func (stub *P2PAntifloodHandlerStub) IsOriginatorEligibleForTopic(pid core.PeerID, topic string) error { + if stub.IsOriginatorEligibleForTopicCalled != nil { + return stub.IsOriginatorEligibleForTopicCalled(pid, topic) } return nil } // CanProcessMessagesOnTopic - -func (p2pahs *P2PAntifloodHandlerStub) CanProcessMessagesOnTopic(peer core.PeerID, topic string, numMessages uint32, totalSize uint64, sequence []byte) error { - if p2pahs.CanProcessMessagesOnTopicCalled == nil { +func (stub *P2PAntifloodHandlerStub) CanProcessMessagesOnTopic(peer core.PeerID, topic string, numMessages uint32, totalSize uint64, sequence []byte) error { + if stub.CanProcessMessagesOnTopicCalled == nil { return nil } - return p2pahs.CanProcessMessagesOnTopicCalled(peer, topic, numMessages, totalSize, sequence) + return stub.CanProcessMessagesOnTopicCalled(peer, topic, numMessages, totalSize, sequence) } // ApplyConsensusSize - -func (p2pahs *P2PAntifloodHandlerStub) ApplyConsensusSize(size int) { - if p2pahs.ApplyConsensusSizeCalled != nil { - p2pahs.ApplyConsensusSizeCalled(size) +func (stub *P2PAntifloodHandlerStub) ApplyConsensusSize(size int) { + if stub.ApplyConsensusSizeCalled != nil { + stub.ApplyConsensusSizeCalled(size) } } // SetDebugger - -func (p2pahs *P2PAntifloodHandlerStub) SetDebugger(debugger process.AntifloodDebugger) error { - if p2pahs.SetDebuggerCalled != nil { - return p2pahs.SetDebuggerCalled(debugger) +func (stub *P2PAntifloodHandlerStub) SetDebugger(debugger process.AntifloodDebugger) error { + if stub.SetDebuggerCalled != nil { + return stub.SetDebuggerCalled(debugger) } return nil } // BlacklistPeer - -func (p2pahs *P2PAntifloodHandlerStub) BlacklistPeer(peer core.PeerID, 
reason string, duration time.Duration) { - if p2pahs.BlacklistPeerCalled != nil { - p2pahs.BlacklistPeerCalled(peer, reason, duration) +func (stub *P2PAntifloodHandlerStub) BlacklistPeer(peer core.PeerID, reason string, duration time.Duration) { + if stub.BlacklistPeerCalled != nil { + stub.BlacklistPeerCalled(peer, reason, duration) } } // ResetForTopic - -func (p2pahs *P2PAntifloodHandlerStub) ResetForTopic(_ string) { +func (stub *P2PAntifloodHandlerStub) ResetForTopic(_ string) { } // SetMaxMessagesForTopic - -func (p2pahs *P2PAntifloodHandlerStub) SetMaxMessagesForTopic(_ string, _ uint32) { +func (stub *P2PAntifloodHandlerStub) SetMaxMessagesForTopic(_ string, _ uint32) { } // SetPeerValidatorMapper - -func (p2pahs *P2PAntifloodHandlerStub) SetPeerValidatorMapper(_ process.PeerValidatorMapper) error { +func (stub *P2PAntifloodHandlerStub) SetPeerValidatorMapper(validatorMapper process.PeerValidatorMapper) error { + if stub.SetPeerValidatorMapperCalled != nil { + return stub.SetPeerValidatorMapperCalled(validatorMapper) + } return nil } // SetTopicsForAll - -func (p2pahs *P2PAntifloodHandlerStub) SetTopicsForAll(_ ...string) { +func (stub *P2PAntifloodHandlerStub) SetTopicsForAll(_ ...string) { } // Close - -func (p2pahs *P2PAntifloodHandlerStub) Close() error { +func (stub *P2PAntifloodHandlerStub) Close() error { return nil } // IsInterfaceNil - -func (p2pahs *P2PAntifloodHandlerStub) IsInterfaceNil() bool { - return p2pahs == nil +func (stub *P2PAntifloodHandlerStub) IsInterfaceNil() bool { + return stub == nil } diff --git a/integrationTests/mock/peerShardMapperStub.go b/integrationTests/mock/peerShardMapperStub.go index 8e2bc50dbfc..b32a1045c7b 100644 --- a/integrationTests/mock/peerShardMapperStub.go +++ b/integrationTests/mock/peerShardMapperStub.go @@ -8,6 +8,14 @@ type PeerShardMapperStub struct { UpdatePeerIDPublicKeyPairCalled func(pid core.PeerID, pk []byte) PutPeerIdShardIdCalled func(pid core.PeerID, shardID uint32) PutPeerIdSubTypeCalled func(pid core.PeerID, peerSubType core.P2PPeerSubType) + UpdatePeerIDInfoCalled func(pid core.PeerID, pk []byte, shardID uint32) +} + +// UpdatePeerIDInfo - +func (psms *PeerShardMapperStub) UpdatePeerIDInfo(pid core.PeerID, pk []byte, shardID uint32) { + if psms.UpdatePeerIDInfoCalled != nil { + psms.UpdatePeerIDInfoCalled(pid, pk, shardID) + } } // UpdatePeerIDPublicKeyPair - diff --git a/integrationTests/mock/processComponentsStub.go b/integrationTests/mock/processComponentsStub.go index 6bb3810d838..07da0ca9d87 100644 --- a/integrationTests/mock/processComponentsStub.go +++ b/integrationTests/mock/processComponentsStub.go @@ -17,11 +17,14 @@ import ( // ProcessComponentsStub - type ProcessComponentsStub struct { NodesCoord nodesCoordinator.NodesCoordinator + NodesCoordinatorCalled func() nodesCoordinator.NodesCoordinator ShardCoord sharding.Coordinator + ShardCoordinatorCalled func() sharding.Coordinator IntContainer process.InterceptorsContainer ResContainer dataRetriever.ResolversContainer ReqFinder dataRetriever.RequestersFinder RoundHandlerField consensus.RoundHandler + RoundHandlerCalled func() consensus.RoundHandler EpochTrigger epochStart.TriggerHandler EpochNotifier factory.EpochStartNotifier ForkDetect process.ForkDetector @@ -73,11 +76,17 @@ func (pcs *ProcessComponentsStub) CheckSubcomponents() error { // NodesCoordinator - func (pcs *ProcessComponentsStub) NodesCoordinator() nodesCoordinator.NodesCoordinator { + if pcs.NodesCoordinatorCalled != nil { + return pcs.NodesCoordinatorCalled() + } return pcs.NodesCoord } // 
ShardCoordinator - func (pcs *ProcessComponentsStub) ShardCoordinator() sharding.Coordinator { + if pcs.ShardCoordinatorCalled != nil { + return pcs.ShardCoordinatorCalled() + } return pcs.ShardCoord } @@ -98,6 +107,9 @@ func (pcs *ProcessComponentsStub) RequestersFinder() dataRetriever.RequestersFin // RoundHandler - func (pcs *ProcessComponentsStub) RoundHandler() consensus.RoundHandler { + if pcs.RoundHandlerCalled != nil { + return pcs.RoundHandlerCalled() + } return pcs.RoundHandlerField } diff --git a/integrationTests/mock/statusComponentsStub.go b/integrationTests/mock/statusComponentsStub.go index 8d1fd5465c8..ec6e211286a 100644 --- a/integrationTests/mock/statusComponentsStub.go +++ b/integrationTests/mock/statusComponentsStub.go @@ -1,7 +1,6 @@ package mock import ( - "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-go/common/statistics" "github.com/multiversx/mx-chain-go/outport" ) @@ -10,7 +9,6 @@ import ( type StatusComponentsStub struct { Outport outport.OutportHandler SoftwareVersionCheck statistics.SoftwareVersionChecker - AppStatusHandler core.AppStatusHandler } // Create - diff --git a/integrationTests/mock/storageManagerStub.go b/integrationTests/mock/storageManagerStub.go deleted file mode 100644 index 83b88c88abb..00000000000 --- a/integrationTests/mock/storageManagerStub.go +++ /dev/null @@ -1,91 +0,0 @@ -package mock - -import ( - "github.com/multiversx/mx-chain-go/common" -) - -// StorageManagerStub - -type StorageManagerStub struct { - DatabaseCalled func() common.DBWriteCacher - TakeSnapshotCalled func([]byte) - SetCheckpointCalled func([]byte) - GetDbThatContainsHashCalled func([]byte) common.DBWriteCacher - GetSnapshotThatContainsHashCalled func(rootHash []byte) common.SnapshotDbHandler - IsPruningEnabledCalled func() bool - IsPruningBlockedCalled func() bool - EnterPruningBufferingModeCalled func() - ExitPruningBufferingModeCalled func() - IsInterfaceNilCalled func() bool -} - -// Database - -func (sms *StorageManagerStub) Database() common.DBWriteCacher { - if sms.DatabaseCalled != nil { - return sms.DatabaseCalled() - } - return nil -} - -// TakeSnapshot - -func (sms *StorageManagerStub) TakeSnapshot([]byte) { - -} - -// SetCheckpoint - -func (sms *StorageManagerStub) SetCheckpoint([]byte) { - -} - -// GetSnapshotThatContainsHash - -func (sms *StorageManagerStub) GetSnapshotThatContainsHash(d []byte) common.SnapshotDbHandler { - if sms.GetSnapshotThatContainsHashCalled != nil { - return sms.GetSnapshotThatContainsHashCalled(d) - } - - return nil -} - -// IsPruningEnabled - -func (sms *StorageManagerStub) IsPruningEnabled() bool { - if sms.IsPruningEnabledCalled != nil { - return sms.IsPruningEnabledCalled() - } - return false -} - -// IsPruningBlocked - -func (sms *StorageManagerStub) IsPruningBlocked() bool { - if sms.IsPruningBlockedCalled != nil { - return sms.IsPruningBlockedCalled() - } - return false -} - -// GetSnapshotDbBatchDelay - -func (sms *StorageManagerStub) GetSnapshotDbBatchDelay() int { - return 0 -} - -// Close - -func (sms *StorageManagerStub) Close() error { - return nil -} - -// EnterPruningBufferingMode - -func (sms *StorageManagerStub) EnterPruningBufferingMode() { - if sms.EnterPruningBufferingModeCalled != nil { - sms.EnterPruningBufferingModeCalled() - } -} - -// ExitPruningBufferingMode - -func (sms *StorageManagerStub) ExitPruningBufferingMode() { - if sms.ExitPruningBufferingModeCalled != nil { - sms.ExitPruningBufferingModeCalled() - } -} - -// IsInterfaceNil - -func (sms *StorageManagerStub) 
IsInterfaceNil() bool { - return sms == nil -} diff --git a/integrationTests/multiShard/endOfEpoch/startInEpoch/startInEpoch_test.go b/integrationTests/multiShard/endOfEpoch/startInEpoch/startInEpoch_test.go index d78a9d4145b..95622859072 100644 --- a/integrationTests/multiShard/endOfEpoch/startInEpoch/startInEpoch_test.go +++ b/integrationTests/multiShard/endOfEpoch/startInEpoch/startInEpoch_test.go @@ -11,6 +11,7 @@ import ( "github.com/multiversx/mx-chain-core-go/data/block" "github.com/multiversx/mx-chain-core-go/data/endProcess" "github.com/multiversx/mx-chain-core-go/data/typeConverters/uint64ByteSlice" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/epochStart/bootstrap" @@ -286,6 +287,7 @@ func testNodeStartsInEpoch(t *testing.T, shardID uint32, expectedHighestRound ui CurrentEpoch: 0, StorageType: factory.ProcessStorageService, CreateTrieEpochRootHashStorer: false, + NodeProcessingMode: common.Normal, SnapshotsEnabled: false, ManagedPeersHolder: &testscommon.ManagedPeersHolderStub{}, }, diff --git a/integrationTests/multiShard/hardFork/hardFork_test.go b/integrationTests/multiShard/hardFork/hardFork_test.go index 7caeeca4704..929d6afc1b9 100644 --- a/integrationTests/multiShard/hardFork/hardFork_test.go +++ b/integrationTests/multiShard/hardFork/hardFork_test.go @@ -435,13 +435,16 @@ func hardForkImport( OwnerAddress: "aaaaaa", }, GovernanceSystemSCConfig: config.GovernanceSystemSCConfig{ + V1: config.GovernanceSystemSCConfigV1{ + ProposalCost: "500", + }, Active: config.GovernanceSystemSCConfigActive{ ProposalCost: "500", - MinQuorum: "50", - MinPassThreshold: "50", - MinVetoThreshold: "50", + MinQuorum: 0.5, + MinPassThreshold: 0.5, + MinVetoThreshold: 0.5, }, - FirstWhitelistedAddress: integrationTests.DelegationManagerConfigChangeAddress, + ChangeConfigAddress: integrationTests.DelegationManagerConfigChangeAddress, }, StakingSystemSCConfig: config.StakingSystemSCConfig{ GenesisNodePrice: "1000", diff --git a/integrationTests/multiShard/relayedTx/common.go b/integrationTests/multiShard/relayedTx/common.go index 0b720d20cbc..f875dbb6f8b 100644 --- a/integrationTests/multiShard/relayedTx/common.go +++ b/integrationTests/multiShard/relayedTx/common.go @@ -11,7 +11,6 @@ import ( "github.com/multiversx/mx-chain-go/integrationTests" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/state" - "github.com/multiversx/mx-chain-go/testscommon/txDataBuilder" ) // CreateGeneralSetupForRelayTxTest will create the general setup for relayed transactions @@ -115,7 +114,7 @@ func createUserTx( ChainID: integrationTests.ChainID, Version: integrationTests.MinTransactionVersion, } - txBuff, _ := tx.GetDataForSigning(integrationTests.TestAddressPubkeyConverter, integrationTests.TestTxSignMarshalizer) + txBuff, _ := tx.GetDataForSigning(integrationTests.TestAddressPubkeyConverter, integrationTests.TestTxSignMarshalizer, integrationTests.TestTxSignHasher) tx.Signature, _ = player.SingleSigner.Sign(player.SkTxSign, txBuff) player.Nonce++ return tx @@ -142,7 +141,7 @@ func createRelayedTx( gasLimit := economicsFee.ComputeGasLimit(tx) tx.GasLimit = userTx.GasLimit + gasLimit - txBuff, _ := tx.GetDataForSigning(integrationTests.TestAddressPubkeyConverter, integrationTests.TestTxSignMarshalizer) + txBuff, _ := tx.GetDataForSigning(integrationTests.TestAddressPubkeyConverter, integrationTests.TestTxSignMarshalizer, integrationTests.TestTxSignHasher) 
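From here on, the recurring mechanical change in the test files is that tx.GetDataForSigning takes integrationTests.TestTxSignHasher as a third argument, alongside the pubkey converter and the tx-sign marshaller. A hedged sketch of the updated signing flow (signUserTx is illustrative and does not exist in this repo; the argument types are assumed to match the bumped mx-chain-core-go):

package example

import (
	"github.com/multiversx/mx-chain-core-go/data/transaction"
	"github.com/multiversx/mx-chain-go/integrationTests"
)

// signUserTx serializes the transaction for signing with the new
// three-argument GetDataForSigning, then signs the resulting buffer.
func signUserTx(tx *transaction.Transaction, player *integrationTests.TestWalletAccount) error {
	txBuff, err := tx.GetDataForSigning(
		integrationTests.TestAddressPubkeyConverter,
		integrationTests.TestTxSignMarshalizer,
		integrationTests.TestTxSignHasher, // the new third argument
	)
	if err != nil {
		return err
	}

	tx.Signature, err = player.SingleSigner.Sign(player.SkTxSign, txBuff)
	return err
}

Passing the hasher lets the signed payload be a digest rather than the full serialized transaction, which is why every call site below changes identically.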
     tx.Signature, _ = relayer.SingleSigner.Sign(relayer.SkTxSign, txBuff)
     relayer.Nonce++
     txFee := economicsFee.ComputeTxFee(tx)
@@ -158,28 +157,20 @@ func createRelayedTxV2(
     userTx *transaction.Transaction,
     gasLimitForUserTx uint64,
 ) *transaction.Transaction {
-    dataBuilder := txDataBuilder.NewBuilder()
-    txData := dataBuilder.
-        Func(core.RelayedTransactionV2).
-        Bytes(userTx.RcvAddr).
-        Int64(int64(userTx.Nonce)).
-        Bytes(userTx.Data).
-        Bytes(userTx.Signature)
-
     tx := &transaction.Transaction{
         Nonce:    relayer.Nonce,
         Value:    big.NewInt(0).Set(userTx.Value),
         RcvAddr:  userTx.SndAddr,
         SndAddr:  relayer.Address,
         GasPrice: integrationTests.MinTxGasPrice,
-        Data:     txData.ToBytes(),
+        Data:     integrationTests.PrepareRelayedTxDataV2(userTx),
         ChainID:  userTx.ChainID,
         Version:  userTx.Version,
     }
     gasLimit := economicsFee.ComputeGasLimit(tx)
     tx.GasLimit = gasLimitForUserTx + gasLimit
-    txBuff, _ := tx.GetDataForSigning(integrationTests.TestAddressPubkeyConverter, integrationTests.TestTxSignMarshalizer)
+    txBuff, _ := tx.GetDataForSigning(integrationTests.TestAddressPubkeyConverter, integrationTests.TestTxSignMarshalizer, integrationTests.TestTxSignHasher)
     tx.Signature, _ = relayer.SingleSigner.Sign(relayer.SkTxSign, txBuff)
     relayer.Nonce++
     txFee := economicsFee.ComputeTxFee(tx)
diff --git a/integrationTests/multiShard/transaction/txRouting/txRouting_test.go b/integrationTests/multiShard/transaction/txRouting/txRouting_test.go
index 0fc14b445ae..b0897f2a5ef 100644
--- a/integrationTests/multiShard/transaction/txRouting/txRouting_test.go
+++ b/integrationTests/multiShard/transaction/txRouting/txRouting_test.go
@@ -147,7 +147,7 @@ func generateTx(sender crypto.PrivateKey, receiver crypto.PublicKey, nonce uint6
         Signature: nil,
         Version:   integrationTests.MinTransactionVersion,
     }
 
-    marshalizedTxBeforeSigning, _ := tx.GetDataForSigning(integrationTests.TestAddressPubkeyConverter, integrationTests.TestTxSignMarshalizer)
+    marshalizedTxBeforeSigning, _ := tx.GetDataForSigning(integrationTests.TestAddressPubkeyConverter, integrationTests.TestTxSignMarshalizer, integrationTests.TestTxSignHasher)
     signer := integrationTests.TestSingleSigner
     signature, _ := signer.Sign(sender, marshalizedTxBeforeSigning)
diff --git a/integrationTests/multiShard/txScenarios/common.go b/integrationTests/multiShard/txScenarios/common.go
index 7b92107b75b..d720b9d8df5 100644
--- a/integrationTests/multiShard/txScenarios/common.go
+++ b/integrationTests/multiShard/txScenarios/common.go
@@ -109,7 +109,7 @@ func createUserTx(
         ChainID:  integrationTests.ChainID,
         Version:  integrationTests.MinTransactionVersion,
     }
-    txBuff, _ := tx.GetDataForSigning(integrationTests.TestAddressPubkeyConverter, integrationTests.TestTxSignMarshalizer)
+    txBuff, _ := tx.GetDataForSigning(integrationTests.TestAddressPubkeyConverter, integrationTests.TestTxSignMarshalizer, integrationTests.TestTxSignHasher)
     tx.Signature, _ = player.SingleSigner.Sign(player.SkTxSign, txBuff)
     player.Nonce++
     return tx
diff --git a/integrationTests/singleShard/transaction/interceptedBulkTx/interceptedBulkTx_test.go b/integrationTests/singleShard/transaction/interceptedBulkTx/interceptedBulkTx_test.go
index 1a6b0d2e501..3e8a63e1b33 100644
--- a/integrationTests/singleShard/transaction/interceptedBulkTx/interceptedBulkTx_test.go
+++ b/integrationTests/singleShard/transaction/interceptedBulkTx/interceptedBulkTx_test.go
@@ -150,7 +150,7 @@ func TestNode_SendTransactionFromAnUnmintedAccountShouldReturnErrorAtApiLevel(t
         Version: integrationTests.MinTransactionVersion,
     }
 
-    txBuff, _ := tx.GetDataForSigning(integrationTests.TestAddressPubkeyConverter, integrationTests.TestTxSignMarshalizer)
+    txBuff, _ := tx.GetDataForSigning(integrationTests.TestAddressPubkeyConverter, integrationTests.TestTxSignMarshalizer, integrationTests.TestTxSignHasher)
     tx.Signature, _ = node.OwnAccount.SingleSigner.Sign(node.OwnAccount.SkTxSign, txBuff)
 
     err := node.Node.ValidateTransaction(tx)
diff --git a/integrationTests/singleShard/transaction/interceptedResolvedTx/interceptedResolvedTx_test.go b/integrationTests/singleShard/transaction/interceptedResolvedTx/interceptedResolvedTx_test.go
index 12b5294bd95..0de6313c58d 100644
--- a/integrationTests/singleShard/transaction/interceptedResolvedTx/interceptedResolvedTx_test.go
+++ b/integrationTests/singleShard/transaction/interceptedResolvedTx/interceptedResolvedTx_test.go
@@ -71,7 +71,7 @@ func TestNode_RequestInterceptTransactionWithMessengerAndWhitelist(t *testing.T)
         Version: integrationTests.MinTransactionVersion,
     }
 
-    txBuff, _ := tx.GetDataForSigning(integrationTests.TestAddressPubkeyConverter, integrationTests.TestTxSignMarshalizer)
+    txBuff, _ := tx.GetDataForSigning(integrationTests.TestAddressPubkeyConverter, integrationTests.TestTxSignMarshalizer, integrationTests.TestTxSignHasher)
     signer := integrationTests.TestSingleSigner
     tx.Signature, _ = signer.Sign(nRequester.OwnAccount.SkTxSign, txBuff)
     signedTxBuff, _ := integrationTests.TestMarshalizer.Marshal(&tx)
diff --git a/integrationTests/state/stateTrie/stateTrie_test.go b/integrationTests/state/stateTrie/stateTrie_test.go
index f71c42d8a85..b46575bdbce 100644
--- a/integrationTests/state/stateTrie/stateTrie_test.go
+++ b/integrationTests/state/stateTrie/stateTrie_test.go
@@ -1365,7 +1365,7 @@ func TestRollbackBlockAndCheckThatPruningIsCancelledOnAccountsTrie(t *testing.T)
     require.Nil(t, err)
 
     if !bytes.Equal(rootHash, rootHashOfRollbackedBlock) {
-        time.Sleep(time.Second * 3)
+        time.Sleep(time.Second * 6)
         err = shardNode.AccntState.RecreateTrie(rootHashOfRollbackedBlock)
         require.True(t, strings.Contains(err.Error(), trie.ErrKeyNotFound.Error()))
     }
diff --git a/integrationTests/state/stateTrieClose/stateTrieClose_test.go b/integrationTests/state/stateTrieClose/stateTrieClose_test.go
index ab18ce244b6..985f49c660a 100644
--- a/integrationTests/state/stateTrieClose/stateTrieClose_test.go
+++ b/integrationTests/state/stateTrieClose/stateTrieClose_test.go
@@ -9,6 +9,7 @@ import (
     "github.com/multiversx/mx-chain-core-go/core"
 
     "github.com/multiversx/mx-chain-go/common"
+    "github.com/multiversx/mx-chain-go/common/errChan"
     "github.com/multiversx/mx-chain-go/config"
    "github.com/multiversx/mx-chain-go/integrationTests"
     "github.com/multiversx/mx-chain-go/testscommon"
@@ -36,25 +37,25 @@ func TestPatriciaMerkleTrie_Close(t *testing.T) {
     rootHash, _ := tr.RootHash()
     leavesChannel1 := &common.TrieIteratorChannels{
         LeavesChan: make(chan core.KeyValueHolder, common.TrieLeavesChannelDefaultCapacity),
-        ErrChan:    make(chan error, 1),
+        ErrChan:    errChan.NewErrChanWrapper(),
     }
     _ = tr.GetAllLeavesOnChannel(leavesChannel1, context.Background(), rootHash, keyBuilder.NewDisabledKeyBuilder())
     time.Sleep(time.Second) // allow the go routine to start
     idx, _ := gc.Snapshot()
     diff := gc.DiffGoRoutines(idxInitial, idx)
     assert.True(t, len(diff) <= 1) // can be 0 on a fast running host
-    err := common.GetErrorFromChanNonBlocking(leavesChannel1.ErrChan)
+    err := leavesChannel1.ErrChan.ReadFromChanNonBlocking()
     assert.Nil(t, err)
 
     leavesChannel1 = &common.TrieIteratorChannels{
         LeavesChan: make(chan core.KeyValueHolder, common.TrieLeavesChannelDefaultCapacity),
-        ErrChan:    make(chan error, 1),
+        ErrChan:    errChan.NewErrChanWrapper(),
     }
     _ = tr.GetAllLeavesOnChannel(leavesChannel1, context.Background(), rootHash, keyBuilder.NewDisabledKeyBuilder())
     idx, _ = gc.Snapshot()
     diff = gc.DiffGoRoutines(idxInitial, idx)
     assert.True(t, len(diff) <= 2)
-    err = common.GetErrorFromChanNonBlocking(leavesChannel1.ErrChan)
+    err = leavesChannel1.ErrChan.ReadFromChanNonBlocking()
     assert.Nil(t, err)
 
     _ = tr.Update([]byte("god"), []byte("puppy"))
@@ -63,13 +64,13 @@ func TestPatriciaMerkleTrie_Close(t *testing.T) {
     rootHash, _ = tr.RootHash()
     leavesChannel1 = &common.TrieIteratorChannels{
         LeavesChan: make(chan core.KeyValueHolder, common.TrieLeavesChannelDefaultCapacity),
-        ErrChan:    make(chan error, 1),
+        ErrChan:    errChan.NewErrChanWrapper(),
     }
     _ = tr.GetAllLeavesOnChannel(leavesChannel1, context.Background(), rootHash, keyBuilder.NewDisabledKeyBuilder())
     idx, _ = gc.Snapshot()
     diff = gc.DiffGoRoutines(idxInitial, idx)
     assert.Equal(t, 3, len(diff), fmt.Sprintf("%v", diff))
-    err = common.GetErrorFromChanNonBlocking(leavesChannel1.ErrChan)
+    err = leavesChannel1.ErrChan.ReadFromChanNonBlocking()
     assert.Nil(t, err)
 
     _ = tr.Update([]byte("eggod"), []byte("cat"))
@@ -78,14 +79,14 @@ func TestPatriciaMerkleTrie_Close(t *testing.T) {
     rootHash, _ = tr.RootHash()
     leavesChannel2 := &common.TrieIteratorChannels{
         LeavesChan: make(chan core.KeyValueHolder, common.TrieLeavesChannelDefaultCapacity),
-        ErrChan:    make(chan error, 1),
+        ErrChan:    errChan.NewErrChanWrapper(),
     }
     _ = tr.GetAllLeavesOnChannel(leavesChannel2, context.Background(), rootHash, keyBuilder.NewDisabledKeyBuilder())
     time.Sleep(time.Second) // allow the go routine to start
     idx, _ = gc.Snapshot()
     diff = gc.DiffGoRoutines(idxInitial, idx)
     assert.True(t, len(diff) <= 4)
-    err = common.GetErrorFromChanNonBlocking(leavesChannel2.ErrChan)
+    err = leavesChannel2.ErrChan.ReadFromChanNonBlocking()
     assert.Nil(t, err)
 
     for range leavesChannel1.LeavesChan {
@@ -94,7 +95,7 @@ func TestPatriciaMerkleTrie_Close(t *testing.T) {
     idx, _ = gc.Snapshot()
     diff = gc.DiffGoRoutines(idxInitial, idx)
     assert.True(t, len(diff) <= 3)
-    err = common.GetErrorFromChanNonBlocking(leavesChannel1.ErrChan)
+    err = leavesChannel1.ErrChan.ReadFromChanNonBlocking()
     assert.Nil(t, err)
 
     for range leavesChannel2.LeavesChan {
@@ -103,7 +104,7 @@ func TestPatriciaMerkleTrie_Close(t *testing.T) {
     idx, _ = gc.Snapshot()
     diff = gc.DiffGoRoutines(idxInitial, idx)
     assert.True(t, len(diff) <= 2)
-    err = common.GetErrorFromChanNonBlocking(leavesChannel2.ErrChan)
+    err = leavesChannel2.ErrChan.ReadFromChanNonBlocking()
     assert.Nil(t, err)
 
     err = tr.Close()
diff --git a/integrationTests/state/stateTrieSync/stateTrieSync_test.go b/integrationTests/state/stateTrieSync/stateTrieSync_test.go
index 9462df96ff7..6ef9c6e5d9a 100644
--- a/integrationTests/state/stateTrieSync/stateTrieSync_test.go
+++ b/integrationTests/state/stateTrieSync/stateTrieSync_test.go
@@ -11,6 +11,7 @@ import (
     "github.com/multiversx/mx-chain-core-go/core"
     "github.com/multiversx/mx-chain-core-go/core/throttler"
     "github.com/multiversx/mx-chain-go/common"
+    "github.com/multiversx/mx-chain-go/common/errChan"
     "github.com/multiversx/mx-chain-go/dataRetriever"
     "github.com/multiversx/mx-chain-go/epochStart/notifier"
     "github.com/multiversx/mx-chain-go/integrationTests"
@@ -329,13 +330,13 @@ func testMultipleDataTriesSync(t *testing.T, numAccounts int, numDataTrieLeaves
     rootHash, _ := accState.RootHash()
     leavesChannel := &common.TrieIteratorChannels{
         LeavesChan: make(chan core.KeyValueHolder, common.TrieLeavesChannelDefaultCapacity),
-        ErrChan:    make(chan error, 1),
+        ErrChan:    errChan.NewErrChanWrapper(),
     }
     err = accState.GetAllLeaves(leavesChannel, context.Background(), rootHash)
     for range leavesChannel.LeavesChan {
     }
     require.Nil(t, err)
-    err = common.GetErrorFromChanNonBlocking(leavesChannel.ErrChan)
+    err = leavesChannel.ErrChan.ReadFromChanNonBlocking()
     require.Nil(t, err)
 
     requesterTrie := nRequester.TrieContainer.Get([]byte(dataRetriever.UserAccountsUnit.String()))
@@ -357,7 +358,7 @@ func testMultipleDataTriesSync(t *testing.T, numAccounts int, numDataTrieLeaves
     leavesChannel = &common.TrieIteratorChannels{
         LeavesChan: make(chan core.KeyValueHolder, common.TrieLeavesChannelDefaultCapacity),
-        ErrChan:    make(chan error, 1),
+        ErrChan:    errChan.NewErrChanWrapper(),
     }
     err = nRequester.AccntState.GetAllLeaves(leavesChannel, context.Background(), rootHash)
     assert.Nil(t, err)
@@ -365,7 +366,7 @@ func testMultipleDataTriesSync(t *testing.T, numAccounts int, numDataTrieLeaves
     for range leavesChannel.LeavesChan {
         numLeaves++
     }
-    err = common.GetErrorFromChanNonBlocking(leavesChannel.ErrChan)
+    err = leavesChannel.ErrChan.ReadFromChanNonBlocking()
     require.Nil(t, err)
     assert.Equal(t, numAccounts, numLeaves)
     checkAllDataTriesAreSynced(t, numDataTrieLeaves, requesterTrie, dataTrieRootHashes)
@@ -559,7 +560,7 @@ func addAccountsToState(t *testing.T, numAccounts int, numDataTrieLeaves int, ac
 func getNumLeaves(t *testing.T, tr common.Trie, rootHash []byte) int {
     leavesChannel := &common.TrieIteratorChannels{
         LeavesChan: make(chan core.KeyValueHolder, common.TrieLeavesChannelDefaultCapacity),
-        ErrChan:    make(chan error, 1),
+        ErrChan:    errChan.NewErrChanWrapper(),
     }
     err := tr.GetAllLeavesOnChannel(leavesChannel, context.Background(), rootHash, keyBuilder.NewDisabledKeyBuilder())
     require.Nil(t, err)
@@ -569,7 +570,7 @@ func getNumLeaves(t *testing.T, tr common.Trie, rootHash []byte) int {
         numLeaves++
     }
 
-    err = common.GetErrorFromChanNonBlocking(leavesChannel.ErrChan)
+    err = leavesChannel.ErrChan.ReadFromChanNonBlocking()
     require.Nil(t, err)
 
     return numLeaves
diff --git a/integrationTests/testInitializer.go b/integrationTests/testInitializer.go
index 7ff1c51bdb1..dac26a1b4be 100644
--- a/integrationTests/testInitializer.go
+++ b/integrationTests/testInitializer.go
@@ -56,10 +56,13 @@ import (
     "github.com/multiversx/mx-chain-go/storage/storageunit"
     "github.com/multiversx/mx-chain-go/testscommon"
     dataRetrieverMock "github.com/multiversx/mx-chain-go/testscommon/dataRetriever"
+    "github.com/multiversx/mx-chain-go/testscommon/economicsmocks"
+    "github.com/multiversx/mx-chain-go/testscommon/guardianMocks"
     "github.com/multiversx/mx-chain-go/testscommon/p2pmocks"
     testStorage "github.com/multiversx/mx-chain-go/testscommon/state"
     "github.com/multiversx/mx-chain-go/testscommon/statusHandler"
     statusHandlerMock "github.com/multiversx/mx-chain-go/testscommon/statusHandler"
+    "github.com/multiversx/mx-chain-go/testscommon/txDataBuilder"
     "github.com/multiversx/mx-chain-go/trie"
     "github.com/multiversx/mx-chain-go/trie/hashesHolder"
     "github.com/multiversx/mx-chain-go/vm"
@@ -664,12 +667,15 @@ func CreateFullGenesisBlocks(
             OwnerAddress: "aaaaaa",
         },
         GovernanceSystemSCConfig: config.GovernanceSystemSCConfig{
-            FirstWhitelistedAddress: DelegationManagerConfigChangeAddress,
+            ChangeConfigAddress: DelegationManagerConfigChangeAddress,
+            V1: config.GovernanceSystemSCConfigV1{
+                ProposalCost: "500",
+            },
             Active: config.GovernanceSystemSCConfigActive{
                 ProposalCost:     "500",
-                MinQuorum:        "50",
-                MinPassThreshold: "50",
-                MinVetoThreshold: "50",
+                MinQuorum:        0.5,
+                MinPassThreshold: 0.5,
+                MinVetoThreshold: 0.5,
             },
         },
         StakingSystemSCConfig: config.StakingSystemSCConfig{
@@ -769,13 +775,16 @@ func CreateGenesisMetaBlock(
             OwnerAddress: "aaaaaa",
         },
         GovernanceSystemSCConfig: config.GovernanceSystemSCConfig{
+            V1: config.GovernanceSystemSCConfigV1{
+                ProposalCost: "500",
+            },
             Active: config.GovernanceSystemSCConfigActive{
                 ProposalCost:     "500",
-                MinQuorum:        "50",
-                MinPassThreshold: "50",
-                MinVetoThreshold: "50",
+                MinQuorum:        0.5,
+                MinPassThreshold: 0.5,
+                MinVetoThreshold: 0.5,
             },
-            FirstWhitelistedAddress: DelegationManagerConfigChangeAddress,
+            ChangeConfigAddress: DelegationManagerConfigChangeAddress,
         },
         StakingSystemSCConfig: config.StakingSystemSCConfig{
             GenesisNodePrice: "1000",
@@ -823,7 +832,7 @@ func CreateGenesisMetaBlock(
         argsMetaGenesis.ShardCoordinator = newShardCoordinator
         argsMetaGenesis.Accounts = newAccounts
-        argsMetaGenesis.Data.SetBlockchain(newBlkc)
+        _ = argsMetaGenesis.Data.SetBlockchain(newBlkc)
         dataComponents.DataPool = newDataPool
     }
 
@@ -984,7 +993,7 @@ func CreateSimpleTxProcessor(accnts state.AccountsAdapter) process.TransactionPr
         ScProcessor:   &testscommon.SCProcessorMock{},
         TxFeeHandler:  &testscommon.UnsignedTxHandlerStub{},
         TxTypeHandler: &testscommon.TxTypeHandlerMock{},
-        EconomicsFee: &mock.FeeHandlerStub{
+        EconomicsFee: &economicsmocks.EconomicsHandlerStub{
             ComputeGasLimitCalled: func(tx data.TransactionWithFeeHandler) uint64 {
                 return tx.GetGasLimit()
             },
@@ -1003,6 +1012,8 @@ func CreateSimpleTxProcessor(accnts state.AccountsAdapter) process.TransactionPr
         ArgsParser:          smartContract.NewArgumentParser(),
         ScrForwarder:        &mock.IntermediateTransactionHandlerMock{},
         EnableEpochsHandler: &testscommon.EnableEpochsHandlerStub{},
+        TxVersionChecker:    &testscommon.TxVersionCheckerStub{},
+        GuardianChecker:     &guardianMocks.GuardedAccountHandlerStub{},
     }
     txProcessor, _ := txProc.NewTxProcessor(argsNewTxProcessor)
 
@@ -1672,7 +1683,7 @@ func CreateAndSendTransaction(
         Version:  MinTransactionVersion,
     }
 
-    txBuff, _ := tx.GetDataForSigning(TestAddressPubkeyConverter, TestTxSignMarshalizer)
+    txBuff, _ := tx.GetDataForSigning(TestAddressPubkeyConverter, TestTxSignMarshalizer, TestTxSignHasher)
     tx.Signature, _ = node.OwnAccount.SingleSigner.Sign(node.OwnAccount.SkTxSign, txBuff)
 
     senderShardID := node.ShardCoordinator.ComputeId(node.OwnAccount.Address)
@@ -1719,7 +1730,7 @@ func CreateAndSendTransactionWithGasLimit(
         Version:  version,
     }
 
-    txBuff, _ := tx.GetDataForSigning(TestAddressPubkeyConverter, TestTxSignMarshalizer)
+    txBuff, _ := tx.GetDataForSigning(TestAddressPubkeyConverter, TestTxSignMarshalizer, TestTxSignHasher)
     tx.Signature, _ = node.OwnAccount.SingleSigner.Sign(node.OwnAccount.SkTxSign, txBuff)
 
     _, _ = node.SendTransaction(tx)
@@ -1760,7 +1771,7 @@ func GenerateTransferTx(
         ChainID:  chainID,
         Version:  version,
     }
-    txBuff, _ := tx.GetDataForSigning(TestAddressPubkeyConverter, TestTxSignMarshalizer)
+    txBuff, _ := tx.GetDataForSigning(TestAddressPubkeyConverter, TestTxSignMarshalizer, TestTxSignHasher)
     signer := TestSingleSigner
     tx.Signature, _ = signer.Sign(senderPrivateKey, txBuff)
 
@@ -1783,7 +1794,7 @@ func generateTx(
         ChainID:  ChainID,
         Version:  MinTransactionVersion,
     }
-    txBuff, _ := tx.GetDataForSigning(TestAddressPubkeyConverter, TestTxSignMarshalizer)
+    txBuff, _ := tx.GetDataForSigning(TestAddressPubkeyConverter, TestTxSignMarshalizer, TestTxSignHasher)
     tx.Signature, _ = signer.Sign(skSign, txBuff)
 
     return tx
@@ -2596,3 +2607,22 @@ func SaveDelegationContractsList(nodes []*TestProcessorNode) {
         _, _ = n.AccntState.Commit()
     }
 }
+
+// PrepareRelayedTxDataV1 prepares the data for a relayed transaction V1
+func PrepareRelayedTxDataV1(innerTx *transaction.Transaction) []byte {
+    userTxBytes, _ := TestMarshalizer.Marshal(innerTx)
+    return []byte(core.RelayedTransaction + "@" + hex.EncodeToString(userTxBytes))
+}
+
+// PrepareRelayedTxDataV2 prepares the data for a relayed transaction V2
+func PrepareRelayedTxDataV2(innerTx *transaction.Transaction) []byte {
+    dataBuilder := txDataBuilder.NewBuilder()
+    txData := dataBuilder.
+        Func(core.RelayedTransactionV2).
+        Bytes(innerTx.RcvAddr).
+        Int64(int64(innerTx.Nonce)).
+        Bytes(innerTx.Data).
+        Bytes(innerTx.Signature)
+
+    return txData.ToBytes()
+}
diff --git a/integrationTests/testNetwork.go b/integrationTests/testNetwork.go
index 62170a084fe..63a253b3bc0 100644
--- a/integrationTests/testNetwork.go
+++ b/integrationTests/testNetwork.go
@@ -309,7 +309,7 @@ func (net *TestNetwork) CreateTx(
 // SignTx signs a transaction with the provided `signer` wallet.
 func (net *TestNetwork) SignTx(signer *TestWalletAccount, tx *transaction.Transaction) {
-    txBuff, err := tx.GetDataForSigning(TestAddressPubkeyConverter, TestTxSignMarshalizer)
+    txBuff, err := tx.GetDataForSigning(TestAddressPubkeyConverter, TestTxSignMarshalizer, TestTxSignHasher)
     net.handleOrBypassError(err)
 
     signature, err := signer.SingleSigner.Sign(signer.SkTxSign, txBuff)
diff --git a/integrationTests/testProcessorNode.go b/integrationTests/testProcessorNode.go
index 50486f3e29c..db93a8b8e5a 100644
--- a/integrationTests/testProcessorNode.go
+++ b/integrationTests/testProcessorNode.go
@@ -34,6 +34,7 @@ import (
     nodeFactory "github.com/multiversx/mx-chain-go/cmd/node/factory"
     "github.com/multiversx/mx-chain-go/common"
     "github.com/multiversx/mx-chain-go/common/enablers"
+    "github.com/multiversx/mx-chain-go/common/errChan"
     "github.com/multiversx/mx-chain-go/common/forking"
     "github.com/multiversx/mx-chain-go/config"
     "github.com/multiversx/mx-chain-go/consensus"
@@ -104,6 +105,7 @@ import (
     "github.com/multiversx/mx-chain-go/testscommon/economicsmocks"
     testFactory "github.com/multiversx/mx-chain-go/testscommon/factory"
     "github.com/multiversx/mx-chain-go/testscommon/genesisMocks"
+    "github.com/multiversx/mx-chain-go/testscommon/guardianMocks"
     "github.com/multiversx/mx-chain-go/testscommon/mainFactoryMocks"
     "github.com/multiversx/mx-chain-go/testscommon/outport"
     "github.com/multiversx/mx-chain-go/testscommon/p2pmocks"
@@ -354,6 +356,7 @@ type TestProcessorNode struct {
     MultiSigner             crypto.MultiSigner
     HeaderSigVerifier       process.InterceptedHeaderSigVerifier
     HeaderIntegrityVerifier process.HeaderIntegrityVerifier
+    GuardedAccountHandler   process.GuardedAccountHandler
 
     ValidatorStatisticsProcessor process.ValidatorStatisticsProcessor
     Rater                        sharding.PeerAccountListAndRatingHandler
@@ -484,6 +487,7 @@ func newBaseTestProcessorNode(args ArgTestProcessorNode) *TestProcessorNode {
         BootstrapStorer:       &mock.BoostrapStorerMock{},
         RatingsData:           args.RatingsData,
         EpochStartNotifier:    args.EpochStartSubscriber,
+        GuardedAccountHandler: &guardianMocks.GuardedAccountHandlerStub{},
         AppStatusHandler:      appStatusHandler,
         PeersRatingMonitor:    peersRatingMonitor,
     }
@@ -798,6 +802,7 @@ func (tpn *TestProcessorNode) createFullSCQueryService(gasMap map[string]map[str
         EpochNotifier:             tpn.EpochNotifier,
         EnableEpochsHandler:       tpn.EnableEpochsHandler,
         MaxNumNodesInTransferRole: 100,
+        GuardedAccountHandler:     tpn.GuardedAccountHandler,
     }
     argsBuiltIn.AutomaticCrawlerAddresses = GenerateOneAddressPerShard(argsBuiltIn.ShardCoordinator)
     builtInFuncFactory, _ := builtInFunctions.CreateBuiltInFunctionsFactory(argsBuiltIn)
@@ -854,11 +859,11 @@ func (tpn *TestProcessorNode) createFullSCQueryService(gasMap map[string]map[str
             },
             Active: config.GovernanceSystemSCConfigActive{
                 ProposalCost:     "500",
-                MinQuorum:        "50",
-                MinPassThreshold: "50",
-                MinVetoThreshold: "50",
+                MinQuorum:        0.5,
+                MinPassThreshold: 0.5,
+                MinVetoThreshold: 0.5,
             },
-            FirstWhitelistedAddress: DelegationManagerConfigChangeAddress,
+            ChangeConfigAddress: DelegationManagerConfigChangeAddress,
         },
         StakingSystemSCConfig: config.StakingSystemSCConfig{
             GenesisNodePrice: "1000",
@@ -992,6 +997,7 @@ func (tpn *TestProcessorNode) initEconomicsData(economicsConfig *config.Economic
         EpochNotifier:               tpn.EpochNotifier,
         EnableEpochsHandler:         tpn.EnableEpochsHandler,
         BuiltInFunctionsCostHandler: &mock.BuiltInCostHandlerStub{},
+        TxVersionChecker:            &testscommon.TxVersionCheckerStub{},
     }
     economicsData, _ := economics.NewEconomicsData(argsNewEconomicsData)
     tpn.EconomicsData = economics.NewTestEconomicsData(economicsData)
@@ -1034,11 +1040,13 @@ func createDefaultEconomicsConfig() *config.EconomicsConfig {
                     MaxGasLimitPerMetaMiniBlock: maxGasLimitPerBlock,
                     MaxGasLimitPerTx:            maxGasLimitPerBlock,
                     MinGasLimit:                 minGasLimit,
+                    ExtraGasLimitGuardedTx:      "50000",
                 },
             },
-            MinGasPrice:      minGasPrice,
-            GasPerDataByte:   "1",
-            GasPriceModifier: 0.01,
+            MinGasPrice:            minGasPrice,
+            GasPerDataByte:         "1",
+            GasPriceModifier:       0.01,
+            MaxGasPriceSetGuardian: "2000000000",
         },
     }
 }
@@ -1423,27 +1431,31 @@ func (tpn *TestProcessorNode) initInnerProcessors(gasMap map[string]map[string]u
         }
     }
 
-    interimProcFactory, _ := shard.NewIntermediateProcessorsContainerFactory(
-        tpn.ShardCoordinator,
-        TestMarshalizer,
-        TestHasher,
-        TestAddressPubkeyConverter,
-        tpn.Storage,
-        tpn.DataPool,
-        tpn.EconomicsData,
-    )
+    argsFactory := shard.ArgsNewIntermediateProcessorsContainerFactory{
+        ShardCoordinator:    tpn.ShardCoordinator,
+        Marshalizer:         TestMarshalizer,
+        Hasher:              TestHasher,
+        PubkeyConverter:     TestAddressPubkeyConverter,
+        Store:               tpn.Storage,
+        PoolsHolder:         tpn.DataPool,
+        EconomicsFee:        tpn.EconomicsData,
+        EnableEpochsHandler: tpn.EnableEpochsHandler,
+    }
+    interimProcFactory, _ := shard.NewIntermediateProcessorsContainerFactory(argsFactory)
 
     tpn.InterimProcContainer, _ = interimProcFactory.Create()
-    tpn.ScrForwarder, _ = postprocess.NewTestIntermediateResultsProcessor(
-        TestHasher,
-        TestMarshalizer,
-        tpn.ShardCoordinator,
-        TestAddressPubkeyConverter,
-        tpn.Storage,
-        dataBlock.SmartContractResultBlock,
-        tpn.DataPool.CurrentBlockTxs(),
-        tpn.EconomicsData,
-    )
+    argsNewIntermediateResultsProc := postprocess.ArgsNewIntermediateResultsProcessor{
+        Hasher:              TestHasher,
+        Marshalizer:         TestMarshalizer,
+        Coordinator:         tpn.ShardCoordinator,
+        PubkeyConv:          TestAddressPubkeyConverter,
+        Store:               tpn.Storage,
+        BlockType:           dataBlock.SmartContractResultBlock,
+        CurrTxs:             tpn.DataPool.CurrentBlockTxs(),
+        EconomicsFee:        tpn.EconomicsData,
+        EnableEpochsHandler: tpn.EnableEpochsHandler,
+    }
+    tpn.ScrForwarder, _ = postprocess.NewTestIntermediateResultsProcessor(argsNewIntermediateResultsProc)
 
     tpn.InterimProcContainer.Remove(dataBlock.SmartContractResultBlock)
     _ = tpn.InterimProcContainer.Add(dataBlock.SmartContractResultBlock, tpn.ScrForwarder)
@@ -1469,6 +1481,7 @@ func (tpn *TestProcessorNode) initInnerProcessors(gasMap map[string]map[string]u
         EpochNotifier:             tpn.EpochNotifier,
         EnableEpochsHandler:       tpn.EnableEpochsHandler,
         MaxNumNodesInTransferRole: 100,
+        GuardedAccountHandler:     tpn.GuardedAccountHandler,
     }
     argsBuiltIn.AutomaticCrawlerAddresses = GenerateOneAddressPerShard(argsBuiltIn.ShardCoordinator)
     builtInFuncFactory, _ := builtInFunctions.CreateBuiltInFunctionsFactory(argsBuiltIn)
@@ -1589,6 +1602,8 @@ func (tpn *TestProcessorNode) initInnerProcessors(gasMap map[string]map[string]u
         ArgsParser:          tpn.ArgsParser,
         ScrForwarder:        tpn.ScrForwarder,
         EnableEpochsHandler: tpn.EnableEpochsHandler,
+        GuardianChecker:     &guardianMocks.GuardedAccountHandlerStub{},
+        TxVersionChecker:    &testscommon.TxVersionCheckerStub{},
     }
     tpn.TxProcessor, _ = transaction.NewTxProcessor(argsNewTxProcessor)
     scheduledSCRsStorer, _ := tpn.Storage.GetStorer(dataRetriever.ScheduledSCRsUnit)
@@ -1653,27 +1668,31 @@ func (tpn *TestProcessorNode) initInnerProcessors(gasMap map[string]map[string]u
 }
 
 func (tpn *TestProcessorNode) initMetaInnerProcessors(gasMap map[string]map[string]uint64) {
-    interimProcFactory, _ := metaProcess.NewIntermediateProcessorsContainerFactory(
-        tpn.ShardCoordinator,
-        TestMarshalizer,
-        TestHasher,
-        TestAddressPubkeyConverter,
-        tpn.Storage,
-        tpn.DataPool,
-        tpn.EconomicsData,
-    )
+    argsFactory := metaProcess.ArgsNewIntermediateProcessorsContainerFactory{
+        ShardCoordinator:    tpn.ShardCoordinator,
+        Marshalizer:         TestMarshalizer,
+        Hasher:              TestHasher,
+        PubkeyConverter:     TestAddressPubkeyConverter,
+        Store:               tpn.Storage,
+        PoolsHolder:         tpn.DataPool,
+        EconomicsFee:        tpn.EconomicsData,
+        EnableEpochsHandler: tpn.EnableEpochsHandler,
+    }
+    interimProcFactory, _ := metaProcess.NewIntermediateProcessorsContainerFactory(argsFactory)
 
     tpn.InterimProcContainer, _ = interimProcFactory.Create()
-    tpn.ScrForwarder, _ = postprocess.NewTestIntermediateResultsProcessor(
-        TestHasher,
-        TestMarshalizer,
-        tpn.ShardCoordinator,
-        TestAddressPubkeyConverter,
-        tpn.Storage,
-        dataBlock.SmartContractResultBlock,
-        tpn.DataPool.CurrentBlockTxs(),
-        tpn.EconomicsData,
-    )
+    argsNewIntermediateResultsProc := postprocess.ArgsNewIntermediateResultsProcessor{
+        Hasher:              TestHasher,
+        Marshalizer:         TestMarshalizer,
+        Coordinator:         tpn.ShardCoordinator,
+        PubkeyConv:          TestAddressPubkeyConverter,
+        Store:               tpn.Storage,
+        BlockType:           dataBlock.SmartContractResultBlock,
+        CurrTxs:             tpn.DataPool.CurrentBlockTxs(),
+        EconomicsFee:        tpn.EconomicsData,
+        EnableEpochsHandler: tpn.EnableEpochsHandler,
+    }
+    tpn.ScrForwarder, _ = postprocess.NewTestIntermediateResultsProcessor(argsNewIntermediateResultsProc)
 
     tpn.InterimProcContainer.Remove(dataBlock.SmartContractResultBlock)
     _ = tpn.InterimProcContainer.Add(dataBlock.SmartContractResultBlock, tpn.ScrForwarder)
@@ -1688,6 +1707,7 @@ func (tpn *TestProcessorNode) initMetaInnerProcessors(gasMap map[string]map[stri
         EpochNotifier:             tpn.EpochNotifier,
         EnableEpochsHandler:       tpn.EnableEpochsHandler,
         MaxNumNodesInTransferRole: 100,
+        GuardedAccountHandler:     tpn.GuardedAccountHandler,
     }
     argsBuiltIn.AutomaticCrawlerAddresses = GenerateOneAddressPerShard(argsBuiltIn.ShardCoordinator)
     builtInFuncFactory, _ := builtInFunctions.CreateBuiltInFunctionsFactory(argsBuiltIn)
@@ -1737,13 +1757,16 @@ func (tpn *TestProcessorNode) initMetaInnerProcessors(gasMap map[string]map[stri
             OwnerAddress: "aaaaaa",
         },
         GovernanceSystemSCConfig: config.GovernanceSystemSCConfig{
+            V1: config.GovernanceSystemSCConfigV1{
+                ProposalCost: "500",
+            },
             Active: config.GovernanceSystemSCConfigActive{
                 ProposalCost:     "500",
-                MinQuorum:        "50",
-                MinPassThreshold: "50",
-                MinVetoThreshold: "50",
+                MinQuorum:        0.5,
+                MinPassThreshold: 0.5,
+                MinVetoThreshold: 0.5,
             },
-            FirstWhitelistedAddress: DelegationManagerConfigChangeAddress,
+            ChangeConfigAddress: DelegationManagerConfigChangeAddress,
         },
         StakingSystemSCConfig: config.StakingSystemSCConfig{
             GenesisNodePrice: "1000",
@@ -1829,6 +1852,8 @@ func (tpn *TestProcessorNode) initMetaInnerProcessors(gasMap map[string]map[stri
         TxTypeHandler:       txTypeHandler,
         EconomicsFee:        tpn.EconomicsData,
         EnableEpochsHandler: tpn.EnableEpochsHandler,
+        GuardianChecker:     &guardianMocks.GuardedAccountHandlerStub{},
+        TxVersionChecker:    &testscommon.TxVersionCheckerStub{},
     }
     tpn.TxProcessor, _ = transaction.NewMetaTxProcessor(argsNewMetaTxProc)
     scheduledSCRsStorer, _ := tpn.Storage.GetStorer(dataRetriever.ScheduledSCRsUnit)
@@ -2403,21 +2428,24 @@ func (tpn *TestProcessorNode) SendTransaction(tx *dataTransaction.Transaction) (
         return "", err
     }
 
-    tx, txHash, err := tpn.Node.CreateTransaction(
-        tx.Nonce,
-        tx.Value.String(),
-        encodedRcvAddr,
-        nil,
-        encodedSndAddr,
-        nil,
-        tx.GasPrice,
-        tx.GasLimit,
-        tx.Data,
-        hex.EncodeToString(tx.Signature),
-        string(tx.ChainID),
-        tx.Version,
-        tx.Options,
-    )
+    txArgsLocal := &external.ArgsCreateTransaction{
+        Nonce:            tx.Nonce,
+        Value:            tx.Value.String(),
+        Receiver:         encodedRcvAddr,
+        ReceiverUsername: nil,
+        Sender:           encodedSndAddr,
+        SenderUsername:   nil,
+        GasPrice:         tx.GasPrice,
+        GasLimit:         tx.GasLimit,
+        DataField:        tx.Data,
+        SignatureHex:     hex.EncodeToString(tx.Signature),
+        ChainID:          string(tx.ChainID),
+        Version:          tx.Version,
+        Options:          tx.Options,
+        Guardian:         TestAddressPubkeyConverter.SilentEncode(tx.GuardianAddr, log),
+        GuardianSigHex:   hex.EncodeToString(tx.GuardianSignature),
+    }
+    tx, txHash, err := tpn.Node.CreateTransaction(txArgsLocal)
     if err != nil {
         return "", err
     }
@@ -3166,7 +3194,6 @@ func GetDefaultCryptoComponents() *mock.CryptoComponentsStub {
         PubKey:       &mock.PublicKeyMock{},
         PrivKey:      &mock.PrivateKeyMock{},
         PubKeyString: "pubKey",
-        PrivKeyBytes: []byte("privKey"),
         PubKeyBytes:  []byte("pubKey"),
         BlockSig:     &mock.SignerMock{},
         TxSig:        &mock.SignerMock{},
@@ -3181,8 +3208,8 @@ func GetDefaultCryptoComponents() *mock.CryptoComponentsStub {
 }
 
 // GetDefaultStateComponents -
-func GetDefaultStateComponents() *testscommon.StateComponentsMock {
-    return &testscommon.StateComponentsMock{
+func GetDefaultStateComponents() *testFactory.StateComponentsMock {
+    return &testFactory.StateComponentsMock{
         PeersAcc:     &stateMock.AccountsStub{},
         Accounts:     &stateMock.AccountsStub{},
         AccountsRepo: &stateMock.AccountsRepositoryStub{},
@@ -3213,7 +3240,6 @@ func GetDefaultStatusComponents() *mock.StatusComponentsStub {
     return &mock.StatusComponentsStub{
         Outport:              mock.NewNilOutport(),
         SoftwareVersionCheck: &mock.SoftwareVersionCheckerMock{},
-        AppStatusHandler:     &statusHandlerMock.AppStatusHandlerStub{},
     }
 }
 
@@ -3233,12 +3259,13 @@ func getDefaultBootstrapComponents(shardCoordinator sharding.Coordinator) *mainF
             StorageManagers: map[string]common.StorageManager{"0": &storageManager.StorageManagerStub{}},
             BootstrapCalled: nil,
         },
-        BootstrapParams:      &bootstrapMocks.BootstrapParamsHandlerMock{},
-        NodeRole:             "",
-        ShCoordinator:        shardCoordinator,
-        HdrVersionHandler:    headerVersionHandler,
-        VersionedHdrFactory:  versionedHeaderFactory,
-        HdrIntegrityVerifier: &mock.HeaderIntegrityVerifierStub{},
+        BootstrapParams:            &bootstrapMocks.BootstrapParamsHandlerMock{},
+        NodeRole:                   "",
+        ShCoordinator:              shardCoordinator,
+        HdrVersionHandler:          headerVersionHandler,
+        VersionedHdrFactory:        versionedHeaderFactory,
+        HdrIntegrityVerifier:       &mock.HeaderIntegrityVerifierStub{},
+        GuardedAccountHandlerField: &guardianMocks.GuardedAccountHandlerStub{},
     }
 }
 
@@ -3260,7 +3287,7 @@ func GetTokenIdentifier(nodes []*TestProcessorNode, ticker []byte) []byte {
         rootHash, _ := userAcc.DataTrie().RootHash()
         chLeaves := &common.TrieIteratorChannels{
             LeavesChan: make(chan core.KeyValueHolder, common.TrieLeavesChannelDefaultCapacity),
-            ErrChan:    make(chan error, 1),
+            ErrChan:    errChan.NewErrChanWrapper(),
         }
         _ = userAcc.DataTrie().GetAllLeavesOnChannel(chLeaves, context.Background(), rootHash, keyBuilder.NewKeyBuilder())
         for leaf := range chLeaves.LeavesChan {
@@ -3271,7 +3298,7 @@ func GetTokenIdentifier(nodes []*TestProcessorNode, ticker []byte) []byte {
             return leaf.Key()
         }
 
-        err := common.GetErrorFromChanNonBlocking(chLeaves.ErrChan)
+        err := chLeaves.ErrChan.ReadFromChanNonBlocking()
         if err != nil {
             log.Error("error getting all leaves from channel", "err", err)
         }
diff --git a/integrationTests/testProcessorNodeWithTestWebServer.go b/integrationTests/testProcessorNodeWithTestWebServer.go
index 302051f09db..e76dd3fd464 100644
--- a/integrationTests/testProcessorNodeWithTestWebServer.go
+++ b/integrationTests/testProcessorNodeWithTestWebServer.go
@@ -148,6 +148,7 @@ func createFacadeComponents(tpn *TestProcessorNode) (nodeFacade.ApiResolver, nod
         EpochNotifier:             tpn.EpochNotifier,
         EnableEpochsHandler:       tpn.EnableEpochsHandler,
         MaxNumNodesInTransferRole: 100,
+        GuardedAccountHandler:     tpn.GuardedAccountHandler,
     }
     argsBuiltIn.AutomaticCrawlerAddresses = GenerateOneAddressPerShard(argsBuiltIn.ShardCoordinator)
     builtInFuncs, err := builtInFunctions.CreateBuiltInFunctionsFactory(argsBuiltIn)
diff --git a/integrationTests/vm/delegation/delegationMulti_test.go b/integrationTests/vm/delegation/delegationMulti_test.go
new file mode 100644
index 00000000000..3d87dec1e08
--- /dev/null
+++ b/integrationTests/vm/delegation/delegationMulti_test.go
@@ -0,0 +1,261 @@
+//go:build !race
+// +build !race
+
+package delegation
+
+import (
+    "encoding/hex"
+    "math/big"
+    "testing"
+
+    "github.com/multiversx/mx-chain-core-go/core"
+    "github.com/multiversx/mx-chain-core-go/data/block"
+    "github.com/multiversx/mx-chain-go/integrationTests"
+    "github.com/multiversx/mx-chain-go/process"
+    "github.com/multiversx/mx-chain-go/testscommon"
+    "github.com/multiversx/mx-chain-go/vm"
+    vmcommon "github.com/multiversx/mx-chain-vm-common-go"
+    "github.com/stretchr/testify/assert"
+    "github.com/stretchr/testify/require"
+)
+
+func TestDelegationSystemClaimMulti(t *testing.T) {
+    tpn := integrationTests.NewTestProcessorNode(integrationTests.ArgTestProcessorNode{
+        MaxShards:            1,
+        NodeShardId:          core.MetachainShardId,
+        TxSignPrivKeyShardId: 0,
+    })
+    tpn.InitDelegationManager()
+    maxDelegationCap := big.NewInt(5000)
+    serviceFee := big.NewInt(10000) // 10%
+    numContracts := 2
+    totalNumNodes := 5
+    numDelegators := 4
+    delegationVal := int64(1000)
+    tpn.EpochNotifier.CheckEpoch(&testscommon.HeaderHandlerStub{
+        EpochField: integrationTests.UnreachableEpoch + 1,
+    })
+    tpn.BlockchainHook.SetCurrentHeader(&block.MetaBlock{Nonce: 1})
+
+    ownerAddresses := getAddresses(numContracts)
+
+    delegators := getAddresses(numDelegators)
+    delegationScAddresses := make([][]byte, numContracts)
+
+    firstTwoDelegators := delegators[:2]
+    lastTwoDelegators := delegators[2:]
+    halfDelegationVal := delegationVal / 2
+
+    for i := range delegationScAddresses {
+        delegationScAddresses[i] = deployNewSc(t, tpn, maxDelegationCap, serviceFee, big.NewInt(2000), ownerAddresses[i])
+        verifyDelegatorsStake(t, tpn, "getUserActiveStake", [][]byte{ownerAddresses[i]}, delegationScAddresses[i], big.NewInt(2000))
+
+        blsKeys, sigs := getBlsKeysAndSignatures(delegationScAddresses[i], totalNumNodes)
+        txData := addNodesTxData(blsKeys, sigs)
+        returnedCode, err := processTransaction(tpn, ownerAddresses[i], delegationScAddresses[i], txData, big.NewInt(0))
+        assert.Nil(t, err)
+        assert.Equal(t, vmcommon.Ok, returnedCode)
+
+        processMultipleTransactions(t, tpn, firstTwoDelegators, delegationScAddresses[i], "delegate", big.NewInt(delegationVal))
+        verifyDelegatorsStake(t, tpn, "getUserActiveStake", firstTwoDelegators, delegationScAddresses[i], big.NewInt(delegationVal))
+
+        processMultipleTransactions(t, tpn, lastTwoDelegators, delegationScAddresses[i], "delegate", big.NewInt(halfDelegationVal))
+        verifyDelegatorsStake(t, tpn, "getUserActiveStake", lastTwoDelegators, delegationScAddresses[i], big.NewInt(halfDelegationVal))
+
+        txData = txDataForFunc("stakeNodes", blsKeys)
+        returnedCode, err = processTransaction(tpn, ownerAddresses[i], delegationScAddresses[i], txData, big.NewInt(0))
+        assert.Nil(t, err)
+        assert.Equal(t, vmcommon.Ok, returnedCode)
+    }
+
+    verifyValidatorSCStake(t, tpn, delegationScAddresses[0], big.NewInt(5000))
+    verifyValidatorSCStake(t, tpn, delegationScAddresses[1], big.NewInt(5000))
+
+    for i := range delegationScAddresses {
+        addRewardsToDelegation(tpn, delegationScAddresses[i], big.NewInt(1000), 1, 1)
+        addRewardsToDelegation(tpn, delegationScAddresses[i], big.NewInt(2000), 2, 1)
+        addRewardsToDelegation(tpn, delegationScAddresses[i], big.NewInt(3000), 3, 1)
+        addRewardsToDelegation(tpn, delegationScAddresses[i], big.NewInt(4000), 4, 1)
+        addRewardsToDelegation(tpn, delegationScAddresses[i], big.NewInt(5000), 5, 1)
+    }
+
+    for i := range delegationScAddresses {
+        checkRewardData(t, tpn, delegationScAddresses[i], 1, 1000, 5000, serviceFee)
+        checkRewardData(t, tpn, delegationScAddresses[i], 2, 2000, 5000, serviceFee)
+        checkRewardData(t, tpn, delegationScAddresses[i], 3, 3000, 5000, serviceFee)
+        checkRewardData(t, tpn, delegationScAddresses[i], 4, 4000, 5000, serviceFee)
+        checkRewardData(t, tpn, delegationScAddresses[i], 5, 5000, 5000, serviceFee)
+    }
+
+    for i := range delegationScAddresses {
+        checkDelegatorReward(t, tpn, delegationScAddresses[i], delegators[0], 2700)
+        checkDelegatorReward(t, tpn, delegationScAddresses[i], delegators[1], 2700)
+        checkDelegatorReward(t, tpn, delegationScAddresses[i], delegators[2], 1350)
+        checkDelegatorReward(t, tpn, delegationScAddresses[i], delegators[3], 1350)
+        checkDelegatorReward(t, tpn, delegationScAddresses[i], ownerAddresses[i], 6900)
+    }
+
+    txData := "claimMulti"
+    listAddresses := make([][]byte, 0)
+    for _, address := range delegationScAddresses {
+        txData += "@" + hex.EncodeToString(address)
+        listAddresses = append(listAddresses, address)
+    }
+
+    checkClaimMultiReturn(t, tpn, vm.DelegationManagerSCAddress, delegators[0], "claimMulti", listAddresses, 5400)
+    checkClaimMultiReturn(t, tpn, vm.DelegationManagerSCAddress, delegators[1], "claimMulti", listAddresses, 5400)
+    checkClaimMultiReturn(t, tpn, vm.DelegationManagerSCAddress, delegators[2], "claimMulti", listAddresses, 2700)
+    checkClaimMultiReturn(t, tpn, vm.DelegationManagerSCAddress, delegators[3], "claimMulti", listAddresses, 2700)
+
+    for _, delegator := range delegators {
+        tpn.ScrForwarder.CreateBlockStarted()
+        returnedCode, err := processTransaction(tpn, delegator, vm.DelegationManagerSCAddress, txData, big.NewInt(0))
+        assert.Nil(t, err)
+        assert.Equal(t, vmcommon.Ok, returnedCode)
+        assert.Equal(t, len(delegationScAddresses), len(tpn.ScrForwarder.GetAllCurrentFinishedTxs()))
+    }
+
+    for i := range delegationScAddresses {
+        checkDelegatorReward(t, tpn, delegationScAddresses[i], delegators[0], 0)
+        checkDelegatorReward(t, tpn, delegationScAddresses[i], delegators[1], 0)
+        checkDelegatorReward(t, tpn, delegationScAddresses[i], delegators[2], 0)
+        checkDelegatorReward(t, tpn, delegationScAddresses[i], delegators[3], 0)
+        checkDelegatorReward(t, tpn, delegationScAddresses[i], ownerAddresses[i], 6900)
+    }
+
+    verifyValidatorSCStake(t, tpn, delegationScAddresses[0], big.NewInt(5000))
+    verifyValidatorSCStake(t, tpn, delegationScAddresses[1], big.NewInt(5000))
+}
+
+func TestDelegationSystemRedelegateMulti(t *testing.T) {
+    tpn := integrationTests.NewTestProcessorNode(integrationTests.ArgTestProcessorNode{
+        MaxShards:            1,
+        NodeShardId:          core.MetachainShardId,
+        TxSignPrivKeyShardId: 0,
+    })
+    tpn.InitDelegationManager()
+    maxDelegationCap := big.NewInt(5000000)
+    serviceFee := big.NewInt(10000) // 10%
+    numContracts := 2
+    totalNumNodes := 5
+    numDelegators := 4
+    delegationVal := int64(1000)
+    tpn.EpochNotifier.CheckEpoch(&testscommon.HeaderHandlerStub{
+        EpochField: integrationTests.UnreachableEpoch + 1,
+    })
+    tpn.BlockchainHook.SetCurrentHeader(&block.MetaBlock{Nonce: 1})
+
+    ownerAddresses := getAddresses(numContracts)
+
+    delegators := getAddresses(numDelegators)
+    delegationScAddresses := make([][]byte, numContracts)
+
+    firstTwoDelegators := delegators[:2]
+    lastTwoDelegators := delegators[2:]
+    halfDelegationVal := delegationVal / 2
+
+    for i := range delegationScAddresses {
+        delegationScAddresses[i] = deployNewSc(t, tpn, maxDelegationCap, serviceFee, big.NewInt(2000), ownerAddresses[i])
+        verifyDelegatorsStake(t, tpn, "getUserActiveStake", [][]byte{ownerAddresses[i]}, delegationScAddresses[i], big.NewInt(2000))
+
+        blsKeys, sigs := getBlsKeysAndSignatures(delegationScAddresses[i], totalNumNodes)
+        txData := addNodesTxData(blsKeys, sigs)
+        returnedCode, err := processTransaction(tpn, ownerAddresses[i], delegationScAddresses[i], txData, big.NewInt(0))
+        assert.Nil(t, err)
+        assert.Equal(t, vmcommon.Ok, returnedCode)
+
+        processMultipleTransactions(t, tpn, firstTwoDelegators, delegationScAddresses[i], "delegate", big.NewInt(delegationVal))
+        verifyDelegatorsStake(t, tpn, "getUserActiveStake", firstTwoDelegators, delegationScAddresses[i], big.NewInt(delegationVal))
+
+        processMultipleTransactions(t, tpn, lastTwoDelegators, delegationScAddresses[i], "delegate", big.NewInt(halfDelegationVal))
+        verifyDelegatorsStake(t, tpn, "getUserActiveStake", lastTwoDelegators, delegationScAddresses[i], big.NewInt(halfDelegationVal))
+
+        txData = txDataForFunc("stakeNodes", blsKeys)
+        returnedCode, err = processTransaction(tpn, ownerAddresses[i], delegationScAddresses[i], txData, big.NewInt(0))
+        assert.Nil(t, err)
+        assert.Equal(t, vmcommon.Ok, returnedCode)
+    }
+
+    verifyValidatorSCStake(t, tpn, delegationScAddresses[0], big.NewInt(5000))
+    verifyValidatorSCStake(t, tpn, delegationScAddresses[1], big.NewInt(5000))
+
+    for i := range delegationScAddresses {
+        addRewardsToDelegation(tpn, delegationScAddresses[i], big.NewInt(1000), 1, 1)
+        addRewardsToDelegation(tpn, delegationScAddresses[i], big.NewInt(2000), 2, 1)
+        addRewardsToDelegation(tpn, delegationScAddresses[i], big.NewInt(3000), 3, 1)
+        addRewardsToDelegation(tpn, delegationScAddresses[i], big.NewInt(4000), 4, 1)
+        addRewardsToDelegation(tpn, delegationScAddresses[i], big.NewInt(5000), 5, 1)
+    }
+
+    for i := range delegationScAddresses {
+        checkRewardData(t, tpn, delegationScAddresses[i], 1, 1000, 5000, serviceFee)
+        checkRewardData(t, tpn, delegationScAddresses[i], 2, 2000, 5000, serviceFee)
+        checkRewardData(t, tpn, delegationScAddresses[i], 3, 3000, 5000, serviceFee)
+        checkRewardData(t, tpn, delegationScAddresses[i], 4, 4000, 5000, serviceFee)
+        checkRewardData(t, tpn, delegationScAddresses[i], 5, 5000, 5000, serviceFee)
+    }
+
+    for i := range delegationScAddresses {
+        checkDelegatorReward(t, tpn, delegationScAddresses[i], delegators[0], 2700)
+        checkDelegatorReward(t, tpn, delegationScAddresses[i], delegators[1], 2700)
+        checkDelegatorReward(t, tpn, delegationScAddresses[i], delegators[2], 1350)
+        checkDelegatorReward(t, tpn, delegationScAddresses[i], delegators[3], 1350)
+        checkDelegatorReward(t, tpn, delegationScAddresses[i], ownerAddresses[i], 6900)
+    }
+
+    txData := "reDelegateMulti"
+    listAddresses := make([][]byte, 0)
+    for _, address := range delegationScAddresses {
+        txData += "@" + hex.EncodeToString(address)
+        listAddresses = append(listAddresses, address)
+    }
+
+    checkClaimMultiReturn(t, tpn, vm.DelegationManagerSCAddress, delegators[0], "reDelegateMulti", listAddresses, 5400)
+    checkClaimMultiReturn(t, tpn, vm.DelegationManagerSCAddress, delegators[1], "reDelegateMulti", listAddresses, 5400)
+    checkClaimMultiReturn(t, tpn, vm.DelegationManagerSCAddress, delegators[2], "reDelegateMulti", listAddresses, 2700)
+    checkClaimMultiReturn(t, tpn, vm.DelegationManagerSCAddress, delegators[3], "reDelegateMulti", listAddresses, 2700)
+
+    for _, delegator := range delegators {
+        returnedCode, err := processTransaction(tpn, delegator, vm.DelegationManagerSCAddress, txData, big.NewInt(0))
+        assert.Nil(t, err)
+        assert.Equal(t, vmcommon.Ok, returnedCode)
+    }
+
+    for i := range delegationScAddresses {
+        checkDelegatorReward(t, tpn, delegationScAddresses[i], delegators[0], 0)
+        checkDelegatorReward(t, tpn, delegationScAddresses[i], delegators[1], 0)
+        checkDelegatorReward(t, tpn, delegationScAddresses[i], delegators[2], 0)
+        checkDelegatorReward(t, tpn, delegationScAddresses[i], delegators[3], 0)
+        checkDelegatorReward(t, tpn, delegationScAddresses[i], ownerAddresses[i], 6900)
+    }
+
+    verifyDelegatorsStake(t, tpn, "getUserActiveStake", firstTwoDelegators, delegationScAddresses[0], big.NewInt(3700))
+    verifyDelegatorsStake(t, tpn, "getUserActiveStake", firstTwoDelegators, delegationScAddresses[1], big.NewInt(3700))
+    verifyDelegatorsStake(t, tpn, "getUserActiveStake", lastTwoDelegators, delegationScAddresses[0], big.NewInt(1850))
+    verifyDelegatorsStake(t, tpn, "getUserActiveStake", lastTwoDelegators, delegationScAddresses[1], big.NewInt(1850))
+
+    verifyValidatorSCStake(t, tpn, delegationScAddresses[0], big.NewInt(5000+8100))
+    verifyValidatorSCStake(t, tpn, delegationScAddresses[1], big.NewInt(5000+8100))
+}
+
+func checkClaimMultiReturn(
+    t *testing.T,
+    tpn *integrationTests.TestProcessorNode,
+    delegMgrAddr []byte,
+    delegAddr []byte,
+    function string,
+    arguments [][]byte,
+    expectedRewards int64,
+) {
+    query := &process.SCQuery{
+        ScAddress:  delegMgrAddr,
+        FuncName:   function,
+        CallerAddr: delegAddr,
+        CallValue:  big.NewInt(0),
+        Arguments:  arguments,
+    }
+    vmOutput, err := tpn.SCQueryService.ExecuteQuery(query)
+    assert.Nil(t, err)
+    require.Equal(t, vmcommon.Ok, vmOutput.ReturnCode)
+    require.Equal(t, big.NewInt(expectedRewards).Bytes(), vmOutput.ReturnData[0])
+}
diff --git a/integrationTests/vm/delegation/delegationScenarios_test.go b/integrationTests/vm/delegation/delegationScenarios_test.go
index 3ea763e8e08..a03f52d183f 100644
--- a/integrationTests/vm/delegation/delegationScenarios_test.go
+++ b/integrationTests/vm/delegation/delegationScenarios_test.go
@@ -20,6 +20,7 @@ import (
     "github.com/multiversx/mx-chain-crypto-go/signing"
     "github.com/multiversx/mx-chain-crypto-go/signing/mcl"
     mclsig "github.com/multiversx/mx-chain-crypto-go/signing/mcl/singlesig"
+    "github.com/multiversx/mx-chain-go/config"
     "github.com/multiversx/mx-chain-go/dataRetriever/dataPool"
     "github.com/multiversx/mx-chain-go/integrationTests"
     "github.com/multiversx/mx-chain-go/process"
@@ -31,11 +32,12 @@ import (
     "github.com/stretchr/testify/require"
 )
 
-func TestDelegationSystemNodesOperationsTest(t *testing.T) {
+func TestDelegationSystemNodesOperationsTestBackwardComp(t *testing.T) {
     tpn := integrationTests.NewTestProcessorNode(integrationTests.ArgTestProcessorNode{
         MaxShards:            1,
         NodeShardId:          core.MetachainShardId,
         TxSignPrivKeyShardId: 0,
+        EpochsConfig:         &config.EnableEpochs{MultiClaimOnDelegationEnableEpoch: 5},
     })
     tpn.InitDelegationManager()
     maxDelegationCap := big.NewInt(5000)
@@ -45,7 +47,7 @@
     tpn.BlockchainHook.SetCurrentHeader(&block.MetaBlock{Nonce: 1})
 
     tpn.EpochNotifier.CheckEpoch(&testscommon.HeaderHandlerStub{
-        EpochField: integrationTests.UnreachableEpoch + 1,
+        EpochField: 3,
     })
 
     // create new delegation contract
@@ -69,6 +71,15 @@
     }
     assert.Equal(t, 2, numExpectedScrsFound)
+
+    tpn.EpochNotifier.CheckEpoch(&testscommon.HeaderHandlerStub{
+        EpochField: 6,
+    })
+    scrsHandler.CreateBlockStarted()
+    // create new delegation contract
+    delegationScAddress = deployNewSc(t, tpn, maxDelegationCap, serviceFee, value, bytes.Repeat([]byte{1}, 32))
+    assert.NotNil(t, delegationScAddress)
+    assert.Equal(t, 0, len(scrsHandler.GetAllCurrentFinishedTxs()))
 }
 
 func TestDelegationSystemNodesOperations(t *testing.T) {
@@ -324,7 +335,7 @@ func TestDelegationSystemDelegateUnDelegateFromTopUpWithdraw(t *testing.T) {
     verifyDelegatorsStake(t, tpn, "getUserUnStakedValue", delegators[:numDelegators-2], delegationScAddress, big.NewInt(delegationVal))
     verifyUserUndelegatedList(t, tpn, delegationScAddress, delegators[0], []*big.Int{big.NewInt(delegationVal)})
 
     // withdraw unDelegated delegators should not withdraw because of unBond period
-    processMultipleTransactions(t, tpn, delegators[:numDelegators-2], delegationScAddress, "withdraw", big.NewInt(0))
+    processMultipleWithdraws(t, tpn, delegators[:numDelegators-2], delegationScAddress, vmcommon.UserError)
 
     verifyDelegatorsStake(t, tpn, "getUserActiveStake", delegators[:numDelegators-2], delegationScAddress, big.NewInt(0))
     verifyDelegatorsStake(t, tpn, "getUserUnStakedValue", delegators[:numDelegators-2], delegationScAddress, big.NewInt(delegationVal))
@@ -332,7 +343,7 @@ func TestDelegationSystemDelegateUnDelegateFromTopUpWithdraw(t *testing.T) {
     tpn.BlockchainHook.SetCurrentHeader(&block.Header{Epoch: 1})
 
     // withdraw unDelegated delegators should withdraw after unBond period has passed
-    processMultipleTransactions(t, tpn, delegators[:numDelegators-2], delegationScAddress, "withdraw", big.NewInt(0))
+    processMultipleWithdraws(t, tpn, delegators[:numDelegators-2], delegationScAddress, vmcommon.Ok)
 
     verifyDelegatorIsDeleted(t, tpn, delegators[:numDelegators-2], delegationScAddress)
 }
@@ -384,7 +395,7 @@ func TestDelegationSystemDelegateUnDelegateOnlyPartOfDelegation(t *testing.T) {
     verifyDelegatorsStake(t, tpn, "getUserUnStakedValue", delegators[:numDelegators-2], delegationScAddress, big.NewInt(delegationVal/2))
 
     // withdraw unDelegated delegators should not withdraw because of unBond period
-    processMultipleTransactions(t, tpn, delegators[:numDelegators-2], delegationScAddress, "withdraw", big.NewInt(0))
+    processMultipleWithdraws(t, tpn, delegators[:numDelegators-2], delegationScAddress, vmcommon.UserError)
 
     verifyDelegatorsStake(t, tpn, "getUserActiveStake", delegators[:numDelegators-2], delegationScAddress, big.NewInt(delegationVal/2))
     verifyDelegatorsStake(t, tpn, "getUserUnStakedValue", delegators[:numDelegators-2], delegationScAddress, big.NewInt(delegationVal/2))
@@ -392,7 +403,7 @@ func TestDelegationSystemDelegateUnDelegateOnlyPartOfDelegation(t *testing.T) {
     tpn.BlockchainHook.SetCurrentHeader(&block.Header{Epoch: 1})
 
     // withdraw unDelegated delegators should withdraw after unBond period has passed
-    processMultipleTransactions(t, tpn, delegators[:numDelegators-2], delegationScAddress, "withdraw", big.NewInt(0))
+    processMultipleWithdraws(t, tpn, delegators[:numDelegators-2], delegationScAddress, vmcommon.Ok)
 
     verifyDelegatorsStake(t, tpn, "getUserActiveStake", delegators[:numDelegators-2], delegationScAddress, big.NewInt(delegationVal/2))
     verifyDelegatorsStake(t, tpn, "getUserUnStakedValue", delegators[:numDelegators-2], delegationScAddress, big.NewInt(0))
@@ -879,7 +890,7 @@ func TestDelegationSystemMultipleDelegationContractsAndSameDelegatorsClaimReward
     tpn.BlockchainHook.SetCurrentHeader(&block.Header{Epoch: 5, Nonce: 50})
 
     for i := range delegationScAddresses {
-        processMultipleTransactions(t, tpn, firstTwoDelegators, delegationScAddresses[i], "withdraw", big.NewInt(0))
+        processMultipleWithdraws(t, tpn, firstTwoDelegators, delegationScAddresses[i], vmcommon.UserError)
 
         txData = "unDelegate" + "@" + intToString(uint32(quarterDelegationVal))
         processMultipleTransactions(t, tpn, lastTwoDelegators, delegationScAddresses[i], txData, big.NewInt(0))
@@ -1379,6 +1390,20 @@
     }
 }
 
+func processMultipleWithdraws(
+    t *testing.T,
+    tpn *integrationTests.TestProcessorNode,
+    delegatorsAddr [][]byte,
+    receiverAddr []byte,
+    expected vmcommon.ReturnCode,
+) {
+    for i := range delegatorsAddr {
+        returnCode, err := processTransaction(tpn, delegatorsAddr[i], receiverAddr, "withdraw", big.NewInt(0))
+        assert.Nil(t, err)
+        assert.Equal(t, expected, returnCode)
+    }
+}
+
 func txDataForFunc(function string, blsKeys [][]byte) string {
     txData := function
diff --git a/integrationTests/vm/esdt/process/esdtProcess_test.go b/integrationTests/vm/esdt/process/esdtProcess_test.go
index d161e69dc76..adf71fa47f7 100644
--- a/integrationTests/vm/esdt/process/esdtProcess_test.go
+++ b/integrationTests/vm/esdt/process/esdtProcess_test.go
@@ -179,6 +179,7 @@ func TestESDTCallBurnOnANonBurnableToken(t *testing.T) {
         OptimizeGasUsedInCrossMiniBlocksEnableEpoch: integrationTests.UnreachableEpoch,
         ScheduledMiniBlocksEnableEpoch:              integrationTests.UnreachableEpoch,
         MiniBlockPartialExecutionEnableEpoch:        integrationTests.UnreachableEpoch,
+        MultiClaimOnDelegationEnableEpoch:           integrationTests.UnreachableEpoch,
     }
 
     nodes := integrationTests.CreateNodesWithEnableEpochs(
diff --git a/integrationTests/vm/mockVM/vmGet/vmGet_test.go b/integrationTests/vm/mockVM/vmGet/vmGet_test.go
index 0762c43eadb..34d374bfa64 100644
--- a/integrationTests/vm/mockVM/vmGet/vmGet_test.go
+++ b/integrationTests/vm/mockVM/vmGet/vmGet_test.go
@@ -19,6 +19,7 @@ import (
     "github.com/multiversx/mx-chain-go/process/sync/disabled"
     "github.com/multiversx/mx-chain-go/state"
"github.com/multiversx/mx-chain-go/testscommon" + "github.com/multiversx/mx-chain-go/testscommon/economicsmocks" vmcommon "github.com/multiversx/mx-chain-vm-common-go" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -34,8 +35,8 @@ func TestVmGetShouldReturnValue(t *testing.T) { }} argsNewSCQueryService := smartContract.ArgsNewSCQueryService{ VmContainer: vmContainer, - EconomicsFee: &mock.FeeHandlerStub{ - MaxGasLimitPerBlockCalled: func() uint64 { + EconomicsFee: &economicsmocks.EconomicsHandlerStub{ + MaxGasLimitPerBlockCalled: func(_ uint32) uint64 { return uint64(math.MaxUint64) }, }, diff --git a/integrationTests/vm/testInitializer.go b/integrationTests/vm/testInitializer.go index 0ee90fb4317..14388d9fabd 100644 --- a/integrationTests/vm/testInitializer.go +++ b/integrationTests/vm/testInitializer.go @@ -13,6 +13,7 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-core-go/core/pubkeyConverter" + "github.com/multiversx/mx-chain-core-go/core/versioning" "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-core-go/data/block" "github.com/multiversx/mx-chain-core-go/data/scheduled" @@ -36,6 +37,7 @@ import ( "github.com/multiversx/mx-chain-go/process/economics" "github.com/multiversx/mx-chain-go/process/factory/metachain" "github.com/multiversx/mx-chain-go/process/factory/shard" + "github.com/multiversx/mx-chain-go/process/guardian" "github.com/multiversx/mx-chain-go/process/smartContract" "github.com/multiversx/mx-chain-go/process/smartContract/builtInFunctions" "github.com/multiversx/mx-chain-go/process/smartContract/hooks" @@ -51,6 +53,7 @@ import ( "github.com/multiversx/mx-chain-go/storage/txcache" "github.com/multiversx/mx-chain-go/testscommon" dataRetrieverMock "github.com/multiversx/mx-chain-go/testscommon/dataRetriever" + "github.com/multiversx/mx-chain-go/testscommon/economicsmocks" "github.com/multiversx/mx-chain-go/testscommon/epochNotifier" "github.com/multiversx/mx-chain-go/testscommon/hashingMocks" "github.com/multiversx/mx-chain-go/testscommon/integrationtests" @@ -67,6 +70,10 @@ import ( "github.com/stretchr/testify/require" ) +// EpochGuardianDelay is the test constant for the delay in epochs for the guardian feature +const EpochGuardianDelay = uint32(2) +const minTransactionVersion = 1 + var dnsAddr = []byte{0, 0, 0, 0, 0, 0, 0, 0, 5, 0, 137, 17, 46, 56, 127, 47, 62, 172, 4, 126, 190, 242, 221, 230, 209, 243, 105, 104, 242, 66, 49, 49} // TODO: Merge test utilities from this file with the ones from "wasmvm/utils.go" @@ -95,22 +102,23 @@ type VMTestAccount struct { // VMTestContext - type VMTestContext struct { - ChainHandler *testscommon.ChainHandlerStub - TxProcessor process.TransactionProcessor - ScProcessor *smartContract.TestScProcessor - Accounts state.AccountsAdapter - BlockchainHook vmcommon.BlockchainHook - VMContainer process.VirtualMachinesContainer - TxFeeHandler process.TransactionFeeHandler - ShardCoordinator sharding.Coordinator - ScForwarder process.IntermediateTransactionHandler - EconomicsData process.EconomicsDataHandler - Marshalizer marshal.Marshalizer - GasSchedule core.GasScheduleNotifier - VMConfiguration *config.VirtualMachineConfig - EpochNotifier process.EpochNotifier - EnableEpochsHandler common.EnableEpochsHandler - SCQueryService *smartContract.SCQueryService + ChainHandler *testscommon.ChainHandlerStub + TxProcessor process.TransactionProcessor + ScProcessor *smartContract.TestScProcessor + 
Accounts state.AccountsAdapter + BlockchainHook vmcommon.BlockchainHook + VMContainer process.VirtualMachinesContainer + TxFeeHandler process.TransactionFeeHandler + ShardCoordinator sharding.Coordinator + ScForwarder process.IntermediateTransactionHandler + EconomicsData process.EconomicsDataHandler + Marshalizer marshal.Marshalizer + GasSchedule core.GasScheduleNotifier + VMConfiguration *config.VirtualMachineConfig + EpochNotifier process.EpochNotifier + EnableEpochsHandler common.EnableEpochsHandler + SCQueryService *smartContract.SCQueryService + GuardedAccountsHandler process.GuardedAccountHandler Alice VMTestAccount Bob VMTestAccount @@ -226,8 +234,8 @@ func (vmTestContext *VMTestContext) GetIntValueFromSCWithTransientVM(funcName st // GetVMOutputWithTransientVM - func (vmTestContext *VMTestContext) GetVMOutputWithTransientVM(funcName string, args ...[]byte) *vmcommon.VMOutput { scAddressBytes := vmTestContext.Contract.Address - feeHandler := &mock.FeeHandlerStub{ - MaxGasLimitPerBlockCalled: func() uint64 { + feeHandler := &economicsmocks.EconomicsHandlerStub{ + MaxGasLimitPerBlockCalled: func(_ uint32) uint64 { return uint64(math.MaxUint64) }, } @@ -333,16 +341,19 @@ func createEconomicsData(enableEpochsConfig config.EnableEpochs) (process.Econom MaxGasLimitPerMetaMiniBlock: maxGasLimitPerBlock, MaxGasLimitPerTx: maxGasLimitPerBlock, MinGasLimit: minGasLimit, + ExtraGasLimitGuardedTx: "50000", }, }, - MinGasPrice: minGasPrice, - GasPerDataByte: "1", - GasPriceModifier: 1.0, + MinGasPrice: minGasPrice, + GasPerDataByte: "1", + GasPriceModifier: 1.0, + MaxGasPriceSetGuardian: "2000000000", }, }, EpochNotifier: realEpochNotifier, EnableEpochsHandler: enableEpochsHandler, BuiltInFunctionsCostHandler: builtInCost, + TxVersionChecker: versioning.NewTxVersionChecker(minTransactionVersion), } return economics.NewEconomicsData(argsNewEconomicsData) @@ -438,6 +449,11 @@ func CreateTxProcessorWithOneSCExecutorMockVM( } scProcessor, _ := smartContract.NewSmartContractProcessor(argsNewSCProcessor) + guardedAccountHandler, err := guardian.NewGuardedAccount(integrationtests.TestMarshalizer, genericEpochNotifier, EpochGuardianDelay) + if err != nil { + return nil, err + } + argsNewTxProcessor := transaction.ArgsNewTxProcessor{ Accounts: accnts, Hasher: integrationtests.TestHasher, @@ -454,6 +470,8 @@ func CreateTxProcessorWithOneSCExecutorMockVM( ArgsParser: smartContract.NewArgumentParser(), ScrForwarder: &mock.IntermediateTransactionHandlerMock{}, EnableEpochsHandler: enableEpochsHandler, + TxVersionChecker: versioning.NewTxVersionChecker(minTransactionVersion), + GuardianChecker: guardedAccountHandler, } return transaction.NewTxProcessor(argsNewTxProcessor) @@ -499,6 +517,7 @@ func CreateVMAndBlockchainHookAndDataPool( epochNotifierInstance process.EpochNotifier, enableEpochsHandler common.EnableEpochsHandler, chainHandler data.ChainHandler, + guardedAccountHandler vmcommon.GuardedAccountHandler, ) (process.VirtualMachinesContainer, *hooks.BlockChainHookImpl, dataRetriever.PoolsHolder) { if check.IfNil(gasSchedule) || gasSchedule.LatestGasSchedule() == nil { testGasSchedule := wasmConfig.MakeGasMapForTests() @@ -517,6 +536,7 @@ func CreateVMAndBlockchainHookAndDataPool( EpochNotifier: epochNotifierInstance, EnableEpochsHandler: enableEpochsHandler, MaxNumNodesInTransferRole: 100, + GuardedAccountHandler: guardedAccountHandler, } argsBuiltIn.AutomaticCrawlerAddresses = integrationTests.GenerateOneAddressPerShard(argsBuiltIn.ShardCoordinator) builtInFuncFactory, _ := 
builtInFunctions.CreateBuiltInFunctionsFactory(argsBuiltIn) @@ -591,6 +611,12 @@ func CreateVMAndBlockchainHookMeta( gasSchedule = mock.NewGasScheduleNotifierMock(testGasSchedule) } + var err error + guardedAccountHandler, err := guardian.NewGuardedAccount(integrationtests.TestMarshalizer, globalEpochNotifier, EpochGuardianDelay) + if err != nil { + panic(err) + } + enableEpochsHandler, _ := enablers.NewEnableEpochsHandler(enableEpochsConfig, globalEpochNotifier) argsBuiltIn := builtInFunctions.ArgsCreateBuiltInFunctionContainer{ GasSchedule: gasSchedule, @@ -603,6 +629,7 @@ func CreateVMAndBlockchainHookMeta( EpochNotifier: globalEpochNotifier, EnableEpochsHandler: enableEpochsHandler, MaxNumNodesInTransferRole: 100, + GuardedAccountHandler: guardedAccountHandler, } argsBuiltIn.AutomaticCrawlerAddresses = integrationTests.GenerateOneAddressPerShard(argsBuiltIn.ShardCoordinator) builtInFuncFactory, _ := builtInFunctions.CreateBuiltInFunctionsFactory(argsBuiltIn) @@ -682,11 +709,11 @@ func createSystemSCConfig() *config.SystemSmartContractsConfig { }, Active: config.GovernanceSystemSCConfigActive{ ProposalCost: "500", - MinQuorum: "50", - MinPassThreshold: "50", - MinVetoThreshold: "50", + MinQuorum: 0.5, + MinPassThreshold: 0.5, + MinVetoThreshold: 0.5, }, - FirstWhitelistedAddress: "3132333435363738393031323334353637383930313233343536373839303234", + ChangeConfigAddress: "3132333435363738393031323334353637383930313233343536373839303234", }, StakingSystemSCConfig: config.StakingSystemSCConfig{ GenesisNodePrice: "2500000000000000000000", @@ -747,6 +774,7 @@ func CreateTxProcessorWithOneSCExecutorWithVMs( wasmVMChangeLocker common.Locker, poolsHolder dataRetriever.PoolsHolder, epochNotifierInstance process.EpochNotifier, + guardianChecker process.GuardianChecker, ) (*ResultsCreateTxProcessor, error) { if check.IfNil(poolsHolder) { poolsHolder = dataRetrieverMock.NewPoolsHolderMock() @@ -828,6 +856,8 @@ func CreateTxProcessorWithOneSCExecutorWithVMs( ArgsParser: smartContract.NewArgumentParser(), ScrForwarder: intermediateTxHandler, EnableEpochsHandler: enableEpochsHandler, + TxVersionChecker: versioning.NewTxVersionChecker(minTransactionVersion), + GuardianChecker: guardianChecker, } txProcessor, err := transaction.NewTxProcessor(argsNewTxProcessor) if err != nil { @@ -840,15 +870,17 @@ func CreateTxProcessorWithOneSCExecutorWithVMs( return nil, err } - interimProcFactory, err := shard.NewIntermediateProcessorsContainerFactory( - shardCoordinator, - integrationtests.TestMarshalizer, - integrationtests.TestHasher, - pubkeyConv, - disabled.NewChainStorer(), - poolsHolder, - &processDisabled.FeeHandler{}, - ) + argsFactory := shard.ArgsNewIntermediateProcessorsContainerFactory{ + ShardCoordinator: shardCoordinator, + Marshalizer: integrationtests.TestMarshalizer, + Hasher: integrationtests.TestHasher, + PubkeyConverter: pubkeyConv, + Store: disabled.NewChainStorer(), + PoolsHolder: poolsHolder, + EconomicsFee: &processDisabled.FeeHandler{}, + EnableEpochsHandler: enableEpochsHandler, + } + interimProcFactory, err := shard.NewIntermediateProcessorsContainerFactory(argsFactory) if err != nil { return nil, err } @@ -998,6 +1030,12 @@ func CreatePreparedTxProcessorAndAccountsWithVMs( enableEpochsHandler, _ := enablers.NewEnableEpochsHandler(enableEpochsConfig, epochNotifierInstance) chainHandler := &testscommon.ChainHandlerStub{} + var err error + guardedAccountHandler, err := guardian.NewGuardedAccount(integrationtests.TestMarshalizer, epochNotifierInstance, EpochGuardianDelay) + if err != nil { + 
return nil, err + } + vmContainer, blockchainHook, pool := CreateVMAndBlockchainHookAndDataPool( accounts, nil, @@ -1007,6 +1045,7 @@ func CreatePreparedTxProcessorAndAccountsWithVMs( epochNotifierInstance, enableEpochsHandler, chainHandler, + guardedAccountHandler, ) res, err := CreateTxProcessorWithOneSCExecutorWithVMs( accounts, @@ -1018,22 +1057,24 @@ func CreatePreparedTxProcessorAndAccountsWithVMs( wasmVMChangeLocker, pool, epochNotifierInstance, + guardedAccountHandler, ) if err != nil { return nil, err } return &VMTestContext{ - TxProcessor: res.TxProc, - ScProcessor: res.SCProc, - Accounts: accounts, - BlockchainHook: blockchainHook, - VMContainer: vmContainer, - TxFeeHandler: feeAccumulator, - ScForwarder: res.IntermediateTxProc, - EpochNotifier: epochNotifierInstance, - EnableEpochsHandler: enableEpochsHandler, - ChainHandler: chainHandler, + TxProcessor: res.TxProc, + ScProcessor: res.SCProc, + Accounts: accounts, + BlockchainHook: blockchainHook, + VMContainer: vmContainer, + TxFeeHandler: feeAccumulator, + ScForwarder: res.IntermediateTxProc, + EpochNotifier: epochNotifierInstance, + EnableEpochsHandler: enableEpochsHandler, + ChainHandler: chainHandler, + GuardedAccountsHandler: guardedAccountHandler, }, nil } @@ -1092,6 +1133,12 @@ func CreatePreparedTxProcessorWithVMsWithShardCoordinatorDBAndGas( enableEpochsHandler, _ := enablers.NewEnableEpochsHandler(enableEpochsConfig, epochNotifierInstance) chainHandler := &testscommon.ChainHandlerStub{} + var err error + guardedAccountHandler, err := guardian.NewGuardedAccount(integrationtests.TestMarshalizer, epochNotifierInstance, EpochGuardianDelay) + if err != nil { + return nil, err + } + vmContainer, blockchainHook, pool := CreateVMAndBlockchainHookAndDataPool( accounts, gasScheduleNotifier, @@ -1101,6 +1148,7 @@ func CreatePreparedTxProcessorWithVMsWithShardCoordinatorDBAndGas( epochNotifierInstance, enableEpochsHandler, chainHandler, + guardedAccountHandler, ) res, err := CreateTxProcessorWithOneSCExecutorWithVMs( accounts, @@ -1112,27 +1160,30 @@ func CreatePreparedTxProcessorWithVMsWithShardCoordinatorDBAndGas( wasmVMChangeLocker, pool, epochNotifierInstance, + guardedAccountHandler, ) if err != nil { return nil, err } return &VMTestContext{ - TxProcessor: res.TxProc, - ScProcessor: res.SCProc, - Accounts: accounts, - BlockchainHook: blockchainHook, - VMContainer: vmContainer, - TxFeeHandler: feeAccumulator, - ScForwarder: res.IntermediateTxProc, - ShardCoordinator: shardCoordinator, - EconomicsData: res.EconomicsHandler, - TxCostHandler: res.CostHandler, - TxsLogsProcessor: res.TxLogProc, - GasSchedule: gasScheduleNotifier, - EpochNotifier: epochNotifierInstance, - EnableEpochsHandler: enableEpochsHandler, - ChainHandler: chainHandler, + TxProcessor: res.TxProc, + ScProcessor: res.SCProc, + Accounts: accounts, + BlockchainHook: blockchainHook, + VMContainer: vmContainer, + TxFeeHandler: feeAccumulator, + ScForwarder: res.IntermediateTxProc, + ShardCoordinator: shardCoordinator, + EconomicsData: res.EconomicsHandler, + TxCostHandler: res.CostHandler, + TxsLogsProcessor: res.TxLogProc, + GasSchedule: gasScheduleNotifier, + EpochNotifier: epochNotifierInstance, + EnableEpochsHandler: enableEpochsHandler, + ChainHandler: chainHandler, + Marshalizer: integrationtests.TestMarshalizer, + GuardedAccountsHandler: guardedAccountHandler, }, nil } @@ -1154,6 +1205,13 @@ func CreateTxProcessorWasmVMWithGasSchedule( epochNotifierInstance := forking.NewGenericEpochNotifier() enableEpochsHandler, _ := 
enablers.NewEnableEpochsHandler(enableEpochsConfig, epochNotifierInstance) chainHandler := &testscommon.ChainHandlerStub{} + + var err error + guardedAccountHandler, err := guardian.NewGuardedAccount(integrationtests.TestMarshalizer, epochNotifierInstance, EpochGuardianDelay) + if err != nil { + return nil, err + } + vmContainer, blockchainHook, pool := CreateVMAndBlockchainHookAndDataPool( accounts, gasScheduleNotifier, @@ -1163,6 +1221,7 @@ func CreateTxProcessorWasmVMWithGasSchedule( epochNotifierInstance, enableEpochsHandler, chainHandler, + guardedAccountHandler, ) res, err := CreateTxProcessorWithOneSCExecutorWithVMs( accounts, @@ -1174,23 +1233,25 @@ func CreateTxProcessorWasmVMWithGasSchedule( wasmVMChangeLocker, pool, epochNotifierInstance, + guardedAccountHandler, ) if err != nil { return nil, err } return &VMTestContext{ - TxProcessor: res.TxProc, - ScProcessor: res.SCProc, - Accounts: accounts, - BlockchainHook: blockchainHook, - VMContainer: vmContainer, - TxFeeHandler: feeAccumulator, - ScForwarder: res.IntermediateTxProc, - GasSchedule: gasScheduleNotifier, - EpochNotifier: epochNotifierInstance, - EnableEpochsHandler: enableEpochsHandler, - ChainHandler: chainHandler, + TxProcessor: res.TxProc, + ScProcessor: res.SCProc, + Accounts: accounts, + BlockchainHook: blockchainHook, + VMContainer: vmContainer, + TxFeeHandler: feeAccumulator, + ScForwarder: res.IntermediateTxProc, + GasSchedule: gasScheduleNotifier, + EpochNotifier: epochNotifierInstance, + EnableEpochsHandler: enableEpochsHandler, + ChainHandler: chainHandler, + GuardedAccountsHandler: guardedAccountHandler, }, nil } @@ -1207,6 +1268,13 @@ func CreateTxProcessorWasmVMWithVMConfig( epochNotifierInstance := forking.NewGenericEpochNotifier() enableEpochsHandler, _ := enablers.NewEnableEpochsHandler(enableEpochsConfig, epochNotifierInstance) chainHandler := &testscommon.ChainHandlerStub{} + + var err error + guardedAccountHandler, err := guardian.NewGuardedAccount(integrationtests.TestMarshalizer, epochNotifierInstance, EpochGuardianDelay) + if err != nil { + return nil, err + } + vmContainer, blockchainHook, pool := CreateVMAndBlockchainHookAndDataPool( accounts, gasScheduleNotifier, @@ -1216,6 +1284,7 @@ func CreateTxProcessorWasmVMWithVMConfig( epochNotifierInstance, enableEpochsHandler, chainHandler, + guardedAccountHandler, ) res, err := CreateTxProcessorWithOneSCExecutorWithVMs( accounts, @@ -1227,24 +1296,26 @@ func CreateTxProcessorWasmVMWithVMConfig( wasmVMChangeLocker, pool, epochNotifierInstance, + guardedAccountHandler, ) if err != nil { return nil, err } return &VMTestContext{ - TxProcessor: res.TxProc, - ScProcessor: res.SCProc, - Accounts: accounts, - BlockchainHook: blockchainHook, - VMContainer: vmContainer, - TxFeeHandler: feeAccumulator, - ScForwarder: res.IntermediateTxProc, - GasSchedule: gasScheduleNotifier, - VMConfiguration: vmConfig, - EpochNotifier: epochNotifierInstance, - EnableEpochsHandler: enableEpochsHandler, - ChainHandler: chainHandler, + TxProcessor: res.TxProc, + ScProcessor: res.SCProc, + Accounts: accounts, + BlockchainHook: blockchainHook, + VMContainer: vmContainer, + TxFeeHandler: feeAccumulator, + ScForwarder: res.IntermediateTxProc, + GasSchedule: gasScheduleNotifier, + VMConfiguration: vmConfig, + EpochNotifier: epochNotifierInstance, + EnableEpochsHandler: enableEpochsHandler, + ChainHandler: chainHandler, + GuardedAccountsHandler: guardedAccountHandler, }, nil } @@ -1371,6 +1442,12 @@ func GetVmOutput(gasSchedule map[string]map[string]uint64, accnts state.Accounts 
gasScheduleNotifier := mock.NewGasScheduleNotifierMock(gasSchedule) epochNotifierInstance := forking.NewGenericEpochNotifier() enableEpochsHandler, _ := enablers.NewEnableEpochsHandler(config.EnableEpochs{}, epochNotifierInstance) + + guardedAccountHandler, err := guardian.NewGuardedAccount(integrationtests.TestMarshalizer, epochNotifierInstance, EpochGuardianDelay) + if err != nil { + panic(err) + } + vmContainer, blockChainHook, _ := CreateVMAndBlockchainHookAndDataPool( accnts, gasScheduleNotifier, @@ -1380,13 +1457,14 @@ func GetVmOutput(gasSchedule map[string]map[string]uint64, accnts state.Accounts epochNotifierInstance, enableEpochsHandler, &testscommon.ChainHandlerStub{}, + guardedAccountHandler, ) defer func() { _ = vmContainer.Close() }() - feeHandler := &mock.FeeHandlerStub{ - MaxGasLimitPerBlockCalled: func() uint64 { + feeHandler := &economicsmocks.EconomicsHandlerStub{ + MaxGasLimitPerBlockCalled: func(_ uint32) uint64 { return uint64(math.MaxUint64) }, } @@ -1429,6 +1507,7 @@ func ComputeGasLimit(gasSchedule map[string]map[string]uint64, testContext *VMTe testContext.EpochNotifier, testContext.EnableEpochsHandler, &testscommon.ChainHandlerStub{}, + testContext.GuardedAccountsHandler, ) defer func() { _ = vmContainer.Close() @@ -1526,6 +1605,12 @@ func CreatePreparedTxProcessorWithVMsMultiShard(selfShardID uint32, enableEpochs epochNotifierInstance := forking.NewGenericEpochNotifier() enableEpochsHandler, _ := enablers.NewEnableEpochsHandler(enableEpochsConfig, epochNotifierInstance) chainHandler := &testscommon.ChainHandlerStub{} + + guardedAccountHandler, err := guardian.NewGuardedAccount(integrationtests.TestMarshalizer, epochNotifierInstance, EpochGuardianDelay) + if err != nil { + return nil, err + } + if selfShardID == core.MetachainShardId { vmContainer, blockchainHook = CreateVMAndBlockchainHookMeta(accounts, nil, shardCoordinator, enableEpochsConfig) } else { @@ -1539,6 +1624,7 @@ func CreatePreparedTxProcessorWithVMsMultiShard(selfShardID uint32, enableEpochs epochNotifierInstance, enableEpochsHandler, chainHandler, + guardedAccountHandler, ) } @@ -1552,26 +1638,28 @@ func CreatePreparedTxProcessorWithVMsMultiShard(selfShardID uint32, enableEpochs wasmVMChangeLocker, nil, epochNotifierInstance, + guardedAccountHandler, ) if err != nil { return nil, err } return &VMTestContext{ - TxProcessor: res.TxProc, - ScProcessor: res.SCProc, - Accounts: accounts, - BlockchainHook: blockchainHook, - VMContainer: vmContainer, - TxFeeHandler: feeAccumulator, - ShardCoordinator: shardCoordinator, - ScForwarder: res.IntermediateTxProc, - EconomicsData: res.EconomicsHandler, - Marshalizer: integrationtests.TestMarshalizer, - TxsLogsProcessor: res.TxLogProc, - EpochNotifier: epochNotifierInstance, - EnableEpochsHandler: enableEpochsHandler, - ChainHandler: chainHandler, + TxProcessor: res.TxProc, + ScProcessor: res.SCProc, + Accounts: accounts, + BlockchainHook: blockchainHook, + VMContainer: vmContainer, + TxFeeHandler: feeAccumulator, + ShardCoordinator: shardCoordinator, + ScForwarder: res.IntermediateTxProc, + EconomicsData: res.EconomicsHandler, + Marshalizer: integrationtests.TestMarshalizer, + TxsLogsProcessor: res.TxLogProc, + EpochNotifier: epochNotifierInstance, + EnableEpochsHandler: enableEpochsHandler, + ChainHandler: chainHandler, + GuardedAccountsHandler: guardedAccountHandler, }, nil } diff --git a/integrationTests/vm/txsFee/asyncCall_test.go b/integrationTests/vm/txsFee/asyncCall_test.go index 20c635f0a94..ffa618c256d 100644 --- 
a/integrationTests/vm/txsFee/asyncCall_test.go +++ b/integrationTests/vm/txsFee/asyncCall_test.go @@ -42,7 +42,6 @@ func TestAsyncCallShouldWork(t *testing.T) { _, _ = vm.CreateAccount(testContext.Accounts, ownerAddr, 0, egldBalance) _, _ = vm.CreateAccount(testContext.Accounts, senderAddr, 0, egldBalance) - gasPrice := uint64(10) ownerAccount, _ := testContext.Accounts.LoadAccount(ownerAddr) deployGasLimit := uint64(50000) @@ -101,7 +100,6 @@ func TestMinterContractWithAsyncCalls(t *testing.T) { token := []byte("miiutoken") roles := [][]byte{[]byte(core.ESDTRoleNFTCreate)} - gasPrice := uint64(10) ownerAccount, _ := testContext.Accounts.LoadAccount(ownerAddr) deployGasLimit := uint64(500000) pathToContract := "testdata/minter/minter.wasm" diff --git a/integrationTests/vm/txsFee/asyncESDT_test.go b/integrationTests/vm/txsFee/asyncESDT_test.go index 28415adc9e5..0e3f0a41fc1 100644 --- a/integrationTests/vm/txsFee/asyncESDT_test.go +++ b/integrationTests/vm/txsFee/asyncESDT_test.go @@ -6,15 +6,19 @@ package txsFee import ( + "context" "encoding/hex" "math/big" "testing" "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/common/errChan" "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/integrationTests/vm" "github.com/multiversx/mx-chain-go/integrationTests/vm/txsFee/utils" "github.com/multiversx/mx-chain-go/process" + "github.com/multiversx/mx-chain-go/state" vmcommon "github.com/multiversx/mx-chain-vm-common-go" "github.com/stretchr/testify/require" ) @@ -36,7 +40,6 @@ func TestAsyncESDTCallShouldWork(t *testing.T) { utils.CreateAccountWithESDTBalance(t, testContext.Accounts, sndAddr, egldBalance, token, 0, esdtBalance) // deploy 2 contracts - gasPrice := uint64(10) ownerAccount, _ := testContext.Accounts.LoadAccount(ownerAddr) deployGasLimit := uint64(50000) @@ -89,7 +92,6 @@ func TestAsyncESDTCallSecondScRefusesPayment(t *testing.T) { utils.CreateAccountWithESDTBalance(t, testContext.Accounts, sndAddr, egldBalance, token, 0, esdtBalance) // deploy 2 contracts - gasPrice := uint64(10) ownerAccount, _ := testContext.Accounts.LoadAccount(ownerAddr) deployGasLimit := uint64(50000) @@ -143,7 +145,6 @@ func TestAsyncESDTCallsOutOfGas(t *testing.T) { utils.CreateAccountWithESDTBalance(t, testContext.Accounts, sndAddr, egldBalance, token, 0, esdtBalance) // deploy 2 contracts - gasPrice := uint64(10) ownerAccount, _ := testContext.Accounts.LoadAccount(ownerAddr) deployGasLimit := uint64(50000) @@ -193,7 +194,6 @@ func TestAsyncMultiTransferOnCallback(t *testing.T) { utils.CreateAccountWithESDTBalance(t, testContext.Accounts, ownerAddr, big.NewInt(1000000000), sftTokenID, sftNonce, sftBalance) utils.CheckESDTNFTBalance(t, testContext, ownerAddr, sftTokenID, sftNonce, sftBalance) - gasPrice := uint64(10) ownerAccount, _ := testContext.Accounts.LoadAccount(ownerAddr) deployGasLimit := uint64(1000000) txGasLimit := uint64(1000000) @@ -287,7 +287,6 @@ func TestAsyncMultiTransferOnCallAndOnCallback(t *testing.T) { utils.CreateAccountWithESDTBalance(t, testContext.Accounts, ownerAddr, big.NewInt(1000000000), sftTokenID, sftNonce, sftBalance) utils.CheckESDTNFTBalance(t, testContext, ownerAddr, sftTokenID, sftNonce, sftBalance) - gasPrice := uint64(10) ownerAccount, _ := testContext.Accounts.LoadAccount(ownerAddr) deployGasLimit := uint64(1000000) txGasLimit := uint64(1000000) @@ -387,7 +386,6 @@ func TestSendNFTToContractWith0Function(t *testing.T) { utils.CreateAccountWithESDTBalance(t, 
testContext.Accounts, ownerAddr, big.NewInt(1000000000), sftTokenID, sftNonce, sftBalance) utils.CheckESDTNFTBalance(t, testContext, ownerAddr, sftTokenID, sftNonce, sftBalance) - gasPrice := uint64(10) ownerAccount, _ := testContext.Accounts.LoadAccount(ownerAddr) deployGasLimit := uint64(1000000) txGasLimit := uint64(1000000) @@ -437,7 +435,6 @@ func TestSendNFTToContractWith0FunctionNonPayable(t *testing.T) { utils.CreateAccountWithESDTBalance(t, testContext.Accounts, ownerAddr, big.NewInt(1000000000), sftTokenID, sftNonce, sftBalance) utils.CheckESDTNFTBalance(t, testContext, ownerAddr, sftTokenID, sftNonce, sftBalance) - gasPrice := uint64(10) ownerAccount, _ := testContext.Accounts.LoadAccount(ownerAddr) deployGasLimit := uint64(1000000) txGasLimit := uint64(1000000) @@ -474,3 +471,80 @@ func TestSendNFTToContractWith0FunctionNonPayable(t *testing.T) { _, err = testContext.Accounts.Commit() require.Nil(t, err) } + +func TestAsyncESDTCallForThirdContractShouldWork(t *testing.T) { + testContext, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(0, config.EnableEpochs{}) + require.Nil(t, err) + defer testContext.Close() + + function1 := []byte("add_queued_call") + function2 := []byte("forward_queued_calls") + + egldBalance := big.NewInt(100000000) + ownerAddr := []byte("owner-78901234567890123456789000") + _, _ = vm.CreateAccount(testContext.Accounts, ownerAddr, 0, egldBalance) + + // create an address with ESDT token + sndAddr := []byte("sender-8901234567890123456789000") + + esdtBalance := big.NewInt(100000000) + esdtTransferValue := big.NewInt(5000) + token := []byte("miiutoken") + utils.CreateAccountWithESDTBalance(t, testContext.Accounts, sndAddr, egldBalance, token, 0, esdtBalance) + + // deploy contract + gasPrice := uint64(10) + ownerAccount, _ := testContext.Accounts.LoadAccount(ownerAddr) + deployGasLimit := uint64(50000) + scAddress := utils.DoDeploySecond(t, testContext, "./testdata/third/third.wasm", ownerAccount, gasPrice, deployGasLimit, nil, big.NewInt(0)) + + testContext.TxFeeHandler.CreateBlockStarted(getZeroGasAndFees()) + utils.CleanAccumulatedIntermediateTransactions(t, testContext) + + // execute first call + gasLimit := uint64(500000) + tx := utils.CreateESDTTransferTx(0, sndAddr, scAddress, token, esdtTransferValue, gasPrice, gasLimit) + tx.Data = []byte(string(tx.Data) + "@" + hex.EncodeToString(function1) + "@01@" + hex.EncodeToString(scAddress) + "@" + hex.EncodeToString(function2)) + + retCode, err := testContext.TxProcessor.ProcessTransaction(tx) + require.Equal(t, vmcommon.UserError, retCode) + require.Nil(t, err) + + utils.CheckESDTBalance(t, testContext, sndAddr, token, esdtBalance) + utils.CheckESDTBalance(t, testContext, scAddress, token, big.NewInt(0)) + + // execute second call + tx = utils.CreateESDTTransferTx(1, sndAddr, scAddress, token, big.NewInt(5000), gasPrice, gasLimit) + tx.Data = []byte(string(tx.Data) + "@" + hex.EncodeToString(function2)) + + retCode, err = testContext.TxProcessor.ProcessTransaction(tx) + require.Equal(t, vmcommon.Ok, retCode) + require.Nil(t, err) + + _, err = testContext.Accounts.Commit() + require.Nil(t, err) + + utils.CheckESDTBalance(t, testContext, sndAddr, token, big.NewInt(0).Sub(esdtBalance, esdtTransferValue)) + utils.CheckESDTBalance(t, testContext, scAddress, token, esdtTransferValue) + + // try to recreate the data trie + scAccount, err := testContext.Accounts.LoadAccount(scAddress) + require.Nil(t, err) + userScAccount := scAccount.(state.UserAccountHandler) + roothash := userScAccount.GetRootHash() + 
log.Info("recreating data trie", "roothash", roothash) + + leaves := &common.TrieIteratorChannels{ + LeavesChan: make(chan core.KeyValueHolder, 1), + ErrChan: errChan.NewErrChanWrapper(), + } + err = testContext.Accounts.GetAllLeaves(leaves, context.Background(), roothash) + require.Nil(t, err) + + for range leaves.LeavesChan { + // do nothing, just iterate + } + + err = leaves.ErrChan.ReadFromChanNonBlocking() + require.Nil(t, err) +} diff --git a/integrationTests/vm/txsFee/backwardsCompatibility_test.go b/integrationTests/vm/txsFee/backwardsCompatibility_test.go index abc67b92d16..b4a73596edb 100644 --- a/integrationTests/vm/txsFee/backwardsCompatibility_test.go +++ b/integrationTests/vm/txsFee/backwardsCompatibility_test.go @@ -30,7 +30,6 @@ func TestMoveBalanceSelfShouldWorkAndConsumeTxFeeWhenAllFlagsAreDisabled(t *test sndAddr := []byte("12345678901234567890123456789012") senderNonce := uint64(0) senderBalance := big.NewInt(10000) - gasPrice := uint64(10) gasLimit := uint64(100) _, _ = vm.CreateAccount(testContext.Accounts, sndAddr, 0, senderBalance) @@ -72,7 +71,6 @@ func TestMoveBalanceAllFlagsDisabledLessBalanceThanGasLimitMulGasPrice(t *testin sndAddr := []byte("12345678901234567890123456789012") senderNonce := uint64(0) senderBalance := big.NewInt(10000) - gasPrice := uint64(10) gasLimit := uint64(10000) _, _ = vm.CreateAccount(testContext.Accounts, sndAddr, 0, senderBalance) @@ -97,7 +95,6 @@ func TestMoveBalanceSelfShouldWorkAndConsumeTxFeeWhenSomeFlagsAreDisabled(t *tes sndAddr := []byte("12345678901234567890123456789012") senderNonce := uint64(0) senderBalance := big.NewInt(10000) - gasPrice := uint64(10) gasLimit := uint64(100) _, _ = vm.CreateAccount(testContext.Accounts, sndAddr, 0, senderBalance) diff --git a/integrationTests/vm/txsFee/builtInFunctions_test.go b/integrationTests/vm/txsFee/builtInFunctions_test.go index 0367decab5c..c321a85df89 100644 --- a/integrationTests/vm/txsFee/builtInFunctions_test.go +++ b/integrationTests/vm/txsFee/builtInFunctions_test.go @@ -36,7 +36,6 @@ func TestBuildInFunctionChangeOwnerCallShouldWork(t *testing.T) { utils.CleanAccumulatedIntermediateTransactions(t, testContext) newOwner := []byte("12345678901234567890123456789112") - gasPrice := uint64(10) gasLimit := uint64(1000) txData := []byte(core.BuiltInFunctionChangeOwnerAddress + "@" + hex.EncodeToString(newOwner)) @@ -72,7 +71,6 @@ func TestBuildInFunctionChangeOwnerCallWrongOwnerShouldConsumeGas(t *testing.T) sndAddr := []byte("12345678901234567890123456789113") newOwner := []byte("12345678901234567890123456789112") - gasPrice := uint64(10) gasLimit := uint64(1000) _, _ = vm.CreateAccount(testContext.Accounts, sndAddr, 0, big.NewInt(100000)) @@ -109,7 +107,6 @@ func TestBuildInFunctionChangeOwnerInvalidAddressShouldConsumeGas(t *testing.T) testContext.TxFeeHandler.CreateBlockStarted(getZeroGasAndFees()) newOwner := []byte("invalidAddress") - gasPrice := uint64(10) gasLimit := uint64(1000) txData := []byte(core.BuiltInFunctionChangeOwnerAddress + "@" + hex.EncodeToString(newOwner)) @@ -143,7 +140,6 @@ func TestBuildInFunctionChangeOwnerCallInsufficientGasLimitShouldNotConsumeGas(t testContext.TxFeeHandler.CreateBlockStarted(getZeroGasAndFees()) newOwner := []byte("12345678901234567890123456789112") - gasPrice := uint64(10) _, _ = vm.CreateAccount(testContext.Accounts, owner, 1, big.NewInt(10970)) @@ -181,7 +177,6 @@ func TestBuildInFunctionChangeOwnerOutOfGasShouldConsumeGas(t *testing.T) { testContext.TxFeeHandler.CreateBlockStarted(getZeroGasAndFees()) newOwner := 
[]byte("12345678901234567890123456789112") - gasPrice := uint64(10) txData := []byte(core.BuiltInFunctionChangeOwnerAddress + "@" + hex.EncodeToString(newOwner)) gasLimit := uint64(len(txData) + 1) @@ -226,7 +221,6 @@ func TestBuildInFunctionSaveKeyValue_WrongDestination(t *testing.T) { txData := []byte(core.BuiltInFunctionSaveKeyValue + "@01@02") gasLimit := uint64(len(txData) + 1) - gasPrice := uint64(10) tx := vm.CreateTransaction(0, big.NewInt(0), sndAddr, destAddr, gasPrice, gasLimit, txData) retCode, err := testContext.TxProcessor.ProcessTransaction(tx) @@ -259,7 +253,6 @@ func TestBuildInFunctionSaveKeyValue_NotEnoughGasFor3rdSave(t *testing.T) { txData := []byte(core.BuiltInFunctionSaveKeyValue + "@01000000@02000000@03000000@04000000@05000000@06000000") gasLimit := uint64(len(txData) + 20) - gasPrice := uint64(10) tx := vm.CreateTransaction(0, big.NewInt(0), sndAddr, sndAddr, gasPrice, gasLimit, txData) retCode, err := testContext.TxProcessor.ProcessTransaction(tx) diff --git a/integrationTests/vm/txsFee/dns_test.go b/integrationTests/vm/txsFee/dns_test.go index 3d8edc6c63a..1d4a439a692 100644 --- a/integrationTests/vm/txsFee/dns_test.go +++ b/integrationTests/vm/txsFee/dns_test.go @@ -30,7 +30,6 @@ func TestDeployDNSContract_TestRegisterAndResolveAndSendTxWithSndAndRcvUserName( sndAddr := []byte("12345678901234567890123456789112") senderBalance := big.NewInt(10000000) - gasPrice := uint64(10) gasLimit := uint64(200000) rcvAddr := []byte("12345678901234567890123456789113") diff --git a/integrationTests/vm/txsFee/esdtLocalBurn_test.go b/integrationTests/vm/txsFee/esdtLocalBurn_test.go index 636e9b7c7f0..c76957928a5 100644 --- a/integrationTests/vm/txsFee/esdtLocalBurn_test.go +++ b/integrationTests/vm/txsFee/esdtLocalBurn_test.go @@ -26,7 +26,6 @@ func TestESDTLocalBurnShouldWork(t *testing.T) { roles := [][]byte{[]byte(core.ESDTRoleLocalMint), []byte(core.ESDTRoleLocalBurn)} utils.CreateAccountWithESDTBalanceAndRoles(t, testContext.Accounts, sndAddr, egldBalance, token, 0, esdtBalance, roles) - gasPrice := uint64(10) gasLimit := uint64(40) tx := utils.CreateESDTLocalBurnTx(0, sndAddr, sndAddr, token, big.NewInt(100), gasPrice, gasLimit) retCode, err := testContext.TxProcessor.ProcessTransaction(tx) @@ -57,7 +56,6 @@ func TestESDTLocalBurnMoreThanTotalBalanceShouldErr(t *testing.T) { roles := [][]byte{[]byte(core.ESDTRoleLocalMint), []byte(core.ESDTRoleLocalBurn)} utils.CreateAccountWithESDTBalanceAndRoles(t, testContext.Accounts, sndAddr, egldBalance, token, 0, esdtBalance, roles) - gasPrice := uint64(10) gasLimit := uint64(60) tx := utils.CreateESDTLocalBurnTx(0, sndAddr, sndAddr, token, big.NewInt(100000001), gasPrice, gasLimit) retCode, err := testContext.TxProcessor.ProcessTransaction(tx) @@ -87,7 +85,6 @@ func TestESDTLocalBurnNotAllowedShouldErr(t *testing.T) { token := []byte("miiutoken") utils.CreateAccountWithESDTBalance(t, testContext.Accounts, sndAddr, egldBalance, token, 0, esdtBalance) - gasPrice := uint64(10) gasLimit := uint64(40) tx := utils.CreateESDTLocalBurnTx(0, sndAddr, sndAddr, token, big.NewInt(100), gasPrice, gasLimit) retCode, err := testContext.TxProcessor.ProcessTransaction(tx) diff --git a/integrationTests/vm/txsFee/esdtLocalMint_test.go b/integrationTests/vm/txsFee/esdtLocalMint_test.go index 440e0d2f7d0..491d9102372 100644 --- a/integrationTests/vm/txsFee/esdtLocalMint_test.go +++ b/integrationTests/vm/txsFee/esdtLocalMint_test.go @@ -26,7 +26,6 @@ func TestESDTLocalMintShouldWork(t *testing.T) { roles := [][]byte{[]byte(core.ESDTRoleLocalMint), 
[]byte(core.ESDTRoleLocalBurn)} utils.CreateAccountWithESDTBalanceAndRoles(t, testContext.Accounts, sndAddr, egldBalance, token, 0, esdtBalance, roles) - gasPrice := uint64(10) gasLimit := uint64(40) tx := utils.CreateESDTLocalMintTx(0, sndAddr, sndAddr, token, big.NewInt(100), gasPrice, gasLimit) retCode, err := testContext.TxProcessor.ProcessTransaction(tx) @@ -56,7 +55,6 @@ func TestESDTLocalMintNotAllowedShouldErr(t *testing.T) { token := []byte("miiutoken") utils.CreateAccountWithESDTBalance(t, testContext.Accounts, sndAddr, egldBalance, token, 0, esdtBalance) - gasPrice := uint64(10) gasLimit := uint64(40) tx := utils.CreateESDTLocalMintTx(0, sndAddr, sndAddr, token, big.NewInt(100), gasPrice, gasLimit) retCode, err := testContext.TxProcessor.ProcessTransaction(tx) diff --git a/integrationTests/vm/txsFee/esdt_test.go b/integrationTests/vm/txsFee/esdt_test.go index b3196010fc6..da865619d4e 100644 --- a/integrationTests/vm/txsFee/esdt_test.go +++ b/integrationTests/vm/txsFee/esdt_test.go @@ -30,7 +30,6 @@ func TestESDTTransferShouldWork(t *testing.T) { token := []byte("miiutoken") utils.CreateAccountWithESDTBalance(t, testContext.Accounts, sndAddr, egldBalance, token, 0, esdtBalance) - gasPrice := uint64(10) gasLimit := uint64(40) tx := utils.CreateESDTTransferTx(0, sndAddr, rcvAddr, token, big.NewInt(100), gasPrice, gasLimit) retCode, err := testContext.TxProcessor.ProcessTransaction(tx) @@ -67,7 +66,6 @@ func TestESDTTransferShouldWorkToMuchGasShouldConsumeAllGas(t *testing.T) { token := []byte("miiutoken") utils.CreateAccountWithESDTBalance(t, testContext.Accounts, sndAddr, egldBalance, token, 0, esdtBalance) - gasPrice := uint64(10) gasLimit := uint64(1000) tx := utils.CreateESDTTransferTx(0, sndAddr, rcvAddr, token, big.NewInt(100), gasPrice, gasLimit) retCode, err := testContext.TxProcessor.ProcessTransaction(tx) @@ -104,7 +102,6 @@ func TestESDTTransferInvalidESDTValueShouldConsumeGas(t *testing.T) { token := []byte("miiutoken") utils.CreateAccountWithESDTBalance(t, testContext.Accounts, sndAddr, egldBalance, token, 0, esdtBalance) - gasPrice := uint64(10) gasLimit := uint64(1000) tx := utils.CreateESDTTransferTx(0, sndAddr, rcvAddr, token, big.NewInt(100000000+1), gasPrice, gasLimit) diff --git a/integrationTests/vm/txsFee/guardAccount_test.go b/integrationTests/vm/txsFee/guardAccount_test.go new file mode 100644 index 00000000000..01bab1228a5 --- /dev/null +++ b/integrationTests/vm/txsFee/guardAccount_test.go @@ -0,0 +1,1105 @@ +//go:build !race +// +build !race + +// TODO remove build condition above to allow -race -short, after Wasm VM fix + +package txsFee + +import ( + "encoding/hex" + "io/ioutil" + "math/big" + "sort" + "strings" + "sync" + "testing" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/data/block" + "github.com/multiversx/mx-chain-core-go/data/guardians" + "github.com/multiversx/mx-chain-core-go/data/smartContractResult" + "github.com/multiversx/mx-chain-core-go/data/transaction" + "github.com/multiversx/mx-chain-go/common/forking" + "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/integrationTests" + "github.com/multiversx/mx-chain-go/integrationTests/vm" + "github.com/multiversx/mx-chain-go/process" + "github.com/multiversx/mx-chain-go/process/guardian" + "github.com/multiversx/mx-chain-go/state" + "github.com/multiversx/mx-chain-go/testscommon" + testscommonIntegrationTests "github.com/multiversx/mx-chain-go/testscommon/integrationtests" + vmcommon 
"github.com/multiversx/mx-chain-vm-common-go" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +const txWithOptionVersion = 2 +const guardianSigVerificationGas = uint64(50000) +const guardAccountGas = uint64(250000) +const unGuardAccountGas = uint64(250000) +const setGuardianGas = uint64(250000) +const transferGas = uint64(1000) + +var ( + alice = []byte("alice-12345678901234567890123456") + bob = []byte("bob-1234567890123456789012345678") + charlie = []byte("charlie-123456789012345678901234") + david = []byte("david-12345678901234567890123456") + allAddresses = map[string][]byte{ + "alice": alice, + "bob": bob, + "charlie": charlie, + "david": david, + } + uuid = []byte("uuid") + transferValue = big.NewInt(2000000) + initialMint = big.NewInt(1000000000000000000) +) + +type guardianInfo struct { + address []byte + uuid []byte + epoch uint32 +} + +type guardAccountStatus struct { + isGuarded bool + active *guardianInfo + pending *guardianInfo +} + +func createUnGuardedAccountStatus() guardAccountStatus { + return guardAccountStatus{ + isGuarded: false, + active: nil, + pending: nil, + } +} + +func prepareTestContextForGuardedAccounts(tb testing.TB) *vm.VMTestContext { + unreachableEpoch := uint32(999999) + db := testscommonIntegrationTests.CreateStorer(tb.TempDir()) + gasScheduleDir := "../../../cmd/node/config/gasSchedules" + + cfg := config.GasScheduleByEpochs{ + StartEpoch: 0, + FileName: getLatestGasScheduleVersion(tb, gasScheduleDir), + } + + argsGasScheduleNotifier := forking.ArgsNewGasScheduleNotifier{ + GasScheduleConfig: config.GasScheduleConfig{ + GasScheduleByEpochs: []config.GasScheduleByEpochs{cfg}, + }, + ConfigDir: gasScheduleDir, + EpochNotifier: forking.NewGenericEpochNotifier(), + WasmVMChangeLocker: &sync.RWMutex{}, + } + gasScheduleNotifier, err := forking.NewGasScheduleNotifier(argsGasScheduleNotifier) + require.Nil(tb, err) + + testContext, err := vm.CreatePreparedTxProcessorWithVMsWithShardCoordinatorDBAndGas( + config.EnableEpochs{ + GovernanceEnableEpoch: unreachableEpoch, + WaitingListFixEnableEpoch: unreachableEpoch, + SetSenderInEeiOutputTransferEnableEpoch: unreachableEpoch, + RefactorPeersMiniBlocksEnableEpoch: unreachableEpoch, + }, + testscommon.NewMultiShardsCoordinatorMock(2), + db, + gasScheduleNotifier, + ) + require.Nil(tb, err) + + return testContext +} + +func getLatestGasScheduleVersion(tb testing.TB, directoryToSearch string) string { + fileInfoSlice, err := ioutil.ReadDir(directoryToSearch) + require.Nil(tb, err) + + gasSchedulePrefix := "gasScheduleV" + + files := make([]string, 0) + for _, fileInfo := range fileInfoSlice { + if fileInfo.IsDir() { + continue + } + if !strings.Contains(fileInfo.Name(), gasSchedulePrefix) { + continue + } + + files = append(files, fileInfo.Name()) + } + + sort.Slice(files, func(i, j int) bool { + return files[i] > files[j] + }) + + gasSchedule := files[0] + log.Info("using gas schedule", "file", gasSchedule) + + return gasSchedule +} + +func mintAddress(tb testing.TB, testContext *vm.VMTestContext, address []byte, value *big.Int, addressDescription string) { + addressString := integrationTests.TestAddressPubkeyConverter.SilentEncode(address, log) + log.Info("minting "+addressDescription+" address", "address", addressString, "value", value) + + accnt, err := testContext.Accounts.LoadAccount(address) + require.Nil(tb, err) + + userAccnt := accnt.(vmcommon.UserAccountHandler) + err = userAccnt.AddToBalance(value) + require.Nil(tb, err) + + err = testContext.Accounts.SaveAccount(accnt) 
+ require.Nil(tb, err) + + _, err = testContext.Accounts.Commit() + require.Nil(tb, err) +} + +func getNonce(testContext *vm.VMTestContext, address []byte) uint64 { + accnt, _ := testContext.Accounts.LoadAccount(address) + + return accnt.GetNonce() +} + +func getGuardiansData(tb testing.TB, testContext *vm.VMTestContext, address []byte) (*guardians.Guardian, *guardians.Guardian, bool) { + accnt, err := testContext.Accounts.GetExistingAccount(address) + require.Nil(tb, err) + + userAccnt := accnt.(state.UserAccountHandler) + guardedAccount, err := guardian.NewGuardedAccount( + testContext.Marshalizer, + testContext.EpochNotifier, + vm.EpochGuardianDelay) + require.Nil(tb, err) + + active, pending, err := guardedAccount.GetConfiguredGuardians(userAccnt) + require.Nil(tb, err) + + return active, pending, userAccnt.IsGuarded() +} + +func setGuardian(testContext *vm.VMTestContext, userAddress []byte, guardianAddress []byte, uuid []byte) (vmcommon.ReturnCode, error) { + gasLimit := setGuardianGas + transferGas + + tx := vm.CreateTransaction( + getNonce(testContext, userAddress), + big.NewInt(0), + userAddress, + userAddress, + gasPrice, + gasLimit, + []byte("SetGuardian@"+hex.EncodeToString(guardianAddress)+"@"+hex.EncodeToString(uuid))) + + return testContext.TxProcessor.ProcessTransaction(tx) +} + +func setGuardianCoSigned( + testContext *vm.VMTestContext, + userAddress []byte, + currentGuardianAddress []byte, + newGuardianAddress []byte, + uuid []byte, +) (vmcommon.ReturnCode, error) { + gasLimit := setGuardianGas + guardianSigVerificationGas + transferGas + + tx := vm.CreateTransaction( + getNonce(testContext, userAddress), + big.NewInt(0), + userAddress, + userAddress, + gasPrice, + gasLimit, + []byte("SetGuardian@"+hex.EncodeToString(newGuardianAddress)+"@"+hex.EncodeToString(uuid))) + + tx.GuardianAddr = currentGuardianAddress + tx.Options = tx.Options | transaction.MaskGuardedTransaction + tx.Version = txWithOptionVersion + + return testContext.TxProcessor.ProcessTransaction(tx) +} + +func removeGuardiansCoSigned( + testContext *vm.VMTestContext, + userAddress []byte, + currentGuardianAddress []byte, +) (vmcommon.ReturnCode, error) { + gasLimit := unGuardAccountGas + guardianSigVerificationGas + transferGas + + tx := vm.CreateTransaction( + getNonce(testContext, userAddress), + big.NewInt(0), + userAddress, + userAddress, + gasPrice, + gasLimit, + []byte("UnGuardAccount")) + + tx.GuardianAddr = currentGuardianAddress + tx.Options = tx.Options | transaction.MaskGuardedTransaction + tx.Version = txWithOptionVersion + + return testContext.TxProcessor.ProcessTransaction(tx) +} + +func guardAccount(testContext *vm.VMTestContext, userAddress []byte) (vmcommon.ReturnCode, error) { + gasLimit := guardAccountGas + transferGas + + tx := vm.CreateTransaction( + getNonce(testContext, userAddress), + big.NewInt(0), + userAddress, + userAddress, + gasPrice, + gasLimit, + []byte("GuardAccount"), + ) + return testContext.TxProcessor.ProcessTransaction(tx) +} + +func transferFunds( + testContext *vm.VMTestContext, + senderAddress []byte, + transferValue *big.Int, + receiverAddress []byte, +) error { + tx := vm.CreateTransaction( + getNonce(testContext, senderAddress), + transferValue, + senderAddress, + receiverAddress, + gasPrice, + transferGas, + make([]byte, 0)) + + _, err := testContext.TxProcessor.ProcessTransaction(tx) + return err +} + +func transferFundsCoSigned( + testContext *vm.VMTestContext, + senderAddress []byte, + transferValue *big.Int, + receiverAddress []byte, + guardianAddress []byte, 
+) error { + gasLimit := guardianSigVerificationGas + transferGas + + tx := vm.CreateTransaction( + getNonce(testContext, senderAddress), + transferValue, + senderAddress, + receiverAddress, + gasPrice, + gasLimit, + make([]byte, 0)) + tx.Version = txWithOptionVersion + tx.Options = tx.Options | transaction.MaskGuardedTransaction + tx.GuardianAddr = guardianAddress + + _, err := testContext.TxProcessor.ProcessTransaction(tx) + return err +} + +func getBalance(testContext *vm.VMTestContext, address []byte) *big.Int { + accnt, _ := testContext.Accounts.LoadAccount(address) + userAccnt := accnt.(vmcommon.UserAccountHandler) + + return userAccnt.GetBalance() +} + +func testGuardianStatus( + tb testing.TB, + testContext *vm.VMTestContext, + address []byte, + expectedStatus guardAccountStatus, +) { + active, pending, isGuarded := getGuardiansData(tb, testContext, address) + assert.Equal(tb, expectedStatus.isGuarded, isGuarded) + + testGuardianData(tb, active, expectedStatus.active) + testGuardianData(tb, pending, expectedStatus.pending) +} + +func testGuardianData( + tb testing.TB, + guardian *guardians.Guardian, + info *guardianInfo, +) { + if info == nil { + require.Nil(tb, guardian) + return + } + + require.NotNil(tb, guardian) + expectedAddress := integrationTests.TestAddressPubkeyConverter.SilentEncode(info.address, log) + providedAddress := integrationTests.TestAddressPubkeyConverter.SilentEncode(guardian.Address, log) + assert.Equal(tb, expectedAddress, providedAddress) + assert.Equal(tb, info.uuid, guardian.ServiceUID) + assert.Equal(tb, info.epoch, guardian.ActivationEpoch) +} + +func setNewEpochOnContext(testContext *vm.VMTestContext, epoch uint32) { + hdr := &block.Header{ + Epoch: epoch, + } + testContext.EpochNotifier.CheckEpoch(hdr) + log.Info("current epoch is now", "epoch", epoch) +} + +func TestGuardAccount_ShouldErrorIfInstantSetIsDoneOnANotProtectedAccount(t *testing.T) { + testContext := prepareTestContextForGuardedAccounts(t) + defer testContext.Close() + + // alice is the user, bob is the guardian + mintAddress(t, testContext, alice, initialMint, "alice") + + expectedStatus := createUnGuardedAccountStatus() + testGuardianStatus(t, testContext, alice, expectedStatus) + + returnCode, err := setGuardianCoSigned(testContext, alice, bob, bob, uuid) + require.ErrorIs(t, err, process.ErrTransactionNotExecutable) + require.Equal(t, vmcommon.UserError, returnCode) + + testGuardianStatus(t, testContext, alice, expectedStatus) +} + +func TestGuardAccount_ShouldSetGuardianOnANotProtectedAccount(t *testing.T) { + testContext := prepareTestContextForGuardedAccounts(t) + defer testContext.Close() + + // alice is the user, bob is the guardian + mintAddress(t, testContext, alice, initialMint, "alice") + + expectedStatus := createUnGuardedAccountStatus() + testGuardianStatus(t, testContext, alice, expectedStatus) + + returnCode, err := setGuardian(testContext, alice, bob, uuid) + require.Nil(t, err) + require.Equal(t, vmcommon.Ok, returnCode) + currentEpoch := uint32(0) + + expectedStatus = guardAccountStatus{ + isGuarded: false, + active: nil, + pending: &guardianInfo{ + address: bob, + uuid: uuid, + epoch: currentEpoch + vm.EpochGuardianDelay, + }, + } + testGuardianStatus(t, testContext, alice, expectedStatus) + + allLogs := testContext.TxsLogsProcessor.GetAllCurrentLogs() + require.NotNil(t, allLogs) + + event := allLogs[0].LogHandler.GetLogEvents()[0] + require.Equal(t, &transaction.Event{ + Address: alice, + Identifier: []byte(core.BuiltInFunctionSetGuardian), + Topics: [][]byte{bob, 
uuid}, + }, event) + testContext.TxsLogsProcessor.Clean() + + // can not activate guardian now + returnCode, err = guardAccount(testContext, alice) + require.Equal(t, process.ErrFailedTransaction, err) + require.Equal(t, vmcommon.UserError, returnCode) + + currentEpoch = vm.EpochGuardianDelay + setNewEpochOnContext(testContext, currentEpoch) + + expectedStatus = guardAccountStatus{ + isGuarded: false, + active: &guardianInfo{ + address: bob, + uuid: uuid, + epoch: currentEpoch, + }, + pending: nil, + } + testGuardianStatus(t, testContext, alice, expectedStatus) + + allLogs = testContext.TxsLogsProcessor.GetAllCurrentLogs() + require.NotNil(t, allLogs) + + event = allLogs[0].LogHandler.GetLogEvents()[0] + require.Equal(t, &transaction.Event{ + Address: alice, + Identifier: []byte(core.SignalErrorOperation), + Topics: [][]byte{alice, []byte("account has no active guardian")}, + Data: []byte("@6163636f756e7420686173206e6f2061637469766520677561726469616e"), + }, event) + testContext.TxsLogsProcessor.Clean() + + // can activate guardian now + returnCode, err = guardAccount(testContext, alice) + require.Nil(t, err) + require.Equal(t, vmcommon.Ok, returnCode) + + expectedStatus = guardAccountStatus{ + isGuarded: true, + active: &guardianInfo{ + address: bob, + uuid: uuid, + epoch: currentEpoch, + }, + pending: nil, + } + testGuardianStatus(t, testContext, alice, expectedStatus) + + allLogs = testContext.TxsLogsProcessor.GetAllCurrentLogs() + require.NotNil(t, allLogs) + + event = allLogs[0].LogHandler.GetLogEvents()[0] + require.Equal(t, &transaction.Event{ + Address: alice, + Identifier: []byte(core.BuiltInFunctionGuardAccount), + }, event) + testContext.TxsLogsProcessor.Clean() +} + +func TestGuardAccount_SendingFundsWhileProtectedAndNotProtected(t *testing.T) { + testContext := prepareTestContextForGuardedAccounts(t) + defer testContext.Close() + + // alice is the user, bob is the guardian, charlie is the receiver, david is the wrong guardian + mintAddress(t, testContext, alice, initialMint, "alice") + + expectedStatus := createUnGuardedAccountStatus() + testGuardianStatus(t, testContext, alice, expectedStatus) + + // userAddress can send funds while not protected + err := transferFunds(testContext, alice, transferValue, charlie) + require.Nil(t, err) + require.Equal(t, transferValue, getBalance(testContext, charlie)) + + // userAddress can not send funds while not protected with a guardian address + err = transferFundsCoSigned(testContext, alice, transferValue, charlie, bob) + require.ErrorIs(t, err, process.ErrTransactionNotExecutable) + require.Contains(t, err.Error(), "guarded transaction not expected") + require.Equal(t, transferValue, getBalance(testContext, charlie)) + + // userAddress can send funds while it just added a guardian + returnCode, err := setGuardian(testContext, alice, bob, uuid) + assert.Nil(t, err) + assert.Equal(t, vmcommon.Ok, returnCode) + currentEpoch := uint32(0) + + expectedStatus = guardAccountStatus{ + isGuarded: false, + active: nil, + pending: &guardianInfo{ + address: bob, + uuid: uuid, + epoch: currentEpoch + vm.EpochGuardianDelay, + }, + } + testGuardianStatus(t, testContext, alice, expectedStatus) + + err = transferFunds(testContext, alice, transferValue, charlie) + require.Nil(t, err) + require.Equal(t, big.NewInt(transferValue.Int64()*2), getBalance(testContext, charlie)) + + // userAddress can not send funds while not protected with a guardian address + err = transferFundsCoSigned(testContext, alice, transferValue, charlie, bob) + require.ErrorIs(t, err, 
process.ErrTransactionNotExecutable)
+ require.Contains(t, err.Error(), "guarded transaction not expected")
+ require.Equal(t, big.NewInt(transferValue.Int64()*2), getBalance(testContext, charlie))
+
+ // the delay epochs pass, so the pending guardian becomes active (but the account is not yet guarded), userAddress can still send funds
+ currentEpoch = vm.EpochGuardianDelay
+ setNewEpochOnContext(testContext, currentEpoch)
+
+ expectedStatus = guardAccountStatus{
+ isGuarded: false,
+ active: &guardianInfo{
+ address: bob,
+ uuid: uuid,
+ epoch: currentEpoch,
+ },
+ pending: nil,
+ }
+ testGuardianStatus(t, testContext, alice, expectedStatus)
+
+ err = transferFunds(testContext, alice, transferValue, charlie)
+ require.Nil(t, err)
+ require.Equal(t, big.NewInt(transferValue.Int64()*3), getBalance(testContext, charlie))
+
+ // userAddress can not send funds while protected without providing the guardian address
+ returnCode, err = guardAccount(testContext, alice)
+ require.Nil(t, err)
+ require.Equal(t, vmcommon.Ok, returnCode)
+
+ expectedStatus = guardAccountStatus{
+ isGuarded: true,
+ active: &guardianInfo{
+ address: bob,
+ uuid: uuid,
+ epoch: currentEpoch,
+ },
+ pending: nil,
+ }
+ testGuardianStatus(t, testContext, alice, expectedStatus)
+
+ err = transferFunds(testContext, alice, transferValue, charlie)
+ require.ErrorIs(t, err, process.ErrTransactionNotExecutable)
+ require.Contains(t, err.Error(), "not allowed to bypass guardian")
+ require.Equal(t, big.NewInt(transferValue.Int64()*3), getBalance(testContext, charlie))
+
+ // userAddress can send funds while protected with the guardian address
+ err = transferFundsCoSigned(testContext, alice, transferValue, charlie, bob)
+ require.Nil(t, err)
+ require.Equal(t, big.NewInt(transferValue.Int64()*4), getBalance(testContext, charlie))
+
+ // userAddress can not send funds while protected with a wrong guardian address (david)
+ err = transferFundsCoSigned(testContext, alice, transferValue, charlie, david)
+ require.ErrorIs(t, err, process.ErrTransactionNotExecutable)
+ require.Contains(t, err.Error(), "mismatch between transaction guardian and configured account guardian")
+ require.Equal(t, big.NewInt(transferValue.Int64()*4), getBalance(testContext, charlie))
+
+ // userAddress can not send funds while protected with an empty guardian address
+ err = transferFundsCoSigned(testContext, alice, transferValue, charlie, nil)
+ require.ErrorIs(t, err, process.ErrTransactionNotExecutable)
+ require.Contains(t, err.Error(), "mismatch between transaction guardian and configured account guardian")
+ require.Equal(t, big.NewInt(transferValue.Int64()*4), getBalance(testContext, charlie))
+}
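+
+// A note on the cosigned helpers used above: a guarded transaction is marked by setting
+// tx.GuardianAddr, raising tx.Version to txWithOptionVersion and switching on the
+// transaction.MaskGuardedTransaction option bit, which is why the cosigned helpers also
+// budget guardianSigVerificationGas on top of the base gas limit.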
+
+// Scenario 1 description:
+// 1. create & mint 4 addresses: alice, bob, charlie and david
+// 2. alice sets bob as guardian (test if pending)
+// 3. alice can not set bob as guardian again (test if pending & same activation epoch)
+// 3.1 alice can not set bob as guardian again even after one epoch passes
+// 4. alice activates the guardian (test if active)
+// 5. alice sets charlie as pending guardian (test if pending & different activation epoch)
+// 5.1 alice wants to set david as pending guardian (transaction is not executable, will not be included in a miniblock)
+// 6. alice sets charlie as guardian immediately through a cosigned transaction (test active & pending guardians)
+// 7. alice immediately sets bob as guardian through a cosigned transaction (test active & pending guardians)
+// 8. alice adds charlie as a pending guardian (test if pending & different activation epoch)
+// wait until charlie becomes active, no more pending guardians
+// 9. alice adds bob as a pending guardian, then sets charlie again through a cosigned transaction, which should remove the pending guardian
+// 10. alice un-guards the account immediately by using a cosigned transaction
+// 11. alice guards the account immediately by calling the GuardAccount function
+// 12. alice sends a guarded transaction, while the account is guarded -> should work
+// 13. alice un-guards the account immediately using a cosigned transaction and then sends a guarded transaction -> should error
+// 13.1 alice sends an unguarded transaction -> should work
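+// Throughout the scenario, a guardian set through a plain SetGuardian call becomes active
+// only after vm.EpochGuardianDelay (2 in these tests) epochs, while a cosigned SetGuardian
+// takes effect instantly; the activation epochs asserted below follow from this rule.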
+func TestGuardAccount_Scenario1(t *testing.T) {
+ testContext := prepareTestContextForGuardedAccounts(t)
+ defer testContext.Close()
+
+ // step 1 - mint addresses
+ for addressDescription, address := range allAddresses {
+ mintAddress(t, testContext, address, initialMint, addressDescription)
+ }
+ expectedStatus := createUnGuardedAccountStatus()
+ for _, address := range allAddresses {
+ testGuardianStatus(t, testContext, address, expectedStatus)
+ }
+ currentEpoch := uint32(0)
+
+ // step 2 - alice sets bob as guardian
+ step2Epoch := currentEpoch
+ returnCode, err := setGuardian(testContext, alice, bob, uuid)
+ require.Nil(t, err)
+ require.Equal(t, vmcommon.Ok, returnCode)
+ expectedStatus = guardAccountStatus{
+ isGuarded: false,
+ active: nil,
+ pending: &guardianInfo{
+ address: bob,
+ uuid: uuid,
+ epoch: step2Epoch + vm.EpochGuardianDelay,
+ },
+ }
+ testGuardianStatus(t, testContext, alice, expectedStatus)
+
+ // step 3 - alice wants to set bob as guardian again - should fail
+ returnCode, err = setGuardian(testContext, alice, bob, uuid)
+ require.Equal(t, process.ErrFailedTransaction, err)
+ require.Equal(t, vmcommon.UserError, returnCode)
+ expectedStatus = guardAccountStatus{
+ isGuarded: false,
+ active: nil,
+ pending: &guardianInfo{
+ address: bob,
+ uuid: uuid,
+ epoch: step2Epoch + vm.EpochGuardianDelay,
+ },
+ }
+ testGuardianStatus(t, testContext, alice, expectedStatus)
+
+ // step 3.1 - one epoch passes, try to set bob as guardian again
+ currentEpoch++
+ setNewEpochOnContext(testContext, currentEpoch)
+ returnCode, err = setGuardian(testContext, alice, bob, uuid)
+ require.Equal(t, process.ErrFailedTransaction, err)
+ require.Equal(t, vmcommon.UserError, returnCode)
+ expectedStatus = guardAccountStatus{
+ isGuarded: false,
+ active: nil,
+ pending: &guardianInfo{
+ address: bob,
+ uuid: uuid,
+ epoch: step2Epoch + vm.EpochGuardianDelay,
+ },
+ }
+ testGuardianStatus(t, testContext, alice, expectedStatus)
+
+ // step 4 - alice activates the guardian
+ currentEpoch++
+ setNewEpochOnContext(testContext, currentEpoch)
+ returnCode, err = guardAccount(testContext, alice)
+ require.Nil(t, err)
+ require.Equal(t, vmcommon.Ok, returnCode)
+ expectedStatus = guardAccountStatus{
+ isGuarded: true,
+ active: &guardianInfo{
+ address: bob,
+ uuid: uuid,
+ epoch: step2Epoch + vm.EpochGuardianDelay,
+ },
+ pending: nil,
+ }
+ testGuardianStatus(t, testContext, alice, expectedStatus)
+
+ // step 5 - alice sets charlie as pending guardian
+ step5Epoch := currentEpoch
+ returnCode, err = setGuardian(testContext, alice, charlie, uuid)
+ require.Nil(t, err)
+ require.Equal(t, vmcommon.Ok, returnCode)
+ expectedStatus = guardAccountStatus{
+ isGuarded: true,
+ active: &guardianInfo{
+ address: bob,
+ uuid: uuid,
+ epoch: step2Epoch + vm.EpochGuardianDelay,
+ },
+ pending: &guardianInfo{
+ address: charlie,
+ uuid: uuid,
+ epoch: step5Epoch + vm.EpochGuardianDelay,
+ },
+ }
+ testGuardianStatus(t, testContext, alice, expectedStatus)
+
+ // step 5.1 - alice tries to set david as pending guardian, overwriting charlie
+ currentEpoch++
+ setNewEpochOnContext(testContext, currentEpoch)
+ returnCode, err = setGuardian(testContext, alice, david, uuid)
+ require.ErrorIs(t, err, process.ErrTransactionNotExecutable)
+ require.Equal(t, vmcommon.UserError, returnCode)
+ expectedStatus = guardAccountStatus{
+ isGuarded: true,
+ active: &guardianInfo{
+ address: bob,
+ uuid: uuid,
+ epoch: step2Epoch + vm.EpochGuardianDelay,
+ },
+ pending: &guardianInfo{
+ address: charlie,
+ uuid: uuid,
+ epoch: step5Epoch + vm.EpochGuardianDelay,
+ },
+ }
+ testGuardianStatus(t, testContext, alice, expectedStatus)
+
+ // step 6 - alice sets charlie as guardian immediately through a cosigned transaction
+ step6Epoch := currentEpoch
+ returnCode, err = setGuardianCoSigned(testContext, alice, bob, charlie, uuid)
+ require.Nil(t, err)
+ require.Equal(t, vmcommon.Ok, returnCode)
+ expectedStatus = guardAccountStatus{
+ isGuarded: true,
+ active: &guardianInfo{ // instant set, no delay added
+ address: charlie,
+ uuid: uuid,
+ epoch: step6Epoch,
+ },
+ pending: nil,
+ }
+ testGuardianStatus(t, testContext, alice, expectedStatus)
+
+ // step 7 - alice immediately sets bob as guardian through a cosigned transaction
+ currentEpoch++
+ setNewEpochOnContext(testContext, currentEpoch)
+ step7Epoch := currentEpoch
+ returnCode, err = setGuardianCoSigned(testContext, alice, charlie, bob, uuid)
+ require.Nil(t, err)
+ require.Equal(t, vmcommon.Ok, returnCode)
+ expectedStatus = guardAccountStatus{
+ isGuarded: true,
+ active: &guardianInfo{ // instant set, no delay added
+ address: bob,
+ uuid: uuid,
+ epoch: step7Epoch,
+ },
+ pending: nil,
+ }
+ testGuardianStatus(t, testContext, alice, expectedStatus)
+
+ // step 8 - alice adds charlie as a pending guardian (test if pending & different activation epoch)
+ step8Epoch := currentEpoch
+ returnCode, err = setGuardian(testContext, alice, charlie, uuid)
+ require.Nil(t, err)
+ require.Equal(t, vmcommon.Ok, returnCode)
+ expectedStatus = guardAccountStatus{
+ isGuarded: true,
+ active: &guardianInfo{
+ address: bob,
+ uuid: uuid,
+ epoch: step7Epoch,
+ },
+ pending: &guardianInfo{
+ address: charlie,
+ uuid: uuid,
+ epoch: step8Epoch + vm.EpochGuardianDelay,
+ },
+ }
+ testGuardianStatus(t, testContext, alice, expectedStatus)
+ currentEpoch += vm.EpochGuardianDelay
+ setNewEpochOnContext(testContext, currentEpoch)
+ expectedStatus = guardAccountStatus{
+ isGuarded: true,
+ active: &guardianInfo{
+ address: charlie,
+ uuid: uuid,
+ epoch: step8Epoch + vm.EpochGuardianDelay,
+ },
+ pending: nil,
+ }
+ testGuardianStatus(t, testContext, alice, expectedStatus)
+
+ // step 9 - alice adds bob as a pending guardian, then sets charlie again through a cosigned transaction, which should remove the pending guardian
+ step9Epoch := currentEpoch
+ returnCode, err = setGuardian(testContext, alice, bob, uuid)
+ require.Nil(t, err)
+ require.Equal(t, vmcommon.Ok, returnCode)
+ expectedStatus = guardAccountStatus{
+ isGuarded: true,
+ active: &guardianInfo{
+ address: charlie,
+ uuid: uuid,
+ epoch: step8Epoch + vm.EpochGuardianDelay,
+ },
+ pending: &guardianInfo{
+ address: bob,
+ uuid: uuid,
+ epoch: step9Epoch + vm.EpochGuardianDelay,
+ },
+ }
+ testGuardianStatus(t, testContext, alice, expectedStatus)
+ // setting charlie as guardian again, cosigned by charlie, should remove bob as the pending guardian
+ returnCode, err =
setGuardianCoSigned(testContext, alice, charlie, charlie, uuid)
+ require.Nil(t, err)
+ require.Equal(t, vmcommon.Ok, returnCode)
+ expectedStatus = guardAccountStatus{
+ isGuarded: true,
+ active: &guardianInfo{
+ address: charlie,
+ uuid: uuid,
+ epoch: step8Epoch + vm.EpochGuardianDelay,
+ },
+ pending: nil,
+ }
+ testGuardianStatus(t, testContext, alice, expectedStatus)
+
+ // step 10 - alice un-guards the account immediately by using a cosigned transaction
+ currentEpoch++
+ setNewEpochOnContext(testContext, currentEpoch)
+ returnCode, err = removeGuardiansCoSigned(testContext, alice, charlie)
+ require.Nil(t, err)
+ require.Equal(t, vmcommon.Ok, returnCode)
+ expectedStatus = guardAccountStatus{
+ isGuarded: false,
+ active: &guardianInfo{
+ address: charlie,
+ uuid: uuid,
+ epoch: step8Epoch + vm.EpochGuardianDelay,
+ },
+ pending: nil,
+ }
+ testGuardianStatus(t, testContext, alice, expectedStatus)
+
+ // step 11 - alice guards the account immediately by calling the GuardAccount function
+ returnCode, err = guardAccount(testContext, alice)
+ require.Nil(t, err)
+ require.Equal(t, vmcommon.Ok, returnCode)
+ expectedStatus = guardAccountStatus{
+ isGuarded: true,
+ active: &guardianInfo{
+ address: charlie,
+ uuid: uuid,
+ epoch: step8Epoch + vm.EpochGuardianDelay,
+ },
+ pending: nil,
+ }
+ testGuardianStatus(t, testContext, alice, expectedStatus)
+
+ // 12. alice sends a guarded transaction, while the account is guarded -> should work
+ err = transferFundsCoSigned(testContext, alice, transferValue, david, charlie)
+ require.Nil(t, err)
+
+ // 13. alice un-guards the account immediately using a cosigned transaction and then sends a guarded transaction -> should error
+ returnCode, err = removeGuardiansCoSigned(testContext, alice, charlie)
+ require.Nil(t, err)
+ require.Equal(t, vmcommon.Ok, returnCode)
+ expectedStatus = guardAccountStatus{
+ isGuarded: false,
+ active: &guardianInfo{
+ address: charlie,
+ uuid: uuid,
+ epoch: step8Epoch + vm.EpochGuardianDelay,
+ },
+ pending: nil,
+ }
+ testGuardianStatus(t, testContext, alice, expectedStatus)
+ err = transferFundsCoSigned(testContext, alice, transferValue, david, charlie)
+ require.ErrorIs(t, err, process.ErrTransactionNotExecutable)
+ // 13.1 alice sends an unguarded transaction -> should work
+ err = transferFunds(testContext, alice, transferValue, david)
+ require.Nil(t, err)
+}
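+
+// The relayed v1 tests below wrap the full user transaction (guardian fields included)
+// into the relayed transaction's data field via PrepareRelayedTxDataV1, so the guardian
+// checks still apply to the inner transaction when it is executed on alice's account.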
+
+// 1. create & mint 4 addresses: alice, bob, charlie and david
+// 2. alice sets bob as guardian and the account becomes guarded
+// 3. test that charlie can send a relayed transaction v1 on the behalf of alice to david
+// 3.1 cosigned transaction should work
+// 3.2 single signed transaction should not work
+func TestGuardAccounts_RelayedTransactionV1(t *testing.T) {
+	testContext := prepareTestContextForGuardedAccounts(t)
+	defer testContext.Close()
+
+	// step 1 - mint addresses
+	for addressDescription, address := range allAddresses {
+		mintAddress(t, testContext, address, initialMint, addressDescription)
+	}
+	expectedStatus := createUnGuardedAccountStatus()
+	for _, address := range allAddresses {
+		testGuardianStatus(t, testContext, address, expectedStatus)
+	}
+
+	currentEpoch := uint32(0)
+
+	// step 2 - alice sets bob as guardian
+	step2Epoch := currentEpoch
+	returnCode, err := setGuardian(testContext, alice, bob, uuid)
+	require.Nil(t, err)
+	require.Equal(t, vmcommon.Ok, returnCode)
+	currentEpoch += vm.EpochGuardianDelay
+	setNewEpochOnContext(testContext, currentEpoch)
+	returnCode, err = guardAccount(testContext, alice)
+	require.Nil(t, err)
+	require.Equal(t, vmcommon.Ok, returnCode)
+	expectedStatus = guardAccountStatus{
+		isGuarded: true,
+		active: &guardianInfo{
+			address: bob,
+			uuid: uuid,
+			epoch: step2Epoch + vm.EpochGuardianDelay,
+		},
+		pending: nil,
+	}
+	testGuardianStatus(t, testContext, alice, expectedStatus)
+
+	aliceCurrentBalance := getBalance(testContext, alice)
+
+	// step 3 - charlie sends a relayed transaction v1 on the behalf of alice
+	// 3.1 cosigned transaction should work
+	userTx := vm.CreateTransaction(
+		getNonce(testContext, alice),
+		transferValue,
+		alice,
+		david,
+		gasPrice,
+		transferGas+guardianSigVerificationGas,
+		make([]byte, 0))
+
+	userTx.GuardianAddr = bob
+	userTx.Options = userTx.Options | transaction.MaskGuardedTransaction
+	userTx.Version = txWithOptionVersion
+
+	rtxData := integrationTests.PrepareRelayedTxDataV1(userTx)
+	rTxGasLimit := 1 + transferGas + guardianSigVerificationGas + uint64(len(rtxData))
+	rtx := vm.CreateTransaction(getNonce(testContext, charlie), big.NewInt(0), charlie, alice, gasPrice, rTxGasLimit, rtxData)
+	returnCode, err = testContext.TxProcessor.ProcessTransaction(rtx)
+	require.Nil(t, err)
+	require.Equal(t, vmcommon.Ok, returnCode)
+	// balance tests:
+	// alice: aliceCurrentBalance - transferValue (no fee for relayed transaction)
+	// bob: initialMint
+	// charlie: initialMint - rTxGasLimit * gasPrice
+	// david: initialMint + transferValue
+	aliceExpectedBalance := big.NewInt(0).Sub(aliceCurrentBalance, transferValue)
+	assert.Equal(t, aliceExpectedBalance, getBalance(testContext, alice))
+	bobExpectedBalance := big.NewInt(0).Set(initialMint)
+	assert.Equal(t, bobExpectedBalance, getBalance(testContext, bob))
+	charlieExpectedBalance := big.NewInt(0).Sub(initialMint, big.NewInt(int64(rTxGasLimit*gasPrice)))
+	assert.Equal(t, charlieExpectedBalance, getBalance(testContext, charlie))
+	davidExpectedBalance := big.NewInt(0).Add(initialMint, transferValue)
+	assert.Equal(t, davidExpectedBalance, getBalance(testContext, david))
+
+	aliceCurrentBalance = getBalance(testContext, alice)
+	charlieCurrentBalance := getBalance(testContext, charlie)
+	davidCurrentBalance := getBalance(testContext, david)
+	testContext.CleanIntermediateTransactions(t)
+
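+	// Aside: PrepareRelayedTxDataV1 hides the payload encoding, but the
+	// utils.PrepareRelayerTxData helper deleted later in this diff shows the
+	// exact shape. Restated as a sketch (assumes encoding/hex plus core and
+	// marshal from mx-chain-core-go; the helper name is illustrative):
+	//
+	//	var protoMarshalizer = &marshal.GogoProtoMarshalizer{}
+	//
+	//	func buildRelayedV1Data(innerTx *transaction.Transaction) []byte {
+	//		userTxBytes, _ := protoMarshalizer.Marshal(innerTx)
+	//		return []byte(core.RelayedTransaction + "@" + hex.EncodeToString(userTxBytes))
+	//	}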
+	// 3.2 single signed transaction should not work
+	userTx = vm.CreateTransaction(
+		getNonce(testContext, alice),
+		transferValue,
+		alice,
+		david,
+		gasPrice,
+		transferGas+guardianSigVerificationGas,
+		make([]byte, 0))
+
+	userTx.Version = txWithOptionVersion
+
+	rtxData = integrationTests.PrepareRelayedTxDataV1(userTx)
+	rTxGasLimit = 1 + transferGas + guardianSigVerificationGas + uint64(len(rtxData))
+	rtx = vm.CreateTransaction(getNonce(testContext, charlie), big.NewInt(0), charlie, alice, gasPrice, rTxGasLimit, rtxData)
+	returnCode, err = testContext.TxProcessor.ProcessTransaction(rtx)
+	require.Nil(t, err)
+	require.Equal(t, vmcommon.UserError, returnCode)
+	intermediateTxs := testContext.GetIntermediateTransactions(t)
+	require.Equal(t, 1, len(intermediateTxs))
+	scr := intermediateTxs[0].(*smartContractResult.SmartContractResult)
+	// expectedReturnMessage is hardcoded for backwards compatibility reasons
+	expectedReturnMessage := "transaction is not executable and gas will not be consumed, not allowed to bypass guardian"
+	require.Equal(t, expectedReturnMessage, string(scr.ReturnMessage))
+	// balance tests:
+	// alice: aliceCurrentBalance (no fee for the failed relayed transaction)
+	// bob: initialMint
+	// charlie: charlieCurrentBalance - rTxGasLimit * gasPrice
+	// david: davidCurrentBalance
+	assert.Equal(t, aliceCurrentBalance, getBalance(testContext, alice))
+	bobExpectedBalance = big.NewInt(0).Set(initialMint)
+	assert.Equal(t, bobExpectedBalance, getBalance(testContext, bob))
+	charlieExpectedBalance = big.NewInt(0).Sub(charlieCurrentBalance, big.NewInt(int64(rTxGasLimit*gasPrice)))
+	assert.Equal(t, charlieExpectedBalance, getBalance(testContext, charlie))
+	assert.Equal(t, davidCurrentBalance, getBalance(testContext, david))
+}
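+
+// Both relayed tests flag the inner transaction as guarded the same way before
+// wrapping it. A compact sketch of that flagging (the version literal below is
+// an assumption; txWithOptionVersion is the tests' own constant):
+//
+//	func markGuarded(tx *transaction.Transaction, guardianAddr []byte) {
+//		tx.GuardianAddr = guardianAddr
+//		tx.Options |= transaction.MaskGuardedTransaction
+//		tx.Version = 2 // a version that carries options; value assumed
+//	}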
+
+// 1. create & mint 4 addresses: alice, bob, charlie and david
+// 2. alice sets bob as guardian and the account becomes guarded
+// 3. test that charlie can not send a relayed transaction v2 on the behalf of alice to david
+// 3.1 cosigned transaction should not work
+// 3.2 single signed transaction should not work
+func TestGuardAccounts_RelayedTransactionV2(t *testing.T) {
+	testContext := prepareTestContextForGuardedAccounts(t)
+	defer testContext.Close()
+
+	// step 1 - mint addresses
+	for addressDescription, address := range allAddresses {
+		mintAddress(t, testContext, address, initialMint, addressDescription)
+	}
+	expectedStatus := createUnGuardedAccountStatus()
+	for _, address := range allAddresses {
+		testGuardianStatus(t, testContext, address, expectedStatus)
+	}
+
+	currentEpoch := uint32(0)
+
+	// step 2 - alice sets bob as guardian
+	step2Epoch := currentEpoch
+	returnCode, err := setGuardian(testContext, alice, bob, uuid)
+	require.Nil(t, err)
+	require.Equal(t, vmcommon.Ok, returnCode)
+	currentEpoch += vm.EpochGuardianDelay
+	setNewEpochOnContext(testContext, currentEpoch)
+	returnCode, err = guardAccount(testContext, alice)
+	require.Nil(t, err)
+	require.Equal(t, vmcommon.Ok, returnCode)
+	expectedStatus = guardAccountStatus{
+		isGuarded: true,
+		active: &guardianInfo{
+			address: bob,
+			uuid: uuid,
+			epoch: step2Epoch + vm.EpochGuardianDelay,
+		},
+		pending: nil,
+	}
+	testGuardianStatus(t, testContext, alice, expectedStatus)
+
+	aliceCurrentBalance := getBalance(testContext, alice)
+	testContext.CleanIntermediateTransactions(t)
+
+	// step 3 - charlie sends a relayed transaction v2 on the behalf of alice
+	// 3.1 cosigned transaction should not work
+	userTx := vm.CreateTransaction(
+		getNonce(testContext, alice),
+		transferValue,
+		alice,
+		david,
+		gasPrice,
+		transferGas+guardianSigVerificationGas,
+		make([]byte, 0))
+
+	userTx.GuardianAddr = bob
+	userTx.Options = userTx.Options | transaction.MaskGuardedTransaction
+	userTx.Version = txWithOptionVersion
+
+	rtxData := integrationTests.PrepareRelayedTxDataV2(userTx)
+	rTxGasLimit := 1 + transferGas + guardianSigVerificationGas + uint64(len(rtxData))
+	rtx := vm.CreateTransaction(getNonce(testContext, charlie), big.NewInt(0), charlie, alice, gasPrice, rTxGasLimit, rtxData)
+	returnCode, err = testContext.TxProcessor.ProcessTransaction(rtx)
+	require.Nil(t, err)
+	require.Equal(t, vmcommon.UserError, returnCode)
+	intermediateTxs := testContext.GetIntermediateTransactions(t)
+	require.Equal(t, 1, len(intermediateTxs))
+	scr := intermediateTxs[0].(*smartContractResult.SmartContractResult)
+	// expectedReturnMessage is hardcoded for backwards compatibility reasons
+	expectedReturnMessage := "transaction is not executable and gas will not be consumed, not allowed to bypass guardian"
+	require.Equal(t, expectedReturnMessage, string(scr.ReturnMessage))
+	// balance tests:
+	// alice: aliceCurrentBalance (no fee for relayed transaction V2)
+	// bob: initialMint
+	// charlie: initialMint - rTxGasLimit * gasPrice
+	// david: initialMint
+	assert.Equal(t, aliceCurrentBalance, getBalance(testContext, alice))
+	bobExpectedBalance := big.NewInt(0).Set(initialMint)
+	assert.Equal(t, bobExpectedBalance, getBalance(testContext, bob))
+	charlieExpectedBalance := big.NewInt(0).Sub(initialMint, big.NewInt(int64(rTxGasLimit*gasPrice)))
+	assert.Equal(t, charlieExpectedBalance, getBalance(testContext, charlie))
+	assert.Equal(t, initialMint, getBalance(testContext, david))
+
+	charlieCurrentBalance := getBalance(testContext, charlie)
+	testContext.CleanIntermediateTransactions(t)
+
+	// 3.2 single signed transaction should not work
+	userTx = vm.CreateTransaction(
+		getNonce(testContext, alice),
+		transferValue,
+		alice,
+		david,
+		gasPrice,
+		transferGas+guardianSigVerificationGas,
+		make([]byte, 0))
+
+	userTx.Version = txWithOptionVersion
+
+	rtxData = integrationTests.PrepareRelayedTxDataV2(userTx)
+	rTxGasLimit = 1 + transferGas + guardianSigVerificationGas + uint64(len(rtxData))
+	rtx = vm.CreateTransaction(getNonce(testContext, charlie), big.NewInt(0), charlie, alice, gasPrice, rTxGasLimit, rtxData)
+	returnCode, err = testContext.TxProcessor.ProcessTransaction(rtx)
+	require.Nil(t, err)
+	require.Equal(t, vmcommon.UserError, returnCode)
+	intermediateTxs = testContext.GetIntermediateTransactions(t)
+	require.Equal(t, 1, len(intermediateTxs))
+	scr = intermediateTxs[0].(*smartContractResult.SmartContractResult)
+	require.Equal(t, expectedReturnMessage, string(scr.ReturnMessage))
+	// balance tests:
+	// alice: aliceCurrentBalance (no fee for the failed relayed transaction)
+	// bob: initialMint
+	// charlie: charlieCurrentBalance - rTxGasLimit * gasPrice
+	// david: initialMint
+	assert.Equal(t, aliceCurrentBalance, getBalance(testContext, alice))
+	bobExpectedBalance = big.NewInt(0).Set(initialMint)
+	assert.Equal(t, bobExpectedBalance, getBalance(testContext, bob))
+	charlieExpectedBalance = big.NewInt(0).Sub(charlieCurrentBalance, big.NewInt(int64(rTxGasLimit*gasPrice)))
+	assert.Equal(t, charlieExpectedBalance, getBalance(testContext, charlie))
+	assert.Equal(t, initialMint, getBalance(testContext, david))
+}
diff --git a/integrationTests/vm/txsFee/moveBalance_test.go b/integrationTests/vm/txsFee/moveBalance_test.go
index 7e07584dbb4..78646813825 100644
--- a/integrationTests/vm/txsFee/moveBalance_test.go
+++ b/integrationTests/vm/txsFee/moveBalance_test.go
@@ -16,6 +16,8 @@ import (
 	"github.com/stretchr/testify/require"
 )
 
+const gasPrice = uint64(10)
+
 // minGasPrice = 1, gasPerDataByte = 1, minGasLimit =
1 func TestMoveBalanceSelfShouldWorkAndConsumeTxFee(t *testing.T) { testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) @@ -25,7 +27,6 @@ func TestMoveBalanceSelfShouldWorkAndConsumeTxFee(t *testing.T) { sndAddr := []byte("12345678901234567890123456789012") senderNonce := uint64(0) senderBalance := big.NewInt(10000) - gasPrice := uint64(10) gasLimit := uint64(100) _, _ = vm.CreateAccount(testContext.Accounts, sndAddr, 0, senderBalance) @@ -61,7 +62,6 @@ func TestMoveBalanceAllFlagsEnabledLessBalanceThanGasLimitMulGasPrice(t *testing sndAddr := []byte("12345678901234567890123456789012") senderNonce := uint64(0) senderBalance := big.NewInt(10000) - gasPrice := uint64(10) gasLimit := uint64(10000) _, _ = vm.CreateAccount(testContext.Accounts, sndAddr, 0, senderBalance) @@ -80,7 +80,6 @@ func TestMoveBalanceShouldWork(t *testing.T) { rcvAddr := []byte("12345678901234567890123456789022") senderNonce := uint64(0) senderBalance := big.NewInt(10000) - gasPrice := uint64(10) gasLimit := uint64(100) _, _ = vm.CreateAccount(testContext.Accounts, sndAddr, 0, senderBalance) @@ -120,10 +119,10 @@ func TestMoveBalanceInvalidHasGasButNoValueShouldConsumeGas(t *testing.T) { sndAddr := []byte("12345678901234567890123456789012") rcvAddr := []byte("12345678901234567890123456789022") senderBalance := big.NewInt(100) - gasPrice := uint64(1) + gasPriceLocal := uint64(1) gasLimit := uint64(20) _, _ = vm.CreateAccount(testContext.Accounts, sndAddr, 0, senderBalance) - tx := vm.CreateTransaction(0, big.NewInt(100), sndAddr, rcvAddr, gasPrice, gasLimit, []byte("aaaa")) + tx := vm.CreateTransaction(0, big.NewInt(100), sndAddr, rcvAddr, gasPriceLocal, gasLimit, []byte("aaaa")) returnCode, err := testContext.TxProcessor.ProcessTransaction(tx) require.Equal(t, process.ErrFailedTransaction, err) @@ -150,11 +149,11 @@ func TestMoveBalanceHigherNonceShouldNotConsumeGas(t *testing.T) { rcvAddr := []byte("12345678901234567890123456789022") senderBalance := big.NewInt(100) - gasPrice := uint64(1) + gasPriceLocal := uint64(1) gasLimit := uint64(20) _, _ = vm.CreateAccount(testContext.Accounts, sndAddr, 0, senderBalance) - tx := vm.CreateTransaction(1, big.NewInt(500), sndAddr, rcvAddr, gasPrice, gasLimit, []byte("aaaa")) + tx := vm.CreateTransaction(1, big.NewInt(500), sndAddr, rcvAddr, gasPriceLocal, gasLimit, []byte("aaaa")) _, err = testContext.TxProcessor.ProcessTransaction(tx) require.Equal(t, process.ErrHigherNonceInTransaction, err) @@ -180,11 +179,11 @@ func TestMoveBalanceMoreGasThanGasLimitPerMiniBlockForSafeCrossShard(t *testing. 
rcvAddr := []byte("12345678901234567890123456789022") senderBalance := big.NewInt(0).SetUint64(math.MaxUint64) - gasPrice := uint64(1) + gasPriceLocal := uint64(1) gasLimit := uint64(math.MaxUint64) _, _ = vm.CreateAccount(testContext.Accounts, sndAddr, 0, senderBalance) - tx := vm.CreateTransaction(0, big.NewInt(500), sndAddr, rcvAddr, gasPrice, gasLimit, []byte("aaaa")) + tx := vm.CreateTransaction(0, big.NewInt(500), sndAddr, rcvAddr, gasPriceLocal, gasLimit, []byte("aaaa")) returnCode, err := testContext.TxProcessor.ProcessTransaction(tx) require.Equal(t, process.ErrMoreGasThanGasLimitPerBlock, err) @@ -211,7 +210,6 @@ func TestMoveBalanceInvalidUserNames(t *testing.T) { rcvAddr := []byte("12345678901234567890123456789022") senderNonce := uint64(0) senderBalance := big.NewInt(10000) - gasPrice := uint64(10) gasLimit := uint64(100) _, _ = vm.CreateAccount(testContext.Accounts, sndAddr, 0, senderBalance) diff --git a/integrationTests/vm/txsFee/multiESDTTransfer_test.go b/integrationTests/vm/txsFee/multiESDTTransfer_test.go index dd0ffcf788b..8f7978685b8 100644 --- a/integrationTests/vm/txsFee/multiESDTTransfer_test.go +++ b/integrationTests/vm/txsFee/multiESDTTransfer_test.go @@ -29,7 +29,6 @@ func TestMultiESDTTransferShouldWork(t *testing.T) { secondToken := []byte("second") utils.CreateAccountWithESDTBalance(t, testContext.Accounts, sndAddr, big.NewInt(0), secondToken, 0, esdtBalance) - gasPrice := uint64(10) gasLimit := uint64(4000) tx := utils.CreateMultiTransferTX(0, sndAddr, rcvAddr, gasPrice, gasLimit, &utils.TransferESDTData{ Token: token, @@ -87,7 +86,6 @@ func TestMultiESDTTransferFailsBecauseOfMaxLimit(t *testing.T) { secondToken := []byte("second") utils.CreateAccountWithESDTBalance(t, testContext.Accounts, sndAddr, big.NewInt(0), secondToken, 0, esdtBalance) - gasPrice := uint64(10) gasLimit := uint64(4000) tx := utils.CreateMultiTransferTX(0, sndAddr, rcvAddr, gasPrice, gasLimit, &utils.TransferESDTData{ Token: token, diff --git a/integrationTests/vm/txsFee/multiShard/relayedBuiltInFunctions_test.go b/integrationTests/vm/txsFee/multiShard/relayedBuiltInFunctions_test.go index 2fdeb14eeb9..56d97f9546b 100644 --- a/integrationTests/vm/txsFee/multiShard/relayedBuiltInFunctions_test.go +++ b/integrationTests/vm/txsFee/multiShard/relayedBuiltInFunctions_test.go @@ -56,7 +56,7 @@ func TestRelayedBuiltInFunctionExecuteOnRelayerAndDstShardShouldWork(t *testing. 
_, _ = vm.CreateAccount(testContextRelayer.Accounts, relayerAddr, 0, big.NewInt(15000)) - rtxData := utils.PrepareRelayerTxData(innerTx) + rtxData := integrationTests.PrepareRelayedTxDataV1(innerTx) rTxGasLimit := 1 + gasLimit + uint64(len(rtxData)) rtx := vm.CreateTransaction(0, innerTx.Value, relayerAddr, owner, gasPrice, rTxGasLimit, rtxData) diff --git a/integrationTests/vm/txsFee/multiShard/relayedMoveBalance_test.go b/integrationTests/vm/txsFee/multiShard/relayedMoveBalance_test.go index bdb65593459..2dd36161143 100644 --- a/integrationTests/vm/txsFee/multiShard/relayedMoveBalance_test.go +++ b/integrationTests/vm/txsFee/multiShard/relayedMoveBalance_test.go @@ -6,6 +6,7 @@ import ( "testing" "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/integrationTests" "github.com/multiversx/mx-chain-go/integrationTests/vm" "github.com/multiversx/mx-chain-go/integrationTests/vm/txsFee/utils" vmcommon "github.com/multiversx/mx-chain-vm-common-go" @@ -34,7 +35,7 @@ func TestRelayedMoveBalanceRelayerShard0InnerTxSenderAndReceiverShard1ShouldWork userTx := vm.CreateTransaction(0, big.NewInt(100), sndAddr, rcvAddr, gasPrice, gasLimit, []byte("aaaa")) - rtxData := utils.PrepareRelayerTxData(userTx) + rtxData := integrationTests.PrepareRelayedTxDataV1(userTx) rTxGasLimit := 1 + gasLimit + uint64(len(rtxData)) rtx := vm.CreateTransaction(0, userTx.Value, relayerAddr, sndAddr, gasPrice, rTxGasLimit, rtxData) @@ -80,7 +81,7 @@ func TestRelayedMoveBalanceRelayerAndInnerTxSenderShard0ReceiverShard1(t *testin userTx := vm.CreateTransaction(0, big.NewInt(100), sndAddr, scAddrBytes, gasPrice, gasLimit, nil) - rtxData := utils.PrepareRelayerTxData(userTx) + rtxData := integrationTests.PrepareRelayedTxDataV1(userTx) rTxGasLimit := 1 + gasLimit + uint64(len(rtxData)) rtx := vm.CreateTransaction(0, userTx.Value, relayerAddr, sndAddr, gasPrice, rTxGasLimit, rtxData) @@ -131,7 +132,7 @@ func TestRelayedMoveBalanceExecuteOnSourceAndDestination(t *testing.T) { userTx := vm.CreateTransaction(0, big.NewInt(100), sndAddr, scAddrBytes, gasPrice, gasLimit, nil) - rtxData := utils.PrepareRelayerTxData(userTx) + rtxData := integrationTests.PrepareRelayedTxDataV1(userTx) rTxGasLimit := 1 + gasLimit + uint64(len(rtxData)) rtx := vm.CreateTransaction(0, userTx.Value, relayerAddr, sndAddr, gasPrice, rTxGasLimit, rtxData) @@ -193,7 +194,7 @@ func TestRelayedMoveBalanceExecuteOnSourceAndDestinationRelayerAndInnerTxSenderS userTx := vm.CreateTransaction(0, big.NewInt(100), sndAddr, rcvAddr, gasPrice, gasLimit, nil) - rtxData := utils.PrepareRelayerTxData(userTx) + rtxData := integrationTests.PrepareRelayedTxDataV1(userTx) rTxGasLimit := 1 + gasLimit + uint64(len(rtxData)) rtx := vm.CreateTransaction(0, userTx.Value, relayerAddr, sndAddr, gasPrice, rTxGasLimit, rtxData) @@ -253,7 +254,7 @@ func TestRelayedMoveBalanceRelayerAndInnerTxReceiverShard0SenderShard1(t *testin innerTx := vm.CreateTransaction(0, big.NewInt(100), sndAddr, rcvAddr, gasPrice, gasLimit, nil) - rtxData := utils.PrepareRelayerTxData(innerTx) + rtxData := integrationTests.PrepareRelayedTxDataV1(innerTx) rTxGasLimit := 1 + gasLimit + uint64(len(rtxData)) rtx := vm.CreateTransaction(0, innerTx.Value, relayerAddr, sndAddr, gasPrice, rTxGasLimit, rtxData) @@ -329,7 +330,7 @@ func TestMoveBalanceRelayerShard0InnerTxSenderShard1InnerTxReceiverShard2ShouldW innerTx := vm.CreateTransaction(0, big.NewInt(100), sndAddr, rcvAddr, gasPrice, gasLimit, nil) - rtxData := utils.PrepareRelayerTxData(innerTx) + rtxData := 
integrationTests.PrepareRelayedTxDataV1(innerTx) rTxGasLimit := 1 + gasLimit + uint64(len(rtxData)) rtx := vm.CreateTransaction(0, innerTx.Value, relayerAddr, sndAddr, gasPrice, rTxGasLimit, rtxData) diff --git a/integrationTests/vm/txsFee/multiShard/relayedScDeploy_test.go b/integrationTests/vm/txsFee/multiShard/relayedScDeploy_test.go index 0a82260631d..82bf9fc370f 100644 --- a/integrationTests/vm/txsFee/multiShard/relayedScDeploy_test.go +++ b/integrationTests/vm/txsFee/multiShard/relayedScDeploy_test.go @@ -8,6 +8,7 @@ import ( "testing" "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/integrationTests" "github.com/multiversx/mx-chain-go/integrationTests/vm" "github.com/multiversx/mx-chain-go/integrationTests/vm/txsFee/utils" "github.com/multiversx/mx-chain-go/integrationTests/vm/wasm" @@ -39,7 +40,7 @@ func TestRelayedSCDeployShouldWork(t *testing.T) { scCode := wasm.GetSCCode(contractPath) userTx := vm.CreateTransaction(0, big.NewInt(0), sndAddr, vm.CreateEmptyAddress(), gasPrice, gasLimit, []byte(wasm.CreateDeployTxData(scCode))) - rtxData := utils.PrepareRelayerTxData(userTx) + rtxData := integrationTests.PrepareRelayedTxDataV1(userTx) rTxGasLimit := 1 + gasLimit + uint64(len(rtxData)) rtx := vm.CreateTransaction(0, big.NewInt(0), relayerAddr, sndAddr, gasPrice, rTxGasLimit, rtxData) diff --git a/integrationTests/vm/txsFee/multiShard/relayedTxScCalls_test.go b/integrationTests/vm/txsFee/multiShard/relayedTxScCalls_test.go index f9849be4720..1c37b9624c3 100644 --- a/integrationTests/vm/txsFee/multiShard/relayedTxScCalls_test.go +++ b/integrationTests/vm/txsFee/multiShard/relayedTxScCalls_test.go @@ -10,6 +10,7 @@ import ( "testing" "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/integrationTests" "github.com/multiversx/mx-chain-go/integrationTests/vm" "github.com/multiversx/mx-chain-go/integrationTests/vm/txsFee/utils" vmcommon "github.com/multiversx/mx-chain-vm-common-go" @@ -57,7 +58,7 @@ func TestRelayedTxScCallMultiShardShouldWork(t *testing.T) { gasLimit := uint64(500) innerTx := vm.CreateTransaction(0, big.NewInt(0), sndAddr, scAddr, gasPrice, gasLimit, []byte("increment")) - rtxData := utils.PrepareRelayerTxData(innerTx) + rtxData := integrationTests.PrepareRelayedTxDataV1(innerTx) rTxGasLimit := 1 + gasLimit + uint64(len(rtxData)) rtx := vm.CreateTransaction(0, innerTx.Value, relayerAddr, sndAddr, gasPrice, rTxGasLimit, rtxData) @@ -162,7 +163,7 @@ func TestRelayedTxScCallMultiShardFailOnInnerTxDst(t *testing.T) { gasLimit := uint64(500) innerTx := vm.CreateTransaction(0, big.NewInt(0), sndAddr, scAddr, gasPrice, gasLimit, []byte("incremeno")) - rtxData := utils.PrepareRelayerTxData(innerTx) + rtxData := integrationTests.PrepareRelayedTxDataV1(innerTx) rTxGasLimit := 1 + gasLimit + uint64(len(rtxData)) rtx := vm.CreateTransaction(0, innerTx.Value, relayerAddr, sndAddr, gasPrice, rTxGasLimit, rtxData) diff --git a/integrationTests/vm/txsFee/relayedAsyncCall_test.go b/integrationTests/vm/txsFee/relayedAsyncCall_test.go index 7cf8c80d82c..8de0eaf7948 100644 --- a/integrationTests/vm/txsFee/relayedAsyncCall_test.go +++ b/integrationTests/vm/txsFee/relayedAsyncCall_test.go @@ -11,6 +11,7 @@ import ( "testing" "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/integrationTests" "github.com/multiversx/mx-chain-go/integrationTests/vm" "github.com/multiversx/mx-chain-go/integrationTests/vm/txsFee/utils" vmcommon "github.com/multiversx/mx-chain-vm-common-go" @@ -30,7 +31,6 @@ func 
TestRelayedAsyncCallShouldWork(t *testing.T) { _, _ = vm.CreateAccount(testContext.Accounts, ownerAddr, 0, egldBalance) _, _ = vm.CreateAccount(testContext.Accounts, relayerAddr, 0, egldBalance) - gasPrice := uint64(10) ownerAccount, _ := testContext.Accounts.LoadAccount(ownerAddr) deployGasLimit := uint64(50000) @@ -48,7 +48,7 @@ func TestRelayedAsyncCallShouldWork(t *testing.T) { innerTx := vm.CreateTransaction(0, big.NewInt(0), senderAddr, secondSCAddress, gasPrice, gasLimit, []byte("doSomething")) - rtxData := utils.PrepareRelayerTxData(innerTx) + rtxData := integrationTests.PrepareRelayedTxDataV1(innerTx) rTxGasLimit := 1 + gasLimit + uint64(len(rtxData)) rtx := vm.CreateTransaction(0, innerTx.Value, relayerAddr, senderAddr, gasPrice, rTxGasLimit, rtxData) diff --git a/integrationTests/vm/txsFee/relayedAsyncESDT_test.go b/integrationTests/vm/txsFee/relayedAsyncESDT_test.go index 15db4d3b331..3a447d0d361 100644 --- a/integrationTests/vm/txsFee/relayedAsyncESDT_test.go +++ b/integrationTests/vm/txsFee/relayedAsyncESDT_test.go @@ -11,6 +11,7 @@ import ( "testing" "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/integrationTests" "github.com/multiversx/mx-chain-go/integrationTests/vm" "github.com/multiversx/mx-chain-go/integrationTests/vm/txsFee/utils" vmcommon "github.com/multiversx/mx-chain-vm-common-go" @@ -36,7 +37,6 @@ func TestRelayedAsyncESDTCallShouldWork(t *testing.T) { _, _ = vm.CreateAccount(testContext.Accounts, relayerAddr, 0, egldBalance) // deploy 2 contracts - gasPrice := uint64(10) ownerAccount, _ := testContext.Accounts.LoadAccount(ownerAddr) deployGasLimit := uint64(50000) @@ -54,7 +54,7 @@ func TestRelayedAsyncESDTCallShouldWork(t *testing.T) { innerTx := utils.CreateESDTTransferTx(0, sndAddr, firstSCAddress, token, big.NewInt(5000), gasPrice, gasLimit) innerTx.Data = []byte(string(innerTx.Data) + "@" + hex.EncodeToString([]byte("transferToSecondContractHalf"))) - rtxData := utils.PrepareRelayerTxData(innerTx) + rtxData := integrationTests.PrepareRelayedTxDataV1(innerTx) rTxGasLimit := 1 + gasLimit + uint64(len(rtxData)) rtx := vm.CreateTransaction(0, innerTx.Value, relayerAddr, sndAddr, gasPrice, rTxGasLimit, rtxData) @@ -95,7 +95,6 @@ func TestRelayedAsyncESDTCall_InvalidCallFirstContract(t *testing.T) { _, _ = vm.CreateAccount(testContext.Accounts, relayerAddr, 0, egldBalance) // deploy 2 contracts - gasPrice := uint64(10) ownerAccount, _ := testContext.Accounts.LoadAccount(ownerAddr) deployGasLimit := uint64(50000) @@ -113,7 +112,7 @@ func TestRelayedAsyncESDTCall_InvalidCallFirstContract(t *testing.T) { innerTx := utils.CreateESDTTransferTx(0, sndAddr, firstSCAddress, token, big.NewInt(5000), gasPrice, gasLimit) innerTx.Data = []byte(string(innerTx.Data) + "@" + hex.EncodeToString([]byte("transferToSecondContractRejected"))) - rtxData := utils.PrepareRelayerTxData(innerTx) + rtxData := integrationTests.PrepareRelayedTxDataV1(innerTx) rTxGasLimit := 1 + gasLimit + uint64(len(rtxData)) rtx := vm.CreateTransaction(0, innerTx.Value, relayerAddr, sndAddr, gasPrice, rTxGasLimit, rtxData) @@ -154,7 +153,6 @@ func TestRelayedAsyncESDTCall_InvalidOutOfGas(t *testing.T) { _, _ = vm.CreateAccount(testContext.Accounts, relayerAddr, 0, egldBalance) // deploy 2 contracts - gasPrice := uint64(10) ownerAccount, _ := testContext.Accounts.LoadAccount(ownerAddr) deployGasLimit := uint64(50000) @@ -172,7 +170,7 @@ func TestRelayedAsyncESDTCall_InvalidOutOfGas(t *testing.T) { innerTx := utils.CreateESDTTransferTx(0, sndAddr, firstSCAddress, token, 
big.NewInt(5000), gasPrice, gasLimit) innerTx.Data = []byte(string(innerTx.Data) + "@" + hex.EncodeToString([]byte("transferToSecondContractHalf"))) - rtxData := utils.PrepareRelayerTxData(innerTx) + rtxData := integrationTests.PrepareRelayedTxDataV1(innerTx) rTxGasLimit := 1 + gasLimit + uint64(len(rtxData)) rtx := vm.CreateTransaction(0, innerTx.Value, relayerAddr, sndAddr, gasPrice, rTxGasLimit, rtxData) diff --git a/integrationTests/vm/txsFee/relayedBuiltInFunctions_test.go b/integrationTests/vm/txsFee/relayedBuiltInFunctions_test.go index 72d972b07ff..2c56fdd1f04 100644 --- a/integrationTests/vm/txsFee/relayedBuiltInFunctions_test.go +++ b/integrationTests/vm/txsFee/relayedBuiltInFunctions_test.go @@ -34,7 +34,6 @@ func TestRelayedBuildInFunctionChangeOwnerCallShouldWork(t *testing.T) { relayerAddr := []byte("12345678901234567890123456789033") newOwner := []byte("12345678901234567890123456789112") - gasPrice := uint64(10) gasLimit := uint64(1000) txData := []byte(core.BuiltInFunctionChangeOwnerAddress + "@" + hex.EncodeToString(newOwner)) @@ -42,7 +41,7 @@ func TestRelayedBuildInFunctionChangeOwnerCallShouldWork(t *testing.T) { _, _ = vm.CreateAccount(testContext.Accounts, relayerAddr, 0, big.NewInt(30000)) - rtxData := utils.PrepareRelayerTxData(innerTx) + rtxData := integrationTests.PrepareRelayedTxDataV1(innerTx) rTxGasLimit := 1 + gasLimit + uint64(len(rtxData)) rtx := vm.CreateTransaction(0, innerTx.Value, relayerAddr, owner, gasPrice, rTxGasLimit, rtxData) @@ -81,7 +80,6 @@ func TestRelayedBuildInFunctionChangeOwnerCallWrongOwnerShouldConsumeGas(t *test relayerAddr := []byte("12345678901234567890123456789033") sndAddr := []byte("12345678901234567890123456789113") newOwner := []byte("12345678901234567890123456789112") - gasPrice := uint64(10) gasLimit := uint64(1000) txData := []byte(core.BuiltInFunctionChangeOwnerAddress + "@" + hex.EncodeToString(newOwner)) @@ -89,7 +87,7 @@ func TestRelayedBuildInFunctionChangeOwnerCallWrongOwnerShouldConsumeGas(t *test _, _ = vm.CreateAccount(testContext.Accounts, relayerAddr, 0, big.NewInt(30000)) - rtxData := utils.PrepareRelayerTxData(innerTx) + rtxData := integrationTests.PrepareRelayedTxDataV1(innerTx) rTxGasLimit := 1 + gasLimit + uint64(len(rtxData)) rtx := vm.CreateTransaction(0, innerTx.Value, relayerAddr, owner, gasPrice, rTxGasLimit, rtxData) @@ -127,7 +125,6 @@ func TestRelayedBuildInFunctionChangeOwnerInvalidAddressShouldConsumeGas(t *test relayerAddr := []byte("12345678901234567890123456789033") newOwner := []byte("invalidAddress") - gasPrice := uint64(10) gasLimit := uint64(1000) txData := []byte(core.BuiltInFunctionChangeOwnerAddress + "@" + hex.EncodeToString(newOwner)) @@ -135,7 +132,7 @@ func TestRelayedBuildInFunctionChangeOwnerInvalidAddressShouldConsumeGas(t *test _, _ = vm.CreateAccount(testContext.Accounts, relayerAddr, 0, big.NewInt(30000)) - rtxData := utils.PrepareRelayerTxData(innerTx) + rtxData := integrationTests.PrepareRelayedTxDataV1(innerTx) rTxGasLimit := 1 + gasLimit + uint64(len(rtxData)) rtx := vm.CreateTransaction(0, innerTx.Value, relayerAddr, owner, gasPrice, rTxGasLimit, rtxData) @@ -172,7 +169,6 @@ func TestRelayedBuildInFunctionChangeOwnerCallInsufficientGasLimitShouldConsumeG relayerAddr := []byte("12345678901234567890123456789033") newOwner := []byte("12345678901234567890123456789112") - gasPrice := uint64(10) txData := []byte(core.BuiltInFunctionChangeOwnerAddress + "@" + hex.EncodeToString(newOwner)) gasLimit := uint64(len(txData) - 1) @@ -180,7 +176,7 @@ func 
TestRelayedBuildInFunctionChangeOwnerCallInsufficientGasLimitShouldConsumeG _, _ = vm.CreateAccount(testContext.Accounts, relayerAddr, 0, big.NewInt(30000)) - rtxData := utils.PrepareRelayerTxData(innerTx) + rtxData := integrationTests.PrepareRelayedTxDataV1(innerTx) rTxGasLimit := 1 + gasLimit + uint64(len(rtxData)) rtx := vm.CreateTransaction(0, innerTx.Value, relayerAddr, owner, gasPrice, rTxGasLimit, rtxData) @@ -217,7 +213,6 @@ func TestRelayedBuildInFunctionChangeOwnerCallOutOfGasShouldConsumeGas(t *testin relayerAddr := []byte("12345678901234567890123456789033") newOwner := []byte("12345678901234567890123456789112") - gasPrice := uint64(10) txData := []byte(core.BuiltInFunctionChangeOwnerAddress + "@" + hex.EncodeToString(newOwner)) gasLimit := uint64(len(txData) + 1) @@ -225,7 +220,7 @@ func TestRelayedBuildInFunctionChangeOwnerCallOutOfGasShouldConsumeGas(t *testin _, _ = vm.CreateAccount(testContext.Accounts, relayerAddr, 0, big.NewInt(30000)) - rtxData := utils.PrepareRelayerTxData(innerTx) + rtxData := integrationTests.PrepareRelayedTxDataV1(innerTx) rTxGasLimit := 1 + gasLimit + uint64(len(rtxData)) rtx := vm.CreateTransaction(0, innerTx.Value, relayerAddr, owner, gasPrice, rTxGasLimit, rtxData) diff --git a/integrationTests/vm/txsFee/relayedDns_test.go b/integrationTests/vm/txsFee/relayedDns_test.go index 31867df3330..37d1633a53f 100644 --- a/integrationTests/vm/txsFee/relayedDns_test.go +++ b/integrationTests/vm/txsFee/relayedDns_test.go @@ -11,6 +11,7 @@ import ( "testing" "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/integrationTests" "github.com/multiversx/mx-chain-go/integrationTests/vm" "github.com/multiversx/mx-chain-go/integrationTests/vm/txsFee/utils" vmcommon "github.com/multiversx/mx-chain-vm-common-go" @@ -28,7 +29,6 @@ func TestRelayedTxDnsTransaction_ShouldWork(t *testing.T) { relayerAddr := []byte("12345678901234567890123456789033") sndAddr := []byte("12345678901234567890123456789112") rcvAddr := []byte("12345678901234567890123456789110") - gasPrice := uint64(10) gasLimit := uint64(500000) _, _ = vm.CreateAccount(testContext.Accounts, sndAddr, 0, big.NewInt(0)) @@ -40,7 +40,7 @@ func TestRelayedTxDnsTransaction_ShouldWork(t *testing.T) { // create user name for sender innerTx := vm.CreateTransaction(0, big.NewInt(0), sndAddr, scAddress, gasPrice, gasLimit, txData) - rtxData := utils.PrepareRelayerTxData(innerTx) + rtxData := integrationTests.PrepareRelayedTxDataV1(innerTx) rTxGasLimit := 1 + gasLimit + uint64(len(rtxData)) rtx := vm.CreateTransaction(0, innerTx.Value, relayerAddr, sndAddr, gasPrice, rTxGasLimit, rtxData) @@ -62,7 +62,7 @@ func TestRelayedTxDnsTransaction_ShouldWork(t *testing.T) { // create user name for receiver innerTx = vm.CreateTransaction(0, big.NewInt(0), rcvAddr, scAddress, gasPrice, gasLimit, txData) - rtxData = utils.PrepareRelayerTxData(innerTx) + rtxData = integrationTests.PrepareRelayedTxDataV1(innerTx) rTxGasLimit = 1 + gasLimit + uint64(len(rtxData)) rtx = vm.CreateTransaction(1, innerTx.Value, relayerAddr, rcvAddr, gasPrice, rTxGasLimit, rtxData) @@ -84,7 +84,7 @@ func TestRelayedTxDnsTransaction_ShouldWork(t *testing.T) { innerTx.SndUserName = sndAddrUserName innerTx.RcvUserName = rcvAddrUserName - rtxData = utils.PrepareRelayerTxData(innerTx) + rtxData = integrationTests.PrepareRelayedTxDataV1(innerTx) rTxGasLimit = 1 + gasLimit + uint64(len(rtxData)) rtx = vm.CreateTransaction(2, innerTx.Value, relayerAddr, sndAddr, gasPrice, rTxGasLimit, rtxData) diff --git 
a/integrationTests/vm/txsFee/relayedESDT_test.go b/integrationTests/vm/txsFee/relayedESDT_test.go index 80e4b0e0462..96b4ad6ce14 100644 --- a/integrationTests/vm/txsFee/relayedESDT_test.go +++ b/integrationTests/vm/txsFee/relayedESDT_test.go @@ -10,6 +10,7 @@ import ( "testing" "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/integrationTests" "github.com/multiversx/mx-chain-go/integrationTests/vm" "github.com/multiversx/mx-chain-go/integrationTests/vm/txsFee/utils" vmcommon "github.com/multiversx/mx-chain-vm-common-go" @@ -31,11 +32,10 @@ func TestRelayedESDTTransferShouldWork(t *testing.T) { utils.CreateAccountWithESDTBalance(t, testContext.Accounts, sndAddr, big.NewInt(0), token, 0, esdtBalance) _, _ = vm.CreateAccount(testContext.Accounts, relayerAddr, 0, relayerBalance) - gasPrice := uint64(10) gasLimit := uint64(40) innerTx := utils.CreateESDTTransferTx(0, sndAddr, rcvAddr, token, big.NewInt(100), gasPrice, gasLimit) - rtxData := utils.PrepareRelayerTxData(innerTx) + rtxData := integrationTests.PrepareRelayedTxDataV1(innerTx) rTxGasLimit := 1 + gasLimit + uint64(len(rtxData)) rtx := vm.CreateTransaction(0, innerTx.Value, relayerAddr, sndAddr, gasPrice, rTxGasLimit, rtxData) @@ -77,11 +77,10 @@ func TestTestRelayedESTTransferNotEnoughESTValueShouldConsumeGas(t *testing.T) { utils.CreateAccountWithESDTBalance(t, testContext.Accounts, sndAddr, big.NewInt(0), token, 0, esdtBalance) _, _ = vm.CreateAccount(testContext.Accounts, relayerAddr, 0, relayerBalance) - gasPrice := uint64(10) gasLimit := uint64(40) innerTx := utils.CreateESDTTransferTx(0, sndAddr, rcvAddr, token, big.NewInt(100000001), gasPrice, gasLimit) - rtxData := utils.PrepareRelayerTxData(innerTx) + rtxData := integrationTests.PrepareRelayedTxDataV1(innerTx) rTxGasLimit := 1 + gasLimit + uint64(len(rtxData)) rtx := vm.CreateTransaction(0, innerTx.Value, relayerAddr, sndAddr, gasPrice, rTxGasLimit, rtxData) diff --git a/integrationTests/vm/txsFee/relayedMoveBalance_test.go b/integrationTests/vm/txsFee/relayedMoveBalance_test.go index e0e52bc1e4b..89a447742ca 100644 --- a/integrationTests/vm/txsFee/relayedMoveBalance_test.go +++ b/integrationTests/vm/txsFee/relayedMoveBalance_test.go @@ -5,8 +5,8 @@ import ( "testing" "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/integrationTests" "github.com/multiversx/mx-chain-go/integrationTests/vm" - "github.com/multiversx/mx-chain-go/integrationTests/vm/txsFee/utils" "github.com/multiversx/mx-chain-go/process" vmcommon "github.com/multiversx/mx-chain-vm-common-go" "github.com/stretchr/testify/require" @@ -23,7 +23,6 @@ func TestRelayedMoveBalanceShouldWork(t *testing.T) { senderNonce := uint64(0) senderBalance := big.NewInt(0) - gasPrice := uint64(10) gasLimit := uint64(100) _, _ = vm.CreateAccount(testContext.Accounts, sndAddr, 0, senderBalance) @@ -32,7 +31,7 @@ func TestRelayedMoveBalanceShouldWork(t *testing.T) { // gas consumed = 50 userTx := vm.CreateTransaction(senderNonce, big.NewInt(100), sndAddr, rcvAddr, gasPrice, gasLimit, []byte("aaaa")) - rtxData := utils.PrepareRelayerTxData(userTx) + rtxData := integrationTests.PrepareRelayedTxDataV1(userTx) rTxGasLimit := 1 + gasLimit + uint64(len(rtxData)) rtx := vm.CreateTransaction(0, userTx.Value, relayerAddr, sndAddr, gasPrice, rTxGasLimit, rtxData) @@ -73,7 +72,7 @@ func TestRelayedMoveBalanceInvalidGasLimitShouldConsumeGas(t *testing.T) { _, _ = vm.CreateAccount(testContext.Accounts, relayerAddr, 0, big.NewInt(3000)) - rtxData := utils.PrepareRelayerTxData(userTx) + 
rtxData := integrationTests.PrepareRelayedTxDataV1(userTx) rTxGasLimit := 2 + userTx.GasLimit + uint64(len(rtxData)) rtx := vm.CreateTransaction(0, userTx.Value, relayerAddr, sndAddr, 1, rTxGasLimit, rtxData) @@ -105,7 +104,7 @@ func TestRelayedMoveBalanceInvalidUserTxShouldConsumeGas(t *testing.T) { _, _ = vm.CreateAccount(testContext.Accounts, relayerAddr, 0, big.NewInt(3000)) - rtxData := utils.PrepareRelayerTxData(userTx) + rtxData := integrationTests.PrepareRelayedTxDataV1(userTx) rTxGasLimit := 1 + userTx.GasLimit + uint64(len(rtxData)) rtx := vm.CreateTransaction(0, userTx.Value, relayerAddr, sndAddr, 1, rTxGasLimit, rtxData) @@ -137,7 +136,7 @@ func TestRelayedMoveBalanceInvalidUserTxValueShouldConsumeGas(t *testing.T) { _, _ = vm.CreateAccount(testContext.Accounts, relayerAddr, 0, big.NewInt(3000)) - rtxData := utils.PrepareRelayerTxData(userTx) + rtxData := integrationTests.PrepareRelayedTxDataV1(userTx) rTxGasLimit := 1 + userTx.GasLimit + uint64(len(rtxData)) rtx := vm.CreateTransaction(0, big.NewInt(100), relayerAddr, sndAddr, 1, rTxGasLimit, rtxData) diff --git a/integrationTests/vm/txsFee/relayedScCalls_test.go b/integrationTests/vm/txsFee/relayedScCalls_test.go index 24b1ca74787..cb1c9bbfbd9 100644 --- a/integrationTests/vm/txsFee/relayedScCalls_test.go +++ b/integrationTests/vm/txsFee/relayedScCalls_test.go @@ -11,6 +11,7 @@ import ( "testing" "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/integrationTests" "github.com/multiversx/mx-chain-go/integrationTests/vm" "github.com/multiversx/mx-chain-go/integrationTests/vm/txsFee/utils" vmcommon "github.com/multiversx/mx-chain-vm-common-go" @@ -27,7 +28,6 @@ func TestRelayedScCallShouldWork(t *testing.T) { relayerAddr := []byte("12345678901234567890123456789033") sndAddr := []byte("12345678901234567890123456789112") - gasPrice := uint64(10) gasLimit := uint64(1000) _, _ = vm.CreateAccount(testContext.Accounts, sndAddr, 0, big.NewInt(0)) @@ -35,7 +35,7 @@ func TestRelayedScCallShouldWork(t *testing.T) { userTx := vm.CreateTransaction(0, big.NewInt(100), sndAddr, scAddress, gasPrice, gasLimit, []byte("increment")) - rtxData := utils.PrepareRelayerTxData(userTx) + rtxData := integrationTests.PrepareRelayedTxDataV1(userTx) rTxGasLimit := 1 + gasLimit + uint64(len(rtxData)) rtx := vm.CreateTransaction(0, userTx.Value, relayerAddr, sndAddr, gasPrice, rTxGasLimit, rtxData) @@ -70,7 +70,6 @@ func TestRelayedScCallContractNotFoundShouldConsumeGas(t *testing.T) { relayerAddr := []byte("12345678901234567890123456789033") sndAddr := []byte("12345678901234567890123456789112") - gasPrice := uint64(10) gasLimit := uint64(1000) _, _ = vm.CreateAccount(testContext.Accounts, sndAddr, 0, big.NewInt(0)) @@ -78,7 +77,7 @@ func TestRelayedScCallContractNotFoundShouldConsumeGas(t *testing.T) { userTx := vm.CreateTransaction(0, big.NewInt(100), sndAddr, scAddrBytes, gasPrice, gasLimit, []byte("increment")) - rtxData := utils.PrepareRelayerTxData(userTx) + rtxData := integrationTests.PrepareRelayedTxDataV1(userTx) rTxGasLimit := 1 + gasLimit + uint64(len(rtxData)) rtx := vm.CreateTransaction(0, userTx.Value, relayerAddr, sndAddr, gasPrice, rTxGasLimit, rtxData) @@ -110,7 +109,6 @@ func TestRelayedScCallInvalidMethodShouldConsumeGas(t *testing.T) { relayerAddr := []byte("12345678901234567890123456789033") sndAddr := []byte("12345678901234567890123456789112") - gasPrice := uint64(10) gasLimit := uint64(1000) _, _ = vm.CreateAccount(testContext.Accounts, sndAddr, 0, big.NewInt(0)) @@ -118,7 +116,7 @@ func 
TestRelayedScCallInvalidMethodShouldConsumeGas(t *testing.T) { userTx := vm.CreateTransaction(0, big.NewInt(100), sndAddr, scAddress, gasPrice, gasLimit, []byte("invalidMethod")) - rtxData := utils.PrepareRelayerTxData(userTx) + rtxData := integrationTests.PrepareRelayedTxDataV1(userTx) rTxGasLimit := 1 + gasLimit + uint64(len(rtxData)) rtx := vm.CreateTransaction(0, userTx.Value, relayerAddr, sndAddr, gasPrice, rTxGasLimit, rtxData) @@ -150,7 +148,6 @@ func TestRelayedScCallInsufficientGasLimitShouldConsumeGas(t *testing.T) { relayerAddr := []byte("12345678901234567890123456789033") sndAddr := []byte("12345678901234567890123456789112") - gasPrice := uint64(10) gasLimit := uint64(5) _, _ = vm.CreateAccount(testContext.Accounts, sndAddr, 0, big.NewInt(0)) @@ -158,7 +155,7 @@ func TestRelayedScCallInsufficientGasLimitShouldConsumeGas(t *testing.T) { userTx := vm.CreateTransaction(0, big.NewInt(100), sndAddr, scAddress, gasPrice, gasLimit, []byte("increment")) - rtxData := utils.PrepareRelayerTxData(userTx) + rtxData := integrationTests.PrepareRelayedTxDataV1(userTx) rTxGasLimit := 1 + gasLimit + uint64(len(rtxData)) rtx := vm.CreateTransaction(0, userTx.Value, relayerAddr, sndAddr, gasPrice, rTxGasLimit, rtxData) @@ -189,7 +186,6 @@ func TestRelayedScCallOutOfGasShouldConsumeGas(t *testing.T) { relayerAddr := []byte("12345678901234567890123456789033") sndAddr := []byte("12345678901234567890123456789112") - gasPrice := uint64(10) gasLimit := uint64(20) _, _ = vm.CreateAccount(testContext.Accounts, sndAddr, 0, big.NewInt(0)) @@ -197,7 +193,7 @@ func TestRelayedScCallOutOfGasShouldConsumeGas(t *testing.T) { userTx := vm.CreateTransaction(0, big.NewInt(100), sndAddr, scAddress, gasPrice, gasLimit, []byte("increment")) - rtxData := utils.PrepareRelayerTxData(userTx) + rtxData := integrationTests.PrepareRelayedTxDataV1(userTx) rTxGasLimit := 1 + gasLimit + uint64(len(rtxData)) rtx := vm.CreateTransaction(0, userTx.Value, relayerAddr, sndAddr, gasPrice, rTxGasLimit, rtxData) diff --git a/integrationTests/vm/txsFee/relayedScDeploy_test.go b/integrationTests/vm/txsFee/relayedScDeploy_test.go index d56071d61ec..e31ec9c5386 100644 --- a/integrationTests/vm/txsFee/relayedScDeploy_test.go +++ b/integrationTests/vm/txsFee/relayedScDeploy_test.go @@ -10,8 +10,8 @@ import ( "testing" "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/integrationTests" "github.com/multiversx/mx-chain-go/integrationTests/vm" - "github.com/multiversx/mx-chain-go/integrationTests/vm/txsFee/utils" "github.com/multiversx/mx-chain-go/integrationTests/vm/wasm" vmcommon "github.com/multiversx/mx-chain-vm-common-go" "github.com/stretchr/testify/require" @@ -27,7 +27,6 @@ func TestRelayedScDeployShouldWork(t *testing.T) { senderNonce := uint64(0) senderBalance := big.NewInt(0) - gasPrice := uint64(10) gasLimit := uint64(1000) _, _ = vm.CreateAccount(testContext.Accounts, sndAddr, 0, senderBalance) @@ -36,7 +35,7 @@ func TestRelayedScDeployShouldWork(t *testing.T) { scCode := wasm.GetSCCode("../wasm/testdata/misc/fib_wasm/output/fib_wasm.wasm") userTx := vm.CreateTransaction(senderNonce, big.NewInt(0), sndAddr, vm.CreateEmptyAddress(), gasPrice, gasLimit, []byte(wasm.CreateDeployTxData(scCode))) - rtxData := utils.PrepareRelayerTxData(userTx) + rtxData := integrationTests.PrepareRelayedTxDataV1(userTx) rTxGasLimit := 1 + gasLimit + uint64(len(rtxData)) rtx := vm.CreateTransaction(0, big.NewInt(0), relayerAddr, sndAddr, gasPrice, rTxGasLimit, rtxData) @@ -68,7 +67,6 @@ func 
TestRelayedScDeployInvalidCodeShouldConsumeGas(t *testing.T) { senderNonce := uint64(0) senderBalance := big.NewInt(0) - gasPrice := uint64(10) gasLimit := uint64(500) _, _ = vm.CreateAccount(testContext.Accounts, sndAddr, 0, senderBalance) @@ -79,7 +77,7 @@ func TestRelayedScDeployInvalidCodeShouldConsumeGas(t *testing.T) { scCodeBytes = append(scCodeBytes, []byte("aaaaa")...) userTx := vm.CreateTransaction(senderNonce, big.NewInt(0), sndAddr, vm.CreateEmptyAddress(), gasPrice, gasLimit, scCodeBytes) - rtxData := utils.PrepareRelayerTxData(userTx) + rtxData := integrationTests.PrepareRelayedTxDataV1(userTx) rTxGasLimit := 1 + gasLimit + uint64(len(rtxData)) rtx := vm.CreateTransaction(0, big.NewInt(0), relayerAddr, sndAddr, gasPrice, rTxGasLimit, rtxData) @@ -110,7 +108,6 @@ func TestRelayedScDeployInsufficientGasLimitShouldConsumeGas(t *testing.T) { senderNonce := uint64(0) senderBalance := big.NewInt(0) - gasPrice := uint64(10) gasLimit := uint64(500) _, _ = vm.CreateAccount(testContext.Accounts, sndAddr, 0, senderBalance) @@ -119,7 +116,7 @@ func TestRelayedScDeployInsufficientGasLimitShouldConsumeGas(t *testing.T) { scCode := wasm.GetSCCode("../wasm/testdata/misc/fib_wasm/output/fib_wasm.wasm") userTx := vm.CreateTransaction(senderNonce, big.NewInt(0), sndAddr, vm.CreateEmptyAddress(), gasPrice, gasLimit, []byte(wasm.CreateDeployTxData(scCode))) - rtxData := utils.PrepareRelayerTxData(userTx) + rtxData := integrationTests.PrepareRelayedTxDataV1(userTx) rTxGasLimit := 1 + gasLimit + uint64(len(rtxData)) rtx := vm.CreateTransaction(0, big.NewInt(0), relayerAddr, sndAddr, gasPrice, rTxGasLimit, rtxData) @@ -150,7 +147,6 @@ func TestRelayedScDeployOutOfGasShouldConsumeGas(t *testing.T) { senderNonce := uint64(0) senderBalance := big.NewInt(0) - gasPrice := uint64(10) gasLimit := uint64(570) _, _ = vm.CreateAccount(testContext.Accounts, sndAddr, 0, senderBalance) @@ -159,7 +155,7 @@ func TestRelayedScDeployOutOfGasShouldConsumeGas(t *testing.T) { scCode := wasm.GetSCCode("../wasm/testdata/misc/fib_wasm/output/fib_wasm.wasm") userTx := vm.CreateTransaction(senderNonce, big.NewInt(0), sndAddr, vm.CreateEmptyAddress(), gasPrice, gasLimit, []byte(wasm.CreateDeployTxData(scCode))) - rtxData := utils.PrepareRelayerTxData(userTx) + rtxData := integrationTests.PrepareRelayedTxDataV1(userTx) rTxGasLimit := 1 + gasLimit + uint64(len(rtxData)) rtx := vm.CreateTransaction(0, big.NewInt(0), relayerAddr, sndAddr, gasPrice, rTxGasLimit, rtxData) diff --git a/integrationTests/vm/txsFee/scCalls_test.go b/integrationTests/vm/txsFee/scCalls_test.go index 9717f0c3c86..3c52770d448 100644 --- a/integrationTests/vm/txsFee/scCalls_test.go +++ b/integrationTests/vm/txsFee/scCalls_test.go @@ -97,7 +97,6 @@ func TestScCallShouldWork(t *testing.T) { sndAddr := []byte("12345678901234567890123456789112") senderBalance := big.NewInt(100000) - gasPrice := uint64(10) gasLimit := uint64(1000) _, _ = vm.CreateAccount(testContext.Accounts, sndAddr, 0, senderBalance) @@ -139,7 +138,6 @@ func TestScCallContractNotFoundShouldConsumeGas(t *testing.T) { scAddrBytes, _ := hex.DecodeString(scAddress) sndAddr := []byte("12345678901234567890123456789112") senderBalance := big.NewInt(100000) - gasPrice := uint64(10) gasLimit := uint64(1000) _, _ = vm.CreateAccount(testContext.Accounts, sndAddr, 0, senderBalance) @@ -170,7 +168,6 @@ func TestScCallInvalidMethodToCallShouldConsumeGas(t *testing.T) { sndAddr := []byte("12345678901234567890123456789112") senderBalance := big.NewInt(100000) - gasPrice := uint64(10) gasLimit := uint64(1000) 
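// Note on the gas arithmetic used by all the relayed tests in this diff: every
// relayer transaction is sized as rTxGasLimit := 1 + gasLimit + uint64(len(rtxData)).
// Under the fee settings these tests run with (minGasPrice = 1, gasPerDataByte = 1,
// minGasLimit = 1, per the comment in moveBalance_test.go), that works out to a
// sketch like the following (helper name is illustrative):
//
//	func relayedTxGasLimit(innerGasLimit uint64, rtxData []byte) uint64 {
//		const minGasLimit = uint64(1)    // base cost of the relayer tx itself
//		const gasPerDataByte = uint64(1) // one gas unit per byte of relayed payload
//		return minGasLimit + gasPerDataByte*uint64(len(rtxData)) + innerGasLimit
//	}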
_, _ = vm.CreateAccount(testContext.Accounts, sndAddr, 0, senderBalance) @@ -203,7 +200,6 @@ func TestScCallInsufficientGasLimitShouldNotConsumeGas(t *testing.T) { sndAddr := []byte("12345678901234567890123456789112") senderBalance := big.NewInt(100000) - gasPrice := uint64(10) gasLimit := uint64(9) _, _ = vm.CreateAccount(testContext.Accounts, sndAddr, 0, senderBalance) @@ -239,7 +235,6 @@ func TestScCallOutOfGasShouldConsumeGas(t *testing.T) { sndAddr := []byte("12345678901234567890123456789112") senderBalance := big.NewInt(100000) - gasPrice := uint64(10) gasLimit := uint64(20) _, _ = vm.CreateAccount(testContext.Accounts, sndAddr, 0, senderBalance) @@ -275,7 +270,6 @@ func TestScCallAndGasChangeShouldWork(t *testing.T) { sndAddr := []byte("12345678901234567890123456789112") senderBalance := big.NewInt(10000000) - gasPrice := uint64(10) gasLimit := uint64(1000) _, _ = vm.CreateAccount(testContext.Accounts, sndAddr, 0, senderBalance) @@ -314,7 +308,6 @@ func TestESDTScCallAndGasChangeShouldWork(t *testing.T) { owner := []byte("12345678901234567890123456789011") senderBalance := big.NewInt(1000000000) - gasPrice := uint64(10) gasLimit := uint64(2000000) _, _ = vm.CreateAccount(testContext.Accounts, owner, 0, senderBalance) @@ -324,7 +317,6 @@ func TestESDTScCallAndGasChangeShouldWork(t *testing.T) { sndAddr := []byte("12345678901234567890123456789112") senderBalance = big.NewInt(10000000) - gasPrice = uint64(10) gasLimit = uint64(30000) esdtBalance := big.NewInt(100000000) @@ -427,7 +419,6 @@ func TestScCallBuyNFT_OneFailedTxAndOneOkTx(t *testing.T) { sndAddr1 := []byte("12345678901234567890123456789112") sndAddr2 := []byte("12345678901234567890123456789113") senderBalance := big.NewInt(1000000000000000000) - gasPrice := uint64(10) gasLimit := uint64(1000000) _, _ = vm.CreateAccount(testContext.Accounts, sndAddr1, 0, senderBalance) @@ -489,7 +480,6 @@ func TestScCallBuyNFT_TwoOkTxs(t *testing.T) { sndAddr1 := []byte("12345678901234567890123456789112") sndAddr2 := []byte("12345678901234567890123456789113") senderBalance := big.NewInt(1000000000000000000) - gasPrice := uint64(10) gasLimit := uint64(1000000) _, _ = vm.CreateAccount(testContext.Accounts, sndAddr1, 0, senderBalance) @@ -560,7 +550,6 @@ func TestScCallDistributeStakingRewards_ShouldWork(t *testing.T) { require.Nil(t, err) senderBalance := big.NewInt(1000000000000000000) - gasPrice := uint64(10) gasLimit := uint64(600000000) _, _ = vm.CreateAccount(testContext.Accounts, sndAddr1, 0, senderBalance) diff --git a/integrationTests/vm/txsFee/scDeploy_test.go b/integrationTests/vm/txsFee/scDeploy_test.go index 6157314bd4d..2f608b8cf86 100644 --- a/integrationTests/vm/txsFee/scDeploy_test.go +++ b/integrationTests/vm/txsFee/scDeploy_test.go @@ -25,7 +25,6 @@ func TestScDeployShouldWork(t *testing.T) { sndAddr := []byte("12345678901234567890123456789012") senderNonce := uint64(0) senderBalance := big.NewInt(100000) - gasPrice := uint64(10) gasLimit := uint64(1000) _, _ = vm.CreateAccount(testContext.Accounts, sndAddr, 0, senderBalance) @@ -57,7 +56,6 @@ func TestScDeployInvalidContractCodeShouldConsumeGas(t *testing.T) { sndAddr := []byte("12345678901234567890123456789012") senderNonce := uint64(0) senderBalance := big.NewInt(100000) - gasPrice := uint64(10) gasLimit := uint64(1000) _, _ = vm.CreateAccount(testContext.Accounts, sndAddr, 0, senderBalance) @@ -90,7 +88,6 @@ func TestScDeployInsufficientGasLimitShouldNotConsumeGas(t *testing.T) { sndAddr := []byte("12345678901234567890123456789012") senderNonce := uint64(0) senderBalance 
:= big.NewInt(100000) - gasPrice := uint64(10) gasLimit := uint64(568) _, _ = vm.CreateAccount(testContext.Accounts, sndAddr, 0, senderBalance) @@ -122,7 +119,6 @@ func TestScDeployOutOfGasShouldConsumeGas(t *testing.T) { sndAddr := []byte("12345678901234567890123456789012") senderNonce := uint64(0) senderBalance := big.NewInt(100000) - gasPrice := uint64(10) gasLimit := uint64(570) _, _ = vm.CreateAccount(testContext.Accounts, sndAddr, 0, senderBalance) diff --git a/integrationTests/vm/txsFee/testdata/third/third.wasm b/integrationTests/vm/txsFee/testdata/third/third.wasm new file mode 100644 index 00000000000..ee7f953b028 Binary files /dev/null and b/integrationTests/vm/txsFee/testdata/third/third.wasm differ diff --git a/integrationTests/vm/txsFee/txCostEstimator_test.go b/integrationTests/vm/txsFee/txCostEstimator_test.go index 0157944a8b4..c39f3f9769f 100644 --- a/integrationTests/vm/txsFee/txCostEstimator_test.go +++ b/integrationTests/vm/txsFee/txCostEstimator_test.go @@ -39,7 +39,6 @@ func TestSCCallCostTransactionCost(t *testing.T) { sndAddr := []byte("12345678901234567890123456789112") senderBalance := big.NewInt(100000) - gasPrice := uint64(10) gasLimit := uint64(1000) _, _ = vm.CreateAccount(testContext.Accounts, sndAddr, 0, senderBalance) @@ -86,7 +85,6 @@ func TestAsyncCallsTransactionCost(t *testing.T) { _, _ = vm.CreateAccount(testContext.Accounts, ownerAddr, 0, egldBalance) _, _ = vm.CreateAccount(testContext.Accounts, senderAddr, 0, egldBalance) - gasPrice := uint64(10) ownerAccount, _ := testContext.Accounts.LoadAccount(ownerAddr) deployGasLimit := uint64(2000) @@ -173,7 +171,6 @@ func TestAsyncESDTTransfer(t *testing.T) { utils.CreateAccountWithESDTBalance(t, testContext.Accounts, sndAddr, egldBalance, token, 0, esdtBalance) // deploy 2 contracts - gasPrice := uint64(10) ownerAccount, _ := testContext.Accounts.LoadAccount(ownerAddr) deployGasLimit := uint64(50000) diff --git a/integrationTests/vm/txsFee/utils/utils.go b/integrationTests/vm/txsFee/utils/utils.go index 1d9e114d8c2..88ba54a0fc2 100644 --- a/integrationTests/vm/txsFee/utils/utils.go +++ b/integrationTests/vm/txsFee/utils/utils.go @@ -11,13 +11,11 @@ import ( "strings" "testing" - "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-core-go/data/scheduled" "github.com/multiversx/mx-chain-core-go/data/smartContractResult" "github.com/multiversx/mx-chain-core-go/data/transaction" "github.com/multiversx/mx-chain-core-go/hashing/keccak" - "github.com/multiversx/mx-chain-core-go/marshal" "github.com/multiversx/mx-chain-go/integrationTests/mock" "github.com/multiversx/mx-chain-go/integrationTests/vm" "github.com/multiversx/mx-chain-go/integrationTests/vm/wasm" @@ -31,8 +29,7 @@ import ( ) var ( - protoMarshalizer = &marshal.GogoProtoMarshalizer{} - log = logger.GetOrCreate("integrationTests/vm/txFee/utils") + log = logger.GetOrCreate("integrationTests/vm/txFee/utils") ) // DoDeploy - @@ -257,12 +254,6 @@ func DoDeployDNS(t *testing.T, testContext *vm.VMTestContext, pathToContract str return scAddr, owner } -// PrepareRelayerTxData - -func PrepareRelayerTxData(innerTx *transaction.Transaction) []byte { - userTxBytes, _ := protoMarshalizer.Marshal(innerTx) - return []byte(core.RelayedTransaction + "@" + hex.EncodeToString(userTxBytes)) -} - // CheckOwnerAddr - func CheckOwnerAddr(t *testing.T, testContext *vm.VMTestContext, scAddr []byte, owner []byte) { acc, err := testContext.Accounts.GetExistingAccount(scAddr) diff --git 
a/integrationTests/vm/txsFee/utils/utilsESDT.go b/integrationTests/vm/txsFee/utils/utilsESDT.go index 96c2c56e24e..dbc2a6665e1 100644 --- a/integrationTests/vm/txsFee/utils/utilsESDT.go +++ b/integrationTests/vm/txsFee/utils/utilsESDT.go @@ -10,6 +10,7 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/data/esdt" "github.com/multiversx/mx-chain-core-go/data/transaction" + "github.com/multiversx/mx-chain-go/integrationTests" "github.com/multiversx/mx-chain-go/integrationTests/vm" "github.com/multiversx/mx-chain-go/state" "github.com/multiversx/mx-chain-go/testscommon/txDataBuilder" @@ -46,7 +47,7 @@ func CreateAccountWithESDTBalance( } } - esdtDataBytes, err := protoMarshalizer.Marshal(esdtData) + esdtDataBytes, err := integrationTests.TestMarshalizer.Marshal(esdtData) require.Nil(t, err) key := append([]byte(core.ProtectedKeyPrefix), []byte(core.ESDTKeyIdentifier)...) @@ -95,7 +96,7 @@ func CreateAccountWithNFT( }, } - esdtDataBytes, err := protoMarshalizer.Marshal(esdtData) + esdtDataBytes, err := integrationTests.TestMarshalizer.Marshal(esdtData) require.Nil(t, err) key := append([]byte(core.ProtectedKeyPrefix), []byte(core.ESDTKeyIdentifier)...) @@ -120,7 +121,7 @@ func saveNewTokenOnSystemAccount(t *testing.T, accnts state.AccountsAdapter, tok esdtDataOnSystemAcc.Reserved = []byte{1} esdtDataOnSystemAcc.Value.Set(esdtData.Value) - esdtDataBytes, err := protoMarshalizer.Marshal(esdtData) + esdtDataBytes, err := integrationTests.TestMarshalizer.Marshal(esdtData) require.Nil(t, err) sysAccount, err := accnts.LoadAccount(core.SystemAccountAddress) @@ -179,7 +180,7 @@ func SetESDTRoles( Roles: roles, } - rolesDataBytes, err := protoMarshalizer.Marshal(rolesData) + rolesDataBytes, err := integrationTests.TestMarshalizer.Marshal(rolesData) require.Nil(t, err) err = userAccount.SaveKeyValue(key, rolesDataBytes) diff --git a/integrationTests/vm/txsFee/validatorSC_test.go b/integrationTests/vm/txsFee/validatorSC_test.go index b9e9ffe9adf..ed385bfb4a8 100644 --- a/integrationTests/vm/txsFee/validatorSC_test.go +++ b/integrationTests/vm/txsFee/validatorSC_test.go @@ -58,7 +58,6 @@ func TestValidatorsSC_DoStakePutInQueueUnStakeAndUnBondShouldRefund(t *testing.T testContextMeta.BlockchainHook.(*hooks.BlockChainHookImpl).SetCurrentHeader(&block.MetaBlock{Epoch: 1}) saveDelegationManagerConfig(testContextMeta) - gasPrice := uint64(10) gasLimit := uint64(4000) sndAddr := []byte("12345678901234567890123456789012") tx := vm.CreateTransaction(0, value2500EGLD, sndAddr, vmAddr.ValidatorSCAddress, gasPrice, gasLimit, []byte(validatorStakeData)) @@ -94,8 +93,8 @@ func checkReturnLog(t *testing.T, testContextMeta *vm.VMTestContext, subStr stri } found := false - for _, log := range allLogs { - for _, event := range log.GetLogEvents() { + for _, eventLog := range allLogs { + for _, event := range eventLog.GetLogEvents() { if string(event.GetIdentifier()) == identifierStr { require.True(t, strings.Contains(string(event.GetTopics()[1]), subStr)) found = true @@ -115,7 +114,6 @@ func TestValidatorsSC_DoStakePutInQueueUnStakeAndUnBondTokensShouldRefund(t *tes saveDelegationManagerConfig(testContextMeta) testContextMeta.BlockchainHook.(*hooks.BlockChainHookImpl).SetCurrentHeader(&block.MetaBlock{Epoch: 1}) - gasPrice := uint64(10) gasLimit := uint64(4000) sndAddr := []byte("12345678901234567890123456789012") tx := vm.CreateTransaction(0, value2500EGLD, sndAddr, vmAddr.ValidatorSCAddress, gasPrice, gasLimit, []byte(validatorStakeData)) @@ -158,7 +156,6 @@ func 
diff --git a/integrationTests/vm/txsFee/validatorSC_test.go b/integrationTests/vm/txsFee/validatorSC_test.go
index b9e9ffe9adf..ed385bfb4a8 100644
--- a/integrationTests/vm/txsFee/validatorSC_test.go
+++ b/integrationTests/vm/txsFee/validatorSC_test.go
@@ -58,7 +58,6 @@ func TestValidatorsSC_DoStakePutInQueueUnStakeAndUnBondShouldRefund(t *testing.T
 	testContextMeta.BlockchainHook.(*hooks.BlockChainHookImpl).SetCurrentHeader(&block.MetaBlock{Epoch: 1})

 	saveDelegationManagerConfig(testContextMeta)
-	gasPrice := uint64(10)
 	gasLimit := uint64(4000)
 	sndAddr := []byte("12345678901234567890123456789012")
 	tx := vm.CreateTransaction(0, value2500EGLD, sndAddr, vmAddr.ValidatorSCAddress, gasPrice, gasLimit, []byte(validatorStakeData))
@@ -94,8 +93,8 @@ func checkReturnLog(t *testing.T, testContextMeta *vm.VMTestContext, subStr stri
 	}

 	found := false
-	for _, log := range allLogs {
-		for _, event := range log.GetLogEvents() {
+	for _, eventLog := range allLogs {
+		for _, event := range eventLog.GetLogEvents() {
 			if string(event.GetIdentifier()) == identifierStr {
 				require.True(t, strings.Contains(string(event.GetTopics()[1]), subStr))
 				found = true
@@ -115,7 +114,6 @@ func TestValidatorsSC_DoStakePutInQueueUnStakeAndUnBondTokensShouldRefund(t *tes
 	saveDelegationManagerConfig(testContextMeta)
 	testContextMeta.BlockchainHook.(*hooks.BlockChainHookImpl).SetCurrentHeader(&block.MetaBlock{Epoch: 1})

-	gasPrice := uint64(10)
 	gasLimit := uint64(4000)
 	sndAddr := []byte("12345678901234567890123456789012")
 	tx := vm.CreateTransaction(0, value2500EGLD, sndAddr, vmAddr.ValidatorSCAddress, gasPrice, gasLimit, []byte(validatorStakeData))
@@ -158,7 +156,6 @@ func testValidatorsSCDoStakeWithTopUpValueTryToUnStakeTokensAndUnBondTokens(t *t
 	saveDelegationManagerConfig(testContextMeta)
 	testContextMeta.BlockchainHook.(*hooks.BlockChainHookImpl).SetCurrentHeader(&block.MetaBlock{Epoch: 0})

-	gasPrice := uint64(10)
 	gasLimit := uint64(4000)
 	sndAddr := []byte("12345678901234567890123456789012")
 	tx := vm.CreateTransaction(0, value2700EGLD, sndAddr, vmAddr.ValidatorSCAddress, gasPrice, gasLimit, []byte(validatorStakeData))
@@ -171,9 +168,9 @@ func testValidatorsSCDoStakeWithTopUpValueTryToUnStakeTokensAndUnBondTokens(t *t
 	testContextMeta.TxsLogsProcessor.Clean()
 	tx = vm.CreateTransaction(0, big.NewInt(0), sndAddr, vmAddr.ValidatorSCAddress, gasPrice, gasLimit, []byte("unBondTokens@"+hex.EncodeToString(value200EGLD.Bytes())))
-	executeTxAndCheckResults(t, testContextMeta, tx, vmcommon.Ok, nil)
+	executeTxAndCheckResults(t, testContextMeta, tx, vmcommon.UserError, nil)

-	checkReturnLog(t, testContextMeta, noTokensToUnBondMessage, false)
+	checkReturnLog(t, testContextMeta, noTokensToUnBondMessage, true)
 }

 func TestValidatorsSC_ToStakePutInQueueUnStakeAndUnBondShouldRefundUnBondTokens(t *testing.T) {
@@ -186,7 +183,6 @@ func TestValidatorsSC_ToStakePutInQueueUnStakeAndUnBondShouldRefundUnBondTokens(
 	saveDelegationManagerConfig(testContextMeta)
 	testContextMeta.BlockchainHook.(*hooks.BlockChainHookImpl).SetCurrentHeader(&block.MetaBlock{Epoch: 1})

-	gasPrice := uint64(10)
 	gasLimit := uint64(4000)
 	sndAddr := []byte("12345678901234567890123456789012")
 	tx := vm.CreateTransaction(0, value2700EGLD, sndAddr, vmAddr.ValidatorSCAddress, gasPrice, gasLimit, []byte(validatorStakeData))
@@ -233,7 +229,6 @@ func TestValidatorsSC_ToStakePutInQueueUnStakeNodesAndUnBondNodesShouldRefund(t
 	saveDelegationManagerConfig(testContextMeta)
 	testContextMeta.BlockchainHook.(*hooks.BlockChainHookImpl).SetCurrentHeader(&block.MetaBlock{Epoch: 1})

-	gasPrice := uint64(10)
 	gasLimit := uint64(4000)
 	sndAddr := []byte("12345678901234567890123456789012")
 	tx := vm.CreateTransaction(0, value2700EGLD, sndAddr, vmAddr.ValidatorSCAddress, gasPrice, gasLimit, []byte(validatorStakeData))
diff --git a/integrationTests/vm/wasm/utils.go b/integrationTests/vm/wasm/utils.go
index 85db964ab33..6147c83de0d 100644
--- a/integrationTests/vm/wasm/utils.go
+++ b/integrationTests/vm/wasm/utils.go
@@ -46,6 +46,7 @@ import (
 	"github.com/multiversx/mx-chain-go/testscommon"
 	dataRetrieverMock "github.com/multiversx/mx-chain-go/testscommon/dataRetriever"
 	"github.com/multiversx/mx-chain-go/testscommon/epochNotifier"
+	"github.com/multiversx/mx-chain-go/testscommon/guardianMocks"
 	"github.com/multiversx/mx-chain-go/testscommon/integrationtests"
 	storageStubs "github.com/multiversx/mx-chain-go/testscommon/storage"
 	"github.com/multiversx/mx-chain-go/vm/systemSmartContracts/defaults"
@@ -220,16 +221,19 @@ func (context *TestContext) initFeeHandlers() {
 					MaxGasLimitPerMetaMiniBlock: maxGasLimitPerBlock,
 					MaxGasLimitPerTx:            maxGasLimitPerBlock,
 					MinGasLimit:                 minGasLimit,
+					ExtraGasLimitGuardedTx:      "50000",
 				},
 			},
-			MinGasPrice:      minGasPrice,
-			GasPerDataByte:   "1",
-			GasPriceModifier: 1.0,
+			MinGasPrice:            minGasPrice,
+			GasPerDataByte:         "1",
+			GasPriceModifier:       1.0,
+			MaxGasPriceSetGuardian: "2000000000",
 		},
 	},
 	EpochNotifier:               context.EpochNotifier,
 	EnableEpochsHandler:         context.EnableEpochsHandler,
 	BuiltInFunctionsCostHandler: &mock.BuiltInCostHandlerStub{},
+	TxVersionChecker:            &testscommon.TxVersionCheckerStub{},
 }

 economicsData, _ := economics.NewEconomicsData(argsNewEconomicsData)
@@ -247,6 +251,7 @@ func (context *TestContext) initVMAndBlockchainHook() {
 	EpochNotifier:             context.EpochNotifier,
 	EnableEpochsHandler:       context.EnableEpochsHandler,
 	MaxNumNodesInTransferRole: 100,
+	GuardedAccountHandler:     &guardianMocks.GuardedAccountHandlerStub{},
 }
 argsBuiltIn.AutomaticCrawlerAddresses = integrationTests.GenerateOneAddressPerShard(argsBuiltIn.ShardCoordinator)
@@ -384,6 +389,8 @@ func (context *TestContext) initTxProcessorWithOneSCExecutorWithVMs() {
 	ArgsParser:          smartContract.NewArgumentParser(),
 	ScrForwarder:        &mock.IntermediateTransactionHandlerMock{},
 	EnableEpochsHandler: context.EnableEpochsHandler,
+	TxVersionChecker:    &testscommon.TxVersionCheckerStub{},
+	GuardianChecker:     &guardianMocks.GuardedAccountHandlerStub{},
 }

 context.TxProcessor, err = processTransaction.NewTxProcessor(argsNewTxProcessor)
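The two new economics settings (ExtraGasLimitGuardedTx and MaxGasPriceSetGuardian) wire the guardians feature into the test economics data. As a back-of-the-envelope illustration of how ExtraGasLimitGuardedTx is presumably meant to factor into the move-balance gas of a guarded transaction (the base cost mirrors the usual minGasLimit + len(data)*gasPerDataByte formula; treating the extra limit as a flat add-on for guarded transactions is an assumption based on the setting's name):

    // Illustrative only: assumed cost model for a guarded move-balance transaction.
    func requiredMoveBalanceGas(minGasLimit, gasPerDataByte, extraGasGuarded uint64, data []byte, guarded bool) uint64 {
        gas := minGasLimit + uint64(len(data))*gasPerDataByte
        if guarded {
            gas += extraGasGuarded // e.g. the "50000" configured above
        }
        return gas
    }
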
diff --git a/integrationTests/vm/wasm/wasmvm/deployment/deploy_test.go b/integrationTests/vm/wasm/wasmvm/deployment/deploy_test.go
index bbf4ee6ffb9..135ee310df4 100644
--- a/integrationTests/vm/wasm/wasmvm/deployment/deploy_test.go
+++ b/integrationTests/vm/wasm/wasmvm/deployment/deploy_test.go
@@ -31,6 +31,7 @@ func TestScDeployShouldManageCorrectlyTheCodeMetadata(t *testing.T) {
 		senderBalance,
 		config.EnableEpochs{
 			IsPayableBySCEnableEpoch: 1,
+			SetGuardianEnableEpoch:   1,
 		},
 	)
 	require.Nil(t, err)
@@ -46,6 +47,7 @@ func TestScDeployShouldManageCorrectlyTheCodeMetadata(t *testing.T) {
 			PayableBySC: false,
 			Upgradeable: true,
 			Readable:    true,
+			Guarded:     false,
 		}
 		assert.Equal(t, expectedCodeMetadata.ToBytes(), getCodeMetadata(t, testContext.Accounts, contractAddress))
@@ -60,6 +62,7 @@ func TestScDeployShouldManageCorrectlyTheCodeMetadata(t *testing.T) {
 			PayableBySC: true,
 			Upgradeable: true,
 			Readable:    true,
+			Guarded:     false,
 		}
 		assert.Equal(t, expectedCodeMetadata.ToBytes(), getCodeMetadata(t, testContext.Accounts, contractAddress))
diff --git a/integrationTests/vm/wasm/wasmvm/wasmVM_test.go b/integrationTests/vm/wasm/wasmvm/wasmVM_test.go
index 7e97d8b69d7..cb6fbaf3717 100644
--- a/integrationTests/vm/wasm/wasmvm/wasmVM_test.go
+++ b/integrationTests/vm/wasm/wasmvm/wasmVM_test.go
@@ -32,6 +32,7 @@ import (
 	processTransaction "github.com/multiversx/mx-chain-go/process/transaction"
 	"github.com/multiversx/mx-chain-go/state"
 	"github.com/multiversx/mx-chain-go/testscommon"
+	"github.com/multiversx/mx-chain-go/testscommon/economicsmocks"
 	"github.com/multiversx/mx-chain-go/testscommon/integrationtests"
 	logger "github.com/multiversx/mx-chain-logger-go"
 	vmcommon "github.com/multiversx/mx-chain-vm-common-go"
@@ -514,7 +515,7 @@ func TestExecuteTransactionAndTimeToProcessChange(t *testing.T) {
 		EnableEpochsHandler: &testscommon.EnableEpochsHandlerStub{},
 	}
 	txTypeHandler, _ := coordinator.NewTxTypeHandler(argsTxTypeHandler)
-	feeHandler := &mock.FeeHandlerStub{
+	feeHandler := &economicsmocks.EconomicsHandlerStub{
 		ComputeMoveBalanceFeeCalled: func(tx data.TransactionWithFeeHandler) *big.Int {
 			return big.NewInt(10)
 		},
@@ -536,7 +537,7 @@ func TestExecuteTransactionAndTimeToProcessChange(t *testing.T) {
 		ScProcessor:      &testscommon.SCProcessorMock{},
 		TxFeeHandler:     &testscommon.UnsignedTxHandlerStub{},
 		TxTypeHandler:    txTypeHandler,
-		EconomicsFee:     &mock.FeeHandlerStub{},
+		EconomicsFee:     &economicsmocks.EconomicsHandlerStub{},
 		ReceiptForwarder: &mock.IntermediateTransactionHandlerMock{},
 		BadTxForwarder:   &mock.IntermediateTransactionHandlerMock{},
 		ArgsParser:       smartContract.NewArgumentParser(),
diff --git a/node/errors.go b/node/errors.go
index 119a3833b96..86d875a792b 100644
--- a/node/errors.go
+++ b/node/errors.go
@@ -135,3 +135,6 @@ var ErrTrieOperationsTimeout = errors.New("trie operations timeout")

 // ErrNilStatusHandler signals that a nil status handler was provided
 var ErrNilStatusHandler = errors.New("nil status handler")
+
+// ErrNilCreateTransactionArgs signals that create transaction args is nil
+var ErrNilCreateTransactionArgs = errors.New("nil args for create transaction")
diff --git a/node/export_test.go b/node/export_test.go
index 65b79d215e3..4f93f8278a4 100644
--- a/node/export_test.go
+++ b/node/export_test.go
@@ -3,8 +3,10 @@ package node
 import (
 	"github.com/multiversx/mx-chain-core-go/data"
 	"github.com/multiversx/mx-chain-core-go/data/api"
+	"github.com/multiversx/mx-chain-core-go/data/transaction"
 	"github.com/multiversx/mx-chain-go/common"
 	"github.com/multiversx/mx-chain-go/factory"
+	"github.com/multiversx/mx-chain-go/state"
 )

 // GetClosableComponentName -
@@ -41,3 +43,14 @@ func MergeAccountQueryOptionsIntoBlockInfo(options api.AccountQueryOptions, info
 func ExtractApiBlockInfoIfErrAccountNotFoundAtBlock(err error) (api.BlockInfo, bool) {
 	return extractApiBlockInfoIfErrAccountNotFoundAtBlock(err)
 }
+
+// SetTxGuardianData -
+func (n *Node) SetTxGuardianData(guardian string, guardianSigHex string, tx *transaction.Transaction) error {
+	return n.setTxGuardianData(guardian, guardianSigHex, tx)
+}
+
+func (n *Node) GetPendingAndActiveGuardians(
+	userAccount state.UserAccountHandler,
+) (activeGuardian *api.Guardian, pendingGuardian *api.Guardian, err error) {
+	return n.getPendingAndActiveGuardians(userAccount)
+}
diff --git a/node/external/dtos.go b/node/external/dtos.go
new file mode 100644
index 00000000000..f884d8d32c9
--- /dev/null
+++ b/node/external/dtos.go
@@ -0,0 +1,20 @@
+package external
+
+// ArgsCreateTransaction defines arguments for creating a transaction
+type ArgsCreateTransaction struct {
+	Nonce            uint64
+	Value            string
+	Receiver         string
+	ReceiverUsername []byte
+	Sender           string
+	SenderUsername   []byte
+	GasPrice         uint64
+	GasLimit         uint64
+	DataField        []byte
+	SignatureHex     string
+	ChainID          string
+	Version          uint32
+	Options          uint32
+	Guardian         string
+	GuardianSigHex   string
+}
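Since ArgsCreateTransaction bundles what would otherwise be a long positional parameter list, a caller fills in only the fields it needs. A hypothetical guarded-transaction example (the addresses, signatures, and the Options bit are placeholders/assumptions, not values from this PR):

    args := external.ArgsCreateTransaction{
        Nonce:          7,
        Value:          "1000000000000000000", // placeholder amount, in denomination units
        Receiver:       "erd1...",             // placeholder address
        Sender:         "erd1...",             // placeholder address
        GasPrice:       1000000000,
        GasLimit:       100000,
        SignatureHex:   "aa...", // placeholder sender signature
        ChainID:        "1",
        Version:        2,
        Options:        2,       // assumption: the guarded-transaction option bit
        Guardian:       "erd1...", // placeholder guardian address
        GuardianSigHex: "bb...",   // placeholder guardian co-signature
    }
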
diff --git a/node/external/logs/logsFacade_test.go b/node/external/logs/logsFacade_test.go
index f277ea811f6..21d11f99c59 100644
--- a/node/external/logs/logsFacade_test.go
+++ b/node/external/logs/logsFacade_test.go
@@ -4,7 +4,6 @@ import (
 	"testing"

 	"github.com/multiversx/mx-chain-core-go/core"
-	"github.com/multiversx/mx-chain-core-go/core/check"
 	"github.com/multiversx/mx-chain-core-go/data/transaction"
 	"github.com/multiversx/mx-chain-core-go/marshal"
 	"github.com/multiversx/mx-chain-go/testscommon"
@@ -23,7 +22,7 @@ func TestNewLogsFacade(t *testing.T) {
 		facade, err := NewLogsFacade(arguments)
 		require.ErrorIs(t, err, errCannotCreateLogsFacade)
 		require.ErrorContains(t, err, core.ErrNilStore.Error())
-		require.True(t, check.IfNil(facade))
+		require.Nil(t, facade)
 	})

 	t.Run("NilMarshaller", func(t *testing.T) {
@@ -36,7 +35,7 @@ func TestNewLogsFacade(t *testing.T) {
 		facade, err := NewLogsFacade(arguments)
 		require.ErrorIs(t, err, errCannotCreateLogsFacade)
 		require.ErrorContains(t, err, core.ErrNilMarshalizer.Error())
-		require.True(t, check.IfNil(facade))
+		require.Nil(t, facade)
 	})

 	t.Run("NilPubKeyConverter", func(t *testing.T) {
@@ -49,7 +48,7 @@ func TestNewLogsFacade(t *testing.T) {
 		facade, err := NewLogsFacade(arguments)
 		require.ErrorIs(t, err, errCannotCreateLogsFacade)
 		require.ErrorContains(t, err, core.ErrNilPubkeyConverter.Error())
-		require.True(t, check.IfNil(facade))
+		require.Nil(t, facade)
 	})
 }
@@ -144,3 +143,18 @@ func TestLogsFacade_IncludeLogsInTransactionsShouldWork(t *testing.T) {
 	require.Nil(t, transactions[2].Logs)
 	require.Equal(t, "fourth", transactions[3].Logs.Events[0].Identifier)
 }
+
+func TestLogsFacade_IsInterfaceNil(t *testing.T) {
+	t.Parallel()
+
+	var lf *logsFacade
+	require.True(t, lf.IsInterfaceNil())
+
+	arguments := ArgsNewLogsFacade{
+		StorageService:  genericMocks.NewChainStorerMock(7),
+		Marshaller:      &marshal.GogoProtoMarshalizer{},
+		PubKeyConverter: testscommon.NewPubkeyConverterMock(32),
+	}
+	lf, _ = NewLogsFacade(arguments)
+	require.False(t, lf.IsInterfaceNil())
+}
diff --git a/node/external/nodeApiResolver_test.go b/node/external/nodeApiResolver_test.go
index 0f4528ba2c7..1132c7bbdcf 100644
--- a/node/external/nodeApiResolver_test.go
+++ b/node/external/nodeApiResolver_test.go
@@ -7,7 +7,6 @@ import (
 	"math/big"
 	"testing"

-	"github.com/multiversx/mx-chain-core-go/core/check"
 	"github.com/multiversx/mx-chain-core-go/data/api"
 	"github.com/multiversx/mx-chain-core-go/data/transaction"
 	"github.com/multiversx/mx-chain-go/common"
@@ -127,7 +126,7 @@ func TestNewNodeApiResolver_ShouldWork(t *testing.T) {

 	nar, err := external.NewNodeApiResolver(arg)
 	assert.Nil(t, err)
-	assert.False(t, check.IfNil(nar))
+	assert.NotNil(t, nar)
 }

 func TestNodeApiResolver_CloseShouldReturnNil(t *testing.T) {
@@ -676,3 +675,14 @@ func TestNodeApiResolver_GetGasConfigs(t *testing.T) {
 	_ = nar.GetGasConfigs()
 	require.True(t, wasCalled)
 }
+
+func TestNodeApiResolver_IsInterfaceNil(t *testing.T) {
+	t.Parallel()
+
+	nar, _ := external.NewNodeApiResolver(external.ArgNodeApiResolver{})
+	require.True(t, nar.IsInterfaceNil())
+
+	arg := createMockArgs()
+	nar, _ = external.NewNodeApiResolver(arg)
+	require.False(t, nar.IsInterfaceNil())
+}
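The new IsInterfaceNil tests pin down the usual Go typed-nil pitfall: a nil *logsFacade stored in an interface makes the interface itself non-nil, so components expose an explicit nil-receiver check instead of relying on == nil. A minimal, self-contained illustration of the idiom (generic names, not code from this PR):

    package main

    import "fmt"

    type component struct{}

    // IsInterfaceNil returns true if there is no value under the interface
    func (c *component) IsInterfaceNil() bool {
        return c == nil
    }

    type checker interface{ IsInterfaceNil() bool }

    func main() {
        var c *component      // typed nil
        var iface checker = c // interface holding a typed nil
        fmt.Println(iface == nil)           // false: the interface carries a type
        fmt.Println(iface.IsInterfaceNil()) // true: the nil-receiver method still runs
    }
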
diff --git a/node/external/timemachine/fee/args.go b/node/external/timemachine/fee/args.go
index a79026e02ec..be33f0d743c 100644
--- a/node/external/timemachine/fee/args.go
+++ b/node/external/timemachine/fee/args.go
@@ -12,12 +12,16 @@ type ArgsNewFeeComputer struct {
 	BuiltInFunctionsCostHandler economics.BuiltInFunctionsCostHandler
 	EconomicsConfig             config.EconomicsConfig
 	EnableEpochsConfig          config.EnableEpochs
+	TxVersionChecker            process.TxVersionCheckerHandler
 }

 func (args *ArgsNewFeeComputer) check() error {
 	if check.IfNil(args.BuiltInFunctionsCostHandler) {
 		return process.ErrNilBuiltInFunctionsCostHandler
 	}
+	if check.IfNil(args.TxVersionChecker) {
+		return process.ErrNilTransactionVersionChecker
+	}

 	return nil
 }
diff --git a/node/external/timemachine/fee/feeComputer.go b/node/external/timemachine/fee/feeComputer.go
index 11961b9baa0..422e5306d6f 100644
--- a/node/external/timemachine/fee/feeComputer.go
+++ b/node/external/timemachine/fee/feeComputer.go
@@ -8,6 +8,7 @@ import (
 	"github.com/multiversx/mx-chain-go/common/enablers"
 	"github.com/multiversx/mx-chain-go/config"
 	"github.com/multiversx/mx-chain-go/node/external/timemachine"
+	"github.com/multiversx/mx-chain-go/process"
 	"github.com/multiversx/mx-chain-go/process/economics"
 	logger "github.com/multiversx/mx-chain-logger-go"
 )
@@ -15,6 +16,7 @@ import (
 var log = logger.GetOrCreate("node/external/timemachine/fee")

 type feeComputer struct {
+	txVersionChecker            process.TxVersionCheckerHandler
 	builtInFunctionsCostHandler economics.BuiltInFunctionsCostHandler
 	economicsConfig             config.EconomicsConfig
 	economicsInstances          map[uint32]economicsDataWithComputeFee
@@ -35,6 +37,7 @@ func NewFeeComputer(args ArgsNewFeeComputer) (*feeComputer, error) {
 		// TODO: use a LRU cache instead
 		economicsInstances: make(map[uint32]economicsDataWithComputeFee),
 		enableEpochsConfig: args.EnableEpochsConfig,
+		txVersionChecker:   args.TxVersionChecker,
 	}

 	// Create some economics data instance (but do not save them) in order to validate the arguments:
@@ -132,6 +135,7 @@ func (computer *feeComputer) createEconomicsInstance(epoch uint32) (economicsDat
 		BuiltInFunctionsCostHandler: computer.builtInFunctionsCostHandler,
 		EpochNotifier:               &timemachine.DisabledEpochNotifier{},
 		EnableEpochsHandler:         enableEpochsHandler,
+		TxVersionChecker:            computer.txVersionChecker,
 	}

 	economicsData, err := economics.NewEconomicsData(args)
diff --git a/node/external/timemachine/fee/feeComputer_test.go b/node/external/timemachine/fee/feeComputer_test.go
index 14e11af7792..bff68baef98 100644
--- a/node/external/timemachine/fee/feeComputer_test.go
+++ b/node/external/timemachine/fee/feeComputer_test.go
@@ -6,7 +6,6 @@ import (
 	"sync"
 	"testing"

-	"github.com/multiversx/mx-chain-core-go/core/check"
 	"github.com/multiversx/mx-chain-core-go/data/transaction"
 	"github.com/multiversx/mx-chain-go/config"
 	"github.com/multiversx/mx-chain-go/process"
@@ -15,41 +14,44 @@ import (
 	"github.com/stretchr/testify/require"
 )

-func TestNewFeeComputer(t *testing.T) {
-	t.Run("NilBuiltInFunctionsCostHandler", func(t *testing.T) {
-		arguments := ArgsNewFeeComputer{
-			BuiltInFunctionsCostHandler: nil,
-			EconomicsConfig:             testscommon.GetEconomicsConfig(),
-		}
+func createMockFeeComputerArgs() ArgsNewFeeComputer {
+	return ArgsNewFeeComputer{
+		BuiltInFunctionsCostHandler: &testscommon.BuiltInCostHandlerStub{},
+		EconomicsConfig:             testscommon.GetEconomicsConfig(),
+		EnableEpochsConfig: config.EnableEpochs{
+			PenalizedTooMuchGasEnableEpoch: 124,
+			GasPriceModifierEnableEpoch:    180,
+		},
+		TxVersionChecker: &testscommon.TxVersionCheckerStub{},
+	}
+}

-		computer, err := NewFeeComputer(arguments)
+func TestNewFeeComputer(t *testing.T) {
+	t.Run("nil builtin function cost handler should error", func(t *testing.T) {
+		args := createMockFeeComputerArgs()
+		args.BuiltInFunctionsCostHandler = nil
+		computer, err := NewFeeComputer(args)
 		require.Equal(t, process.ErrNilBuiltInFunctionsCostHandler, err)
-		require.True(t, check.IfNil(computer))
+		require.Nil(t, computer)
+	})
+	t.Run("nil tx version checker should error", func(t *testing.T) {
+		args := createMockFeeComputerArgs()
+		args.TxVersionChecker = nil
+		computer, err := NewFeeComputer(args)
+		require.Equal(t, process.ErrNilTransactionVersionChecker, err)
+		require.Nil(t, computer)
 	})
-	t.Run("AllArgumentsProvided", func(t *testing.T) {
-		arguments := ArgsNewFeeComputer{
-			BuiltInFunctionsCostHandler: &testscommon.BuiltInCostHandlerStub{},
-			EconomicsConfig:             testscommon.GetEconomicsConfig(),
-		}
-
-		computer, err := NewFeeComputer(arguments)
+		args := createMockFeeComputerArgs()
+		computer, err := NewFeeComputer(args)
 		require.Nil(t, err)
 		require.NotNil(t, computer)
 	})
 }

 func TestFeeComputer_ComputeGasUsedAndFeeBasedOnRefundValue(t *testing.T) {
-	arguments := ArgsNewFeeComputer{
-		BuiltInFunctionsCostHandler: &testscommon.BuiltInCostHandlerStub{},
-		EconomicsConfig:             testscommon.GetEconomicsConfig(),
-		EnableEpochsConfig: config.EnableEpochs{
-			PenalizedTooMuchGasEnableEpoch: 124,
-			GasPriceModifierEnableEpoch:    180,
-		},
-	}
-
-	computer, _ := NewFeeComputer(arguments)
+	args := createMockFeeComputerArgs()
+	computer, _ := NewFeeComputer(args)

 	contract, _ := hex.DecodeString("000000000000000000010000000000000000000000000000000000000000abba")
@@ -76,16 +78,8 @@ func TestFeeComputer_ComputeGasUsedAndFeeBasedOnRefundValue(t *testing.T) {
 }

 func TestFeeComputer_ComputeFeeBasedOnGasUsed(t *testing.T) {
-	arguments := ArgsNewFeeComputer{
-		BuiltInFunctionsCostHandler: &testscommon.BuiltInCostHandlerStub{},
-		EconomicsConfig:             testscommon.GetEconomicsConfig(),
-		EnableEpochsConfig: config.EnableEpochs{
-			PenalizedTooMuchGasEnableEpoch: 124,
-			GasPriceModifierEnableEpoch:    180,
-		},
-	}
-
-	computer, _ := NewFeeComputer(arguments)
+	args := createMockFeeComputerArgs()
+	computer, _ := NewFeeComputer(args)

 	contract, _ := hex.DecodeString("000000000000000000010000000000000000000000000000000000000000abba")
@@ -110,16 +104,8 @@ func TestFeeComputer_ComputeFeeBasedOnGasUsed(t *testing.T) {
 }

 func TestFeeComputer_ComputeGasLimit(t *testing.T) {
-	arguments := ArgsNewFeeComputer{
-		BuiltInFunctionsCostHandler: &testscommon.BuiltInCostHandlerStub{},
-		EconomicsConfig:             testscommon.GetEconomicsConfig(),
-		EnableEpochsConfig: config.EnableEpochs{
-			PenalizedTooMuchGasEnableEpoch: 124,
-			GasPriceModifierEnableEpoch:    180,
-		},
-	}
-
-	computer, _ := NewFeeComputer(arguments)
+	args := createMockFeeComputerArgs()
+	computer, _ := NewFeeComputer(args)

 	contract, _ := hex.DecodeString("000000000000000000010000000000000000000000000000000000000000abba")
@@ -143,18 +129,10 @@ func TestFeeComputer_ComputeGasLimit(t *testing.T) {
 }

 func TestFeeComputer_ComputeTransactionFeeShouldWorkForDifferentEpochs(t *testing.T) {
-	arguments := ArgsNewFeeComputer{
-		BuiltInFunctionsCostHandler: &testscommon.BuiltInCostHandlerStub{},
-		EconomicsConfig:             testscommon.GetEconomicsConfig(),
-		EnableEpochsConfig: config.EnableEpochs{
-			PenalizedTooMuchGasEnableEpoch: 124,
-			GasPriceModifierEnableEpoch:    180,
-		},
-	}
-
+	args := createMockFeeComputerArgs()
 	contract, _ := hex.DecodeString("000000000000000000010000000000000000000000000000000000000000abba")

-	computer, _ := NewFeeComputer(arguments)
+	computer, _ := NewFeeComputer(args)

 	checkComputedFee(t, "50000000000000", computer, 0, 80000, 1000000000, "", nil)
 	checkComputedFee(t, "57500000000000", computer, 0, 80000, 1000000000, "hello", nil)
@@ -185,16 +163,8 @@ func checkComputedFee(t *testing.T, expectedFee string, computer *feeComputer, e
 }

 func TestFeeComputer_InHighConcurrency(t *testing.T) {
-	arguments := ArgsNewFeeComputer{
-		BuiltInFunctionsCostHandler: &testscommon.BuiltInCostHandlerStub{},
-		EconomicsConfig:             testscommon.GetEconomicsConfig(),
-		EnableEpochsConfig: config.EnableEpochs{
-			PenalizedTooMuchGasEnableEpoch: 124,
-			GasPriceModifierEnableEpoch:    180,
-		},
-	}
-
-	computer, _ := NewFeeComputer(arguments)
+	args := createMockFeeComputerArgs()
+	computer, _ := NewFeeComputer(args)

 	n := 1000
 	wg := sync.WaitGroup{}
@@ -216,3 +186,14 @@ func TestFeeComputer_InHighConcurrency(t *testing.T) {

 	wg.Wait()
 }
+
+func TestFeeComputer_IsInterfaceNil(t *testing.T) {
+	t.Parallel()
+
+	var fc *feeComputer
+	require.True(t, fc.IsInterfaceNil())
+
+	args := createMockFeeComputerArgs()
+	fc, _ = NewFeeComputer(args)
+	require.False(t, fc.IsInterfaceNil())
+}
diff --git a/node/external/timemachine/fee/memoryFootprint/memory_test.go b/node/external/timemachine/fee/memoryFootprint/memory_test.go
index 6f8c411bcd2..9ac5146a4ba 100644
--- a/node/external/timemachine/fee/memoryFootprint/memory_test.go
+++ b/node/external/timemachine/fee/memoryFootprint/memory_test.go
@@ -14,6 +14,10 @@ import (

 // keep this test in a separate package as to not be influenced by other the tests from the same package
 func TestFeeComputer_MemoryFootprint(t *testing.T) {
+	if testing.Short() {
+		t.Skip("this test is not relevant and will fail if started with -race")
+	}
+
 	numEpochs := 10000
 	maxFootprintNumBytes := 48_000_000
@@ -23,6 +27,7 @@ func TestFeeComputer_MemoryFootprint(t *testing.T) {
 	feeComputer, _ := fee.NewFeeComputer(fee.ArgsNewFeeComputer{
 		BuiltInFunctionsCostHandler: &testscommon.BuiltInCostHandlerStub{},
 		EconomicsConfig:             testscommon.GetEconomicsConfig(),
+		TxVersionChecker:            &testscommon.TxVersionCheckerStub{},
 	})
 	computer := fee.NewTestFeeComputer(feeComputer)
diff --git a/node/external/transactionAPI/gasUsedAndFeeProcessor_test.go b/node/external/transactionAPI/gasUsedAndFeeProcessor_test.go
index e2e655d898b..50e1d64ea84 100644
--- a/node/external/transactionAPI/gasUsedAndFeeProcessor_test.go
+++ b/node/external/transactionAPI/gasUsedAndFeeProcessor_test.go
@@ -17,10 +17,11 @@ var pubKeyConverter, _ = pubkeyConverter.NewBech32PubkeyConverter(32, "erd")
 func TestComputeTransactionGasUsedAndFeeMoveBalance(t *testing.T) {
 	t.Parallel()

-	require := require.New(t)
+	req := require.New(t)
 	feeComp, _ := fee.NewFeeComputer(fee.ArgsNewFeeComputer{
 		BuiltInFunctionsCostHandler: &testscommon.BuiltInCostHandlerStub{},
 		EconomicsConfig:             testscommon.GetEconomicsConfig(),
+		TxVersionChecker:            &testscommon.TxVersionCheckerStub{},
 	})
 	computer := fee.NewTestFeeComputer(feeComp)

@@ -39,17 +40,18 @@ func TestComputeTransactionGasUsedAndFeeMoveBalance(t *testing.T) {
 	}

 	gasUsedAndFeeProc.computeAndAttachGasUsedAndFee(moveBalanceTx)
-	require.Equal(uint64(50_000), moveBalanceTx.GasUsed)
-	require.Equal("50000000000000", moveBalanceTx.Fee)
+	req.Equal(uint64(50_000), moveBalanceTx.GasUsed)
+	req.Equal("50000000000000", moveBalanceTx.Fee)
 }

 func TestComputeTransactionGasUsedAndFeeLogWithError(t *testing.T) {
 	t.Parallel()

-	require := require.New(t)
+	req := require.New(t)
 	feeComp, _ := fee.NewFeeComputer(fee.ArgsNewFeeComputer{
 		BuiltInFunctionsCostHandler: &testscommon.BuiltInCostHandlerStub{},
 		EconomicsConfig:             testscommon.GetEconomicsConfig(),
+		TxVersionChecker:            &testscommon.TxVersionCheckerStub{},
 	})
 	computer := fee.NewTestFeeComputer(feeComp)

@@ -76,8 +78,8 @@ func TestComputeTransactionGasUsedAndFeeLogWithError(t *testing.T) {
 	}

 	gasUsedAndFeeProc.computeAndAttachGasUsedAndFee(txWithSignalErrorLog)
-	require.Equal(uint64(80_000), txWithSignalErrorLog.GasUsed)
-	require.Equal("50300000000000", txWithSignalErrorLog.Fee)
+	req.Equal(uint64(80_000), txWithSignalErrorLog.GasUsed)
+	req.Equal("50300000000000", txWithSignalErrorLog.Fee)
 }

 func silentDecodeAddress(address string) []byte {
@@ -88,10 +90,11 @@ func silentDecodeAddress(address string) []byte {
 func TestComputeTransactionGasUsedAndFeeRelayedTxWithWriteLog(t *testing.T) {
 	t.Parallel()

-	require := require.New(t)
+	req := require.New(t)
 	feeComp, _ := fee.NewFeeComputer(fee.ArgsNewFeeComputer{
 		BuiltInFunctionsCostHandler: &testscommon.BuiltInCostHandlerStub{},
 		EconomicsConfig:             testscommon.GetEconomicsConfig(),
+		TxVersionChecker:            &testscommon.TxVersionCheckerStub{},
 	})
 	computer := fee.NewTestFeeComputer(feeComp)

@@ -120,15 +123,16 @@ func TestComputeTransactionGasUsedAndFeeRelayedTxWithWriteLog(t *testing.T) {
 	}

 	gasUsedAndFeeProc.computeAndAttachGasUsedAndFee(relayedTxWithWriteLog)
-	require.Equal(uint64(200_000), relayedTxWithWriteLog.GasUsed)
-	require.Equal("66350000000000", relayedTxWithWriteLog.Fee)
+	req.Equal(uint64(200_000), relayedTxWithWriteLog.GasUsed)
+	req.Equal("66350000000000", relayedTxWithWriteLog.Fee)
 }

 func TestComputeTransactionGasUsedAndFeeTransactionWithScrWithRefund(t *testing.T) {
-	require := require.New(t)
+	req := require.New(t)
 	feeComp, _ := fee.NewFeeComputer(fee.ArgsNewFeeComputer{
 		BuiltInFunctionsCostHandler: &testscommon.BuiltInCostHandlerStub{},
 		EconomicsConfig:             testscommon.GetEconomicsConfig(),
+		TxVersionChecker:            &testscommon.TxVersionCheckerStub{},
 	})
 	computer := fee.NewTestFeeComputer(feeComp)

@@ -166,17 +170,19 @@ func TestComputeTransactionGasUsedAndFeeTransactionWithScrWithRefund(t *testing.
 	}

 	gasUsedAndFeeProc.computeAndAttachGasUsedAndFee(txWithSRefundSCR)
-	require.Equal(uint64(3_365_000), txWithSRefundSCR.GasUsed)
-	require.Equal("98000000000000", txWithSRefundSCR.Fee)
+	req.Equal(uint64(3_365_000), txWithSRefundSCR.GasUsed)
+	req.Equal("98000000000000", txWithSRefundSCR.Fee)
 }

 func TestNFTTransferWithScCall(t *testing.T) {
-	require := require.New(t)
-	feeComp, _ := fee.NewFeeComputer(fee.ArgsNewFeeComputer{
+	req := require.New(t)
+	feeComp, err := fee.NewFeeComputer(fee.ArgsNewFeeComputer{
 		BuiltInFunctionsCostHandler: &testscommon.BuiltInCostHandlerStub{},
 		EconomicsConfig:             testscommon.GetEconomicsConfig(),
+		TxVersionChecker:            &testscommon.TxVersionCheckerStub{},
 	})
 	computer := fee.NewTestFeeComputer(feeComp)
+	req.Nil(err)

 	gasUsedAndFeeProc := newGasUsedAndFeeProcessor(computer, pubKeyConverter)

@@ -199,6 +205,6 @@ func TestNFTTransferWithScCall(t *testing.T) {
 	tx.InitiallyPaidFee = feeComp.ComputeTransactionFee(tx).String()

 	gasUsedAndFeeProc.computeAndAttachGasUsedAndFee(tx)
-	require.Equal(uint64(55_000_000), tx.GasUsed)
-	require.Equal("822250000000000", tx.Fee)
+	req.Equal(uint64(55_000_000), tx.GasUsed)
+	req.Equal("822250000000000", tx.Fee)
 }
diff --git a/node/external/transactionAPI/unmarshaller.go b/node/external/transactionAPI/unmarshaller.go
index cf320c979f9..c9526217f4f 100644
--- a/node/external/transactionAPI/unmarshaller.go
+++ b/node/external/transactionAPI/unmarshaller.go
@@ -110,7 +110,7 @@ func (tu *txUnmarshaller) prepareNormalTx(tx *transaction.Transaction) *transact
 	receiverAddress := tu.addressPubKeyConverter.SilentEncode(tx.RcvAddr, log)
 	senderAddress := tu.addressPubKeyConverter.SilentEncode(tx.SndAddr, log)

-	return &transaction.ApiTransactionResult{
+	apiTx := &transaction.ApiTransactionResult{
 		Tx:       tx,
 		Type:     string(transaction.TxTypeNormal),
 		Nonce:    tx.Nonce,
@@ -127,13 +127,20 @@ func (tu *txUnmarshaller) prepareNormalTx(tx *transaction.Transaction) *transact
 		Version: tx.Version,
 		ChainID: string(tx.ChainID),
 	}
+
+	if len(tx.GuardianAddr) > 0 {
+		apiTx.GuardianAddr = tu.addressPubKeyConverter.SilentEncode(tx.GuardianAddr, log)
+		apiTx.GuardianSignature = hex.EncodeToString(tx.GuardianSignature)
+	}
+
+	return apiTx
 }

 func (tu *txUnmarshaller) prepareInvalidTx(tx *transaction.Transaction) *transaction.ApiTransactionResult {
 	receiverAddress := tu.addressPubKeyConverter.SilentEncode(tx.RcvAddr, log)
 	senderAddress := tu.addressPubKeyConverter.SilentEncode(tx.SndAddr, log)

-	return &transaction.ApiTransactionResult{
+	apiTx := &transaction.ApiTransactionResult{
 		Tx:        tx,
 		Type:      string(transaction.TxTypeInvalid),
 		Nonce:     tx.Nonce,
@@ -146,7 +153,17 @@ func (tu *txUnmarshaller) prepareInvalidTx(tx *transaction.Transaction) *transac
 		GasLimit:  tx.GasLimit,
 		Data:      tx.Data,
 		Signature: hex.EncodeToString(tx.Signature),
+		Options:   tx.Options,
+		Version:   tx.Version,
+		ChainID:   string(tx.ChainID),
 	}
+
+	if len(tx.GuardianAddr) > 0 {
+		apiTx.GuardianAddr = tu.addressPubKeyConverter.SilentEncode(tx.GuardianAddr, log)
+		apiTx.GuardianSignature = hex.EncodeToString(tx.GuardianSignature)
+	}
+
+	return apiTx
 }

 func (tu *txUnmarshaller) prepareRewardTx(tx *rewardTxData.RewardTx) *transaction.ApiTransactionResult {
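With the unmarshaller now propagating guardian data, a co-signed transaction surfaces both signatures in its API view. A sketch of the fields a guarded transaction would carry in transaction.ApiTransactionResult (all values are placeholders, shown only to illustrate which fields the hunks above populate):

    apiTx := &transaction.ApiTransactionResult{
        Sender:            "erd1...", // placeholder
        Receiver:          "erd1...", // placeholder
        Signature:         "aa...",   // sender signature, hex-encoded
        GuardianAddr:      "erd1...", // set only when tx.GuardianAddr is non-empty
        GuardianSignature: "bb...",   // guardian co-signature, hex-encoded
        Version:           2,
        Options:           2,         // placeholder options value
        ChainID:           "1",
    }
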
diff --git a/node/metrics/metrics.go b/node/metrics/metrics.go
index 2071f37cf04..d584be00004 100644
--- a/node/metrics/metrics.go
+++ b/node/metrics/metrics.go
@@ -2,6 +2,7 @@ package metrics

 import (
 	"fmt"
+	"runtime/debug"
 	"sort"
 	"strconv"

@@ -10,6 +11,7 @@ import (
 	"github.com/multiversx/mx-chain-go/common"
 	"github.com/multiversx/mx-chain-go/config"
 	"github.com/multiversx/mx-chain-go/sharding"
+	logger "github.com/multiversx/mx-chain-logger-go"
 )

 const millisecondsInSecond = 1000
@@ -18,6 +20,8 @@ const initInt = int64(0)
 const initString = ""
 const initZeroString = "0"

+var log = logger.GetOrCreate("node/metrics")
+
 // InitBaseMetrics will initialize base, default metrics to 0 values
 func InitBaseMetrics(appStatusHandler core.AppStatusHandler) error {
 	if check.IfNil(appStatusHandler) {
@@ -124,6 +128,7 @@ func InitConfigMetrics(
 	appStatusHandler.SetUInt64Value(common.MetricBuiltInFunctionOnMetaEnableEpoch, uint64(enableEpochs.BuiltInFunctionOnMetaEnableEpoch))
 	appStatusHandler.SetStringValue(common.MetricTotalSupply, economicsConfig.GlobalSettings.GenesisTotalSupply)
 	appStatusHandler.SetUInt64Value(common.MetricWaitingListFixEnableEpoch, uint64(enableEpochs.WaitingListFixEnableEpoch))
+	appStatusHandler.SetUInt64Value(common.MetricSetGuardianEnableEpoch, uint64(enableEpochs.SetGuardianEnableEpoch))

 	for i, nodesChangeConfig := range enableEpochs.MaxNodesChangeEnableEpoch {
 		epochEnable := fmt.Sprintf("%s%d%s", common.MetricMaxNodesChangeEnableEpoch, i, common.EpochEnableSuffix)
@@ -271,10 +276,20 @@ func InitMetrics(

 // SaveUint64Metric will save an uint64 metric in status handler
 func SaveUint64Metric(ash core.AppStatusHandler, key string, value uint64) {
+	if check.IfNil(ash) {
+		log.Error("programming error: nil AppStatusHandler in SaveUint64Metric", "stack", string(debug.Stack()))
+		return
+	}
+
 	ash.SetUInt64Value(key, value)
 }

 // SaveStringMetric will save a string metric in status handler
 func SaveStringMetric(ash core.AppStatusHandler, key, value string) {
+	if check.IfNil(ash) {
+		log.Error("programming error: nil AppStatusHandler in SaveStringMetric", "stack", string(debug.Stack()))
+		return
+	}
+
 	ash.SetStringValue(key, value)
 }
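These nil checks turn a would-be panic into a logged programming error (complete with a stack trace from runtime/debug), so callers no longer need to guard themselves. A minimal usage sketch; the metric key is an arbitrary example:

    // Safe even when the status handler has not been wired yet:
    // a nil handler is logged instead of panicking.
    metrics.SaveUint64Metric(nil, "erd_nonce", 42)

    // Normal path: the value is forwarded to the handler.
    metrics.SaveUint64Metric(appStatusHandler, "erd_nonce", 42)
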
diff --git a/node/metrics/metrics_test.go b/node/metrics/metrics_test.go
index cabb8674c14..381a16100d6 100644
--- a/node/metrics/metrics_test.go
+++ b/node/metrics/metrics_test.go
@@ -5,9 +5,12 @@ import (
 	"strconv"
 	"testing"

+	"github.com/multiversx/mx-chain-core-go/core"
 	"github.com/multiversx/mx-chain-go/common"
 	"github.com/multiversx/mx-chain-go/config"
+	"github.com/multiversx/mx-chain-go/sharding/nodesCoordinator"
 	"github.com/multiversx/mx-chain-go/testscommon"
+	"github.com/multiversx/mx-chain-go/testscommon/shardingMocks"
 	"github.com/multiversx/mx-chain-go/testscommon/statusHandler"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
@@ -134,49 +137,61 @@ func TestInitConfigMetrics(t *testing.T) {
 			ESDTTransferRoleEnableEpoch:      33,
 			BuiltInFunctionOnMetaEnableEpoch: 34,
 			WaitingListFixEnableEpoch:        35,
+			SetGuardianEnableEpoch:           36,
+			MaxNodesChangeEnableEpoch: []config.MaxNodesChangeConfig{
+				{
+					EpochEnable:            0,
+					MaxNumNodes:            1,
+					NodesToShufflePerShard: 2,
+				},
+			},
 		},
 	}

 	expectedValues := map[string]interface{}{
-		"erd_smart_contract_deploy_enable_epoch": uint32(1),
-		"erd_built_in_functions_enable_epoch": uint32(2),
-		"erd_relayed_transactions_enable_epoch": uint32(3),
-		"erd_penalized_too_much_gas_enable_epoch": uint32(4),
-		"erd_switch_jail_waiting_enable_epoch": uint32(5),
-		"erd_switch_hysteresis_for_min_nodes_enable_epoch": uint32(6),
-		"erd_below_signed_threshold_enable_epoch": uint32(7),
-		"erd_transaction_signed_with_txhash_enable_epoch": uint32(8),
-		"erd_meta_protection_enable_epoch": uint32(9),
-		"erd_ahead_of_time_gas_usage_enable_epoch": uint32(10),
-		"erd_gas_price_modifier_enable_epoch": uint32(11),
-		"erd_repair_callback_enable_epoch": uint32(12),
-		"erd_block_gas_and_fee_recheck_enable_epoch": uint32(13),
-		"erd_staking_v2_enable_epoch": uint32(14),
-		"erd_stake_enable_epoch": uint32(15),
-		"erd_double_key_protection_enable_epoch": uint32(16),
-		"erd_esdt_enable_epoch": uint32(17),
-		"erd_governance_enable_epoch": uint32(18),
-		"erd_delegation_manager_enable_epoch": uint32(19),
-		"erd_delegation_smart_contract_enable_epoch": uint32(20),
-		"erd_correct_last_unjailed_enable_epoch": uint32(21),
-		"erd_balance_waiting_lists_enable_epoch": uint32(22),
-		"erd_return_data_to_last_transfer_enable_epoch": uint32(23),
-		"erd_sender_in_out_transfer_enable_epoch": uint32(24),
-		"erd_relayed_transactions_v2_enable_epoch": uint32(25),
-		"erd_unbond_tokens_v2_enable_epoch": uint32(26),
-		"erd_save_jailed_always_enable_epoch": uint32(27),
-		"erd_validator_to_delegation_enable_epoch": uint32(28),
-		"erd_redelegate_below_min_check_enable_epoch": uint32(29),
-		"erd_increment_scr_nonce_in_multi_transfer_enable_epoch": uint32(30),
-		"erd_esdt_multi_transfer_enable_epoch": uint32(31),
-		"erd_global_mint_burn_disable_epoch": uint32(32),
-		"erd_esdt_transfer_role_enable_epoch": uint32(33),
-		"erd_builtin_function_on_meta_enable_epoch": uint32(34),
-		"erd_waiting_list_fix_enable_epoch": uint32(35),
-		"erd_max_nodes_change_enable_epoch": nil,
-		"erd_total_supply": "12345",
-		"erd_hysteresis": "0.100000",
-		"erd_adaptivity": "true",
+		"erd_smart_contract_deploy_enable_epoch": uint32(1),
+		"erd_built_in_functions_enable_epoch": uint32(2),
+		"erd_relayed_transactions_enable_epoch": uint32(3),
+		"erd_penalized_too_much_gas_enable_epoch": uint32(4),
+		"erd_switch_jail_waiting_enable_epoch": uint32(5),
+		"erd_switch_hysteresis_for_min_nodes_enable_epoch": uint32(6),
+		"erd_below_signed_threshold_enable_epoch": uint32(7),
+		"erd_transaction_signed_with_txhash_enable_epoch": uint32(8),
+		"erd_meta_protection_enable_epoch": uint32(9),
+		"erd_ahead_of_time_gas_usage_enable_epoch": uint32(10),
+		"erd_gas_price_modifier_enable_epoch": uint32(11),
+		"erd_repair_callback_enable_epoch": uint32(12),
+		"erd_block_gas_and_fee_recheck_enable_epoch": uint32(13),
+		"erd_staking_v2_enable_epoch": uint32(14),
+		"erd_stake_enable_epoch": uint32(15),
+		"erd_double_key_protection_enable_epoch": uint32(16),
+		"erd_esdt_enable_epoch": uint32(17),
+		"erd_governance_enable_epoch": uint32(18),
+		"erd_delegation_manager_enable_epoch": uint32(19),
+		"erd_delegation_smart_contract_enable_epoch": uint32(20),
+		"erd_correct_last_unjailed_enable_epoch": uint32(21),
+		"erd_balance_waiting_lists_enable_epoch": uint32(22),
+		"erd_return_data_to_last_transfer_enable_epoch": uint32(23),
+		"erd_sender_in_out_transfer_enable_epoch": uint32(24),
+		"erd_relayed_transactions_v2_enable_epoch": uint32(25),
+		"erd_unbond_tokens_v2_enable_epoch": uint32(26),
+		"erd_save_jailed_always_enable_epoch": uint32(27),
+		"erd_validator_to_delegation_enable_epoch": uint32(28),
+		"erd_redelegate_below_min_check_enable_epoch": uint32(29),
+		"erd_increment_scr_nonce_in_multi_transfer_enable_epoch": uint32(30),
+		"erd_esdt_multi_transfer_enable_epoch": uint32(31),
+		"erd_global_mint_burn_disable_epoch": uint32(32),
+		"erd_esdt_transfer_role_enable_epoch": uint32(33),
+		"erd_builtin_function_on_meta_enable_epoch": uint32(34),
+		"erd_waiting_list_fix_enable_epoch": uint32(35),
+		"erd_max_nodes_change_enable_epoch": nil,
+		"erd_total_supply": "12345",
+		"erd_hysteresis": "0.100000",
+		"erd_adaptivity": "true",
+		"erd_max_nodes_change_enable_epoch0_epoch_enable": uint32(0),
+		"erd_max_nodes_change_enable_epoch0_max_num_nodes": uint32(1),
+		"erd_max_nodes_change_enable_epoch0_nodes_to_shuffle_per_shard": uint32(2),
+		"erd_set_guardian_feature_enable_epoch": uint32(36),
 	}

 	economicsConfig := config.EconomicsConfig{
@@ -329,3 +344,266 @@ func TestInitRatingsMetrics(t *testing.T) {
 		assert.Equal(t, v, keys[k])
 	}
 }
+
+func TestInitMetrics(t *testing.T) {
+	t.Parallel()
+
+	appStatusHandler := &statusHandler.AppStatusHandlerStub{}
+	pubkeyString := "pub key"
+	nodeType := core.NodeTypeValidator
+	shardCoordinator := &testscommon.ShardsCoordinatorMock{
+		NoShards: 3,
+		SelfIDCalled: func() uint32 {
+			return 0
+		},
+	}
+	nodesSetup := &testscommon.NodesSetupStub{
+		GetShardConsensusGroupSizeCalled: func() uint32 {
+			return 63
+		},
+		GetMetaConsensusGroupSizeCalled: func() uint32 {
+			return 400
+		},
+		GetRoundDurationCalled: func() uint64 {
+			return 6000
+		},
+		MinNumberOfMetaNodesCalled: func() uint32 {
+			return 401
+		},
+		MinNumberOfShardNodesCalled: func() uint32 {
+			return 402
+		},
+		InitialNodesInfoCalled: func() (map[uint32][]nodesCoordinator.GenesisNodeInfoHandler, map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) {
+			validators := map[uint32][]nodesCoordinator.GenesisNodeInfoHandler{
+				0: {
+					&shardingMocks.NodeInfoMock{},
+					&shardingMocks.NodeInfoMock{},
+				},
+				1: {
+					&shardingMocks.NodeInfoMock{},
+				},
+			}
+
+			return validators, make(map[uint32][]nodesCoordinator.GenesisNodeInfoHandler)
+		},
+		GetStartTimeCalled: func() int64 {
+			return 111111
+		},
+	}
+	version := "version"
+	economicsConfigs := &config.EconomicsConfig{
+		RewardsSettings: config.RewardsSettings{
+			RewardsConfigByEpoch: []config.EpochRewardSettings{
+				{
+					LeaderPercentage: 2,
+				},
+				{
+					LeaderPercentage: 2,
+				},
+			},
+		},
+		GlobalSettings: config.GlobalSettings{
+			Denomination: 4,
+		},
+	}
+	roundsPerEpoch := int64(200)
+	minTransactionVersion := uint32(1)
+
+	t.Run("nil app status handler should error", func(t *testing.T) {
+		t.Parallel()
+
+		err := InitMetrics(nil, pubkeyString, nodeType, shardCoordinator, nodesSetup, version, economicsConfigs, roundsPerEpoch, minTransactionVersion)
+		assert.Equal(t, ErrNilAppStatusHandler, err)
+	})
+	t.Run("nil shard coordinator should error", func(t *testing.T) {
+		t.Parallel()
+
+		expectedErrorString := "nil shard coordinator when initializing metrics"
+		err := InitMetrics(appStatusHandler, pubkeyString, nodeType, nil, nodesSetup, version, economicsConfigs, roundsPerEpoch, minTransactionVersion)
+		assert.Equal(t, expectedErrorString, err.Error())
+	})
+	t.Run("nil nodes configs should error", func(t *testing.T) {
+		t.Parallel()
+
+		expectedErrorString := "nil nodes config when initializing metrics"
+		err := InitMetrics(appStatusHandler, pubkeyString, nodeType, shardCoordinator, nil, version, economicsConfigs, roundsPerEpoch, minTransactionVersion)
+		assert.Equal(t, expectedErrorString, err.Error())
+	})
+	t.Run("nil economics configs should error", func(t *testing.T) {
+		t.Parallel()
+
+		expectedErrorString := "nil economics config when initializing metrics"
+		err := InitMetrics(appStatusHandler, pubkeyString, nodeType, shardCoordinator, nodesSetup, version, nil, roundsPerEpoch, minTransactionVersion)
+		assert.Equal(t, expectedErrorString, err.Error())
+	})
+	t.Run("should work", func(t *testing.T) {
+		t.Parallel()
+
+		keys := make(map[string]interface{})
+		localStatusHandler := &statusHandler.AppStatusHandlerStub{
+			SetUInt64ValueHandler: func(key string, value uint64) {
+				keys[key] = value
+			},
+			SetStringValueHandler: func(key string, value string) {
+				keys[key] = value
+			},
+		}
+
+		err := InitMetrics(localStatusHandler, pubkeyString, nodeType, shardCoordinator, nodesSetup, version, economicsConfigs, roundsPerEpoch, minTransactionVersion)
+		assert.Nil(t, err)
+
+		expectedValues := map[string]interface{}{
+			common.MetricPublicKeyBlockSign: pubkeyString,
+			common.MetricShardId: uint64(shardCoordinator.SelfId()),
+			common.MetricNumShardsWithoutMetachain: uint64(shardCoordinator.NoShards),
+			common.MetricNodeType: string(nodeType),
+			common.MetricRoundTime: uint64(6),
+			common.MetricAppVersion: version,
+			common.MetricRoundsPerEpoch: uint64(roundsPerEpoch),
+			common.MetricCrossCheckBlockHeight: "0",
+			common.MetricCrossCheckBlockHeight + "_0": uint64(0),
+			common.MetricCrossCheckBlockHeight + "_1": uint64(0),
+			common.MetricCrossCheckBlockHeight + "_2": uint64(0),
+			common.MetricCrossCheckBlockHeightMeta: uint64(0),
+			common.MetricIsSyncing: uint64(1),
+			common.MetricLeaderPercentage: fmt.Sprintf("%f", 2.0),
+			common.MetricDenomination: uint64(4),
+			common.MetricShardConsensusGroupSize: uint64(63),
+			common.MetricMetaConsensusGroupSize: uint64(400),
+			common.MetricNumNodesPerShard: uint64(402),
+			common.MetricNumMetachainNodes: uint64(401),
+			common.MetricStartTime: uint64(111111),
+			common.MetricRoundDuration: uint64(6000),
+			common.MetricMinTransactionVersion: uint64(1),
+			common.MetricNumValidators: uint64(2),
+			common.MetricConsensusGroupSize: uint64(63),
+		}
+
+		assert.Equal(t, len(expectedValues), len(keys))
+		for k, v := range expectedValues {
+			assert.Equal(t, v, keys[k], fmt.Sprintf("for key %s", k))
+		}
+	})
+	t.Run("should work - metachain", func(t *testing.T) {
+		t.Parallel()
+
+		keys := make(map[string]interface{})
+		localStatusHandler := &statusHandler.AppStatusHandlerStub{
+			SetUInt64ValueHandler: func(key string, value uint64) {
+				keys[key] = value
+			},
+			SetStringValueHandler: func(key string, value string) {
+				keys[key] = value
+			},
+		}
+		localShardCoordinator := &testscommon.ShardsCoordinatorMock{
+			NoShards: 3,
+			SelfIDCalled: func() uint32 {
+				return common.MetachainShardId
+			},
+		}
+
+		err := InitMetrics(localStatusHandler, pubkeyString, nodeType, localShardCoordinator, nodesSetup, version, economicsConfigs, roundsPerEpoch, minTransactionVersion)
+		assert.Nil(t, err)
+
+		expectedValues := map[string]interface{}{
+			common.MetricPublicKeyBlockSign: pubkeyString,
+			common.MetricShardId: uint64(localShardCoordinator.SelfId()),
+			common.MetricNumShardsWithoutMetachain: uint64(localShardCoordinator.NoShards),
+			common.MetricNodeType: string(nodeType),
+			common.MetricRoundTime: uint64(6),
+			common.MetricAppVersion: version,
+			common.MetricRoundsPerEpoch: uint64(roundsPerEpoch),
+			common.MetricCrossCheckBlockHeight: "0",
+			common.MetricCrossCheckBlockHeight + "_0": uint64(0),
+			common.MetricCrossCheckBlockHeight + "_1": uint64(0),
+			common.MetricCrossCheckBlockHeight + "_2": uint64(0),
+			common.MetricCrossCheckBlockHeightMeta: uint64(0),
+			common.MetricIsSyncing: uint64(1),
+			common.MetricLeaderPercentage: fmt.Sprintf("%f", 2.0),
+			common.MetricDenomination: uint64(4),
+			common.MetricShardConsensusGroupSize: uint64(63),
+			common.MetricMetaConsensusGroupSize: uint64(400),
+			common.MetricNumNodesPerShard: uint64(402),
+			common.MetricNumMetachainNodes: uint64(401),
+			common.MetricStartTime: uint64(111111),
+			common.MetricRoundDuration: uint64(6000),
+			common.MetricMinTransactionVersion: uint64(1),
+			common.MetricNumValidators: uint64(0),
+			common.MetricConsensusGroupSize: uint64(400),
+		}
+
+		assert.Equal(t, len(expectedValues), len(keys))
+		for k, v := range expectedValues {
+			assert.Equal(t, v, keys[k], fmt.Sprintf("for key %s", k))
+		}
+	})
+	t.Run("should work - invalid shard id", func(t *testing.T) {
+		t.Parallel()
+
+		keys := make(map[string]interface{})
+		localStatusHandler := &statusHandler.AppStatusHandlerStub{
+			SetUInt64ValueHandler: func(key string, value uint64) {
+				keys[key] = value
+			},
+			SetStringValueHandler: func(key string, value string) {
+				keys[key] = value
+			},
+		}
+		localShardCoordinator := &testscommon.ShardsCoordinatorMock{
+			NoShards: 3,
+			SelfIDCalled: func() uint32 {
+				return 10
+			},
+		}
+
+		err := InitMetrics(localStatusHandler, pubkeyString, nodeType, localShardCoordinator, nodesSetup, version, economicsConfigs, roundsPerEpoch, minTransactionVersion)
+		assert.Nil(t, err)
+
+		assert.Equal(t, uint64(0), keys[common.MetricConsensusGroupSize])
+	})
+}
+
+func TestSaveStringMetric(t *testing.T) {
+	t.Parallel()
+
+	t.Run("should not panic if appStatusHandler is nil", func(t *testing.T) {
+		assert.NotPanics(t, func() {
+			SaveStringMetric(nil, "key", "value")
+		})
+	})
+	t.Run("should work", func(t *testing.T) {
+		wasCalled := false
+		appStatusHandler := &statusHandler.AppStatusHandlerStub{
+			SetStringValueHandler: func(key string, value string) {
+				wasCalled = true
+				assert.Equal(t, "key", key)
+				assert.Equal(t, "value", value)
+			},
+		}
+		SaveStringMetric(appStatusHandler, "key", "value")
+		assert.True(t, wasCalled)
+	})
+}
+
+func TestSaveUint64Metric(t *testing.T) {
+	t.Parallel()
+
+	t.Run("should not panic if appStatusHandler is nil", func(t *testing.T) {
+		assert.NotPanics(t, func() {
+			SaveUint64Metric(nil, "key", 1)
+		})
+	})
+	t.Run("should work", func(t *testing.T) {
+		wasCalled := false
+		appStatusHandler := &statusHandler.AppStatusHandlerStub{
+			SetUInt64ValueHandler: func(key string, value uint64) {
+				wasCalled = true
+				assert.Equal(t, "key", key)
+				assert.Equal(t, uint64(1), value)
+			},
+		}
+		SaveUint64Metric(appStatusHandler, "key", 1)
+		assert.True(t, wasCalled)
+	})
+}
diff --git a/node/mock/applicationRunningTrigger.go b/node/mock/applicationRunningTrigger.go
new file mode 100644
index 00000000000..ade030ca801
--- /dev/null
+++ b/node/mock/applicationRunningTrigger.go
@@ -0,0 +1,35 @@
+package mock
+
+import (
+	"strings"
+
+	logger "github.com/multiversx/mx-chain-logger-go"
+)
+
+var log = logger.GetOrCreate("node/mock")
+
+type applicationRunningTrigger struct {
+	chanClose chan struct{}
+}
+
+// NewApplicationRunningTrigger -
+func NewApplicationRunningTrigger() *applicationRunningTrigger {
+	return &applicationRunningTrigger{
+		chanClose: make(chan struct{}),
+	}
+}
+
+// Write -
+func (trigger *applicationRunningTrigger) Write(p []byte) (n int, err error) {
+	if strings.Contains(string(p), "application is now running") {
+		log.Info("got signal, trying to gracefully close the node")
+		close(trigger.chanClose)
+	}
+
+	return 0, nil
+}
+
+// ChanClose -
+func (trigger *applicationRunningTrigger) ChanClose() chan struct{} {
+	return trigger.chanClose
+}
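The trigger doubles as a log writer: attached as a log observer, it watches for the node's "application is now running" line and closes its channel so a test can react. A usage sketch; attaching via logger.AddLogObserver with a plain formatter is an assumption about how the surrounding test wires it up:

    trigger := mock.NewApplicationRunningTrigger()
    err := logger.AddLogObserver(trigger, &logger.PlainFormatter{}) // assumed observer API
    require.Nil(t, err)

    // ... start the node under test, then block until it reports it is running:
    select {
    case <-trigger.ChanClose():
        // "application is now running" was logged; safe to close gracefully
    case <-time.After(time.Minute):
        require.Fail(t, "timeout waiting for the node to start")
    }
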
diff --git a/node/mock/factory/cryptoComponentsStub.go b/node/mock/factory/cryptoComponentsStub.go
index f7e61374bcf..7c200169cbc 100644
--- a/node/mock/factory/cryptoComponentsStub.go
+++ b/node/mock/factory/cryptoComponentsStub.go
@@ -19,7 +19,6 @@ type CryptoComponentsMock struct {
 	P2pPrivKey   crypto.PrivateKey
 	P2pSig       crypto.SingleSigner
 	PubKeyString string
-	PrivKeyBytes []byte
 	PubKeyBytes  []byte
 	BlockSig     crypto.SingleSigner
 	TxSig        crypto.SingleSigner
@@ -85,11 +84,6 @@ func (ccm *CryptoComponentsMock) PublicKeyBytes() []byte {
 	return ccm.PubKeyBytes
 }

-// PrivateKeyBytes -
-func (ccm *CryptoComponentsMock) PrivateKeyBytes() []byte {
-	return ccm.PrivKeyBytes
-}
-
 // BlockSigner -
 func (ccm *CryptoComponentsMock) BlockSigner() crypto.SingleSigner {
 	return ccm.BlockSig
@@ -180,7 +174,6 @@ func (ccm *CryptoComponentsMock) Clone() interface{} {
 		PrivKey:      ccm.PrivKey,
 		P2pPrivKey:   ccm.P2pPrivKey,
 		PubKeyString: ccm.PubKeyString,
-		PrivKeyBytes: ccm.PrivKeyBytes,
 		PubKeyBytes:  ccm.PubKeyBytes,
 		BlockSig:     ccm.BlockSig,
 		TxSig:        ccm.TxSig,
diff --git a/node/mock/factory/dataComponentsStub.go b/node/mock/factory/dataComponentsStub.go
index b39a58f6309..67d40cfbd99 100644
--- a/node/mock/factory/dataComponentsStub.go
+++ b/node/mock/factory/dataComponentsStub.go
@@ -42,10 +42,11 @@ func (dcm *DataComponentsMock) Blockchain() data.ChainHandler {
 }

 // SetBlockchain -
-func (dcm *DataComponentsMock) SetBlockchain(chain data.ChainHandler) {
+func (dcm *DataComponentsMock) SetBlockchain(chain data.ChainHandler) error {
 	dcm.mutDcm.Lock()
 	dcm.BlockChain = chain
 	dcm.mutDcm.Unlock()
+	return nil
 }

 // StorageService -
diff --git a/node/mock/txFeeHandlerStub.go b/node/mock/txFeeHandlerStub.go
deleted file mode 100644
index d8c0f997133..00000000000
--- a/node/mock/txFeeHandlerStub.go
+++ /dev/null
@@ -1,289 +0,0 @@
-package mock
-
-import (
-	"math/big"
-
-	"github.com/multiversx/mx-chain-core-go/core"
-	"github.com/multiversx/mx-chain-core-go/data"
-)
-
-// EconomicsHandlerStub -
-type EconomicsHandlerStub struct {
-	MaxGasLimitPerBlockCalled                      func() uint64
-	MaxGasLimitPerMiniBlockCalled                  func() uint64
-	MaxGasLimitPerBlockForSafeCrossShardCalled     func() uint64
-	MaxGasLimitPerMiniBlockForSafeCrossShardCalled func() uint64
-	MaxGasLimitPerTxCalled                         func() uint64
-	SetMinGasPriceCalled                           func(minGasPrice uint64)
-	SetMinGasLimitCalled                           func(minGasLimit uint64)
-	ComputeGasLimitCalled                          func(tx data.TransactionWithFeeHandler) uint64
-	ComputeMoveBalanceFeeCalled                    func(tx data.TransactionWithFeeHandler) *big.Int
-	ComputeTxFeeCalled                             func(tx data.TransactionWithFeeHandler) *big.Int
-	CheckValidityTxValuesCalled                    func(tx data.TransactionWithFeeHandler) error
-	DeveloperPercentageCalled                      func() float64
-	MinGasPriceCalled                              func() uint64
-	LeaderPercentageCalled                         func() float64
-	ProtocolSustainabilityPercentageCalled         func() float64
-	ProtocolSustainabilityAddressCalled            func() string
-	MinInflationRateCalled                         func() float64
-	MaxInflationRateCalled                         func(year uint32) float64
-	GasPriceModifierCalled                         func() float64
-	ComputeFeeForProcessingCalled                  func(tx data.TransactionWithFeeHandler, gasToUse uint64) *big.Int
-	GenesisTotalSupplyCalled                       func() *big.Int
-	RewardsTopUpGradientPointCalled                func() *big.Int
-	RewardsTopUpFactorCalled                       func() float64
-	SplitTxGasInCategoriesCalled                   func(tx data.TransactionWithFeeHandler) (uint64, uint64)
-	GasPriceForProcessingCalled                    func(tx data.TransactionWithFeeHandler) uint64
-	GasPriceForMoveCalled                          func(tx data.TransactionWithFeeHandler) uint64
-	MinGasPriceForProcessingCalled                 func() uint64
-	ComputeGasUsedAndFeeBasedOnRefundValueCalled   func(tx data.TransactionWithFeeHandler, refundValue *big.Int) (uint64, *big.Int)
-	ComputeTxFeeBasedOnGasUsedCalled               func(tx data.TransactionWithFeeHandler, gasUsed uint64) *big.Int
-	ComputeGasLimitBasedOnBalanceCalled            func(tx data.TransactionWithFeeHandler, balance *big.Int) (uint64, error)
-	SetStatusHandlerCalled                         func(statusHandler core.AppStatusHandler) error
-}
-
-// ComputeGasLimitBasedOnBalance -
-func (ehs *EconomicsHandlerStub) ComputeGasLimitBasedOnBalance(tx data.TransactionWithFeeHandler, balance *big.Int) (uint64, error) {
-	if ehs.ComputeGasLimitBasedOnBalanceCalled != nil {
-		return ehs.ComputeGasLimitBasedOnBalanceCalled(tx, balance)
-	}
-	return 0, nil
-}
-
-// ComputeFeeForProcessing -
-func (ehs *EconomicsHandlerStub) ComputeFeeForProcessing(tx data.TransactionWithFeeHandler, gasToUse uint64) *big.Int {
-	if ehs.ComputeFeeForProcessingCalled != nil {
-		return ehs.ComputeFeeForProcessingCalled(tx, gasToUse)
-	}
-	return big.NewInt(0)
-}
-
-// GasPriceModifier -
-func (ehs *EconomicsHandlerStub) GasPriceModifier() float64 {
-	if ehs.GasPriceModifierCalled != nil {
-		return ehs.GasPriceModifierCalled()
-	}
-	return 1.0
-}
-
-// MinGasPrice -
-func (ehs *EconomicsHandlerStub) MinGasPrice() uint64 {
-	if ehs.MinGasPriceCalled != nil {
-		return ehs.MinGasPriceCalled()
-	}
-	return 0
-}
-
-// MinGasLimit will return min gas limit
-func (ehs *EconomicsHandlerStub) MinGasLimit() uint64 {
-	return 0
-}
-
-// GasPerDataByte -
-func (ehs *EconomicsHandlerStub) GasPerDataByte() uint64 {
-	return 0
-}
-
-// DeveloperPercentage -
-func (ehs *EconomicsHandlerStub) DeveloperPercentage() float64 {
-	return ehs.DeveloperPercentageCalled()
-}
-
-// MaxGasLimitPerBlock -
-func (ehs *EconomicsHandlerStub) MaxGasLimitPerBlock(uint32) uint64 {
-	if ehs.MaxGasLimitPerBlockCalled != nil {
-		return ehs.MaxGasLimitPerBlockCalled()
-	}
-	return 0
-}
-
-// MaxGasLimitPerMiniBlock -
-func (ehs *EconomicsHandlerStub) MaxGasLimitPerMiniBlock(uint32) uint64 {
-	if ehs.MaxGasLimitPerMiniBlockCalled != nil {
-		return ehs.MaxGasLimitPerMiniBlockCalled()
-	}
-	return 0
-}
-
-// MaxGasLimitPerBlockForSafeCrossShard -
-func (ehs *EconomicsHandlerStub) MaxGasLimitPerBlockForSafeCrossShard() uint64 {
-	if ehs.MaxGasLimitPerBlockForSafeCrossShardCalled != nil {
-		return ehs.MaxGasLimitPerBlockForSafeCrossShardCalled()
-	}
-	return 0
-}
-
-// MaxGasLimitPerMiniBlockForSafeCrossShard -
-func (ehs *EconomicsHandlerStub) MaxGasLimitPerMiniBlockForSafeCrossShard() uint64 {
-	if ehs.MaxGasLimitPerMiniBlockForSafeCrossShardCalled != nil {
-		return ehs.MaxGasLimitPerMiniBlockForSafeCrossShardCalled()
-	}
-	return 0
-}
-
-// MaxGasLimitPerTx -
-func (ehs *EconomicsHandlerStub) MaxGasLimitPerTx() uint64 {
-	if ehs.MaxGasLimitPerTxCalled != nil {
-		return ehs.MaxGasLimitPerTxCalled()
-	}
-	return 0
-}
-
-// ComputeGasLimit -
-func (ehs *EconomicsHandlerStub) ComputeGasLimit(tx data.TransactionWithFeeHandler) uint64 {
-	return ehs.ComputeGasLimitCalled(tx)
-}
-
-// ComputeMoveBalanceFee -
-func (ehs *EconomicsHandlerStub) ComputeMoveBalanceFee(tx data.TransactionWithFeeHandler) *big.Int {
-	return ehs.ComputeMoveBalanceFeeCalled(tx)
-}
-
-// ComputeTxFee -
-func (ehs *EconomicsHandlerStub) ComputeTxFee(tx data.TransactionWithFeeHandler) *big.Int {
-	if ehs.ComputeTxFeeCalled != nil {
-		return ehs.ComputeTxFeeCalled(tx)
-	}
-
-	return big.NewInt(0)
-}
-
-// CheckValidityTxValues -
-func (ehs *EconomicsHandlerStub) CheckValidityTxValues(tx data.TransactionWithFeeHandler) error {
-	if ehs.CheckValidityTxValuesCalled != nil {
-		return ehs.CheckValidityTxValuesCalled(tx)
-	}
-
-	return nil
-}
-
-// LeaderPercentage -
-func (ehs *EconomicsHandlerStub) LeaderPercentage() float64 {
-	if ehs.LeaderPercentageCalled != nil {
-		return ehs.LeaderPercentageCalled()
-	}
-
-	return 1
-}
-
-// ProtocolSustainabilityPercentage will return the protocol sustainability percentage value
-func (ehs *EconomicsHandlerStub) ProtocolSustainabilityPercentage() float64 {
-	if ehs.ProtocolSustainabilityPercentageCalled != nil {
-		return ehs.ProtocolSustainabilityPercentageCalled()
-	}
-
-	return 0.1
-}
-
-// ProtocolSustainabilityAddress will return the protocol sustainability address
-func (ehs *EconomicsHandlerStub) ProtocolSustainabilityAddress() string {
-	if ehs.ProtocolSustainabilityAddressCalled != nil {
-		return ehs.ProtocolSustainabilityAddressCalled()
-	}
-
-	return "1111"
-}
-
-// MinInflationRate -
-func (ehs *EconomicsHandlerStub) MinInflationRate() float64 {
-	if ehs.MinInflationRateCalled != nil {
-		return ehs.MinInflationRateCalled()
-	}
-
-	return 1
-}
-
-// MaxInflationRate -
-func (ehs *EconomicsHandlerStub) MaxInflationRate(year uint32) float64 {
-	if ehs.MaxInflationRateCalled != nil {
-		return ehs.MaxInflationRateCalled(year)
-	}
-
-	return 1000000
-}
-
-// GenesisTotalSupply -
-func (ehs *EconomicsHandlerStub) GenesisTotalSupply() *big.Int {
-	if ehs.GenesisTotalSupplyCalled != nil {
-		return ehs.GenesisTotalSupplyCalled()
-	}
-
-	return big.NewInt(0)
-}
-
-// RewardsTopUpGradientPoint -
-func (ehs *EconomicsHandlerStub) RewardsTopUpGradientPoint() *big.Int {
-	if ehs.RewardsTopUpGradientPointCalled != nil {
-		return ehs.RewardsTopUpGradientPointCalled()
-	}
-	return big.NewInt(0)
-}
-
-// RewardsTopUpFactor -
-func (ehs *EconomicsHandlerStub) RewardsTopUpFactor() float64 {
-	if ehs.RewardsTopUpFactorCalled != nil {
-		return ehs.RewardsTopUpFactorCalled()
-	}
-	return 0
-}
-
-// SplitTxGasInCategories -
-func (ehs *EconomicsHandlerStub) SplitTxGasInCategories(tx data.TransactionWithFeeHandler) (uint64, uint64) {
-	if ehs.SplitTxGasInCategoriesCalled != nil {
-		return ehs.SplitTxGasInCategoriesCalled(tx)
-	}
-	return 0, 0
-}
-
-// GasPriceForProcessing -
-func (ehs *EconomicsHandlerStub) GasPriceForProcessing(tx data.TransactionWithFeeHandler) uint64 {
-	if ehs.GasPriceForProcessingCalled != nil {
-		return ehs.GasPriceForProcessingCalled(tx)
-	}
-	return 0
-}
-
-// GasPriceForMove -
-func (ehs *EconomicsHandlerStub) GasPriceForMove(tx data.TransactionWithFeeHandler) uint64 {
-	if ehs.GasPriceForMoveCalled != nil {
-		return ehs.GasPriceForMoveCalled(tx)
-	}
-	return 0
-}
-
-// MinGasPriceForProcessing -
-func (ehs *EconomicsHandlerStub) MinGasPriceForProcessing() uint64 {
-	if ehs.MinGasPriceForProcessingCalled != nil {
-		return ehs.MinGasPriceForProcessingCalled()
-	}
-	return 0
-}
-
-// ComputeGasUsedAndFeeBasedOnRefundValue -
-func (ehs *EconomicsHandlerStub) ComputeGasUsedAndFeeBasedOnRefundValue(tx data.TransactionWithFeeHandler, refundValue *big.Int) (uint64, *big.Int) {
-	if ehs.ComputeGasUsedAndFeeBasedOnRefundValueCalled != nil {
-		return ehs.ComputeGasUsedAndFeeBasedOnRefundValueCalled(tx, refundValue)
-	}
-	return 0, big.NewInt(0)
-}
-
-// ComputeTxFeeBasedOnGasUsed -
-func (ehs *EconomicsHandlerStub) ComputeTxFeeBasedOnGasUsed(tx data.TransactionWithFeeHandler, gasUsed uint64) *big.Int {
-	if ehs.ComputeTxFeeBasedOnGasUsedCalled != nil {
-		return ehs.ComputeTxFeeBasedOnGasUsedCalled(tx, gasUsed)
-	}
-	return big.NewInt(0)
-}
-
-// SetStatusHandler -
-func (ehs *EconomicsHandlerStub) SetStatusHandler(statusHandler core.AppStatusHandler) error {
-	if ehs.SetStatusHandlerCalled != nil {
-		return ehs.SetStatusHandlerCalled(statusHandler)
-	}
-	return nil
-}
-
-// IsInterfaceNil returns true if there is no value under the interface
-func (ehs *EconomicsHandlerStub) IsInterfaceNil() bool {
-	return ehs == nil
-}
diff --git a/node/node.go b/node/node.go
index 23fc451937e..3de7c9610a4 100644
--- a/node/node.go
+++ b/node/node.go
@@ -19,15 +19,18 @@ import (
 	"github.com/multiversx/mx-chain-core-go/data/block"
 	"github.com/multiversx/mx-chain-core-go/data/endProcess"
 	"github.com/multiversx/mx-chain-core-go/data/esdt"
+	"github.com/multiversx/mx-chain-core-go/data/guardians"
 	"github.com/multiversx/mx-chain-core-go/data/transaction"
 	disabledSig "github.com/multiversx/mx-chain-crypto-go/signing/disabled/singlesig"
 	"github.com/multiversx/mx-chain-go/common"
+	"github.com/multiversx/mx-chain-go/common/errChan"
 	"github.com/multiversx/mx-chain-go/dataRetriever"
 	"github.com/multiversx/mx-chain-go/debug"
 	"github.com/multiversx/mx-chain-go/facade"
 	mainFactory "github.com/multiversx/mx-chain-go/factory"
 	heartbeatData "github.com/multiversx/mx-chain-go/heartbeat/data"
 	"github.com/multiversx/mx-chain-go/node/disabled"
+	"github.com/multiversx/mx-chain-go/node/external"
 	"github.com/multiversx/mx-chain-go/p2p"
 	"github.com/multiversx/mx-chain-go/process"
 	"github.com/multiversx/mx-chain-go/process/dataValidators"
@@ -157,13 +160,11 @@ func (n *Node) GetConsensusGroupSize() int {
 func (n *Node) GetBalance(address string, options api.AccountQueryOptions) (*big.Int, api.BlockInfo, error) {
 	userAccount, blockInfo, err := n.loadUserAccountHandlerByAddress(address, options)
 	if err != nil {
-		apiBlockInfo, ok := extractApiBlockInfoIfErrAccountNotFoundAtBlock(err)
-		if ok {
-			return big.NewInt(0), apiBlockInfo, nil
-		}
-		if err == ErrCannotCastAccountHandlerToUserAccountHandler {
-			return big.NewInt(0), api.BlockInfo{}, nil
+		adaptedBlockInfo, isEmptyAccount := extractBlockInfoIfNewAccount(err)
+		if isEmptyAccount {
+			return big.NewInt(0), adaptedBlockInfo, nil
 		}
+
 		return nil, api.BlockInfo{}, err
 	}

@@ -174,6 +175,11 @@ func (n *Node) GetUsername(address string, options api.AccountQueryOptions) (str
 	userAccount, blockInfo, err := n.loadUserAccountHandlerByAddress(address, options)
 	if err != nil {
+		adaptedBlockInfo, isEmptyAccount := extractBlockInfoIfNewAccount(err)
+		if isEmptyAccount {
+			return "", adaptedBlockInfo, nil
+		}
+
 		return "", api.BlockInfo{}, err
 	}

@@ -185,6 +191,11 @@ func (n *Node) GetCodeHash(address string, options api.AccountQueryOptions) ([]byte, api.BlockInfo, error) {
 	userAccount, blockInfo, err := n.loadUserAccountHandlerByAddress(address, options)
 	if err != nil {
+		adaptedBlockInfo, isEmptyAccount := extractBlockInfoIfNewAccount(err)
+		if isEmptyAccount {
+			return make([]byte, 0), adaptedBlockInfo, nil
+		}
+
 		return nil, api.BlockInfo{}, err
 	}

@@ -200,6 +211,7 @@ func (n *Node) GetAllIssuedESDTs(tokenType string, ctx context.Context) ([]strin

 	userAccount, _, err := n.loadUserAccountHandlerByPubKey(vm.ESDTSCAddress, api.AccountQueryOptions{})
 	if err != nil {
+		// don't return 0 values here - not finding the ESDT SC address is an error that should be returned
 		return nil, err
 	}

@@ -215,7 +227,7 @@ func (n *Node) GetAllIssuedESDTs(tokenType string, ctx context.Context) ([]strin
 	chLeaves := &common.TrieIteratorChannels{
 		LeavesChan: make(chan core.KeyValueHolder, common.TrieLeavesChannelDefaultCapacity),
-		ErrChan:    make(chan error, 1),
+		ErrChan:    errChan.NewErrChanWrapper(),
 	}
 	err = userAccount.DataTrie().GetAllLeavesOnChannel(chLeaves, ctx, rootHash, keyBuilder.NewKeyBuilder())
 	if err != nil {
@@ -243,7 +255,7 @@ func (n *Node) GetAllIssuedESDTs(tokenType string, ctx context.Context) ([]strin
 		}
 	}

-	err = common.GetErrorFromChanNonBlocking(chLeaves.ErrChan)
+	err = chLeaves.ErrChan.ReadFromChanNonBlocking()
 	if err != nil {
 		return nil, err
 	}
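The errChan.NewErrChanWrapper() value replaces the bare `make(chan error, 1)` at every trie-iteration call site from here on. Only the consumer side (ReadFromChanNonBlocking) is visible in this diff, so the following is a sketch of the assumed contract rather than the actual implementation in common/errChan (the producer-side method name is an assumption):

    // Sketch only: assumed shape of the error-channel wrapper.
    type errChanWrapper struct {
        ch chan error
    }

    func newErrChanWrapper() *errChanWrapper {
        return &errChanWrapper{ch: make(chan error, 1)}
    }

    // Assumed producer side: keeps the first error, drops later ones.
    func (e *errChanWrapper) WriteInChanNonBlocking(err error) {
        select {
        case e.ch <- err:
        default:
        }
    }

    // Consumer side, as used above: returns a stored error or nil without blocking.
    func (e *errChanWrapper) ReadFromChanNonBlocking() error {
        select {
        case err := <-e.ch:
            return err
        default:
            return nil
        }
    }
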
common.TrieLeavesChannelDefaultCapacity), - ErrChan: make(chan error, 1), + ErrChan: errChan.NewErrChanWrapper(), } err = userAccount.DataTrie().GetAllLeavesOnChannel(chLeaves, ctx, rootHash, keyBuilder.NewKeyBuilder()) if err != nil { @@ -243,7 +255,7 @@ func (n *Node) GetAllIssuedESDTs(tokenType string, ctx context.Context) ([]strin } } - err = common.GetErrorFromChanNonBlocking(chLeaves.ErrChan) + err = chLeaves.ErrChan.ReadFromChanNonBlocking() if err != nil { return nil, err } @@ -277,6 +289,11 @@ func (n *Node) getEsdtDataFromLeaf(leaf core.KeyValueHolder, userAccount state.U func (n *Node) GetKeyValuePairs(address string, options api.AccountQueryOptions, ctx context.Context) (map[string]string, api.BlockInfo, error) { userAccount, blockInfo, err := n.loadUserAccountHandlerByAddress(address, options) if err != nil { + adaptedBlockInfo, isEmptyAccount := extractBlockInfoIfNewAccount(err) + if isEmptyAccount { + return make(map[string]string), adaptedBlockInfo, nil + } + return nil, api.BlockInfo{}, err } @@ -291,7 +308,7 @@ func (n *Node) GetKeyValuePairs(address string, options api.AccountQueryOptions, chLeaves := &common.TrieIteratorChannels{ LeavesChan: make(chan core.KeyValueHolder, common.TrieLeavesChannelDefaultCapacity), - ErrChan: make(chan error, 1), + ErrChan: errChan.NewErrChanWrapper(), } err = userAccount.DataTrie().GetAllLeavesOnChannel(chLeaves, ctx, rootHash, keyBuilder.NewKeyBuilder()) if err != nil { @@ -310,7 +327,7 @@ func (n *Node) GetKeyValuePairs(address string, options api.AccountQueryOptions, mapToReturn[hex.EncodeToString(leaf.Key())] = hex.EncodeToString(value) } - err = common.GetErrorFromChanNonBlocking(chLeaves.ErrChan) + err = chLeaves.ErrChan.ReadFromChanNonBlocking() if err != nil { return nil, api.BlockInfo{}, err } @@ -331,6 +348,11 @@ func (n *Node) GetValueForKey(address string, key string, options api.AccountQue userAccount, blockInfo, err := n.loadUserAccountHandlerByAddress(address, options) if err != nil { + adaptedBlockInfo, isEmptyAccount := extractBlockInfoIfNewAccount(err) + if isEmptyAccount { + return "", adaptedBlockInfo, nil + } + return "", api.BlockInfo{}, err } @@ -342,11 +364,70 @@ func (n *Node) GetValueForKey(address string, key string, options api.AccountQue return hex.EncodeToString(valueBytes), blockInfo, nil } +// GetGuardianData returns the guardian data for given account +func (n *Node) GetGuardianData(address string, options api.AccountQueryOptions) (api.GuardianData, api.BlockInfo, error) { + userAccount, blockInfo, err := n.loadUserAccountHandlerByAddress(address, options) + if err != nil { + adaptedBlockInfo, isEmptyAccount := extractBlockInfoIfNewAccount(err) + if isEmptyAccount { + return api.GuardianData{}, adaptedBlockInfo, nil + } + + return api.GuardianData{}, api.BlockInfo{}, err + } + + activeGuardian, pendingGuardian, err := n.getPendingAndActiveGuardians(userAccount) + if err != nil { + return api.GuardianData{}, api.BlockInfo{}, err + } + + return api.GuardianData{ + ActiveGuardian: activeGuardian, + PendingGuardian: pendingGuardian, + Guarded: userAccount.IsGuarded(), + }, blockInfo, nil +} + +func (n *Node) getPendingAndActiveGuardians( + userAccount state.UserAccountHandler, +) (activeGuardian *api.Guardian, pendingGuardian *api.Guardian, err error) { + var active, pending *guardians.Guardian + gah := n.bootstrapComponents.GuardedAccountHandler() + active, pending, err = gah.GetConfiguredGuardians(userAccount) + if err != nil { + return nil, nil, err + } + + if active != nil { + activeGuardian = 
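
`errChan.NewErrChanWrapper()` replaces the raw `make(chan error, 1)` so trie-iteration code no longer reasons about channel buffering and double-close directly. A minimal sketch of what such a wrapper needs to provide, matching the `WriteInChanNonBlocking`, `ReadFromChanNonBlocking`, and `Close` calls used elsewhere in this diff; this is an illustration, not the mx-chain-go implementation:

```go
package main

import (
	"errors"
	"fmt"
	"sync"
)

type errChanWrapper struct {
	ch        chan error
	closeOnce sync.Once
}

func newErrChanWrapper() *errChanWrapper {
	return &errChanWrapper{ch: make(chan error, 1)}
}

// WriteInChanNonBlocking keeps only the first error and never blocks the writer
func (e *errChanWrapper) WriteInChanNonBlocking(err error) {
	select {
	case e.ch <- err:
	default: // an error is already buffered; drop this one
	}
}

// ReadFromChanNonBlocking returns the stored error, or nil if none was written
func (e *errChanWrapper) ReadFromChanNonBlocking() error {
	select {
	case err := <-e.ch:
		return err
	default:
		return nil
	}
}

// Close is safe to call more than once
func (e *errChanWrapper) Close() {
	e.closeOnce.Do(func() { close(e.ch) })
}

func main() {
	ec := newErrChanWrapper()
	ec.WriteInChanNonBlocking(errors.New("leaf decode failed"))
	fmt.Println(ec.ReadFromChanNonBlocking()) // leaf decode failed
	ec.Close()
}
```
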
&api.Guardian{ + Address: n.coreComponents.AddressPubKeyConverter().SilentEncode(active.Address, log), + ActivationEpoch: active.ActivationEpoch, + ServiceUID: string(active.ServiceUID), + } + } + if pending != nil { + pendingGuardian = &api.Guardian{ + Address: n.coreComponents.AddressPubKeyConverter().SilentEncode(pending.Address, log), + ActivationEpoch: pending.ActivationEpoch, + ServiceUID: string(pending.ServiceUID), + } + } + + return +} + // GetESDTData returns the esdt balance and properties from a given account func (n *Node) GetESDTData(address, tokenID string, nonce uint64, options api.AccountQueryOptions) (*esdt.ESDigitalToken, api.BlockInfo, error) { // TODO: refactor here as to ensure userAccount and systemAccount are on the same root-hash userAccount, _, err := n.loadUserAccountHandlerByAddress(address, options) if err != nil { + adaptedBlockInfo, isEmptyAccount := extractBlockInfoIfNewAccount(err) + if isEmptyAccount { + return &esdt.ESDigitalToken{ + Value: big.NewInt(0), + }, adaptedBlockInfo, nil + } + return nil, api.BlockInfo{}, err } @@ -401,7 +482,7 @@ func (n *Node) getTokensIDsWithFilter( chLeaves := &common.TrieIteratorChannels{ LeavesChan: make(chan core.KeyValueHolder, common.TrieLeavesChannelDefaultCapacity), - ErrChan: make(chan error, 1), + ErrChan: errChan.NewErrChanWrapper(), } err = userAccount.DataTrie().GetAllLeavesOnChannel(chLeaves, ctx, rootHash, keyBuilder.NewKeyBuilder()) if err != nil { @@ -424,7 +505,7 @@ func (n *Node) getTokensIDsWithFilter( } } - err = common.GetErrorFromChanNonBlocking(chLeaves.ErrChan) + err = chLeaves.ErrChan.ReadFromChanNonBlocking() if err != nil { return nil, api.BlockInfo{}, err } @@ -514,6 +595,11 @@ func (n *Node) GetAllESDTTokens(address string, options api.AccountQueryOptions, // TODO: refactor here as to ensure userAccount and systemAccount are on the same root-hash userAccount, _, err := n.loadUserAccountHandlerByAddress(address, options) if err != nil { + adaptedBlockInfo, isEmptyAccount := extractBlockInfoIfNewAccount(err) + if isEmptyAccount { + return make(map[string]*esdt.ESDigitalToken), adaptedBlockInfo, nil + } + return nil, api.BlockInfo{}, err } @@ -537,7 +623,7 @@ func (n *Node) GetAllESDTTokens(address string, options api.AccountQueryOptions, chLeaves := &common.TrieIteratorChannels{ LeavesChan: make(chan core.KeyValueHolder, common.TrieLeavesChannelDefaultCapacity), - ErrChan: make(chan error, 1), + ErrChan: errChan.NewErrChanWrapper(), } err = userAccount.DataTrie().GetAllLeavesOnChannel(chLeaves, ctx, rootHash, keyBuilder.NewKeyBuilder()) if err != nil { @@ -568,9 +654,9 @@ func (n *Node) GetAllESDTTokens(address string, options api.AccountQueryOptions, } if esdtToken.TokenMetaData != nil { - esdtTokenCreatorAddr, err := n.coreComponents.AddressPubKeyConverter().Encode(esdtToken.TokenMetaData.Creator) - if err != nil { - return nil, api.BlockInfo{}, err + esdtTokenCreatorAddr, errEncode := n.coreComponents.AddressPubKeyConverter().Encode(esdtToken.TokenMetaData.Creator) + if errEncode != nil { + return nil, api.BlockInfo{}, errEncode } esdtToken.TokenMetaData.Creator = []byte(esdtTokenCreatorAddr) tokenName = adjustNftTokenIdentifier(tokenName, esdtToken.TokenMetaData.Nonce) @@ -579,7 +665,7 @@ func (n *Node) GetAllESDTTokens(address string, options api.AccountQueryOptions, allESDTs[tokenName] = esdtToken } - err = common.GetErrorFromChanNonBlocking(chLeaves.ErrChan) + err = chLeaves.ErrChan.ReadFromChanNonBlocking() if err != nil { return nil, api.BlockInfo{}, err } @@ -676,14 +762,16 @@ func (n 
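
`getPendingAndActiveGuardians` maps the internal guardian records onto the API type, leaving absent guardians as nil and encoding the address bytes for display. The same mapping with simplified types; the `encode` callback stands in for the pubkey converter's `SilentEncode`:

```go
package main

import (
	"encoding/hex"
	"fmt"
)

type guardian struct {
	Address         []byte
	ActivationEpoch uint32
	ServiceUID      []byte
}

type apiGuardian struct {
	Address         string
	ActivationEpoch uint32
	ServiceUID      string
}

// toAPIGuardian converts one internal record; a nil input stays nil in the response
func toAPIGuardian(g *guardian, encode func([]byte) string) *apiGuardian {
	if g == nil {
		return nil
	}
	return &apiGuardian{
		Address:         encode(g.Address),
		ActivationEpoch: g.ActivationEpoch,
		ServiceUID:      string(g.ServiceUID),
	}
}

func main() {
	active := &guardian{Address: []byte{0x01}, ActivationEpoch: 10, ServiceUID: []byte("uid")}
	fmt.Println(toAPIGuardian(active, hex.EncodeToString).Address) // "01"
	fmt.Println(toAPIGuardian(nil, hex.EncodeToString))            // <nil>
}
```
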
*Node) commonTransactionValidation( whiteListerVerifiedTxs process.WhiteListHandler, whiteListRequest process.WhiteListHandler, checkSignature bool, -) (process.TxValidator, process.TxValidatorHandler, error) { +) (process.TxValidator, process.InterceptedTransactionHandler, error) { txValidator, err := dataValidators.NewTxValidator( n.stateComponents.AccountsAdapterAPI(), n.processComponents.ShardCoordinator(), whiteListRequest, n.coreComponents.AddressPubKeyConverter(), + n.coreComponents.TxVersionChecker(), common.MaxTxNonceDeltaAllowed, ) + if err != nil { log.Warn("node.ValidateTransaction: can not instantiate a TxValidator", "error", err) @@ -745,25 +833,14 @@ func (n *Node) checkSenderIsInShard(tx *transaction.Transaction) error { } // CreateTransaction will return a transaction from all the required fields -func (n *Node) CreateTransaction( - nonce uint64, - value string, - receiver string, - receiverUsername []byte, - sender string, - senderUsername []byte, - gasPrice uint64, - gasLimit uint64, - dataField []byte, - signatureHex string, - chainID string, - version uint32, - options uint32, -) (*transaction.Transaction, []byte, error) { - if version == 0 { +func (n *Node) CreateTransaction(txArgs *external.ArgsCreateTransaction) (*transaction.Transaction, []byte, error) { + if txArgs == nil { + return nil, nil, ErrNilCreateTransactionArgs + } + if txArgs.Version == 0 { return nil, nil, ErrInvalidTransactionVersion } - if chainID == "" || len(chainID) > len(n.coreComponents.ChainID()) { + if txArgs.ChainID == "" || len(txArgs.ChainID) > len(n.coreComponents.ChainID()) { return nil, nil, ErrInvalidChainIDInTransaction } addrPubKeyConverter := n.coreComponents.AddressPubKeyConverter() @@ -773,63 +850,77 @@ func (n *Node) CreateTransaction( if check.IfNil(n.stateComponents.AccountsAdapterAPI()) { return nil, nil, ErrNilAccountsAdapter } - if len(signatureHex) > n.addressSignatureHexSize { + if len(txArgs.SignatureHex) > n.addressSignatureHexSize { return nil, nil, ErrInvalidSignatureLength } - if uint32(len(receiver)) > n.coreComponents.EncodedAddressLen() { + if len(txArgs.GuardianSigHex) > n.addressSignatureHexSize { + return nil, nil, fmt.Errorf("%w for guardian signature", ErrInvalidSignatureLength) + } + + if uint32(len(txArgs.Receiver)) > n.coreComponents.EncodedAddressLen() { return nil, nil, fmt.Errorf("%w for receiver", ErrInvalidAddressLength) } - if uint32(len(sender)) > n.coreComponents.EncodedAddressLen() { + if uint32(len(txArgs.Sender)) > n.coreComponents.EncodedAddressLen() { return nil, nil, fmt.Errorf("%w for sender", ErrInvalidAddressLength) } - if len(senderUsername) > core.MaxUserNameLength { + if uint32(len(txArgs.Guardian)) > n.coreComponents.EncodedAddressLen() { + return nil, nil, fmt.Errorf("%w for guardian", ErrInvalidAddressLength) + } + if len(txArgs.SenderUsername) > core.MaxUserNameLength { return nil, nil, ErrInvalidSenderUsernameLength } - if len(receiverUsername) > core.MaxUserNameLength { + if len(txArgs.ReceiverUsername) > core.MaxUserNameLength { return nil, nil, ErrInvalidReceiverUsernameLength } - if len(dataField) > core.MegabyteSize { + if len(txArgs.DataField) > core.MegabyteSize { return nil, nil, ErrDataFieldTooBig } - receiverAddress, err := addrPubKeyConverter.Decode(receiver) + receiverAddress, err := addrPubKeyConverter.Decode(txArgs.Receiver) if err != nil { return nil, nil, errors.New("could not create receiver address from provided param") } - senderAddress, err := addrPubKeyConverter.Decode(sender) + senderAddress, err := 
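
`CreateTransaction` switches from thirteen positional parameters to a single `*external.ArgsCreateTransaction`, which is what lets this PR add `Guardian` and `GuardianSigHex` without breaking every call site. A compressed sketch of the parameter-object shape and the new nil guard, with validation trimmed to two checks:

```go
package main

import (
	"errors"
	"fmt"
)

// ArgsCreateTransaction mirrors the field set introduced in the diff
type ArgsCreateTransaction struct {
	Nonce            uint64
	Value            string
	Receiver         string
	ReceiverUsername []byte
	Sender           string
	SenderUsername   []byte
	GasPrice         uint64
	GasLimit         uint64
	DataField        []byte
	SignatureHex     string
	ChainID          string
	Version          uint32
	Options          uint32
	Guardian         string
	GuardianSigHex   string
}

var (
	errNilArgs        = errors.New("nil create transaction args")
	errInvalidVersion = errors.New("invalid transaction version")
)

func createTransaction(args *ArgsCreateTransaction) error {
	if args == nil {
		return errNilArgs // guard first: a nil struct means the caller is broken
	}
	if args.Version == 0 {
		return errInvalidVersion
	}
	return nil
}

func main() {
	fmt.Println(createTransaction(&ArgsCreateTransaction{Version: 1, ChainID: "1"})) // <nil>
	fmt.Println(createTransaction(nil))                                              // nil create transaction args
}
```

A side benefit visible throughout the test hunks below: call sites construct one struct, mutate a field, and re-submit, instead of repeating thirteen arguments.
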
addrPubKeyConverter.Decode(txArgs.Sender) if err != nil { return nil, nil, errors.New("could not create sender address from provided param") } - signatureBytes, err := hex.DecodeString(signatureHex) + signatureBytes, err := hex.DecodeString(txArgs.SignatureHex) if err != nil { return nil, nil, errors.New("could not fetch signature bytes") } - if len(value) > len(n.coreComponents.EconomicsData().GenesisTotalSupply().String())+1 { + if len(txArgs.Value) > len(n.coreComponents.EconomicsData().GenesisTotalSupply().String())+1 { return nil, nil, ErrTransactionValueLengthTooBig } - valAsBigInt, ok := big.NewInt(0).SetString(value, 10) + valAsBigInt, ok := big.NewInt(0).SetString(txArgs.Value, 10) if !ok { return nil, nil, ErrInvalidValue } tx := &transaction.Transaction{ - Nonce: nonce, + Nonce: txArgs.Nonce, Value: valAsBigInt, RcvAddr: receiverAddress, - RcvUserName: receiverUsername, + RcvUserName: txArgs.ReceiverUsername, SndAddr: senderAddress, - SndUserName: senderUsername, - GasPrice: gasPrice, - GasLimit: gasLimit, - Data: dataField, + SndUserName: txArgs.SenderUsername, + GasPrice: txArgs.GasPrice, + GasLimit: txArgs.GasLimit, + Data: txArgs.DataField, Signature: signatureBytes, - ChainID: []byte(chainID), - Version: version, - Options: options, + ChainID: []byte(txArgs.ChainID), + Version: txArgs.Version, + Options: txArgs.Options, + } + + if len(txArgs.Guardian) > 0 { + err = n.setTxGuardianData(txArgs.Guardian, txArgs.GuardianSigHex, tx) + if err != nil { + return nil, nil, err + } } var txHash []byte @@ -841,17 +932,37 @@ func (n *Node) CreateTransaction( return tx, txHash, nil } +func (n *Node) setTxGuardianData(guardian string, guardianSigHex string, tx *transaction.Transaction) error { + addrPubKeyConverter := n.coreComponents.AddressPubKeyConverter() + guardianAddress, err := addrPubKeyConverter.Decode(guardian) + if err != nil { + return errors.New("could not create guardian address from provided param") + } + guardianSigBytes, err := hex.DecodeString(guardianSigHex) + if err != nil { + return errors.New("could not fetch guardian signature bytes") + } + if !tx.HasOptionGuardianSet() { + return errors.New("transaction has guardian but guardian option not set") + } + + tx.GuardianAddr = guardianAddress + tx.GuardianSignature = guardianSigBytes + + return nil +} + // GetAccount will return account details for a given address func (n *Node) GetAccount(address string, options api.AccountQueryOptions) (api.AccountResponse, api.BlockInfo, error) { account, blockInfo, err := n.loadUserAccountHandlerByAddress(address, options) if err != nil { - apiBlockInfo, ok := extractApiBlockInfoIfErrAccountNotFoundAtBlock(err) - if ok { + adaptedBlockInfo, isEmptyAccount := extractBlockInfoIfNewAccount(err) + if isEmptyAccount { return api.AccountResponse{ Address: address, Balance: "0", DeveloperReward: "0", - }, apiBlockInfo, nil + }, adaptedBlockInfo, nil } return api.AccountResponse{}, api.BlockInfo{}, err @@ -879,6 +990,23 @@ func (n *Node) GetAccount(address string, options api.AccountQueryOptions) (api. 
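
`setTxGuardianData` refuses to attach guardian fields unless the transaction's options already carry the guardian flag, so a signature can never silently cover unguarded options. A sketch of that invariant; the bit position of the mask below is assumed purely for illustration, the real constant lives in mx-chain-core-go:

```go
package main

import (
	"errors"
	"fmt"
)

const maskGuardedTx uint32 = 1 << 1 // illustrative bit position only

type tx struct {
	Options           uint32
	GuardianAddr      []byte
	GuardianSignature []byte
}

func (t *tx) hasOptionGuardianSet() bool {
	return t.Options&maskGuardedTx != 0
}

// setTxGuardianData only fills the guardian fields when the option bit agrees
func setTxGuardianData(guardianAddr, guardianSig []byte, t *tx) error {
	if !t.hasOptionGuardianSet() {
		return errors.New("transaction has guardian but guardian option not set")
	}
	t.GuardianAddr = guardianAddr
	t.GuardianSignature = guardianSig
	return nil
}

func main() {
	guarded := &tx{Options: maskGuardedTx}
	fmt.Println(setTxGuardianData([]byte("guardian"), []byte("sig"), guarded)) // <nil>

	plain := &tx{}
	fmt.Println(setTxGuardianData([]byte("guardian"), []byte("sig"), plain)) // option error
}
```
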
}, blockInfo, nil } +func extractBlockInfoIfNewAccount(err error) (api.BlockInfo, bool) { + if err == nil { + return api.BlockInfo{}, true + } + + apiBlockInfo, ok := extractApiBlockInfoIfErrAccountNotFoundAtBlock(err) + if ok { + return apiBlockInfo, true + } + // we need this check since (in some situations) this error is also returned when a nil account handler is passed (empty account) + if err == ErrCannotCastAccountHandlerToUserAccountHandler { + return api.BlockInfo{}, true + } + + return api.BlockInfo{}, false +} + // GetCode returns the code for the given code hash func (n *Node) GetCode(codeHash []byte, options api.AccountQueryOptions) ([]byte, api.BlockInfo) { return n.loadAccountCode(codeHash, options) diff --git a/node/nodeRunner.go b/node/nodeRunner.go index 8ce94a1fb5a..f454152e982 100644 --- a/node/nodeRunner.go +++ b/node/nodeRunner.go @@ -64,6 +64,8 @@ import ( logger "github.com/multiversx/mx-chain-logger-go" ) +type nextOperationForNode int + const ( // TODO: remove this after better handling VM versions switching // defaultDelayBeforeScQueriesStartInSec represents the default delay before the sc query processor should start to allow external queries @@ -72,6 +74,9 @@ const ( maxTimeToClose = 10 * time.Second // SoftRestartMessage is the custom message used when the node does a soft restart operation SoftRestartMessage = "Shuffled out - soft restart" + + nextOperationShouldRestart nextOperationForNode = 1 + nextOperationShouldStop nextOperationForNode = 2 ) // nodeRunner holds the node runner configuration and controls running of a node @@ -320,7 +325,6 @@ func (nr *nodeRunner) executeOneComponentCreationCycle( log.Debug("creating state components") managedStateComponents, err := nr.CreateManagedStateComponents( managedCoreComponents, - managedBootstrapComponents, managedDataComponents, managedStatusCoreComponents, ) @@ -382,7 +386,6 @@ func (nr *nodeRunner) executeOneComponentCreationCycle( managedCoreComponents, managedNetworkComponents, managedBootstrapComponents, - managedDataComponents, managedStateComponents, nodesCoordinatorInstance, configs.ImportDbConfig.IsImportDBMode, @@ -437,7 +440,11 @@ func (nr *nodeRunner) executeOneComponentCreationCycle( return true, fmt.Errorf("%w when adding nodeShufflerOut in hardForkTrigger", err) } - managedStatusComponents.SetForkDetector(managedProcessComponents.ForkDetector()) + err = managedStatusComponents.SetForkDetector(managedProcessComponents.ForkDetector()) + if err != nil { + return true, err + } + err = managedStatusComponents.StartPolling() if err != nil { return true, err @@ -507,7 +514,7 @@ func (nr *nodeRunner) executeOneComponentCreationCycle( allowExternalVMQueriesChan := make(chan struct{}) log.Debug("updating the API service after creating the node facade") - ef, err := nr.createApiFacade(currentNode, webServerHandler, gasScheduleNotifier, allowExternalVMQueriesChan) + facadeInstance, err := nr.createApiFacade(currentNode, webServerHandler, gasScheduleNotifier, allowExternalVMQueriesChan) if err != nil { return true, err } @@ -529,20 +536,17 @@ func (nr *nodeRunner) executeOneComponentCreationCycle( sigs := make(chan os.Signal, 1) signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM) - err = waitForSignal( + nextOperation := waitForSignal( sigs, managedCoreComponents.ChanStopNodeProcess(), healthService, - ef, + facadeInstance, webServerHandler, currentNode, goRoutinesNumberStart, ) - if err != nil { - return true, nil - } - return false, nil + return nextOperation == nextOperationShouldStop, nil } func 
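
`extractBlockInfoIfNewAccount` is the helper all the getters above now share: it folds both "account not found at block" and the nil-handler cast error into a single "treat as empty account" answer. Roughly, with simplified types:

```go
package main

import (
	"errors"
	"fmt"
)

type apiBlockInfo struct{ Nonce uint64 }

var errCannotCast = errors.New("cannot cast account handler to user account handler")

// errAccountNotFoundAtBlock mimics state.NewErrAccountNotFoundAtBlock
type errAccountNotFoundAtBlock struct{ info apiBlockInfo }

func (e *errAccountNotFoundAtBlock) Error() string { return "account was not found at block" }

// extractBlockInfoIfNewAccount reports whether err just means "this account
// does not exist yet", and if so which block info to surface to the caller.
func extractBlockInfoIfNewAccount(err error) (apiBlockInfo, bool) {
	if err == nil {
		return apiBlockInfo{}, true
	}
	var notFound *errAccountNotFoundAtBlock
	if errors.As(err, &notFound) {
		return notFound.info, true
	}
	// a nil account handler surfaces as a cast error; treat it as empty too
	if errors.Is(err, errCannotCast) {
		return apiBlockInfo{}, true
	}
	return apiBlockInfo{}, false
}

func main() {
	info, ok := extractBlockInfoIfNewAccount(&errAccountNotFoundAtBlock{info: apiBlockInfo{Nonce: 37}})
	fmt.Println(info.Nonce, ok) // 37 true
	_, ok = extractBlockInfoIfNewAccount(errors.New("db failure"))
	fmt.Println(ok) // false
}
```
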
addSyncersToAccountsDB( @@ -804,10 +808,16 @@ func (nr *nodeRunner) createMetrics( metrics.SaveUint64Metric(statusCoreComponents.AppStatusHandler(), common.MetricGasPerDataByte, coreComponents.EconomicsData().GasPerDataByte()) metrics.SaveUint64Metric(statusCoreComponents.AppStatusHandler(), common.MetricMinGasPrice, coreComponents.EconomicsData().MinGasPrice()) metrics.SaveUint64Metric(statusCoreComponents.AppStatusHandler(), common.MetricMinGasLimit, coreComponents.EconomicsData().MinGasLimit()) + metrics.SaveUint64Metric(statusCoreComponents.AppStatusHandler(), common.MetricExtraGasLimitGuardedTx, coreComponents.EconomicsData().ExtraGasLimitGuardedTx()) metrics.SaveStringMetric(statusCoreComponents.AppStatusHandler(), common.MetricRewardsTopUpGradientPoint, coreComponents.EconomicsData().RewardsTopUpGradientPoint().String()) metrics.SaveStringMetric(statusCoreComponents.AppStatusHandler(), common.MetricTopUpFactor, fmt.Sprintf("%g", coreComponents.EconomicsData().RewardsTopUpFactor())) metrics.SaveStringMetric(statusCoreComponents.AppStatusHandler(), common.MetricGasPriceModifier, fmt.Sprintf("%g", coreComponents.EconomicsData().GasPriceModifier())) metrics.SaveUint64Metric(statusCoreComponents.AppStatusHandler(), common.MetricMaxGasPerTransaction, coreComponents.EconomicsData().MaxGasLimitPerTx()) + if nr.configs.PreferencesConfig.Preferences.FullArchive { + metrics.SaveStringMetric(statusCoreComponents.AppStatusHandler(), common.MetricPeerType, core.ObserverPeer.String()) + metrics.SaveStringMetric(statusCoreComponents.AppStatusHandler(), common.MetricPeerSubType, core.FullHistoryObserver.String()) + } + return nil } @@ -926,11 +936,11 @@ func waitForSignal( sigs chan os.Signal, chanStopNodeProcess chan endProcess.ArgEndProcess, healthService closing.Closer, - ef closing.Closer, + facade closing.Closer, httpServer shared.UpgradeableHttpServerHandler, currentNode *Node, goRoutinesNumberStart int, -) error { +) nextOperationForNode { var sig endProcess.ArgEndProcess reshuffled := false wrongConfig := false @@ -952,7 +962,7 @@ func waitForSignal( chanCloseComponents := make(chan struct{}) go func() { - closeAllComponents(healthService, ef, httpServer, currentNode, chanCloseComponents) + closeAllComponents(healthService, facade, httpServer, currentNode, chanCloseComponents) }() select { @@ -963,14 +973,14 @@ func waitForSignal( "error", "closeAllComponents did not finish on time", "stack", goroutines.GetGoRoutines()) - return fmt.Errorf("did NOT close all components gracefully") + return nextOperationShouldStop } if wrongConfig { // hang the node's process because it cannot continue with the current configuration and a restart doesn't // change this behaviour for { - log.Error("wrong configuration. stopped processing", "description", wrongConfigDescription) + log.Error("wrong configuration. 
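
`waitForSignal` no longer smuggles its decision through an `error` ("non-nil means restart"); it returns a small enum naming the next operation, and the caller compares against `nextOperationShouldStop`. The decision logic, reduced to its skeleton:

```go
package main

import "fmt"

type nextOperationForNode int

const (
	nextOperationShouldRestart nextOperationForNode = 1
	nextOperationShouldStop    nextOperationForNode = 2
)

// decideNextOperation captures waitForSignal's final branches
func decideNextOperation(reshuffled bool, closedGracefully bool) nextOperationForNode {
	if !closedGracefully {
		// components did not close in time: never restart on a dirty shutdown
		return nextOperationShouldStop
	}
	if reshuffled {
		return nextOperationShouldRestart
	}
	return nextOperationShouldStop
}

func main() {
	fmt.Println(decideNextOperation(true, true) == nextOperationShouldRestart) // true
	fmt.Println(decideNextOperation(true, false) == nextOperationShouldStop)   // true: dirty shutdown wins
}
```

This also explains the caller change: `return nextOperation == nextOperationShouldStop, nil` maps "restart" onto another component-creation cycle.
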
stopped the processing and left the node unclosed", "description", wrongConfigDescription) time.Sleep(1 * time.Minute) } } @@ -979,10 +989,10 @@ func waitForSignal( log.Info("=============================" + SoftRestartMessage + "==================================") core.DumpGoRoutinesToLog(goRoutinesNumberStart, log) - return nil + return nextOperationShouldRestart } - return fmt.Errorf("not reshuffled, closing") + return nextOperationShouldStop } func (nr *nodeRunner) logInformation( @@ -1036,7 +1046,6 @@ func (nr *nodeRunner) CreateManagedStatusComponents( managedCoreComponents mainFactory.CoreComponentsHolder, managedNetworkComponents mainFactory.NetworkComponentsHolder, managedBootstrapComponents mainFactory.BootstrapComponentsHolder, - managedDataComponents mainFactory.DataComponentsHolder, managedStateComponents mainFactory.StateComponentsHolder, nodesCoordinator nodesCoordinator.NodesCoordinator, isInImportMode bool, @@ -1049,7 +1058,6 @@ func (nr *nodeRunner) CreateManagedStatusComponents( NodesCoordinator: nodesCoordinator, EpochStartNotifier: managedCoreComponents.EpochStartNotifierWithConfirm(), CoreComponents: managedCoreComponents, - DataComponents: managedDataComponents, NetworkComponents: managedNetworkComponents, StateComponents: managedStateComponents, IsInImportMode: isInImportMode, @@ -1083,18 +1091,19 @@ func (nr *nodeRunner) logSessionInformation( statsFolder, configurationPaths.GasScheduleDirectoryName, []string{ + configurationPaths.ApiRoutes, configurationPaths.MainConfig, configurationPaths.Economics, - configurationPaths.Ratings, - configurationPaths.Preferences, - configurationPaths.P2p, + configurationPaths.Epoch, + configurationPaths.RoundActivation, + configurationPaths.External, configurationPaths.Genesis, + configurationPaths.SmartContracts, configurationPaths.Nodes, - configurationPaths.ApiRoutes, - configurationPaths.External, + configurationPaths.P2p, + configurationPaths.Preferences, + configurationPaths.Ratings, configurationPaths.SystemSC, - configurationPaths.RoundActivation, - configurationPaths.Epoch, }) statsFile := filepath.Join(statsFolder, "session.info") @@ -1264,6 +1273,7 @@ func (nr *nodeRunner) CreateManagedDataComponents( Crypto: crypto, CurrentEpoch: storerEpoch, CreateTrieEpochRootHashStorer: configs.ImportDbConfig.ImportDbSaveTrieEpochRootHash, + NodeProcessingMode: common.GetNodeProcessingMode(nr.configs.ImportDbConfig), SnapshotsEnabled: configs.FlagsConfig.SnapshotsEnabled, } @@ -1297,21 +1307,15 @@ func (nr *nodeRunner) CreateManagedDataComponents( // CreateManagedStateComponents is the managed state components factory func (nr *nodeRunner) CreateManagedStateComponents( coreComponents mainFactory.CoreComponentsHolder, - bootstrapComponents mainFactory.BootstrapComponentsHolder, dataComponents mainFactory.DataComponentsHandler, statusCoreComponents mainFactory.StatusCoreComponentsHolder, ) (mainFactory.StateComponentsHandler, error) { - processingMode := common.Normal - if nr.configs.ImportDbConfig.IsImportDBMode { - processingMode = common.ImportDb - } stateArgs := stateComp.StateComponentsFactoryArgs{ Config: *nr.configs.GeneralConfig, - ShardCoordinator: bootstrapComponents.ShardCoordinator(), Core: coreComponents, StatusCore: statusCoreComponents, StorageService: dataComponents.StorageService(), - ProcessingMode: processingMode, + ProcessingMode: common.GetNodeProcessingMode(nr.configs.ImportDbConfig), ShouldSerializeSnapshots: nr.configs.FlagsConfig.SerializeSnapshots, SnapshotsEnabled: nr.configs.FlagsConfig.SnapshotsEnabled, 
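
The two hand-rolled "is this an import-db run?" checks collapse into one call to `common.GetNodeProcessingMode`, used by both the data and state component factories. A sketch of what that helper amounts to, with the types simplified:

```go
package main

import "fmt"

type NodeProcessingMode int

const (
	Normal NodeProcessingMode = iota
	ImportDb
)

type ImportDbConfig struct {
	IsImportDBMode bool
}

// GetNodeProcessingMode maps the import-db flag onto the processing-mode enum
func GetNodeProcessingMode(cfg *ImportDbConfig) NodeProcessingMode {
	if cfg.IsImportDBMode {
		return ImportDb
	}
	return Normal
}

func main() {
	fmt.Println(GetNodeProcessingMode(&ImportDbConfig{IsImportDBMode: true}) == ImportDb) // true
	fmt.Println(GetNodeProcessingMode(&ImportDbConfig{}) == Normal)                       // true
}
```
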
ChainHandler: dataComponents.Blockchain(), @@ -1581,11 +1585,12 @@ func cleanupStorageIfNecessary(workingDir string, cleanupStorage bool) error { return os.RemoveAll(dbPath) } -func copyConfigToStatsFolder(statsFolder string, gasScheduleFolder string, configs []string) { +func copyConfigToStatsFolder(statsFolder string, gasScheduleDirectory string, configs []string) { err := os.MkdirAll(statsFolder, os.ModePerm) log.LogIfError(err) - err = copyDirectory(gasScheduleFolder, statsFolder) + newGasScheduleDirectory := path.Join(statsFolder, filepath.Base(gasScheduleDirectory)) + err = copyDirectory(gasScheduleDirectory, newGasScheduleDirectory) log.LogIfError(err) for _, configFile := range configs { @@ -1593,7 +1598,6 @@ func copyConfigToStatsFolder(statsFolder string, gasScheduleFolder string, confi } } -// TODO: add some unit tests func copyDirectory(source string, destination string) error { fileDescriptors, err := ioutil.ReadDir(source) if err != nil { @@ -1612,21 +1616,21 @@ func copyDirectory(source string, destination string) error { for _, fd := range fileDescriptors { srcFilePath := path.Join(source, fd.Name()) - dstFilePath := path.Join(destination, fd.Name()) if fd.IsDir() { + dstFilePath := path.Join(destination, filepath.Base(srcFilePath)) err = copyDirectory(srcFilePath, dstFilePath) log.LogIfError(err) } else { - copySingleFile(dstFilePath, srcFilePath) + copySingleFile(destination, srcFilePath) } } return nil } -func copySingleFile(folder string, configFile string) { - fileName := filepath.Base(configFile) +func copySingleFile(destinationDirectory string, sourceFile string) { + fileName := filepath.Base(sourceFile) - source, err := core.OpenFile(configFile) + source, err := core.OpenFile(sourceFile) if err != nil { return } @@ -1637,7 +1641,7 @@ func copySingleFile(folder string, configFile string) { } }() - destPath := filepath.Join(folder, fileName) + destPath := filepath.Join(destinationDirectory, fileName) destination, err := os.Create(destPath) if err != nil { return diff --git a/node/nodeRunner_test.go b/node/nodeRunner_test.go new file mode 100644 index 00000000000..ce0242db3f7 --- /dev/null +++ b/node/nodeRunner_test.go @@ -0,0 +1,438 @@ +//go:build !race +// +build !race + +package node + +import ( + "io/ioutil" + "os" + "os/exec" + "path" + "strings" + "syscall" + "testing" + "time" + + "github.com/multiversx/mx-chain-core-go/data/endProcess" + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/node/mock" + "github.com/multiversx/mx-chain-go/testscommon/api" + logger "github.com/multiversx/mx-chain-logger-go" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func createConfigs(tb testing.TB) *config.Configs { + tempDir := tb.TempDir() + + originalConfigsPath := "../cmd/node/config" + newConfigsPath := path.Join(tempDir, "config") + + cmd := exec.Command("cp", "-r", originalConfigsPath, newConfigsPath) + err := cmd.Run() + require.Nil(tb, err) + + newGenesisSmartContractsFilename := path.Join(newConfigsPath, "genesisSmartContracts.json") + correctTestPathInGenesisSmartContracts(tb, tempDir, newGenesisSmartContractsFilename) + + apiConfig, err := common.LoadApiConfig(path.Join(newConfigsPath, "api.toml")) + require.Nil(tb, err) + + generalConfig, err := common.LoadMainConfig(path.Join(newConfigsPath, "config.toml")) + require.Nil(tb, err) + + ratingsConfig, err := common.LoadRatingsConfig(path.Join(newConfigsPath, "ratings.toml")) + require.Nil(tb, err) + + 
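
`copyDirectory` previously copied a source directory's children directly into `destination`, spilling them one level up; the fix joins the child directory name onto the destination before recursing, and `copySingleFile` now takes the destination directory plus the source path. The corrected shape, with errors returned instead of logged to keep the sketch short:

```go
package main

import (
	"io"
	"io/ioutil"
	"os"
	"path"
	"path/filepath"
)

func copyDirectory(source string, destination string) error {
	entries, err := ioutil.ReadDir(source)
	if err != nil {
		return err
	}
	if err = os.MkdirAll(destination, os.ModePerm); err != nil {
		return err
	}
	for _, fd := range entries {
		srcPath := path.Join(source, fd.Name())
		if fd.IsDir() {
			// recurse with the child directory appended to the destination
			if err = copyDirectory(srcPath, path.Join(destination, fd.Name())); err != nil {
				return err
			}
			continue
		}
		if err = copySingleFile(destination, srcPath); err != nil {
			return err
		}
	}
	return nil
}

func copySingleFile(destinationDirectory string, sourceFile string) error {
	src, err := os.Open(sourceFile)
	if err != nil {
		return err
	}
	defer src.Close()

	dst, err := os.Create(filepath.Join(destinationDirectory, filepath.Base(sourceFile)))
	if err != nil {
		return err
	}
	defer dst.Close()

	_, err = io.Copy(dst, src)
	return err
}

func main() {
	_ = copyDirectory("src", "dst") // reproduces src/** under dst/**
}
```

`TestCopyDirectory` below exercises exactly this: a nested `src` tree must land under `dst` with the same relative layout.
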
economicsConfig, err := common.LoadEconomicsConfig(path.Join(newConfigsPath, "economics.toml")) + require.Nil(tb, err) + + prefsConfig, err := common.LoadPreferencesConfig(path.Join(newConfigsPath, "prefs.toml")) + require.Nil(tb, err) + + p2pConfig, err := common.LoadP2PConfig(path.Join(newConfigsPath, "p2p.toml")) + require.Nil(tb, err) + + externalConfig, err := common.LoadExternalConfig(path.Join(newConfigsPath, "external.toml")) + require.Nil(tb, err) + + systemSCConfig, err := common.LoadSystemSmartContractsConfig(path.Join(newConfigsPath, "systemSmartContractsConfig.toml")) + require.Nil(tb, err) + + epochConfig, err := common.LoadEpochConfig(path.Join(newConfigsPath, "enableEpochs.toml")) + require.Nil(tb, err) + + roundConfig, err := common.LoadRoundConfig(path.Join(newConfigsPath, "enableRounds.toml")) + require.Nil(tb, err) + + // make the node pass the network wait constraints + p2pConfig.Node.MinNumPeersToWaitForOnBootstrap = 0 + p2pConfig.Node.ThresholdMinConnectedPeers = 0 + + return &config.Configs{ + GeneralConfig: generalConfig, + ApiRoutesConfig: apiConfig, + EconomicsConfig: economicsConfig, + SystemSCConfig: systemSCConfig, + RatingsConfig: ratingsConfig, + PreferencesConfig: prefsConfig, + ExternalConfig: externalConfig, + P2pConfig: p2pConfig, + FlagsConfig: &config.ContextFlagsConfig{ + WorkingDir: tempDir, + NoKeyProvided: true, + Version: "test version", + DbDir: path.Join(tempDir, "db"), + }, + ImportDbConfig: &config.ImportDbConfig{}, + ConfigurationPathsHolder: &config.ConfigurationPathsHolder{ + GasScheduleDirectoryName: path.Join(newConfigsPath, "gasSchedules"), + Nodes: path.Join(newConfigsPath, "nodesSetup.json"), + Genesis: path.Join(newConfigsPath, "genesis.json"), + SmartContracts: newGenesisSmartContractsFilename, + ValidatorKey: "validatorKey.pem", + }, + EpochConfig: epochConfig, + RoundConfig: roundConfig, + } +} + +func correctTestPathInGenesisSmartContracts(tb testing.TB, tempDir string, newGenesisSmartContractsFilename string) { + input, err := ioutil.ReadFile(newGenesisSmartContractsFilename) + require.Nil(tb, err) + + lines := strings.Split(string(input), "\n") + for i, line := range lines { + if strings.Contains(line, "./config") { + lines[i] = strings.Replace(line, "./config", path.Join(tempDir, "config"), 1) + } + } + output := strings.Join(lines, "\n") + err = ioutil.WriteFile(newGenesisSmartContractsFilename, []byte(output), 0644) + require.Nil(tb, err) +} + +func TestNewNodeRunner(t *testing.T) { + t.Parallel() + + t.Run("nil configs should error", func(t *testing.T) { + t.Parallel() + + expectedErrorString := "nil configs provided" + runner, err := NewNodeRunner(nil) + assert.Nil(t, runner) + assert.Equal(t, expectedErrorString, err.Error()) + }) + t.Run("with valid configs should work", func(t *testing.T) { + t.Parallel() + + configs := createConfigs(t) + runner, err := NewNodeRunner(configs) + assert.NotNil(t, runner) + assert.Nil(t, err) + }) +} + +func TestNodeRunner_StartAndCloseNodeUsingSIGINT(t *testing.T) { + t.Parallel() + + configs := createConfigs(t) + runner, _ := NewNodeRunner(configs) + + trigger := mock.NewApplicationRunningTrigger() + err := logger.AddLogObserver(trigger, &logger.PlainFormatter{}) + require.Nil(t, err) + + // start a go routine that will send the SIGINT message after 1 second after the node has started + go func() { + timeout := time.Minute * 5 + select { + case <-trigger.ChanClose(): + case <-time.After(timeout): + require.Fail(t, "timeout waiting for application to start") + } + time.Sleep(time.Second) + 
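
`createConfigs` clones the real `../cmd/node/config` tree into `t.TempDir()` so each test mutates a private copy and never the repo files. The isolation helper on its own, as it would appear in a `_test.go` file; note it shells out to `cp -r`, so, like the test above, it is POSIX-only:

```go
package node_test

import (
	"os/exec"
	"path"
	"testing"
)

// createTestConfigDir clones the tracked config tree into a per-test temp dir
func createTestConfigDir(tb testing.TB) string {
	tempDir := tb.TempDir() // removed automatically when the test ends

	dst := path.Join(tempDir, "config")
	if err := exec.Command("cp", "-r", "../cmd/node/config", dst).Run(); err != nil {
		tb.Fatalf("could not copy configs: %v", err)
	}
	return dst
}

func TestConfigsAreIsolated(t *testing.T) {
	dir := createTestConfigDir(t)
	if dir == "" {
		t.Fatal("expected a non-empty config dir")
	}
}
```
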
+ log.Info("sending SIGINT to self") + errKill := syscall.Kill(syscall.Getpid(), syscall.SIGINT) + assert.Nil(t, errKill) + }() + + err = runner.Start() + assert.Nil(t, err) +} + +func TestCopyDirectory(t *testing.T) { + t.Parallel() + + file1Name := "file1.toml" + file1Contents := []byte("file1") + file2Name := "file2.toml" + file2Contents := []byte("file2") + file3Name := "file3.toml" + file3Contents := []byte("file3") + file4Name := "file4.toml" + file4Contents := []byte("file4") + + tempDir := t.TempDir() + + // generating dummy structure like + // file1 + // src + // +- file2 + // +- dir1 + // +- file3 + // +- dir2 + // +- file4 + + err := ioutil.WriteFile(path.Join(tempDir, file1Name), file1Contents, os.ModePerm) + require.Nil(t, err) + err = os.MkdirAll(path.Join(tempDir, "src", "dir1"), os.ModePerm) + require.Nil(t, err) + err = os.MkdirAll(path.Join(tempDir, "src", "dir2"), os.ModePerm) + require.Nil(t, err) + err = ioutil.WriteFile(path.Join(tempDir, "src", file2Name), file2Contents, os.ModePerm) + require.Nil(t, err) + err = ioutil.WriteFile(path.Join(tempDir, "src", "dir1", file3Name), file3Contents, os.ModePerm) + require.Nil(t, err) + err = ioutil.WriteFile(path.Join(tempDir, "src", "dir2", file4Name), file4Contents, os.ModePerm) + require.Nil(t, err) + + err = copyDirectory(path.Join(tempDir, "src"), path.Join(tempDir, "dst")) + require.Nil(t, err) + copySingleFile(path.Join(tempDir, "dst"), path.Join(tempDir, file1Name)) + + // after copy, check that the files are the same + buff, err := ioutil.ReadFile(path.Join(tempDir, "dst", file1Name)) + require.Nil(t, err) + assert.Equal(t, file1Contents, buff) + + buff, err = ioutil.ReadFile(path.Join(tempDir, "dst", file2Name)) + require.Nil(t, err) + assert.Equal(t, file2Contents, buff) + + buff, err = ioutil.ReadFile(path.Join(tempDir, "dst", "dir1", file3Name)) + require.Nil(t, err) + assert.Equal(t, file3Contents, buff) + + buff, err = ioutil.ReadFile(path.Join(tempDir, "dst", "dir2", file4Name)) + require.Nil(t, err) + assert.Equal(t, file4Contents, buff) +} + +func TestWaitForSignal(t *testing.T) { + t.Parallel() + + closedCalled := make(map[string]struct{}) + healthServiceClosableComponent := &mock.CloserStub{ + CloseCalled: func() error { + closedCalled["healthService"] = struct{}{} + return nil + }, + } + facadeClosableComponent := &mock.CloserStub{ + CloseCalled: func() error { + closedCalled["facade"] = struct{}{} + return nil + }, + } + httpClosableComponent := &api.UpgradeableHttpServerHandlerStub{ + CloseCalled: func() error { + closedCalled["http"] = struct{}{} + return nil + }, + } + internalNodeClosableComponent1 := &mock.CloserStub{ + CloseCalled: func() error { + closedCalled["node closable component 1"] = struct{}{} + return nil + }, + } + internalNodeClosableComponent2 := &mock.CloserStub{ + CloseCalled: func() error { + closedCalled["node closable component 2"] = struct{}{} + return nil + }, + } + n, _ := NewNode() + n.closableComponents = append(n.closableComponents, internalNodeClosableComponent1) + n.closableComponents = append(n.closableComponents, internalNodeClosableComponent2) + + // do not run these tests in parallel as they are using the same map + t.Run("should return nextOperationShouldStop if SIGINT is received", func(t *testing.T) { + closedCalled = make(map[string]struct{}) + stopChan := make(chan endProcess.ArgEndProcess) + sigs := make(chan os.Signal, 1) + + go func() { + time.Sleep(time.Millisecond * 100) // wait for the waitForSignal to start + sigs <- syscall.SIGINT + }() + + nextOperation := 
waitForSignal( + sigs, + stopChan, + healthServiceClosableComponent, + facadeClosableComponent, + httpClosableComponent, + n, + 1, + ) + + assert.Equal(t, nextOperationShouldStop, nextOperation) + checkCloseCalledMap(t, closedCalled) + }) + t.Run("should return nextOperationShouldRestart if shuffled out is received", func(t *testing.T) { + closedCalled = make(map[string]struct{}) + stopChan := make(chan endProcess.ArgEndProcess, 1) + sigs := make(chan os.Signal) + + go func() { + time.Sleep(time.Millisecond * 100) // wait for the waitForSignal to start + stopChan <- endProcess.ArgEndProcess{ + Reason: common.ShuffledOut, + Description: "test", + } + }() + + nextOperation := waitForSignal( + sigs, + stopChan, + healthServiceClosableComponent, + facadeClosableComponent, + httpClosableComponent, + n, + 1, + ) + + assert.Equal(t, nextOperationShouldRestart, nextOperation) + checkCloseCalledMap(t, closedCalled) + }) + t.Run("wrong configuration should not stop the node", func(t *testing.T) { + closedCalled = make(map[string]struct{}) + stopChan := make(chan endProcess.ArgEndProcess, 1) + sigs := make(chan os.Signal) + + go func() { + time.Sleep(time.Millisecond * 100) // wait for the waitForSignal to start + stopChan <- endProcess.ArgEndProcess{ + Reason: common.WrongConfiguration, + Description: "test", + } + }() + + functionFinished := make(chan struct{}) + go func() { + _ = waitForSignal( + sigs, + stopChan, + healthServiceClosableComponent, + facadeClosableComponent, + httpClosableComponent, + n, + 1, + ) + close(functionFinished) + }() + + select { + case <-functionFinished: + assert.Fail(t, "function should not have finished") + case <-time.After(maxTimeToClose + time.Second*2): + // ok, timeout reached, function did not finish + } + + checkCloseCalledMap(t, closedCalled) + }) + + delayedComponent := &mock.CloserStub{ + CloseCalled: func() error { + time.Sleep(time.Minute) + return nil + }, + } + n.closableComponents = append(n.closableComponents, delayedComponent) + + t.Run("force closing the node when SIGINT is received", func(t *testing.T) { + closedCalled = make(map[string]struct{}) + stopChan := make(chan endProcess.ArgEndProcess) + sigs := make(chan os.Signal, 1) + + go func() { + time.Sleep(time.Millisecond * 100) // wait for the waitForSignal to start + sigs <- syscall.SIGINT + }() + + nextOperation := waitForSignal( + sigs, + stopChan, + healthServiceClosableComponent, + facadeClosableComponent, + httpClosableComponent, + n, + 1, + ) + + // these exceptions appear because the delayedComponent prevented the call of the first 2 components + // as the closable components are called in reversed order + exceptions := []string{"node closable component 1", "node closable component 2"} + assert.Equal(t, nextOperationShouldStop, nextOperation) + checkCloseCalledMap(t, closedCalled, exceptions...) 
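
The "wrong configuration" subtest asserts that `waitForSignal` never returns: run it on a goroutine, close a channel when it finishes, and treat "channel still open at the deadline" as success. The skeleton of that pattern:

```go
package main

import (
	"fmt"
	"time"
)

// neverReturns stands in for waitForSignal's wrong-configuration loop
func neverReturns() {
	for {
		time.Sleep(time.Minute)
	}
}

func main() {
	functionFinished := make(chan struct{})
	go func() {
		neverReturns()
		close(functionFinished)
	}()

	select {
	case <-functionFinished:
		fmt.Println("FAIL: function should not have finished")
	case <-time.After(200 * time.Millisecond):
		fmt.Println("ok: still running at the deadline") // expected branch
	}
}
```
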
+ }) + t.Run("force closing the node when shuffle out is received", func(t *testing.T) { + closedCalled = make(map[string]struct{}) + stopChan := make(chan endProcess.ArgEndProcess, 1) + sigs := make(chan os.Signal) + + go func() { + time.Sleep(time.Millisecond * 100) // wait for the waitForSignal to start + stopChan <- endProcess.ArgEndProcess{ + Reason: common.ShuffledOut, + Description: "test", + } + }() + + nextOperation := waitForSignal( + sigs, + stopChan, + healthServiceClosableComponent, + facadeClosableComponent, + httpClosableComponent, + n, + 1, + ) + + // these exceptions appear because the delayedComponent prevented the call of the first 2 components + // as the closable components are called in reversed order + exceptions := []string{"node closable component 1", "node closable component 2"} + // in this case, even if the node is shuffled out, it should stop as some components were not closed + assert.Equal(t, nextOperationShouldStop, nextOperation) + checkCloseCalledMap(t, closedCalled, exceptions...) + }) +} + +func checkCloseCalledMap(tb testing.TB, closedCalled map[string]struct{}, exceptions ...string) { + allKeys := []string{"healthService", "facade", "http", "node closable component 1", "node closable component 2"} + numKeys := 0 + for _, key := range allKeys { + if contains(key, exceptions) { + continue + } + + numKeys++ + assert.Contains(tb, closedCalled, key) + } + + assert.Equal(tb, numKeys, len(closedCalled)) +} + +func contains(needle string, haystack []string) bool { + for _, element := range haystack { + if needle == element { + return true + } + } + + return false +} diff --git a/node/nodeTesting.go b/node/nodeTesting.go index b7373f14dc1..29683432508 100644 --- a/node/nodeTesting.go +++ b/node/nodeTesting.go @@ -222,12 +222,12 @@ func (n *Node) generateAndSignSingleTx( Version: minTxVersion, } - marshalizedTx, err := tx.GetDataForSigning(n.coreComponents.AddressPubKeyConverter(), n.coreComponents.TxMarshalizer()) + txSigningData, err := tx.GetDataForSigning(n.coreComponents.AddressPubKeyConverter(), n.coreComponents.TxMarshalizer(), n.coreComponents.TxSignHasher()) if err != nil { return nil, nil, errors.New("could not marshal transaction") } - sig, err := n.cryptoComponents.TxSingleSigner().Sign(sk, marshalizedTx) + sig, err := n.cryptoComponents.TxSingleSigner().Sign(sk, txSigningData) if err != nil { return nil, nil, errors.New("could not sign the transaction") } diff --git a/node/nodeTesting_test.go b/node/nodeTesting_test.go index 350d752c51e..8718ea5c8ea 100644 --- a/node/nodeTesting_test.go +++ b/node/nodeTesting_test.go @@ -21,6 +21,7 @@ import ( "github.com/multiversx/mx-chain-go/testscommon" "github.com/multiversx/mx-chain-go/testscommon/cryptoMocks" dataRetrieverMock "github.com/multiversx/mx-chain-go/testscommon/dataRetriever" + factoryMocks "github.com/multiversx/mx-chain-go/testscommon/factory" "github.com/multiversx/mx-chain-go/testscommon/p2pmocks" stateMock "github.com/multiversx/mx-chain-go/testscommon/state" "github.com/multiversx/mx-chain-go/testscommon/storageManager" @@ -394,7 +395,6 @@ func getDefaultCryptoComponents() *factoryMock.CryptoComponentsMock { PrivKey: &mock.PrivateKeyStub{}, P2pPrivKey: &mock.PrivateKeyStub{}, PubKeyString: "pubKey", - PrivKeyBytes: []byte("privKey"), PubKeyBytes: []byte("pubKey"), BlockSig: &mock.SingleSignerMock{}, TxSig: &mock.SingleSignerMock{}, @@ -409,8 +409,8 @@ func getDefaultCryptoComponents() *factoryMock.CryptoComponentsMock { } } -func getDefaultStateComponents() *testscommon.StateComponentsMock { 
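
In `nodeTesting.go`, `GetDataForSigning` now also receives the `TxSignHasher`, so the signed payload can be a hash of the serialized transaction rather than the raw serialized bytes. A generic sketch of that flow, using ed25519 and JSON purely for brevity; the real code uses the node's own marshalizer and hasher:

```go
package main

import (
	"crypto/ed25519"
	"crypto/sha256"
	"encoding/json"
	"fmt"
)

type tx struct {
	Nonce uint64 `json:"nonce"`
	Value string `json:"value"`
}

// dataForSigning marshals the tx and, when hashSigning is set, hashes it first
func dataForSigning(t *tx, hashSigning bool) ([]byte, error) {
	raw, err := json.Marshal(t)
	if err != nil {
		return nil, err
	}
	if hashSigning {
		h := sha256.Sum256(raw)
		return h[:], nil
	}
	return raw, nil
}

func main() {
	_, sk, _ := ed25519.GenerateKey(nil) // nil reader falls back to crypto/rand
	payload, _ := dataForSigning(&tx{Nonce: 1, Value: "10"}, true)
	sig := ed25519.Sign(sk, payload)
	fmt.Println(len(sig)) // 64
}
```
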
- return &testscommon.StateComponentsMock{ +func getDefaultStateComponents() *factoryMocks.StateComponentsMock { + return &factoryMocks.StateComponentsMock{ PeersAcc: &stateMock.AccountsStub{}, Accounts: &stateMock.AccountsStub{}, AccountsAPI: &stateMock.AccountsStub{}, diff --git a/node/node_test.go b/node/node_test.go index 2d3668d5634..a22e98e1b58 100644 --- a/node/node_test.go +++ b/node/node_test.go @@ -23,6 +23,7 @@ import ( "github.com/multiversx/mx-chain-core-go/data/api" "github.com/multiversx/mx-chain-core-go/data/block" "github.com/multiversx/mx-chain-core-go/data/esdt" + "github.com/multiversx/mx-chain-core-go/data/guardians" "github.com/multiversx/mx-chain-core-go/data/transaction" "github.com/multiversx/mx-chain-core-go/hashing" "github.com/multiversx/mx-chain-core-go/hashing/sha256" @@ -37,10 +38,12 @@ import ( heartbeatData "github.com/multiversx/mx-chain-go/heartbeat/data" integrationTestsMock "github.com/multiversx/mx-chain-go/integrationTests/mock" "github.com/multiversx/mx-chain-go/node" + "github.com/multiversx/mx-chain-go/node/external" "github.com/multiversx/mx-chain-go/node/mock" nodeMockFactory "github.com/multiversx/mx-chain-go/node/mock/factory" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/state" + "github.com/multiversx/mx-chain-go/storage" "github.com/multiversx/mx-chain-go/testscommon" "github.com/multiversx/mx-chain-go/testscommon/bootstrapMocks" dataRetrieverMock "github.com/multiversx/mx-chain-go/testscommon/dataRetriever" @@ -48,12 +51,13 @@ import ( "github.com/multiversx/mx-chain-go/testscommon/economicsmocks" "github.com/multiversx/mx-chain-go/testscommon/epochNotifier" factoryTests "github.com/multiversx/mx-chain-go/testscommon/factory" + "github.com/multiversx/mx-chain-go/testscommon/guardianMocks" "github.com/multiversx/mx-chain-go/testscommon/mainFactoryMocks" "github.com/multiversx/mx-chain-go/testscommon/p2pmocks" "github.com/multiversx/mx-chain-go/testscommon/shardingMocks" stateMock "github.com/multiversx/mx-chain-go/testscommon/state" statusHandlerMock "github.com/multiversx/mx-chain-go/testscommon/statusHandler" - "github.com/multiversx/mx-chain-go/testscommon/storage" + mockStorage "github.com/multiversx/mx-chain-go/testscommon/storage" "github.com/multiversx/mx-chain-go/testscommon/storageManager" trieMock "github.com/multiversx/mx-chain-go/testscommon/trie" "github.com/multiversx/mx-chain-go/testscommon/txsSenderMock" @@ -63,6 +67,25 @@ import ( "github.com/stretchr/testify/require" ) +type testBlockInfo struct { +} + +func (t testBlockInfo) apiResult() api.BlockInfo { + return api.BlockInfo{ + Nonce: 37, + Hash: hex.EncodeToString([]byte("hash")), + RootHash: hex.EncodeToString([]byte("root")), + } +} + +func (t testBlockInfo) forProcessing() common.BlockInfo { + hash := []byte("hash") + rHash := []byte("root") + return holders.NewBlockInfo(hash, 37, rHash) +} + +var dummyBlockInfo = testBlockInfo{} + func createMockPubkeyConverter() *testscommon.PubkeyConverterMock { return testscommon.NewPubkeyConverterMock(32) } @@ -114,7 +137,7 @@ func TestNewNode(t *testing.T) { n, err := node.NewNode() assert.Nil(t, err) - assert.False(t, check.IfNil(n)) + assert.NotNil(t, n) } func TestNewNode_NilOptionShouldError(t *testing.T) { @@ -193,6 +216,44 @@ func TestGetBalance_AccountNotFoundShouldReturnZeroBalance(t *testing.T) { assert.Equal(t, big.NewInt(0), balance) } +func TestNode_GetBalanceAccNotFoundShouldReturnEmpty(t *testing.T) { + t.Parallel() + + accDB := &stateMock.AccountsStub{ + 
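
The `testBlockInfo` fixture returns the same block in both representations, hex strings for the API side and raw bytes for the processing side, so a test can inject `forProcessing()` into the accounts stub and assert `apiResult()` on the getter's output. The idea in miniature:

```go
package main

import (
	"encoding/hex"
	"fmt"
)

type apiBlockInfo struct {
	Nonce    uint64
	Hash     string
	RootHash string
}

type procBlockInfo struct {
	Nonce    uint64
	Hash     []byte
	RootHash []byte
}

type testBlockInfo struct{}

// apiResult is what the node's getter is expected to return
func (t testBlockInfo) apiResult() apiBlockInfo {
	return apiBlockInfo{
		Nonce:    37,
		Hash:     hex.EncodeToString([]byte("hash")),
		RootHash: hex.EncodeToString([]byte("root")),
	}
}

// forProcessing is what the stubbed accounts adapter hands back
func (t testBlockInfo) forProcessing() procBlockInfo {
	return procBlockInfo{Nonce: 37, Hash: []byte("hash"), RootHash: []byte("root")}
}

func main() {
	fmt.Println(testBlockInfo{}.apiResult().Hash) // "68617368"
}
```

Because both views come from one fixture, the assertions cannot drift apart when the dummy block data changes.
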
GetAccountWithBlockInfoCalled: func(address []byte, options common.RootHashHolder) (vmcommon.AccountHandler, common.BlockInfo, error) { + return nil, nil, state.NewErrAccountNotFoundAtBlock(dummyBlockInfo.forProcessing()) + }, + RecreateTrieCalled: func(_ []byte) error { + return nil + }, + } + + dataComponents := getDefaultDataComponents() + coreComponents := getDefaultCoreComponents() + coreComponents.IntMarsh = getMarshalizer() + coreComponents.VmMarsh = getMarshalizer() + coreComponents.Hash = getHasher() + + stateComponents := getDefaultStateComponents() + args := state.ArgsAccountsRepository{ + FinalStateAccountsWrapper: accDB, + CurrentStateAccountsWrapper: accDB, + HistoricalStateAccountsWrapper: accDB, + } + stateComponents.AccountsRepo, _ = state.NewAccountsRepository(args) + + n, _ := node.NewNode( + node.WithDataComponents(dataComponents), + node.WithCoreComponents(coreComponents), + node.WithStateComponents(stateComponents), + ) + + balance, bInfo, err := n.GetBalance(testscommon.TestAddressAlice, api.AccountQueryOptions{}) + require.Nil(t, err) + require.Equal(t, dummyBlockInfo.apiResult(), bInfo) + require.Empty(t, balance) +} + func TestGetBalance(t *testing.T) { t.Parallel() @@ -257,6 +318,44 @@ func TestGetUsername(t *testing.T) { assert.Equal(t, string(expectedUsername), username) } +func TestNode_GetCodeHashAccNotFoundShouldReturnEmpty(t *testing.T) { + t.Parallel() + + accDB := &stateMock.AccountsStub{ + GetAccountWithBlockInfoCalled: func(address []byte, options common.RootHashHolder) (vmcommon.AccountHandler, common.BlockInfo, error) { + return nil, nil, state.NewErrAccountNotFoundAtBlock(dummyBlockInfo.forProcessing()) + }, + RecreateTrieCalled: func(_ []byte) error { + return nil + }, + } + + dataComponents := getDefaultDataComponents() + coreComponents := getDefaultCoreComponents() + coreComponents.IntMarsh = getMarshalizer() + coreComponents.VmMarsh = getMarshalizer() + coreComponents.Hash = getHasher() + + stateComponents := getDefaultStateComponents() + args := state.ArgsAccountsRepository{ + FinalStateAccountsWrapper: accDB, + CurrentStateAccountsWrapper: accDB, + HistoricalStateAccountsWrapper: accDB, + } + stateComponents.AccountsRepo, _ = state.NewAccountsRepository(args) + + n, _ := node.NewNode( + node.WithDataComponents(dataComponents), + node.WithCoreComponents(coreComponents), + node.WithStateComponents(stateComponents), + ) + + codeHash, bInfo, err := n.GetCodeHash("erd1qyu5wthldzr8wx5c9ucg8kjagg0jfs53s8nr3zpz3hypefsdd8ssycr6th", api.AccountQueryOptions{}) + require.Nil(t, err) + require.Equal(t, dummyBlockInfo.apiResult(), bInfo) + require.Empty(t, codeHash) +} + func TestGetCodeHash(t *testing.T) { t.Parallel() @@ -290,6 +389,44 @@ func TestGetCodeHash(t *testing.T) { assert.Equal(t, expectedCodeHash, codeHash) } +func TestNode_GetKeyValuePairsAccNotFoundShouldReturnEmpty(t *testing.T) { + t.Parallel() + + accDB := &stateMock.AccountsStub{ + GetAccountWithBlockInfoCalled: func(address []byte, options common.RootHashHolder) (vmcommon.AccountHandler, common.BlockInfo, error) { + return nil, nil, state.NewErrAccountNotFoundAtBlock(dummyBlockInfo.forProcessing()) + }, + RecreateTrieCalled: func(_ []byte) error { + return nil + }, + } + + dataComponents := getDefaultDataComponents() + coreComponents := getDefaultCoreComponents() + coreComponents.IntMarsh = getMarshalizer() + coreComponents.VmMarsh = getMarshalizer() + coreComponents.Hash = getHasher() + + stateComponents := getDefaultStateComponents() + args := state.ArgsAccountsRepository{ + 
FinalStateAccountsWrapper: accDB, + CurrentStateAccountsWrapper: accDB, + HistoricalStateAccountsWrapper: accDB, + } + stateComponents.AccountsRepo, _ = state.NewAccountsRepository(args) + + n, _ := node.NewNode( + node.WithDataComponents(dataComponents), + node.WithCoreComponents(coreComponents), + node.WithStateComponents(stateComponents), + ) + + pairs, bInfo, err := n.GetKeyValuePairs(testscommon.TestAddressAlice, api.AccountQueryOptions{}, context.Background()) + require.Nil(t, err) + require.Equal(t, dummyBlockInfo.apiResult(), bInfo) + require.Len(t, pairs, 0) +} + func TestNode_GetKeyValuePairs(t *testing.T) { t.Parallel() @@ -311,7 +448,7 @@ func TestNode_GetKeyValuePairs(t *testing.T) { trieLeaf2 := keyValStorage.NewKeyValStorage(k2, append(v2, suffix...)) leavesChannels.LeavesChan <- trieLeaf2 close(leavesChannels.LeavesChan) - close(leavesChannels.ErrChan) + leavesChannels.ErrChan.Close() }() return nil @@ -370,7 +507,7 @@ func TestNode_GetKeyValuePairs_GetAllLeavesShouldFail(t *testing.T) { &trieMock.TrieStub{ GetAllLeavesOnChannelCalled: func(leavesChannels *common.TrieIteratorChannels, ctx context.Context, rootHash []byte, _ common.KeyBuilder) error { go func() { - leavesChannels.ErrChan <- expectedErr + leavesChannels.ErrChan.WriteInChanNonBlocking(expectedErr) close(leavesChannels.LeavesChan) }() @@ -425,7 +562,7 @@ func TestNode_GetKeyValuePairsContextShouldTimeout(t *testing.T) { go func() { time.Sleep(time.Second) close(leavesChannels.LeavesChan) - close(leavesChannels.ErrChan) + leavesChannels.ErrChan.Close() }() return nil @@ -470,6 +607,44 @@ func TestNode_GetKeyValuePairsContextShouldTimeout(t *testing.T) { assert.Equal(t, node.ErrTrieOperationsTimeout, err) } +func TestNode_GetValueForKeyAccNotFoundShouldReturnEmpty(t *testing.T) { + t.Parallel() + + accDB := &stateMock.AccountsStub{ + GetAccountWithBlockInfoCalled: func(address []byte, options common.RootHashHolder) (vmcommon.AccountHandler, common.BlockInfo, error) { + return nil, nil, state.NewErrAccountNotFoundAtBlock(dummyBlockInfo.forProcessing()) + }, + RecreateTrieCalled: func(_ []byte) error { + return nil + }, + } + + dataComponents := getDefaultDataComponents() + coreComponents := getDefaultCoreComponents() + coreComponents.IntMarsh = getMarshalizer() + coreComponents.VmMarsh = getMarshalizer() + coreComponents.Hash = getHasher() + + stateComponents := getDefaultStateComponents() + args := state.ArgsAccountsRepository{ + FinalStateAccountsWrapper: accDB, + CurrentStateAccountsWrapper: accDB, + HistoricalStateAccountsWrapper: accDB, + } + stateComponents.AccountsRepo, _ = state.NewAccountsRepository(args) + + n, _ := node.NewNode( + node.WithDataComponents(dataComponents), + node.WithCoreComponents(coreComponents), + node.WithStateComponents(stateComponents), + ) + + value, bInfo, err := n.GetValueForKey(testscommon.TestAddressAlice, "0a0a", api.AccountQueryOptions{}) + require.Nil(t, err) + require.Equal(t, dummyBlockInfo.apiResult(), bInfo) + require.Empty(t, value) +} + func TestNode_GetValueForKey(t *testing.T) { t.Parallel() @@ -512,6 +687,46 @@ func TestNode_GetValueForKey(t *testing.T) { assert.Equal(t, hex.EncodeToString(v1), value) } +func TestNode_GetESDTDataAccNotFoundShouldReturnEmpty(t *testing.T) { + t.Parallel() + + esdtToken := "newToken" + + accDB := &stateMock.AccountsStub{ + GetAccountWithBlockInfoCalled: func(address []byte, options common.RootHashHolder) (vmcommon.AccountHandler, common.BlockInfo, error) { + return nil, nil, 
state.NewErrAccountNotFoundAtBlock(dummyBlockInfo.forProcessing()) + }, + RecreateTrieCalled: func(_ []byte) error { + return nil + }, + } + + dataComponents := getDefaultDataComponents() + coreComponents := getDefaultCoreComponents() + coreComponents.IntMarsh = getMarshalizer() + coreComponents.VmMarsh = getMarshalizer() + coreComponents.Hash = getHasher() + + stateComponents := getDefaultStateComponents() + args := state.ArgsAccountsRepository{ + FinalStateAccountsWrapper: accDB, + CurrentStateAccountsWrapper: accDB, + HistoricalStateAccountsWrapper: accDB, + } + stateComponents.AccountsRepo, _ = state.NewAccountsRepository(args) + + n, _ := node.NewNode( + node.WithDataComponents(dataComponents), + node.WithCoreComponents(coreComponents), + node.WithStateComponents(stateComponents), + ) + + esdtTokenData, bInfo, err := n.GetESDTData(testscommon.TestAddressAlice, esdtToken, 0, api.AccountQueryOptions{}) + require.Nil(t, err) + require.Equal(t, dummyBlockInfo.apiResult(), bInfo) + require.Equal(t, "0", esdtTokenData.Value.String()) +} + func TestNode_GetESDTData(t *testing.T) { t.Parallel() @@ -628,7 +843,7 @@ func TestNode_GetAllESDTTokens(t *testing.T) { trieLeaf := keyValStorage.NewKeyValStorage(esdtKey, nil) leavesChannels.LeavesChan <- trieLeaf close(leavesChannels.LeavesChan) - close(leavesChannels.ErrChan) + leavesChannels.ErrChan.Close() }() return nil @@ -684,7 +899,7 @@ func TestNode_GetAllESDTTokens_GetAllLeavesShouldFail(t *testing.T) { &trieMock.TrieStub{ GetAllLeavesOnChannelCalled: func(leavesChannels *common.TrieIteratorChannels, ctx context.Context, rootHash []byte, _ common.KeyBuilder) error { go func() { - leavesChannels.ErrChan <- expectedErr + leavesChannels.ErrChan.WriteInChanNonBlocking(expectedErr) close(leavesChannels.LeavesChan) }() @@ -741,7 +956,7 @@ func TestNode_GetAllESDTTokensContextShouldTimeout(t *testing.T) { go func() { time.Sleep(time.Second) close(leavesChannels.LeavesChan) - close(leavesChannels.ErrChan) + leavesChannels.ErrChan.Close() }() return nil @@ -788,6 +1003,44 @@ func TestNode_GetAllESDTTokensContextShouldTimeout(t *testing.T) { assert.Equal(t, node.ErrTrieOperationsTimeout, err) } +func TestNode_GetAllESDTsAccNotFoundShouldReturnEmpty(t *testing.T) { + t.Parallel() + + accDB := &stateMock.AccountsStub{ + GetAccountWithBlockInfoCalled: func(address []byte, options common.RootHashHolder) (vmcommon.AccountHandler, common.BlockInfo, error) { + return nil, nil, state.NewErrAccountNotFoundAtBlock(dummyBlockInfo.forProcessing()) + }, + RecreateTrieCalled: func(_ []byte) error { + return nil + }, + } + + dataComponents := getDefaultDataComponents() + coreComponents := getDefaultCoreComponents() + coreComponents.IntMarsh = getMarshalizer() + coreComponents.VmMarsh = getMarshalizer() + coreComponents.Hash = getHasher() + + stateComponents := getDefaultStateComponents() + args := state.ArgsAccountsRepository{ + FinalStateAccountsWrapper: accDB, + CurrentStateAccountsWrapper: accDB, + HistoricalStateAccountsWrapper: accDB, + } + stateComponents.AccountsRepo, _ = state.NewAccountsRepository(args) + + n, _ := node.NewNode( + node.WithDataComponents(dataComponents), + node.WithCoreComponents(coreComponents), + node.WithStateComponents(stateComponents), + ) + + tokens, bInfo, err := n.GetAllESDTTokens(testscommon.TestAddressAlice, api.AccountQueryOptions{}, context.Background()) + require.Nil(t, err) + require.Equal(t, dummyBlockInfo.apiResult(), bInfo) + require.Len(t, tokens, 0) +} + func TestNode_GetAllESDTTokensShouldReturnEsdtAndFormattedNft(t 
*testing.T) { t.Parallel() @@ -835,7 +1088,7 @@ func TestNode_GetAllESDTTokensShouldReturnEsdtAndFormattedNft(t *testing.T) { leavesChannels.LeavesChan <- trieLeaf wg.Done() close(leavesChannels.LeavesChan) - close(leavesChannels.ErrChan) + leavesChannels.ErrChan.Close() }() wg.Wait() @@ -921,7 +1174,7 @@ func TestNode_GetAllIssuedESDTs(t *testing.T) { trieLeaf = keyValStorage.NewKeyValStorage(nftToken, append(nftMarshalledData, nftSuffix...)) leavesChannels.LeavesChan <- trieLeaf close(leavesChannels.LeavesChan) - close(leavesChannels.ErrChan) + leavesChannels.ErrChan.Close() }() return nil @@ -1007,7 +1260,7 @@ func TestNode_GetESDTsWithRole(t *testing.T) { trieLeaf := keyValStorage.NewKeyValStorage(esdtToken, append(marshalledData, esdtSuffix...)) leavesChannels.LeavesChan <- trieLeaf close(leavesChannels.LeavesChan) - close(leavesChannels.ErrChan) + leavesChannels.ErrChan.Close() }() return nil @@ -1087,7 +1340,7 @@ func TestNode_GetESDTsRoles(t *testing.T) { trieLeaf := keyValStorage.NewKeyValStorage(esdtToken, append(marshalledData, esdtSuffix...)) leavesChannels.LeavesChan <- trieLeaf close(leavesChannels.LeavesChan) - close(leavesChannels.ErrChan) + leavesChannels.ErrChan.Close() }() return nil @@ -1152,7 +1405,7 @@ func TestNode_GetNFTTokenIDsRegisteredByAddress(t *testing.T) { trieLeaf := keyValStorage.NewKeyValStorage(esdtToken, append(marshalledData, esdtSuffix...)) leavesChannels.LeavesChan <- trieLeaf close(leavesChannels.LeavesChan) - close(leavesChannels.ErrChan) + leavesChannels.ErrChan.Close() }() return nil @@ -1209,7 +1462,7 @@ func TestNode_GetNFTTokenIDsRegisteredByAddressContextShouldTimeout(t *testing.T go func() { time.Sleep(time.Second) close(leavesChannels.LeavesChan) - close(leavesChannels.ErrChan) + leavesChannels.ErrChan.Close() }() return nil @@ -1580,7 +1833,27 @@ func TestGenerateTransaction_CorrectParamsShouldNotError(t *testing.T) { assert.Nil(t, err) } -func TestCreateTransaction_NilAddrConverterShouldErr(t *testing.T) { +func getDefaultTransactionArgs() *external.ArgsCreateTransaction { + return &external.ArgsCreateTransaction{ + Nonce: uint64(0), + Value: new(big.Int).SetInt64(10).String(), + Receiver: "rcv", + ReceiverUsername: []byte("rcvrUsername"), + Sender: "snd", + SenderUsername: []byte("sndrUsername"), + GasPrice: uint64(10), + GasLimit: uint64(20), + DataField: []byte("-"), + SignatureHex: hex.EncodeToString(bytes.Repeat([]byte{0}, 10)), + ChainID: "chainID", + Version: 1, + Options: 0, + Guardian: "", + GuardianSigHex: "", + } +} + +func TestCreateTransaction_NilArgsShouldErr(t *testing.T) { t.Parallel() coreComponents := getDefaultCoreComponents() @@ -1596,18 +1869,32 @@ func TestCreateTransaction_NilAddrConverterShouldErr(t *testing.T) { node.WithStateComponents(stateComponents), ) - nonce := uint64(0) - value := new(big.Int).SetInt64(10) - receiver := "" - sender := "" - gasPrice := uint64(10) - gasLimit := uint64(20) - txData := []byte("-") - signature := "-" + tx, txHash, err := n.CreateTransaction(nil) + + assert.Nil(t, tx) + assert.Nil(t, txHash) + assert.Equal(t, node.ErrNilCreateTransactionArgs, err) +} + +func TestCreateTransaction_NilAddrConverterShouldErr(t *testing.T) { + t.Parallel() + + coreComponents := getDefaultCoreComponents() + coreComponents.IntMarsh = getMarshalizer() + coreComponents.VmMarsh = getMarshalizer() + coreComponents.TxMarsh = getMarshalizer() + coreComponents.Hash = getHasher() + stateComponents := getDefaultStateComponents() + stateComponents.AccountsAPI = &stateMock.AccountsStub{} + + n, _ := node.NewNode( 
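
`getDefaultTransactionArgs` gives every test a valid baseline so each case mutates only the field under test, which is what keeps the refactored `CreateTransaction` tests below readable. The same builder-plus-mutation pattern, trimmed to three fields:

```go
package main

import "fmt"

type args struct {
	ChainID      string
	Version      uint32
	SignatureHex string
}

// defaultArgs returns a baseline that passes validation unchanged
func defaultArgs() *args {
	return &args{ChainID: "chainID", Version: 1, SignatureHex: "00"}
}

func validate(a *args) error {
	if a.Version == 0 {
		return fmt.Errorf("invalid transaction version")
	}
	if a.ChainID == "" {
		return fmt.Errorf("invalid chain ID in transaction")
	}
	return nil
}

func main() {
	a := defaultArgs()
	a.Version = 0 // mutate only the field under test
	fmt.Println(validate(a)) // invalid transaction version

	fmt.Println(validate(defaultArgs())) // <nil>: the baseline stays valid
}
```
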
+ node.WithCoreComponents(coreComponents), + node.WithStateComponents(stateComponents), + ) coreComponents.AddrPubKeyConv = nil - chainID := coreComponents.ChainID() - tx, txHash, err := n.CreateTransaction(nonce, value.String(), receiver, nil, sender, nil, gasPrice, gasLimit, txData, signature, chainID, 1, 0) + txArgs := getDefaultTransactionArgs() + tx, txHash, err := n.CreateTransaction(txArgs) assert.Nil(t, tx) assert.Nil(t, txHash) @@ -1636,32 +1923,10 @@ func TestCreateTransaction_NilAccountsAdapterShouldErr(t *testing.T) { node.WithProcessComponents(processComponents), ) - nonce := uint64(0) - value := new(big.Int).SetInt64(10) - receiver := "" - sender := "" - gasPrice := uint64(10) - gasLimit := uint64(20) - txData := []byte("-") - signature := "-" - stateComponents.AccountsAPI = nil - tx, txHash, err := n.CreateTransaction( - nonce, - value.String(), - receiver, - nil, - sender, - nil, - gasPrice, - gasLimit, - txData, - signature, - coreComponents.ChainID(), - 1, - 0, - ) + txArgs := getDefaultTransactionArgs() + tx, txHash, err := n.CreateTransaction(txArgs) assert.Nil(t, tx) assert.Nil(t, txHash) @@ -1689,16 +1954,9 @@ func TestCreateTransaction_InvalidSignatureShouldErr(t *testing.T) { node.WithStateComponents(stateComponents), ) - nonce := uint64(0) - value := new(big.Int).SetInt64(10) - receiver := "rcv" - sender := "snd" - gasPrice := uint64(10) - gasLimit := uint64(20) - txData := []byte("-") - signature := "-" - - tx, txHash, err := n.CreateTransaction(nonce, value.String(), receiver, nil, sender, nil, gasPrice, gasLimit, txData, signature, "chainID", 1, 0) + txArgs := getDefaultTransactionArgs() + txArgs.SignatureHex = "-" + tx, txHash, err := n.CreateTransaction(txArgs) assert.Nil(t, tx) assert.Nil(t, txHash) @@ -1740,27 +1998,29 @@ func TestCreateTransaction_ChainIDFieldChecks(t *testing.T) { node.WithAddressSignatureSize(10), ) - nonce := uint64(0) - value := new(big.Int).SetInt64(10) - receiver := "rcv" - sender := "snd" - gasPrice := uint64(10) - gasLimit := uint64(20) - txData := []byte("-") signature := hex.EncodeToString([]byte(strings.Repeat("s", 10))) - emptyChainID := "" - _, _, err := n.CreateTransaction(nonce, value.String(), receiver, nil, sender, nil, gasPrice, gasLimit, txData, signature, emptyChainID, 1, 0) + txArgs := getDefaultTransactionArgs() + txArgs.SignatureHex = signature + txArgs.ChainID = emptyChainID + _, _, err := n.CreateTransaction(txArgs) assert.Equal(t, node.ErrInvalidChainIDInTransaction, err) for i := 1; i < len(chainID); i++ { newChainID := strings.Repeat("c", i) - _, _, err = n.CreateTransaction(nonce, value.String(), receiver, nil, sender, nil, gasPrice, gasLimit, txData, signature, newChainID, 1, 0) + txArgs = getDefaultTransactionArgs() + txArgs.SignatureHex = signature + txArgs.ChainID = newChainID + _, _, err = n.CreateTransaction(txArgs) assert.NoError(t, err) } newChainID := chainID + "additional text" - _, _, err = n.CreateTransaction(nonce, value.String(), receiver, nil, sender, nil, gasPrice, gasLimit, txData, signature, newChainID, 1, 0) + txArgs = getDefaultTransactionArgs() + txArgs.SignatureHex = signature + txArgs.ChainID = newChainID + + _, _, err = n.CreateTransaction(txArgs) assert.Equal(t, node.ErrInvalidChainIDInTransaction, err) } @@ -1796,15 +2056,12 @@ func TestCreateTransaction_InvalidTxVersionShouldErr(t *testing.T) { node.WithStateComponents(stateComponents), ) - nonce := uint64(0) - value := new(big.Int).SetInt64(10) - receiver := "rcv" - sender := "snd" - gasPrice := uint64(10) - gasLimit := uint64(20) - 
txData := []byte("-") - signature := "617eff4f" - _, _, err := n.CreateTransaction(nonce, value.String(), receiver, nil, sender, nil, gasPrice, gasLimit, txData, signature, "", 0, 0) + txArgs := getDefaultTransactionArgs() + txArgs.Version = 0 + txArgs.ChainID = "" + txArgs.SignatureHex = "617eff4f" + + _, _, err := n.CreateTransaction(txArgs) assert.Equal(t, node.ErrInvalidTransactionVersion, err) } @@ -1876,13 +2133,15 @@ func TestCreateTransaction_SenderShardIdIsInDifferentShardShouldNotValidate(t *t nonce := uint64(0) value := new(big.Int).SetInt64(10) receiver := "rcv" - sender := "snd" - gasPrice := uint64(10) - gasLimit := uint64(20) - txData := []byte("-") - signature := hex.EncodeToString(bytes.Repeat([]byte{0}, 10)) - tx, txHash, err := n.CreateTransaction(nonce, value.String(), receiver, nil, sender, nil, gasPrice, gasLimit, txData, signature, string(chainID), version, 0) + txArgs := getDefaultTransactionArgs() + txArgs.Version = version + txArgs.Nonce = nonce + txArgs.Value = value.String() + txArgs.Receiver = receiver + + tx, txHash, err := n.CreateTransaction(txArgs) + assert.NotNil(t, tx) assert.Equal(t, expectedHash, txHash) assert.Nil(t, err) @@ -1936,25 +2195,26 @@ func TestCreateTransaction_SignatureLengthChecks(t *testing.T) { node.WithAddressSignatureSize(signatureLength), ) - nonce := uint64(0) value := "1" + strings.Repeat("0", maxValueLength) - receiver := "rcv" - sender := "snd" - gasPrice := uint64(10) - gasLimit := uint64(20) - txData := []byte("-") + txArgs := getDefaultTransactionArgs() + txArgs.Value = value for i := 0; i <= signatureLength; i++ { signatureBytes := []byte(strings.Repeat("a", i)) signatureHex := hex.EncodeToString(signatureBytes) - tx, _, err := n.CreateTransaction(nonce, value, receiver, []byte("rcvrUsername"), sender, []byte("sndrUsername"), gasPrice, gasLimit, txData, signatureHex, chainID, 1, 0) - assert.NotNil(t, tx) + + txArgs.SignatureHex = signatureHex + + tx, _, err := n.CreateTransaction(txArgs) + assert.NotNil(t, tx) assert.NoError(t, err) assert.Equal(t, signatureBytes, tx.Signature) } signature := hex.EncodeToString([]byte(strings.Repeat("a", signatureLength+1))) - tx, txHash, err := n.CreateTransaction(nonce, value, receiver, []byte("rcvrUsername"), sender, []byte("sndrUsername"), gasPrice, gasLimit, txData, signature, chainID, 1, 0) + txArgs.SignatureHex = signature + + tx, txHash, err := n.CreateTransaction(txArgs) assert.Nil(t, tx) assert.Empty(t, txHash) assert.Equal(t, node.ErrInvalidSignatureLength, err) @@ -2002,22 +2262,19 @@ func TestCreateTransaction_SenderLengthChecks(t *testing.T) { node.WithAddressSignatureSize(10), ) - nonce := uint64(0) - value := "10" - receiver := "rcv" - gasPrice := uint64(10) - gasLimit := uint64(20) - txData := []byte("-") - signature := hex.EncodeToString(bytes.Repeat([]byte{0}, 10)) + txArgs := getDefaultTransactionArgs() + txArgs.ChainID = chainID for i := 0; i <= encodedAddressLen; i++ { - sender := strings.Repeat("s", i) - _, _, err := n.CreateTransaction(nonce, value, receiver, []byte("rcvrUsername"), sender, []byte("sndrUsername"), gasPrice, gasLimit, txData, signature, chainID, 1, 0) + txArgs.Sender = strings.Repeat("s", i) + + _, _, err := n.CreateTransaction(txArgs) assert.NoError(t, err) } - sender := strings.Repeat("s", encodedAddressLen) + "additional" - tx, txHash, err := n.CreateTransaction(nonce, value, receiver, []byte("rcvrUsername"), sender, []byte("sndrUsername"), gasPrice, gasLimit, txData, signature, chainID, 1, 0) + txArgs.Sender = strings.Repeat("s", encodedAddressLen) + 
"additional" + + tx, txHash, err := n.CreateTransaction(txArgs) assert.Nil(t, tx) assert.Empty(t, txHash) assert.Error(t, err) @@ -2066,22 +2323,19 @@ func TestCreateTransaction_ReceiverLengthChecks(t *testing.T) { node.WithAddressSignatureSize(10), ) - nonce := uint64(0) - value := "10" - sender := "snd" - gasPrice := uint64(10) - gasLimit := uint64(20) - txData := []byte("-") - signature := hex.EncodeToString(bytes.Repeat([]byte{0}, 10)) + txArgs := getDefaultTransactionArgs() + txArgs.ChainID = chainID for i := 0; i <= encodedAddressLen; i++ { - receiver := strings.Repeat("r", i) - _, _, err := n.CreateTransaction(nonce, value, receiver, []byte("rcvrUsername"), sender, []byte("sndrUsername"), gasPrice, gasLimit, txData, signature, chainID, 1, 0) + txArgs.Receiver = strings.Repeat("r", i) + + _, _, err := n.CreateTransaction(txArgs) assert.NoError(t, err) } - receiver := strings.Repeat("r", encodedAddressLen) + "additional" - tx, txHash, err := n.CreateTransaction(nonce, value, receiver, []byte("rcvrUsername"), sender, []byte("sndrUsername"), gasPrice, gasLimit, txData, signature, chainID, 1, 0) + txArgs.Receiver = strings.Repeat("r", encodedAddressLen) + "additional" + + tx, txHash, err := n.CreateTransaction(txArgs) assert.Nil(t, tx) assert.Empty(t, txHash) assert.Error(t, err) @@ -2129,18 +2383,12 @@ func TestCreateTransaction_TooBigSenderUsernameShouldErr(t *testing.T) { node.WithAddressSignatureSize(10), ) - nonce := uint64(0) - value := "1" + strings.Repeat("0", maxLength+1) - receiver := "rcv" - sender := "snd" - gasPrice := uint64(10) - gasLimit := uint64(20) - txData := []byte("-") - signature := hex.EncodeToString(bytes.Repeat([]byte{0}, 10)) + txArgs := getDefaultTransactionArgs() + txArgs.Value = "1" + strings.Repeat("0", maxLength+1) + txArgs.ChainID = chainID + txArgs.SenderUsername = bytes.Repeat([]byte{0}, core.MaxUserNameLength+1) - senderUsername := bytes.Repeat([]byte{0}, core.MaxUserNameLength+1) - - tx, txHash, err := n.CreateTransaction(nonce, value, receiver, []byte("rcvrUsername"), sender, senderUsername, gasPrice, gasLimit, txData, signature, chainID, 1, 0) + tx, txHash, err := n.CreateTransaction(txArgs) assert.Nil(t, tx) assert.Empty(t, txHash) assert.Error(t, err) @@ -2188,18 +2436,12 @@ func TestCreateTransaction_TooBigReceiverUsernameShouldErr(t *testing.T) { node.WithAddressSignatureSize(10), ) - nonce := uint64(0) - value := "1" + strings.Repeat("0", maxLength+1) - receiver := "rcv" - sender := "snd" - gasPrice := uint64(10) - gasLimit := uint64(20) - txData := []byte("-") - signature := hex.EncodeToString(bytes.Repeat([]byte{0}, 10)) - - receiverUsername := bytes.Repeat([]byte{0}, core.MaxUserNameLength+1) + txArgs := getDefaultTransactionArgs() + txArgs.ChainID = chainID + txArgs.ReceiverUsername = bytes.Repeat([]byte{0}, core.MaxUserNameLength+1) + txArgs.Value = "1" + strings.Repeat("0", maxLength+1) - tx, txHash, err := n.CreateTransaction(nonce, value, receiver, receiverUsername, sender, []byte("sndrUsername"), gasPrice, gasLimit, txData, signature, chainID, 1, 0) + tx, txHash, err := n.CreateTransaction(txArgs) assert.Nil(t, tx) assert.Empty(t, txHash) assert.Error(t, err) @@ -2246,16 +2488,13 @@ func TestCreateTransaction_DataFieldSizeExceedsMaxShouldErr(t *testing.T) { node.WithStateComponents(stateComponents), node.WithAddressSignatureSize(10), ) - nonce := uint64(0) - value := "1" + strings.Repeat("0", maxLength+1) - receiver := "rcv" - sender := "snd" - gasPrice := uint64(10) - gasLimit := uint64(20) - txData := bytes.Repeat([]byte{0}, 
core.MegabyteSize+1) - signature := hex.EncodeToString(bytes.Repeat([]byte{0}, 10)) - tx, txHash, err := n.CreateTransaction(nonce, value, receiver, []byte("rcvrUsername"), sender, []byte("sndrUsername"), gasPrice, gasLimit, txData, signature, chainID, 1, 0) + txArgs := getDefaultTransactionArgs() + txArgs.ChainID = chainID + txArgs.DataField = bytes.Repeat([]byte{0}, core.MegabyteSize+1) + txArgs.Value = "1" + strings.Repeat("0", maxLength+1) + + tx, txHash, err := n.CreateTransaction(txArgs) assert.Nil(t, tx) assert.Empty(t, txHash) assert.Error(t, err) @@ -2303,22 +2542,175 @@ func TestCreateTransaction_TooLargeValueFieldShouldErr(t *testing.T) { node.WithAddressSignatureSize(10), ) - nonce := uint64(0) - value := "1" + strings.Repeat("0", maxLength+1) - receiver := "rcv" - sender := "snd" - gasPrice := uint64(10) - gasLimit := uint64(20) - txData := []byte("-") - signature := hex.EncodeToString(bytes.Repeat([]byte{0}, 10)) + txArgs := getDefaultTransactionArgs() + txArgs.ChainID = chainID + txArgs.Value = "1" + strings.Repeat("0", maxLength+1) - tx, txHash, err := n.CreateTransaction(nonce, value, receiver, []byte("rcvrUsername"), sender, []byte("sndrUsername"), gasPrice, gasLimit, txData, signature, chainID, 1, 0) + tx, txHash, err := n.CreateTransaction(txArgs) assert.Nil(t, tx) assert.Empty(t, txHash) assert.Error(t, err) assert.Equal(t, node.ErrTransactionValueLengthTooBig, err) } +func TestCreateTransaction_InvalidGuardianSigShouldErr(t *testing.T) { + t.Parallel() + + coreComponents := getDefaultCoreComponents() + coreComponents.IntMarsh = getMarshalizer() + coreComponents.VmMarsh = getMarshalizer() + coreComponents.TxMarsh = getMarshalizer() + coreComponents.Hash = getHasher() + coreComponents.AddrPubKeyConv = &testscommon.PubkeyConverterStub{ + DecodeCalled: func(hexAddress string) ([]byte, error) { + return []byte(hexAddress), nil + }, + } + stateComponents := getDefaultStateComponents() + stateComponents.AccountsAPI = &stateMock.AccountsStub{} + + n, _ := node.NewNode( + node.WithCoreComponents(coreComponents), + node.WithStateComponents(stateComponents), + node.WithAddressSignatureSize(16), + ) + + txArgs := getDefaultTransactionArgs() + txArgs.SignatureHex = hex.EncodeToString(bytes.Repeat([]byte{0}, 1)) + txArgs.GuardianSigHex = hex.EncodeToString(bytes.Repeat([]byte{0}, 32)) + + tx, txHash, err := n.CreateTransaction(txArgs) + + assert.Nil(t, tx) + assert.Nil(t, txHash) + assert.NotNil(t, err) + assert.True(t, errors.Is(err, node.ErrInvalidSignatureLength)) +} + +func TestCreateTransaction_InvalidGuardianAddressLenShouldErr(t *testing.T) { + t.Parallel() + + coreComponents := getDefaultCoreComponents() + coreComponents.IntMarsh = getMarshalizer() + coreComponents.VmMarsh = getMarshalizer() + coreComponents.TxMarsh = getMarshalizer() + coreComponents.Hash = getHasher() + + encodedAddressLen := 8 + coreComponents.AddrPubKeyConv = &testscommon.PubkeyConverterStub{ + DecodeCalled: func(hexAddress string) ([]byte, error) { + return []byte(hexAddress), nil + }, + LenCalled: func() int { + return encodedAddressLen + }, + } + stateComponents := getDefaultStateComponents() + stateComponents.AccountsAPI = &stateMock.AccountsStub{} + + n, _ := node.NewNode( + node.WithCoreComponents(coreComponents), + node.WithStateComponents(stateComponents), + node.WithAddressSignatureSize(16), + ) + + txArgs := getDefaultTransactionArgs() + txArgs.SignatureHex = hex.EncodeToString(bytes.Repeat([]byte{0}, 8)) + txArgs.GuardianSigHex = hex.EncodeToString(bytes.Repeat([]byte{0}, 8)) + 
txArgs.Guardian = strings.Repeat("g", encodedAddressLen) + "additional" + + tx, txHash, err := n.CreateTransaction(txArgs) + + assert.Nil(t, tx) + assert.Nil(t, txHash) + assert.NotNil(t, err) + assert.True(t, errors.Is(err, node.ErrInvalidAddressLength)) +} + +func TestCreateTransaction_AddressPubKeyConverterDecode(t *testing.T) { + t.Parallel() + + minAddrLen := 4 + encodedAddressLen := 8 + addrPubKeyConverter := &testscommon.PubkeyConverterStub{ + DecodeCalled: func(hexAddress string) ([]byte, error) { + if len(hexAddress) < minAddrLen { + return nil, errors.New("decode error") + } + return []byte(hexAddress), nil + }, + LenCalled: func() int { + return encodedAddressLen + }, + } + + guardianSig := hex.EncodeToString(bytes.Repeat([]byte{0}, 8)) + guardian := strings.Repeat("g", encodedAddressLen) + + t.Run("fail to decode receiver", func(t *testing.T) { + t.Parallel() + + coreComponents := getDefaultCoreComponents() + coreComponents.IntMarsh = getMarshalizer() + coreComponents.VmMarsh = getMarshalizer() + coreComponents.TxMarsh = getMarshalizer() + coreComponents.Hash = getHasher() + + coreComponents.AddrPubKeyConv = addrPubKeyConverter + stateComponents := getDefaultStateComponents() + stateComponents.AccountsAPI = &stateMock.AccountsStub{} + + n, _ := node.NewNode( + node.WithCoreComponents(coreComponents), + node.WithStateComponents(stateComponents), + node.WithAddressSignatureSize(16), + ) + + txArgs := getDefaultTransactionArgs() + txArgs.Guardian = guardian + txArgs.GuardianSigHex = guardianSig + + tx, txHash, err := n.CreateTransaction(txArgs) + + assert.Nil(t, tx) + assert.Nil(t, txHash) + assert.NotNil(t, err) + assert.True(t, strings.Contains(err.Error(), "receiver address")) + }) + + t.Run("fail to decode sender", func(t *testing.T) { + t.Parallel() + + coreComponents := getDefaultCoreComponents() + coreComponents.IntMarsh = getMarshalizer() + coreComponents.VmMarsh = getMarshalizer() + coreComponents.TxMarsh = getMarshalizer() + coreComponents.Hash = getHasher() + + coreComponents.AddrPubKeyConv = addrPubKeyConverter + stateComponents := getDefaultStateComponents() + stateComponents.AccountsAPI = &stateMock.AccountsStub{} + + n, _ := node.NewNode( + node.WithCoreComponents(coreComponents), + node.WithStateComponents(stateComponents), + node.WithAddressSignatureSize(16), + ) + + txArgs := getDefaultTransactionArgs() + txArgs.Guardian = guardian + txArgs.GuardianSigHex = guardianSig + txArgs.Receiver = strings.Repeat("r", minAddrLen+1) + + tx, txHash, err := n.CreateTransaction(txArgs) + + assert.Nil(t, tx) + assert.Nil(t, txHash) + assert.NotNil(t, err) + assert.True(t, strings.Contains(err.Error(), "sender address")) + }) +} + func TestCreateTransaction_OkValsShouldWork(t *testing.T) { t.Parallel() @@ -2377,16 +2769,15 @@ func TestCreateTransaction_OkValsShouldWork(t *testing.T) { nonce := uint64(0) value := new(big.Int).SetInt64(10) receiver := "rcv" - sender := "snd" - gasPrice := uint64(10) - gasLimit := uint64(20) - txData := []byte("-") - signature := hex.EncodeToString(bytes.Repeat([]byte{0}, 10)) - tx, txHash, err := n.CreateTransaction( - nonce, value.String(), receiver, nil, sender, nil, gasPrice, gasLimit, txData, - signature, coreComponents.ChainID(), coreComponents.MinTransactionVersion(), 0, - ) + txArgs := getDefaultTransactionArgs() + txArgs.Receiver = receiver + txArgs.Nonce = nonce + txArgs.Value = value.String() + txArgs.ChainID = coreComponents.ChainID() + txArgs.Version = coreComponents.MinTransactionVersion() + + tx, txHash, err := 
n.CreateTransaction(txArgs) assert.NotNil(t, tx) assert.Equal(t, expectedHash, txHash) assert.Nil(t, err) @@ -2476,17 +2867,14 @@ func TestCreateTransaction_TxSignedWithHashShouldErrVersionShoudBe2(t *testing.T node.WithAddressSignatureSize(10), ) - nonce := uint64(0) - value := new(big.Int).SetInt64(10) - receiver := "rcv" - sender := "snd" - gasPrice := uint64(10) - gasLimit := uint64(20) - txData := []byte("-") - signature := hex.EncodeToString(bytes.Repeat([]byte{0}, 10)) - - options := versioning.MaskSignedWithHash - tx, _, err := n.CreateTransaction(nonce, value.String(), receiver, nil, sender, nil, gasPrice, gasLimit, txData, signature, chainID, version, options) + options := transaction.MaskSignedWithHash + + txArgs := getDefaultTransactionArgs() + txArgs.ChainID = chainID + txArgs.Version = version + txArgs.Options = options + + tx, _, err := n.CreateTransaction(txArgs) require.Nil(t, err) err = n.ValidateTransaction(tx) assert.Equal(t, process.ErrInvalidTransactionVersion, err) @@ -2577,17 +2965,14 @@ func TestCreateTransaction_TxSignedWithHashNoEnabledShouldErr(t *testing.T) { node.WithAddressSignatureSize(10), ) - nonce := uint64(0) - value := new(big.Int).SetInt64(10) - receiver := "rcv" - sender := "snd" - gasPrice := uint64(10) - gasLimit := uint64(20) - txData := []byte("-") - signature := hex.EncodeToString(bytes.Repeat([]byte{0}, 10)) + options := transaction.MaskSignedWithHash - options := versioning.MaskSignedWithHash - tx, _, _ := n.CreateTransaction(nonce, value.String(), receiver, nil, sender, nil, gasPrice, gasLimit, txData, signature, chainID, version+1, options) + txArgs := getDefaultTransactionArgs() + txArgs.ChainID = chainID + txArgs.Version = version + 1 + txArgs.Options = options + + tx, _, _ := n.CreateTransaction(txArgs) err := n.ValidateTransaction(tx) assert.Equal(t, process.ErrTransactionSignedWithHashIsNotEnabled, err) @@ -2942,6 +3327,44 @@ func TestNode_GetAccountAccountsRepositoryFailsShouldErr(t *testing.T) { assert.ErrorIs(t, err, errExpected) } +func TestNode_GetAccountAccNotFoundShouldReturnEmpty(t *testing.T) { + t.Parallel() + + accDB := &stateMock.AccountsStub{ + GetAccountWithBlockInfoCalled: func(address []byte, options common.RootHashHolder) (vmcommon.AccountHandler, common.BlockInfo, error) { + return nil, nil, state.NewErrAccountNotFoundAtBlock(dummyBlockInfo.forProcessing()) + }, + RecreateTrieCalled: func(_ []byte) error { + return nil + }, + } + + dataComponents := getDefaultDataComponents() + coreComponents := getDefaultCoreComponents() + coreComponents.IntMarsh = getMarshalizer() + coreComponents.VmMarsh = getMarshalizer() + coreComponents.Hash = getHasher() + + stateComponents := getDefaultStateComponents() + args := state.ArgsAccountsRepository{ + FinalStateAccountsWrapper: accDB, + CurrentStateAccountsWrapper: accDB, + HistoricalStateAccountsWrapper: accDB, + } + stateComponents.AccountsRepo, _ = state.NewAccountsRepository(args) + + n, _ := node.NewNode( + node.WithDataComponents(dataComponents), + node.WithCoreComponents(coreComponents), + node.WithStateComponents(stateComponents), + ) + + acc, bInfo, err := n.GetAccount(testscommon.TestAddressAlice, api.AccountQueryOptions{}) + require.Nil(t, err) + require.Equal(t, dummyBlockInfo.apiResult(), bInfo) + require.Equal(t, api.AccountResponse{Address: testscommon.TestAddressAlice, Balance: "0", DeveloperReward: "0"}, acc) +} + func TestNode_GetAccountAccountExistsShouldReturn(t *testing.T) { t.Parallel() @@ -3856,6 +4279,206 @@ func TestNode_GetHeartbeats(t *testing.T) { 
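// NOTE (review): the TxSignedWithHash tests above now take MaskSignedWithHash
// from the transaction package instead of the versioning package; the flag
// itself remains a bit set in the transaction's Options field. A small sketch
// of how such an options mask behaves — the concrete bit position below is an
// assumption for illustration, not necessarily the value in mx-chain-core-go:
package main

import "fmt"

const maskSignedWithHash = uint32(1 << 0) // assumed bit position

func main() {
	var options uint32

	options |= maskSignedWithHash // mark the tx as signed over its hash
	signedWithHash := options&maskSignedWithHash != 0

	fmt.Println(signedWithHash) // true
}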
assert.True(t, sameMessages(providedMessages, receivedMessages)) } +func TestNode_Getters(t *testing.T) { + t.Parallel() + + coreComponents := getDefaultCoreComponents() + statusCoreComponents := &factoryTests.StatusCoreComponentsStub{ + AppStatusHandlerField: &statusHandlerMock.AppStatusHandlerStub{}, + } + cryptoComponents := getDefaultCryptoComponents() + stateComponents := getDefaultStateComponents() + bootstrapComponents := getDefaultBootstrapComponents() + dataComponents := getDefaultDataComponents() + heartbeatComponents := &factoryMock.HeartbeatV2ComponentsStub{} + networkComponents := getDefaultNetworkComponents() + processComponents := getDefaultProcessComponents() + consensusGroupSize := 10 + + n, err := node.NewNode( + node.WithCoreComponents(coreComponents), + node.WithStatusCoreComponents(statusCoreComponents), + node.WithCryptoComponents(cryptoComponents), + node.WithStateComponents(stateComponents), + node.WithBootstrapComponents(bootstrapComponents), + node.WithDataComponents(dataComponents), + node.WithHeartbeatV2Components(heartbeatComponents), + node.WithNetworkComponents(networkComponents), + node.WithProcessComponents(processComponents), + node.WithConsensusGroupSize(consensusGroupSize), + node.WithImportMode(true), + ) + require.Nil(t, err) + + //pointer testing + assert.True(t, n.GetCoreComponents() == coreComponents) + assert.True(t, n.GetStatusCoreComponents() == statusCoreComponents) + assert.True(t, n.GetCryptoComponents() == cryptoComponents) + assert.True(t, n.GetStateComponents() == stateComponents) + assert.True(t, n.GetBootstrapComponents() == bootstrapComponents) + assert.True(t, n.GetDataComponents() == dataComponents) + assert.True(t, n.GetHeartbeatV2Components() == heartbeatComponents) + assert.True(t, n.GetNetworkComponents() == networkComponents) + assert.True(t, n.GetProcessComponents() == processComponents) + assert.Equal(t, consensusGroupSize, n.GetConsensusGroupSize()) + assert.True(t, n.IsInImportMode()) +} + +func TestNode_GetEpochStartDataAPI(t *testing.T) { + t.Parallel() + + prevHash := []byte("prevHash") + rootHash := []byte("rootHash") + accumulatedFees := big.NewInt(100) + developerFees := big.NewInt(200) + + dataComponents := getDefaultDataComponents() + blockchain := dataComponents.BlockChain.(*testscommon.ChainHandlerStub) + timestamp := uint64(778899) + shardID := uint32(2) + blockchain.GetGenesisHeaderCalled = func() data.HeaderHandler { + return &block.Header{ + TimeStamp: timestamp, + ShardID: shardID, + PrevHash: prevHash, + RootHash: rootHash, + AccumulatedFees: accumulatedFees, + DeveloperFees: developerFees, + } + } + + bootstrapComponents := getDefaultBootstrapComponents() + shardCoordinator := bootstrapComponents.ShardCoordinator().(*mock.ShardCoordinatorMock) + + coreComponents := getDefaultCoreComponents() + + n, _ := node.NewNode( + node.WithCoreComponents(coreComponents), + node.WithDataComponents(dataComponents), + node.WithBootstrapComponents(bootstrapComponents), + ) + epoch := uint32(37) + nonce := uint64(112233) + round := uint64(445566) + + t.Run("genesis block should work", func(t *testing.T) { + result, err := n.GetEpochStartDataAPI(0) + assert.Nil(t, err) + expectedResult := &common.EpochStartDataAPI{ + Nonce: 0, + Round: 0, + Timestamp: int64(timestamp), + Epoch: 0, + Shard: shardID, + PrevBlockHash: hex.EncodeToString(prevHash), + StateRootHash: hex.EncodeToString(rootHash), + ScheduledRootHash: "", + AccumulatedFees: accumulatedFees.String(), + DeveloperFees: developerFees.String(), + } + assert.Equal(t, 
expectedResult, result) + }) + t.Run("should work for metachain", func(t *testing.T) { + shardCoordinator.SelfShardId = core.MetachainShardId + + returnedHeader := &block.MetaBlock{ + Nonce: nonce, + Epoch: epoch, + Round: round, + TimeStamp: timestamp, + PrevHash: prevHash, + RootHash: rootHash, + AccumulatedFees: accumulatedFees, + DeveloperFees: developerFees, + } + + headerBytes, err := coreComponents.IntMarsh.Marshal(returnedHeader) + require.Nil(t, err) + + unit := &mockStorage.StorerStub{ + GetFromEpochCalled: func(key []byte, epoch uint32) ([]byte, error) { + expectedIdentifier := core.EpochStartIdentifier(epoch) + require.Equal(t, expectedIdentifier, string(key)) + + return headerBytes, nil + }, + } + + storageService := dataComponents.StorageService().(*mockStorage.ChainStorerStub) + storageService.GetStorerCalled = func(unitType dataRetriever.UnitType) (storage.Storer, error) { + require.Equal(t, dataRetriever.MetaBlockUnit, unitType) + return unit, nil + } + + result, err := n.GetEpochStartDataAPI(epoch) + assert.Nil(t, err) + + expectedResult := &common.EpochStartDataAPI{ + Nonce: nonce, + Round: round, + Timestamp: int64(timestamp), + Epoch: epoch, + Shard: core.MetachainShardId, + PrevBlockHash: hex.EncodeToString(prevHash), + StateRootHash: hex.EncodeToString(rootHash), + ScheduledRootHash: "", + AccumulatedFees: accumulatedFees.String(), + DeveloperFees: developerFees.String(), + } + assert.Equal(t, expectedResult, result) + }) + t.Run("should work for shard chain", func(t *testing.T) { + shardCoordinator.SelfShardId = 0 + + returnedHeader := &block.Header{ + Nonce: nonce, + Epoch: epoch, + Round: round, + ShardID: shardID, + TimeStamp: timestamp, + PrevHash: prevHash, + RootHash: rootHash, + AccumulatedFees: accumulatedFees, + DeveloperFees: developerFees, + } + + headerBytes, err := coreComponents.IntMarsh.Marshal(returnedHeader) + require.Nil(t, err) + + unit := &mockStorage.StorerStub{ + GetFromEpochCalled: func(key []byte, epoch uint32) ([]byte, error) { + expectedIdentifier := core.EpochStartIdentifier(epoch) + require.Equal(t, expectedIdentifier, string(key)) + + return headerBytes, nil + }, + } + + storageService := dataComponents.StorageService().(*mockStorage.ChainStorerStub) + storageService.GetStorerCalled = func(unitType dataRetriever.UnitType) (storage.Storer, error) { + require.Equal(t, dataRetriever.BlockHeaderUnit, unitType) + return unit, nil + } + + result, err := n.GetEpochStartDataAPI(epoch) + assert.Nil(t, err) + + expectedResult := &common.EpochStartDataAPI{ + Nonce: nonce, + Round: round, + Timestamp: int64(timestamp), + Epoch: epoch, + Shard: shardID, + PrevBlockHash: hex.EncodeToString(prevHash), + StateRootHash: hex.EncodeToString(rootHash), + ScheduledRootHash: "", + AccumulatedFees: accumulatedFees.String(), + DeveloperFees: developerFees.String(), + } + assert.Equal(t, expectedResult, result) + }) +} + func createMockHeartbeatV2Components(providedMessages []heartbeatData.PubKeyHeartbeat) *factoryMock.HeartbeatV2ComponentsStub { heartbeatV2Components := &factoryMock.HeartbeatV2ComponentsStub{} heartbeatV2Components.MonitorField = &integrationTestsMock.HeartbeatMonitorStub{ @@ -3917,6 +4540,393 @@ func createHeartbeatMessage(prefix string, idx int, isActive bool) heartbeatData } } +func TestNode_setTxGuardianData(t *testing.T) { + t.Parallel() + lenPubKey := 32 + coreComponents := getDefaultCoreComponents() + n, _ := node.NewNode( + node.WithCoreComponents(coreComponents), + ) + guardianPubKey := bytes.Repeat([]byte{1}, lenPubKey) + guardian, _ 
:= coreComponents.AddrPubKeyConv.Encode(guardianPubKey) + guardianSig := []byte("guardian sig") + guardianSigHex := hex.EncodeToString(guardianSig) + + t.Run("invalid guardian address should err", func(t *testing.T) { + tx := &transaction.Transaction{} + tx.Options |= transaction.MaskGuardedTransaction + + err := n.SetTxGuardianData("invalid guardian address", guardianSigHex, tx) + require.NotNil(t, err) + require.Nil(t, tx.GuardianAddr) + require.Nil(t, tx.GuardianSignature) + }) + t.Run("invalid guardian sig hex should err", func(t *testing.T) { + tx := &transaction.Transaction{} + tx.Options |= transaction.MaskGuardedTransaction + + err := n.SetTxGuardianData(guardian, "invalid guardian sig hex", tx) + require.NotNil(t, err) + require.Nil(t, tx.GuardianAddr) + require.Nil(t, tx.GuardianSignature) + }) + t.Run("no guardian option set on tx should err", func(t *testing.T) { + tx := &transaction.Transaction{} + + err := n.SetTxGuardianData(guardian, guardianSigHex, tx) + require.NotNil(t, err) + require.Nil(t, tx.GuardianAddr) + require.Nil(t, tx.GuardianSignature) + }) + t.Run("setTxGuardianData ok", func(t *testing.T) { + tx := &transaction.Transaction{} + tx.Options |= transaction.MaskGuardedTransaction + + err := n.SetTxGuardianData(guardian, guardianSigHex, tx) + require.Nil(t, err) + require.Equal(t, guardianPubKey, tx.GuardianAddr) + require.Equal(t, guardianSig, tx.GuardianSignature) + }) +} + +func TestNode_GetGuardianData(t *testing.T) { + userAddressBytes := bytes.Repeat([]byte{3}, 32) + testAccount, _ := state.NewUserAccount(userAddressBytes) + testAccountsDB := &stateMock.AccountsStub{ + GetAccountWithBlockInfoCalled: func(address []byte, options common.RootHashHolder) (vmcommon.AccountHandler, common.BlockInfo, error) { + return testAccount, nil, nil + }, + RecreateTrieCalled: func(_ []byte) error { + return nil + }, + } + coreComponents := getDefaultCoreComponents() + dataComponents := getDefaultDataComponents() + coreComponents.IntMarsh = getMarshalizer() + coreComponents.VmMarsh = getMarshalizer() + coreComponents.Hash = getHasher() + coreComponents.AddrPubKeyConv = createMockPubkeyConverter() + testStateComponents := getDefaultStateComponents() + args := state.ArgsAccountsRepository{ + FinalStateAccountsWrapper: testAccountsDB, + CurrentStateAccountsWrapper: testAccountsDB, + HistoricalStateAccountsWrapper: testAccountsDB, + } + testStateComponents.AccountsRepo, _ = state.NewAccountsRepository(args) + userAddress, _ := coreComponents.AddressPubKeyConverter().Encode(userAddressBytes) + g1 := &guardians.Guardian{ + Address: bytes.Repeat([]byte{1}, 32), + ActivationEpoch: 0, + } + g2 := &guardians.Guardian{ + Address: bytes.Repeat([]byte{2}, 32), + ActivationEpoch: 1, + } + addressG1, _ := coreComponents.AddressPubKeyConverter().Encode(g1.Address) + apiG1 := &api.Guardian{ + Address: addressG1, + ActivationEpoch: g1.ActivationEpoch, + } + addressG2, _ := coreComponents.AddressPubKeyConverter().Encode(g2.Address) + apiG2 := &api.Guardian{ + Address: addressG2, + ActivationEpoch: g2.ActivationEpoch, + } + t.Run("error on loadUserAccountHandlerByAddress", func(t *testing.T) { + accDB := &stateMock.AccountsStub{ + GetAccountWithBlockInfoCalled: func(address []byte, options common.RootHashHolder) (vmcommon.AccountHandler, common.BlockInfo, error) { + return testAccount, nil, nil + }, + RecreateTrieCalled: func(_ []byte) error { + return nil + }, + } + stateComponents := getDefaultStateComponents() + argsLocal := state.ArgsAccountsRepository{ + FinalStateAccountsWrapper: accDB, + 
CurrentStateAccountsWrapper: accDB, + HistoricalStateAccountsWrapper: accDB, + } + stateComponents.AccountsRepo, _ = state.NewAccountsRepository(argsLocal) + n, _ := node.NewNode( + node.WithDataComponents(dataComponents), + node.WithCoreComponents(coreComponents), + node.WithStateComponents(stateComponents), + ) + guardianData, blockInfo, err := n.GetGuardianData("address", api.AccountQueryOptions{}) + require.Equal(t, api.GuardianData{}, guardianData) + require.Equal(t, api.BlockInfo{}, blockInfo) + require.NotNil(t, err) + require.True(t, strings.Contains(err.Error(), "invalid address")) + }) + t.Run("error on loadUserAccountHandlerByAddress but account is new", func(t *testing.T) { + providedBlockInfo := holders.NewBlockInfo([]byte{0xaa}, 7, []byte{0xbb}) + accDB := &stateMock.AccountsStub{ + GetAccountWithBlockInfoCalled: func(address []byte, options common.RootHashHolder) (vmcommon.AccountHandler, common.BlockInfo, error) { + return nil, nil, state.NewErrAccountNotFoundAtBlock(providedBlockInfo) + }, + RecreateTrieCalled: func(_ []byte) error { + return nil + }, + } + stateComponents := getDefaultStateComponents() + argsLocal := state.ArgsAccountsRepository{ + FinalStateAccountsWrapper: accDB, + CurrentStateAccountsWrapper: accDB, + HistoricalStateAccountsWrapper: accDB, + } + stateComponents.AccountsRepo, _ = state.NewAccountsRepository(argsLocal) + n, _ := node.NewNode( + node.WithDataComponents(dataComponents), + node.WithCoreComponents(coreComponents), + node.WithStateComponents(stateComponents), + ) + guardianData, blockInfo, err := n.GetGuardianData(userAddress, api.AccountQueryOptions{}) + require.Equal(t, api.GuardianData{}, guardianData) + expectedBlockInfo := api.BlockInfo{ + Nonce: providedBlockInfo.GetNonce(), + Hash: hex.EncodeToString(providedBlockInfo.GetHash()), + RootHash: hex.EncodeToString(providedBlockInfo.GetRootHash()), + } + require.Equal(t, expectedBlockInfo, blockInfo) + require.Nil(t, err) + }) + t.Run("getPendingAndActiveGuardians with error", func(t *testing.T) { + expectedError := errors.New("expected error") + bootstrapComponents := getDefaultBootstrapComponents() + bootstrapComponents.GuardedAccountHandlerField = &guardianMocks.GuardedAccountHandlerStub{ + GetConfiguredGuardiansCalled: func(uah state.UserAccountHandler) (active *guardians.Guardian, pending *guardians.Guardian, err error) { + return nil, nil, expectedError + }, + } + n, _ := node.NewNode( + node.WithDataComponents(dataComponents), + node.WithCoreComponents(coreComponents), + node.WithStateComponents(testStateComponents), + node.WithBootstrapComponents(bootstrapComponents), + ) + guardianData, blockInfo, err := n.GetGuardianData(userAddress, api.AccountQueryOptions{}) + require.Equal(t, api.GuardianData{}, guardianData) + require.Equal(t, api.BlockInfo{}, blockInfo) + require.Equal(t, expectedError, err) + }) + t.Run("one active", func(t *testing.T) { + bootstrapComponents := getDefaultBootstrapComponents() + bootstrapComponents.GuardedAccountHandlerField = &guardianMocks.GuardedAccountHandlerStub{ + GetConfiguredGuardiansCalled: func(uah state.UserAccountHandler) (active *guardians.Guardian, pending *guardians.Guardian, err error) { + return g1, nil, nil + }, + } + n, _ := node.NewNode( + node.WithDataComponents(dataComponents), + node.WithCoreComponents(coreComponents), + node.WithStateComponents(testStateComponents), + node.WithBootstrapComponents(bootstrapComponents), + ) + guardianData, blockInfo, err := n.GetGuardianData(userAddress, api.AccountQueryOptions{}) + require.Equal(t, 
api.GuardianData{ + ActiveGuardian: apiG1, + PendingGuardian: nil, + Guarded: false, + }, guardianData) + require.Equal(t, api.BlockInfo{}, blockInfo) + require.Nil(t, err) + }) + t.Run("one pending", func(t *testing.T) { + bootstrapComponents := getDefaultBootstrapComponents() + bootstrapComponents.GuardedAccountHandlerField = &guardianMocks.GuardedAccountHandlerStub{ + GetConfiguredGuardiansCalled: func(uah state.UserAccountHandler) (active *guardians.Guardian, pending *guardians.Guardian, err error) { + return nil, g1, nil + }, + } + n, _ := node.NewNode( + node.WithDataComponents(dataComponents), + node.WithCoreComponents(coreComponents), + node.WithStateComponents(testStateComponents), + node.WithBootstrapComponents(bootstrapComponents), + ) + guardianData, blockInfo, err := n.GetGuardianData(userAddress, api.AccountQueryOptions{}) + require.Equal(t, api.GuardianData{ + ActiveGuardian: nil, + PendingGuardian: apiG1, + Guarded: false, + }, guardianData) + require.Equal(t, api.BlockInfo{}, blockInfo) + require.Nil(t, err) + }) + t.Run("one active and one pending", func(t *testing.T) { + bootstrapComponents := getDefaultBootstrapComponents() + bootstrapComponents.GuardedAccountHandlerField = &guardianMocks.GuardedAccountHandlerStub{ + GetConfiguredGuardiansCalled: func(uah state.UserAccountHandler) (active *guardians.Guardian, pending *guardians.Guardian, err error) { + return g1, g2, nil + }, + } + n, _ := node.NewNode( + node.WithDataComponents(dataComponents), + node.WithCoreComponents(coreComponents), + node.WithStateComponents(testStateComponents), + node.WithBootstrapComponents(bootstrapComponents), + ) + guardianData, blockInfo, err := n.GetGuardianData(userAddress, api.AccountQueryOptions{}) + require.Equal(t, api.GuardianData{ + ActiveGuardian: apiG1, + PendingGuardian: apiG2, + Guarded: false, + }, guardianData) + require.Equal(t, api.BlockInfo{}, blockInfo) + require.Nil(t, err) + }) + t.Run("one active and one pending and account guarded", func(t *testing.T) { + acc, _ := state.NewUserAccount(userAddressBytes) + acc.CodeMetadata = (&vmcommon.CodeMetadata{Guarded: true}).ToBytes() + accDB := &stateMock.AccountsStub{ + GetAccountWithBlockInfoCalled: func(address []byte, options common.RootHashHolder) (vmcommon.AccountHandler, common.BlockInfo, error) { + return acc, nil, nil + }, + RecreateTrieCalled: func(_ []byte) error { + return nil + }, + } + stateComponents := getDefaultStateComponents() + argsLocal := state.ArgsAccountsRepository{ + FinalStateAccountsWrapper: accDB, + CurrentStateAccountsWrapper: accDB, + HistoricalStateAccountsWrapper: accDB, + } + stateComponents.AccountsRepo, _ = state.NewAccountsRepository(argsLocal) + bootstrapComponents := getDefaultBootstrapComponents() + bootstrapComponents.GuardedAccountHandlerField = &guardianMocks.GuardedAccountHandlerStub{ + GetConfiguredGuardiansCalled: func(uah state.UserAccountHandler) (active *guardians.Guardian, pending *guardians.Guardian, err error) { + return g1, g2, nil + }, + } + n, _ := node.NewNode( + node.WithDataComponents(dataComponents), + node.WithCoreComponents(coreComponents), + node.WithStateComponents(stateComponents), + node.WithBootstrapComponents(bootstrapComponents), + ) + guardianData, blockInfo, err := n.GetGuardianData(userAddress, api.AccountQueryOptions{}) + require.Equal(t, api.GuardianData{ + ActiveGuardian: apiG1, + PendingGuardian: apiG2, + Guarded: true, + }, guardianData) + require.Equal(t, api.BlockInfo{}, blockInfo) + require.Nil(t, err) + }) +} + +func 
TestNode_getPendingAndActiveGuardians(t *testing.T) { + coreComponents := getDefaultCoreComponents() + bootstrapComponents := getDefaultBootstrapComponents() + expectedErr := errors.New("expected err") + g1PubKey := bytes.Repeat([]byte{1}, 32) + g2PubKey := bytes.Repeat([]byte{2}, 32) + g1 := &guardians.Guardian{ + Address: g1PubKey, + ActivationEpoch: 10, + } + g2 := &guardians.Guardian{ + Address: g2PubKey, + ActivationEpoch: 1, + } + + addressG1, _ := coreComponents.AddrPubKeyConv.Encode(g1.Address) + expectedG1 := &api.Guardian{ + Address: addressG1, + ActivationEpoch: g1.ActivationEpoch, + } + addressG2, _ := coreComponents.AddrPubKeyConv.Encode(g2.Address) + expectedG2 := &api.Guardian{ + Address: addressG2, + ActivationEpoch: g2.ActivationEpoch, + } + + t.Run("get configured guardians with error should propagate error", func(t *testing.T) { + bootstrapComponents.GuardedAccountHandlerField = &guardianMocks.GuardedAccountHandlerStub{ + GetConfiguredGuardiansCalled: func(uah state.UserAccountHandler) (active *guardians.Guardian, pending *guardians.Guardian, err error) { + return nil, nil, expectedErr + }, + } + n, _ := node.NewNode( + node.WithCoreComponents(coreComponents), + node.WithBootstrapComponents(bootstrapComponents), + ) + + activeGuardian, pendingGuardian, err := n.GetPendingAndActiveGuardians(&stateMock.UserAccountStub{}) + require.Nil(t, activeGuardian) + require.Nil(t, pendingGuardian) + require.Equal(t, expectedErr, err) + }) + t.Run("no pending and no active but no error", func(t *testing.T) { + bootstrapComponents.GuardedAccountHandlerField = &guardianMocks.GuardedAccountHandlerStub{ + GetConfiguredGuardiansCalled: func(uah state.UserAccountHandler) (active *guardians.Guardian, pending *guardians.Guardian, err error) { + return nil, nil, nil + }, + } + n, _ := node.NewNode( + node.WithCoreComponents(coreComponents), + node.WithBootstrapComponents(bootstrapComponents), + ) + activeGuardian, pendingGuardian, err := n.GetPendingAndActiveGuardians(&stateMock.UserAccountStub{}) + require.Nil(t, activeGuardian) + require.Nil(t, pendingGuardian) + require.Nil(t, err) + }) + t.Run("one active", func(t *testing.T) { + bootstrapComponents.GuardedAccountHandlerField = &guardianMocks.GuardedAccountHandlerStub{ + GetConfiguredGuardiansCalled: func(uah state.UserAccountHandler) (active *guardians.Guardian, pending *guardians.Guardian, err error) { + return g1, nil, nil + }, + } + n, _ := node.NewNode( + node.WithCoreComponents(coreComponents), + node.WithBootstrapComponents(bootstrapComponents), + ) + activeGuardian, pendingGuardian, err := n.GetPendingAndActiveGuardians(&stateMock.UserAccountStub{}) + require.NotNil(t, activeGuardian) + + require.Equal(t, expectedG1, activeGuardian) + require.Nil(t, pendingGuardian) + require.Nil(t, err) + }) + t.Run("one pending", func(t *testing.T) { + bootstrapComponents.GuardedAccountHandlerField = &guardianMocks.GuardedAccountHandlerStub{ + GetConfiguredGuardiansCalled: func(uah state.UserAccountHandler) (active *guardians.Guardian, pending *guardians.Guardian, err error) { + return nil, g1, nil + }, + } + n, _ := node.NewNode( + node.WithCoreComponents(coreComponents), + node.WithBootstrapComponents(bootstrapComponents), + ) + activeGuardian, pendingGuardian, err := n.GetPendingAndActiveGuardians(&stateMock.UserAccountStub{}) + require.NotNil(t, pendingGuardian) + require.Equal(t, expectedG1, pendingGuardian) + require.Nil(t, activeGuardian) + require.Nil(t, err) + }) + t.Run("one active one pending", func(t *testing.T) { + 
bootstrapComponents.GuardedAccountHandlerField = &guardianMocks.GuardedAccountHandlerStub{ + GetConfiguredGuardiansCalled: func(uah state.UserAccountHandler) (active *guardians.Guardian, pending *guardians.Guardian, err error) { + return g1, g2, nil + }, + } + n, _ := node.NewNode( + node.WithCoreComponents(coreComponents), + node.WithBootstrapComponents(bootstrapComponents), + ) + + activeGuardian, pendingGuardian, err := n.GetPendingAndActiveGuardians(&stateMock.UserAccountStub{}) + require.NotNil(t, activeGuardian) + require.NotNil(t, pendingGuardian) + require.Equal(t, expectedG2, pendingGuardian) + require.Equal(t, expectedG1, activeGuardian) + require.Nil(t, err) + }) +} + func getDefaultCoreComponents() *nodeMockFactory.CoreComponentsMock { return &nodeMockFactory.CoreComponentsMock{ IntMarsh: &testscommon.MarshalizerMock{}, @@ -3998,7 +5008,7 @@ func getDefaultDataComponents() *nodeMockFactory.DataComponentsMock { return &nodeMockFactory.DataComponentsMock{ BlockChain: chainHandler, - Store: &storage.ChainStorerStub{}, + Store: &mockStorage.ChainStorerStub{}, DataPool: &dataRetrieverMock.PoolsHolderMock{}, MbProvider: &mock.MiniBlocksProviderStub{}, } @@ -4011,9 +5021,20 @@ func getDefaultBootstrapComponents() *mainFactoryMocks.BootstrapComponentsStub { StorageManagers: map[string]common.StorageManager{"0": &storageManager.StorageManagerStub{}}, BootstrapCalled: nil, }, - BootstrapParams: &bootstrapMocks.BootstrapParamsHandlerMock{}, - NodeRole: "", - ShCoordinator: &mock.ShardCoordinatorMock{}, - HdrIntegrityVerifier: &mock.HeaderIntegrityVerifierStub{}, + BootstrapParams: &bootstrapMocks.BootstrapParamsHandlerMock{}, + NodeRole: "", + ShCoordinator: &mock.ShardCoordinatorMock{}, + HdrIntegrityVerifier: &mock.HeaderIntegrityVerifierStub{}, + GuardedAccountHandlerField: &guardianMocks.GuardedAccountHandlerStub{}, } } + +func TestNode_IsInterfaceNil(t *testing.T) { + t.Parallel() + + var n *node.Node + require.True(t, n.IsInterfaceNil()) + + n, _ = node.NewNode() + require.False(t, n.IsInterfaceNil()) +} diff --git a/node/trieIterators/delegatedListProcessor.go b/node/trieIterators/delegatedListProcessor.go index 2b254c61ff5..cf257a79e4b 100644 --- a/node/trieIterators/delegatedListProcessor.go +++ b/node/trieIterators/delegatedListProcessor.go @@ -11,6 +11,7 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/data/api" "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/common/errChan" "github.com/multiversx/mx-chain-go/epochStart" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/trie/keyBuilder" @@ -139,7 +140,7 @@ func (dlp *delegatedListProcessor) getDelegatorsList(delegationSC []byte, ctx co chLeaves := &common.TrieIteratorChannels{ LeavesChan: make(chan core.KeyValueHolder, common.TrieLeavesChannelDefaultCapacity), - ErrChan: make(chan error, 1), + ErrChan: errChan.NewErrChanWrapper(), } err = delegatorAccount.DataTrie().GetAllLeavesOnChannel(chLeaves, ctx, rootHash, keyBuilder.NewKeyBuilder()) if err != nil { @@ -156,7 +157,7 @@ func (dlp *delegatedListProcessor) getDelegatorsList(delegationSC []byte, ctx co delegators = append(delegators, leafKey) } - err = common.GetErrorFromChanNonBlocking(chLeaves.ErrChan) + err = chLeaves.ErrChan.ReadFromChanNonBlocking() if err != nil { return nil, err } diff --git a/node/trieIterators/delegatedListProcessor_test.go b/node/trieIterators/delegatedListProcessor_test.go index caf9bfa0f10..c240e0a4b29 100644 --- 
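// NOTE (review): from here on the diff replaces every raw
// `ErrChan: make(chan error, 1)` with errChan.NewErrChanWrapper(), and callers
// switch from close(ch) / common.GetErrorFromChanNonBlocking(ch) to ch.Close()
// and ch.ReadFromChanNonBlocking(). A minimal sketch of what such a wrapper
// might look like, assuming a single-slot buffer; the real errChan package in
// mx-chain-go may differ in its details:
package main

import (
	"errors"
	"fmt"
	"sync"
)

type errChanWrapper struct {
	ch       chan error
	closed   bool
	closeMut sync.Mutex
}

func newErrChanWrapper() *errChanWrapper {
	return &errChanWrapper{ch: make(chan error, 1)}
}

// WriteInChanNonBlocking drops the error if the buffer is already full,
// so producers never block on a slow (or absent) reader.
func (e *errChanWrapper) WriteInChanNonBlocking(err error) {
	select {
	case e.ch <- err:
	default:
	}
}

// ReadFromChanNonBlocking returns the stored error, or nil if none was written.
func (e *errChanWrapper) ReadFromChanNonBlocking() error {
	select {
	case err := <-e.ch:
		return err
	default:
		return nil
	}
}

// Close is idempotent, which a bare close(chan error) is not: closing a raw
// channel twice panics, and avoiding that is one plausible motivation for
// hiding the channel behind a wrapper.
func (e *errChanWrapper) Close() {
	e.closeMut.Lock()
	defer e.closeMut.Unlock()
	if e.closed {
		return
	}
	close(e.ch)
	e.closed = true
}

func main() {
	ec := newErrChanWrapper()
	ec.WriteInChanNonBlocking(errors.New("trie iteration failed"))
	fmt.Println(ec.ReadFromChanNonBlocking()) // trie iteration failed
	fmt.Println(ec.ReadFromChanNonBlocking()) // <nil>
	ec.Close()
	ec.Close() // safe: Close is idempotent
}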
a/node/trieIterators/delegatedListProcessor_test.go +++ b/node/trieIterators/delegatedListProcessor_test.go @@ -9,7 +9,6 @@ import ( "testing" "time" - "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-core-go/core/keyValStorage" "github.com/multiversx/mx-chain-core-go/data/api" "github.com/multiversx/mx-chain-go/common" @@ -43,24 +42,19 @@ func TestNewDelegatedListProcessor(t *testing.T) { }, exError: ErrNilAccountsAdapter, }, - { - name: "ShouldWork", - argsFunc: func() ArgTrieIteratorProcessor { - return createMockArgs() - }, - exError: nil, - }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - _, err := NewDelegatedListProcessor(tt.argsFunc()) + dlp, err := NewDelegatedListProcessor(tt.argsFunc()) require.True(t, errors.Is(err, tt.exError)) + require.Nil(t, dlp) }) } - dlp, _ := NewDelegatedListProcessor(createMockArgs()) - assert.False(t, check.IfNil(dlp)) + dlp, err := NewDelegatedListProcessor(createMockArgs()) + require.NotNil(t, dlp) + require.Nil(t, err) } func TestDelegatedListProc_GetDelegatorsListGetAllContractAddressesFailsShouldErr(t *testing.T) { @@ -231,6 +225,16 @@ func TestDelegatedListProc_GetDelegatorsListShouldWork(t *testing.T) { assert.Equal(t, []*api.Delegator{&expectedDelegator1, &expectedDelegator2}, delegatorsValues) } +func TestDelegatedListProcessor_IsInterfaceNil(t *testing.T) { + t.Parallel() + + var dlp *delegatedListProcessor + require.True(t, dlp.IsInterfaceNil()) + + dlp, _ = NewDelegatedListProcessor(createMockArgs()) + require.False(t, dlp.IsInterfaceNil()) +} + func createDelegationScAccount(address []byte, leaves [][]byte, rootHash []byte, timeSleep time.Duration) state.UserAccountHandler { acc, _ := state.NewUserAccount(address) acc.SetDataTrie(&trieMock.TrieStub{ @@ -246,7 +250,7 @@ func createDelegationScAccount(address []byte, leaves [][]byte, rootHash []byte, } close(leavesChannels.LeavesChan) - close(leavesChannels.ErrChan) + leavesChannels.ErrChan.Close() }() return nil diff --git a/node/trieIterators/directStakedListProcessor.go b/node/trieIterators/directStakedListProcessor.go index 2034a0a6a42..7193b5de2de 100644 --- a/node/trieIterators/directStakedListProcessor.go +++ b/node/trieIterators/directStakedListProcessor.go @@ -7,6 +7,7 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/data/api" "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/common/errChan" "github.com/multiversx/mx-chain-go/state" "github.com/multiversx/mx-chain-go/trie/keyBuilder" "github.com/multiversx/mx-chain-go/vm" @@ -59,7 +60,7 @@ func (dslp *directStakedListProcessor) getAllStakedAccounts(validatorAccount sta chLeaves := &common.TrieIteratorChannels{ LeavesChan: make(chan core.KeyValueHolder, common.TrieLeavesChannelDefaultCapacity), - ErrChan: make(chan error, 1), + ErrChan: errChan.NewErrChanWrapper(), } err = validatorAccount.DataTrie().GetAllLeavesOnChannel(chLeaves, ctx, rootHash, keyBuilder.NewKeyBuilder()) if err != nil { @@ -95,7 +96,7 @@ func (dslp *directStakedListProcessor) getAllStakedAccounts(validatorAccount sta stakedAccounts = append(stakedAccounts, val) } - err = common.GetErrorFromChanNonBlocking(chLeaves.ErrChan) + err = chLeaves.ErrChan.ReadFromChanNonBlocking() if err != nil { return nil, err } diff --git a/node/trieIterators/directStakedListProcessor_test.go b/node/trieIterators/directStakedListProcessor_test.go index edb70de582a..29398b7bcb6 100644 --- a/node/trieIterators/directStakedListProcessor_test.go +++ 
b/node/trieIterators/directStakedListProcessor_test.go @@ -9,7 +9,6 @@ import ( "testing" "time" - "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-core-go/core/keyValStorage" "github.com/multiversx/mx-chain-core-go/data/api" "github.com/multiversx/mx-chain-go/common" @@ -42,24 +41,19 @@ func TestNewDirectStakedListProcessor(t *testing.T) { }, exError: ErrNilAccountsAdapter, }, - { - name: "ShouldWork", - argsFunc: func() ArgTrieIteratorProcessor { - return createMockArgs() - }, - exError: nil, - }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - _, err := NewDirectStakedListProcessor(tt.argsFunc()) + dslp, err := NewDirectStakedListProcessor(tt.argsFunc()) require.True(t, errors.Is(err, tt.exError)) + require.Nil(t, dslp) }) } - dslp, _ := NewDirectStakedListProcessor(createMockArgs()) - assert.False(t, check.IfNil(dslp)) + dslp, err := NewDirectStakedListProcessor(createMockArgs()) + require.NotNil(t, dslp) + require.Nil(t, err) } func TestDirectStakedListProc_GetDelegatorsListContextShouldTimeout(t *testing.T) { @@ -167,7 +161,7 @@ func createValidatorScAccount(address []byte, leaves [][]byte, rootHash []byte, } close(leavesChannels.LeavesChan) - close(leavesChannels.ErrChan) + leavesChannels.ErrChan.Close() }() return nil @@ -176,3 +170,13 @@ func createValidatorScAccount(address []byte, leaves [][]byte, rootHash []byte, return acc } + +func TestDirectStakedListProcessor_IsInterfaceNil(t *testing.T) { + t.Parallel() + + var dslp *directStakedListProcessor + require.True(t, dslp.IsInterfaceNil()) + + dslp, _ = NewDirectStakedListProcessor(createMockArgs()) + require.False(t, dslp.IsInterfaceNil()) +} diff --git a/node/trieIterators/stakeValuesProcessor.go b/node/trieIterators/stakeValuesProcessor.go index c77169203d3..17109690b98 100644 --- a/node/trieIterators/stakeValuesProcessor.go +++ b/node/trieIterators/stakeValuesProcessor.go @@ -10,6 +10,7 @@ import ( "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-core-go/data/api" "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/common/errChan" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/state" "github.com/multiversx/mx-chain-go/trie/keyBuilder" @@ -98,7 +99,7 @@ func (svp *stakedValuesProcessor) computeBaseStakedAndTopUp(ctx context.Context) // TODO investigate if a call to GetAllLeavesKeysOnChannel (without values) might increase performance chLeaves := &common.TrieIteratorChannels{ LeavesChan: make(chan core.KeyValueHolder, common.TrieLeavesChannelDefaultCapacity), - ErrChan: make(chan error, 1), + ErrChan: errChan.NewErrChanWrapper(), } err = validatorAccount.DataTrie().GetAllLeavesOnChannel(chLeaves, ctx, rootHash, keyBuilder.NewKeyBuilder()) if err != nil { @@ -123,7 +124,7 @@ func (svp *stakedValuesProcessor) computeBaseStakedAndTopUp(ctx context.Context) totalTopUp = totalTopUp.Add(totalTopUp, info.topUpValue) } - err = common.GetErrorFromChanNonBlocking(chLeaves.ErrChan) + err = chLeaves.ErrChan.ReadFromChanNonBlocking() if err != nil { return nil, nil, err } diff --git a/node/trieIterators/stakeValuesProcessor_test.go b/node/trieIterators/stakeValuesProcessor_test.go index 82a47ef3e45..989cf102fde 100644 --- a/node/trieIterators/stakeValuesProcessor_test.go +++ b/node/trieIterators/stakeValuesProcessor_test.go @@ -196,7 +196,7 @@ func TestTotalStakedValueProcessor_GetTotalStakedValue_ContextShouldTimeout(t *t GetAllLeavesOnChannelCalled: func(leavesChannels 
*common.TrieIteratorChannels, _ context.Context, _ []byte, _ common.KeyBuilder) error { time.Sleep(time.Second) close(leavesChannels.LeavesChan) - close(leavesChannels.ErrChan) + leavesChannels.ErrChan.Close() return nil }, RootCalled: func() ([]byte, error) { @@ -298,7 +298,7 @@ func TestTotalStakedValueProcessor_GetTotalStakedValue(t *testing.T) { channels.LeavesChan <- leaf6 close(channels.LeavesChan) - close(channels.ErrChan) + channels.ErrChan.Close() }() return nil diff --git a/outport/process/alteredaccounts/alteredAccountsProvider_test.go b/outport/process/alteredaccounts/alteredAccountsProvider_test.go index 01fcefed050..63a924ffe6a 100644 --- a/outport/process/alteredaccounts/alteredAccountsProvider_test.go +++ b/outport/process/alteredaccounts/alteredAccountsProvider_test.go @@ -451,7 +451,7 @@ func testExtractAlteredAccountsFromPoolShouldReturnErrorWhenCastingToVmCommonUse } args.AccountsDB = &state.AccountsStub{ LoadAccountCalled: func(_ []byte) (vmcommon.AccountHandler, error) { - return &state.UserAccountStub{}, nil + return &state.StateUserAccountHandlerStub{}, nil }, } aap, _ := NewAlteredAccountsProvider(args) diff --git a/process/block/baseProcess.go b/process/block/baseProcess.go index 191e91972de..dd8d3c8bd75 100644 --- a/process/block/baseProcess.go +++ b/process/block/baseProcess.go @@ -21,6 +21,7 @@ import ( "github.com/multiversx/mx-chain-core-go/marshal" nodeFactory "github.com/multiversx/mx-chain-go/cmd/node/factory" "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/common/errChan" "github.com/multiversx/mx-chain-go/common/holders" "github.com/multiversx/mx-chain-go/common/logging" "github.com/multiversx/mx-chain-go/config" @@ -1732,7 +1733,7 @@ func (bp *baseProcessor) commitTrieEpochRootHashIfNeeded(metaBlock *block.MetaBl iteratorChannels := &common.TrieIteratorChannels{ LeavesChan: make(chan core.KeyValueHolder, common.TrieLeavesChannelDefaultCapacity), - ErrChan: make(chan error, 1), + ErrChan: errChan.NewErrChanWrapper(), } err = userAccountsDb.GetAllLeaves(iteratorChannels, context.Background(), rootHash) if err != nil { @@ -1761,7 +1762,7 @@ func (bp *baseProcessor) commitTrieEpochRootHashIfNeeded(metaBlock *block.MetaBl if len(rh) != 0 { dataTrie := &common.TrieIteratorChannels{ LeavesChan: make(chan core.KeyValueHolder, common.TrieLeavesChannelDefaultCapacity), - ErrChan: make(chan error, 1), + ErrChan: errChan.NewErrChanWrapper(), } errDataTrieGet := userAccountsDb.GetAllLeaves(dataTrie, context.Background(), rh) if errDataTrieGet != nil { @@ -1773,7 +1774,7 @@ func (bp *baseProcessor) commitTrieEpochRootHashIfNeeded(metaBlock *block.MetaBl currentSize += len(lf.Value()) } - err = common.GetErrorFromChanNonBlocking(dataTrie.ErrChan) + err = dataTrie.ErrChan.ReadFromChanNonBlocking() if err != nil { return err } @@ -1789,7 +1790,7 @@ func (bp *baseProcessor) commitTrieEpochRootHashIfNeeded(metaBlock *block.MetaBl balanceSum.Add(balanceSum, userAccount.GetBalance()) } - err = common.GetErrorFromChanNonBlocking(iteratorChannels.ErrChan) + err = iteratorChannels.ErrChan.ReadFromChanNonBlocking() if err != nil { return err } diff --git a/process/block/baseProcess_test.go b/process/block/baseProcess_test.go index a8525909b4f..b86791c52ef 100644 --- a/process/block/baseProcess_test.go +++ b/process/block/baseProcess_test.go @@ -41,6 +41,7 @@ import ( "github.com/multiversx/mx-chain-go/testscommon" dataRetrieverMock "github.com/multiversx/mx-chain-go/testscommon/dataRetriever" 
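// NOTE (review): the trie-iterator call sites in this diff all follow the same
// consumption pattern: drain LeavesChan completely, then do one non-blocking
// read of the error channel to surface any iteration failure. A compact,
// runnable sketch of that pattern with simplified types (the real code ranges
// over core.KeyValueHolder values, not strings):
package main

import (
	"errors"
	"fmt"
)

func main() {
	leavesChan := make(chan string, 4)
	errCh := make(chan error, 1)

	// Producer: emits leaves, reports at most one error without blocking,
	// then closes the leaves channel so the consumer's range loop terminates.
	go func() {
		leavesChan <- "leaf-1"
		leavesChan <- "leaf-2"
		select {
		case errCh <- errors.New("branch node missing"):
		default:
		}
		close(leavesChan)
	}()

	// Consumer: drain all leaves first...
	for leaf := range leavesChan {
		fmt.Println("got", leaf)
	}

	// ...then check for an error exactly once, without blocking. The error
	// write happens before close(leavesChan), so it is visible by this point.
	select {
	case err := <-errCh:
		fmt.Println("iteration failed:", err)
	default:
		fmt.Println("iteration completed cleanly")
	}
}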
"github.com/multiversx/mx-chain-go/testscommon/dblookupext" + "github.com/multiversx/mx-chain-go/testscommon/economicsmocks" "github.com/multiversx/mx-chain-go/testscommon/epochNotifier" "github.com/multiversx/mx-chain-go/testscommon/factory" "github.com/multiversx/mx-chain-go/testscommon/hashingMocks" @@ -439,7 +440,7 @@ func createMockTransactionCoordinatorArguments( FeeHandler: &mock.FeeAccumulatorStub{}, BlockSizeComputation: &testscommon.BlockSizeComputationStub{}, BalanceComputation: &testscommon.BalanceComputationStub{}, - EconomicsFee: &mock.FeeHandlerStub{}, + EconomicsFee: &economicsmocks.EconomicsHandlerStub{}, TxTypeHandler: &testscommon.TxTypeHandlerMock{}, TransactionsLogProcessor: &mock.TxLogsProcessorStub{}, EnableEpochsHandler: &testscommon.EnableEpochsHandlerStub{}, @@ -1892,7 +1893,7 @@ func TestBaseProcessor_commitTrieEpochRootHashIfNeededShouldWork(t *testing.T) { }, GetAllLeavesCalled: func(channels *common.TrieIteratorChannels, ctx context.Context, rootHash []byte) error { close(channels.LeavesChan) - close(channels.ErrChan) + channels.ErrChan.Close() return nil }, }, @@ -1936,7 +1937,7 @@ func TestBaseProcessor_commitTrieEpochRootHashIfNeeded_GetAllLeaves(t *testing.T }, GetAllLeavesCalled: func(channels *common.TrieIteratorChannels, ctx context.Context, rootHash []byte) error { close(channels.LeavesChan) - close(channels.ErrChan) + channels.ErrChan.Close() return expectedErr }, }, @@ -1973,7 +1974,7 @@ func TestBaseProcessor_commitTrieEpochRootHashIfNeeded_GetAllLeaves(t *testing.T return rootHash, nil }, GetAllLeavesCalled: func(channels *common.TrieIteratorChannels, ctx context.Context, rootHash []byte) error { - channels.ErrChan <- expectedErr + channels.ErrChan.WriteInChanNonBlocking(expectedErr) close(channels.LeavesChan) return nil }, @@ -2033,14 +2034,14 @@ func TestBaseProcessor_commitTrieEpochRootHashIfNeededShouldUseDataTrieIfNeededW if bytes.Equal(rootHash, rh) { calledWithUserAccountRootHash = true close(channels.LeavesChan) - close(channels.ErrChan) + channels.ErrChan.Close() return nil } go func() { channels.LeavesChan <- keyValStorage.NewKeyValStorage([]byte("address"), []byte("bytes")) close(channels.LeavesChan) - close(channels.ErrChan) + channels.ErrChan.Close() }() return nil diff --git a/process/block/postprocess/basePostProcess.go b/process/block/postprocess/basePostProcess.go index 38048f2f7e7..058118dd88b 100644 --- a/process/block/postprocess/basePostProcess.go +++ b/process/block/postprocess/basePostProcess.go @@ -23,7 +23,8 @@ type txShardInfo struct { } type txInfo struct { - tx data.TransactionHandler + tx data.TransactionHandler + index uint32 *txShardInfo } @@ -41,6 +42,7 @@ type basePostProcessor struct { mapProcessedResult map[string][][]byte intraShardMiniBlock *block.MiniBlock economicsFee process.FeeHandler + index uint32 } // SaveCurrentIntermediateTxToStorage saves all current intermediate results to the provided storage unit @@ -77,6 +79,7 @@ func (bpp *basePostProcessor) CreateBlockStarted() { bpp.interResultsForBlock = make(map[string]*txInfo) bpp.intraShardMiniBlock = nil bpp.mapProcessedResult = make(map[string][][]byte) + bpp.index = 0 bpp.mutInterResultsForBlock.Unlock() } @@ -274,7 +277,8 @@ func (bpp *basePostProcessor) addIntermediateTxToResultsForBlock( rcvShardID uint32, ) { addScrShardInfo := &txShardInfo{receiverShardID: rcvShardID, senderShardID: sndShardID} - scrInfo := &txInfo{tx: txHandler, txShardInfo: addScrShardInfo} + scrInfo := &txInfo{tx: txHandler, txShardInfo: addScrShardInfo, index: bpp.index} + 
diff --git a/process/block/postprocess/intermediateResults.go b/process/block/postprocess/intermediateResults.go
index 2180bd034ce..d051fa41e65 100644
--- a/process/block/postprocess/intermediateResults.go
+++ b/process/block/postprocess/intermediateResults.go
@@ -12,6 +12,7 @@ import (
 	"github.com/multiversx/mx-chain-core-go/data/smartContractResult"
 	"github.com/multiversx/mx-chain-core-go/hashing"
 	"github.com/multiversx/mx-chain-core-go/marshal"
+	"github.com/multiversx/mx-chain-go/common"
 	"github.com/multiversx/mx-chain-go/dataRetriever"
 	"github.com/multiversx/mx-chain-go/process"
 	"github.com/multiversx/mx-chain-go/sharding"
@@ -21,61 +22,72 @@ import (
 var _ process.IntermediateTransactionHandler = (*intermediateResultsProcessor)(nil)

 type intermediateResultsProcessor struct {
-	pubkeyConv core.PubkeyConverter
-	blockType  block.Type
-	currTxs    dataRetriever.TransactionCacher
+	pubkeyConv          core.PubkeyConverter
+	blockType           block.Type
+	currTxs             dataRetriever.TransactionCacher
+	enableEpochsHandler common.EnableEpochsHandler

 	*basePostProcessor
 }

+// ArgsNewIntermediateResultsProcessor defines the arguments needed to create a new intermediate results processor
+type ArgsNewIntermediateResultsProcessor struct {
+	Hasher              hashing.Hasher
+	Marshalizer         marshal.Marshalizer
+	Coordinator         sharding.Coordinator
+	PubkeyConv          core.PubkeyConverter
+	Store               dataRetriever.StorageService
+	BlockType           block.Type
+	CurrTxs             dataRetriever.TransactionCacher
+	EconomicsFee        process.FeeHandler
+	EnableEpochsHandler common.EnableEpochsHandler
+}
+
 // NewIntermediateResultsProcessor creates a new intermediate results processor
 func NewIntermediateResultsProcessor(
-	hasher hashing.Hasher,
-	marshalizer marshal.Marshalizer,
-	coordinator sharding.Coordinator,
-	pubkeyConv core.PubkeyConverter,
-	store dataRetriever.StorageService,
-	blockType block.Type,
-	currTxs dataRetriever.TransactionCacher,
-	economicsFee process.FeeHandler,
+	args ArgsNewIntermediateResultsProcessor,
 ) (*intermediateResultsProcessor, error) {
-	if check.IfNil(hasher) {
+	if check.IfNil(args.Hasher) {
 		return nil, process.ErrNilHasher
 	}
-	if check.IfNil(marshalizer) {
+	if check.IfNil(args.Marshalizer) {
 		return nil, process.ErrNilMarshalizer
 	}
-	if check.IfNil(coordinator) {
+	if check.IfNil(args.Coordinator) {
 		return nil, process.ErrNilShardCoordinator
 	}
-	if check.IfNil(pubkeyConv) {
+	if check.IfNil(args.PubkeyConv) {
 		return nil, process.ErrNilPubkeyConverter
 	}
-	if check.IfNil(store) {
+	if check.IfNil(args.Store) {
 		return nil, process.ErrNilStorage
 	}
-	if check.IfNil(currTxs) {
+	if check.IfNil(args.CurrTxs) {
 		return nil, process.ErrNilTxForCurrentBlockHandler
 	}
-	if check.IfNil(economicsFee) {
+	if check.IfNil(args.EconomicsFee) {
 		return nil, process.ErrNilEconomicsFeeHandler
 	}
+	if check.IfNil(args.EnableEpochsHandler) {
+		return nil, process.ErrNilEnableEpochsHandler
+	}

 	base := &basePostProcessor{
-		hasher:             hasher,
-		marshalizer:        marshalizer,
-		shardCoordinator:   coordinator,
-		store:              store,
+		hasher:             args.Hasher,
+		marshalizer:        args.Marshalizer,
+		shardCoordinator:   args.Coordinator,
+		store:              args.Store,
 		storageType:        dataRetriever.UnsignedTransactionUnit,
 		mapProcessedResult: make(map[string][][]byte),
-		economicsFee:       economicsFee,
+		economicsFee:       args.EconomicsFee,
 	}

 	irp := &intermediateResultsProcessor{
-		basePostProcessor: base,
-		pubkeyConv:        pubkeyConv,
-		blockType:         blockType,
-		currTxs:           currTxs,
+		basePostProcessor:   base,
+		pubkeyConv:          args.PubkeyConv,
+		blockType:           args.BlockType,
+		currTxs:             args.CurrTxs,
+		enableEpochsHandler: args.EnableEpochsHandler,
 	}

 	irp.interResultsForBlock = make(map[string]*txInfo)
@@ -130,9 +142,17 @@ func (irp *intermediateResultsProcessor) CreateAllInterMiniBlocks() []*block.Min
 		miniblock.ReceiverShardID = shId
 		miniblock.Type = irp.blockType

-		sort.Slice(miniblock.TxHashes, func(a, b int) bool {
-			return bytes.Compare(miniblock.TxHashes[a], miniblock.TxHashes[b]) < 0
-		})
+		if irp.enableEpochsHandler.IsKeepExecOrderOnCreatedSCRsEnabled() {
+			sort.Slice(miniblock.TxHashes, func(a, b int) bool {
+				scrInfoA := irp.interResultsForBlock[string(miniblock.TxHashes[a])]
+				scrInfoB := irp.interResultsForBlock[string(miniblock.TxHashes[b])]
+				return scrInfoA.index < scrInfoB.index
+			})
+		} else {
+			sort.Slice(miniblock.TxHashes, func(a, b int) bool {
+				return bytes.Compare(miniblock.TxHashes[a], miniblock.TxHashes[b]) < 0
+			})
+		}

 		log.Debug("intermediateResultsProcessor.CreateAllInterMiniBlocks",
 			"type", miniblock.Type,
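// NOTE (editor): illustrative sketch, not part of the patch. Migrating callers
// build the new args struct instead of passing eight positional parameters; the
// field names come from the diff above, every value below is a placeholder.
args := postprocess.ArgsNewIntermediateResultsProcessor{
	Hasher:              hasher,
	Marshalizer:         marshalizer,
	Coordinator:         shardCoordinator,
	PubkeyConv:          pubkeyConverter,
	Store:               storageService,
	BlockType:           block.SmartContractResultBlock,
	CurrTxs:             currentBlockTxs,
	EconomicsFee:        feeHandler,
	EnableEpochsHandler: enableEpochsHandler,
}
irp, err := postprocess.NewIntermediateResultsProcessor(args)
if err != nil {
	return err // a nil field surfaces the matching process.ErrNil... error
}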
diff --git a/process/block/postprocess/intermediateResults_test.go b/process/block/postprocess/intermediateResults_test.go
index 12773c61081..67633564fea 100644
--- a/process/block/postprocess/intermediateResults_test.go
+++ b/process/block/postprocess/intermediateResults_test.go
@@ -16,6 +16,7 @@ import (
 	"github.com/multiversx/mx-chain-go/process"
 	"github.com/multiversx/mx-chain-go/process/mock"
 	"github.com/multiversx/mx-chain-go/testscommon"
+	"github.com/multiversx/mx-chain-go/testscommon/economicsmocks"
 	"github.com/multiversx/mx-chain-go/testscommon/hashingMocks"
 	"github.com/multiversx/mx-chain-go/testscommon/storage"
 	"github.com/stretchr/testify/assert"
@@ -28,19 +29,28 @@ func createMockPubkeyConverter() *testscommon.PubkeyConverterMock {
 	return testscommon.NewPubkeyConverterMock(32)
 }

+func createMockArgsNewIntermediateResultsProcessor() ArgsNewIntermediateResultsProcessor {
+	args := ArgsNewIntermediateResultsProcessor{
+		Hasher:              &hashingMocks.HasherMock{},
+		Marshalizer:         &mock.MarshalizerMock{},
+		Coordinator:         mock.NewMultiShardsCoordinatorMock(5),
+		PubkeyConv:          createMockPubkeyConverter(),
+		Store:               &storage.ChainStorerStub{},
+		BlockType:           block.SmartContractResultBlock,
+		CurrTxs:             &mock.TxForCurrentBlockStub{},
+		EconomicsFee:        &economicsmocks.EconomicsHandlerStub{},
+		EnableEpochsHandler: &testscommon.EnableEpochsHandlerStub{IsKeepExecOrderOnCreatedSCRsEnabledField: true},
+	}
+
+	return args
+}
+
 func TestNewIntermediateResultsProcessor_NilHashes(t *testing.T) {
 	t.Parallel()

-	irp, err := NewIntermediateResultsProcessor(
-		nil,
-		&mock.MarshalizerMock{},
-		mock.NewMultiShardsCoordinatorMock(5),
-		createMockPubkeyConverter(),
-		&storage.ChainStorerStub{},
-		block.TxBlock,
-		&mock.TxForCurrentBlockStub{},
-		&mock.FeeHandlerStub{},
-	)
+	args := createMockArgsNewIntermediateResultsProcessor()
+	args.Hasher = nil
+	irp, err := NewIntermediateResultsProcessor(args)

 	assert.Nil(t, irp)
 	assert.Equal(t, process.ErrNilHasher, err)
@@ -49,16 +59,9 @@ func TestNewIntermediateResultsProcessor_NilMarshalizer(t *testing.T) {
 	t.Parallel()

-	irp, err := NewIntermediateResultsProcessor(
-		&hashingMocks.HasherMock{},
-		nil,
-		mock.NewMultiShardsCoordinatorMock(5),
-		createMockPubkeyConverter(),
-		&storage.ChainStorerStub{},
-		block.TxBlock,
-		&mock.TxForCurrentBlockStub{},
-		&mock.FeeHandlerStub{},
-	)
+	args := createMockArgsNewIntermediateResultsProcessor()
+	args.Marshalizer = nil
+	irp, err := NewIntermediateResultsProcessor(args)

 	assert.Nil(t, irp)
 	assert.Equal(t, process.ErrNilMarshalizer, err)
@@ -67,16 +70,9 @@ func TestNewIntermediateResultsProcessor_NilMarshalizer(t *testing.T) {
 func TestNewIntermediateResultsProcessor_NilShardCoordinator(t *testing.T) {
 	t.Parallel()

-	irp, err := NewIntermediateResultsProcessor(
-		&hashingMocks.HasherMock{},
-		&mock.MarshalizerMock{},
-		nil,
-		createMockPubkeyConverter(),
-		&storage.ChainStorerStub{},
-		block.TxBlock,
-		&mock.TxForCurrentBlockStub{},
-		&mock.FeeHandlerStub{},
-	)
+	args := createMockArgsNewIntermediateResultsProcessor()
+	args.Coordinator = nil
+	irp, err := NewIntermediateResultsProcessor(args)

 	assert.Nil(t, irp)
 	assert.Equal(t, process.ErrNilShardCoordinator, err)
@@ -85,16 +81,9 @@ func TestNewIntermediateResultsProcessor_NilShardCoordinator(t *testing.T) {
 func TestNewIntermediateResultsProcessor_NilPubkeyConverter(t *testing.T) {
 	t.Parallel()

-	irp, err := NewIntermediateResultsProcessor(
-		&hashingMocks.HasherMock{},
-		&mock.MarshalizerMock{},
-		mock.NewMultiShardsCoordinatorMock(5),
-		nil,
-		&storage.ChainStorerStub{},
-		block.TxBlock,
-		&mock.TxForCurrentBlockStub{},
-		&mock.FeeHandlerStub{},
-	)
+	args := createMockArgsNewIntermediateResultsProcessor()
+	args.PubkeyConv = nil
+	irp, err := NewIntermediateResultsProcessor(args)

 	assert.Nil(t, irp)
 	assert.Equal(t, process.ErrNilPubkeyConverter, err)
@@ -103,16 +92,9 @@ func TestNewIntermediateResultsProcessor_NilPubkeyConverter(t *testing.T) {
 func TestNewIntermediateResultsProcessor_NilStorer(t *testing.T) {
 	t.Parallel()

-	irp, err := NewIntermediateResultsProcessor(
-		&hashingMocks.HasherMock{},
-		&mock.MarshalizerMock{},
-		mock.NewMultiShardsCoordinatorMock(5),
-		createMockPubkeyConverter(),
-		nil,
-		block.TxBlock,
-		&mock.TxForCurrentBlockStub{},
-		&mock.FeeHandlerStub{},
-	)
+	args := createMockArgsNewIntermediateResultsProcessor()
+	args.Store = nil
+	irp, err := NewIntermediateResultsProcessor(args)

 	assert.Nil(t, irp)
 	assert.Equal(t, process.ErrNilStorage, err)
@@ -121,16 +103,9 @@ func TestNewIntermediateResultsProcessor_NilStorer(t *testing.T) {
 func TestNewIntermediateResultsProcessor_NilTxForCurrentBlockHandler(t *testing.T) {
 	t.Parallel()

-	irp, err := NewIntermediateResultsProcessor(
-		&hashingMocks.HasherMock{},
-		&mock.MarshalizerMock{},
-		mock.NewMultiShardsCoordinatorMock(5),
-		createMockPubkeyConverter(),
-		&storage.ChainStorerStub{},
-		block.TxBlock,
-		nil,
-		&mock.FeeHandlerStub{},
-	)
+	args := createMockArgsNewIntermediateResultsProcessor()
+	args.CurrTxs = nil
+	irp, err := NewIntermediateResultsProcessor(args)

 	assert.Nil(t, irp)
 	assert.Equal(t, process.ErrNilTxForCurrentBlockHandler, err)
@@ -139,34 +114,29 @@ func TestNewIntermediateResultsProcessor_NilTxForCurrentBlockHandler(t *testing.
 func TestNewIntermediateResultsProcessor_NilEconomicsFeeHandler(t *testing.T) {
 	t.Parallel()

-	irp, err := NewIntermediateResultsProcessor(
-		&hashingMocks.HasherMock{},
-		&mock.MarshalizerMock{},
-		mock.NewMultiShardsCoordinatorMock(5),
-		createMockPubkeyConverter(),
-		&storage.ChainStorerStub{},
-		block.TxBlock,
-		&mock.TxForCurrentBlockStub{},
-		nil,
-	)
+	args := createMockArgsNewIntermediateResultsProcessor()
+	args.EconomicsFee = nil
+	irp, err := NewIntermediateResultsProcessor(args)

 	assert.Nil(t, irp)
 	assert.Equal(t, process.ErrNilEconomicsFeeHandler, err)
 }

+func TestNewIntermediateResultsProcessor_NilEpochHandler(t *testing.T) {
+	t.Parallel()
+
+	args := createMockArgsNewIntermediateResultsProcessor()
+	args.EnableEpochsHandler = nil
+	irp, err := NewIntermediateResultsProcessor(args)
+
+	assert.Nil(t, irp)
+	assert.Equal(t, process.ErrNilEnableEpochsHandler, err)
+}
+
 func TestNewIntermediateResultsProcessor_Good(t *testing.T) {
 	t.Parallel()

-	irp, err := NewIntermediateResultsProcessor(
-		&hashingMocks.HasherMock{},
-		&mock.MarshalizerMock{},
-		mock.NewMultiShardsCoordinatorMock(5),
-		createMockPubkeyConverter(),
-		&storage.ChainStorerStub{},
-		block.TxBlock,
-		&mock.TxForCurrentBlockStub{},
-		&mock.FeeHandlerStub{},
-	)
+	irp, err := NewIntermediateResultsProcessor(createMockArgsNewIntermediateResultsProcessor())

 	assert.NotNil(t, irp)
 	assert.Nil(t, err)
@@ -176,16 +146,9 @@ func TestIntermediateResultsProcessor_getShardIdsFromAddressesGood(t *testing.T)
 	t.Parallel()

 	nrShards := 5
-	irp, err := NewIntermediateResultsProcessor(
-		&hashingMocks.HasherMock{},
-		&mock.MarshalizerMock{},
-		mock.NewMultiShardsCoordinatorMock(uint32(nrShards)),
-		createMockPubkeyConverter(),
-		&storage.ChainStorerStub{},
-		block.SmartContractResultBlock,
-		&mock.TxForCurrentBlockStub{},
-		&mock.FeeHandlerStub{},
-	)
+	args := createMockArgsNewIntermediateResultsProcessor()
+	args.Coordinator = mock.NewMultiShardsCoordinatorMock(uint32(nrShards))
+	irp, err := NewIntermediateResultsProcessor(args)

 	assert.NotNil(t, irp)
 	assert.Nil(t, err)
@@ -201,16 +164,9 @@ func TestIntermediateResultsProcessor_AddIntermediateTransactions(t *testing.T)
 	t.Parallel()

 	nrShards := 5
-	irp, err := NewIntermediateResultsProcessor(
-		&hashingMocks.HasherMock{},
-		&mock.MarshalizerMock{},
-		mock.NewMultiShardsCoordinatorMock(uint32(nrShards)),
-		createMockPubkeyConverter(),
-		&storage.ChainStorerStub{},
-		block.SmartContractResultBlock,
-		&mock.TxForCurrentBlockStub{},
-		&mock.FeeHandlerStub{},
-	)
+	args := createMockArgsNewIntermediateResultsProcessor()
+	args.Coordinator = mock.NewMultiShardsCoordinatorMock(uint32(nrShards))
+	irp, err := NewIntermediateResultsProcessor(args)

 	assert.NotNil(t, irp)
 	assert.Nil(t, err)
@@ -223,16 +179,9 @@ func TestIntermediateResultsProcessor_AddIntermediateTransactionsWrongType(t *te
 	t.Parallel()

 	nrShards := 5
-	irp, err := NewIntermediateResultsProcessor(
-		&hashingMocks.HasherMock{},
-		&mock.MarshalizerMock{},
-		mock.NewMultiShardsCoordinatorMock(uint32(nrShards)),
-		createMockPubkeyConverter(),
-		&storage.ChainStorerStub{},
-		block.SmartContractResultBlock,
-		&mock.TxForCurrentBlockStub{},
-		&mock.FeeHandlerStub{},
-	)
+	args := createMockArgsNewIntermediateResultsProcessor()
+	args.Coordinator = mock.NewMultiShardsCoordinatorMock(uint32(nrShards))
+	irp, err := NewIntermediateResultsProcessor(args)

 	assert.NotNil(t, irp)
 	assert.Nil(t, err)
@@ -248,16 +197,9 @@ func TestIntermediateResultsProcessor_AddIntermediateTransactionsNilSender(t *te
 	t.Parallel()

 	shardC := mock.NewMultiShardsCoordinatorMock(2)
-	irp, err := NewIntermediateResultsProcessor(
-		&hashingMocks.HasherMock{},
-		&mock.MarshalizerMock{},
-		shardC,
-		createMockPubkeyConverter(),
-		&storage.ChainStorerStub{},
-		block.SmartContractResultBlock,
-		&mock.TxForCurrentBlockStub{},
-		&mock.FeeHandlerStub{},
-	)
+	args := createMockArgsNewIntermediateResultsProcessor()
+	args.Coordinator = shardC
+	irp, err := NewIntermediateResultsProcessor(args)

 	assert.NotNil(t, irp)
 	assert.Nil(t, err)
@@ -281,16 +223,9 @@ func TestIntermediateResultsProcessor_AddIntermediateTransactionsNilReceiver(t *
 	t.Parallel()

 	shardC := mock.NewMultiShardsCoordinatorMock(2)
-	irp, err := NewIntermediateResultsProcessor(
-		&hashingMocks.HasherMock{},
-		&mock.MarshalizerMock{},
-		shardC,
-		createMockPubkeyConverter(),
-		&storage.ChainStorerStub{},
-		block.SmartContractResultBlock,
-		&mock.TxForCurrentBlockStub{},
-		&mock.FeeHandlerStub{},
-	)
+	args := createMockArgsNewIntermediateResultsProcessor()
+	args.Coordinator = shardC
+	irp, err := NewIntermediateResultsProcessor(args)

 	assert.NotNil(t, irp)
 	assert.Nil(t, err)
@@ -321,20 +256,14 @@ func TestIntermediateResultsProcessor_AddIntermediateTransactionsShardIdMismatch
 			return 1
 		},
 	}
-	irp, err := NewIntermediateResultsProcessor(
-		&hashingMocks.HasherMock{},
-		&mock.MarshalizerMock{},
-		shardC,
-		createMockPubkeyConverter(),
-		&storage.ChainStorerStub{},
-		block.SmartContractResultBlock,
-		&mock.TxForCurrentBlockStub{},
-		&mock.FeeHandlerStub{
-			MaxGasLimitPerMiniBlockCalled: func() uint64 {
-				return maxGasLimitPerBlock
-			},
+	args := createMockArgsNewIntermediateResultsProcessor()
+	args.Coordinator = shardC
+	args.EconomicsFee = &economicsmocks.EconomicsHandlerStub{
+		MaxGasLimitPerMiniBlockCalled: func() uint64 {
+			return maxGasLimitPerBlock
 		},
-	)
+	}
+	irp, err := NewIntermediateResultsProcessor(args)

 	assert.NotNil(t, irp)
 	assert.Nil(t, err)
@@ -355,16 +284,9 @@ func TestIntermediateResultsProcessor_AddIntermediateTransactionsNegativeValueIn
 	t.Parallel()

 	shardC := mock.NewMultiShardsCoordinatorMock(2)
-	irp, err := NewIntermediateResultsProcessor(
-		&hashingMocks.HasherMock{},
-		&mock.MarshalizerMock{},
-		shardC,
-		createMockPubkeyConverter(),
-		&storage.ChainStorerStub{},
-		block.SmartContractResultBlock,
-		&mock.TxForCurrentBlockStub{},
-		&mock.FeeHandlerStub{},
-	)
+	args := createMockArgsNewIntermediateResultsProcessor()
+	args.Coordinator = shardC
+	irp, err := NewIntermediateResultsProcessor(args)

 	assert.NotNil(t, irp)
 	assert.Nil(t, err)
@@ -395,20 +317,14 @@ func TestIntermediateResultsProcessor_AddIntermediateTransactionsAddrGood(t *tes
 	t.Parallel()

 	nrShards := 5
-	irp, err := NewIntermediateResultsProcessor(
-		&hashingMocks.HasherMock{},
-		&mock.MarshalizerMock{},
-		mock.NewMultiShardsCoordinatorMock(uint32(nrShards)),
-		createMockPubkeyConverter(),
-		&storage.ChainStorerStub{},
-		block.SmartContractResultBlock,
-		&mock.TxForCurrentBlockStub{},
-		&mock.FeeHandlerStub{
-			MaxGasLimitPerMiniBlockCalled: func() uint64 {
-				return maxGasLimitPerBlock
-			},
+	args := createMockArgsNewIntermediateResultsProcessor()
+	args.Coordinator = mock.NewMultiShardsCoordinatorMock(uint32(nrShards))
+	args.EconomicsFee = &economicsmocks.EconomicsHandlerStub{
+		MaxGasLimitPerMiniBlockCalled: func() uint64 {
+			return maxGasLimitPerBlock
 		},
-	)
+	}
+	irp, err := NewIntermediateResultsProcessor(args)

 	assert.NotNil(t, irp)
 	assert.Nil(t, err)
@@ -429,16 +345,9 @@ func TestIntermediateResultsProcessor_AddIntermediateTransactionsAddAndRevert(t
 	t.Parallel()

 	nrShards := 5
-	irp, err := NewIntermediateResultsProcessor(
-		&hashingMocks.HasherMock{},
-		&mock.MarshalizerMock{},
-		mock.NewMultiShardsCoordinatorMock(uint32(nrShards)),
-		createMockPubkeyConverter(),
-		&storage.ChainStorerStub{},
-		block.SmartContractResultBlock,
-		&mock.TxForCurrentBlockStub{},
-		&mock.FeeHandlerStub{},
-	)
+	args := createMockArgsNewIntermediateResultsProcessor()
+	args.Coordinator = mock.NewMultiShardsCoordinatorMock(uint32(nrShards))
+	irp, err := NewIntermediateResultsProcessor(args)

 	assert.NotNil(t, irp)
 	assert.Nil(t, err)
@@ -476,20 +385,14 @@ func TestIntermediateResultsProcessor_CreateAllInterMiniBlocksNothingInCache(t *
 	t.Parallel()

 	nrShards := 5
-	irp, err := NewIntermediateResultsProcessor(
-		&hashingMocks.HasherMock{},
-		&mock.MarshalizerMock{},
-		mock.NewMultiShardsCoordinatorMock(uint32(nrShards)),
-		createMockPubkeyConverter(),
-		&storage.ChainStorerStub{},
-		block.SmartContractResultBlock,
-		&mock.TxForCurrentBlockStub{},
-		&mock.FeeHandlerStub{
-			MaxGasLimitPerMiniBlockCalled: func() uint64 {
-				return maxGasLimitPerBlock
-			},
+	args := createMockArgsNewIntermediateResultsProcessor()
+	args.Coordinator = mock.NewMultiShardsCoordinatorMock(uint32(nrShards))
+	args.EconomicsFee = &economicsmocks.EconomicsHandlerStub{
+		MaxGasLimitPerMiniBlockCalled: func() uint64 {
+			return maxGasLimitPerBlock
 		},
-	)
+	}
+	irp, err := NewIntermediateResultsProcessor(args)

 	assert.NotNil(t, irp)
 	assert.Nil(t, err)
@@ -502,20 +405,14 @@ func TestIntermediateResultsProcessor_CreateAllInterMiniBlocksNotCrossShard(t *t
 	t.Parallel()

 	nrShards := 5
-	irp, err := NewIntermediateResultsProcessor(
-		&hashingMocks.HasherMock{},
-		&mock.MarshalizerMock{},
-		mock.NewMultiShardsCoordinatorMock(uint32(nrShards)),
-		createMockPubkeyConverter(),
-		&storage.ChainStorerStub{},
-		block.SmartContractResultBlock,
-		&mock.TxForCurrentBlockStub{},
-		&mock.FeeHandlerStub{
-			MaxGasLimitPerMiniBlockCalled: func() uint64 {
-				return maxGasLimitPerBlock
-			},
+	args := createMockArgsNewIntermediateResultsProcessor()
+	args.Coordinator = mock.NewMultiShardsCoordinatorMock(uint32(nrShards))
+	args.EconomicsFee = &economicsmocks.EconomicsHandlerStub{
+		MaxGasLimitPerMiniBlockCalled: func() uint64 {
+			return maxGasLimitPerBlock
 		},
-	)
+	}
+	irp, err := NewIntermediateResultsProcessor(args)

 	assert.NotNil(t, irp)
 	assert.Nil(t, err)
@@ -540,20 +437,14 @@ func TestIntermediateResultsProcessor_CreateAllInterMiniBlocksCrossShard(t *test
 	nrShards := 5
 	shardCoordinator := mock.NewMultiShardsCoordinatorMock(uint32(nrShards))
-	irp, err := NewIntermediateResultsProcessor(
-		&hashingMocks.HasherMock{},
-		&mock.MarshalizerMock{},
-		shardCoordinator,
-		createMockPubkeyConverter(),
-		&storage.ChainStorerStub{},
-		block.SmartContractResultBlock,
-		&mock.TxForCurrentBlockStub{},
-		&mock.FeeHandlerStub{
-			MaxGasLimitPerMiniBlockCalled: func() uint64 {
-				return maxGasLimitPerBlock
-			},
+	args := createMockArgsNewIntermediateResultsProcessor()
+	args.Coordinator = shardCoordinator
+	args.EconomicsFee = &economicsmocks.EconomicsHandlerStub{
+		MaxGasLimitPerMiniBlockCalled: func() uint64 {
+			return maxGasLimitPerBlock
 		},
-	)
+	}
+	irp, err := NewIntermediateResultsProcessor(args)

 	assert.NotNil(t, irp)
 	assert.Nil(t, err)
@@ -603,16 +494,9 @@ func TestIntermediateResultsProcessor_GetNumOfCrossInterMbsAndTxsShouldWork(t *t
 		return uint32(shardID)
 	}

-	irp, _ := NewIntermediateResultsProcessor(
-		&hashingMocks.HasherMock{},
-		&mock.MarshalizerMock{},
-		shardCoordinator,
-		createMockPubkeyConverter(),
-		&storage.ChainStorerStub{},
-		block.SmartContractResultBlock,
-		&mock.TxForCurrentBlockStub{},
-		&mock.FeeHandlerStub{},
-	)
+	args := createMockArgsNewIntermediateResultsProcessor()
+	args.Coordinator = shardCoordinator
+	irp, _ := NewIntermediateResultsProcessor(args)

 	txs := make([]data.TransactionHandler, 0)
 	txs = append(txs, &smartContractResult.SmartContractResult{Nonce: 0, SndAddr: snd, RcvAddr: []byte("0"), Value: big.NewInt(0), PrevTxHash: []byte("txHash")})
@@ -637,17 +521,9 @@ func TestIntermediateResultsProcessor_VerifyInterMiniBlocksNilBody(t *testing.T)
 	t.Parallel()

 	nrShards := 5
-	shardCoordinator := mock.NewMultiShardsCoordinatorMock(uint32(nrShards))
-	irp, err := NewIntermediateResultsProcessor(
-		&hashingMocks.HasherMock{},
-		&mock.MarshalizerMock{},
-		shardCoordinator,
-		createMockPubkeyConverter(),
-		&storage.ChainStorerStub{},
-		block.SmartContractResultBlock,
-		&mock.TxForCurrentBlockStub{},
-		&mock.FeeHandlerStub{},
-	)
+	args := createMockArgsNewIntermediateResultsProcessor()
+	args.Coordinator = mock.NewMultiShardsCoordinatorMock(uint32(nrShards))
+	irp, err := NewIntermediateResultsProcessor(args)

 	assert.NotNil(t, irp)
 	assert.Nil(t, err)
@@ -661,17 +537,9 @@ func TestIntermediateResultsProcessor_VerifyInterMiniBlocksBodyShouldpassAsNotCr
 	t.Parallel()

 	nrShards := 5
-	shardCoordinator := mock.NewMultiShardsCoordinatorMock(uint32(nrShards))
-	irp, err := NewIntermediateResultsProcessor(
-		&hashingMocks.HasherMock{},
-		&mock.MarshalizerMock{},
-		shardCoordinator,
-		createMockPubkeyConverter(),
-		&storage.ChainStorerStub{},
-		block.SmartContractResultBlock,
-		&mock.TxForCurrentBlockStub{},
-		&mock.FeeHandlerStub{},
-	)
+	args := createMockArgsNewIntermediateResultsProcessor()
+	args.Coordinator = mock.NewMultiShardsCoordinatorMock(uint32(nrShards))
+	irp, err := NewIntermediateResultsProcessor(args)

 	assert.NotNil(t, irp)
 	assert.Nil(t, err)
@@ -679,8 +547,8 @@ func TestIntermediateResultsProcessor_VerifyInterMiniBlocksBodyShouldpassAsNotCr
 	body := &block.Body{}
 	body.MiniBlocks = append(body.MiniBlocks, &block.MiniBlock{
 		Type:            block.SmartContractResultBlock,
-		ReceiverShardID: shardCoordinator.SelfId(),
-		SenderShardID:   shardCoordinator.SelfId() + 1})
+		ReceiverShardID: args.Coordinator.SelfId(),
+		SenderShardID:   args.Coordinator.SelfId() + 1})

 	err = irp.VerifyInterMiniBlocks(body)
 	assert.Nil(t, err)
@@ -690,23 +558,14 @@ func TestIntermediateResultsProcessor_VerifyInterMiniBlocksBodyMissingMiniblock(
 	t.Parallel()

 	nrShards := 5
-	shardCoordinator := mock.NewMultiShardsCoordinatorMock(uint32(nrShards))
-	irp, err := NewIntermediateResultsProcessor(
-		&hashingMocks.HasherMock{},
-		&mock.MarshalizerMock{},
-		shardCoordinator,
-		createMockPubkeyConverter(),
-		&storage.ChainStorerStub{},
-		block.SmartContractResultBlock,
-		&mock.TxForCurrentBlockStub{},
-		&mock.FeeHandlerStub{},
-	)
-
+	args := createMockArgsNewIntermediateResultsProcessor()
+	args.Coordinator = mock.NewMultiShardsCoordinatorMock(uint32(nrShards))
+	irp, err := NewIntermediateResultsProcessor(args)
 	assert.NotNil(t, irp)
 	assert.Nil(t, err)

 	body := &block.Body{}
-	otherShard := shardCoordinator.SelfId() + 1
+	otherShard := args.Coordinator.SelfId() + 1
 	body.MiniBlocks = append(body.MiniBlocks, &block.MiniBlock{Type: block.SmartContractResultBlock, ReceiverShardID: otherShard})

 	err = irp.VerifyInterMiniBlocks(body)
@@ -718,20 +577,14 @@ func TestIntermediateResultsProcessor_VerifyInterMiniBlocksBodyMiniBlockMissmatc
 	nrShards := 5
 	shardCoordinator := mock.NewMultiShardsCoordinatorMock(uint32(nrShards))
-	irp, err := NewIntermediateResultsProcessor(
-		&hashingMocks.HasherMock{},
-		&mock.MarshalizerMock{},
-		shardCoordinator,
-		createMockPubkeyConverter(),
-		&storage.ChainStorerStub{},
-		block.SmartContractResultBlock,
-		&mock.TxForCurrentBlockStub{},
-		&mock.FeeHandlerStub{
-			MaxGasLimitPerMiniBlockCalled: func() uint64 {
-				return maxGasLimitPerBlock
-			},
+	args := createMockArgsNewIntermediateResultsProcessor()
+	args.Coordinator = shardCoordinator
+	args.EconomicsFee = &economicsmocks.EconomicsHandlerStub{
+		MaxGasLimitPerMiniBlockCalled: func() uint64 {
+			return maxGasLimitPerBlock
 		},
-	)
+	}
+	irp, err := NewIntermediateResultsProcessor(args)

 	assert.NotNil(t, irp)
 	assert.Nil(t, err)
@@ -768,23 +621,19 @@ func TestIntermediateResultsProcessor_VerifyInterMiniBlocksBodyShouldPass(t *tes
 	nrShards := 5
 	shardCoordinator := mock.NewMultiShardsCoordinatorMock(uint32(nrShards))
-	irp, err := NewIntermediateResultsProcessor(
-		&hashingMocks.HasherMock{},
-		&mock.MarshalizerMock{},
-		shardCoordinator,
-		createMockPubkeyConverter(),
-		&storage.ChainStorerStub{},
-		block.SmartContractResultBlock,
-		&mock.TxForCurrentBlockStub{},
-		&mock.FeeHandlerStub{
-			MaxGasLimitPerMiniBlockCalled: func() uint64 {
-				return maxGasLimitPerBlock
-			},
-			MaxGasLimitPerBlockCalled: func() uint64 {
-				return maxGasLimitPerBlock
-			},
+	args := createMockArgsNewIntermediateResultsProcessor()
+	args.Coordinator = shardCoordinator
+	args.EconomicsFee = &economicsmocks.EconomicsHandlerStub{
+		MaxGasLimitPerMiniBlockCalled: func() uint64 {
+			return maxGasLimitPerBlock
+		},
+		MaxGasLimitPerBlockCalled: func(_ uint32) uint64 {
+			return maxGasLimitPerBlock
 		},
-	)
+	}
+	enableEpochHandler := &testscommon.EnableEpochsHandlerStub{IsKeepExecOrderOnCreatedSCRsEnabledField: false}
+	args.EnableEpochsHandler = enableEpochHandler
+	irp, err := NewIntermediateResultsProcessor(args)

 	assert.NotNil(t, irp)
 	assert.Nil(t, err)
@@ -827,6 +676,19 @@
 	err = irp.VerifyInterMiniBlocks(body)
 	assert.Nil(t, err)
+
+	enableEpochHandler.IsKeepExecOrderOnCreatedSCRsEnabledField = true
+	err = irp.VerifyInterMiniBlocks(body)
+	assert.Equal(t, process.ErrMiniBlockHashMismatch, err)
+
+	miniBlock.TxHashes = make([][]byte, 0)
+	for i := 0; i < len(txs); i++ {
+		txHash, _ := core.CalculateHash(&mock.MarshalizerMock{}, &hashingMocks.HasherMock{}, txs[i])
+		miniBlock.TxHashes = append(miniBlock.TxHashes, txHash)
+	}
+
+	err = irp.VerifyInterMiniBlocks(body)
+	assert.Nil(t, err)
 }

 func TestIntermediateResultsProcessor_SaveCurrentIntermediateTxToStorageShouldSave(t *testing.T) {
@@ -835,24 +697,18 @@ func TestIntermediateResultsProcessor_SaveCurrentIntermediateTxToStorageShouldSa
 	nrShards := 5
 	shardCoordinator := mock.NewMultiShardsCoordinatorMock(uint32(nrShards))
 	putCounter := 0
-	irp, err := NewIntermediateResultsProcessor(
-		&hashingMocks.HasherMock{},
-		&mock.MarshalizerMock{},
-		shardCoordinator,
-		createMockPubkeyConverter(),
-		&storage.ChainStorerStub{
-			PutCalled: func(unitType dataRetriever.UnitType, key []byte, value []byte) error {
-				if unitType == dataRetriever.UnsignedTransactionUnit {
-					putCounter++
-				}
-				return nil
-			},
-		},
-		block.SmartContractResultBlock,
-		&mock.TxForCurrentBlockStub{},
-		&mock.FeeHandlerStub{},
-	)
+	args := createMockArgsNewIntermediateResultsProcessor()
+	args.Coordinator = shardCoordinator
+	args.Store = &storage.ChainStorerStub{
+		PutCalled: func(unitType dataRetriever.UnitType, key []byte, value []byte) error {
+			if unitType == dataRetriever.UnsignedTransactionUnit {
+				putCounter++
+			}
+			return nil
+		},
+	}
+	irp, err := NewIntermediateResultsProcessor(args)

 	assert.NotNil(t, irp)
 	assert.Nil(t, err)
@@ -884,18 +740,11 @@ func TestIntermediateResultsProcessor_CreateMarshalizedDataNothingToMarshal(t *t
 	nrShards := 5
 	shardCoordinator := mock.NewMultiShardsCoordinatorMock(uint32(nrShards))
-	hasher := &hashingMocks.HasherMock{}
-	marshalizer := &mock.MarshalizerMock{}
-	irp, err := NewIntermediateResultsProcessor(
-		hasher,
-		marshalizer,
-		shardCoordinator,
-		createMockPubkeyConverter(),
-		&storage.ChainStorerStub{},
-		block.SmartContractResultBlock,
-		&mock.TxForCurrentBlockStub{},
-		&mock.FeeHandlerStub{},
-	)
+	args := createMockArgsNewIntermediateResultsProcessor()
+	args.Coordinator = shardCoordinator
+	args.Hasher = &hashingMocks.HasherMock{}
+	args.Marshalizer = &mock.MarshalizerMock{}
+	irp, err := NewIntermediateResultsProcessor(args)

 	assert.NotNil(t, irp)
 	assert.Nil(t, err)
@@ -918,16 +767,11 @@ func TestIntermediateResultsProcessor_CreateMarshalizedData(t *testing.T) {
 	shardCoordinator := mock.NewMultiShardsCoordinatorMock(uint32(nrShards))
 	hasher := &hashingMocks.HasherMock{}
 	marshalizer := &mock.MarshalizerMock{}
-	irp, err := NewIntermediateResultsProcessor(
-		hasher,
-		marshalizer,
-		shardCoordinator,
-		createMockPubkeyConverter(),
-		&storage.ChainStorerStub{},
-		block.SmartContractResultBlock,
-		&mock.TxForCurrentBlockStub{},
-		&mock.FeeHandlerStub{},
-	)
+	args := createMockArgsNewIntermediateResultsProcessor()
+	args.Coordinator = shardCoordinator
+	args.Hasher = hasher
+	args.Marshalizer = marshalizer
+	irp, err := NewIntermediateResultsProcessor(args)

 	assert.NotNil(t, irp)
 	assert.Nil(t, err)
@@ -986,16 +830,11 @@ func TestIntermediateResultsProcessor_GetAllCurrentUsedTxs(t *testing.T) {
 	shardCoordinator := mock.NewMultiShardsCoordinatorMock(uint32(nrShards))
 	hasher := &hashingMocks.HasherMock{}
 	marshalizer := &mock.MarshalizerMock{}
-	irp, err := NewIntermediateResultsProcessor(
-		hasher,
-		marshalizer,
-		shardCoordinator,
-		createMockPubkeyConverter(),
-		&storage.ChainStorerStub{},
-		block.SmartContractResultBlock,
-		&mock.TxForCurrentBlockStub{},
-		&mock.FeeHandlerStub{},
-	)
+	args := createMockArgsNewIntermediateResultsProcessor()
+	args.Coordinator = shardCoordinator
+	args.Hasher = hasher
+	args.Marshalizer = marshalizer
+	irp, err := NewIntermediateResultsProcessor(args)

 	assert.NotNil(t, irp)
 	assert.Nil(t, err)
@@ -1030,23 +869,19 @@ func TestIntermediateResultsProcessor_SplitMiniBlocksIfNeededShouldWork(t *testi
 	shardCoordinator := mock.NewMultiShardsCoordinatorMock(uint32(nrShards))
 	hasher := &hashingMocks.HasherMock{}
 	marshalizer := &mock.MarshalizerMock{}
-	irp, _ := NewIntermediateResultsProcessor(
-		hasher,
-		marshalizer,
-		shardCoordinator,
-		createMockPubkeyConverter(),
-		&storage.ChainStorerStub{},
-		block.SmartContractResultBlock,
-		&mock.TxForCurrentBlockStub{},
-		&mock.FeeHandlerStub{
-			MaxGasLimitPerMiniBlockForSafeCrossShardCalled: func() uint64 {
-				return gasLimit
-			},
-			MaxGasLimitPerTxCalled: func() uint64 {
-				return gasLimit
-			},
+	args := createMockArgsNewIntermediateResultsProcessor()
+	args.Coordinator = shardCoordinator
+	args.Hasher = hasher
+	args.Marshalizer = marshalizer
+	args.EconomicsFee = &economicsmocks.EconomicsHandlerStub{
+		MaxGasLimitPerMiniBlockForSafeCrossShardCalled: func() uint64 {
+			return gasLimit
 		},
-	)
+		MaxGasLimitPerTxCalled: func() uint64 {
+			return gasLimit
+		},
+	}
+	irp, _ := NewIntermediateResultsProcessor(args)

 	tx1 := transaction.Transaction{Nonce: 0, GasLimit: 100}
 	tx2 := transaction.Transaction{Nonce: 1, GasLimit: 100}
@@ -1085,16 +920,7 @@ func TestIntermediateResultsProcessor_addIntermediateTxToResultsForBlock(t *test
 	t.Parallel()

-	irp, _ := NewIntermediateResultsProcessor(
-		&hashingMocks.HasherMock{},
-		&mock.MarshalizerMock{},
-		mock.NewMultiShardsCoordinatorMock(5),
-		createMockPubkeyConverter(),
-		&storage.ChainStorerStub{},
-		block.TxBlock,
-		&mock.TxForCurrentBlockStub{},
-		&mock.FeeHandlerStub{},
-	)
+	irp, _ := NewIntermediateResultsProcessor(createMockArgsNewIntermediateResultsProcessor())

 	key := []byte("key")
 	irp.InitProcessedResults(key)
diff --git a/process/block/postprocess/oneMBPostProcessor_test.go b/process/block/postprocess/oneMBPostProcessor_test.go
index f9b752004e4..5151fdc5f88 100644
--- a/process/block/postprocess/oneMBPostProcessor_test.go
+++ b/process/block/postprocess/oneMBPostProcessor_test.go
@@ -12,6 +12,7 @@ import (
 	"github.com/multiversx/mx-chain-go/dataRetriever"
 	"github.com/multiversx/mx-chain-go/process"
 	"github.com/multiversx/mx-chain-go/process/mock"
+	"github.com/multiversx/mx-chain-go/testscommon/economicsmocks"
 	"github.com/multiversx/mx-chain-go/testscommon/hashingMocks"
 	"github.com/multiversx/mx-chain-go/testscommon/storage"
 	"github.com/stretchr/testify/assert"
@@ -27,7 +28,7 @@ func TestNewOneMBPostProcessor_NilHasher(t *testing.T) {
 		&storage.ChainStorerStub{},
 		block.TxBlock,
 		dataRetriever.TransactionUnit,
-		&mock.FeeHandlerStub{},
+		&economicsmocks.EconomicsHandlerStub{},
 	)

 	assert.Nil(t, irp)
@@ -44,7 +45,7 @@ func TestNewOneMBPostProcessor_NilMarshalizer(t *testing.T) {
 		&storage.ChainStorerStub{},
 		block.TxBlock,
 		dataRetriever.TransactionUnit,
-		&mock.FeeHandlerStub{},
+		&economicsmocks.EconomicsHandlerStub{},
 	)

 	assert.Nil(t, irp)
@@ -61,7 +62,7 @@ func TestNewOneMBPostProcessor_NilShardCoord(t *testing.T) {
 		&storage.ChainStorerStub{},
 		block.TxBlock,
 		dataRetriever.TransactionUnit,
-		&mock.FeeHandlerStub{},
+		&economicsmocks.EconomicsHandlerStub{},
 	)

 	assert.Nil(t, irp)
@@ -78,7 +79,7 @@ func TestNewOneMBPostProcessor_NilStorer(t *testing.T) {
 		nil,
 		block.TxBlock,
 		dataRetriever.TransactionUnit,
-		&mock.FeeHandlerStub{},
+		&economicsmocks.EconomicsHandlerStub{},
 	)

 	assert.Nil(t, irp)
@@ -112,7 +113,7 @@ func TestNewOneMBPostProcessor_OK(t *testing.T) {
 		&storage.ChainStorerStub{},
 		block.TxBlock,
 		dataRetriever.TransactionUnit,
-		&mock.FeeHandlerStub{},
+		&economicsmocks.EconomicsHandlerStub{},
 	)

 	assert.Nil(t, err)
@@ -129,7 +130,7 @@ func TestOneMBPostProcessor_CreateAllInterMiniBlocks(t *testing.T) {
 		&storage.ChainStorerStub{},
 		block.TxBlock,
 		dataRetriever.TransactionUnit,
-		&mock.FeeHandlerStub{},
+		&economicsmocks.EconomicsHandlerStub{},
 	)

 	mbs := irp.CreateAllInterMiniBlocks()
@@ -146,7 +147,7 @@ func TestOneMBPostProcessor_CreateAllInterMiniBlocksOneMinBlock(t *testing.T) {
 		&storage.ChainStorerStub{},
 		block.TxBlock,
 		dataRetriever.TransactionUnit,
-		&mock.FeeHandlerStub{},
+		&economicsmocks.EconomicsHandlerStub{},
 	)

 	txs := make([]data.TransactionHandler, 0)
@@ -170,7 +171,7 @@ func TestOneMBPostProcessor_VerifyNilBody(t *testing.T) {
 		&storage.ChainStorerStub{},
 		block.TxBlock,
 		dataRetriever.TransactionUnit,
-		&mock.FeeHandlerStub{},
+		&economicsmocks.EconomicsHandlerStub{},
 	)

 	err := irp.VerifyInterMiniBlocks(&block.Body{})
@@ -187,7 +188,7 @@ func TestOneMBPostProcessor_VerifyTooManyBlock(t *testing.T) {
 		&storage.ChainStorerStub{},
 		block.TxBlock,
 		dataRetriever.TransactionUnit,
-		&mock.FeeHandlerStub{},
+		&economicsmocks.EconomicsHandlerStub{},
 	)

 	txs := make([]data.TransactionHandler, 0)
@@ -232,7 +233,7 @@ func TestOneMBPostProcessor_VerifyNilMiniBlocks(t *testing.T) {
 		&storage.ChainStorerStub{},
 		block.TxBlock,
 		dataRetriever.TransactionUnit,
-		&mock.FeeHandlerStub{},
+		&economicsmocks.EconomicsHandlerStub{},
 	)

 	miniBlock := &block.MiniBlock{
@@ -256,7 +257,7 @@ func TestOneMBPostProcessor_VerifyOk(t *testing.T) {
 		&storage.ChainStorerStub{},
 		block.TxBlock,
 		dataRetriever.TransactionUnit,
-		&mock.FeeHandlerStub{},
+		&economicsmocks.EconomicsHandlerStub{},
 	)

 	txs := make([]data.TransactionHandler, 0)
diff --git a/process/block/postprocess/testIntermediateResult.go b/process/block/postprocess/testIntermediateResult.go
index 92c0d893546..f6b8224d27c 100644
--- a/process/block/postprocess/testIntermediateResult.go
+++ b/process/block/postprocess/testIntermediateResult.go
@@ -1,14 +1,7 @@
 package postprocess

 import (
-	"github.com/multiversx/mx-chain-core-go/core"
 	"github.com/multiversx/mx-chain-core-go/data"
-	"github.com/multiversx/mx-chain-core-go/data/block"
-	"github.com/multiversx/mx-chain-core-go/hashing"
-	"github.com/multiversx/mx-chain-core-go/marshal"
-	"github.com/multiversx/mx-chain-go/dataRetriever"
-	"github.com/multiversx/mx-chain-go/process"
-	"github.com/multiversx/mx-chain-go/sharding"
 )

 // TestIntermediateResProc extends intermediateResultsProcessor and is used in integration tests
@@ -20,16 +13,9 @@ type TestIntermediateResProc struct {

 // NewTestIntermediateResultsProcessor creates a new instance of TestIntermediateResProc
 func NewTestIntermediateResultsProcessor(
-	hasher hashing.Hasher,
-	marshalizer marshal.Marshalizer,
-	coordinator sharding.Coordinator,
-	pubkeyConv core.PubkeyConverter,
-	store dataRetriever.StorageService,
-	blockType block.Type,
-	currTxs dataRetriever.TransactionCacher,
-	economicsFee process.FeeHandler,
+	args ArgsNewIntermediateResultsProcessor,
 ) (*TestIntermediateResProc, error) {
-	interimProc, err := NewIntermediateResultsProcessor(hasher, marshalizer, coordinator, pubkeyConv, store, blockType, currTxs, economicsFee)
+	interimProc, err := NewIntermediateResultsProcessor(args)
 	return &TestIntermediateResProc{interimProc}, err
 }
diff --git a/process/block/preprocess/gasComputation_test.go b/process/block/preprocess/gasComputation_test.go
index e0facce81e6..91c504d97c6 100644
--- a/process/block/preprocess/gasComputation_test.go
+++ b/process/block/preprocess/gasComputation_test.go
@@ -11,8 +11,8 @@ import (
 	"github.com/multiversx/mx-chain-go/common"
 	"github.com/multiversx/mx-chain-go/process"
 	"github.com/multiversx/mx-chain-go/process/block/preprocess"
-	"github.com/multiversx/mx-chain-go/process/mock"
 	"github.com/multiversx/mx-chain-go/testscommon"
+	"github.com/multiversx/mx-chain-go/testscommon/economicsmocks"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 )
@@ -40,7 +40,7 @@ func TestNewGasComputation_NilEnableEpochsHandlerShouldErr(t *testing.T) {
 	t.Parallel()

 	gc, err := preprocess.NewGasComputation(
-		&mock.FeeHandlerStub{},
+		&economicsmocks.EconomicsHandlerStub{},
 		&testscommon.TxTypeHandlerMock{},
 		nil,
 	)
@@ -53,7 +53,7 @@ func TestNewGasComputation_ShouldWork(t *testing.T) {
 	t.Parallel()

 	gc, err := preprocess.NewGasComputation(
-		&mock.FeeHandlerStub{},
+		&economicsmocks.EconomicsHandlerStub{},
 		&testscommon.TxTypeHandlerMock{},
 		createEnableEpochsHandler(),
 	)
@@ -66,7 +66,7 @@ func TestGasProvided_ShouldWork(t *testing.T) {
 	t.Parallel()

 	gc, _ := preprocess.NewGasComputation(
-		&mock.FeeHandlerStub{},
+		&economicsmocks.EconomicsHandlerStub{},
 		&testscommon.TxTypeHandlerMock{},
 		createEnableEpochsHandler(),
 	)
@@ -98,7 +98,7 @@ func TestGasRefunded_ShouldWork(t *testing.T) {
 	t.Parallel()

 	gc, _ := preprocess.NewGasComputation(
-		&mock.FeeHandlerStub{},
+		&economicsmocks.EconomicsHandlerStub{},
 		&testscommon.TxTypeHandlerMock{},
 		createEnableEpochsHandler(),
 	)
@@ -130,7 +130,7 @@ func TestGasPenalized_ShouldWork(t *testing.T) {
 	t.Parallel()

 	gc, _ := preprocess.NewGasComputation(
-		&mock.FeeHandlerStub{},
+		&economicsmocks.EconomicsHandlerStub{},
 		&testscommon.TxTypeHandlerMock{},
 		createEnableEpochsHandler(),
 	)
@@ -162,7 +162,7 @@ func TestComputeGasProvidedByTx_ShouldErrWrongTypeAssertion(t *testing.T) {
 	t.Parallel()

 	gc, _ := preprocess.NewGasComputation(
-		&mock.FeeHandlerStub{},
+		&economicsmocks.EconomicsHandlerStub{},
 		&testscommon.TxTypeHandlerMock{},
 		createEnableEpochsHandler(),
 	)
@@ -175,7 +175,7 @@ func TestComputeGasProvidedByTx_ShouldWorkWhenTxReceiverAddressIsNotASmartContra
 	t.Parallel()

 	gc, _ := preprocess.NewGasComputation(
-		&mock.FeeHandlerStub{
+		&economicsmocks.EconomicsHandlerStub{
 			ComputeGasLimitCalled: func(tx data.TransactionWithFeeHandler) uint64 {
 				return 6
 			},
@@ -195,7 +195,7 @@ func TestComputeGasProvidedByTx_ShouldWorkWhenTxReceiverAddressIsASmartContractI
 	t.Parallel()

 	gc, _ := preprocess.NewGasComputation(
-		&mock.FeeHandlerStub{
+		&economicsmocks.EconomicsHandlerStub{
 			ComputeGasLimitCalled: func(tx data.TransactionWithFeeHandler) uint64 {
 				return 6
 			},
@@ -218,7 +218,7 @@ func TestComputeGasProvidedByTx_ShouldWorkWhenTxReceiverAddressIsASmartContractC
 	t.Parallel()

 	gc, _ := preprocess.NewGasComputation(
-		&mock.FeeHandlerStub{
+		&economicsmocks.EconomicsHandlerStub{
 			ComputeGasLimitCalled: func(tx data.TransactionWithFeeHandler) uint64 {
 				return 6
 			},
@@ -241,7 +241,7 @@ func TestComputeGasProvidedByTx_ShouldReturnZeroIf0GasLimit(t *testing.T) {
 	t.Parallel()

 	gc, _ := preprocess.NewGasComputation(
-		&mock.FeeHandlerStub{
+		&economicsmocks.EconomicsHandlerStub{
 			ComputeGasLimitCalled: func(tx data.TransactionWithFeeHandler) uint64 {
 				return 6
 			},
@@ -264,7 +264,7 @@ func TestComputeGasProvidedByTx_ShouldReturnGasLimitIfLessThanMoveBalance(t *tes
 	t.Parallel()

 	gc, _ := preprocess.NewGasComputation(
-		&mock.FeeHandlerStub{
+		&economicsmocks.EconomicsHandlerStub{
 			ComputeGasLimitCalled: func(tx data.TransactionWithFeeHandler) uint64 {
 				return 6
 			},
@@ -287,7 +287,7 @@ func TestComputeGasProvidedByTx_ShouldReturnGasLimitWhenRelayed(t *testing.T) {
 	t.Parallel()

 	gc, _ := preprocess.NewGasComputation(
-		&mock.FeeHandlerStub{
+		&economicsmocks.EconomicsHandlerStub{
 			ComputeGasLimitCalled: func(tx data.TransactionWithFeeHandler) uint64 {
 				return 0
 			},
@@ -310,7 +310,7 @@ func TestComputeGasProvidedByTx_ShouldReturnGasLimitWhenRelayedV2(t *testing.T)
 	t.Parallel()

 	gc, _ := preprocess.NewGasComputation(
-		&mock.FeeHandlerStub{
+		&economicsmocks.EconomicsHandlerStub{
 			ComputeGasLimitCalled: func(tx data.TransactionWithFeeHandler) uint64 {
 				return 0
 			},
@@ -333,7 +333,7 @@ func TestComputeGasProvidedByMiniBlock_ShouldErrMissingTransaction(t *testing.T)
 	t.Parallel()

 	gc, _ := preprocess.NewGasComputation(
-		&mock.FeeHandlerStub{
+		&economicsmocks.EconomicsHandlerStub{
 			ComputeGasLimitCalled: func(tx data.TransactionWithFeeHandler) uint64 {
 				return 6
 			},
@@ -362,7 +362,7 @@ func TestComputeGasProvidedByMiniBlock_ShouldReturnZeroWhenOneTxIsMissing(t *tes
 	t.Parallel()

 	gc, _ := preprocess.NewGasComputation(
-		&mock.FeeHandlerStub{
+		&economicsmocks.EconomicsHandlerStub{
 			ComputeGasLimitCalled: func(tx data.TransactionWithFeeHandler) uint64 {
 				return 6
 			},
@@ -394,7 +394,7 @@ func TestComputeGasProvidedByMiniBlock_ShouldWork(t *testing.T) {
 	t.Parallel()

 	gc, _ := preprocess.NewGasComputation(
-		&mock.FeeHandlerStub{
+		&economicsmocks.EconomicsHandlerStub{
 			ComputeGasLimitCalled: func(tx data.TransactionWithFeeHandler) uint64 {
 				return 6
 			},
@@ -434,7 +434,7 @@ func TestComputeGasProvidedByMiniBlock_ShouldWorkV1(t *testing.T) {
 	t.Parallel()

 	gc, _ := preprocess.NewGasComputation(
-		&mock.FeeHandlerStub{
+		&economicsmocks.EconomicsHandlerStub{
 			ComputeGasLimitCalled: func(tx data.TransactionWithFeeHandler) uint64 {
 				return 6
 			},
@@ -474,7 +474,7 @@ func TestComputeGasProvidedByTx_ShouldWorkWhenTxReceiverAddressIsNotASmartContra
 	t.Parallel()

 	gc, _ := preprocess.NewGasComputation(
-		&mock.FeeHandlerStub{
+		&economicsmocks.EconomicsHandlerStub{
 			ComputeGasLimitCalled: func(tx data.TransactionWithFeeHandler) uint64 {
 				return 6
 			},
@@ -494,7 +494,7 @@ func TestComputeGasProvidedByTx_ShouldWorkWhenTxReceiverAddressIsASmartContractI
 	t.Parallel()

 	gc, _ := preprocess.NewGasComputation(
-		&mock.FeeHandlerStub{
+		&economicsmocks.EconomicsHandlerStub{
 			ComputeGasLimitCalled: func(tx data.TransactionWithFeeHandler) uint64 {
 				return 6
 			},
@@ -517,7 +517,7 @@ func TestComputeGasProvidedByTx_ShouldWorkWhenTxReceiverAddressIsASmartContractC
 	t.Parallel()

 	gc, _ := preprocess.NewGasComputation(
-		&mock.FeeHandlerStub{
+		&economicsmocks.EconomicsHandlerStub{
 			ComputeGasLimitCalled: func(tx data.TransactionWithFeeHandler) uint64 {
 				return 6
 			},
@@ -540,7 +540,7 @@ func TestReset_ShouldWork(t *testing.T) {
 	t.Parallel()

 	gc, _ := preprocess.NewGasComputation(
-		&mock.FeeHandlerStub{},
+		&economicsmocks.EconomicsHandlerStub{},
 		&testscommon.TxTypeHandlerMock{},
 		createEnableEpochsHandler(),
 	)
@@ -577,7 +577,7 @@ func TestRestoreGasSinceLastReset_ShouldWork(t *testing.T) {
 	t.Parallel()

 	gc, _ := preprocess.NewGasComputation(
-		&mock.FeeHandlerStub{},
+		&economicsmocks.EconomicsHandlerStub{},
 		&testscommon.TxTypeHandlerMock{},
 		createEnableEpochsHandler(),
 	)
diff --git a/process/block/preprocess/transactions.go b/process/block/preprocess/transactions.go
index 645ac0d8cf0..14a3a8fa8d7 100644
--- a/process/block/preprocess/transactions.go
+++ b/process/block/preprocess/transactions.go
@@ -877,7 +877,7 @@ func (txs *transactions) processAndRemoveBadTransaction(
 ) error {
 	_, err := txs.txProcessor.ProcessTransaction(tx)

-	isTxTargetedForDeletion := errors.Is(err, process.ErrLowerNonceInTransaction) || errors.Is(err, process.ErrInsufficientFee)
+	isTxTargetedForDeletion := errors.Is(err, process.ErrLowerNonceInTransaction) || errors.Is(err, process.ErrInsufficientFee) || errors.Is(err, process.ErrTransactionNotExecutable)
 	if isTxTargetedForDeletion {
 		strCache := process.ShardCacherIdentifier(sndShardId, dstShardId)
 		txs.txPool.RemoveData(txHash, strCache)
diff --git a/process/block/preprocess/transactionsV2.go b/process/block/preprocess/transactionsV2.go
index d94434965d7..654ff4231a8 100644
--- a/process/block/preprocess/transactionsV2.go
+++ b/process/block/preprocess/transactionsV2.go
@@ -384,7 +384,7 @@ func (txs *transactions) verifyTransaction(
 	txs.accountTxsShards.Unlock()

 	if err != nil {
-		isTxTargetedForDeletion := errors.Is(err, process.ErrLowerNonceInTransaction) || errors.Is(err, process.ErrInsufficientFee)
+		isTxTargetedForDeletion := errors.Is(err, process.ErrLowerNonceInTransaction) || errors.Is(err, process.ErrInsufficientFee) || errors.Is(err, process.ErrTransactionNotExecutable)
 		if isTxTargetedForDeletion {
 			strCache := process.ShardCacherIdentifier(senderShardID, receiverShardID)
 			txs.txPool.RemoveData(txHash, strCache)
diff --git a/process/block/preprocess/transactionsV2_test.go b/process/block/preprocess/transactionsV2_test.go
index 72624deafb5..a2b0326068a 100644
--- a/process/block/preprocess/transactionsV2_test.go
+++ b/process/block/preprocess/transactionsV2_test.go
@@ -15,6 +15,7 @@ import (
 	"github.com/multiversx/mx-chain-go/process/mock"
 	"github.com/multiversx/mx-chain-go/storage/txcache"
 	"github.com/multiversx/mx-chain-go/testscommon"
+	"github.com/multiversx/mx-chain-go/testscommon/economicsmocks"
 	"github.com/multiversx/mx-chain-go/testscommon/hashingMocks"
 	stateMock "github.com/multiversx/mx-chain-go/testscommon/state"
 	storageStubs "github.com/multiversx/mx-chain-go/testscommon/storage"
@@ -34,14 +35,14 @@ func createTransactionPreprocessor() *transactions {
 		ShardCoordinator:     mock.NewMultiShardsCoordinatorMock(3),
 		Accounts:             &stateMock.AccountsStub{},
 		OnRequestTransaction: requestTransaction,
-		EconomicsFee: &mock.FeeHandlerStub{
+		EconomicsFee: &economicsmocks.EconomicsHandlerStub{
 			MaxGasLimitPerMiniBlockForSafeCrossShardCalled: func() uint64 {
 				return MaxGasLimitPerBlock
 			},
 			MaxGasLimitPerBlockForSafeCrossShardCalled: func() uint64 {
 				return MaxGasLimitPerBlock
 			},
-			MaxGasLimitPerBlockCalled: func() uint64 {
+			MaxGasLimitPerBlockCalled: func(_ uint32) uint64 {
 				return MaxGasLimitPerBlock
 			},
 			MaxGasLimitPerTxCalled: func() uint64 {
diff --git a/process/block/preprocess/transactions_test.go b/process/block/preprocess/transactions_test.go
index 69fd9d71c39..872472cd218 100644
--- a/process/block/preprocess/transactions_test.go
+++ b/process/block/preprocess/transactions_test.go
@@ -49,12 +49,12 @@ type txInfoHolder struct {
 	tx   *transaction.Transaction
 }

-func feeHandlerMock() *mock.FeeHandlerStub {
-	return &mock.FeeHandlerStub{
+func feeHandlerMock() *economicsmocks.EconomicsHandlerStub {
+	return &economicsmocks.EconomicsHandlerStub{
 		ComputeGasLimitCalled: func(tx data.TransactionWithFeeHandler) uint64 {
 			return 0
 		},
-		MaxGasLimitPerBlockCalled: func() uint64 {
+		MaxGasLimitPerBlockCalled: func(_ uint32) uint64 {
 			return MaxGasLimitPerBlock
 		},
 		MaxGasLimitPerMiniBlockCalled: func() uint64 {
@@ -1342,8 +1342,8 @@ func TestTransactionsPreprocessor_ComputeGasProvidedShouldWork(t *testing.T) {
 	txGasLimitInSender := maxGasLimit + 1
 	txGasLimitInReceiver := maxGasLimit
 	args := createDefaultTransactionsProcessorArgs()
-	args.EconomicsFee = &mock.FeeHandlerStub{
-		MaxGasLimitPerBlockCalled: func() uint64 {
+	args.EconomicsFee = &economicsmocks.EconomicsHandlerStub{
+		MaxGasLimitPerBlockCalled: func(_ uint32) uint64 {
 			return maxGasLimit
 		},
 	}
@@ -1388,7 +1388,7 @@ func TestTransactionsPreprocessor_SplitMiniBlocksIfNeededShouldWork(t *testing.T
 	args := createDefaultTransactionsProcessorArgs()
 	enableEpochsHandlerStub := &testscommon.EnableEpochsHandlerStub{}
 	args.EnableEpochsHandler = enableEpochsHandlerStub
-	args.EconomicsFee = &mock.FeeHandlerStub{
+	args.EconomicsFee = &economicsmocks.EconomicsHandlerStub{
 		MaxGasLimitPerMiniBlockForSafeCrossShardCalled: func() uint64 {
 			return gasLimitPerMiniBlock
 		},
diff --git a/process/block/shardblock_test.go b/process/block/shardblock_test.go
index 1dce393b2d2..1cd3d4f761e 100644
--- a/process/block/shardblock_test.go
+++ b/process/block/shardblock_test.go
@@ -36,6 +36,7 @@ import (
 	"github.com/multiversx/mx-chain-go/storage"
 	"github.com/multiversx/mx-chain-go/testscommon"
 	dataRetrieverMock "github.com/multiversx/mx-chain-go/testscommon/dataRetriever"
+	"github.com/multiversx/mx-chain-go/testscommon/economicsmocks"
 	"github.com/multiversx/mx-chain-go/testscommon/hashingMocks"
 	"github.com/multiversx/mx-chain-go/testscommon/outport"
 	stateMock "github.com/multiversx/mx-chain-go/testscommon/state"
@@ -447,11 +448,11 @@ func TestShardProcessor_ProcessBlockWithInvalidTransactionShouldErr(t *testing.T
 		&testscommon.SCProcessorMock{},
 		&testscommon.SmartContractResultsProcessorMock{},
 		&testscommon.RewardTxProcessorMock{},
-		&mock.FeeHandlerStub{
+		&economicsmocks.EconomicsHandlerStub{
 			ComputeGasLimitCalled: func(tx data.TransactionWithFeeHandler) uint64 {
 				return 0
 			},
-			MaxGasLimitPerBlockCalled: func() uint64 {
+			MaxGasLimitPerBlockCalled: func(_ uint32) uint64 {
 				return MaxGasLimitPerBlock
 			},
 		},
@@ -668,11 +669,11 @@ func TestShardProcessor_ProcessBlockWithErrOnProcessBlockTransactionsCallShouldR
 		&testscommon.SCProcessorMock{},
 		&testscommon.SmartContractResultsProcessorMock{},
 		&testscommon.RewardTxProcessorMock{},
-		&mock.FeeHandlerStub{
+		&economicsmocks.EconomicsHandlerStub{
 			ComputeGasLimitCalled: func(tx data.TransactionWithFeeHandler) uint64 {
 				return 0
 			},
-			MaxGasLimitPerBlockCalled: func() uint64 {
+			MaxGasLimitPerBlockCalled: func(_ uint32) uint64 {
 				return MaxGasLimitPerBlock
 			},
 		},
@@ -2580,7 +2581,7 @@ func TestShardProcessor_MarshalizedDataToBroadcastShouldWork(t *testing.T) {
 		&testscommon.SCProcessorMock{},
 		&testscommon.SmartContractResultsProcessorMock{},
 		&testscommon.RewardTxProcessorMock{},
-		&mock.FeeHandlerStub{},
+		&economicsmocks.EconomicsHandlerStub{},
 		&testscommon.GasHandlerStub{},
 		&mock.BlockTrackerMock{},
 		&testscommon.BlockSizeComputationStub{},
@@ -2688,7 +2689,7 @@ func TestShardProcessor_MarshalizedDataMarshalWithoutSuccess(t *testing.T) {
 		&testscommon.SCProcessorMock{},
 		&testscommon.SmartContractResultsProcessorMock{},
 		&testscommon.RewardTxProcessorMock{},
-		&mock.FeeHandlerStub{},
+		&economicsmocks.EconomicsHandlerStub{},
 		&testscommon.GasHandlerStub{},
 		&mock.BlockTrackerMock{},
 		&testscommon.BlockSizeComputationStub{},
@@ -3059,11 +3060,11 @@ func TestShardProcessor_CreateMiniBlocksShouldWorkWithIntraShardTxs(t *testing.T
 		&testscommon.SCProcessorMock{},
 		&testscommon.SmartContractResultsProcessorMock{},
 		&testscommon.RewardTxProcessorMock{},
-		&mock.FeeHandlerStub{
+		&economicsmocks.EconomicsHandlerStub{
 			ComputeGasLimitCalled: func(tx data.TransactionWithFeeHandler) uint64 {
 				return 0
 			},
-			MaxGasLimitPerBlockCalled: func() uint64 {
+			MaxGasLimitPerBlockCalled: func(_ uint32) uint64 {
 				return MaxGasLimitPerBlock
 			},
 		},
@@ -3261,7 +3262,7 @@ func TestShardProcessor_RestoreBlockIntoPoolsShouldWork(t *testing.T) {
 		&testscommon.SCProcessorMock{},
 		&testscommon.SmartContractResultsProcessorMock{},
 		&testscommon.RewardTxProcessorMock{},
-		&mock.FeeHandlerStub{},
+		&economicsmocks.EconomicsHandlerStub{},
 		&testscommon.GasHandlerStub{},
 		&mock.BlockTrackerMock{},
 		&testscommon.BlockSizeComputationStub{},
diff --git a/process/common.go b/process/common.go
index 901b74c6221..5412128ba85 100644
--- a/process/common.go
+++ b/process/common.go
@@ -897,6 +897,17 @@ func GetMiniBlockHeaderWithHash(header data.HeaderHandler, miniBlockHash []byte)
 	return nil
 }

+// IsBuiltinFuncCallWithParam checks if the given transaction data represents a builtin function call with parameters
+func IsBuiltinFuncCallWithParam(txData []byte, function string) bool {
+	expectedTxDataPrefix := []byte(function + "@")
+	return bytes.HasPrefix(txData, expectedTxDataPrefix)
+}
+
+// IsSetGuardianCall checks if the given transaction data represents the set guardian builtin function call
+func IsSetGuardianCall(txData []byte) bool {
+	return IsBuiltinFuncCallWithParam(txData, core.BuiltInFunctionSetGuardian)
+}
+
 // CheckIfIndexesAreOutOfBound checks if the given indexes are out of bound for the given mini block
 func CheckIfIndexesAreOutOfBound(
 	indexOfFirstTxToBeProcessed int32,
given transaction data represents the set guardian builtin function call +func IsSetGuardianCall(txData []byte) bool { + return IsBuiltinFuncCallWithParam(txData, core.BuiltInFunctionSetGuardian) +} + // CheckIfIndexesAreOutOfBound checks if the given indexes are out of bound for the given mini block func CheckIfIndexesAreOutOfBound( indexOfFirstTxToBeProcessed int32, diff --git a/process/common_test.go b/process/common_test.go index 9bdcbe0ee5e..a79e2fd5c32 100644 --- a/process/common_test.go +++ b/process/common_test.go @@ -19,6 +19,7 @@ import ( "github.com/multiversx/mx-chain-go/testscommon" storageStubs "github.com/multiversx/mx-chain-go/testscommon/storage" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestGetShardHeaderShouldErrNilCacher(t *testing.T) { @@ -2242,6 +2243,47 @@ func TestGetMiniBlockHeaderWithHash(t *testing.T) { }) } +func Test_IsBuiltinFuncCallWithParam(t *testing.T) { + txDataNoFunction := []byte("dummy data") + targetFunction := "function" + nonTargetFunction := "differentFunction" + suffix := "@dummy@params" + txDataWithFunc := []byte(targetFunction + suffix) + txDataNonTargetFunc := []byte(nonTargetFunction + suffix) + + t.Run("no function", func(t *testing.T) { + require.False(t, process.IsBuiltinFuncCallWithParam(txDataNoFunction, targetFunction)) + }) + t.Run("non target function", func(t *testing.T) { + require.False(t, process.IsBuiltinFuncCallWithParam(txDataNonTargetFunc, targetFunction)) + }) + t.Run("target function", func(t *testing.T) { + require.True(t, process.IsBuiltinFuncCallWithParam(txDataWithFunc, targetFunction)) + }) +} + +func Test_IsSetGuardianCall(t *testing.T) { + t.Parallel() + + setGuardianTxData := []byte("SetGuardian@xxxxxxxx") + t.Run("should return false for tx with other builtin function call or random data", func(t *testing.T) { + require.False(t, process.IsSetGuardianCall([]byte(core.BuiltInFunctionClaimDeveloperRewards+"@..."))) + require.False(t, process.IsSetGuardianCall([]byte("some random data"))) + }) + t.Run("should return false for tx with setGuardian without params (no builtin function call)", func(t *testing.T) { + require.False(t, process.IsSetGuardianCall([]byte("SetGuardian"))) + }) + t.Run("should return true for setGuardian call with invalid num of params", func(t *testing.T) { + require.True(t, process.IsSetGuardianCall([]byte("SetGuardian@xxx@xxx@xxx"))) + }) + t.Run("should return true for setGuardian call with empty param", func(t *testing.T) { + require.True(t, process.IsSetGuardianCall([]byte("SetGuardian@"))) + }) + t.Run("should return true for setGuardian call", func(t *testing.T) { + require.True(t, process.IsSetGuardianCall(setGuardianTxData)) + }) +} + func TestCheckIfIndexesAreOutOfBound(t *testing.T) { t.Parallel() diff --git a/process/coordinator/process_test.go b/process/coordinator/process_test.go index 42ae2fc6374..87017fcf030 100644 --- a/process/coordinator/process_test.go +++ b/process/coordinator/process_test.go @@ -32,6 +32,7 @@ import ( "github.com/multiversx/mx-chain-go/storage/storageunit" "github.com/multiversx/mx-chain-go/testscommon" dataRetrieverMock "github.com/multiversx/mx-chain-go/testscommon/dataRetriever" + "github.com/multiversx/mx-chain-go/testscommon/economicsmocks" "github.com/multiversx/mx-chain-go/testscommon/hashingMocks" stateMock "github.com/multiversx/mx-chain-go/testscommon/state" storageStubs "github.com/multiversx/mx-chain-go/testscommon/storage" @@ -44,12 +45,12 @@ const MaxGasLimitPerBlock = uint64(100000) var txHash = 
[]byte("tx_hash1") -func FeeHandlerMock() *mock.FeeHandlerStub { - return &mock.FeeHandlerStub{ +func FeeHandlerMock() *economicsmocks.EconomicsHandlerStub { + return &economicsmocks.EconomicsHandlerStub{ ComputeGasLimitCalled: func(tx data.TransactionWithFeeHandler) uint64 { return 0 }, - MaxGasLimitPerBlockCalled: func() uint64 { + MaxGasLimitPerBlockCalled: func(_ uint32) uint64 { return MaxGasLimitPerBlock }, MaxGasLimitPerMiniBlockCalled: func() uint64 { @@ -232,7 +233,7 @@ func createMockTransactionCoordinatorArguments() ArgTransactionCoordinator { FeeHandler: &mock.FeeAccumulatorStub{}, BlockSizeComputation: &testscommon.BlockSizeComputationStub{}, BalanceComputation: &testscommon.BalanceComputationStub{}, - EconomicsFee: &mock.FeeHandlerStub{}, + EconomicsFee: &economicsmocks.EconomicsHandlerStub{}, TxTypeHandler: &testscommon.TxTypeHandlerMock{}, TransactionsLogProcessor: &mock.TxLogsProcessorStub{}, EnableEpochsHandler: &testscommon.EnableEpochsHandlerStub{}, @@ -547,15 +548,17 @@ func createPreProcessorContainer() process.PreProcessorsContainer { } func createInterimProcessorContainer() process.IntermediateProcessorContainer { - preFactory, _ := shard.NewIntermediateProcessorsContainerFactory( - mock.NewMultiShardsCoordinatorMock(5), - &mock.MarshalizerMock{}, - &hashingMocks.HasherMock{}, - createMockPubkeyConverter(), - initStore(), - initDataPool([]byte("test_hash1")), - &mock.FeeHandlerStub{}, - ) + argsFactory := shard.ArgsNewIntermediateProcessorsContainerFactory{ + ShardCoordinator: mock.NewMultiShardsCoordinatorMock(5), + Marshalizer: &mock.MarshalizerMock{}, + Hasher: &hashingMocks.HasherMock{}, + PubkeyConverter: createMockPubkeyConverter(), + Store: initStore(), + PoolsHolder: initDataPool([]byte("test_hash1")), + EconomicsFee: &economicsmocks.EconomicsHandlerStub{}, + EnableEpochsHandler: &testscommon.EnableEpochsHandlerStub{IsKeepExecOrderOnCreatedSCRsEnabledField: true}, + } + preFactory, _ := shard.NewIntermediateProcessorsContainerFactory(argsFactory) container, _ := preFactory.Create() return container @@ -1361,8 +1364,8 @@ func TestTransactionCoordinator_CreateMbsAndProcessTransactionsFromMeMultipleMin argsTransactionCoordinator.MiniBlockPool = tdp.MiniBlocks() argsTransactionCoordinator.PreProcessors = createPreProcessorContainerWithDataPool( tdp, - &mock.FeeHandlerStub{ - MaxGasLimitPerBlockCalled: func() uint64 { + &economicsmocks.EconomicsHandlerStub{ + MaxGasLimitPerBlockCalled: func(_ uint32) uint64 { return MaxGasLimitPerBlock }, MaxGasLimitPerMiniBlockForSafeCrossShardCalled: func() uint64 { @@ -1424,8 +1427,8 @@ func TestTransactionCoordinator_CompactAndExpandMiniblocksShouldWork(t *testing. 
argsTransactionCoordinator.MiniBlockPool = tdp.MiniBlocks() argsTransactionCoordinator.PreProcessors = createPreProcessorContainerWithDataPool( tdp, - &mock.FeeHandlerStub{ - MaxGasLimitPerBlockCalled: func() uint64 { + &economicsmocks.EconomicsHandlerStub{ + MaxGasLimitPerBlockCalled: func(_ uint32) uint64 { return MaxGasLimitPerBlock }, MaxGasLimitPerMiniBlockForSafeCrossShardCalled: func() uint64 { @@ -2180,15 +2183,17 @@ func TestTransactionCoordinator_VerifyCreatedBlockTransactionsNilOrMiss(t *testi tdp := initDataPool(txHash) shardCoordinator := mock.NewMultiShardsCoordinatorMock(5) - preFactory, _ := shard.NewIntermediateProcessorsContainerFactory( - shardCoordinator, - &mock.MarshalizerMock{}, - &hashingMocks.HasherMock{}, - createMockPubkeyConverter(), - &storageStubs.ChainStorerStub{}, - tdp, - &mock.FeeHandlerStub{}, - ) + argsFactory := shard.ArgsNewIntermediateProcessorsContainerFactory{ + ShardCoordinator: shardCoordinator, + Marshalizer: &mock.MarshalizerMock{}, + Hasher: &hashingMocks.HasherMock{}, + PubkeyConverter: createMockPubkeyConverter(), + Store: &storageStubs.ChainStorerStub{}, + PoolsHolder: tdp, + EconomicsFee: &economicsmocks.EconomicsHandlerStub{}, + EnableEpochsHandler: &testscommon.EnableEpochsHandlerStub{IsKeepExecOrderOnCreatedSCRsEnabledField: true}, + } + preFactory, _ := shard.NewIntermediateProcessorsContainerFactory(argsFactory) container, _ := preFactory.Create() argsTransactionCoordinator := createMockTransactionCoordinatorArguments() @@ -2234,19 +2239,21 @@ func TestTransactionCoordinator_VerifyCreatedBlockTransactionsOk(t *testing.T) { tdp := initDataPool(txHash) shardCoordinator := mock.NewMultiShardsCoordinatorMock(5) - interFactory, _ := shard.NewIntermediateProcessorsContainerFactory( - shardCoordinator, - &mock.MarshalizerMock{}, - &hashingMocks.HasherMock{}, - createMockPubkeyConverter(), - &storageStubs.ChainStorerStub{}, - tdp, - &mock.FeeHandlerStub{ - MaxGasLimitPerBlockCalled: func() uint64 { + argsFactory := shard.ArgsNewIntermediateProcessorsContainerFactory{ + ShardCoordinator: shardCoordinator, + Marshalizer: &mock.MarshalizerMock{}, + Hasher: &hashingMocks.HasherMock{}, + PubkeyConverter: createMockPubkeyConverter(), + Store: &storageStubs.ChainStorerStub{}, + PoolsHolder: tdp, + EconomicsFee: &economicsmocks.EconomicsHandlerStub{ + MaxGasLimitPerBlockCalled: func(_ uint32) uint64 { return MaxGasLimitPerBlock }, }, - ) + EnableEpochsHandler: &testscommon.EnableEpochsHandlerStub{IsKeepExecOrderOnCreatedSCRsEnabledField: true}, + } + interFactory, _ := shard.NewIntermediateProcessorsContainerFactory(argsFactory) container, _ := interFactory.Create() argsTransactionCoordinator := createMockTransactionCoordinatorArguments() @@ -2559,7 +2566,7 @@ func TestTransactionCoordinator_VerifyCreatedMiniBlocksShouldReturnWhenEpochIsNo FeeHandler: &mock.FeeAccumulatorStub{}, BlockSizeComputation: &testscommon.BlockSizeComputationStub{}, BalanceComputation: &testscommon.BalanceComputationStub{}, - EconomicsFee: &mock.FeeHandlerStub{}, + EconomicsFee: &economicsmocks.EconomicsHandlerStub{}, TxTypeHandler: &testscommon.TxTypeHandlerMock{}, TransactionsLogProcessor: &mock.TxLogsProcessorStub{}, EnableEpochsHandler: &testscommon.EnableEpochsHandlerStub{ @@ -2598,11 +2605,11 @@ func TestTransactionCoordinator_VerifyCreatedMiniBlocksShouldErrMaxGasLimitPerMi FeeHandler: &mock.FeeAccumulatorStub{}, BlockSizeComputation: &testscommon.BlockSizeComputationStub{}, BalanceComputation: &testscommon.BalanceComputationStub{}, - EconomicsFee: &mock.FeeHandlerStub{ 
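// The hunks above migrate the tests from mock.FeeHandlerStub to
// economicsmocks.EconomicsHandlerStub, whose MaxGasLimitPerBlock hook now
// receives the shard ID. A pared-down sketch of that stub pattern; the struct
// below is illustrative, and the zero-value fallback is an assumption about
// the usual stub convention in this codebase.
package sketch

type economicsHandlerStub struct {
	MaxGasLimitPerBlockCalled func(shardID uint32) uint64
}

// MaxGasLimitPerBlock forwards to the test-provided callback when set,
// otherwise returns a zero default so unconfigured stubs stay usable.
func (stub *economicsHandlerStub) MaxGasLimitPerBlock(shardID uint32) uint64 {
	if stub.MaxGasLimitPerBlockCalled != nil {
		return stub.MaxGasLimitPerBlockCalled(shardID)
	}
	return 0
}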
+ EconomicsFee: &economicsmocks.EconomicsHandlerStub{ ComputeGasLimitCalled: func(tx data.TransactionWithFeeHandler) uint64 { return maxGasLimitPerBlock + 1 }, - MaxGasLimitPerBlockCalled: func() uint64 { + MaxGasLimitPerBlockCalled: func(_ uint32) uint64 { return maxGasLimitPerBlock }, MaxGasLimitPerMiniBlockCalled: func() uint64 { @@ -2663,11 +2670,11 @@ func TestTransactionCoordinator_VerifyCreatedMiniBlocksShouldErrMaxAccumulatedFe FeeHandler: &mock.FeeAccumulatorStub{}, BlockSizeComputation: &testscommon.BlockSizeComputationStub{}, BalanceComputation: &testscommon.BalanceComputationStub{}, - EconomicsFee: &mock.FeeHandlerStub{ + EconomicsFee: &economicsmocks.EconomicsHandlerStub{ ComputeGasLimitCalled: func(tx data.TransactionWithFeeHandler) uint64 { return maxGasLimitPerBlock }, - MaxGasLimitPerBlockCalled: func() uint64 { + MaxGasLimitPerBlockCalled: func(_ uint32) uint64 { return maxGasLimitPerBlock }, MaxGasLimitPerMiniBlockForSafeCrossShardCalled: func() uint64 { @@ -2739,11 +2746,11 @@ func TestTransactionCoordinator_VerifyCreatedMiniBlocksShouldErrMaxDeveloperFees FeeHandler: &mock.FeeAccumulatorStub{}, BlockSizeComputation: &testscommon.BlockSizeComputationStub{}, BalanceComputation: &testscommon.BalanceComputationStub{}, - EconomicsFee: &mock.FeeHandlerStub{ + EconomicsFee: &economicsmocks.EconomicsHandlerStub{ ComputeGasLimitCalled: func(tx data.TransactionWithFeeHandler) uint64 { return maxGasLimitPerBlock }, - MaxGasLimitPerBlockCalled: func() uint64 { + MaxGasLimitPerBlockCalled: func(_ uint32) uint64 { return maxGasLimitPerBlock }, MaxGasLimitPerMiniBlockForSafeCrossShardCalled: func() uint64 { @@ -2815,11 +2822,11 @@ func TestTransactionCoordinator_VerifyCreatedMiniBlocksShouldWork(t *testing.T) FeeHandler: &mock.FeeAccumulatorStub{}, BlockSizeComputation: &testscommon.BlockSizeComputationStub{}, BalanceComputation: &testscommon.BalanceComputationStub{}, - EconomicsFee: &mock.FeeHandlerStub{ + EconomicsFee: &economicsmocks.EconomicsHandlerStub{ ComputeGasLimitCalled: func(tx data.TransactionWithFeeHandler) uint64 { return maxGasLimitPerBlock }, - MaxGasLimitPerBlockCalled: func() uint64 { + MaxGasLimitPerBlockCalled: func(_ uint32) uint64 { return maxGasLimitPerBlock }, MaxGasLimitPerMiniBlockForSafeCrossShardCalled: func() uint64 { @@ -2890,7 +2897,7 @@ func TestTransactionCoordinator_GetAllTransactionsShouldWork(t *testing.T) { FeeHandler: &mock.FeeAccumulatorStub{}, BlockSizeComputation: &testscommon.BlockSizeComputationStub{}, BalanceComputation: &testscommon.BalanceComputationStub{}, - EconomicsFee: &mock.FeeHandlerStub{}, + EconomicsFee: &economicsmocks.EconomicsHandlerStub{}, TxTypeHandler: &testscommon.TxTypeHandlerMock{}, TransactionsLogProcessor: &mock.TxLogsProcessorStub{}, EnableEpochsHandler: &testscommon.EnableEpochsHandlerStub{}, @@ -2962,8 +2969,8 @@ func TestTransactionCoordinator_VerifyGasLimitShouldErrMaxGasLimitPerMiniBlockIn FeeHandler: &mock.FeeAccumulatorStub{}, BlockSizeComputation: &testscommon.BlockSizeComputationStub{}, BalanceComputation: &testscommon.BalanceComputationStub{}, - EconomicsFee: &mock.FeeHandlerStub{ - MaxGasLimitPerBlockCalled: func() uint64 { + EconomicsFee: &economicsmocks.EconomicsHandlerStub{ + MaxGasLimitPerBlockCalled: func(_ uint32) uint64 { return tx1GasLimit + tx2GasLimit + tx3GasLimit - 1 }, MaxGasLimitPerMiniBlockCalled: func() uint64 { @@ -3054,8 +3061,8 @@ func TestTransactionCoordinator_VerifyGasLimitShouldWork(t *testing.T) { FeeHandler: &mock.FeeAccumulatorStub{}, BlockSizeComputation: 
&testscommon.BlockSizeComputationStub{}, BalanceComputation: &testscommon.BalanceComputationStub{}, - EconomicsFee: &mock.FeeHandlerStub{ - MaxGasLimitPerBlockCalled: func() uint64 { + EconomicsFee: &economicsmocks.EconomicsHandlerStub{ + MaxGasLimitPerBlockCalled: func(_ uint32) uint64 { return tx1GasLimit + tx2GasLimit + tx3GasLimit }, MaxGasLimitPerMiniBlockForSafeCrossShardCalled: func() uint64 { @@ -3142,7 +3149,7 @@ func TestTransactionCoordinator_CheckGasProvidedByMiniBlockInReceiverShardShould FeeHandler: &mock.FeeAccumulatorStub{}, BlockSizeComputation: &testscommon.BlockSizeComputationStub{}, BalanceComputation: &testscommon.BalanceComputationStub{}, - EconomicsFee: &mock.FeeHandlerStub{}, + EconomicsFee: &economicsmocks.EconomicsHandlerStub{}, TxTypeHandler: &testscommon.TxTypeHandlerMock{}, TransactionsLogProcessor: &mock.TxLogsProcessorStub{}, EnableEpochsHandler: &testscommon.EnableEpochsHandlerStub{}, @@ -3183,7 +3190,7 @@ func TestTransactionCoordinator_CheckGasProvidedByMiniBlockInReceiverShardShould FeeHandler: &mock.FeeAccumulatorStub{}, BlockSizeComputation: &testscommon.BlockSizeComputationStub{}, BalanceComputation: &testscommon.BalanceComputationStub{}, - EconomicsFee: &mock.FeeHandlerStub{ + EconomicsFee: &economicsmocks.EconomicsHandlerStub{ ComputeGasLimitCalled: func(tx data.TransactionWithFeeHandler) uint64 { return tx.GetGasLimit() + 1 }, @@ -3239,7 +3246,7 @@ func TestTransactionCoordinator_CheckGasProvidedByMiniBlockInReceiverShardShould FeeHandler: &mock.FeeAccumulatorStub{}, BlockSizeComputation: &testscommon.BlockSizeComputationStub{}, BalanceComputation: &testscommon.BalanceComputationStub{}, - EconomicsFee: &mock.FeeHandlerStub{ + EconomicsFee: &economicsmocks.EconomicsHandlerStub{ ComputeGasLimitCalled: func(tx data.TransactionWithFeeHandler) uint64 { return 0 }, @@ -3300,8 +3307,8 @@ func TestTransactionCoordinator_CheckGasProvidedByMiniBlockInReceiverShardShould FeeHandler: &mock.FeeAccumulatorStub{}, BlockSizeComputation: &testscommon.BlockSizeComputationStub{}, BalanceComputation: &testscommon.BalanceComputationStub{}, - EconomicsFee: &mock.FeeHandlerStub{ - MaxGasLimitPerBlockCalled: func() uint64 { + EconomicsFee: &economicsmocks.EconomicsHandlerStub{ + MaxGasLimitPerBlockCalled: func(_ uint32) uint64 { return tx1GasLimit + tx2GasLimit + tx3GasLimit - 1 }, MaxGasLimitPerMiniBlockCalled: func() uint64 { @@ -3366,8 +3373,8 @@ func TestTransactionCoordinator_CheckGasProvidedByMiniBlockInReceiverShardShould FeeHandler: &mock.FeeAccumulatorStub{}, BlockSizeComputation: &testscommon.BlockSizeComputationStub{}, BalanceComputation: &testscommon.BalanceComputationStub{}, - EconomicsFee: &mock.FeeHandlerStub{ - MaxGasLimitPerBlockCalled: func() uint64 { + EconomicsFee: &economicsmocks.EconomicsHandlerStub{ + MaxGasLimitPerBlockCalled: func(_ uint32) uint64 { return tx1GasLimit + tx2GasLimit + tx3GasLimit }, MaxGasLimitPerMiniBlockForSafeCrossShardCalled: func() uint64 { @@ -3429,7 +3436,7 @@ func TestTransactionCoordinator_VerifyFeesShouldErrMissingTransaction(t *testing FeeHandler: &mock.FeeAccumulatorStub{}, BlockSizeComputation: &testscommon.BlockSizeComputationStub{}, BalanceComputation: &testscommon.BalanceComputationStub{}, - EconomicsFee: &mock.FeeHandlerStub{}, + EconomicsFee: &economicsmocks.EconomicsHandlerStub{}, TxTypeHandler: &testscommon.TxTypeHandlerMock{}, TransactionsLogProcessor: &mock.TxLogsProcessorStub{}, EnableEpochsHandler: &testscommon.EnableEpochsHandlerStub{}, @@ -3483,7 +3490,7 @@ func 
TestTransactionCoordinator_VerifyFeesShouldErrMaxAccumulatedFeesExceeded(t FeeHandler: &mock.FeeAccumulatorStub{}, BlockSizeComputation: &testscommon.BlockSizeComputationStub{}, BalanceComputation: &testscommon.BalanceComputationStub{}, - EconomicsFee: &mock.FeeHandlerStub{ + EconomicsFee: &economicsmocks.EconomicsHandlerStub{ DeveloperPercentageCalled: func() float64 { return 0.1 }, @@ -3551,7 +3558,7 @@ func TestTransactionCoordinator_VerifyFeesShouldErrMaxDeveloperFeesExceeded(t *t FeeHandler: &mock.FeeAccumulatorStub{}, BlockSizeComputation: &testscommon.BlockSizeComputationStub{}, BalanceComputation: &testscommon.BalanceComputationStub{}, - EconomicsFee: &mock.FeeHandlerStub{ + EconomicsFee: &economicsmocks.EconomicsHandlerStub{ DeveloperPercentageCalled: func() float64 { return 0.1 }, @@ -3620,7 +3627,7 @@ func TestTransactionCoordinator_VerifyFeesShouldErrMaxAccumulatedFeesExceededWhe FeeHandler: &mock.FeeAccumulatorStub{}, BlockSizeComputation: &testscommon.BlockSizeComputationStub{}, BalanceComputation: &testscommon.BalanceComputationStub{}, - EconomicsFee: &mock.FeeHandlerStub{ + EconomicsFee: &economicsmocks.EconomicsHandlerStub{ DeveloperPercentageCalled: func() float64 { return 0.1 }, @@ -3704,7 +3711,7 @@ func TestTransactionCoordinator_VerifyFeesShouldErrMaxDeveloperFeesExceededWhenS FeeHandler: &mock.FeeAccumulatorStub{}, BlockSizeComputation: &testscommon.BlockSizeComputationStub{}, BalanceComputation: &testscommon.BalanceComputationStub{}, - EconomicsFee: &mock.FeeHandlerStub{ + EconomicsFee: &economicsmocks.EconomicsHandlerStub{ DeveloperPercentageCalled: func() float64 { return 0.1 }, @@ -3788,7 +3795,7 @@ func TestTransactionCoordinator_VerifyFeesShouldWork(t *testing.T) { FeeHandler: &mock.FeeAccumulatorStub{}, BlockSizeComputation: &testscommon.BlockSizeComputationStub{}, BalanceComputation: &testscommon.BalanceComputationStub{}, - EconomicsFee: &mock.FeeHandlerStub{ + EconomicsFee: &economicsmocks.EconomicsHandlerStub{ DeveloperPercentageCalled: func() float64 { return 0.1 }, @@ -3875,7 +3882,7 @@ func TestTransactionCoordinator_GetMaxAccumulatedAndDeveloperFeesShouldErr(t *te FeeHandler: &mock.FeeAccumulatorStub{}, BlockSizeComputation: &testscommon.BlockSizeComputationStub{}, BalanceComputation: &testscommon.BalanceComputationStub{}, - EconomicsFee: &mock.FeeHandlerStub{}, + EconomicsFee: &economicsmocks.EconomicsHandlerStub{}, TxTypeHandler: &testscommon.TxTypeHandlerMock{}, TransactionsLogProcessor: &mock.TxLogsProcessorStub{}, EnableEpochsHandler: &testscommon.EnableEpochsHandlerStub{}, @@ -3926,7 +3933,7 @@ func TestTransactionCoordinator_GetMaxAccumulatedAndDeveloperFeesShouldWork(t *t FeeHandler: &mock.FeeAccumulatorStub{}, BlockSizeComputation: &testscommon.BlockSizeComputationStub{}, BalanceComputation: &testscommon.BalanceComputationStub{}, - EconomicsFee: &mock.FeeHandlerStub{ + EconomicsFee: &economicsmocks.EconomicsHandlerStub{ DeveloperPercentageCalled: func() float64 { return 0.1 }, @@ -3999,7 +4006,7 @@ func TestTransactionCoordinator_RevertIfNeededShouldWork(t *testing.T) { }, BlockSizeComputation: &testscommon.BlockSizeComputationStub{}, BalanceComputation: &testscommon.BalanceComputationStub{}, - EconomicsFee: &mock.FeeHandlerStub{}, + EconomicsFee: &economicsmocks.EconomicsHandlerStub{}, TxTypeHandler: &testscommon.TxTypeHandlerMock{}, TransactionsLogProcessor: &mock.TxLogsProcessorStub{}, EnableEpochsHandler: &testscommon.EnableEpochsHandlerStub{}, diff --git a/process/dataValidators/disabledTxValidator.go 
b/process/dataValidators/disabledTxValidator.go index 7207ecb1f7b..bb7c4cd6853 100644 --- a/process/dataValidators/disabledTxValidator.go +++ b/process/dataValidators/disabledTxValidator.go @@ -14,7 +14,7 @@ func NewDisabledTxValidator() *disabledTxValidator { } // CheckTxValidity is a disabled implementation that will return nil -func (dtv *disabledTxValidator) CheckTxValidity(_ process.TxValidatorHandler) error { +func (dtv *disabledTxValidator) CheckTxValidity(_ process.InterceptedTransactionHandler) error { return nil } diff --git a/process/dataValidators/export_test.go b/process/dataValidators/export_test.go new file mode 100644 index 00000000000..0b3e8ee5568 --- /dev/null +++ b/process/dataValidators/export_test.go @@ -0,0 +1,19 @@ +package dataValidators + +import ( + "github.com/multiversx/mx-chain-go/process" + vmcommon "github.com/multiversx/mx-chain-vm-common-go" +) + +// CheckAccount - +func (txv *txValidator) CheckAccount( + interceptedTx process.InterceptedTransactionHandler, + accountHandler vmcommon.AccountHandler, +) error { + return txv.checkAccount(interceptedTx, accountHandler) +} + +// GetTxData - +func GetTxData(interceptedTx process.InterceptedTransactionHandler) ([]byte, error) { + return getTxData(interceptedTx) +} diff --git a/process/dataValidators/txValidator.go b/process/dataValidators/txValidator.go index 56a9a44ec1e..182518b3ba8 100644 --- a/process/dataValidators/txValidator.go +++ b/process/dataValidators/txValidator.go @@ -6,9 +6,9 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-go/process" - "github.com/multiversx/mx-chain-go/process/interceptors/processor" "github.com/multiversx/mx-chain-go/sharding" "github.com/multiversx/mx-chain-go/state" + vmcommon "github.com/multiversx/mx-chain-vm-common-go" logger "github.com/multiversx/mx-chain-logger-go" ) @@ -21,7 +21,8 @@ type txValidator struct { accounts state.AccountsAdapter shardCoordinator sharding.Coordinator whiteListHandler process.WhiteListHandler - pubkeyConverter core.PubkeyConverter + pubKeyConverter core.PubkeyConverter + txVersionChecker process.TxVersionCheckerHandler maxNonceDeltaAllowed int } @@ -30,7 +31,8 @@ func NewTxValidator( accounts state.AccountsAdapter, shardCoordinator sharding.Coordinator, whiteListHandler process.WhiteListHandler, - pubkeyConverter core.PubkeyConverter, + pubKeyConverter core.PubkeyConverter, + txVersionChecker process.TxVersionCheckerHandler, maxNonceDeltaAllowed int, ) (*txValidator, error) { if check.IfNil(accounts) { @@ -42,23 +44,25 @@ func NewTxValidator( if check.IfNil(whiteListHandler) { return nil, process.ErrNilWhiteListHandler } - if check.IfNil(pubkeyConverter) { + if check.IfNil(pubKeyConverter) { return nil, fmt.Errorf("%w in NewTxValidator", process.ErrNilPubkeyConverter) } + if check.IfNil(txVersionChecker) { + return nil, process.ErrNilTransactionVersionChecker + } return &txValidator{ accounts: accounts, shardCoordinator: shardCoordinator, whiteListHandler: whiteListHandler, maxNonceDeltaAllowed: maxNonceDeltaAllowed, - pubkeyConverter: pubkeyConverter, + pubKeyConverter: pubKeyConverter, + txVersionChecker: txVersionChecker, }, nil } // CheckTxValidity will filter transactions that needs to be added in pools -func (txv *txValidator) CheckTxValidity(interceptedTx process.TxValidatorHandler) error { - // TODO: Refactor, extract methods. 
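// The added process/dataValidators/export_test.go relies on a standard Go
// idiom: _test.go files build only under `go test`, so a file in the same
// package can re-export unexported methods (CheckAccount, GetTxData) to the
// package's external tests without widening the production API. Self-contained
// sketch of the idiom; the validator type and names here are invented.
package validator

import "errors"

type txValidator struct{ maxNonceDelta uint64 }

func (v *txValidator) checkNonce(accountNonce, txNonce uint64) error {
	if txNonce < accountNonce || txNonce > accountNonce+v.maxNonceDelta {
		return errors.New("wrong transaction nonce")
	}
	return nil
}

// In export_test.go (same package, compiled only during tests):
// CheckNonce re-exports checkNonce so tests in package validator_test can
// drive it directly, mirroring CheckAccount/GetTxData above.
func (v *txValidator) CheckNonce(accountNonce, txNonce uint64) error {
	return v.checkNonce(accountNonce, txNonce)
}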
- +func (txv *txValidator) CheckTxValidity(interceptedTx process.InterceptedTransactionHandler) error { interceptedData, ok := interceptedTx.(process.InterceptedData) if ok { if txv.whiteListHandler.IsWhiteListed(interceptedData) { @@ -66,52 +70,58 @@ func (txv *txValidator) CheckTxValidity(interceptedTx process.TxValidatorHandler } } - shardID := txv.shardCoordinator.SelfId() - txShardID := interceptedTx.SenderShardId() - senderIsInAnotherShard := shardID != txShardID - if senderIsInAnotherShard { + if txv.isSenderInDifferentShard(interceptedTx) { return nil } - senderAddress := interceptedTx.SenderAddress() + accountHandler, err := txv.getSenderAccount(interceptedTx) + if err != nil { + return err + } - accountHandler, err := txv.accounts.GetExistingAccount(senderAddress) + return txv.checkAccount(interceptedTx, accountHandler) +} + +func (txv *txValidator) checkAccount( + interceptedTx process.InterceptedTransactionHandler, + accountHandler vmcommon.AccountHandler, +) error { + err := txv.checkNonce(interceptedTx, accountHandler) if err != nil { - return fmt.Errorf("%w for address %s and shard %d, err: %s", - process.ErrAccountNotFound, - txv.pubkeyConverter.SilentEncode(senderAddress, log), - shardID, - err.Error(), - ) + return err } - accountNonce := accountHandler.GetNonce() - txNonce := interceptedTx.Nonce() - lowerNonceInTx := txNonce < accountNonce - veryHighNonceInTx := txNonce > accountNonce+uint64(txv.maxNonceDeltaAllowed) - isTxRejected := lowerNonceInTx || veryHighNonceInTx - if isTxRejected { - return fmt.Errorf("%w lowerNonceInTx: %v, veryHighNonceInTx: %v", - process.ErrWrongTransaction, - lowerNonceInTx, - veryHighNonceInTx, - ) + account, err := txv.getSenderUserAccount(interceptedTx, accountHandler) + if err != nil { + return err } + return txv.checkBalance(interceptedTx, account) +} + +func (txv *txValidator) getSenderUserAccount( + interceptedTx process.InterceptedTransactionHandler, + accountHandler vmcommon.AccountHandler, +) (state.UserAccountHandler, error) { + senderAddress := interceptedTx.SenderAddress() account, ok := accountHandler.(state.UserAccountHandler) if !ok { - return fmt.Errorf("%w, account is not of type *state.Account, address: %s", + return nil, fmt.Errorf("%w, account is not of type *state.Account, address: %s", process.ErrWrongTypeAssertion, - txv.pubkeyConverter.SilentEncode(senderAddress, log), + txv.pubKeyConverter.SilentEncode(senderAddress, log), ) } + return account, nil +} +func (txv *txValidator) checkBalance(interceptedTx process.InterceptedTransactionHandler, account state.UserAccountHandler) error { accountBalance := account.GetBalance() txFee := interceptedTx.Fee() if accountBalance.Cmp(txFee) < 0 { + senderAddress := interceptedTx.SenderAddress() return fmt.Errorf("%w, for address: %s, wanted %v, have %v", process.ErrInsufficientFunds, - txv.pubkeyConverter.SilentEncode(senderAddress, log), + txv.pubKeyConverter.SilentEncode(senderAddress, log), txFee, accountBalance, ) @@ -120,9 +130,54 @@ func (txv *txValidator) CheckTxValidity(interceptedTx process.TxValidatorHandler return nil } +func (txv *txValidator) checkNonce(interceptedTx process.InterceptedTransactionHandler, accountHandler vmcommon.AccountHandler) error { + accountNonce := accountHandler.GetNonce() + txNonce := interceptedTx.Nonce() + lowerNonceInTx := txNonce < accountNonce + veryHighNonceInTx := txNonce > accountNonce+uint64(txv.maxNonceDeltaAllowed) + if lowerNonceInTx || veryHighNonceInTx { + return fmt.Errorf("%w lowerNonceInTx: %v, veryHighNonceInTx: %v", + 
process.ErrWrongTransaction, + lowerNonceInTx, + veryHighNonceInTx, + ) + } + return nil +} + +func (txv *txValidator) isSenderInDifferentShard(interceptedTx process.InterceptedTransactionHandler) bool { + shardID := txv.shardCoordinator.SelfId() + txShardID := interceptedTx.SenderShardId() + return shardID != txShardID +} + +func (txv *txValidator) getSenderAccount(interceptedTx process.InterceptedTransactionHandler) (vmcommon.AccountHandler, error) { + senderAddress := interceptedTx.SenderAddress() + accountHandler, err := txv.accounts.GetExistingAccount(senderAddress) + if err != nil { + return nil, fmt.Errorf("%w for address %s and shard %d, err: %s", + process.ErrAccountNotFound, + txv.pubKeyConverter.SilentEncode(senderAddress, log), + txv.shardCoordinator.SelfId(), + err.Error(), + ) + } + + return accountHandler, nil +} + +func getTxData(interceptedTx process.InterceptedTransactionHandler) ([]byte, error) { + tx := interceptedTx.Transaction() + if tx == nil { + return nil, process.ErrNilTransaction + } + + return tx.GetData(), nil +} + // CheckTxWhiteList will check if the cross shard transactions are whitelisted and could be added in pools func (txv *txValidator) CheckTxWhiteList(data process.InterceptedData) error { - interceptedTx, ok := data.(processor.InterceptedTransactionHandler) + interceptedTx, ok := data.(process.InterceptedTransactionHandler) if !ok { return process.ErrWrongTypeAssertion } diff --git a/process/dataValidators/txValidator_test.go b/process/dataValidators/txValidator_test.go index 83c9e6651d0..7037932bb02 100644 --- a/process/dataValidators/txValidator_test.go +++ b/process/dataValidators/txValidator_test.go @@ -7,6 +7,8 @@ import ( "testing" "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-core-go/data/transaction" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/process/dataValidators" "github.com/multiversx/mx-chain-go/process/mock" @@ -15,6 +17,7 @@ import ( stateMock "github.com/multiversx/mx-chain-go/testscommon/state" vmcommon "github.com/multiversx/mx-chain-vm-common-go" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func getAccAdapter(nonce uint64, balance *big.Int) *stateMock.AccountsStub { @@ -41,14 +44,14 @@ func createMockCoordinator(identifierPrefix string, currentShardID uint32) *mock } } -func getTxValidatorHandler( +func getInterceptedTxHandler( sndShardId uint32, rcvShardId uint32, nonce uint64, sndAddr []byte, fee *big.Int, -) process.TxValidatorHandler { - return &mock.TxValidatorHandlerStub{ +) process.InterceptedTransactionHandler { + return &mock.InterceptedTxHandlerStub{ SenderShardIdCalled: func() uint32 { return sndShardId }, @@ -64,6 +67,9 @@ func getTxValidatorHandler( FeeCalled: func() *big.Int { return fee }, + TransactionCalled: func() data.TransactionHandler { + return &transaction.Transaction{} + }, } } @@ -77,6 +83,7 @@ func TestNewTxValidator_NilAccountsShouldErr(t *testing.T) { shardCoordinator, &testscommon.WhiteListHandlerStub{}, testscommon.NewPubkeyConverterMock(32), + &testscommon.TxVersionCheckerStub{}, maxNonceDeltaAllowed, ) @@ -94,6 +101,7 @@ func TestNewTxValidator_NilShardCoordinatorShouldErr(t *testing.T) { nil, &testscommon.WhiteListHandlerStub{}, testscommon.NewPubkeyConverterMock(32), + &testscommon.TxVersionCheckerStub{}, maxNonceDeltaAllowed, ) @@ -112,6 +120,7 @@ func TestTxValidator_NewValidatorNilWhiteListHandlerShouldErr(t *testing.T) { shardCoordinator, 
nil, testscommon.NewPubkeyConverterMock(32), + &testscommon.TxVersionCheckerStub{}, maxNonceDeltaAllowed, ) @@ -130,6 +139,7 @@ func TestNewTxValidator_NilPubkeyConverterShouldErr(t *testing.T) { shardCoordinator, &testscommon.WhiteListHandlerStub{}, nil, + &testscommon.TxVersionCheckerStub{}, maxNonceDeltaAllowed, ) @@ -137,6 +147,24 @@ func TestNewTxValidator_NilPubkeyConverterShouldErr(t *testing.T) { assert.True(t, errors.Is(err, process.ErrNilPubkeyConverter)) } +func TestNewTxValidator_NilTxVersionCheckerShouldErr(t *testing.T) { + t.Parallel() + + adb := getAccAdapter(0, big.NewInt(0)) + shardCoordinator := createMockCoordinator("_", 0) + maxNonceDeltaAllowed := 100 + txValidator, err := dataValidators.NewTxValidator( + adb, + shardCoordinator, + &testscommon.WhiteListHandlerStub{}, + testscommon.NewPubkeyConverterMock(32), + nil, + maxNonceDeltaAllowed, + ) + assert.Nil(t, txValidator) + assert.True(t, errors.Is(err, process.ErrNilTransactionVersionChecker)) +} + func TestNewTxValidator_ShouldWork(t *testing.T) { t.Parallel() @@ -148,6 +176,7 @@ func TestNewTxValidator_ShouldWork(t *testing.T) { shardCoordinator, &testscommon.WhiteListHandlerStub{}, testscommon.NewPubkeyConverterMock(32), + &testscommon.TxVersionCheckerStub{}, maxNonceDeltaAllowed, ) @@ -170,12 +199,13 @@ func TestTxValidator_CheckTxValidityTxCrossShardShouldWork(t *testing.T) { shardCoordinator, &testscommon.WhiteListHandlerStub{}, testscommon.NewPubkeyConverterMock(32), + &testscommon.TxVersionCheckerStub{}, maxNonceDeltaAllowed, ) assert.Nil(t, err) addressMock := []byte("address") - txValidatorHandler := getTxValidatorHandler(currentShard+1, currentShard, 1, addressMock, big.NewInt(0)) + txValidatorHandler := getInterceptedTxHandler(currentShard+1, currentShard, 1, addressMock, big.NewInt(0)) result := txValidator.CheckTxValidity(txValidatorHandler) assert.Nil(t, result) @@ -195,13 +225,14 @@ func TestTxValidator_CheckTxValidityAccountNonceIsGreaterThanTxNonceShouldReturn shardCoordinator, &testscommon.WhiteListHandlerStub{}, testscommon.NewPubkeyConverterMock(32), + &testscommon.TxVersionCheckerStub{}, maxNonceDeltaAllowed, ) assert.Nil(t, err) addressMock := []byte("address") currentShard := uint32(0) - txValidatorHandler := getTxValidatorHandler(currentShard, currentShard, txNonce, addressMock, big.NewInt(0)) + txValidatorHandler := getInterceptedTxHandler(currentShard, currentShard, txNonce, addressMock, big.NewInt(0)) result := txValidator.CheckTxValidity(txValidatorHandler) assert.True(t, errors.Is(result, process.ErrWrongTransaction)) @@ -221,13 +252,14 @@ func TestTxValidator_CheckTxValidityTxNonceIsTooHigh(t *testing.T) { shardCoordinator, &testscommon.WhiteListHandlerStub{}, testscommon.NewPubkeyConverterMock(32), + &testscommon.TxVersionCheckerStub{}, maxNonceDeltaAllowed, ) assert.Nil(t, err) addressMock := []byte("address") currentShard := uint32(0) - txValidatorHandler := getTxValidatorHandler(currentShard, currentShard, txNonce, addressMock, big.NewInt(0)) + txValidatorHandler := getInterceptedTxHandler(currentShard, currentShard, txNonce, addressMock, big.NewInt(0)) result := txValidator.CheckTxValidity(txValidatorHandler) assert.True(t, errors.Is(result, process.ErrWrongTransaction)) @@ -249,13 +281,14 @@ func TestTxValidator_CheckTxValidityAccountBalanceIsLessThanTxTotalValueShouldRe shardCoordinator, &testscommon.WhiteListHandlerStub{}, testscommon.NewPubkeyConverterMock(32), + &testscommon.TxVersionCheckerStub{}, maxNonceDeltaAllowed, ) assert.Nil(t, err) addressMock := []byte("address") 
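// After the refactor, the balance step of CheckTxValidity reduces to a single
// big.Int comparison between the sender's balance and the transaction fee, as
// the insufficient-funds test above exercises. Minimal sketch of that rule,
// with illustrative names.
package sketch

import (
	"errors"
	"fmt"
	"math/big"
)

var errInsufficientFunds = errors.New("insufficient funds")

// checkBalance rejects the transaction when the account cannot cover the fee,
// wrapping the sentinel so callers can match it with errors.Is.
func checkBalance(accountBalance, txFee *big.Int) error {
	if accountBalance.Cmp(txFee) < 0 {
		return fmt.Errorf("%w, wanted %v, have %v", errInsufficientFunds, txFee, accountBalance)
	}
	return nil
}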
currentShard := uint32(0) - txValidatorHandler := getTxValidatorHandler(currentShard, currentShard, txNonce, addressMock, fee) + txValidatorHandler := getInterceptedTxHandler(currentShard, currentShard, txNonce, addressMock, fee) result := txValidator.CheckTxValidity(txValidatorHandler) assert.NotNil(t, result) @@ -276,12 +309,13 @@ func TestTxValidator_CheckTxValidityAccountNotExitsShouldReturnFalse(t *testing. shardCoordinator, &testscommon.WhiteListHandlerStub{}, testscommon.NewPubkeyConverterMock(32), + &testscommon.TxVersionCheckerStub{}, maxNonceDeltaAllowed, ) addressMock := []byte("address") currentShard := uint32(0) - txValidatorHandler := getTxValidatorHandler(currentShard, currentShard, 1, addressMock, big.NewInt(0)) + txValidatorHandler := getInterceptedTxHandler(currentShard, currentShard, 1, addressMock, big.NewInt(0)) result := txValidator.CheckTxValidity(txValidatorHandler) assert.True(t, errors.Is(result, process.ErrAccountNotFound)) @@ -305,19 +339,20 @@ func TestTxValidator_CheckTxValidityAccountNotExitsButWhiteListedShouldReturnTru }, }, testscommon.NewPubkeyConverterMock(32), + &testscommon.TxVersionCheckerStub{}, maxNonceDeltaAllowed, ) addressMock := []byte("address") currentShard := uint32(0) - txValidatorHandler := getTxValidatorHandler(currentShard, currentShard, 1, addressMock, big.NewInt(0)) + txValidatorHandler := getInterceptedTxHandler(currentShard, currentShard, 1, addressMock, big.NewInt(0)) interceptedTx := struct { process.InterceptedData - process.TxValidatorHandler + process.InterceptedTransactionHandler }{ - InterceptedData: nil, - TxValidatorHandler: txValidatorHandler, + InterceptedData: nil, + InterceptedTransactionHandler: txValidatorHandler, } // interceptedTx needs to be of type InterceptedData & TxValidatorHandler @@ -339,12 +374,13 @@ func TestTxValidator_CheckTxValidityWrongAccountTypeShouldReturnFalse(t *testing shardCoordinator, &testscommon.WhiteListHandlerStub{}, testscommon.NewPubkeyConverterMock(32), + &testscommon.TxVersionCheckerStub{}, maxNonceDeltaAllowed, ) addressMock := []byte("address") currentShard := uint32(0) - txValidatorHandler := getTxValidatorHandler(currentShard, currentShard, 1, addressMock, big.NewInt(0)) + txValidatorHandler := getInterceptedTxHandler(currentShard, currentShard, 1, addressMock, big.NewInt(0)) result := txValidator.CheckTxValidity(txValidatorHandler) assert.True(t, errors.Is(result, process.ErrWrongTypeAssertion)) @@ -363,17 +399,52 @@ func TestTxValidator_CheckTxValidityTxIsOkShouldReturnTrue(t *testing.T) { shardCoordinator, &testscommon.WhiteListHandlerStub{}, testscommon.NewPubkeyConverterMock(32), + &testscommon.TxVersionCheckerStub{}, maxNonceDeltaAllowed, ) addressMock := []byte("address") currentShard := uint32(0) - txValidatorHandler := getTxValidatorHandler(currentShard, currentShard, 1, addressMock, big.NewInt(0)) + txValidatorHandler := getInterceptedTxHandler(currentShard, currentShard, 1, addressMock, big.NewInt(0)) result := txValidator.CheckTxValidity(txValidatorHandler) assert.Nil(t, result) } +func Test_getTxData(t *testing.T) { + t.Run("nil tx in intercepted tx returns error", func(t *testing.T) { + interceptedTx := getDefaultInterceptedTx() + interceptedTx.TransactionCalled = func() data.TransactionHandler { return nil } + txData, err := dataValidators.GetTxData(interceptedTx) + require.Equal(t, process.ErrNilTransaction, err) + require.Nil(t, txData) + }) + t.Run("non nil intercepted tx without data", func(t *testing.T) { + expectedData := []byte(nil) + interceptedTx := 
getDefaultInterceptedTx() + interceptedTx.TransactionCalled = func() data.TransactionHandler { + return &transaction.Transaction{ + Data: expectedData, + } + } + txData, err := dataValidators.GetTxData(interceptedTx) + require.Nil(t, err) + require.Equal(t, expectedData, txData) + }) + t.Run("non nil intercepted tx with data", func(t *testing.T) { + expectedData := []byte("expected data") + interceptedTx := getDefaultInterceptedTx() + interceptedTx.TransactionCalled = func() data.TransactionHandler { + return &transaction.Transaction{ + Data: expectedData, + } + } + txData, err := dataValidators.GetTxData(interceptedTx) + require.Nil(t, err) + require.Equal(t, expectedData, txData) + }) +} + //------- IsInterfaceNil func TestTxValidator_IsInterfaceNil(t *testing.T) { @@ -386,6 +457,7 @@ func TestTxValidator_IsInterfaceNil(t *testing.T) { shardCoordinator, &testscommon.WhiteListHandlerStub{}, testscommon.NewPubkeyConverterMock(32), + &testscommon.TxVersionCheckerStub{}, 100, ) _ = txValidator @@ -393,3 +465,26 @@ func TestTxValidator_IsInterfaceNil(t *testing.T) { assert.True(t, check.IfNil(txValidator)) } + +func getDefaultInterceptedTx() *mock.InterceptedTxHandlerStub { + return &mock.InterceptedTxHandlerStub{ + SenderShardIdCalled: func() uint32 { + return 0 + }, + ReceiverShardIdCalled: func() uint32 { + return 1 + }, + NonceCalled: func() uint64 { + return 0 + }, + SenderAddressCalled: func() []byte { + return []byte("sender address") + }, + FeeCalled: func() *big.Int { + return big.NewInt(100000) + }, + TransactionCalled: func() data.TransactionHandler { + return &transaction.Transaction{} + }, + } +} diff --git a/process/economics/builtInFunctionsCost.go b/process/economics/builtInFunctionsCost.go index 8fa3a4daaf2..f784b5f2332 100644 --- a/process/economics/builtInFunctionsCost.go +++ b/process/economics/builtInFunctionsCost.go @@ -107,6 +107,12 @@ func (bc *builtInFunctionsCost) ComputeBuiltInCost(tx data.TransactionWithFeeHan case core.BuiltInFunctionESDTNFTCreate: costStorage := calculateLenOfArguments(arguments) * bc.gasConfig.BaseOperationCost.StorePerByte return bc.gasConfig.BuiltInCost.ESDTNFTCreate + costStorage + case core.BuiltInFunctionSetGuardian: + return bc.gasConfig.BuiltInCost.SetGuardian + case core.BuiltInFunctionGuardAccount: + return bc.gasConfig.BuiltInCost.GuardAccount + case core.BuiltInFunctionUnGuardAccount: + return bc.gasConfig.BuiltInCost.UnGuardAccount default: return 0 } diff --git a/process/economics/economicsData.go b/process/economics/economicsData.go index f8dad002ebe..268a3f30650 100644 --- a/process/economics/economicsData.go +++ b/process/economics/economicsData.go @@ -11,6 +11,7 @@ import ( "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-core-go/data/smartContractResult" + "github.com/multiversx/mx-chain-core-go/data/transaction" "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/process" @@ -25,8 +26,20 @@ var _ process.FeeHandler = (*economicsData)(nil) var epsilon = 0.00000001 var log = logger.GetOrCreate("process/economics") +type gasConfig struct { + gasLimitSettingEpoch uint32 + maxGasLimitPerBlock uint64 + maxGasLimitPerMiniBlock uint64 + maxGasLimitPerMetaBlock uint64 + maxGasLimitPerMetaMiniBlock uint64 + maxGasLimitPerTx uint64 + minGasLimit uint64 + extraGasLimitGuardedTx uint64 +} + // economicsData will store information about economics type economicsData struct { + gasConfig 
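// Grouping the per-epoch gas settings into an embedded gasConfig, as the
// economicsData hunk above does, lets the epoch-change handler swap every
// limit with one struct assignment instead of several field writes, and field
// promotion keeps existing accessors (ed.minGasLimit etc.) unchanged. Sketch
// of the idea with a deliberately minimal field set.
package sketch

type gasConfig struct {
	minGasLimit         uint64
	maxGasLimitPerBlock uint64
}

type economicsData struct {
	gasConfig // embedded: fields readable as ed.minGasLimit, ed.maxGasLimitPerBlock
}

// applyEpochConfig replaces the whole setting set atomically from the caller's
// point of view of the struct fields, mirroring `ed.gasConfig = *gc` above.
func (ed *economicsData) applyEpochConfig(gc gasConfig) {
	ed.gasConfig = gc
}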
rewardsSettings []config.EpochRewardSettings rewardsSettingEpoch uint32 leaderPercentage float64 @@ -37,17 +50,11 @@ type economicsData struct { topUpFactor float64 mutRewardsSettings sync.RWMutex gasLimitSettings []config.GasLimitSetting - gasLimitSettingEpoch uint32 - maxGasLimitPerBlock uint64 - maxGasLimitPerMiniBlock uint64 - maxGasLimitPerMetaBlock uint64 - maxGasLimitPerMetaMiniBlock uint64 - maxGasLimitPerTx uint64 mutGasLimitSettings sync.RWMutex gasPerDataByte uint64 minGasPrice uint64 + maxGasPriceSetGuardian uint64 gasPriceModifier float64 - minGasLimit uint64 genesisTotalSupply *big.Int minInflation float64 yearSettings map[uint32]*config.YearSetting @@ -55,10 +62,12 @@ type economicsData struct { statusHandler core.AppStatusHandler builtInFunctionsCostHandler BuiltInFunctionsCostHandler enableEpochsHandler common.EnableEpochsHandler + txVersionHandler process.TxVersionCheckerHandler } // ArgsNewEconomicsData defines the arguments needed for new economics economicsData type ArgsNewEconomicsData struct { + TxVersionChecker process.TxVersionCheckerHandler BuiltInFunctionsCostHandler BuiltInFunctionsCostHandler Economics *config.EconomicsConfig EpochNotifier process.EpochNotifier @@ -70,6 +79,9 @@ func NewEconomicsData(args ArgsNewEconomicsData) (*economicsData, error) { if check.IfNil(args.BuiltInFunctionsCostHandler) { return nil, process.ErrNilBuiltInFunctionsCostHandler } + if check.IfNil(args.TxVersionChecker) { + return nil, process.ErrNilTransactionVersionChecker + } err := checkValues(args.Economics) if err != nil { @@ -117,6 +129,7 @@ func NewEconomicsData(args ArgsNewEconomicsData) (*economicsData, error) { topUpGradientPoint: topUpGradientPoint, gasLimitSettings: gasLimitSettings, minGasPrice: convertedData.minGasPrice, + maxGasPriceSetGuardian: convertedData.maxGasPriceSetGuardian, gasPerDataByte: convertedData.gasPerDataByte, minInflation: args.Economics.GlobalSettings.MinimumInflation, genesisTotalSupply: convertedData.genesisTotalSupply, @@ -124,6 +137,7 @@ func NewEconomicsData(args ArgsNewEconomicsData) (*economicsData, error) { statusHandler: statusHandler.NewNilStatusHandler(), builtInFunctionsCostHandler: args.BuiltInFunctionsCostHandler, enableEpochsHandler: args.EnableEpochsHandler, + txVersionHandler: args.TxVersionChecker, } ed.yearSettings = make(map[uint32]*config.YearSetting) @@ -134,56 +148,18 @@ func NewEconomicsData(args ArgsNewEconomicsData) (*economicsData, error) { } } - err = ed.setGasLimitSetting(gasLimitSettings[0]) + var gc *gasConfig + gc, err = checkAndParseGasLimitSettings(gasLimitSettings[0]) if err != nil { return nil, err } + ed.gasConfig = *gc args.EpochNotifier.RegisterNotifyHandler(ed) return ed, nil } -func (ed *economicsData) setGasLimitSetting(gasLimitSetting config.GasLimitSetting) error { - var err error - conversionBase := 10 - bitConversionSize := 64 - - ed.gasLimitSettingEpoch = gasLimitSetting.EnableEpoch - - ed.maxGasLimitPerBlock, err = strconv.ParseUint(gasLimitSetting.MaxGasLimitPerBlock, conversionBase, bitConversionSize) - if err != nil { - return process.ErrInvalidMaxGasLimitPerBlock - } - - ed.maxGasLimitPerMiniBlock, err = strconv.ParseUint(gasLimitSetting.MaxGasLimitPerMiniBlock, conversionBase, bitConversionSize) - if err != nil { - return process.ErrInvalidMaxGasLimitPerMiniBlock - } - - ed.maxGasLimitPerMetaBlock, err = strconv.ParseUint(gasLimitSetting.MaxGasLimitPerMetaBlock, conversionBase, bitConversionSize) - if err != nil { - return process.ErrInvalidMaxGasLimitPerMetaBlock - } - - 
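// NewEconomicsData above registers itself with the epoch notifier
// (args.EpochNotifier.RegisterNotifyHandler(ed)) so gas settings can be
// re-selected when the epoch changes. A minimal observer sketch of that
// wiring; the interface and notifier below are invented for illustration and
// are not the repository's actual types.
package sketch

type epochNotifiee interface {
	EpochConfirmed(epoch uint32, timestamp uint64)
}

type epochNotifier struct{ handlers []epochNotifiee }

// RegisterNotifyHandler subscribes a component to epoch changes. The real
// notifier may also invoke the handler immediately with the current epoch.
func (en *epochNotifier) RegisterNotifyHandler(h epochNotifiee) {
	en.handlers = append(en.handlers, h)
}

func (en *epochNotifier) NotifyAll(epoch uint32, timestamp uint64) {
	for _, h := range en.handlers {
		h.EpochConfirmed(epoch, timestamp)
	}
}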
ed.maxGasLimitPerMetaMiniBlock, err = strconv.ParseUint(gasLimitSetting.MaxGasLimitPerMetaMiniBlock, conversionBase, bitConversionSize) - if err != nil { - return process.ErrInvalidMaxGasLimitPerMetaMiniBlock - } - - ed.maxGasLimitPerTx, err = strconv.ParseUint(gasLimitSetting.MaxGasLimitPerTx, conversionBase, bitConversionSize) - if err != nil { - return process.ErrInvalidMaxGasLimitPerTx - } - - ed.minGasLimit, err = strconv.ParseUint(gasLimitSetting.MinGasLimit, conversionBase, bitConversionSize) - if err != nil { - return process.ErrInvalidMinimumGasLimitForTx - } - - return nil -} - func convertValues(economics *config.EconomicsConfig) (*economicsData, error) { conversionBase := 10 bitConversionSize := 64 @@ -203,10 +179,16 @@ func convertValues(economics *config.EconomicsConfig) (*economicsData, error) { return nil, process.ErrInvalidGenesisTotalSupply } + maxGasPriceSetGuardian, err := strconv.ParseUint(economics.FeeSettings.MaxGasPriceSetGuardian, conversionBase, bitConversionSize) + if err != nil { + return nil, process.ErrInvalidMaxGasPriceSetGuardian + } + return &economicsData{ - minGasPrice: minGasPrice, - gasPerDataByte: gasPerDataByte, - genesisTotalSupply: genesisTotalSupply, + minGasPrice: minGasPrice, + gasPerDataByte: gasPerDataByte, + genesisTotalSupply: genesisTotalSupply, + maxGasPriceSetGuardian: maxGasPriceSetGuardian, }, nil } @@ -218,7 +200,28 @@ func checkValues(economics *config.EconomicsConfig) error { if len(economics.RewardsSettings.RewardsConfigByEpoch) == 0 { return process.ErrEmptyEpochRewardsConfig } - for _, rewardsConfig := range economics.RewardsSettings.RewardsConfigByEpoch { + + err := checkRewardsSettings(economics.RewardsSettings) + if err != nil { + return err + } + + if len(economics.GlobalSettings.YearSettings) == 0 { + return process.ErrEmptyYearSettings + } + for _, yearSetting := range economics.GlobalSettings.YearSettings { + if isPercentageInvalid(yearSetting.MaximumInflation) { + return process.ErrInvalidInflationPercentages + } + } + + err = checkFeeSettings(economics.FeeSettings) + + return err +} + +func checkRewardsSettings(rewardsSettings config.RewardsSettings) error { + for _, rewardsConfig := range rewardsSettings.RewardsConfigByEpoch { if isPercentageInvalid(rewardsConfig.LeaderPercentage) || isPercentageInvalid(rewardsConfig.DeveloperPercentage) || isPercentageInvalid(rewardsConfig.ProtocolSustainabilityPercentage) || @@ -235,74 +238,88 @@ func checkValues(economics *config.EconomicsConfig) error { return process.ErrInvalidRewardsTopUpGradientPoint } } + return nil +} - if len(economics.GlobalSettings.YearSettings) == 0 { - return process.ErrEmptyYearSettings - } - for _, yearSetting := range economics.GlobalSettings.YearSettings { - if isPercentageInvalid(yearSetting.MaximumInflation) { - return process.ErrInvalidInflationPercentages - } - } - - if economics.FeeSettings.GasPriceModifier > 1.0 || economics.FeeSettings.GasPriceModifier < epsilon { +func checkFeeSettings(feeSettings config.FeeSettings) error { + if feeSettings.GasPriceModifier > 1.0 || feeSettings.GasPriceModifier < epsilon { return process.ErrInvalidGasModifier } - if len(economics.FeeSettings.GasLimitSettings) == 0 { + if len(feeSettings.GasLimitSettings) == 0 { return process.ErrEmptyGasLimitSettings } - conversionBase := 10 - bitConversionSize := 64 - for _, gasLimitSetting := range economics.FeeSettings.GasLimitSettings { - minGasLimit, err := strconv.ParseUint(gasLimitSetting.MinGasLimit, conversionBase, bitConversionSize) - if err != nil { - return 
process.ErrInvalidMinimumGasLimitForTx - } - maxGasLimitPerBlock, err := strconv.ParseUint(gasLimitSetting.MaxGasLimitPerBlock, conversionBase, bitConversionSize) + var err error + for _, gasLimitSetting := range feeSettings.GasLimitSettings { + _, err = checkAndParseGasLimitSettings(gasLimitSetting) if err != nil { - return fmt.Errorf("%w for epoch %d", process.ErrInvalidMaxGasLimitPerBlock, gasLimitSetting.EnableEpoch) + return err } + } + return nil +} - maxGasLimitPerMiniBlock, err := strconv.ParseUint(gasLimitSetting.MaxGasLimitPerMiniBlock, conversionBase, bitConversionSize) - if err != nil { - return fmt.Errorf("%w for epoch %d", process.ErrInvalidMaxGasLimitPerMiniBlock, gasLimitSetting.EnableEpoch) - } +func checkAndParseGasLimitSettings(gasLimitSetting config.GasLimitSetting) (*gasConfig, error) { + conversionBase := 10 + bitConversionSize := 64 - maxGasLimitPerMetaBlock, err := strconv.ParseUint(gasLimitSetting.MaxGasLimitPerMetaBlock, conversionBase, bitConversionSize) - if err != nil { - return fmt.Errorf("%w for epoch %d", process.ErrInvalidMaxGasLimitPerMetaBlock, gasLimitSetting.EnableEpoch) - } + gc := &gasConfig{} + var err error - maxGasLimitPerMetaMiniBlock, err := strconv.ParseUint(gasLimitSetting.MaxGasLimitPerMetaMiniBlock, conversionBase, bitConversionSize) - if err != nil { - return fmt.Errorf("%w for epoch %d", process.ErrInvalidMaxGasLimitPerMetaMiniBlock, gasLimitSetting.EnableEpoch) - } + gc.gasLimitSettingEpoch = gasLimitSetting.EnableEpoch + gc.minGasLimit, err = strconv.ParseUint(gasLimitSetting.MinGasLimit, conversionBase, bitConversionSize) + if err != nil { + return nil, process.ErrInvalidMinimumGasLimitForTx + } - maxGasLimitPerTx, err := strconv.ParseUint(gasLimitSetting.MaxGasLimitPerTx, conversionBase, bitConversionSize) - if err != nil { - return fmt.Errorf("%w for epoch %d", process.ErrInvalidMaxGasLimitPerTx, gasLimitSetting.EnableEpoch) - } + gc.maxGasLimitPerBlock, err = strconv.ParseUint(gasLimitSetting.MaxGasLimitPerBlock, conversionBase, bitConversionSize) + if err != nil { + return nil, fmt.Errorf("%w for epoch %d", process.ErrInvalidMaxGasLimitPerBlock, gasLimitSetting.EnableEpoch) + } - if maxGasLimitPerBlock < minGasLimit { - return fmt.Errorf("%w: maxGasLimitPerBlock = %d minGasLimit = %d in epoch %d", process.ErrInvalidMaxGasLimitPerBlock, maxGasLimitPerBlock, minGasLimit, gasLimitSetting.EnableEpoch) - } - if maxGasLimitPerMiniBlock < minGasLimit { - return fmt.Errorf("%w: maxGasLimitPerMiniBlock = %d minGasLimit = %d in epoch %d", process.ErrInvalidMaxGasLimitPerMiniBlock, maxGasLimitPerMiniBlock, minGasLimit, gasLimitSetting.EnableEpoch) - } - if maxGasLimitPerMetaBlock < minGasLimit { - return fmt.Errorf("%w: maxGasLimitPerMetaBlock = %d minGasLimit = %d in epoch %d", process.ErrInvalidMaxGasLimitPerMetaBlock, maxGasLimitPerMetaBlock, minGasLimit, gasLimitSetting.EnableEpoch) - } - if maxGasLimitPerMetaMiniBlock < minGasLimit { - return fmt.Errorf("%w: maxGasLimitPerMetaMiniBlock = %d minGasLimit = %d in epoch %d", process.ErrInvalidMaxGasLimitPerMetaMiniBlock, maxGasLimitPerMetaMiniBlock, minGasLimit, gasLimitSetting.EnableEpoch) - } - if maxGasLimitPerTx < minGasLimit { - return fmt.Errorf("%w: maxGasLimitPerTx = %d minGasLimit = %d in epoch %d", process.ErrInvalidMaxGasLimitPerTx, maxGasLimitPerTx, minGasLimit, gasLimitSetting.EnableEpoch) - } + gc.maxGasLimitPerMiniBlock, err = strconv.ParseUint(gasLimitSetting.MaxGasLimitPerMiniBlock, conversionBase, bitConversionSize) + if err != nil { + return nil, fmt.Errorf("%w for epoch 
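// checkAndParseGasLimitSettings wraps each sentinel with the offending epoch
// via fmt.Errorf("%w for epoch %d", ...), which preserves errors.Is matching
// while adding context. Self-contained illustration of that pattern; the
// sentinel and function names here are illustrative.
package sketch

import (
	"errors"
	"fmt"
	"strconv"
)

var errInvalidMaxGasLimitPerBlock = errors.New("invalid maximum gas limit per block")

func parseMaxGasLimitPerBlock(raw string, epoch uint32) (uint64, error) {
	v, err := strconv.ParseUint(raw, 10, 64)
	if err != nil {
		return 0, fmt.Errorf("%w for epoch %d", errInvalidMaxGasLimitPerBlock, epoch)
	}
	return v, nil
}

// Callers can still test the error category despite the added context:
//   _, err := parseMaxGasLimitPerBlock("not-a-number", 2)
//   errors.Is(err, errInvalidMaxGasLimitPerBlock) // true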
%d", process.ErrInvalidMaxGasLimitPerMiniBlock, gasLimitSetting.EnableEpoch) } - return nil + gc.maxGasLimitPerMetaBlock, err = strconv.ParseUint(gasLimitSetting.MaxGasLimitPerMetaBlock, conversionBase, bitConversionSize) + if err != nil { + return nil, fmt.Errorf("%w for epoch %d", process.ErrInvalidMaxGasLimitPerMetaBlock, gasLimitSetting.EnableEpoch) + } + + gc.maxGasLimitPerMetaMiniBlock, err = strconv.ParseUint(gasLimitSetting.MaxGasLimitPerMetaMiniBlock, conversionBase, bitConversionSize) + if err != nil { + return nil, fmt.Errorf("%w for epoch %d", process.ErrInvalidMaxGasLimitPerMetaMiniBlock, gasLimitSetting.EnableEpoch) + } + + gc.maxGasLimitPerTx, err = strconv.ParseUint(gasLimitSetting.MaxGasLimitPerTx, conversionBase, bitConversionSize) + if err != nil { + return nil, fmt.Errorf("%w for epoch %d", process.ErrInvalidMaxGasLimitPerTx, gasLimitSetting.EnableEpoch) + } + + gc.extraGasLimitGuardedTx, err = strconv.ParseUint(gasLimitSetting.ExtraGasLimitGuardedTx, conversionBase, bitConversionSize) + if err != nil { + return nil, fmt.Errorf("%w for epoch %d", process.ErrInvalidExtraGasLimitGuardedTx, gasLimitSetting.EnableEpoch) + } + + if gc.maxGasLimitPerBlock < gc.minGasLimit { + return nil, fmt.Errorf("%w: maxGasLimitPerBlock = %d minGasLimit = %d in epoch %d", process.ErrInvalidMaxGasLimitPerBlock, gc.maxGasLimitPerBlock, gc.minGasLimit, gasLimitSetting.EnableEpoch) + } + if gc.maxGasLimitPerMiniBlock < gc.minGasLimit { + return nil, fmt.Errorf("%w: maxGasLimitPerMiniBlock = %d minGasLimit = %d in epoch %d", process.ErrInvalidMaxGasLimitPerMiniBlock, gc.maxGasLimitPerMiniBlock, gc.minGasLimit, gasLimitSetting.EnableEpoch) + } + if gc.maxGasLimitPerMetaBlock < gc.minGasLimit { + return nil, fmt.Errorf("%w: maxGasLimitPerMetaBlock = %d minGasLimit = %d in epoch %d", process.ErrInvalidMaxGasLimitPerMetaBlock, gc.maxGasLimitPerMetaBlock, gc.minGasLimit, gasLimitSetting.EnableEpoch) + } + if gc.maxGasLimitPerMetaMiniBlock < gc.minGasLimit { + return nil, fmt.Errorf("%w: maxGasLimitPerMetaMiniBlock = %d minGasLimit = %d in epoch %d", process.ErrInvalidMaxGasLimitPerMetaMiniBlock, gc.maxGasLimitPerMetaMiniBlock, gc.minGasLimit, gasLimitSetting.EnableEpoch) + } + if gc.maxGasLimitPerTx < gc.minGasLimit { + return nil, fmt.Errorf("%w: maxGasLimitPerTx = %d minGasLimit = %d in epoch %d", process.ErrInvalidMaxGasLimitPerTx, gc.maxGasLimitPerTx, gc.minGasLimit, gasLimitSetting.EnableEpoch) + } + + return gc, nil } func isPercentageInvalid(percentage float64) bool { @@ -382,6 +399,16 @@ func (ed *economicsData) MinGasLimit() uint64 { return ed.minGasLimit } +// ExtraGasLimitGuardedTx returns the extra gas limit required by the guarded transactions +func (ed *economicsData) ExtraGasLimitGuardedTx() uint64 { + return ed.extraGasLimitGuardedTx +} + +// MaxGasPriceSetGuardian returns the maximum gas price for set guardian transactions +func (ed *economicsData) MaxGasPriceSetGuardian() uint64 { + return ed.maxGasPriceSetGuardian +} + // GasPerDataByte will return the gas required for a economicsData byte func (ed *economicsData) GasPerDataByte() uint64 { return ed.gasPerDataByte @@ -580,6 +607,10 @@ func (ed *economicsData) ComputeGasLimit(tx data.TransactionWithFeeHandler) uint dataLen := uint64(len(tx.GetData())) gasLimit += dataLen * ed.gasPerDataByte + txInstance, ok := tx.(*transaction.Transaction) + if ok && ed.txVersionHandler.IsGuardedTransaction(txInstance) { + gasLimit += ed.extraGasLimitGuardedTx + } return gasLimit } @@ -710,9 +741,11 @@ func (ed *economicsData) 
setGasLimitConfig(currentEpoch uint32) { } if ed.gasLimitSettingEpoch != gasLimitSetting.EnableEpoch { - err := ed.setGasLimitSetting(gasLimitSetting) + gc, err := checkAndParseGasLimitSettings(gasLimitSetting) if err != nil { log.Error("setGasLimitConfig", "error", err.Error()) + } else { + ed.gasConfig = *gc } } diff --git a/process/economics/economicsData_test.go b/process/economics/economicsData_test.go index 6f3330641f1..0ac846787c1 100644 --- a/process/economics/economicsData_test.go +++ b/process/economics/economicsData_test.go @@ -63,11 +63,13 @@ func feeSettingsDummy(gasModifier float64) config.FeeSettings { MaxGasLimitPerMetaMiniBlock: "1000000", MaxGasLimitPerTx: "100000", MinGasLimit: "500", + ExtraGasLimitGuardedTx: "50000", }, }, - MinGasPrice: "18446744073709551615", - GasPerDataByte: "1", - GasPriceModifier: gasModifier, + MinGasPrice: "18446744073709551615", + GasPerDataByte: "1", + GasPriceModifier: gasModifier, + MaxGasPriceSetGuardian: "200000", } } @@ -81,11 +83,13 @@ func feeSettingsReal() config.FeeSettings { MaxGasLimitPerMetaMiniBlock: "15000000000", MaxGasLimitPerTx: "1500000000", MinGasLimit: "50000", + ExtraGasLimitGuardedTx: "50000", }, }, - MinGasPrice: "1000000000", - GasPerDataByte: "1500", - GasPriceModifier: 0.01, + MinGasPrice: "1000000000", + GasPerDataByte: "1500", + GasPriceModifier: 0.01, + MaxGasPriceSetGuardian: "200000", } } @@ -98,6 +102,7 @@ func createArgsForEconomicsData(gasModifier float64) economics.ArgsNewEconomicsD IsGasPriceModifierFlagEnabledField: true, }, BuiltInFunctionsCostHandler: &mock.BuiltInCostHandlerStub{}, + TxVersionChecker: &testscommon.TxVersionCheckerStub{}, } return args } @@ -111,6 +116,7 @@ func createArgsForEconomicsDataRealFees(handler economics.BuiltInFunctionsCostHa IsGasPriceModifierFlagEnabledField: true, }, BuiltInFunctionsCostHandler: handler, + TxVersionChecker: &testscommon.TxVersionCheckerStub{}, } return args } @@ -500,6 +506,7 @@ func TestEconomicsData_ConfirmedGasLimitSettingsChangeOrderedConfigs(t *testing. MaxGasLimitPerMetaMiniBlock: "15000000000", MaxGasLimitPerTx: "1500000000", MinGasLimit: "50000", + ExtraGasLimitGuardedTx: "50000", }, { EnableEpoch: 2, @@ -509,6 +516,7 @@ func TestEconomicsData_ConfirmedGasLimitSettingsChangeOrderedConfigs(t *testing. 
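// The Ordered/UnOrdered gas limit settings tests above suggest the active
// setting is chosen by epoch regardless of slice order. A hedged sketch of one
// plausible selection rule (take the entry with the highest EnableEpoch that
// does not exceed the current epoch); the repository's actual logic may
// differ, and the types below are illustrative.
package sketch

type gasLimitSetting struct {
	EnableEpoch         uint32
	MaxGasLimitPerBlock string
}

// selectSetting assumes settings is non-empty and that at least the first
// entry is active from epoch 0.
func selectSetting(settings []gasLimitSetting, currentEpoch uint32) gasLimitSetting {
	best := settings[0]
	for _, s := range settings[1:] {
		if s.EnableEpoch <= currentEpoch && s.EnableEpoch >= best.EnableEpoch {
			best = s
		}
	}
	return best
}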
MaxGasLimitPerMetaMiniBlock: "5000000000", MaxGasLimitPerTx: "500000000", MinGasLimit: "50000", + ExtraGasLimitGuardedTx: "50000", }, } @@ -588,6 +596,7 @@ func TestEconomicsData_ConfirmedGasLimitSettingsChangeUnOrderedConfigs(t *testin MaxGasLimitPerMetaMiniBlock: "5000000000", MaxGasLimitPerTx: "500000000", MinGasLimit: "50000", + ExtraGasLimitGuardedTx: "50000", }, { EnableEpoch: 0, @@ -597,6 +606,7 @@ func TestEconomicsData_ConfirmedGasLimitSettingsChangeUnOrderedConfigs(t *testin MaxGasLimitPerMetaMiniBlock: "15000000000", MaxGasLimitPerTx: "1500000000", MinGasLimit: "50000", + ExtraGasLimitGuardedTx: "50000", }, } @@ -1124,3 +1134,16 @@ func TestEconomicsData_ComputeGasLimitBasedOnBalance(t *testing.T) { require.Nil(t, err) require.Equal(t, uint64(11894070000), gasLimit) } + +func TestEconomicsData_MaxGasPriceSetGuardian(t *testing.T) { + t.Parallel() + + args := createArgsForEconomicsDataRealFees(&mock.BuiltInCostHandlerStub{}) + maxGasPriceSetGuardianString := "2000000" + expectedMaxGasPriceSetGuardian, err := strconv.ParseUint(maxGasPriceSetGuardianString, 10, 64) + require.Nil(t, err) + args.Economics.FeeSettings.MaxGasPriceSetGuardian = maxGasPriceSetGuardianString + economicData, _ := economics.NewEconomicsData(args) + + require.Equal(t, expectedMaxGasPriceSetGuardian, economicData.MaxGasPriceSetGuardian()) +} diff --git a/process/economics/export_test.go b/process/economics/export_test.go index 32dbd31b6de..f327701f3cb 100644 --- a/process/economics/export_test.go +++ b/process/economics/export_test.go @@ -38,6 +38,7 @@ func (ed *economicsData) GetGasLimitSetting() *config.GasLimitSetting { gasLimitSetting.MaxGasLimitPerMetaMiniBlock = strconv.FormatUint(ed.maxGasLimitPerMetaMiniBlock, 10) gasLimitSetting.MaxGasLimitPerTx = strconv.FormatUint(ed.maxGasLimitPerTx, 10) gasLimitSetting.MinGasLimit = strconv.FormatUint(ed.minGasLimit, 10) + gasLimitSetting.ExtraGasLimitGuardedTx = strconv.FormatUint(ed.extraGasLimitGuardedTx, 10) return gasLimitSetting } diff --git a/process/errors.go b/process/errors.go index 895634b3599..b00039aaa91 100644 --- a/process/errors.go +++ b/process/errors.go @@ -391,7 +391,7 @@ var ErrWrongTypeInMiniBlock = errors.New("type in miniblock is not correct for p var ErrNilTransactionCoordinator = errors.New("transaction coordinator is nil") // ErrNilUint64Converter signals that uint64converter is nil -var ErrNilUint64Converter = errors.New("unit64converter is nil") +var ErrNilUint64Converter = errors.New("uint64converter is nil") // ErrNilSmartContractResultProcessor signals that smart contract result processor is nil var ErrNilSmartContractResultProcessor = errors.New("nil smart contract result processor") @@ -522,6 +522,18 @@ var ErrMaxGasLimitUsedForDestMeTxsIsReached = errors.New("max gas limit used for // ErrInvalidMinimumGasPrice signals that an invalid gas price has been read from config file var ErrInvalidMinimumGasPrice = errors.New("invalid minimum gas price") +// ErrInvalidExtraGasLimitGuardedTx signals that an invalid gas limit has been provided in the config file +var ErrInvalidExtraGasLimitGuardedTx = errors.New("invalid extra gas limit for guarded transactions") + +// ErrInvalidMaxGasPriceSetGuardian signals that an invalid maximum gas price has been provided in the config file +var ErrInvalidMaxGasPriceSetGuardian = errors.New("invalid maximum gas price for set guardian") + +// ErrGuardianSignatureNotExpected signals that the guardian signature is not expected +var ErrGuardianSignatureNotExpected = errors.New("guardian signature not 
expected") + +// ErrGuardianAddressNotExpected signals that the guardian address is not expected +var ErrGuardianAddressNotExpected = errors.New("guardian address not expected") + // ErrInvalidMinimumGasLimitForTx signals that an invalid minimum gas limit for transactions has been read from config file var ErrInvalidMinimumGasLimitForTx = errors.New("invalid minimum gas limit for transactions") @@ -1148,3 +1160,39 @@ var ErrNilProcessDebugger = errors.New("nil process debugger") // ErrMaxCallsReached signals that the allowed max number of calls was reached var ErrMaxCallsReached = errors.New("max calls reached") + +// ErrNilGuardianChecker signals that a nil guardian checker was provided +var ErrNilGuardianChecker = errors.New("nil guardian checker") + +// ErrAccountHasNoGuardianSet signals that the account has no guardians set +var ErrAccountHasNoGuardianSet = errors.New("account has no guardian set") + +// ErrAccountHasNoActiveGuardian signals that the account has no active guardian +var ErrAccountHasNoActiveGuardian = errors.New("account has no active guardian") + +// ErrAccountHasNoPendingGuardian signals that the account has no pending guardian +var ErrAccountHasNoPendingGuardian = errors.New("account has no pending guardian") + +// ErrNilGuardedAccountHandler signals that a nil guarded account handler was provided +var ErrNilGuardedAccountHandler = errors.New("nil guarded account handler") + +// ErrTransactionNotExecutable signals that a transaction is not executable and gas will not be consumed +var ErrTransactionNotExecutable = errors.New("transaction is not executable and gas will not be consumed") + +// ErrTransactionAndAccountGuardianMismatch signals a mismatch between the guardian on the account and the one on the transaction +var ErrTransactionAndAccountGuardianMismatch = errors.New("mismatch between transaction guardian and configured account guardian") + +// ErrInvalidSetGuardianEpochsDelay signals an invalid configuration for the epochs delay +var ErrInvalidSetGuardianEpochsDelay = errors.New("incorrect setting for set guardian epochs delay") + +// ErrCannotReplaceGuardedAccountPendingGuardian signals that a pending guardian on a guarded account cannot be replaced +var ErrCannotReplaceGuardedAccountPendingGuardian = errors.New("cannot replace pending guardian on guarded account") + +// ErrNilGuardianServiceUID signals that a nil guardian service identifier was provided +var ErrNilGuardianServiceUID = errors.New("nil guardian service unique identifier") + +// ErrGasPriceTooHigh signals a too high gas price +var ErrGasPriceTooHigh = errors.New("gas price is too high for the transaction") + +// ErrGuardedTransactionNotExpected signals that a guarded transaction was received for processing but the account is not guarded +var ErrGuardedTransactionNotExpected = errors.New("guarded transaction not expected") diff --git a/process/factory/interceptorscontainer/baseInterceptorsContainerFactory.go b/process/factory/interceptorscontainer/baseInterceptorsContainerFactory.go index aa0ff8724b4..c66ac5bea6f 100644 --- a/process/factory/interceptorscontainer/baseInterceptorsContainerFactory.go +++ b/process/factory/interceptorscontainer/baseInterceptorsContainerFactory.go @@ -224,6 +224,7 @@ func (bicf *baseInterceptorsContainerFactory) createOneTxInterceptor(topic strin bicf.shardCoordinator, bicf.whiteListHandler, addrPubKeyConverter, + bicf.argInterceptorFactory.CoreComponents.TxVersionChecker(), bicf.maxTxNonceDeltaAllowed, ) if err != nil { diff --git 
diff --git a/process/factory/interceptorscontainer/metaInterceptorsContainerFactory_test.go b/process/factory/interceptorscontainer/metaInterceptorsContainerFactory_test.go index df04eb19643..5e636622ed3 100644 --- a/process/factory/interceptorscontainer/metaInterceptorsContainerFactory_test.go +++ b/process/factory/interceptorscontainer/metaInterceptorsContainerFactory_test.go @@ -15,6 +15,7 @@ import ( "github.com/multiversx/mx-chain-go/testscommon" "github.com/multiversx/mx-chain-go/testscommon/cryptoMocks" dataRetrieverMock "github.com/multiversx/mx-chain-go/testscommon/dataRetriever" + "github.com/multiversx/mx-chain-go/testscommon/economicsmocks" "github.com/multiversx/mx-chain-go/testscommon/p2pmocks" "github.com/multiversx/mx-chain-go/testscommon/shardingMocks" stateMock "github.com/multiversx/mx-chain-go/testscommon/state" @@ -631,7 +632,7 @@ func getArgumentsMeta( Store: createMetaStore(), DataPool: createMetaDataPools(), MaxTxNonceDeltaAllowed: maxTxNonceDeltaAllowed, - TxFeeHandler: &mock.FeeHandlerStub{}, + TxFeeHandler: &economicsmocks.EconomicsHandlerStub{}, BlockBlackList: &testscommon.TimeCacheStub{}, HeaderSigVerifier: &mock.HeaderSigVerifierStub{}, HeaderIntegrityVerifier: &mock.HeaderIntegrityVerifierStub{}, diff --git a/process/factory/interceptorscontainer/shardInterceptorsContainerFactory_test.go b/process/factory/interceptorscontainer/shardInterceptorsContainerFactory_test.go index 1f5b5d09d87..17213c3e7b1 100644 --- a/process/factory/interceptorscontainer/shardInterceptorsContainerFactory_test.go +++ b/process/factory/interceptorscontainer/shardInterceptorsContainerFactory_test.go @@ -15,6 +15,7 @@ import ( "github.com/multiversx/mx-chain-go/testscommon" "github.com/multiversx/mx-chain-go/testscommon/cryptoMocks" dataRetrieverMock "github.com/multiversx/mx-chain-go/testscommon/dataRetriever" + "github.com/multiversx/mx-chain-go/testscommon/economicsmocks" "github.com/multiversx/mx-chain-go/testscommon/epochNotifier" "github.com/multiversx/mx-chain-go/testscommon/hashingMocks" "github.com/multiversx/mx-chain-go/testscommon/p2pmocks" @@ -713,7 +714,7 @@ func getArgumentsShard( Store: createShardStore(), DataPool: createShardDataPools(), MaxTxNonceDeltaAllowed: maxTxNonceDeltaAllowed, - TxFeeHandler: &mock.FeeHandlerStub{}, + TxFeeHandler: &economicsmocks.EconomicsHandlerStub{}, BlockBlackList: &testscommon.TimeCacheStub{}, HeaderSigVerifier: &mock.HeaderSigVerifierStub{}, HeaderIntegrityVerifier: &mock.HeaderIntegrityVerifierStub{}, diff --git a/process/factory/metachain/intermediateProcessorsContainerFactory.go b/process/factory/metachain/intermediateProcessorsContainerFactory.go index d9a80abea50..826e61231c7 100644 --- a/process/factory/metachain/intermediateProcessorsContainerFactory.go +++ b/process/factory/metachain/intermediateProcessorsContainerFactory.go @@ -6,6 +6,7 @@ import ( "github.com/multiversx/mx-chain-core-go/data/block" "github.com/multiversx/mx-chain-core-go/hashing" "github.com/multiversx/mx-chain-core-go/marshal" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/process/block/postprocess" @@ -14,56 +15,67 @@ import ( ) type intermediateProcessorsContainerFactory struct { - shardCoordinator 
sharding.Coordinator + marshalizer marshal.Marshalizer + hasher hashing.Hasher + pubkeyConverter core.PubkeyConverter + store dataRetriever.StorageService + poolsHolder dataRetriever.PoolsHolder + economicsFee process.FeeHandler + enableEpochsHandler common.EnableEpochsHandler +} + +// ArgsNewIntermediateProcessorsContainerFactory defines the argument list to create a new container factory +type ArgsNewIntermediateProcessorsContainerFactory struct { + ShardCoordinator sharding.Coordinator + Marshalizer marshal.Marshalizer + Hasher hashing.Hasher + PubkeyConverter core.PubkeyConverter + Store dataRetriever.StorageService + PoolsHolder dataRetriever.PoolsHolder + EconomicsFee process.FeeHandler + EnableEpochsHandler common.EnableEpochsHandler } // NewIntermediateProcessorsContainerFactory is responsible for creating a new intermediate processors factory object func NewIntermediateProcessorsContainerFactory( - shardCoordinator sharding.Coordinator, - marshalizer marshal.Marshalizer, - hasher hashing.Hasher, - pubkeyConverter core.PubkeyConverter, - store dataRetriever.StorageService, - poolsHolder dataRetriever.PoolsHolder, - economicsFee process.FeeHandler, + args ArgsNewIntermediateProcessorsContainerFactory, ) (*intermediateProcessorsContainerFactory, error) { - if check.IfNil(shardCoordinator) { + if check.IfNil(args.ShardCoordinator) { return nil, process.ErrNilShardCoordinator } - if check.IfNil(marshalizer) { + if check.IfNil(args.Marshalizer) { return nil, process.ErrNilMarshalizer } - if check.IfNil(hasher) { + if check.IfNil(args.Hasher) { return nil, process.ErrNilHasher } - if check.IfNil(pubkeyConverter) { + if check.IfNil(args.PubkeyConverter) { return nil, process.ErrNilPubkeyConverter } - if check.IfNil(store) { + if check.IfNil(args.Store) { return nil, process.ErrNilStorage } - if check.IfNil(poolsHolder) { + if check.IfNil(args.PoolsHolder) { return nil, process.ErrNilPoolsHolder } - if check.IfNil(economicsFee) { + if check.IfNil(args.EconomicsFee) { return nil, process.ErrNilEconomicsFeeHandler } + if check.IfNil(args.EnableEpochsHandler) { + return nil, process.ErrNilEnableEpochsHandler + } return &intermediateProcessorsContainerFactory{ - shardCoordinator: shardCoordinator, - marshalizer: marshalizer, - hasher: hasher, - pubkeyConverter: pubkeyConverter, - poolsHolder: poolsHolder, - store: store, - economicsFee: economicsFee, + shardCoordinator: args.ShardCoordinator, + marshalizer: args.Marshalizer, + hasher: args.Hasher, + pubkeyConverter: args.PubkeyConverter, + store: args.Store, + poolsHolder: args.PoolsHolder, + economicsFee: args.EconomicsFee, + enableEpochsHandler: args.EnableEpochsHandler, }, nil } @@ -95,17 +107,18 @@ func (ppcm *intermediateProcessorsContainerFactory) Create() (process.Intermedia } func (ppcm *intermediateProcessorsContainerFactory) createSmartContractResultsIntermediateProcessor() (process.IntermediateTransactionHandler, error) { - irp, err := postprocess.NewIntermediateResultsProcessor( - ppcm.hasher, - ppcm.marshalizer, - ppcm.shardCoordinator, - ppcm.pubkeyConverter, - ppcm.store, - block.SmartContractResultBlock, - ppcm.poolsHolder.CurrentBlockTxs(), - ppcm.economicsFee, - ) - + args := postprocess.ArgsNewIntermediateResultsProcessor{ + Hasher: ppcm.hasher, + Marshalizer: ppcm.marshalizer, + Coordinator: ppcm.shardCoordinator, + PubkeyConv: ppcm.pubkeyConverter, + Store: ppcm.store, + BlockType: block.SmartContractResultBlock, + CurrTxs: ppcm.poolsHolder.CurrentBlockTxs(), + EconomicsFee: ppcm.economicsFee, + EnableEpochsHandler: 
ppcm.enableEpochsHandler, + } + irp, err := postprocess.NewIntermediateResultsProcessor(args) return irp, err } diff --git a/process/factory/metachain/intermediateProcessorsContainerFactory_test.go b/process/factory/metachain/intermediateProcessorsContainerFactory_test.go index 03826424f18..327ac2b6812 100644 --- a/process/factory/metachain/intermediateProcessorsContainerFactory_test.go +++ b/process/factory/metachain/intermediateProcessorsContainerFactory_test.go @@ -8,6 +8,7 @@ import ( "github.com/multiversx/mx-chain-go/process/mock" "github.com/multiversx/mx-chain-go/testscommon" dataRetrieverMock "github.com/multiversx/mx-chain-go/testscommon/dataRetriever" + "github.com/multiversx/mx-chain-go/testscommon/economicsmocks" "github.com/multiversx/mx-chain-go/testscommon/hashingMocks" storageStubs "github.com/multiversx/mx-chain-go/testscommon/storage" "github.com/stretchr/testify/assert" @@ -17,18 +18,26 @@ func createMockPubkeyConverter() *testscommon.PubkeyConverterMock { return testscommon.NewPubkeyConverterMock(32) } +func createMockArgsNewIntermediateProcessorsFactory() metachain.ArgsNewIntermediateProcessorsContainerFactory { + args := metachain.ArgsNewIntermediateProcessorsContainerFactory{ + Hasher: &hashingMocks.HasherMock{}, + Marshalizer: &mock.MarshalizerMock{}, + ShardCoordinator: mock.NewMultiShardsCoordinatorMock(5), + PubkeyConverter: createMockPubkeyConverter(), + Store: &storageStubs.ChainStorerStub{}, + PoolsHolder: dataRetrieverMock.NewPoolsHolderMock(), + EconomicsFee: &economicsmocks.EconomicsHandlerStub{}, + EnableEpochsHandler: &testscommon.EnableEpochsHandlerStub{IsKeepExecOrderOnCreatedSCRsEnabledField: true}, + } + return args +} + func TestNewIntermediateProcessorsContainerFactory_NilShardCoord(t *testing.T) { t.Parallel() - ipcf, err := metachain.NewIntermediateProcessorsContainerFactory( - nil, - &mock.MarshalizerMock{}, - &hashingMocks.HasherMock{}, - createMockPubkeyConverter(), - &storageStubs.ChainStorerStub{}, - dataRetrieverMock.NewPoolsHolderMock(), - &mock.FeeHandlerStub{}, - ) + args := createMockArgsNewIntermediateProcessorsFactory() + args.ShardCoordinator = nil + ipcf, err := metachain.NewIntermediateProcessorsContainerFactory(args) assert.Nil(t, ipcf) assert.Equal(t, process.ErrNilShardCoordinator, err) @@ -37,15 +46,9 @@ func TestNewIntermediateProcessorsContainerFactory_NilShardCoord(t *testing.T) { func TestNewIntermediateProcessorsContainerFactory_NilMarshalizer(t *testing.T) { t.Parallel() - ipcf, err := metachain.NewIntermediateProcessorsContainerFactory( - mock.NewMultiShardsCoordinatorMock(5), - nil, - &hashingMocks.HasherMock{}, - createMockPubkeyConverter(), - &storageStubs.ChainStorerStub{}, - dataRetrieverMock.NewPoolsHolderMock(), - &mock.FeeHandlerStub{}, - ) + args := createMockArgsNewIntermediateProcessorsFactory() + args.Marshalizer = nil + ipcf, err := metachain.NewIntermediateProcessorsContainerFactory(args) assert.Nil(t, ipcf) assert.Equal(t, process.ErrNilMarshalizer, err) @@ -54,15 +57,9 @@ func TestNewIntermediateProcessorsContainerFactory_NilMarshalizer(t *testing.T) func TestNewIntermediateProcessorsContainerFactory_NilHasher(t *testing.T) { t.Parallel() - ipcf, err := metachain.NewIntermediateProcessorsContainerFactory( - mock.NewMultiShardsCoordinatorMock(5), - &mock.MarshalizerMock{}, - nil, - createMockPubkeyConverter(), - &storageStubs.ChainStorerStub{}, - dataRetrieverMock.NewPoolsHolderMock(), - &mock.FeeHandlerStub{}, - ) + args := createMockArgsNewIntermediateProcessorsFactory() + args.Hasher = nil + ipcf, err 
:= metachain.NewIntermediateProcessorsContainerFactory(args) assert.Nil(t, ipcf) assert.Equal(t, process.ErrNilHasher, err) @@ -71,15 +68,9 @@ func TestNewIntermediateProcessorsContainerFactory_NilHasher(t *testing.T) { func TestNewIntermediateProcessorsContainerFactory_NilAdrConv(t *testing.T) { t.Parallel() - ipcf, err := metachain.NewIntermediateProcessorsContainerFactory( - mock.NewMultiShardsCoordinatorMock(5), - &mock.MarshalizerMock{}, - &hashingMocks.HasherMock{}, - nil, - &storageStubs.ChainStorerStub{}, - dataRetrieverMock.NewPoolsHolderMock(), - &mock.FeeHandlerStub{}, - ) + args := createMockArgsNewIntermediateProcessorsFactory() + args.PubkeyConverter = nil + ipcf, err := metachain.NewIntermediateProcessorsContainerFactory(args) assert.Nil(t, ipcf) assert.Equal(t, process.ErrNilPubkeyConverter, err) @@ -88,15 +79,9 @@ func TestNewIntermediateProcessorsContainerFactory_NilAdrConv(t *testing.T) { func TestNewIntermediateProcessorsContainerFactory_NilStorer(t *testing.T) { t.Parallel() - ipcf, err := metachain.NewIntermediateProcessorsContainerFactory( - mock.NewMultiShardsCoordinatorMock(5), - &mock.MarshalizerMock{}, - &hashingMocks.HasherMock{}, - createMockPubkeyConverter(), - nil, - dataRetrieverMock.NewPoolsHolderMock(), - &mock.FeeHandlerStub{}, - ) + args := createMockArgsNewIntermediateProcessorsFactory() + args.Store = nil + ipcf, err := metachain.NewIntermediateProcessorsContainerFactory(args) assert.Nil(t, ipcf) assert.Equal(t, process.ErrNilStorage, err) @@ -105,15 +90,9 @@ func TestNewIntermediateProcessorsContainerFactory_NilStorer(t *testing.T) { func TestNewIntermediateProcessorsContainerFactory_NilPoolsHolder(t *testing.T) { t.Parallel() - ipcf, err := metachain.NewIntermediateProcessorsContainerFactory( - mock.NewMultiShardsCoordinatorMock(5), - &mock.MarshalizerMock{}, - &hashingMocks.HasherMock{}, - createMockPubkeyConverter(), - &storageStubs.ChainStorerStub{}, - nil, - &mock.FeeHandlerStub{}, - ) + args := createMockArgsNewIntermediateProcessorsFactory() + args.PoolsHolder = nil + ipcf, err := metachain.NewIntermediateProcessorsContainerFactory(args) assert.Nil(t, ipcf) assert.Equal(t, process.ErrNilPoolsHolder, err) @@ -122,32 +101,30 @@ func TestNewIntermediateProcessorsContainerFactory_NilPoolsHolder(t *testing.T) func TestNewIntermediateProcessorsContainerFactory_NilEconomicsFeeHandler(t *testing.T) { t.Parallel() - ipcf, err := metachain.NewIntermediateProcessorsContainerFactory( - mock.NewMultiShardsCoordinatorMock(5), - &mock.MarshalizerMock{}, - &hashingMocks.HasherMock{}, - createMockPubkeyConverter(), - &storageStubs.ChainStorerStub{}, - dataRetrieverMock.NewPoolsHolderMock(), - nil, - ) + args := createMockArgsNewIntermediateProcessorsFactory() + args.EconomicsFee = nil + ipcf, err := metachain.NewIntermediateProcessorsContainerFactory(args) assert.Nil(t, ipcf) assert.Equal(t, process.ErrNilEconomicsFeeHandler, err) } +func TestNewIntermediateProcessorsContainerFactory_NilEnableEpochHandler(t *testing.T) { + t.Parallel() + + args := createMockArgsNewIntermediateProcessorsFactory() + args.EnableEpochsHandler = nil + ipcf, err := metachain.NewIntermediateProcessorsContainerFactory(args) + + assert.Nil(t, ipcf) + assert.Equal(t, process.ErrNilEnableEpochsHandler, err) +} + func TestNewIntermediateProcessorsContainerFactory(t *testing.T) { t.Parallel() - ipcf, err := metachain.NewIntermediateProcessorsContainerFactory( - mock.NewMultiShardsCoordinatorMock(5), - &mock.MarshalizerMock{}, - &hashingMocks.HasherMock{}, - createMockPubkeyConverter(), - 
&storageStubs.ChainStorerStub{}, - dataRetrieverMock.NewPoolsHolderMock(), - &mock.FeeHandlerStub{}, - ) + args := createMockArgsNewIntermediateProcessorsFactory() + ipcf, err := metachain.NewIntermediateProcessorsContainerFactory(args) assert.Nil(t, err) assert.NotNil(t, ipcf) @@ -157,15 +134,8 @@ func TestNewIntermediateProcessorsContainerFactory(t *testing.T) { func TestIntermediateProcessorsContainerFactory_Create(t *testing.T) { t.Parallel() - ipcf, err := metachain.NewIntermediateProcessorsContainerFactory( - mock.NewMultiShardsCoordinatorMock(5), - &mock.MarshalizerMock{}, - &hashingMocks.HasherMock{}, - createMockPubkeyConverter(), - &storageStubs.ChainStorerStub{}, - dataRetrieverMock.NewPoolsHolderMock(), - &mock.FeeHandlerStub{}, - ) + args := createMockArgsNewIntermediateProcessorsFactory() + ipcf, err := metachain.NewIntermediateProcessorsContainerFactory(args) assert.Nil(t, err) assert.NotNil(t, ipcf) diff --git a/process/factory/metachain/preProcessorsContainerFactory_test.go b/process/factory/metachain/preProcessorsContainerFactory_test.go index 03580c1df1b..9f504b1a227 100644 --- a/process/factory/metachain/preProcessorsContainerFactory_test.go +++ b/process/factory/metachain/preProcessorsContainerFactory_test.go @@ -9,6 +9,7 @@ import ( "github.com/multiversx/mx-chain-go/process/mock" "github.com/multiversx/mx-chain-go/testscommon" dataRetrieverMock "github.com/multiversx/mx-chain-go/testscommon/dataRetriever" + "github.com/multiversx/mx-chain-go/testscommon/economicsmocks" "github.com/multiversx/mx-chain-go/testscommon/hashingMocks" stateMock "github.com/multiversx/mx-chain-go/testscommon/state" storageStubs "github.com/multiversx/mx-chain-go/testscommon/storage" @@ -28,7 +29,7 @@ func TestNewPreProcessorsContainerFactory_NilShardCoordinator(t *testing.T) { &testscommon.RequestHandlerStub{}, &testscommon.TxProcessorMock{}, &testscommon.SmartContractResultsProcessorMock{}, - &mock.FeeHandlerStub{}, + &economicsmocks.EconomicsHandlerStub{}, &testscommon.GasHandlerStub{}, &mock.BlockTrackerMock{}, createMockPubkeyConverter(), @@ -57,7 +58,7 @@ func TestNewPreProcessorsContainerFactory_NilStore(t *testing.T) { &testscommon.RequestHandlerStub{}, &testscommon.TxProcessorMock{}, &testscommon.SmartContractResultsProcessorMock{}, - &mock.FeeHandlerStub{}, + &economicsmocks.EconomicsHandlerStub{}, &testscommon.GasHandlerStub{}, &mock.BlockTrackerMock{}, createMockPubkeyConverter(), @@ -86,7 +87,7 @@ func TestNewPreProcessorsContainerFactory_NilMarshalizer(t *testing.T) { &testscommon.RequestHandlerStub{}, &testscommon.TxProcessorMock{}, &testscommon.SmartContractResultsProcessorMock{}, - &mock.FeeHandlerStub{}, + &economicsmocks.EconomicsHandlerStub{}, &testscommon.GasHandlerStub{}, &mock.BlockTrackerMock{}, createMockPubkeyConverter(), @@ -115,7 +116,7 @@ func TestNewPreProcessorsContainerFactory_NilHasher(t *testing.T) { &testscommon.RequestHandlerStub{}, &testscommon.TxProcessorMock{}, &testscommon.SmartContractResultsProcessorMock{}, - &mock.FeeHandlerStub{}, + &economicsmocks.EconomicsHandlerStub{}, &testscommon.GasHandlerStub{}, &mock.BlockTrackerMock{}, createMockPubkeyConverter(), @@ -144,7 +145,7 @@ func TestNewPreProcessorsContainerFactory_NilDataPool(t *testing.T) { &testscommon.RequestHandlerStub{}, &testscommon.TxProcessorMock{}, &testscommon.SmartContractResultsProcessorMock{}, - &mock.FeeHandlerStub{}, + &economicsmocks.EconomicsHandlerStub{}, &testscommon.GasHandlerStub{}, &mock.BlockTrackerMock{}, createMockPubkeyConverter(), @@ -173,7 +174,7 @@ func 
TestNewPreProcessorsContainerFactory_NilAccounts(t *testing.T) { &testscommon.RequestHandlerStub{}, &testscommon.TxProcessorMock{}, &testscommon.SmartContractResultsProcessorMock{}, - &mock.FeeHandlerStub{}, + &economicsmocks.EconomicsHandlerStub{}, &testscommon.GasHandlerStub{}, &mock.BlockTrackerMock{}, createMockPubkeyConverter(), @@ -231,7 +232,7 @@ func TestNewPreProcessorsContainerFactory_NilTxProcessor(t *testing.T) { &testscommon.RequestHandlerStub{}, nil, &testscommon.SmartContractResultsProcessorMock{}, - &mock.FeeHandlerStub{}, + &economicsmocks.EconomicsHandlerStub{}, &testscommon.GasHandlerStub{}, &mock.BlockTrackerMock{}, createMockPubkeyConverter(), @@ -260,7 +261,7 @@ func TestNewPreProcessorsContainerFactory_NilRequestHandler(t *testing.T) { nil, &testscommon.TxProcessorMock{}, &testscommon.SmartContractResultsProcessorMock{}, - &mock.FeeHandlerStub{}, + &economicsmocks.EconomicsHandlerStub{}, &testscommon.GasHandlerStub{}, &mock.BlockTrackerMock{}, createMockPubkeyConverter(), @@ -288,7 +289,7 @@ func TestNewPreProcessorsContainerFactory_NilGasHandler(t *testing.T) { &testscommon.RequestHandlerStub{}, &testscommon.TxProcessorMock{}, &testscommon.SmartContractResultsProcessorMock{}, - &mock.FeeHandlerStub{}, + &economicsmocks.EconomicsHandlerStub{}, nil, &mock.BlockTrackerMock{}, createMockPubkeyConverter(), @@ -316,7 +317,7 @@ func TestNewPreProcessorsContainerFactory_NilBlockTracker(t *testing.T) { &testscommon.RequestHandlerStub{}, &testscommon.TxProcessorMock{}, &testscommon.SmartContractResultsProcessorMock{}, - &mock.FeeHandlerStub{}, + &economicsmocks.EconomicsHandlerStub{}, &testscommon.GasHandlerStub{}, nil, createMockPubkeyConverter(), @@ -344,7 +345,7 @@ func TestNewPreProcessorsContainerFactory_NilPubkeyConverter(t *testing.T) { &testscommon.RequestHandlerStub{}, &testscommon.TxProcessorMock{}, &testscommon.SmartContractResultsProcessorMock{}, - &mock.FeeHandlerStub{}, + &economicsmocks.EconomicsHandlerStub{}, &testscommon.GasHandlerStub{}, &mock.BlockTrackerMock{}, nil, @@ -372,7 +373,7 @@ func TestNewPreProcessorsContainerFactory_NilBlockSizeComputationHandler(t *test &testscommon.RequestHandlerStub{}, &testscommon.TxProcessorMock{}, &testscommon.SmartContractResultsProcessorMock{}, - &mock.FeeHandlerStub{}, + &economicsmocks.EconomicsHandlerStub{}, &testscommon.GasHandlerStub{}, &mock.BlockTrackerMock{}, createMockPubkeyConverter(), @@ -400,7 +401,7 @@ func TestNewPreProcessorsContainerFactory_NilBalanceComputationHandler(t *testin &testscommon.RequestHandlerStub{}, &testscommon.TxProcessorMock{}, &testscommon.SmartContractResultsProcessorMock{}, - &mock.FeeHandlerStub{}, + &economicsmocks.EconomicsHandlerStub{}, &testscommon.GasHandlerStub{}, &mock.BlockTrackerMock{}, createMockPubkeyConverter(), @@ -428,7 +429,7 @@ func TestNewPreProcessorsContainerFactory_NilEnableEpochsHandler(t *testing.T) { &testscommon.RequestHandlerStub{}, &testscommon.TxProcessorMock{}, &testscommon.SmartContractResultsProcessorMock{}, - &mock.FeeHandlerStub{}, + &economicsmocks.EconomicsHandlerStub{}, &testscommon.GasHandlerStub{}, &mock.BlockTrackerMock{}, createMockPubkeyConverter(), @@ -456,7 +457,7 @@ func TestNewPreProcessorsContainerFactory_NilTxTypeHandler(t *testing.T) { &testscommon.RequestHandlerStub{}, &testscommon.TxProcessorMock{}, &testscommon.SmartContractResultsProcessorMock{}, - &mock.FeeHandlerStub{}, + &economicsmocks.EconomicsHandlerStub{}, &testscommon.GasHandlerStub{}, &mock.BlockTrackerMock{}, createMockPubkeyConverter(), @@ -484,7 +485,7 @@ func 
TestNewPreProcessorsContainerFactory_NilScheduledTxsExecutionHandler(t *tes &testscommon.RequestHandlerStub{}, &testscommon.TxProcessorMock{}, &testscommon.SmartContractResultsProcessorMock{}, - &mock.FeeHandlerStub{}, + &economicsmocks.EconomicsHandlerStub{}, &testscommon.GasHandlerStub{}, &mock.BlockTrackerMock{}, createMockPubkeyConverter(), @@ -512,7 +513,7 @@ func TestNewPreProcessorsContainerFactory_NilProcessedMiniBlocksTracker(t *testi &testscommon.RequestHandlerStub{}, &testscommon.TxProcessorMock{}, &testscommon.SmartContractResultsProcessorMock{}, - &mock.FeeHandlerStub{}, + &economicsmocks.EconomicsHandlerStub{}, &testscommon.GasHandlerStub{}, &mock.BlockTrackerMock{}, createMockPubkeyConverter(), @@ -540,7 +541,7 @@ func TestNewPreProcessorsContainerFactory(t *testing.T) { &testscommon.RequestHandlerStub{}, &testscommon.TxProcessorMock{}, &testscommon.SmartContractResultsProcessorMock{}, - &mock.FeeHandlerStub{}, + &economicsmocks.EconomicsHandlerStub{}, &testscommon.GasHandlerStub{}, &mock.BlockTrackerMock{}, createMockPubkeyConverter(), @@ -575,7 +576,7 @@ func TestPreProcessorsContainerFactory_CreateErrTxPreproc(t *testing.T) { &testscommon.RequestHandlerStub{}, &testscommon.TxProcessorMock{}, &testscommon.SmartContractResultsProcessorMock{}, - &mock.FeeHandlerStub{}, + &economicsmocks.EconomicsHandlerStub{}, &testscommon.GasHandlerStub{}, &mock.BlockTrackerMock{}, createMockPubkeyConverter(), @@ -608,7 +609,7 @@ func TestPreProcessorsContainerFactory_Create(t *testing.T) { &testscommon.RequestHandlerStub{}, &testscommon.TxProcessorMock{}, &testscommon.SmartContractResultsProcessorMock{}, - &mock.FeeHandlerStub{}, + &economicsmocks.EconomicsHandlerStub{}, &testscommon.GasHandlerStub{}, &mock.BlockTrackerMock{}, createMockPubkeyConverter(), diff --git a/process/factory/metachain/vmContainerFactory.go b/process/factory/metachain/vmContainerFactory.go index 665be533e17..0134cda878b 100644 --- a/process/factory/metachain/vmContainerFactory.go +++ b/process/factory/metachain/vmContainerFactory.go @@ -68,7 +68,7 @@ func NewVMContainerFactory(args ArgsNewVMContainerFactory) (*vmContainerFactory, return nil, fmt.Errorf("%w in NewVMContainerFactory", process.ErrNilEconomicsData) } if check.IfNil(args.MessageSignVerifier) { - return nil, fmt.Errorf("%w in NewVMContainerFactory", process.ErrNilKeyGen) + return nil, fmt.Errorf("%w in NewVMContainerFactory", vm.ErrNilMessageSignVerifier) } if check.IfNil(args.NodesConfigProvider) { return nil, fmt.Errorf("%w in NewVMContainerFactory", process.ErrNilNodesConfigProvider) diff --git a/process/factory/metachain/vmContainerFactory_test.go b/process/factory/metachain/vmContainerFactory_test.go index 824cd787560..f4a796eac07 100644 --- a/process/factory/metachain/vmContainerFactory_test.go +++ b/process/factory/metachain/vmContainerFactory_test.go @@ -39,11 +39,14 @@ func createVmContainerMockArgument(gasSchedule core.GasScheduleNotifier) ArgsNew OwnerAddress: "aaaaaa", }, GovernanceSystemSCConfig: config.GovernanceSystemSCConfig{ + V1: config.GovernanceSystemSCConfigV1{ + ProposalCost: "500", + }, Active: config.GovernanceSystemSCConfigActive{ ProposalCost: "500", - MinQuorum: "50", - MinPassThreshold: "50", - MinVetoThreshold: "50", + MinQuorum: 0.5, + MinPassThreshold: 0.5, + MinVetoThreshold: 0.5, }, }, StakingSystemSCConfig: config.StakingSystemSCConfig{ @@ -89,7 +92,7 @@ func TestNewVMContainerFactory_NilMessageSignVerifier(t *testing.T) { vmf, err := NewVMContainerFactory(argsNewVmContainerFactory) assert.True(t, check.IfNil(vmf)) - 
assert.True(t, errors.Is(err, process.ErrNilKeyGen)) + assert.True(t, errors.Is(err, vm.ErrNilMessageSignVerifier)) } func TestNewVMContainerFactory_NilNodesConfigProvider(t *testing.T) { @@ -271,16 +274,19 @@ func TestVmContainerFactory_Create(t *testing.T) { MaxGasLimitPerMetaMiniBlock: "10000000000", MaxGasLimitPerTx: "10000000000", MinGasLimit: "10", + ExtraGasLimitGuardedTx: "50000", }, }, - MinGasPrice: "10", - GasPerDataByte: "1", - GasPriceModifier: 1.0, + MinGasPrice: "10", + GasPerDataByte: "1", + GasPriceModifier: 1.0, + MaxGasPriceSetGuardian: "100000", }, }, EpochNotifier: &epochNotifier.EpochNotifierStub{}, EnableEpochsHandler: &testscommon.EnableEpochsHandlerStub{}, BuiltInFunctionsCostHandler: &mock.BuiltInCostHandlerStub{}, + TxVersionChecker: &testscommon.TxVersionCheckerStub{}, } economicsData, _ := economics.NewEconomicsData(argsNewEconomicsData) @@ -299,13 +305,16 @@ func TestVmContainerFactory_Create(t *testing.T) { OwnerAddress: "aaaaaa", }, GovernanceSystemSCConfig: config.GovernanceSystemSCConfig{ + V1: config.GovernanceSystemSCConfigV1{ + ProposalCost: "500", + }, Active: config.GovernanceSystemSCConfigActive{ ProposalCost: "500", - MinQuorum: "50", - MinPassThreshold: "50", - MinVetoThreshold: "50", + MinQuorum: 0.5, + MinPassThreshold: 0.5, + MinVetoThreshold: 0.5, }, - FirstWhitelistedAddress: "3132333435363738393031323334353637383930313233343536373839303234", + ChangeConfigAddress: "3132333435363738393031323334353637383930313233343536373839303234", }, StakingSystemSCConfig: config.StakingSystemSCConfig{ GenesisNodePrice: "1000", @@ -405,6 +414,7 @@ func FillGasMapMetaChainSystemSCsCosts(value uint64) map[string]uint64 { gasMap["DelegationMgrOps"] = value gasMap["GetAllNodeStates"] = value gasMap["ValidatorToDelegation"] = value + gasMap["GetActiveFund"] = value gasMap["FixWaitingListSize"] = value return gasMap diff --git a/process/factory/shard/intermediateProcessorsContainerFactory.go b/process/factory/shard/intermediateProcessorsContainerFactory.go index d78eddbc061..c19b971d3b3 100644 --- a/process/factory/shard/intermediateProcessorsContainerFactory.go +++ b/process/factory/shard/intermediateProcessorsContainerFactory.go @@ -6,6 +6,7 @@ import ( "github.com/multiversx/mx-chain-core-go/data/block" "github.com/multiversx/mx-chain-core-go/hashing" "github.com/multiversx/mx-chain-core-go/marshal" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/process/block/postprocess" @@ -14,56 +15,67 @@ import ( ) type intermediateProcessorsContainerFactory struct { - shardCoordinator sharding.Coordinator - marshalizer marshal.Marshalizer - hasher hashing.Hasher - pubkeyConverter core.PubkeyConverter - store dataRetriever.StorageService - poolsHolder dataRetriever.PoolsHolder - economicsFee process.FeeHandler + shardCoordinator sharding.Coordinator + marshalizer marshal.Marshalizer + hasher hashing.Hasher + pubkeyConverter core.PubkeyConverter + store dataRetriever.StorageService + poolsHolder dataRetriever.PoolsHolder + economicsFee process.FeeHandler + enableEpochsHandler common.EnableEpochsHandler +} + +// ArgsNewIntermediateProcessorsContainerFactory defines the argument list to create a new container factory +type ArgsNewIntermediateProcessorsContainerFactory struct { + ShardCoordinator sharding.Coordinator + Marshalizer marshal.Marshalizer + Hasher hashing.Hasher + PubkeyConverter core.PubkeyConverter + Store dataRetriever.StorageService + 
PoolsHolder dataRetriever.PoolsHolder + EconomicsFee process.FeeHandler + EnableEpochsHandler common.EnableEpochsHandler } // NewIntermediateProcessorsContainerFactory is responsible for creating a new intermediate processors factory object func NewIntermediateProcessorsContainerFactory( - shardCoordinator sharding.Coordinator, - marshalizer marshal.Marshalizer, - hasher hashing.Hasher, - pubkeyConverter core.PubkeyConverter, - store dataRetriever.StorageService, - poolsHolder dataRetriever.PoolsHolder, - economicsFee process.FeeHandler, + args ArgsNewIntermediateProcessorsContainerFactory, ) (*intermediateProcessorsContainerFactory, error) { - if check.IfNil(shardCoordinator) { + if check.IfNil(args.ShardCoordinator) { return nil, process.ErrNilShardCoordinator } - if check.IfNil(marshalizer) { + if check.IfNil(args.Marshalizer) { return nil, process.ErrNilMarshalizer } - if check.IfNil(hasher) { + if check.IfNil(args.Hasher) { return nil, process.ErrNilHasher } - if check.IfNil(pubkeyConverter) { + if check.IfNil(args.PubkeyConverter) { return nil, process.ErrNilPubkeyConverter } - if check.IfNil(store) { + if check.IfNil(args.Store) { return nil, process.ErrNilStorage } - if check.IfNil(poolsHolder) { + if check.IfNil(args.PoolsHolder) { return nil, process.ErrNilPoolsHolder } - if check.IfNil(economicsFee) { + if check.IfNil(args.EconomicsFee) { return nil, process.ErrNilEconomicsFeeHandler } + if check.IfNil(args.EnableEpochsHandler) { + return nil, process.ErrNilEnableEpochsHandler + } return &intermediateProcessorsContainerFactory{ - shardCoordinator: shardCoordinator, - marshalizer: marshalizer, - hasher: hasher, - pubkeyConverter: pubkeyConverter, - store: store, - poolsHolder: poolsHolder, - economicsFee: economicsFee, + shardCoordinator: args.ShardCoordinator, + marshalizer: args.Marshalizer, + hasher: args.Hasher, + pubkeyConverter: args.PubkeyConverter, + store: args.Store, + poolsHolder: args.PoolsHolder, + economicsFee: args.EconomicsFee, + enableEpochsHandler: args.EnableEpochsHandler, }, nil } @@ -105,16 +117,18 @@ func (ppcm *intermediateProcessorsContainerFactory) Create() (process.Intermedia } func (ppcm *intermediateProcessorsContainerFactory) createSmartContractResultsIntermediateProcessor() (process.IntermediateTransactionHandler, error) { - irp, err := postprocess.NewIntermediateResultsProcessor( - ppcm.hasher, - ppcm.marshalizer, - ppcm.shardCoordinator, - ppcm.pubkeyConverter, - ppcm.store, - block.SmartContractResultBlock, - ppcm.poolsHolder.CurrentBlockTxs(), - ppcm.economicsFee, - ) + args := postprocess.ArgsNewIntermediateResultsProcessor{ + Hasher: ppcm.hasher, + Marshalizer: ppcm.marshalizer, + Coordinator: ppcm.shardCoordinator, + PubkeyConv: ppcm.pubkeyConverter, + Store: ppcm.store, + BlockType: block.SmartContractResultBlock, + CurrTxs: ppcm.poolsHolder.CurrentBlockTxs(), + EconomicsFee: ppcm.economicsFee, + EnableEpochsHandler: ppcm.enableEpochsHandler, + } + irp, err := postprocess.NewIntermediateResultsProcessor(args) return irp, err } diff --git a/process/factory/shard/intermediateProcessorsContainerFactory_test.go b/process/factory/shard/intermediateProcessorsContainerFactory_test.go index bcec9873e60..a3aae67c19c 100644 --- a/process/factory/shard/intermediateProcessorsContainerFactory_test.go +++ b/process/factory/shard/intermediateProcessorsContainerFactory_test.go @@ -10,6 +10,7 @@ import ( "github.com/multiversx/mx-chain-go/storage" "github.com/multiversx/mx-chain-go/testscommon" dataRetrieverMock 
"github.com/multiversx/mx-chain-go/testscommon/dataRetriever" + "github.com/multiversx/mx-chain-go/testscommon/economicsmocks" "github.com/multiversx/mx-chain-go/testscommon/hashingMocks" storageStubs "github.com/multiversx/mx-chain-go/testscommon/storage" "github.com/stretchr/testify/assert" @@ -51,19 +52,26 @@ func createMockPubkeyConverter() *testscommon.PubkeyConverterMock { return testscommon.NewPubkeyConverterMock(32) } +func createMockArgsNewIntermediateProcessorsFactory() shard.ArgsNewIntermediateProcessorsContainerFactory { + args := shard.ArgsNewIntermediateProcessorsContainerFactory{ + Hasher: &hashingMocks.HasherMock{}, + Marshalizer: &mock.MarshalizerMock{}, + ShardCoordinator: mock.NewMultiShardsCoordinatorMock(5), + PubkeyConverter: createMockPubkeyConverter(), + Store: &storageStubs.ChainStorerStub{}, + PoolsHolder: createDataPools(), + EconomicsFee: &economicsmocks.EconomicsHandlerStub{}, + EnableEpochsHandler: &testscommon.EnableEpochsHandlerStub{IsKeepExecOrderOnCreatedSCRsEnabledField: true}, + } + return args +} + func TestNewIntermediateProcessorsContainerFactory_NilShardCoord(t *testing.T) { t.Parallel() - dPool := createDataPools() - ipcf, err := shard.NewIntermediateProcessorsContainerFactory( - nil, - &mock.MarshalizerMock{}, - &hashingMocks.HasherMock{}, - createMockPubkeyConverter(), - &storageStubs.ChainStorerStub{}, - dPool, - &mock.FeeHandlerStub{}, - ) + args := createMockArgsNewIntermediateProcessorsFactory() + args.ShardCoordinator = nil + ipcf, err := shard.NewIntermediateProcessorsContainerFactory(args) assert.Nil(t, ipcf) assert.Equal(t, process.ErrNilShardCoordinator, err) @@ -72,16 +80,9 @@ func TestNewIntermediateProcessorsContainerFactory_NilShardCoord(t *testing.T) { func TestNewIntermediateProcessorsContainerFactory_NilMarshalizer(t *testing.T) { t.Parallel() - dPool := createDataPools() - ipcf, err := shard.NewIntermediateProcessorsContainerFactory( - mock.NewMultiShardsCoordinatorMock(3), - nil, - &hashingMocks.HasherMock{}, - createMockPubkeyConverter(), - &storageStubs.ChainStorerStub{}, - dPool, - &mock.FeeHandlerStub{}, - ) + args := createMockArgsNewIntermediateProcessorsFactory() + args.Marshalizer = nil + ipcf, err := shard.NewIntermediateProcessorsContainerFactory(args) assert.Nil(t, ipcf) assert.Equal(t, process.ErrNilMarshalizer, err) @@ -90,16 +91,9 @@ func TestNewIntermediateProcessorsContainerFactory_NilMarshalizer(t *testing.T) func TestNewIntermediateProcessorsContainerFactory_NilHasher(t *testing.T) { t.Parallel() - dPool := createDataPools() - ipcf, err := shard.NewIntermediateProcessorsContainerFactory( - mock.NewMultiShardsCoordinatorMock(3), - &mock.MarshalizerMock{}, - nil, - createMockPubkeyConverter(), - &storageStubs.ChainStorerStub{}, - dPool, - &mock.FeeHandlerStub{}, - ) + args := createMockArgsNewIntermediateProcessorsFactory() + args.Hasher = nil + ipcf, err := shard.NewIntermediateProcessorsContainerFactory(args) assert.Nil(t, ipcf) assert.Equal(t, process.ErrNilHasher, err) @@ -108,16 +102,9 @@ func TestNewIntermediateProcessorsContainerFactory_NilHasher(t *testing.T) { func TestNewIntermediateProcessorsContainerFactory_NilAdrConv(t *testing.T) { t.Parallel() - dPool := createDataPools() - ipcf, err := shard.NewIntermediateProcessorsContainerFactory( - mock.NewMultiShardsCoordinatorMock(3), - &mock.MarshalizerMock{}, - &hashingMocks.HasherMock{}, - nil, - &storageStubs.ChainStorerStub{}, - dPool, - &mock.FeeHandlerStub{}, - ) + args := createMockArgsNewIntermediateProcessorsFactory() + args.PubkeyConverter = nil + 
ipcf, err := shard.NewIntermediateProcessorsContainerFactory(args) assert.Nil(t, ipcf) assert.Equal(t, process.ErrNilPubkeyConverter, err) @@ -126,16 +113,9 @@ func TestNewIntermediateProcessorsContainerFactory_NilAdrConv(t *testing.T) { func TestNewIntermediateProcessorsContainerFactory_NilStorer(t *testing.T) { t.Parallel() - dPool := createDataPools() - ipcf, err := shard.NewIntermediateProcessorsContainerFactory( - mock.NewMultiShardsCoordinatorMock(3), - &mock.MarshalizerMock{}, - &hashingMocks.HasherMock{}, - createMockPubkeyConverter(), - nil, - dPool, - &mock.FeeHandlerStub{}, - ) + args := createMockArgsNewIntermediateProcessorsFactory() + args.Store = nil + ipcf, err := shard.NewIntermediateProcessorsContainerFactory(args) assert.Nil(t, ipcf) assert.Equal(t, process.ErrNilStorage, err) @@ -144,15 +124,9 @@ func TestNewIntermediateProcessorsContainerFactory_NilStorer(t *testing.T) { func TestNewIntermediateProcessorsContainerFactory_NilPoolsHolder(t *testing.T) { t.Parallel() - ipcf, err := shard.NewIntermediateProcessorsContainerFactory( - mock.NewMultiShardsCoordinatorMock(3), - &mock.MarshalizerMock{}, - &hashingMocks.HasherMock{}, - createMockPubkeyConverter(), - &storageStubs.ChainStorerStub{}, - nil, - &mock.FeeHandlerStub{}, - ) + args := createMockArgsNewIntermediateProcessorsFactory() + args.PoolsHolder = nil + ipcf, err := shard.NewIntermediateProcessorsContainerFactory(args) assert.Nil(t, ipcf) assert.Equal(t, process.ErrNilPoolsHolder, err) @@ -161,34 +135,30 @@ func TestNewIntermediateProcessorsContainerFactory_NilPoolsHolder(t *testing.T) func TestNewIntermediateProcessorsContainerFactory_NilEconomicsFeeHandler(t *testing.T) { t.Parallel() - dPool := createDataPools() - ipcf, err := shard.NewIntermediateProcessorsContainerFactory( - mock.NewMultiShardsCoordinatorMock(3), - &mock.MarshalizerMock{}, - &hashingMocks.HasherMock{}, - createMockPubkeyConverter(), - &storageStubs.ChainStorerStub{}, - dPool, - nil, - ) + args := createMockArgsNewIntermediateProcessorsFactory() + args.EconomicsFee = nil + ipcf, err := shard.NewIntermediateProcessorsContainerFactory(args) assert.Nil(t, ipcf) assert.Equal(t, process.ErrNilEconomicsFeeHandler, err) } +func TestNewIntermediateProcessorsContainerFactory_NilEnableEpochsHandler(t *testing.T) { + t.Parallel() + + args := createMockArgsNewIntermediateProcessorsFactory() + args.EnableEpochsHandler = nil + ipcf, err := shard.NewIntermediateProcessorsContainerFactory(args) + + assert.Nil(t, ipcf) + assert.Equal(t, process.ErrNilEnableEpochsHandler, err) +} + func TestNewIntermediateProcessorsContainerFactory(t *testing.T) { t.Parallel() - dPool := createDataPools() - ipcf, err := shard.NewIntermediateProcessorsContainerFactory( - mock.NewMultiShardsCoordinatorMock(3), - &mock.MarshalizerMock{}, - &hashingMocks.HasherMock{}, - createMockPubkeyConverter(), - &storageStubs.ChainStorerStub{}, - dPool, - &mock.FeeHandlerStub{}, - ) + args := createMockArgsNewIntermediateProcessorsFactory() + ipcf, err := shard.NewIntermediateProcessorsContainerFactory(args) assert.Nil(t, err) assert.NotNil(t, ipcf) @@ -198,17 +168,8 @@ func TestNewIntermediateProcessorsContainerFactory(t *testing.T) { func TestIntermediateProcessorsContainerFactory_Create(t *testing.T) { t.Parallel() - dPool := createDataPools() - ipcf, err := shard.NewIntermediateProcessorsContainerFactory( - mock.NewMultiShardsCoordinatorMock(3), - &mock.MarshalizerMock{}, - &hashingMocks.HasherMock{}, - createMockPubkeyConverter(), - &storageStubs.ChainStorerStub{}, - dPool, - 
&mock.FeeHandlerStub{}, - ) - + args := createMockArgsNewIntermediateProcessorsFactory() + ipcf, err := shard.NewIntermediateProcessorsContainerFactory(args) assert.Nil(t, err) assert.NotNil(t, ipcf) diff --git a/process/factory/shard/preProcessorsContainerFactory_test.go b/process/factory/shard/preProcessorsContainerFactory_test.go index a44ffddcc87..5eec22fc5d2 100644 --- a/process/factory/shard/preProcessorsContainerFactory_test.go +++ b/process/factory/shard/preProcessorsContainerFactory_test.go @@ -8,6 +8,7 @@ import ( "github.com/multiversx/mx-chain-go/process/mock" "github.com/multiversx/mx-chain-go/testscommon" dataRetrieverMock "github.com/multiversx/mx-chain-go/testscommon/dataRetriever" + "github.com/multiversx/mx-chain-go/testscommon/economicsmocks" "github.com/multiversx/mx-chain-go/testscommon/hashingMocks" stateMock "github.com/multiversx/mx-chain-go/testscommon/state" storageStubs "github.com/multiversx/mx-chain-go/testscommon/storage" @@ -34,7 +35,7 @@ func TestNewPreProcessorsContainerFactory_NilShardCoordinator(t *testing.T) { &testscommon.SCProcessorMock{}, &testscommon.SmartContractResultsProcessorMock{}, &testscommon.RewardTxProcessorMock{}, - &mock.FeeHandlerStub{}, + &economicsmocks.EconomicsHandlerStub{}, &testscommon.GasHandlerStub{}, &mock.BlockTrackerMock{}, &testscommon.BlockSizeComputationStub{}, @@ -65,7 +66,7 @@ func TestNewPreProcessorsContainerFactory_NilStore(t *testing.T) { &testscommon.SCProcessorMock{}, &testscommon.SmartContractResultsProcessorMock{}, &testscommon.RewardTxProcessorMock{}, - &mock.FeeHandlerStub{}, + &economicsmocks.EconomicsHandlerStub{}, &testscommon.GasHandlerStub{}, &mock.BlockTrackerMock{}, &testscommon.BlockSizeComputationStub{}, @@ -96,7 +97,7 @@ func TestNewPreProcessorsContainerFactory_NilMarshalizer(t *testing.T) { &testscommon.SCProcessorMock{}, &testscommon.SmartContractResultsProcessorMock{}, &testscommon.RewardTxProcessorMock{}, - &mock.FeeHandlerStub{}, + &economicsmocks.EconomicsHandlerStub{}, &testscommon.GasHandlerStub{}, &mock.BlockTrackerMock{}, &testscommon.BlockSizeComputationStub{}, @@ -127,7 +128,7 @@ func TestNewPreProcessorsContainerFactory_NilHasher(t *testing.T) { &testscommon.SCProcessorMock{}, &testscommon.SmartContractResultsProcessorMock{}, &testscommon.RewardTxProcessorMock{}, - &mock.FeeHandlerStub{}, + &economicsmocks.EconomicsHandlerStub{}, &testscommon.GasHandlerStub{}, &mock.BlockTrackerMock{}, &testscommon.BlockSizeComputationStub{}, @@ -158,7 +159,7 @@ func TestNewPreProcessorsContainerFactory_NilDataPool(t *testing.T) { &testscommon.SCProcessorMock{}, &testscommon.SmartContractResultsProcessorMock{}, &testscommon.RewardTxProcessorMock{}, - &mock.FeeHandlerStub{}, + &economicsmocks.EconomicsHandlerStub{}, &testscommon.GasHandlerStub{}, &mock.BlockTrackerMock{}, &testscommon.BlockSizeComputationStub{}, @@ -189,7 +190,7 @@ func TestNewPreProcessorsContainerFactory_NilAddrConv(t *testing.T) { &testscommon.SCProcessorMock{}, &testscommon.SmartContractResultsProcessorMock{}, &testscommon.RewardTxProcessorMock{}, - &mock.FeeHandlerStub{}, + &economicsmocks.EconomicsHandlerStub{}, &testscommon.GasHandlerStub{}, &mock.BlockTrackerMock{}, &testscommon.BlockSizeComputationStub{}, @@ -220,7 +221,7 @@ func TestNewPreProcessorsContainerFactory_NilAccounts(t *testing.T) { &testscommon.SCProcessorMock{}, &testscommon.SmartContractResultsProcessorMock{}, &testscommon.RewardTxProcessorMock{}, - &mock.FeeHandlerStub{}, + &economicsmocks.EconomicsHandlerStub{}, &testscommon.GasHandlerStub{}, 
&mock.BlockTrackerMock{}, &testscommon.BlockSizeComputationStub{}, @@ -251,7 +252,7 @@ func TestNewPreProcessorsContainerFactory_NilTxProcessor(t *testing.T) { &testscommon.SCProcessorMock{}, &testscommon.SmartContractResultsProcessorMock{}, &testscommon.RewardTxProcessorMock{}, - &mock.FeeHandlerStub{}, + &economicsmocks.EconomicsHandlerStub{}, &testscommon.GasHandlerStub{}, &mock.BlockTrackerMock{}, &testscommon.BlockSizeComputationStub{}, @@ -282,7 +283,7 @@ func TestNewPreProcessorsContainerFactory_NilSCProcessor(t *testing.T) { nil, &testscommon.SmartContractResultsProcessorMock{}, &testscommon.RewardTxProcessorMock{}, - &mock.FeeHandlerStub{}, + &economicsmocks.EconomicsHandlerStub{}, &testscommon.GasHandlerStub{}, &mock.BlockTrackerMock{}, &testscommon.BlockSizeComputationStub{}, @@ -313,7 +314,7 @@ func TestNewPreProcessorsContainerFactory_NilSCR(t *testing.T) { &testscommon.SCProcessorMock{}, nil, &testscommon.RewardTxProcessorMock{}, - &mock.FeeHandlerStub{}, + &economicsmocks.EconomicsHandlerStub{}, &testscommon.GasHandlerStub{}, &mock.BlockTrackerMock{}, &testscommon.BlockSizeComputationStub{}, @@ -344,7 +345,7 @@ func TestNewPreProcessorsContainerFactory_NilRewardTxProcessor(t *testing.T) { &testscommon.SCProcessorMock{}, &testscommon.SmartContractResultsProcessorMock{}, nil, - &mock.FeeHandlerStub{}, + &economicsmocks.EconomicsHandlerStub{}, &testscommon.GasHandlerStub{}, &mock.BlockTrackerMock{}, &testscommon.BlockSizeComputationStub{}, @@ -375,7 +376,7 @@ func TestNewPreProcessorsContainerFactory_NilRequestHandler(t *testing.T) { &testscommon.SCProcessorMock{}, &testscommon.SmartContractResultsProcessorMock{}, &testscommon.RewardTxProcessorMock{}, - &mock.FeeHandlerStub{}, + &economicsmocks.EconomicsHandlerStub{}, &testscommon.GasHandlerStub{}, &mock.BlockTrackerMock{}, &testscommon.BlockSizeComputationStub{}, @@ -437,7 +438,7 @@ func TestNewPreProcessorsContainerFactory_NilGasHandler(t *testing.T) { &testscommon.SCProcessorMock{}, &testscommon.SmartContractResultsProcessorMock{}, &testscommon.RewardTxProcessorMock{}, - &mock.FeeHandlerStub{}, + &economicsmocks.EconomicsHandlerStub{}, nil, &mock.BlockTrackerMock{}, &testscommon.BlockSizeComputationStub{}, @@ -468,7 +469,7 @@ func TestNewPreProcessorsContainerFactory_NilBlockTracker(t *testing.T) { &testscommon.SCProcessorMock{}, &testscommon.SmartContractResultsProcessorMock{}, &testscommon.RewardTxProcessorMock{}, - &mock.FeeHandlerStub{}, + &economicsmocks.EconomicsHandlerStub{}, &testscommon.GasHandlerStub{}, nil, &testscommon.BlockSizeComputationStub{}, @@ -499,7 +500,7 @@ func TestNewPreProcessorsContainerFactory_NilBlockSizeComputationHandler(t *test &testscommon.SCProcessorMock{}, &testscommon.SmartContractResultsProcessorMock{}, &testscommon.RewardTxProcessorMock{}, - &mock.FeeHandlerStub{}, + &economicsmocks.EconomicsHandlerStub{}, &testscommon.GasHandlerStub{}, &mock.BlockTrackerMock{}, nil, @@ -530,7 +531,7 @@ func TestNewPreProcessorsContainerFactory_NilBalanceComputationHandler(t *testin &testscommon.SCProcessorMock{}, &testscommon.SmartContractResultsProcessorMock{}, &testscommon.RewardTxProcessorMock{}, - &mock.FeeHandlerStub{}, + &economicsmocks.EconomicsHandlerStub{}, &testscommon.GasHandlerStub{}, &mock.BlockTrackerMock{}, &testscommon.BlockSizeComputationStub{}, @@ -561,7 +562,7 @@ func TestNewPreProcessorsContainerFactory_NilEnableEpochsHandler(t *testing.T) { &testscommon.SCProcessorMock{}, &testscommon.SmartContractResultsProcessorMock{}, &testscommon.RewardTxProcessorMock{}, - &mock.FeeHandlerStub{}, 
+ &economicsmocks.EconomicsHandlerStub{}, &testscommon.GasHandlerStub{}, &mock.BlockTrackerMock{}, &testscommon.BlockSizeComputationStub{}, @@ -592,7 +593,7 @@ func TestNewPreProcessorsContainerFactory_NilTxTypeHandler(t *testing.T) { &testscommon.SCProcessorMock{}, &testscommon.SmartContractResultsProcessorMock{}, &testscommon.RewardTxProcessorMock{}, - &mock.FeeHandlerStub{}, + &economicsmocks.EconomicsHandlerStub{}, &testscommon.GasHandlerStub{}, &mock.BlockTrackerMock{}, &testscommon.BlockSizeComputationStub{}, @@ -623,7 +624,7 @@ func TestNewPreProcessorsContainerFactory_NilScheduledTxsExecutionHandler(t *tes &testscommon.SCProcessorMock{}, &testscommon.SmartContractResultsProcessorMock{}, &testscommon.RewardTxProcessorMock{}, - &mock.FeeHandlerStub{}, + &economicsmocks.EconomicsHandlerStub{}, &testscommon.GasHandlerStub{}, &mock.BlockTrackerMock{}, &testscommon.BlockSizeComputationStub{}, @@ -654,7 +655,7 @@ func TestNewPreProcessorsContainerFactory_NilProcessedMiniBlocksTracker(t *testi &testscommon.SCProcessorMock{}, &testscommon.SmartContractResultsProcessorMock{}, &testscommon.RewardTxProcessorMock{}, - &mock.FeeHandlerStub{}, + &economicsmocks.EconomicsHandlerStub{}, &testscommon.GasHandlerStub{}, &mock.BlockTrackerMock{}, &testscommon.BlockSizeComputationStub{}, @@ -685,7 +686,7 @@ func TestNewPreProcessorsContainerFactory(t *testing.T) { &testscommon.SCProcessorMock{}, &testscommon.SmartContractResultsProcessorMock{}, &testscommon.RewardTxProcessorMock{}, - &mock.FeeHandlerStub{}, + &economicsmocks.EconomicsHandlerStub{}, &testscommon.GasHandlerStub{}, &mock.BlockTrackerMock{}, &testscommon.BlockSizeComputationStub{}, @@ -721,7 +722,7 @@ func TestPreProcessorsContainerFactory_CreateErrTxPreproc(t *testing.T) { &testscommon.SCProcessorMock{}, &testscommon.SmartContractResultsProcessorMock{}, &testscommon.RewardTxProcessorMock{}, - &mock.FeeHandlerStub{}, + &economicsmocks.EconomicsHandlerStub{}, &testscommon.GasHandlerStub{}, &mock.BlockTrackerMock{}, &testscommon.BlockSizeComputationStub{}, @@ -763,7 +764,7 @@ func TestPreProcessorsContainerFactory_CreateErrScrPreproc(t *testing.T) { &testscommon.SCProcessorMock{}, &testscommon.SmartContractResultsProcessorMock{}, &testscommon.RewardTxProcessorMock{}, - &mock.FeeHandlerStub{}, + &economicsmocks.EconomicsHandlerStub{}, &testscommon.GasHandlerStub{}, &mock.BlockTrackerMock{}, &testscommon.BlockSizeComputationStub{}, @@ -808,7 +809,7 @@ func TestPreProcessorsContainerFactory_Create(t *testing.T) { &testscommon.SCProcessorMock{}, &testscommon.SmartContractResultsProcessorMock{}, &testscommon.RewardTxProcessorMock{}, - &mock.FeeHandlerStub{}, + &economicsmocks.EconomicsHandlerStub{}, &testscommon.GasHandlerStub{}, &mock.BlockTrackerMock{}, &testscommon.BlockSizeComputationStub{}, diff --git a/process/gasCost.go b/process/gasCost.go index 87e54722c92..25b0dfe2881 100644 --- a/process/gasCost.go +++ b/process/gasCost.go @@ -28,6 +28,9 @@ type BuiltInCost struct { ESDTNFTAddUri uint64 ESDTNFTUpdateAttributes uint64 ESDTNFTMultiTransfer uint64 + SetGuardian uint64 + GuardAccount uint64 + UnGuardAccount uint64 } // GasCost holds all the needed gas costs for system smart contracts diff --git a/process/guardian/disabled/disabledGuardedAccount.go b/process/guardian/disabled/disabledGuardedAccount.go new file mode 100644 index 00000000000..b09f8dd181c --- /dev/null +++ b/process/guardian/disabled/disabledGuardedAccount.go @@ -0,0 +1,47 @@ +package disabled + +import ( + "github.com/multiversx/mx-chain-core-go/data/guardians" + 
"github.com/multiversx/mx-chain-go/state" + vmcommon "github.com/multiversx/mx-chain-vm-common-go" +) + +type disabledGuardedAccount struct{} + +// NewDisabledGuardedAccountHandler returns a disabled implementation +func NewDisabledGuardedAccountHandler() *disabledGuardedAccount { + return &disabledGuardedAccount{} +} + +// GetActiveGuardian returns nil, nil as this is a disabled implementation +func (dga *disabledGuardedAccount) GetActiveGuardian(_ vmcommon.UserAccountHandler) ([]byte, error) { + return nil, nil +} + +// HasActiveGuardian returns false as this is a disabled implementation +func (dga *disabledGuardedAccount) HasActiveGuardian(_ state.UserAccountHandler) bool { + return false +} + +// HasPendingGuardian returns false as this is a disabled implementation +func (dga *disabledGuardedAccount) HasPendingGuardian(_ state.UserAccountHandler) bool { + return false +} + +// SetGuardian returns nil as this is a disabled implementation +func (dga *disabledGuardedAccount) SetGuardian(_ vmcommon.UserAccountHandler, _ []byte, _ []byte, _ []byte) error { + return nil +} + +// CleanOtherThanActive does nothing as this is a disabled implementation +func (dga *disabledGuardedAccount) CleanOtherThanActive(_ vmcommon.UserAccountHandler) {} + +// GetConfiguredGuardians returns nil, nil, nil as this is a disabled component +func (dga *disabledGuardedAccount) GetConfiguredGuardians(_ state.UserAccountHandler) (active *guardians.Guardian, pending *guardians.Guardian, err error) { + return nil, nil, nil +} + +// IsInterfaceNil returns true if there is no value under the interface +func (dga *disabledGuardedAccount) IsInterfaceNil() bool { + return dga == nil +} diff --git a/process/guardian/guardedAccount.go b/process/guardian/guardedAccount.go new file mode 100644 index 00000000000..91556072a34 --- /dev/null +++ b/process/guardian/guardedAccount.go @@ -0,0 +1,341 @@ +package guardian + +import ( + "bytes" + "fmt" + "sync" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-core-go/data/guardians" + "github.com/multiversx/mx-chain-core-go/marshal" + "github.com/multiversx/mx-chain-go/process" + "github.com/multiversx/mx-chain-go/state" + vmcommon "github.com/multiversx/mx-chain-vm-common-go" +) + +var guardianKey = []byte(core.ProtectedKeyPrefix + core.GuardiansKeyIdentifier) + +type guardedAccount struct { + marshaller marshal.Marshalizer + epochNotifier vmcommon.EpochNotifier + mutEpoch sync.RWMutex + guardianActivationEpochsDelay uint32 + currentEpoch uint32 +} + +// NewGuardedAccount creates a new guarded account +func NewGuardedAccount( + marshaller marshal.Marshalizer, + epochNotifier vmcommon.EpochNotifier, + setGuardianEpochsDelay uint32, +) (*guardedAccount, error) { + if check.IfNil(marshaller) { + return nil, process.ErrNilMarshalizer + } + if check.IfNil(epochNotifier) { + return nil, process.ErrNilEpochNotifier + } + if setGuardianEpochsDelay == 0 { + return nil, process.ErrInvalidSetGuardianEpochsDelay + } + + agc := &guardedAccount{ + marshaller: marshaller, + epochNotifier: epochNotifier, + guardianActivationEpochsDelay: setGuardianEpochsDelay, + } + + epochNotifier.RegisterNotifyHandler(agc) + + return agc, nil +} + +// GetActiveGuardian returns the active guardian +func (agc *guardedAccount) GetActiveGuardian(uah vmcommon.UserAccountHandler) ([]byte, error) { + configuredGuardians, err := agc.getVmUserAccountConfiguredGuardian(uah) + if err != nil { + return nil, err + } + + guardian, err := 
agc.getActiveGuardian(configuredGuardians) + if err != nil { + return nil, err + } + + return guardian.Address, nil +} + +// CleanOtherThanActive cleans the pending guardian or old/disabled guardian, if any +func (agc *guardedAccount) CleanOtherThanActive(uah vmcommon.UserAccountHandler) { + configuredGuardians, err := agc.getVmUserAccountConfiguredGuardian(uah) + if err != nil { + return + } + + activeGuardian, err := agc.getActiveGuardian(configuredGuardians) + if err != nil { + configuredGuardians.Slice = []*guardians.Guardian{} + } else { + configuredGuardians.Slice = []*guardians.Guardian{activeGuardian} + } + + _ = agc.saveAccountGuardians(uah, configuredGuardians) +} + +// HasActiveGuardian returns true if the account has an active guardian configured, false otherwise +func (agc *guardedAccount) HasActiveGuardian(uah state.UserAccountHandler) bool { + if check.IfNil(uah) { + return false + } + + configuredGuardians, err := agc.getConfiguredGuardians(uah) + if err != nil { + return false + } + _, err = agc.getActiveGuardian(configuredGuardians) + + return err == nil +} + +// HasPendingGuardian returns true if the account has a pending guardian, false otherwise +func (agc *guardedAccount) HasPendingGuardian(uah state.UserAccountHandler) bool { + if check.IfNil(uah) { + return false + } + + configuredGuardians, err := agc.getConfiguredGuardians(uah) + if err != nil { + return false + } + + _, err = agc.getPendingGuardian(configuredGuardians) + + return err == nil +} + +// SetGuardian sets a guardian for an account +func (agc *guardedAccount) SetGuardian(uah vmcommon.UserAccountHandler, guardianAddress []byte, txGuardianAddress []byte, guardianServiceUID []byte) error { + stateUserAccount, ok := uah.(state.UserAccountHandler) + if !ok { + return process.ErrWrongTypeAssertion + } + + if len(guardianServiceUID) == 0 { + return process.ErrNilGuardianServiceUID + } + + if len(txGuardianAddress) > 0 { + return agc.instantSetGuardian(stateUserAccount, guardianAddress, txGuardianAddress, guardianServiceUID) + } + + agc.mutEpoch.RLock() + guardian := &guardians.Guardian{ + Address: guardianAddress, + ActivationEpoch: agc.currentEpoch + agc.guardianActivationEpochsDelay, + ServiceUID: guardianServiceUID, + } + agc.mutEpoch.RUnlock() + + return agc.setAccountGuardian(stateUserAccount, guardian) +} + +func (agc *guardedAccount) getVmUserAccountConfiguredGuardian(uah vmcommon.UserAccountHandler) (*guardians.Guardians, error) { + stateUserAccount, ok := uah.(state.UserAccountHandler) + if !ok { + return nil, process.ErrWrongTypeAssertion + } + + configuredGuardians, err := agc.getConfiguredGuardians(stateUserAccount) + if err != nil { + return nil, err + } + if len(configuredGuardians.Slice) == 0 { + return nil, process.ErrAccountHasNoGuardianSet + } + + return configuredGuardians, nil +} + +func (agc *guardedAccount) setAccountGuardian(uah state.UserAccountHandler, guardian *guardians.Guardian) error { + configuredGuardians, err := agc.getConfiguredGuardians(uah) + if err != nil { + return err + } + + newGuardians, err := agc.updateGuardians(guardian, configuredGuardians) + if err != nil { + return err + } + + accHandler, ok := uah.(vmcommon.UserAccountHandler) + if !ok { + return process.ErrWrongTypeAssertion + } + + return agc.saveAccountGuardians(accHandler, newGuardians) +} +
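SetGuardian above therefore has two outcomes: without a co-signing guardian, the new guardian is stored as pending and activates only after guardianActivationEpochsDelay epochs, while a co-signed request (txGuardianAddress set) falls through to instantSetGuardian below and activates at the current epoch. A standalone sketch of just that activation arithmetic, with a simplified signature and illustrative names:

package main

import "fmt"

// activationEpoch restates the two SetGuardian paths: a co-signed request
// activates immediately, a plain one waits out the configured delay.
func activationEpoch(currentEpoch uint32, epochsDelay uint32, coSigned bool) uint32 {
	if coSigned {
		return currentEpoch // instantSetGuardian path
	}
	return currentEpoch + epochsDelay // pending-guardian path
}

func main() {
	fmt.Println(activationEpoch(10, 20, false)) // 30: pending until epoch 30
	fmt.Println(activationEpoch(10, 20, true))  // 10: active right away
}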
+func (agc *guardedAccount) instantSetGuardian( + uah state.UserAccountHandler, + guardianAddress []byte, + txGuardianAddress []byte, + guardianServiceUID []byte, +) error { + accountGuardians, err := agc.getConfiguredGuardians(uah) + if err != nil { + return err + } + + activeGuardian, err := agc.getActiveGuardian(accountGuardians) + if err != nil { + return err + } + + if !bytes.Equal(activeGuardian.Address, txGuardianAddress) { + return process.ErrTransactionAndAccountGuardianMismatch + } + + // immediately set the new guardian + agc.mutEpoch.RLock() + guardian := &guardians.Guardian{ + Address: guardianAddress, + ActivationEpoch: agc.currentEpoch, + ServiceUID: guardianServiceUID, + } + agc.mutEpoch.RUnlock() + + accountGuardians.Slice = []*guardians.Guardian{guardian} + accHandler, ok := uah.(vmcommon.UserAccountHandler) + if !ok { + return process.ErrWrongTypeAssertion + } + + return agc.saveAccountGuardians(accHandler, accountGuardians) +} + +// TODO: add constraints on not co-signed txs on interceptor, for setGuardian +// 1. Gas price cannot exceed a preconfigured limit +// 2. If there is already one guardian pending, do not allow setting another one +func (agc *guardedAccount) updateGuardians(newGuardian *guardians.Guardian, accountGuardians *guardians.Guardians) (*guardians.Guardians, error) { + numSetGuardians := len(accountGuardians.Slice) + + if numSetGuardians == 0 { + accountGuardians.Slice = append(accountGuardians.Slice, newGuardian) + return accountGuardians, nil + } + + activeGuardian, err := agc.getActiveGuardian(accountGuardians) + if err != nil { + // no active guardian, do not replace the already pending guardian + return nil, fmt.Errorf("%w in updateGuardians, with %d configured guardians", err, numSetGuardians) + } + + if bytes.Equal(activeGuardian.Address, newGuardian.Address) { + accountGuardians.Slice = []*guardians.Guardian{activeGuardian} + } else { + accountGuardians.Slice = []*guardians.Guardian{activeGuardian, newGuardian} + } + + return accountGuardians, nil +} + +func (agc *guardedAccount) saveAccountGuardians(account vmcommon.UserAccountHandler, accountGuardians *guardians.Guardians) error { + marshalledData, err := agc.marshaller.Marshal(accountGuardians) + if err != nil { + return err + } + + return account.AccountDataHandler().SaveKeyValue(guardianKey, marshalledData) +} + +func (agc *guardedAccount) getConfiguredGuardians(uah state.UserAccountHandler) (*guardians.Guardians, error) { + guardiansMarshalled, _, err := uah.RetrieveValue(guardianKey) + if err != nil || len(guardiansMarshalled) == 0 { + return &guardians.Guardians{Slice: make([]*guardians.Guardian, 0)}, nil + } + + configuredGuardians := &guardians.Guardians{} + err = agc.marshaller.Unmarshal(configuredGuardians, guardiansMarshalled) + if err != nil { + return nil, err + } + + return configuredGuardians, nil +} + +func (agc *guardedAccount) getActiveGuardian(gs *guardians.Guardians) (*guardians.Guardian, error) { + agc.mutEpoch.RLock() + defer agc.mutEpoch.RUnlock() + + var selectedGuardian *guardians.Guardian + for _, guardian := range gs.Slice { + if guardian == nil { + continue + } + if guardian.ActivationEpoch > agc.currentEpoch { + continue + } + if selectedGuardian == nil { + selectedGuardian = guardian + continue + } + + // get the most recent active guardian + if selectedGuardian.ActivationEpoch < guardian.ActivationEpoch { + selectedGuardian = guardian + } + } + + if selectedGuardian == nil { + return nil, process.ErrAccountHasNoActiveGuardian + } + + return selectedGuardian, nil +} +
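getActiveGuardian above resolves multiple stored guardians by picking the most recently activated one whose activation epoch has already passed; pending entries are skipped. The same rule, restated as a standalone sketch with simplified types rather than the node's actual structures:

package main

import "fmt"

type guardian struct {
	address         string
	activationEpoch uint32
}

// mostRecentActive mirrors the selection rule: among guardians whose
// activation epoch has passed, pick the one with the highest activation
// epoch; guardians activating in the future are still pending.
func mostRecentActive(gs []guardian, currentEpoch uint32) (guardian, bool) {
	var selected *guardian
	for i := range gs {
		g := &gs[i]
		if g.activationEpoch > currentEpoch {
			continue // still pending
		}
		if selected == nil || selected.activationEpoch < g.activationEpoch {
			selected = g
		}
	}
	if selected == nil {
		return guardian{}, false
	}
	return *selected, true
}

func main() {
	// same shape as the epochs used in the tests below: 8 and 9 are active
	// at epoch 9, 30 is pending; the most recent active one wins
	gs := []guardian{{"old", 8}, {"new", 9}, {"pending", 30}}
	g, ok := mostRecentActive(gs, 9)
	fmt.Println(g.address, ok) // new true
}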
*guardians.Guardian, err error) {
+ configuredGuardians, err := agc.getConfiguredGuardians(uah)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ active, _ = agc.getActiveGuardian(configuredGuardians)
+ pending, _ = agc.getPendingGuardian(configuredGuardians)
+
+ return
+}
+
+func (agc *guardedAccount) getPendingGuardian(gs *guardians.Guardians) (*guardians.Guardian, error) {
+ if gs == nil {
+ return nil, process.ErrAccountHasNoPendingGuardian
+ }
+
+ agc.mutEpoch.RLock()
+ defer agc.mutEpoch.RUnlock()
+
+ for _, guardian := range gs.Slice {
+ if guardian == nil {
+ continue
+ }
+ if guardian.ActivationEpoch <= agc.currentEpoch {
+ continue
+ }
+ return guardian, nil
+ }
+
+ return nil, process.ErrAccountHasNoPendingGuardian
+}
+
+// EpochConfirmed is the registered callback function for the epoch change notifier
+func (agc *guardedAccount) EpochConfirmed(epoch uint32, _ uint64) {
+ agc.mutEpoch.Lock()
+ agc.currentEpoch = epoch
+ agc.mutEpoch.Unlock()
+}
+
+// IsInterfaceNil returns true if the receiver is nil
+func (agc *guardedAccount) IsInterfaceNil() bool {
+ return agc == nil
+}
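With the implementation complete, the intended lifecycle is worth illustrating before the unit tests: a guardian set without a co-signature only becomes active `guardianActivationEpochsDelay` epochs later, while a change co-signed by the current active guardian takes effect immediately and replaces the whole configuration. A minimal sketch of that flow, reusing the stubs the test file below imports (`stateMocks.UserAccountStub`, `trie.DataTrieTrackerStub`, `testscommon.MarshalizerMock`, `forking.NewGenericEpochNotifier`); the in-memory map standing in for the account's data trie is an assumption of the sketch, not part of this change:

```go
package guardian_test

import (
	"fmt"

	"github.com/multiversx/mx-chain-core-go/data/block"
	"github.com/multiversx/mx-chain-go/common/forking"
	"github.com/multiversx/mx-chain-go/process/guardian"
	"github.com/multiversx/mx-chain-go/testscommon"
	stateMocks "github.com/multiversx/mx-chain-go/testscommon/state"
	"github.com/multiversx/mx-chain-go/testscommon/trie"
	vmcommon "github.com/multiversx/mx-chain-vm-common-go"
)

func sketchGuardianLifecycle() {
	// In-memory stand-in for the account's data trie (assumption of this sketch).
	store := map[string][]byte{}
	acc := &stateMocks.UserAccountStub{
		RetrieveValueCalled: func(key []byte) ([]byte, uint32, error) {
			return store[string(key)], 0, nil
		},
		AccountDataHandlerCalled: func() vmcommon.AccountDataHandler {
			return &trie.DataTrieTrackerStub{
				SaveKeyValueCalled: func(key []byte, value []byte) error {
					store[string(key)] = value
					return nil
				},
			}
		},
	}

	// The guarded account tracks the current epoch through the notifier.
	en := forking.NewGenericEpochNotifier()
	ga, _ := guardian.NewGuardedAccount(&testscommon.MarshalizerMock{}, en, 2)
	en.CheckEpoch(&block.Header{Epoch: 10}) // current epoch becomes 10

	// Not co-signed: guardian A is stored as pending, activation epoch 10+2.
	_ = ga.SetGuardian(acc, []byte("guardian A"), nil, []byte("uid"))
	fmt.Println(ga.HasPendingGuardian(acc), ga.HasActiveGuardian(acc)) // true false

	// Crossing the activation epoch flips pending to active; no account writes needed.
	en.CheckEpoch(&block.Header{Epoch: 12})
	fmt.Println(ga.HasPendingGuardian(acc), ga.HasActiveGuardian(acc)) // false true

	// Co-signed by active guardian A: guardian B replaces the whole configuration
	// immediately instead of waiting for the activation delay.
	_ = ga.SetGuardian(acc, []byte("guardian B"), []byte("guardian A"), []byte("uid"))
	addr, _ := ga.GetActiveGuardian(acc)
	fmt.Println(string(addr)) // guardian B
}
```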
diff --git a/process/guardian/guardedAccount_test.go b/process/guardian/guardedAccount_test.go
new file mode 100644
index 00000000000..d6550babd55
--- /dev/null
+++ b/process/guardian/guardedAccount_test.go
@@ -0,0 +1,1218 @@
+package guardian
+
+import (
+ "context"
+ "errors"
+ "testing"
+ "time"
+
+ "github.com/multiversx/mx-chain-core-go/core/check"
+ "github.com/multiversx/mx-chain-core-go/data/block"
+ "github.com/multiversx/mx-chain-core-go/data/guardians"
+ "github.com/multiversx/mx-chain-go/common/forking"
+ "github.com/multiversx/mx-chain-go/process"
+ "github.com/multiversx/mx-chain-go/testscommon"
+ "github.com/multiversx/mx-chain-go/testscommon/epochNotifier"
+ stateMocks "github.com/multiversx/mx-chain-go/testscommon/state"
+ "github.com/multiversx/mx-chain-go/testscommon/trie"
+ "github.com/multiversx/mx-chain-go/testscommon/vmcommonMocks"
+ vmcommon "github.com/multiversx/mx-chain-vm-common-go"
+ "github.com/stretchr/testify/require"
+)
+
+func TestNewGuardedAccount(t *testing.T) {
+ marshaller := &testscommon.MarshalizerMock{}
+ en := &epochNotifier.EpochNotifierStub{}
+ ga, err := NewGuardedAccount(marshaller, en, 10)
+ require.Nil(t, err)
+ require.NotNil(t, ga)
+
+ ga, err = NewGuardedAccount(nil, en, 10)
+ require.Equal(t, process.ErrNilMarshalizer, err)
+ require.Nil(t, ga)
+
+ ga, err = NewGuardedAccount(marshaller, nil, 10)
+ require.Equal(t, process.ErrNilEpochNotifier, err)
+ require.Nil(t, ga)
+
+ ga, err = NewGuardedAccount(marshaller, en, 0)
+ require.Equal(t, process.ErrInvalidSetGuardianEpochsDelay, err)
+ require.Nil(t, ga)
+}
+
+func TestGuardedAccount_getActiveGuardian(t *testing.T) {
+ ga := createGuardedAccountWithEpoch(9)
+
+ t.Run("no guardians", func(t *testing.T) {
+ t.Parallel()
+
+ configuredGuardians := &guardians.Guardians{}
+ activeGuardian, err := ga.getActiveGuardian(configuredGuardians)
+ require.Nil(t, activeGuardian)
+ require.Equal(t, process.ErrAccountHasNoActiveGuardian, err)
+ })
+ t.Run("one pending guardian", func(t *testing.T) {
+ t.Parallel()
+
+ g1 := &guardians.Guardian{Address: []byte("addr1"), ActivationEpoch: 11}
+ configuredGuardians := &guardians.Guardians{Slice: []*guardians.Guardian{g1}}
+ activeGuardian, err := ga.getActiveGuardian(configuredGuardians)
+ require.Nil(t, activeGuardian)
+ require.Equal(t, process.ErrAccountHasNoActiveGuardian, err)
+ })
+ t.Run("one active guardian", func(t *testing.T) {
+ t.Parallel()
+
+ g1 := &guardians.Guardian{Address: []byte("addr1"), ActivationEpoch: 9}
+ configuredGuardians := &guardians.Guardians{Slice: []*guardians.Guardian{g1}}
+ activeGuardian, err := ga.getActiveGuardian(configuredGuardians)
+ require.Nil(t, err)
+ require.Equal(t, g1, activeGuardian)
+ })
+ t.Run("one active and one pending", func(t *testing.T) {
+ t.Parallel()
+
+ g1 := &guardians.Guardian{Address: []byte("addr1"), ActivationEpoch: 9}
+ g2 := &guardians.Guardian{Address: []byte("addr2"), ActivationEpoch: 30}
+ configuredGuardians := &guardians.Guardians{Slice: []*guardians.Guardian{g1, g2}}
+ activeGuardian, err := ga.getActiveGuardian(configuredGuardians)
+ require.Nil(t, err)
+ require.Equal(t, g1, activeGuardian)
+ })
+ t.Run("one active and one too old", func(t *testing.T) {
+ t.Parallel()
+
+ g1 := &guardians.Guardian{Address: []byte("addr1"), ActivationEpoch: 8}
+ g2 := &guardians.Guardian{Address: []byte("addr2"), ActivationEpoch: 9}
+ configuredGuardians := &guardians.Guardians{Slice: []*guardians.Guardian{g1, g2}}
+ activeGuardian, err := ga.getActiveGuardian(configuredGuardians)
+ require.Nil(t, err)
+ require.Equal(t, g2, activeGuardian)
+ })
+ t.Run("one active and one too old, saved in reverse order", func(t *testing.T) {
+ t.Parallel()
+
+ g1 := &guardians.Guardian{Address: []byte("addr1"), ActivationEpoch: 8}
+ g2 := &guardians.Guardian{Address: []byte("addr2"), ActivationEpoch: 9}
+ configuredGuardians := &guardians.Guardians{Slice: []*guardians.Guardian{g2, g1}}
+ activeGuardian, err := ga.getActiveGuardian(configuredGuardians)
+ require.Nil(t, err)
+ require.Equal(t, g2, activeGuardian)
+ })
+}
+
+func TestGuardedAccount_getConfiguredGuardians(t *testing.T) {
+ ga := createGuardedAccountWithEpoch(10)
+
+ t.Run("guardians key not found should return empty", func(t *testing.T) {
+ t.Parallel()
+
+ expectedErr := errors.New("expected error")
+ acc := &stateMocks.UserAccountStub{
+ RetrieveValueCalled: func(key []byte) ([]byte, uint32, error) {
+ return nil, 0, expectedErr
+ },
+ }
+
+ configuredGuardians, err := ga.getConfiguredGuardians(acc)
+ require.Nil(t, err)
+ require.NotNil(t, configuredGuardians)
+ require.True(t, len(configuredGuardians.Slice) == 0)
+ })
+ t.Run("key found but no guardians, should return empty", func(t *testing.T) {
+ t.Parallel()
+
+ acc := &stateMocks.UserAccountStub{
+ RetrieveValueCalled: func(key []byte) ([]byte, uint32, error) {
+ return nil, 0, nil
+ },
+ }
+
+ configuredGuardians, err := ga.getConfiguredGuardians(acc)
+ require.Nil(t, err)
+ require.NotNil(t, configuredGuardians)
+ require.True(t, len(configuredGuardians.Slice) == 0)
+ })
+ t.Run("unmarshal guardians error should return error", func(t *testing.T) {
+ t.Parallel()
+
+ expectedErr := errors.New("expected error")
+ ga := createGuardedAccountWithEpoch(10)
+ ga.marshaller = &testscommon.MarshalizerStub{
+ UnmarshalCalled: func(obj interface{}, buff []byte) error {
+ return expectedErr
+ },
+ }
+ acc := &stateMocks.UserAccountStub{
+ RetrieveValueCalled: func(key []byte) ([]byte, uint32, error) {
+ return []byte("wrongly marshalled guardians"), 0, nil
+ },
+ }
+
+ configuredGuardians, err := ga.getConfiguredGuardians(acc)
+ require.Nil(t, configuredGuardians)
+ require.Equal(t, expectedErr, err)
+ })
+ t.Run("correctly marshalled guardians should be returned", func(t *testing.T) {
+ t.Parallel()
+
+ g1 := &guardians.Guardian{Address: []byte("addr1"), ActivationEpoch: 9}
+ expectedConfiguredGuardians := &guardians.Guardians{Slice: []*guardians.Guardian{g1}}
+
+ acc :=
&stateMocks.UserAccountStub{
+ RetrieveValueCalled: func(key []byte) ([]byte, uint32, error) {
+ val, err := ga.marshaller.Marshal(expectedConfiguredGuardians)
+ return val, 0, err
+ },
+ }
+
+ configuredGuardians, err := ga.getConfiguredGuardians(acc)
+ require.Nil(t, err)
+ require.Equal(t, expectedConfiguredGuardians, configuredGuardians)
+ })
+}
+
+func TestGuardedAccount_saveAccountGuardians(t *testing.T) {
+ userAccount := &vmcommonMocks.UserAccountStub{
+ AccountDataHandlerCalled: func() vmcommon.AccountDataHandler {
+ return &trie.DataTrieTrackerStub{
+ SaveKeyValueCalled: func(key []byte, value []byte) error {
+ return nil
+ },
+ }
+ },
+ }
+
+ t.Run("marshaling error should return err", func(t *testing.T) {
+ t.Parallel()
+
+ expectedErr := errors.New("expected error")
+ ga := createGuardedAccountWithEpoch(10)
+ ga.marshaller = &testscommon.MarshalizerStub{
+ MarshalCalled: func(obj interface{}) ([]byte, error) {
+ return nil, expectedErr
+ },
+ }
+
+ err := ga.saveAccountGuardians(userAccount, nil)
+ require.Equal(t, expectedErr, err)
+ })
+ t.Run("save account guardians OK", func(t *testing.T) {
+ t.Parallel()
+
+ saveKeyValueCalled := false
+ userAccount.AccountDataHandlerCalled = func() vmcommon.AccountDataHandler {
+ return &trie.DataTrieTrackerStub{
+ SaveKeyValueCalled: func(key []byte, value []byte) error {
+ saveKeyValueCalled = true
+ return nil
+ },
+ }
+ }
+
+ ga := createGuardedAccountWithEpoch(10)
+ err := ga.saveAccountGuardians(userAccount, nil)
+ require.Nil(t, err)
+ require.True(t, saveKeyValueCalled)
+ })
+}
+
+func TestGuardedAccount_updateGuardians(t *testing.T) {
+ ga := createGuardedAccountWithEpoch(10)
+ newGuardian := &guardians.Guardian{
+ Address: []byte("new guardian address"),
+ ActivationEpoch: 20,
+ }
+
+ t.Run("update empty guardian list with new guardian", func(t *testing.T) {
+ configuredGuardians := &guardians.Guardians{Slice: []*guardians.Guardian{}}
+ expectedGuardians := append(configuredGuardians.Slice, newGuardian)
+ updatedGuardians, err := ga.updateGuardians(newGuardian, configuredGuardians)
+ require.Nil(t, err)
+ require.Equal(t, expectedGuardians, updatedGuardians.Slice)
+ })
+ t.Run("updating when there is an existing pending guardian and no active should error", func(t *testing.T) {
+ existingGuardian := &guardians.Guardian{
+ Address: []byte("guardian address"),
+ ActivationEpoch: 11,
+ }
+ configuredGuardians := &guardians.Guardians{Slice: []*guardians.Guardian{existingGuardian}}
+
+ updatedGuardians, err := ga.updateGuardians(newGuardian, configuredGuardians)
+ require.Nil(t, updatedGuardians)
+ require.True(t, errors.Is(err, process.ErrAccountHasNoActiveGuardian))
+ })
+ t.Run("updating the existing same active guardian should leave the active guardian unchanged", func(t *testing.T) {
+ existingGuardian := &guardians.Guardian{
+ Address: []byte("guardian address"),
+ ActivationEpoch: 9,
+ }
+
+ newGuardian := newGuardian
+ newGuardian.Address = existingGuardian.Address
+ configuredGuardians := &guardians.Guardians{Slice: []*guardians.Guardian{existingGuardian}}
+
+ updatedGuardians, err := ga.updateGuardians(newGuardian, configuredGuardians)
+ require.Nil(t, err)
+ require.Equal(t, configuredGuardians, updatedGuardians)
+ })
+ t.Run("updating the existing same active guardian, when there is also a pending guardian configured, should clean up pending and leave active unchanged", func(t *testing.T) {
+ existingActiveGuardian := &guardians.Guardian{
+ Address: []byte("guardian address"),
+ ActivationEpoch: 9,
+ }
+
+ existingPendingGuardian := &guardians.Guardian{
+ Address: []byte("pending guardian address"),
+ ActivationEpoch: 13,
+ }
+
+ newGuardian := newGuardian
+ newGuardian.Address = existingPendingGuardian.Address
+ configuredGuardians := &guardians.Guardians{Slice: []*guardians.Guardian{existingActiveGuardian, existingPendingGuardian}}
+ expectedUpdatedGuardians := &guardians.Guardians{Slice: []*guardians.Guardian{existingActiveGuardian, newGuardian}}
+
+ updatedGuardians, err := ga.updateGuardians(newGuardian, configuredGuardians)
+ require.Nil(t, err)
+ require.Equal(t, expectedUpdatedGuardians, updatedGuardians)
+ })
+ t.Run("updating the existing same pending guardian while there is an active one should leave the active guardian unchanged but update the pending", func(t *testing.T) {
+ existingActiveGuardian := &guardians.Guardian{
+ Address: []byte("guardian address"),
+ ActivationEpoch: 9,
+ }
+ existingPendingGuardian := &guardians.Guardian{
+ Address: newGuardian.Address,
+ ActivationEpoch: 13,
+ }
+
+ configuredGuardians := &guardians.Guardians{Slice: []*guardians.Guardian{existingActiveGuardian, existingPendingGuardian}}
+ expectedUpdatedGuardians := &guardians.Guardians{Slice: []*guardians.Guardian{existingActiveGuardian, newGuardian}}
+
+ updatedGuardians, err := ga.updateGuardians(newGuardian, configuredGuardians)
+ require.Nil(t, err)
+ require.Equal(t, expectedUpdatedGuardians, updatedGuardians)
+ })
+}
+
+func TestGuardedAccount_setAccountGuardian(t *testing.T) {
+ ga := createGuardedAccountWithEpoch(10)
+ newGuardian := &guardians.Guardian{
+ Address: []byte("new guardian address"),
+ ActivationEpoch: 20,
+ }
+
+ t.Run("if updateGuardians returns err, the err should be propagated", func(t *testing.T) {
+ existingGuardian := &guardians.Guardian{
+ Address: []byte("guardian address"),
+ ActivationEpoch: 11,
+ }
+ configuredGuardians := &guardians.Guardians{Slice: []*guardians.Guardian{existingGuardian}}
+ ua := &stateMocks.UserAccountStub{
+ RetrieveValueCalled: func(key []byte) ([]byte, uint32, error) {
+ val, err := ga.marshaller.Marshal(configuredGuardians)
+ return val, 0, err
+ },
+ }
+
+ err := ga.setAccountGuardian(ua, newGuardian)
+ require.True(t, errors.Is(err, process.ErrAccountHasNoActiveGuardian))
+ })
+ t.Run("setGuardian same guardian ok, not changing existing config", func(t *testing.T) {
+ existingGuardian := &guardians.Guardian{
+ Address: []byte("guardian address"),
+ ActivationEpoch: 9,
+ }
+ newGuardian := newGuardian
+ newGuardian.Address = existingGuardian.Address
+ configuredGuardians := &guardians.Guardians{Slice: []*guardians.Guardian{existingGuardian}}
+
+ expectedValue := []byte(nil)
+ ua := &stateMocks.UserAccountStub{
+ RetrieveValueCalled: func(key []byte) ([]byte, uint32, error) {
+ expectedValue, _ = ga.marshaller.Marshal(configuredGuardians)
+ return expectedValue, 0, nil
+ },
+ AccountDataHandlerCalled: func() vmcommon.AccountDataHandler {
+ return &trie.DataTrieTrackerStub{
+ SaveKeyValueCalled: func(key []byte, value []byte) error {
+ require.Equal(t, guardianKey, key)
+ require.Equal(t, expectedValue, value)
+ return nil
+ },
+ }
+ },
+ }
+
+ err := ga.setAccountGuardian(ua, newGuardian)
+ require.Nil(t, err)
+ })
+}
+
+func TestGuardedAccount_instantSetGuardian(t *testing.T) {
+ currentEpoch := uint32(10)
+ ga := createGuardedAccountWithEpoch(currentEpoch)
+ newGuardian := &guardians.Guardian{
+ Address: []byte("new guardian address"),
+ ActivationEpoch: 20,
+ }
+ txGuardianAddress := []byte("guardian address")
+ guardianServiceUID := []byte("testID")
+
+ t.Run("no active guardian should error", func(t *testing.T) {
+ configuredGuardians := &guardians.Guardians{Slice: []*guardians.Guardian{}}
+
+
ua := &stateMocks.UserAccountStub{ + RetrieveValueCalled: func(key []byte) ([]byte, uint32, error) { + val, err := ga.marshaller.Marshal(configuredGuardians) + return val, 0, err + }, + } + + err := ga.instantSetGuardian(ua, newGuardian.Address, txGuardianAddress, guardianServiceUID) + require.Equal(t, process.ErrAccountHasNoActiveGuardian, err) + }) + t.Run("tx signed by different than active guardian should err", func(t *testing.T) { + activeGuardian := &guardians.Guardian{ + Address: []byte("active guardian address"), + ActivationEpoch: 1, + } + + configuredGuardians := &guardians.Guardians{Slice: []*guardians.Guardian{activeGuardian}} + + ua := &stateMocks.UserAccountStub{ + RetrieveValueCalled: func(key []byte) ([]byte, uint32, error) { + val, err := ga.marshaller.Marshal(configuredGuardians) + return val, 0, err + }, + } + + err := ga.instantSetGuardian(ua, newGuardian.Address, txGuardianAddress, guardianServiceUID) + require.Equal(t, process.ErrTransactionAndAccountGuardianMismatch, err) + }) + t.Run("immediately set the guardian if setGuardian tx is signed by active guardian", func(t *testing.T) { + activeGuardian := &guardians.Guardian{ + Address: txGuardianAddress, + ActivationEpoch: 1, + ServiceUID: guardianServiceUID, + } + newGuardian := &guardians.Guardian{ + Address: []byte("new guardian address"), + ActivationEpoch: currentEpoch, + ServiceUID: []byte("testServiceID2"), + } + + configuredGuardians := &guardians.Guardians{Slice: []*guardians.Guardian{activeGuardian}} + expectedValue, _ := ga.marshaller.Marshal(&guardians.Guardians{Slice: []*guardians.Guardian{newGuardian}}) + + ua := &stateMocks.UserAccountStub{ + RetrieveValueCalled: func(key []byte) ([]byte, uint32, error) { + val, err := ga.marshaller.Marshal(configuredGuardians) + return val, 0, err + }, + AccountDataHandlerCalled: func() vmcommon.AccountDataHandler { + return &trie.DataTrieTrackerStub{ + SaveKeyValueCalled: func(key []byte, value []byte) error { + require.Equal(t, guardianKey, key) + require.Equal(t, expectedValue, value) + return nil + }, + } + }} + + err := ga.instantSetGuardian(ua, newGuardian.Address, txGuardianAddress, newGuardian.ServiceUID) + require.Nil(t, err) + }) +} + +func TestGuardedAccount_GetActiveGuardian(t *testing.T) { + currentEpoch := uint32(10) + ga := createGuardedAccountWithEpoch(currentEpoch) + + t.Run("wrong account type should err", func(t *testing.T) { + var uah *vmcommonMocks.UserAccountStub + activeGuardian, err := ga.GetActiveGuardian(uah) + require.Nil(t, activeGuardian) + require.Equal(t, process.ErrWrongTypeAssertion, err) + }) + t.Run("getConfiguredGuardians with err should err - no active", func(t *testing.T) { + dataTrieErr := errors.New("expected error") + uah := &stateMocks.UserAccountStub{ + RetrieveValueCalled: func(key []byte) ([]byte, uint32, error) { + return nil, 0, dataTrieErr + }, + } + activeGuardian, err := ga.GetActiveGuardian(uah) + require.Nil(t, activeGuardian) + require.Equal(t, process.ErrAccountHasNoGuardianSet, err) + }) + t.Run("no guardian should return err", func(t *testing.T) { + configuredGuardians := &guardians.Guardians{} + uah := &stateMocks.UserAccountStub{ + RetrieveValueCalled: func(key []byte) ([]byte, uint32, error) { + val, err := ga.marshaller.Marshal(configuredGuardians) + return val, 0, err + }, + } + + activeGuardian, err := ga.GetActiveGuardian(uah) + require.Nil(t, activeGuardian) + require.Equal(t, process.ErrAccountHasNoGuardianSet, err) + }) + t.Run("one pending guardian should return err", func(t *testing.T) { + 
pendingGuardian := &guardians.Guardian{ + Address: []byte("guardian address"), + ActivationEpoch: currentEpoch + 1, + } + configuredGuardians := &guardians.Guardians{Slice: []*guardians.Guardian{pendingGuardian}} + uah := &stateMocks.UserAccountStub{ + RetrieveValueCalled: func(key []byte) ([]byte, uint32, error) { + val, err := ga.marshaller.Marshal(configuredGuardians) + return val, 0, err + }, + } + + activeGuardian, err := ga.GetActiveGuardian(uah) + require.Nil(t, activeGuardian) + require.Equal(t, process.ErrAccountHasNoActiveGuardian, err) + }) + t.Run("one active guardian should return the active", func(t *testing.T) { + activeGuardian := &guardians.Guardian{ + Address: []byte("guardian address"), + ActivationEpoch: currentEpoch - 1, + } + configuredGuardians := &guardians.Guardians{Slice: []*guardians.Guardian{activeGuardian}} + uah := &stateMocks.UserAccountStub{ + RetrieveValueCalled: func(key []byte) ([]byte, uint32, error) { + val, err := ga.marshaller.Marshal(configuredGuardians) + return val, 0, err + }, + } + + guardian, err := ga.GetActiveGuardian(uah) + require.Equal(t, activeGuardian.Address, guardian) + require.Nil(t, err) + }) + t.Run("one active guardian and one pending new guardian", func(t *testing.T) { + activeGuardian := &guardians.Guardian{ + Address: []byte("guardian address"), + ActivationEpoch: currentEpoch - 1, + } + pendingGuardian := &guardians.Guardian{ + Address: []byte("pending guardian address"), + ActivationEpoch: currentEpoch + 1, + } + + configuredGuardians := &guardians.Guardians{Slice: []*guardians.Guardian{activeGuardian, pendingGuardian}} + uah := &stateMocks.UserAccountStub{ + RetrieveValueCalled: func(key []byte) ([]byte, uint32, error) { + val, err := ga.marshaller.Marshal(configuredGuardians) + return val, 0, err + }, + } + + guardian, err := ga.GetActiveGuardian(uah) + require.Equal(t, activeGuardian.Address, guardian) + require.Nil(t, err) + }) + t.Run("one active guardian and one disabled (old) guardian", func(t *testing.T) { + activeGuardian := &guardians.Guardian{ + Address: []byte("guardian address"), + ActivationEpoch: currentEpoch - 1, + } + oldGuardian := &guardians.Guardian{ + Address: []byte("old guardian address"), + ActivationEpoch: currentEpoch - 5, + } + + configuredGuardians := &guardians.Guardians{Slice: []*guardians.Guardian{activeGuardian, oldGuardian}} + uah := &stateMocks.UserAccountStub{ + RetrieveValueCalled: func(key []byte) ([]byte, uint32, error) { + val, err := ga.marshaller.Marshal(configuredGuardians) + return val, 0, err + }, + } + + guardian, err := ga.GetActiveGuardian(uah) + require.Equal(t, activeGuardian.Address, guardian) + require.Nil(t, err) + }) +} + +func TestGuardedAccount_getPendingGuardian(t *testing.T) { + currentEpoch := uint32(10) + ga := createGuardedAccountWithEpoch(currentEpoch) + + t.Run("nil guardians/empty guardians should err", func(t *testing.T) { + pendingGuardian, err := ga.getPendingGuardian(nil) + require.Nil(t, pendingGuardian) + require.Equal(t, process.ErrAccountHasNoPendingGuardian, err) + + configuredGuardians := &guardians.Guardians{} + pendingGuardian, err = ga.getPendingGuardian(configuredGuardians) + require.Nil(t, pendingGuardian) + require.Equal(t, process.ErrAccountHasNoPendingGuardian, err) + }) + t.Run("one pending guardian should return it", func(t *testing.T) { + pendingGuardian := &guardians.Guardian{ + Address: []byte("guardian address"), + ActivationEpoch: currentEpoch + 1, + } + configuredGuardians := &guardians.Guardians{Slice: 
[]*guardians.Guardian{pendingGuardian}} + pGuardian, err := ga.getPendingGuardian(configuredGuardians) + require.Nil(t, err) + require.Equal(t, pendingGuardian, pGuardian) + }) + t.Run("one active guardian should err", func(t *testing.T) { + activeGuardian := &guardians.Guardian{ + Address: []byte("guardian address"), + ActivationEpoch: currentEpoch - 1, + } + configuredGuardians := &guardians.Guardians{Slice: []*guardians.Guardian{activeGuardian}} + guardian, err := ga.getPendingGuardian(configuredGuardians) + require.Nil(t, guardian) + require.Equal(t, process.ErrAccountHasNoPendingGuardian, err) + }) + t.Run("one active guardian and one pending new guardian", func(t *testing.T) { + activeGuardian := &guardians.Guardian{ + Address: []byte("guardian address"), + ActivationEpoch: currentEpoch - 1, + } + pendingGuardian := &guardians.Guardian{ + Address: []byte("pending guardian address"), + ActivationEpoch: currentEpoch + 1, + } + + configuredGuardians := &guardians.Guardians{Slice: []*guardians.Guardian{activeGuardian, pendingGuardian}} + guardian, err := ga.getPendingGuardian(configuredGuardians) + require.Equal(t, pendingGuardian, guardian) + require.Nil(t, err) + }) + t.Run("one active guardian and one disabled (old) guardian should err", func(t *testing.T) { + activeGuardian := &guardians.Guardian{ + Address: []byte("guardian address"), + ActivationEpoch: currentEpoch - 1, + } + oldGuardian := &guardians.Guardian{ + Address: []byte("old guardian address"), + ActivationEpoch: currentEpoch - 5, + } + + configuredGuardians := &guardians.Guardians{Slice: []*guardians.Guardian{activeGuardian, oldGuardian}} + guardian, err := ga.getPendingGuardian(configuredGuardians) + require.Nil(t, guardian) + require.Equal(t, process.ErrAccountHasNoPendingGuardian, err) + }) +} + +func TestGuardedAccount_SetGuardian(t *testing.T) { + currentEpoch := uint32(10) + ga := createGuardedAccountWithEpoch(currentEpoch) + guardianServiceUID := []byte("testID") + initialServiceUID := []byte("test2ID") + g1 := &guardians.Guardian{ + Address: []byte("guardian address 1"), + ActivationEpoch: currentEpoch - 2, + ServiceUID: initialServiceUID, + } + g2 := &guardians.Guardian{ + Address: []byte("guardian address 2"), + ActivationEpoch: currentEpoch - 1, + ServiceUID: initialServiceUID, + } + newGuardianAddress := []byte("new guardian address") + + t.Run("invalid user account handler should err", func(t *testing.T) { + err := ga.SetGuardian(nil, newGuardianAddress, g1.Address, guardianServiceUID) + require.Equal(t, process.ErrWrongTypeAssertion, err) + }) + t.Run("transaction signed by current active guardian but instantSetGuardian returns error", func(t *testing.T) { + configuredGuardians := &guardians.Guardians{Slice: []*guardians.Guardian{g1}} + uah := &stateMocks.UserAccountStub{ + RetrieveValueCalled: func(key []byte) ([]byte, uint32, error) { + val, err := ga.marshaller.Marshal(configuredGuardians) + return val, 0, err + }, + } + err := ga.SetGuardian(uah, newGuardianAddress, g2.Address, guardianServiceUID) + require.Equal(t, process.ErrTransactionAndAccountGuardianMismatch, err) + }) + t.Run("instantly set guardian if tx signed by current active guardian", func(t *testing.T) { + configuredGuardians := &guardians.Guardians{Slice: []*guardians.Guardian{g1}} + newGuardian := &guardians.Guardian{ + Address: newGuardianAddress, + ActivationEpoch: currentEpoch, + ServiceUID: guardianServiceUID, + } + expectedNewGuardians, _ := ga.marshaller.Marshal(&guardians.Guardians{Slice: []*guardians.Guardian{newGuardian}}) + + 
uah := &stateMocks.UserAccountStub{ + RetrieveValueCalled: func(key []byte) ([]byte, uint32, error) { + val, err := ga.marshaller.Marshal(configuredGuardians) + return val, 0, err + }, + AccountDataHandlerCalled: func() vmcommon.AccountDataHandler { + return &trie.DataTrieTrackerStub{ + SaveKeyValueCalled: func(key []byte, value []byte) error { + require.Equal(t, guardianKey, key) + require.Equal(t, expectedNewGuardians, value) + return nil + }, + } + }, + } + err := ga.SetGuardian(uah, newGuardianAddress, g1.Address, guardianServiceUID) + require.Nil(t, err) + }) + t.Run("nil guardian serviceUID should err", func(t *testing.T) { + configuredGuardians := &guardians.Guardians{Slice: []*guardians.Guardian{g1}} + saveKeyValueCalled := false + uah := &stateMocks.UserAccountStub{ + RetrieveValueCalled: func(key []byte) ([]byte, uint32, error) { + val, err := ga.marshaller.Marshal(configuredGuardians) + return val, 0, err + }, + AccountDataHandlerCalled: func() vmcommon.AccountDataHandler { + return &trie.DataTrieTrackerStub{ + SaveKeyValueCalled: func(_ []byte, _ []byte) error { + saveKeyValueCalled = true + return nil + }, + } + }, + } + err := ga.SetGuardian(uah, newGuardianAddress, g1.Address, nil) + require.False(t, saveKeyValueCalled) + require.Equal(t, process.ErrNilGuardianServiceUID, err) + }) + t.Run("tx not signed by active guardian sets guardian with delay", func(t *testing.T) { + configuredGuardians := &guardians.Guardians{Slice: []*guardians.Guardian{g1}} + newGuardian := &guardians.Guardian{ + Address: newGuardianAddress, + ActivationEpoch: currentEpoch + ga.guardianActivationEpochsDelay, + ServiceUID: guardianServiceUID, + } + expectedNewGuardians, _ := ga.marshaller.Marshal(&guardians.Guardians{Slice: []*guardians.Guardian{g1, newGuardian}}) + + uah := &stateMocks.UserAccountStub{ + RetrieveValueCalled: func(key []byte) ([]byte, uint32, error) { + val, err := ga.marshaller.Marshal(configuredGuardians) + return val, 0, err + }, + AccountDataHandlerCalled: func() vmcommon.AccountDataHandler { + return &trie.DataTrieTrackerStub{ + SaveKeyValueCalled: func(key []byte, value []byte) error { + require.Equal(t, guardianKey, key) + require.Equal(t, expectedNewGuardians, value) + return nil + }, + } + }, + } + err := ga.SetGuardian(uah, newGuardianAddress, nil, guardianServiceUID) + require.Nil(t, err) + }) +} + +func TestGuardedAccount_HasActiveGuardian(t *testing.T) { + t.Parallel() + + currentEpoch := uint32(10) + ga := createGuardedAccountWithEpoch(currentEpoch) + + t.Run("nil account type should return false", func(t *testing.T) { + var uah *stateMocks.UserAccountStub + require.False(t, ga.HasActiveGuardian(uah)) + }) + t.Run("getConfiguredGuardians with err should return false", func(t *testing.T) { + expectedErr := errors.New("expected error") + uah := &stateMocks.UserAccountStub{ + RetrieveValueCalled: func(key []byte) ([]byte, uint32, error) { + return nil, 0, expectedErr + }, + } + require.False(t, ga.HasActiveGuardian(uah)) + }) + t.Run("no guardian should return false", func(t *testing.T) { + configuredGuardians := &guardians.Guardians{} + uah := &stateMocks.UserAccountStub{ + RetrieveValueCalled: func(key []byte) ([]byte, uint32, error) { + val, err := ga.marshaller.Marshal(configuredGuardians) + return val, 0, err + }, + } + + require.False(t, ga.HasActiveGuardian(uah)) + }) + t.Run("one pending guardian should return false", func(t *testing.T) { + pendingGuardian := &guardians.Guardian{ + Address: []byte("guardian address"), + ActivationEpoch: currentEpoch + 1, + } + 
configuredGuardians := &guardians.Guardians{Slice: []*guardians.Guardian{pendingGuardian}} + uah := &stateMocks.UserAccountStub{ + RetrieveValueCalled: func(key []byte) ([]byte, uint32, error) { + val, err := ga.marshaller.Marshal(configuredGuardians) + return val, 0, err + }, + } + + require.False(t, ga.HasActiveGuardian(uah)) + }) + t.Run("one active guardian should return true", func(t *testing.T) { + activeGuardian := &guardians.Guardian{ + Address: []byte("guardian address"), + ActivationEpoch: currentEpoch - 1, + } + configuredGuardians := &guardians.Guardians{Slice: []*guardians.Guardian{activeGuardian}} + uah := &stateMocks.UserAccountStub{ + RetrieveValueCalled: func(key []byte) ([]byte, uint32, error) { + val, err := ga.marshaller.Marshal(configuredGuardians) + return val, 0, err + }, + } + + require.True(t, ga.HasActiveGuardian(uah)) + }) + t.Run("one active guardian and one pending new guardian should return true", func(t *testing.T) { + activeGuardian := &guardians.Guardian{ + Address: []byte("guardian address"), + ActivationEpoch: currentEpoch - 1, + } + pendingGuardian := &guardians.Guardian{ + Address: []byte("pending guardian address"), + ActivationEpoch: currentEpoch + 1, + } + + configuredGuardians := &guardians.Guardians{Slice: []*guardians.Guardian{activeGuardian, pendingGuardian}} + uah := &stateMocks.UserAccountStub{ + RetrieveValueCalled: func(key []byte) ([]byte, uint32, error) { + val, err := ga.marshaller.Marshal(configuredGuardians) + return val, 0, err + }, + } + + require.True(t, ga.HasActiveGuardian(uah)) + }) + t.Run("one active guardian and one disabled (old) guardian should return true", func(t *testing.T) { + activeGuardian := &guardians.Guardian{ + Address: []byte("guardian address"), + ActivationEpoch: currentEpoch - 1, + } + oldGuardian := &guardians.Guardian{ + Address: []byte("old guardian address"), + ActivationEpoch: currentEpoch - 5, + } + + configuredGuardians := &guardians.Guardians{Slice: []*guardians.Guardian{activeGuardian, oldGuardian}} + uah := &stateMocks.UserAccountStub{ + RetrieveValueCalled: func(key []byte) ([]byte, uint32, error) { + val, err := ga.marshaller.Marshal(configuredGuardians) + return val, 0, err + }, + } + + require.True(t, ga.HasActiveGuardian(uah)) + }) +} + +func TestGuardedAccount_HasPendingGuardian(t *testing.T) { + t.Parallel() + + currentEpoch := uint32(10) + ga := createGuardedAccountWithEpoch(currentEpoch) + + t.Run("nil account type should return false", func(t *testing.T) { + var uah *stateMocks.UserAccountStub + require.False(t, ga.HasPendingGuardian(uah)) + }) + t.Run("getConfiguredGuardians with err should return false", func(t *testing.T) { + expectedErr := errors.New("expected error") + uah := &stateMocks.UserAccountStub{ + RetrieveValueCalled: func(key []byte) ([]byte, uint32, error) { + return nil, 0, expectedErr + }, + } + require.False(t, ga.HasPendingGuardian(uah)) + }) + t.Run("no guardian should return false", func(t *testing.T) { + configuredGuardians := &guardians.Guardians{} + uah := &stateMocks.UserAccountStub{ + RetrieveValueCalled: func(key []byte) ([]byte, uint32, error) { + val, err := ga.marshaller.Marshal(configuredGuardians) + return val, 0, err + }, + } + + require.False(t, ga.HasPendingGuardian(uah)) + }) + t.Run("one pending guardian should return true", func(t *testing.T) { + pendingGuardian := &guardians.Guardian{ + Address: []byte("guardian address"), + ActivationEpoch: currentEpoch + 1, + } + configuredGuardians := &guardians.Guardians{Slice: []*guardians.Guardian{pendingGuardian}} 
+ uah := &stateMocks.UserAccountStub{ + RetrieveValueCalled: func(key []byte) ([]byte, uint32, error) { + val, err := ga.marshaller.Marshal(configuredGuardians) + return val, 0, err + }, + } + + require.True(t, ga.HasPendingGuardian(uah)) + }) + t.Run("one active guardian should return false", func(t *testing.T) { + activeGuardian := &guardians.Guardian{ + Address: []byte("guardian address"), + ActivationEpoch: currentEpoch - 1, + } + configuredGuardians := &guardians.Guardians{Slice: []*guardians.Guardian{activeGuardian}} + uah := &stateMocks.UserAccountStub{ + RetrieveValueCalled: func(key []byte) ([]byte, uint32, error) { + val, err := ga.marshaller.Marshal(configuredGuardians) + return val, 0, err + }, + } + + require.False(t, ga.HasPendingGuardian(uah)) + }) + t.Run("one active guardian and one pending new guardian should return true", func(t *testing.T) { + activeGuardian := &guardians.Guardian{ + Address: []byte("guardian address"), + ActivationEpoch: currentEpoch - 1, + } + pendingGuardian := &guardians.Guardian{ + Address: []byte("pending guardian address"), + ActivationEpoch: currentEpoch + 1, + } + + configuredGuardians := &guardians.Guardians{Slice: []*guardians.Guardian{activeGuardian, pendingGuardian}} + uah := &stateMocks.UserAccountStub{ + RetrieveValueCalled: func(key []byte) ([]byte, uint32, error) { + val, err := ga.marshaller.Marshal(configuredGuardians) + return val, 0, err + }, + } + + require.True(t, ga.HasPendingGuardian(uah)) + }) + t.Run("one active guardian and one disabled (old) guardian should return false", func(t *testing.T) { + activeGuardian := &guardians.Guardian{ + Address: []byte("guardian address"), + ActivationEpoch: currentEpoch - 1, + } + oldGuardian := &guardians.Guardian{ + Address: []byte("old guardian address"), + ActivationEpoch: currentEpoch - 5, + } + + configuredGuardians := &guardians.Guardians{Slice: []*guardians.Guardian{activeGuardian, oldGuardian}} + uah := &stateMocks.UserAccountStub{ + RetrieveValueCalled: func(key []byte) ([]byte, uint32, error) { + val, err := ga.marshaller.Marshal(configuredGuardians) + return val, 0, err + }, + } + + require.False(t, ga.HasPendingGuardian(uah)) + }) +} + +func TestGuardedAccount_CleanOtherThanActive(t *testing.T) { + t.Parallel() + + currentEpoch := uint32(10) + g0 := &guardians.Guardian{ + Address: []byte("old guardian"), + ActivationEpoch: currentEpoch - 4, + } + g1 := &guardians.Guardian{ + Address: []byte("active guardian"), + ActivationEpoch: currentEpoch - 2, + } + g2 := &guardians.Guardian{ + Address: []byte("pending guardian"), + ActivationEpoch: currentEpoch + 2, + } + + t.Run("no configured guardians does not change the guardians", func(t *testing.T) { + configuredGuardians := &guardians.Guardians{Slice: []*guardians.Guardian{}} + ga := createGuardedAccountWithEpoch(currentEpoch) + + acc := &stateMocks.UserAccountStub{ + RetrieveValueCalled: func(key []byte) ([]byte, uint32, error) { + val, err := ga.marshaller.Marshal(configuredGuardians) + return val, 0, err + }, + AccountDataHandlerCalled: func() vmcommon.AccountDataHandler { + return &trie.DataTrieTrackerStub{ + SaveKeyValueCalled: func(key []byte, value []byte) error { + require.Fail(t, "should not save anything") + return nil + }, + } + }, + } + + ga.CleanOtherThanActive(acc) + }) + t.Run("one pending guardian should clean the pending", func(t *testing.T) { + configuredGuardians := &guardians.Guardians{Slice: []*guardians.Guardian{g2}} + ga := createGuardedAccountWithEpoch(currentEpoch) + expectedConfig := 
&guardians.Guardians{Slice: []*guardians.Guardian{}} + expectedValue, _ := ga.marshaller.Marshal(expectedConfig) + + acc := &stateMocks.UserAccountStub{ + RetrieveValueCalled: func(key []byte) ([]byte, uint32, error) { + val, err := ga.marshaller.Marshal(configuredGuardians) + return val, 0, err + }, + AccountDataHandlerCalled: func() vmcommon.AccountDataHandler { + return &trie.DataTrieTrackerStub{ + SaveKeyValueCalled: func(key []byte, value []byte) error { + require.Equal(t, guardianKey, key) + require.Equal(t, value, expectedValue) + return nil + }, + } + }, + } + + ga.CleanOtherThanActive(acc) + }) + t.Run("one active guardian should set again the active", func(t *testing.T) { + configuredGuardians := &guardians.Guardians{Slice: []*guardians.Guardian{g1}} + ga := createGuardedAccountWithEpoch(currentEpoch) + + acc := &stateMocks.UserAccountStub{ + RetrieveValueCalled: func(key []byte) ([]byte, uint32, error) { + val, err := ga.marshaller.Marshal(configuredGuardians) + return val, 0, err + }, + AccountDataHandlerCalled: func() vmcommon.AccountDataHandler { + return &trie.DataTrieTrackerStub{ + SaveKeyValueCalled: func(key []byte, value []byte) error { + require.Equal(t, guardianKey, key) + expectedMarshalledGuardians, _ := ga.marshaller.Marshal(configuredGuardians) + require.Equal(t, expectedMarshalledGuardians, value) + return nil + }, + } + }, + } + + ga.CleanOtherThanActive(acc) + }) + t.Run("one active and one pending should set again the active (effect is cleaning the pending)", func(t *testing.T) { + configuredGuardians := &guardians.Guardians{Slice: []*guardians.Guardian{g1, g2}} + ga := createGuardedAccountWithEpoch(currentEpoch) + + acc := &stateMocks.UserAccountStub{ + RetrieveValueCalled: func(key []byte) ([]byte, uint32, error) { + val, err := ga.marshaller.Marshal(configuredGuardians) + return val, 0, err + }, + AccountDataHandlerCalled: func() vmcommon.AccountDataHandler { + return &trie.DataTrieTrackerStub{ + SaveKeyValueCalled: func(key []byte, value []byte) error { + require.Equal(t, guardianKey, key) + expectedMarshalledGuardians, _ := ga.marshaller.Marshal(&guardians.Guardians{Slice: []*guardians.Guardian{g1}}) + require.Equal(t, expectedMarshalledGuardians, value) + return nil + }, + } + }, + } + + ga.CleanOtherThanActive(acc) + }) + t.Run("one active and one disabled should set again the active (effect is cleaning the disabled)", func(t *testing.T) { + configuredGuardians := &guardians.Guardians{Slice: []*guardians.Guardian{g0, g1}} + ga := createGuardedAccountWithEpoch(currentEpoch) + + acc := &stateMocks.UserAccountStub{ + RetrieveValueCalled: func(key []byte) ([]byte, uint32, error) { + val, err := ga.marshaller.Marshal(configuredGuardians) + return val, 0, err + }, + AccountDataHandlerCalled: func() vmcommon.AccountDataHandler { + return &trie.DataTrieTrackerStub{ + SaveKeyValueCalled: func(key []byte, value []byte) error { + require.Equal(t, guardianKey, key) + expectedMarshalledGuardians, _ := ga.marshaller.Marshal(&guardians.Guardians{Slice: []*guardians.Guardian{g1}}) + require.Equal(t, expectedMarshalledGuardians, value) + return nil + }, + } + }, + } + + ga.CleanOtherThanActive(acc) + }) +} + +func TestGuardedAccount_GetConfiguredGuardians(t *testing.T) { + currentEpoch := uint32(10) + g0 := &guardians.Guardian{ + Address: []byte("old guardian"), + ActivationEpoch: currentEpoch - 4, + } + g1 := &guardians.Guardian{ + Address: []byte("active guardian"), + ActivationEpoch: currentEpoch - 2, + } + g2 := &guardians.Guardian{ + Address: []byte("pending 
guardian"), + ActivationEpoch: currentEpoch + 2, + } + ga := createGuardedAccountWithEpoch(currentEpoch) + + t.Run("unmarshall error", func(t *testing.T) { + t.Parallel() + + acc := &stateMocks.UserAccountStub{ + RetrieveValueCalled: func(key []byte) ([]byte, uint32, error) { + return []byte("wrong data"), 0, nil + }, + } + active, pending, err := ga.GetConfiguredGuardians(acc) + require.Nil(t, active) + require.Nil(t, pending) + require.NotNil(t, err) + }) + t.Run("empty configured guardians", func(t *testing.T) { + t.Parallel() + + expectedErr := errors.New("expected error") + acc := &stateMocks.UserAccountStub{ + RetrieveValueCalled: func(key []byte) ([]byte, uint32, error) { + return nil, 0, expectedErr + }, + } + active, pending, err := ga.GetConfiguredGuardians(acc) + require.Nil(t, active) + require.Nil(t, pending) + require.Nil(t, err) + }) + t.Run("one pending guardian", func(t *testing.T) { + configuredGuardians := &guardians.Guardians{Slice: []*guardians.Guardian{g2}} + acc := &stateMocks.UserAccountStub{ + RetrieveValueCalled: func(key []byte) ([]byte, uint32, error) { + val, err := ga.marshaller.Marshal(configuredGuardians) + return val, 0, err + }, + } + active, pending, err := ga.GetConfiguredGuardians(acc) + require.Nil(t, active) + require.Equal(t, g2, pending) + require.Nil(t, err) + }) + t.Run("one active guardian", func(t *testing.T) { + configuredGuardians := &guardians.Guardians{Slice: []*guardians.Guardian{g1}} + acc := &stateMocks.UserAccountStub{ + RetrieveValueCalled: func(key []byte) ([]byte, uint32, error) { + val, err := ga.marshaller.Marshal(configuredGuardians) + return val, 0, err + }, + } + active, pending, err := ga.GetConfiguredGuardians(acc) + require.Equal(t, g1, active) + require.Nil(t, pending) + require.Nil(t, err) + }) + t.Run("one active and one pending", func(t *testing.T) { + configuredGuardians := &guardians.Guardians{Slice: []*guardians.Guardian{g1, g2}} + acc := &stateMocks.UserAccountStub{ + RetrieveValueCalled: func(key []byte) ([]byte, uint32, error) { + val, err := ga.marshaller.Marshal(configuredGuardians) + return val, 0, err + }, + } + active, pending, err := ga.GetConfiguredGuardians(acc) + require.Equal(t, g1, active) + require.Equal(t, g2, pending) + require.Nil(t, err) + }) + t.Run("one old and one active", func(t *testing.T) { + configuredGuardians := &guardians.Guardians{Slice: []*guardians.Guardian{g0, g1}} + acc := &stateMocks.UserAccountStub{ + RetrieveValueCalled: func(key []byte) ([]byte, uint32, error) { + val, err := ga.marshaller.Marshal(configuredGuardians) + return val, 0, err + }, + } + active, pending, err := ga.GetConfiguredGuardians(acc) + require.Equal(t, g1, active) + require.Nil(t, pending) + require.Nil(t, err) + }) +} + +func TestGuardedAccount_EpochConfirmed(t *testing.T) { + ga := createGuardedAccountWithEpoch(0) + ga.EpochConfirmed(1, 0) + require.Equal(t, uint32(1), ga.currentEpoch) + + ga.EpochConfirmed(111, 0) + require.Equal(t, uint32(111), ga.currentEpoch) +} + +func TestGuardedAccount_IsInterfaceNil(t *testing.T) { + var gah process.GuardedAccountHandler + require.True(t, check.IfNil(gah)) + + var ga *guardedAccount + require.True(t, check.IfNil(ga)) + + ga, _ = NewGuardedAccount(&testscommon.MarshalizerMock{}, &epochNotifier.EpochNotifierStub{}, 10) + require.False(t, check.IfNil(ga)) +} + +func TestGuardedAccount_EpochConcurrency(t *testing.T) { + t.Parallel() + + marshaller := &testscommon.MarshalizerMock{} + currentEpoch := uint32(0) + en := forking.NewGenericEpochNotifier() + ga, _ := 
+
+func TestGuardedAccount_EpochConcurrency(t *testing.T) {
+ t.Parallel()
+
+ marshaller := &testscommon.MarshalizerMock{}
+ currentEpoch := uint32(0)
+ en := forking.NewGenericEpochNotifier()
+ ga, _ := NewGuardedAccount(marshaller, en, 2)
+ ctx := context.Background()
+ go func() {
+ epochTime := time.Millisecond
+ timer := time.NewTimer(epochTime)
+ defer timer.Stop()
+
+ for {
+ timer.Reset(epochTime)
+ select {
+ case <-timer.C:
+ hdr := &block.Header{
+ Epoch: currentEpoch,
+ }
+ en.CheckEpoch(hdr)
+ currentEpoch++
+ case <-ctx.Done():
+ return
+ }
+ }
+ }()
+
+ uah := &stateMocks.UserAccountStub{
+ AccountDataHandlerCalled: func() vmcommon.AccountDataHandler {
+ return &trie.DataTrieTrackerStub{
+ SaveKeyValueCalled: func(key []byte, value []byte) error {
+ return nil
+ },
+ }
+ },
+ }
+ err := ga.SetGuardian(uah, []byte("guardian address"), nil, []byte("uuid"))
+ require.Nil(t, err)
+}
+
+func createGuardedAccountWithEpoch(epoch uint32) *guardedAccount {
+ marshaller := &testscommon.MarshalizerMock{}
+ en := &epochNotifier.EpochNotifierStub{
+ RegisterNotifyHandlerCalled: func(handler vmcommon.EpochSubscriberHandler) {
+ handler.EpochConfirmed(epoch, 0)
+ },
+ }
+
+ ga, _ := NewGuardedAccount(marshaller, en, 10)
+ return ga
+}
diff --git a/process/interceptors/factory/interceptedMetaHeaderDataFactory_test.go b/process/interceptors/factory/interceptedMetaHeaderDataFactory_test.go
index ba0bda4a67c..d77a5ff5ea9 100644
--- a/process/interceptors/factory/interceptedMetaHeaderDataFactory_test.go
+++ b/process/interceptors/factory/interceptedMetaHeaderDataFactory_test.go
@@ -16,6 +16,7 @@ import (
 processMocks "github.com/multiversx/mx-chain-go/process/mock"
 "github.com/multiversx/mx-chain-go/testscommon"
 "github.com/multiversx/mx-chain-go/testscommon/cryptoMocks"
+ "github.com/multiversx/mx-chain-go/testscommon/economicsmocks"
 "github.com/multiversx/mx-chain-go/testscommon/epochNotifier"
 "github.com/multiversx/mx-chain-go/testscommon/hashingMocks"
 "github.com/multiversx/mx-chain-go/testscommon/shardingMocks"
@@ -54,7 +55,7 @@ func createMockPubkeyConverter() core.PubkeyConverter {
 }
 
 func createMockFeeHandler() process.FeeHandler {
- return &mock.FeeHandlerStub{}
+ return &economicsmocks.EconomicsHandlerStub{}
 }
 
 func createMockComponentHolders() (*mock.CoreComponentsMock, *mock.CryptoComponentsMock) {
diff --git a/process/interceptors/processor/interface.go b/process/interceptors/processor/interface.go
index 8d48aa5fb55..147d8f30270 100644
--- a/process/interceptors/processor/interface.go
+++ b/process/interceptors/processor/interface.go
@@ -1,28 +1,9 @@
 package processor
 
 import (
- "math/big"
-
- "github.com/multiversx/mx-chain-core-go/data"
 "github.com/multiversx/mx-chain-go/state"
 )
 
-// InterceptedTransactionHandler defines an intercepted data wrapper over transaction handler that has
-// receiver and sender shard getters
-type InterceptedTransactionHandler interface {
- SenderShardId() uint32
- ReceiverShardId() uint32
- Nonce() uint64
- SenderAddress() []byte
- Fee() *big.Int
- Transaction() data.TransactionHandler
-}
-
-// ShardedPool is a perspective of the sharded data pool
-type ShardedPool interface {
- AddData(key []byte, data interface{}, sizeInBytes int, cacheID string)
-}
-
 type interceptedDataSizeHandler interface {
 SizeInBytes() int
 }
diff --git a/process/interceptors/processor/txInterceptorProcessor.go b/process/interceptors/processor/txInterceptorProcessor.go
index e99f31a1380..1215e81e937 100644
--- a/process/interceptors/processor/txInterceptorProcessor.go
+++ b/process/interceptors/processor/txInterceptorProcessor.go
@@ -10,10 +10,11 @@ import (
 )
 
 var _ process.InterceptorProcessor = (*TxInterceptorProcessor)(nil)
 var txLog =
logger.GetOrCreate("process/interceptors/processor/txlog") + // TxInterceptorProcessor is the processor used when intercepting transactions // (smart contract results, receipts, transaction) structs which satisfy TransactionHandler interface. type TxInterceptorProcessor struct { - shardedPool ShardedPool + shardedPool process.ShardedPool txValidator process.TxValidator } @@ -37,7 +38,7 @@ func NewTxInterceptorProcessor(argument *ArgTxInterceptorProcessor) (*TxIntercep // Validate checks if the intercepted data can be processed func (txip *TxInterceptorProcessor) Validate(data process.InterceptedData, _ core.PeerID) error { - interceptedTx, ok := data.(InterceptedTransactionHandler) + interceptedTx, ok := data.(process.InterceptedTransactionHandler) if !ok { return process.ErrWrongTypeAssertion } @@ -47,7 +48,7 @@ func (txip *TxInterceptorProcessor) Validate(data process.InterceptedData, _ cor // Save will save the received data into the cacher func (txip *TxInterceptorProcessor) Save(data process.InterceptedData, peerOriginator core.PeerID, _ string) error { - interceptedTx, ok := data.(InterceptedTransactionHandler) + interceptedTx, ok := data.(process.InterceptedTransactionHandler) if !ok { return process.ErrWrongTypeAssertion } diff --git a/process/interceptors/processor/txInterceptorProcessor_test.go b/process/interceptors/processor/txInterceptorProcessor_test.go index 2974af7c434..439fecf9aca 100644 --- a/process/interceptors/processor/txInterceptorProcessor_test.go +++ b/process/interceptors/processor/txInterceptorProcessor_test.go @@ -80,7 +80,7 @@ func TestTxInterceptorProcessor_ValidateReturnsFalseShouldErr(t *testing.T) { expectedErr := errors.New("tx validation error") arg := createMockTxArgument() arg.TxValidator = &mock.TxValidatorStub{ - CheckTxValidityCalled: func(txValidatorHandler process.TxValidatorHandler) error { + CheckTxValidityCalled: func(interceptedTx process.InterceptedTransactionHandler) error { return expectedErr }, } @@ -101,7 +101,7 @@ func TestTxInterceptorProcessor_ValidateReturnsTrueShouldWork(t *testing.T) { arg := createMockTxArgument() arg.TxValidator = &mock.TxValidatorStub{ - CheckTxValidityCalled: func(txValidatorHandler process.TxValidatorHandler) error { + CheckTxValidityCalled: func(interceptedTx process.InterceptedTransactionHandler) error { return nil }, } diff --git a/process/interface.go b/process/interface.go index cacfc6650b9..38feb06a4ac 100644 --- a/process/interface.go +++ b/process/interface.go @@ -10,6 +10,7 @@ import ( "github.com/multiversx/mx-chain-core-go/data/block" "github.com/multiversx/mx-chain-core-go/data/endProcess" "github.com/multiversx/mx-chain-core-go/data/esdt" + "github.com/multiversx/mx-chain-core-go/data/guardians" "github.com/multiversx/mx-chain-core-go/data/rewardTx" "github.com/multiversx/mx-chain-core-go/data/scheduled" "github.com/multiversx/mx-chain-core-go/data/smartContractResult" @@ -65,7 +66,7 @@ type TxTypeHandler interface { // TxValidator can determine if a provided transaction handler is valid or not from the process point of view type TxValidator interface { - CheckTxValidity(txHandler TxValidatorHandler) error + CheckTxValidity(interceptedTx InterceptedTransactionHandler) error CheckTxWhiteList(data InterceptedData) error IsInterfaceNil() bool } @@ -79,8 +80,20 @@ type TxValidatorHandler interface { Fee() *big.Int } +// InterceptedTransactionHandler defines an intercepted data wrapper over transaction handler that has +// receiver and sender shard getters +type InterceptedTransactionHandler interface { 
+ SenderShardId() uint32 + ReceiverShardId() uint32 + Nonce() uint64 + SenderAddress() []byte + Fee() *big.Int + Transaction() data.TransactionHandler +} + // TxVersionCheckerHandler defines the functionality that is needed for a TxVersionChecker to validate transaction version type TxVersionCheckerHandler interface { + IsGuardedTransaction(tx *transaction.Transaction) bool IsSignedWithHash(tx *transaction.Transaction) bool CheckTxVersion(tx *transaction.Transaction) error IsInterfaceNil() bool @@ -504,7 +517,7 @@ type BlockChainHookHandler interface { RevertToSnapshot(snapshot int) error Close() error FilterCodeMetadataForUpgrade(input []byte) ([]byte, error) - ApplyFiltersOnCodeMetadata(codeMetadata vmcommon.CodeMetadata) vmcommon.CodeMetadata + ApplyFiltersOnSCCodeMetadata(codeMetadata vmcommon.CodeMetadata) vmcommon.CodeMetadata ResetCounters() GetCounterValues() map[string]uint64 IsInterfaceNil() bool @@ -645,8 +658,10 @@ type feeHandler interface { CheckValidityTxValues(tx data.TransactionWithFeeHandler) error ComputeFeeForProcessing(tx data.TransactionWithFeeHandler, gasToUse uint64) *big.Int MinGasPrice() uint64 + MaxGasPriceSetGuardian() uint64 GasPriceModifier() float64 MinGasLimit() uint64 + ExtraGasLimitGuardedTx() uint64 SplitTxGasInCategories(tx data.TransactionWithFeeHandler) (uint64, uint64) GasPriceForProcessing(tx data.TransactionWithFeeHandler) uint64 GasPriceForMove(tx data.TransactionWithFeeHandler) uint64 @@ -1215,6 +1230,36 @@ type ScheduledTxsExecutionHandler interface { IsInterfaceNil() bool } +// ShardedPool is a perspective of the sharded data pool +type ShardedPool interface { + AddData(key []byte, data interface{}, sizeInBytes int, cacheID string) +} + +// InterceptedSignedTransactionHandler provides additional handling for signed transactions +type InterceptedSignedTransactionHandler interface { + InterceptedTransactionHandler + GetTxMessageForSignatureVerification() ([]byte, error) +} + +// GuardianChecker can check an account guardian +type GuardianChecker interface { + GetActiveGuardian(handler vmcommon.UserAccountHandler) ([]byte, error) + HasActiveGuardian(uah state.UserAccountHandler) bool + HasPendingGuardian(uah state.UserAccountHandler) bool + IsInterfaceNil() bool +} + +// GuardedAccountHandler allows setting and getting the configured account guardian +type GuardedAccountHandler interface { + GetActiveGuardian(handler vmcommon.UserAccountHandler) ([]byte, error) + HasActiveGuardian(uah state.UserAccountHandler) bool + HasPendingGuardian(uah state.UserAccountHandler) bool + SetGuardian(uah vmcommon.UserAccountHandler, guardianAddress []byte, txGuardianAddress []byte, guardianServiceUID []byte) error + CleanOtherThanActive(uah vmcommon.UserAccountHandler) + GetConfiguredGuardians(uah state.UserAccountHandler) (active *guardians.Guardian, pending *guardians.Guardian, err error) + IsInterfaceNil() bool +} + // DoubleTransactionDetector is able to detect if a transaction hash is present more than once in a block body type DoubleTransactionDetector interface { ProcessBlockBody(body *block.Body) diff --git a/process/mock/feeHandlerStub.go b/process/mock/feeHandlerStub.go deleted file mode 100644 index 2d9d658945d..00000000000 --- a/process/mock/feeHandlerStub.go +++ /dev/null @@ -1,235 +0,0 @@ -package mock - -import ( - "math/big" - - "github.com/multiversx/mx-chain-core-go/data" -) - -// FeeHandlerStub - -type FeeHandlerStub struct { - SetMaxGasLimitPerBlockCalled func(maxGasLimitPerBlock uint64) - SetMinGasPriceCalled func(minGasPrice uint64) - 
SetMinGasLimitCalled func(minGasLimit uint64) - MaxGasLimitPerBlockCalled func() uint64 - MaxGasLimitPerMiniBlockCalled func() uint64 - MaxGasLimitPerBlockForSafeCrossShardCalled func() uint64 - MaxGasLimitPerMiniBlockForSafeCrossShardCalled func() uint64 - MaxGasLimitPerTxCalled func() uint64 - ComputeGasLimitCalled func(tx data.TransactionWithFeeHandler) uint64 - ComputeMoveBalanceFeeCalled func(tx data.TransactionWithFeeHandler) *big.Int - ComputeTxFeeCalled func(tx data.TransactionWithFeeHandler) *big.Int - CheckValidityTxValuesCalled func(tx data.TransactionWithFeeHandler) error - DeveloperPercentageCalled func() float64 - MinGasPriceCalled func() uint64 - GasPriceModifierCalled func() float64 - ComputeFeeForProcessingCalled func(tx data.TransactionWithFeeHandler, gasToUse uint64) *big.Int - GenesisTotalSupplyCalled func() *big.Int - SplitTxGasInCategoriesCalled func(tx data.TransactionWithFeeHandler) (uint64, uint64) - GasPriceForProcessingCalled func(tx data.TransactionWithFeeHandler) uint64 - GasPriceForMoveCalled func(tx data.TransactionWithFeeHandler) uint64 - MinGasPriceForProcessingCalled func() uint64 - ComputeGasUsedAndFeeBasedOnRefundValueCalled func(tx data.TransactionWithFeeHandler, refundValue *big.Int) (uint64, *big.Int) - ComputeTxFeeBasedOnGasUsedCalled func(tx data.TransactionWithFeeHandler, gasUsed uint64) *big.Int - ComputeGasLimitBasedOnBalanceCalled func(tx data.TransactionWithFeeHandler, balance *big.Int) (uint64, error) -} - -// ComputeFeeForProcessing - -func (fhs *FeeHandlerStub) ComputeFeeForProcessing(tx data.TransactionWithFeeHandler, gasToUse uint64) *big.Int { - if fhs.ComputeFeeForProcessingCalled != nil { - return fhs.ComputeFeeForProcessingCalled(tx, gasToUse) - } - return big.NewInt(0) -} - -// ComputeGasLimitBasedOnBalance - -func (fhs *FeeHandlerStub) ComputeGasLimitBasedOnBalance(tx data.TransactionWithFeeHandler, balance *big.Int) (uint64, error) { - if fhs.ComputeGasLimitBasedOnBalanceCalled != nil { - return fhs.ComputeGasLimitBasedOnBalanceCalled(tx, balance) - } - return 0, nil -} - -// GasPriceModifier - -func (fhs *FeeHandlerStub) GasPriceModifier() float64 { - if fhs.GasPriceModifierCalled != nil { - return fhs.GasPriceModifierCalled() - } - return 1.0 -} - -// MinGasPrice - -func (fhs *FeeHandlerStub) MinGasPrice() uint64 { - if fhs.MinGasPriceCalled != nil { - return fhs.MinGasPriceCalled() - } - return 0 -} - -// MinGasLimit will return min gas limit -func (fhs *FeeHandlerStub) MinGasLimit() uint64 { - return 0 -} - -// DeveloperPercentage - -func (fhs *FeeHandlerStub) DeveloperPercentage() float64 { - if fhs.DeveloperPercentageCalled != nil { - return fhs.DeveloperPercentageCalled() - } - - return 0.0 -} - -// GasPerDataByte - -func (fhs *FeeHandlerStub) GasPerDataByte() uint64 { - return 0 -} - -// SetMaxGasLimitPerBlock - -func (fhs *FeeHandlerStub) SetMaxGasLimitPerBlock(maxGasLimitPerBlock uint64) { - fhs.SetMaxGasLimitPerBlockCalled(maxGasLimitPerBlock) -} - -// SetMinGasPrice - -func (fhs *FeeHandlerStub) SetMinGasPrice(minGasPrice uint64) { - fhs.SetMinGasPriceCalled(minGasPrice) -} - -// SetMinGasLimit - -func (fhs *FeeHandlerStub) SetMinGasLimit(minGasLimit uint64) { - fhs.SetMinGasLimitCalled(minGasLimit) -} - -// MaxGasLimitPerBlock - -func (fhs *FeeHandlerStub) MaxGasLimitPerBlock(uint32) uint64 { - if fhs.MaxGasLimitPerBlockCalled != nil { - return fhs.MaxGasLimitPerBlockCalled() - } - return 0 -} - -// MaxGasLimitPerMiniBlock - -func (fhs *FeeHandlerStub) MaxGasLimitPerMiniBlock(uint32) uint64 { - if 
fhs.MaxGasLimitPerMiniBlockCalled != nil { - return fhs.MaxGasLimitPerMiniBlockCalled() - } - return 0 -} - -// MaxGasLimitPerBlockForSafeCrossShard - -func (fhs *FeeHandlerStub) MaxGasLimitPerBlockForSafeCrossShard() uint64 { - if fhs.MaxGasLimitPerBlockForSafeCrossShardCalled != nil { - return fhs.MaxGasLimitPerBlockForSafeCrossShardCalled() - } - return 0 -} - -// MaxGasLimitPerMiniBlockForSafeCrossShard - -func (fhs *FeeHandlerStub) MaxGasLimitPerMiniBlockForSafeCrossShard() uint64 { - if fhs.MaxGasLimitPerMiniBlockForSafeCrossShardCalled != nil { - return fhs.MaxGasLimitPerMiniBlockForSafeCrossShardCalled() - } - return 0 -} - -// MaxGasLimitPerTx - -func (fhs *FeeHandlerStub) MaxGasLimitPerTx() uint64 { - if fhs.MaxGasLimitPerTxCalled != nil { - return fhs.MaxGasLimitPerTxCalled() - } - return 0 -} - -// ComputeGasLimit - -func (fhs *FeeHandlerStub) ComputeGasLimit(tx data.TransactionWithFeeHandler) uint64 { - if fhs.ComputeGasLimitCalled != nil { - return fhs.ComputeGasLimitCalled(tx) - } - return 0 -} - -// ComputeMoveBalanceFee - -func (fhs *FeeHandlerStub) ComputeMoveBalanceFee(tx data.TransactionWithFeeHandler) *big.Int { - if fhs.ComputeMoveBalanceFeeCalled != nil { - return fhs.ComputeMoveBalanceFeeCalled(tx) - } - return big.NewInt(0) -} - -// ComputeTxFee - -func (fhs *FeeHandlerStub) ComputeTxFee(tx data.TransactionWithFeeHandler) *big.Int { - if fhs.ComputeTxFeeCalled != nil { - return fhs.ComputeTxFeeCalled(tx) - } - return big.NewInt(0) -} - -// GenesisTotalSupply - -func (fhs *FeeHandlerStub) GenesisTotalSupply() *big.Int { - if fhs.GenesisTotalSupplyCalled != nil { - return fhs.GenesisTotalSupplyCalled() - } - - return big.NewInt(0) -} - -// CheckValidityTxValues - -func (fhs *FeeHandlerStub) CheckValidityTxValues(tx data.TransactionWithFeeHandler) error { - if fhs.CheckValidityTxValuesCalled != nil { - return fhs.CheckValidityTxValuesCalled(tx) - } - return nil -} - -// SplitTxGasInCategories - -func (fhs *FeeHandlerStub) SplitTxGasInCategories(tx data.TransactionWithFeeHandler) (uint64, uint64) { - if fhs.SplitTxGasInCategoriesCalled != nil { - return fhs.SplitTxGasInCategoriesCalled(tx) - } - return 0, 0 -} - -// GasPriceForProcessing - -func (fhs *FeeHandlerStub) GasPriceForProcessing(tx data.TransactionWithFeeHandler) uint64 { - if fhs.GasPriceForProcessingCalled != nil { - return fhs.GasPriceForProcessingCalled(tx) - } - return 0 -} - -// GasPriceForMove - -func (fhs *FeeHandlerStub) GasPriceForMove(tx data.TransactionWithFeeHandler) uint64 { - if fhs.GasPriceForMoveCalled != nil { - return fhs.GasPriceForMoveCalled(tx) - } - return 0 -} - -// MinGasPriceForProcessing - -func (fhs *FeeHandlerStub) MinGasPriceForProcessing() uint64 { - if fhs.MinGasPriceForProcessingCalled != nil { - return fhs.MinGasPriceForProcessingCalled() - } - return 0 -} - -// ComputeGasUsedAndFeeBasedOnRefundValue - -func (fhs *FeeHandlerStub) ComputeGasUsedAndFeeBasedOnRefundValue(tx data.TransactionWithFeeHandler, refundValue *big.Int) (uint64, *big.Int) { - if fhs.ComputeGasUsedAndFeeBasedOnRefundValueCalled != nil { - return fhs.ComputeGasUsedAndFeeBasedOnRefundValueCalled(tx, refundValue) - } - return 0, big.NewInt(0) -} - -// ComputeTxFeeBasedOnGasUsed - -func (fhs *FeeHandlerStub) ComputeTxFeeBasedOnGasUsed(tx data.TransactionWithFeeHandler, gasUsed uint64) *big.Int { - if fhs.ComputeTxFeeBasedOnGasUsedCalled != nil { - return fhs.ComputeTxFeeBasedOnGasUsedCalled(tx, gasUsed) - } - return big.NewInt(0) -} - -// IsInterfaceNil returns true if there is no value under the interface 
-func (fhs *FeeHandlerStub) IsInterfaceNil() bool { - return fhs == nil -} diff --git a/process/mock/interceptedTxHandlerStub.go b/process/mock/interceptedTxHandlerStub.go index af95cf88fb7..689a9420f4e 100644 --- a/process/mock/interceptedTxHandlerStub.go +++ b/process/mock/interceptedTxHandlerStub.go @@ -8,40 +8,67 @@ import ( // InterceptedTxHandlerStub - type InterceptedTxHandlerStub struct { - SenderShardIdCalled func() uint32 - ReceiverShardIdCalled func() uint32 - NonceCalled func() uint64 - SenderAddressCalled func() []byte - FeeCalled func() *big.Int - TransactionCalled func() data.TransactionHandler + SenderShardIdCalled func() uint32 + ReceiverShardIdCalled func() uint32 + NonceCalled func() uint64 + SenderAddressCalled func() []byte + FeeCalled func() *big.Int + TransactionCalled func() data.TransactionHandler + GetTxMessageForSignatureVerificationCalled func() ([]byte, error) } // SenderShardId - func (iths *InterceptedTxHandlerStub) SenderShardId() uint32 { - return iths.SenderShardIdCalled() + if iths.SenderShardIdCalled != nil { + return iths.SenderShardIdCalled() + } + return 0 } // ReceiverShardId - func (iths *InterceptedTxHandlerStub) ReceiverShardId() uint32 { - return iths.ReceiverShardIdCalled() + if iths.ReceiverShardIdCalled != nil { + return iths.ReceiverShardIdCalled() + } + return 0 } // Nonce - func (iths *InterceptedTxHandlerStub) Nonce() uint64 { - return iths.NonceCalled() + if iths.NonceCalled != nil { + return iths.NonceCalled() + } + return 0 } // SenderAddress - func (iths *InterceptedTxHandlerStub) SenderAddress() []byte { - return iths.SenderAddressCalled() + if iths.SenderAddressCalled != nil { + return iths.SenderAddressCalled() + } + return nil } // Fee - func (iths *InterceptedTxHandlerStub) Fee() *big.Int { - return iths.FeeCalled() + if iths.FeeCalled != nil { + return iths.FeeCalled() + } + return nil } // Transaction - func (iths *InterceptedTxHandlerStub) Transaction() data.TransactionHandler { - return iths.TransactionCalled() + if iths.TransactionCalled != nil { + return iths.TransactionCalled() + } + return nil +} + +// GetTxMessageForSignatureVerification - +func (iths *InterceptedTxHandlerStub) GetTxMessageForSignatureVerification() ([]byte, error) { + if iths.GetTxMessageForSignatureVerificationCalled != nil { + return iths.GetTxMessageForSignatureVerificationCalled() + } + return nil, nil } diff --git a/process/mock/txValidatorStub.go b/process/mock/txValidatorStub.go index 103a3c3ab30..1652020a12c 100644 --- a/process/mock/txValidatorStub.go +++ b/process/mock/txValidatorStub.go @@ -6,14 +6,14 @@ import ( // TxValidatorStub - type TxValidatorStub struct { - CheckTxValidityCalled func(txValidatorHandler process.TxValidatorHandler) error - CheckTxWhiteListCalled func(data process.InterceptedData) error - RejectedTxsCalled func() uint64 + CheckTxValidityCalled func(interceptedTx process.InterceptedTransactionHandler) error + CheckTxWhiteListCalled func(data process.InterceptedData) error + RejectedTxsCalled func() uint64 } // CheckTxValidity - -func (t *TxValidatorStub) CheckTxValidity(txValidatorHandler process.TxValidatorHandler) error { - return t.CheckTxValidityCalled(txValidatorHandler) +func (t *TxValidatorStub) CheckTxValidity(interceptedTx process.InterceptedTransactionHandler) error { + return t.CheckTxValidityCalled(interceptedTx) } // CheckTxWhiteList - diff --git a/process/peer/process.go b/process/peer/process.go index 83b3e2616c3..f7d15ed7917 100644 --- a/process/peer/process.go +++ b/process/peer/process.go @@ -14,6 +14,7 
@@ import ( "github.com/multiversx/mx-chain-core-go/data/block" "github.com/multiversx/mx-chain-core-go/marshal" "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/common/errChan" "github.com/multiversx/mx-chain-go/common/validatorInfo" "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/process" @@ -450,7 +451,7 @@ func (vs *validatorStatistics) getValidatorDataFromLeaves( validators[currentShardId] = append(validators[currentShardId], validatorInfoData) } - err := common.GetErrorFromChanNonBlocking(leavesChannels.ErrChan) + err := leavesChannels.ErrChan.ReadFromChanNonBlocking() if err != nil { return nil, err } @@ -565,7 +566,7 @@ func (vs *validatorStatistics) GetValidatorInfoForRootHash(rootHash []byte) (map leavesChannels := &common.TrieIteratorChannels{ LeavesChan: make(chan core.KeyValueHolder, common.TrieLeavesChannelDefaultCapacity), - ErrChan: make(chan error, 1), + ErrChan: errChan.NewErrChanWrapper(), } err := vs.peerAdapter.GetAllLeaves(leavesChannels, context.Background(), rootHash) if err != nil { diff --git a/process/peer/process_test.go b/process/peer/process_test.go index 8d35dd9d45a..383d17d51e1 100644 --- a/process/peer/process_test.go +++ b/process/peer/process_test.go @@ -88,16 +88,19 @@ func createMockArguments() peer.ArgValidatorStatisticsProcessor { MaxGasLimitPerMetaMiniBlock: "10000000", MaxGasLimitPerTx: "10000000", MinGasLimit: "10", + ExtraGasLimitGuardedTx: "50000", }, }, - MinGasPrice: "10", - GasPerDataByte: "1", - GasPriceModifier: 1.0, + MinGasPrice: "10", + GasPerDataByte: "1", + GasPriceModifier: 1.0, + MaxGasPriceSetGuardian: "100000", }, }, EpochNotifier: &epochNotifier.EpochNotifierStub{}, EnableEpochsHandler: &testscommon.EnableEpochsHandlerStub{}, BuiltInFunctionsCostHandler: &mock.BuiltInCostHandlerStub{}, + TxVersionChecker: &testscommon.TxVersionCheckerStub{}, } economicsData, _ := economics.NewEconomicsData(argsNewEconomicsData) @@ -1969,7 +1972,7 @@ func TestValidatorStatistics_ResetValidatorStatisticsAtNewEpoch(t *testing.T) { go func() { ch.LeavesChan <- keyValStorage.NewKeyValStorage(addrBytes0, marshalizedPa0) close(ch.LeavesChan) - close(ch.ErrChan) + ch.ErrChan.Close() }() return nil @@ -2032,7 +2035,7 @@ func TestValidatorStatistics_Process(t *testing.T) { ch.LeavesChan <- keyValStorage.NewKeyValStorage(addrBytes0, marshalizedPa0) ch.LeavesChan <- keyValStorage.NewKeyValStorage(addrBytesMeta, marshalizedPaMeta) close(ch.LeavesChan) - close(ch.ErrChan) + ch.ErrChan.Close() }() return nil @@ -2078,7 +2081,7 @@ func TestValidatorStatistics_GetValidatorInfoForRootHash(t *testing.T) { peerAdapter.GetAllLeavesCalled = func(ch *common.TrieIteratorChannels, ctx context.Context, rootHash []byte) error { if bytes.Equal(rootHash, hash) { go func() { - ch.ErrChan <- expectedErr + ch.ErrChan.WriteInChanNonBlocking(expectedErr) close(ch.LeavesChan) }() @@ -2108,7 +2111,7 @@ func TestValidatorStatistics_GetValidatorInfoForRootHash(t *testing.T) { ch.LeavesChan <- keyValStorage.NewKeyValStorage(addrBytes0, marshalizedPa0) ch.LeavesChan <- keyValStorage.NewKeyValStorage(addrBytesMeta, marshalizedPaMeta) close(ch.LeavesChan) - close(ch.ErrChan) + ch.ErrChan.Close() }() return nil @@ -2555,7 +2558,7 @@ func updateArgumentsWithNeeded(arguments peer.ArgValidatorStatisticsProcessor) { ch.LeavesChan <- keyValStorage.NewKeyValStorage(addrBytes0, marshalizedPa0) ch.LeavesChan <- keyValStorage.NewKeyValStorage(addrBytesMeta, marshalizedPaMeta) close(ch.LeavesChan) - close(ch.ErrChan) + 
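// The hunks around this point replace the raw `chan error` inside
// common.TrieIteratorChannels with the errChan wrapper (common/errChan), used
// through NewErrChanWrapper, WriteInChanNonBlocking, ReadFromChanNonBlocking and
// Close. The wrapper's implementation is not part of this diff; the sketch below
// is only a plausible reading of the semantics the call sites rely on (the real
// type presumably also guards against double close):
package errchansketch

type errChanWrapper struct {
	ch chan error
}

func newErrChanWrapper() *errChanWrapper {
	// capacity 1: the first error is retained, later writers are dropped
	return &errChanWrapper{ch: make(chan error, 1)}
}

// WriteInChanNonBlocking stores err unless an error is already stored; it never blocks.
func (e *errChanWrapper) WriteInChanNonBlocking(err error) {
	select {
	case e.ch <- err:
	default:
	}
}

// ReadFromChanNonBlocking returns the stored error, or nil; it never blocks.
func (e *errChanWrapper) ReadFromChanNonBlocking() error {
	select {
	case err := <-e.ch:
		return err
	default:
		return nil
	}
}

// Close releases the underlying channel.
func (e *errChanWrapper) Close() {
	close(e.ch)
}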
ch.ErrChan.Close() }() return nil diff --git a/process/rewardTransaction/interceptedRewardTransaction.go b/process/rewardTransaction/interceptedRewardTransaction.go index 73e19fed81d..e96a3cf0eca 100644 --- a/process/rewardTransaction/interceptedRewardTransaction.go +++ b/process/rewardTransaction/interceptedRewardTransaction.go @@ -82,11 +82,6 @@ func (inRTx *InterceptedRewardTransaction) processFields(rewardTxBuff []byte) er inRTx.rcvShard = inRTx.coordinator.ComputeId(inRTx.rTx.RcvAddr) inRTx.sndShard = core.MetachainShardId - if inRTx.coordinator.SelfId() == core.MetachainShardId { - inRTx.isForCurrentShard = false - return nil - } - isForCurrentShardRecv := inRTx.rcvShard == inRTx.coordinator.SelfId() isForCurrentShardSender := inRTx.sndShard == inRTx.coordinator.SelfId() inRTx.isForCurrentShard = isForCurrentShardRecv || isForCurrentShardSender diff --git a/process/rewardTransaction/interceptedRewardTransaction_test.go b/process/rewardTransaction/interceptedRewardTransaction_test.go index de27f51a130..03f87c791e4 100644 --- a/process/rewardTransaction/interceptedRewardTransaction_test.go +++ b/process/rewardTransaction/interceptedRewardTransaction_test.go @@ -1,6 +1,7 @@ package rewardTransaction_test import ( + "bytes" "fmt" "math/big" "testing" @@ -318,6 +319,88 @@ func TestNewInterceptedRewardTransaction_CheckValidityShouldWork(t *testing.T) { assert.Nil(t, err) } +func TestNewInterceptedRewardTransaction_IsForCurrentShard(t *testing.T) { + t.Parallel() + + receiverAddress := []byte("receiver address") + testShardID := uint32(2) + value := big.NewInt(100) + rewTx := rewardTx.RewardTx{ + Round: 0, + Epoch: 0, + Value: value, + RcvAddr: receiverAddress, + } + + mockShardCoordinator := &mock.ShardCoordinatorStub{} + marshalizer := &mock.MarshalizerMock{} + txBuff, _ := marshalizer.Marshal(&rewTx) + t.Run("same shard ID with the receiver should return true", func(t *testing.T) { + mockShardCoordinator.ComputeIdCalled = func(address []byte) uint32 { + if bytes.Equal(address, receiverAddress) { + return testShardID + } + + return 0 + } + mockShardCoordinator.SelfIdCalled = func() uint32 { + return testShardID + } + + irt, err := rewardTransaction.NewInterceptedRewardTransaction( + txBuff, + marshalizer, + &hashingMocks.HasherMock{}, + createMockPubkeyConverter(), + mockShardCoordinator) + assert.Nil(t, err) + + assert.True(t, irt.IsForCurrentShard()) + }) + t.Run("metachain should return true", func(t *testing.T) { + mockShardCoordinator.ComputeIdCalled = func(address []byte) uint32 { + if bytes.Equal(address, receiverAddress) { + return testShardID + } + + return 0 + } + mockShardCoordinator.SelfIdCalled = func() uint32 { + return core.MetachainShardId + } + + irt, err := rewardTransaction.NewInterceptedRewardTransaction( + txBuff, + marshalizer, + &hashingMocks.HasherMock{}, + createMockPubkeyConverter(), + mockShardCoordinator) + assert.Nil(t, err) + assert.True(t, irt.IsForCurrentShard()) + }) + t.Run("different shard should return false", func(t *testing.T) { + mockShardCoordinator.ComputeIdCalled = func(address []byte) uint32 { + if bytes.Equal(address, receiverAddress) { + return testShardID + } + + return 0 + } + mockShardCoordinator.SelfIdCalled = func() uint32 { + return testShardID + 1 // different with the receiver but not metachain + } + + irt, err := rewardTransaction.NewInterceptedRewardTransaction( + txBuff, + marshalizer, + &hashingMocks.HasherMock{}, + createMockPubkeyConverter(), + mockShardCoordinator) + assert.Nil(t, err) + assert.False(t, irt.IsForCurrentShard()) + 
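// The interceptedRewardTransaction hunk above deletes the early return that
// forced isForCurrentShard to false on the metachain. Since reward transactions
// are always sent by the metachain (sndShard is hardcoded to
// core.MetachainShardId), a metachain node now matches on the sender side -
// which is exactly what the three t.Run cases above assert. The resulting rule,
// condensed into a self-contained sketch:
package rewardsketch

// metachainShardId stands in for core.MetachainShardId (illustrative value).
const metachainShardId = ^uint32(0)

// isRewardTxForCurrentShard mirrors the updated processFields logic: the sender
// shard is always the metachain, the receiver shard comes from the address.
func isRewardTxForCurrentShard(selfShard, rcvShard uint32) bool {
	return rcvShard == selfShard || metachainShardId == selfShard
}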
}) +} + func TestInterceptedRewardTransaction_Type(t *testing.T) { t.Parallel() diff --git a/process/smartContract/builtInFunctions/factory.go b/process/smartContract/builtInFunctions/factory.go index 8e241e0bec9..b13c0fa0b12 100644 --- a/process/smartContract/builtInFunctions/factory.go +++ b/process/smartContract/builtInFunctions/factory.go @@ -26,6 +26,7 @@ type ArgsCreateBuiltInFunctionContainer struct { ShardCoordinator sharding.Coordinator EpochNotifier vmcommon.EpochNotifier EnableEpochsHandler vmcommon.EnableEpochsHandler + GuardedAccountHandler vmcommon.GuardedAccountHandler AutomaticCrawlerAddresses [][]byte MaxNumNodesInTransferRole uint32 } @@ -53,6 +54,9 @@ func CreateBuiltInFunctionsFactory(args ArgsCreateBuiltInFunctionContainer) (vmc if check.IfNil(args.EnableEpochsHandler) { return nil, process.ErrNilEnableEpochsHandler } + if check.IfNil(args.GuardedAccountHandler) { + return nil, process.ErrNilGuardedAccountHandler + } vmcommonAccounts, ok := args.Accounts.(vmcommon.AccountsAdapter) if !ok { @@ -79,6 +83,7 @@ func CreateBuiltInFunctionsFactory(args ArgsCreateBuiltInFunctionContainer) (vmc Accounts: vmcommonAccounts, ShardCoordinator: args.ShardCoordinator, EnableEpochsHandler: args.EnableEpochsHandler, + GuardedAccountHandler: args.GuardedAccountHandler, ConfigAddress: crawlerAllowedAddress, MaxNumOfAddressesForTransferRole: args.MaxNumNodesInTransferRole, } diff --git a/process/smartContract/builtInFunctions/factory_test.go b/process/smartContract/builtInFunctions/factory_test.go index 8f9979ac698..9b4d5b9b9f9 100644 --- a/process/smartContract/builtInFunctions/factory_test.go +++ b/process/smartContract/builtInFunctions/factory_test.go @@ -14,6 +14,7 @@ import ( "github.com/multiversx/mx-chain-go/sharding" "github.com/multiversx/mx-chain-go/testscommon" "github.com/multiversx/mx-chain-go/testscommon/epochNotifier" + "github.com/multiversx/mx-chain-go/testscommon/guardianMocks" stateMock "github.com/multiversx/mx-chain-go/testscommon/state" "github.com/stretchr/testify/assert" ) @@ -36,6 +37,7 @@ func createMockArguments() ArgsCreateBuiltInFunctionContainer { bytes.Repeat([]byte{1}, 32), }, MaxNumNodesInTransferRole: 100, + GuardedAccountHandler: &guardianMocks.GuardedAccountHandlerStub{}, } return args @@ -84,6 +86,8 @@ func fillGasMapBuiltInCosts(value uint64) map[string]uint64 { gasMap["ESDTNFTAddUri"] = value gasMap["ESDTNFTUpdateAttributes"] = value gasMap["ESDTNFTMultiTransfer"] = value + gasMap["SetGuardian"] = value + gasMap["GuardAccount"] = value return gasMap } @@ -160,7 +164,7 @@ func TestCreateBuiltInFunctionContainer(t *testing.T) { args := createMockArguments() builtInFuncFactory, err := CreateBuiltInFunctionsFactory(args) assert.Nil(t, err) - assert.Equal(t, len(builtInFuncFactory.BuiltInFunctionContainer().Keys()), 31) + assert.Equal(t, 34, len(builtInFuncFactory.BuiltInFunctionContainer().Keys())) err = builtInFuncFactory.SetPayableHandler(&testscommon.BlockChainHookStub{}) assert.Nil(t, err) diff --git a/process/smartContract/hooks/blockChainHook.go b/process/smartContract/hooks/blockChainHook.go index 5a8904f5b8b..cfdf70e1178 100644 --- a/process/smartContract/hooks/blockChainHook.go +++ b/process/smartContract/hooks/blockChainHook.go @@ -565,7 +565,7 @@ func (bh *BlockChainHookImpl) FilterCodeMetadataForUpgrade(input []byte) ([]byte } raw := vmcommon.CodeMetadataFromBytes(input) - filtered := bh.ApplyFiltersOnCodeMetadata(raw) + filtered := bh.ApplyFiltersOnSCCodeMetadata(raw) if bytes.Equal(input, filtered.ToBytes()) { return filtered.ToBytes(), 
nil } @@ -573,9 +573,10 @@ func (bh *BlockChainHookImpl) FilterCodeMetadataForUpgrade(input []byte) ([]byte return nil, parsers.ErrInvalidCodeMetadata } -// ApplyFiltersOnCodeMetadata will apply all known filters on the provided code metadata value -func (bh *BlockChainHookImpl) ApplyFiltersOnCodeMetadata(codeMetadata vmcommon.CodeMetadata) vmcommon.CodeMetadata { +// ApplyFiltersOnSCCodeMetadata will apply all known filters on the provided code metadata value +func (bh *BlockChainHookImpl) ApplyFiltersOnSCCodeMetadata(codeMetadata vmcommon.CodeMetadata) vmcommon.CodeMetadata { codeMetadata.PayableBySC = codeMetadata.PayableBySC && bh.enableEpochsHandler.IsPayableBySCFlagEnabled() + codeMetadata.Guarded = false return codeMetadata } diff --git a/process/smartContract/hooks/blockChainHook_test.go b/process/smartContract/hooks/blockChainHook_test.go index 6a0420bde68..75d2b9e37c3 100644 --- a/process/smartContract/hooks/blockChainHook_test.go +++ b/process/smartContract/hooks/blockChainHook_test.go @@ -1473,7 +1473,7 @@ func TestBlockChainHookImpl_ProcessBuiltInFunction(t *testing.T) { args.Accounts = &stateMock.AccountsStub{ GetExistingAccountCalled: func(addressContainer []byte) (vmcommon.AccountHandler, error) { require.Equal(t, addrSender, addressContainer) - return &stateMock.UserAccountStub{}, nil + return &stateMock.StateUserAccountHandlerStub{}, nil }, } bh, _ := hooks.NewBlockChainHookImpl(args) @@ -1520,7 +1520,7 @@ func TestBlockChainHookImpl_ProcessBuiltInFunction(t *testing.T) { LoadAccountCalled: func(addressContainer []byte) (vmcommon.AccountHandler, error) { require.Equal(t, addrReceiver, addressContainer) - return &stateMock.UserAccountStub{}, nil + return &stateMock.StateUserAccountHandlerStub{}, nil }, } @@ -2122,7 +2122,7 @@ func TestBlockChainHookImpl_ApplyFiltersOnCodeMetadata(t *testing.T) { Readable: true, } - resulted := bh.ApplyFiltersOnCodeMetadata(provided) + resulted := bh.ApplyFiltersOnSCCodeMetadata(provided) expected := vmcommon.CodeMetadata{ Payable: true, @@ -2148,7 +2148,7 @@ func TestBlockChainHookImpl_ApplyFiltersOnCodeMetadata(t *testing.T) { Readable: true, } - resulted := bh.ApplyFiltersOnCodeMetadata(provided) + resulted := bh.ApplyFiltersOnSCCodeMetadata(provided) expected := vmcommon.CodeMetadata{ Payable: true, PayableBySC: true, @@ -2163,7 +2163,7 @@ func TestBlockChainHookImpl_ApplyFiltersOnCodeMetadata(t *testing.T) { Upgradeable: true, Readable: true, } - resulted = bh.ApplyFiltersOnCodeMetadata(provided) + resulted = bh.ApplyFiltersOnSCCodeMetadata(provided) expected = vmcommon.CodeMetadata{ Payable: true, PayableBySC: false, diff --git a/process/smartContract/process_test.go b/process/smartContract/process_test.go index 768f452bf51..f13b8ce5d16 100644 --- a/process/smartContract/process_test.go +++ b/process/smartContract/process_test.go @@ -86,7 +86,7 @@ func createMockSmartContractProcessorArguments() ArgsNewSmartContractProcessor { BadTxForwarder: &mock.IntermediateTransactionHandlerMock{}, TxFeeHandler: &mock.FeeAccumulatorStub{}, TxLogsProcessor: &mock.TxLogsProcessorStub{}, - EconomicsFee: &mock.FeeHandlerStub{ + EconomicsFee: &economicsmocks.EconomicsHandlerStub{ DeveloperPercentageCalled: func() float64 { return 0.0 }, @@ -973,7 +973,7 @@ func TestScProcessor_DeploySmartContractEconomicsFeeValidateFails(t *testing.T) arguments.VmContainer = vm arguments.ArgsParser = argParser - arguments.EconomicsFee = &mock.FeeHandlerStub{ + arguments.EconomicsFee = &economicsmocks.EconomicsHandlerStub{ CheckValidityTxValuesCalled: func(tx 
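// The blockChainHook hunk above renames ApplyFiltersOnCodeMetadata to
// ApplyFiltersOnSCCodeMetadata and adds one behavioural change: the Guarded
// code-metadata bit is unconditionally cleared, so a smart contract can never
// carry guarded-account metadata. A minimal sketch of the filter, with a
// trimmed struct standing in for vmcommon.CodeMetadata:
package metadatasketch

type codeMetadata struct {
	Payable     bool
	PayableBySC bool
	Upgradeable bool
	Readable    bool
	Guarded     bool
}

// applyFiltersOnSCCodeMetadata mirrors the hook: PayableBySC survives only when
// the activation flag is enabled, and Guarded is always stripped.
func applyFiltersOnSCCodeMetadata(md codeMetadata, payableBySCFlagEnabled bool) codeMetadata {
	md.PayableBySC = md.PayableBySC && payableBySCFlagEnabled
	md.Guarded = false
	return md
}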
data.TransactionWithFeeHandler) error { return expectedError }, @@ -1187,7 +1187,7 @@ func TestScProcessor_DeploySmartContractUpdateDeveloperRewardsFails(t *testing.T arguments.VmContainer = vm arguments.ArgsParser = argParser arguments.AccountsDB = accntState - economicsFee := &mock.FeeHandlerStub{ + economicsFee := &economicsmocks.EconomicsHandlerStub{ DeveloperPercentageCalled: func() float64 { return 0.0 }, @@ -1930,7 +1930,7 @@ func TestScProcessor_InitializeVMInputFromTx_ShouldErrNotEnoughGas(t *testing.T) arguments := createMockSmartContractProcessorArguments() arguments.VmContainer = vm arguments.ArgsParser = argParser - arguments.EconomicsFee = &mock.FeeHandlerStub{ + arguments.EconomicsFee = &economicsmocks.EconomicsHandlerStub{ ComputeGasLimitCalled: func(tx data.TransactionWithFeeHandler) uint64 { return 1000 }, @@ -2371,7 +2371,7 @@ func TestScProcessor_ProcessSCPaymentWithNewFlags(t *testing.T) { txFee := big.NewInt(25) arguments := createMockSmartContractProcessorArguments() - arguments.EconomicsFee = &mock.FeeHandlerStub{ + arguments.EconomicsFee = &economicsmocks.EconomicsHandlerStub{ DeveloperPercentageCalled: func() float64 { return 0.0 }, @@ -2492,7 +2492,7 @@ func TestScProcessor_RefundGasToSender(t *testing.T) { minGasPrice := uint64(10) arguments := createMockSmartContractProcessorArguments() - arguments.EconomicsFee = &mock.FeeHandlerStub{MinGasPriceCalled: func() uint64 { + arguments.EconomicsFee = &economicsmocks.EconomicsHandlerStub{MinGasPriceCalled: func() uint64 { return minGasPrice }} arguments.EnableEpochsHandler = &testscommon.EnableEpochsHandlerStub{} @@ -2532,7 +2532,7 @@ func TestScProcessor_DoNotRefundGasToSenderForAsyncCall(t *testing.T) { minGasPrice := uint64(10) arguments := createMockSmartContractProcessorArguments() - arguments.EconomicsFee = &mock.FeeHandlerStub{MinGasPriceCalled: func() uint64 { + arguments.EconomicsFee = &economicsmocks.EconomicsHandlerStub{MinGasPriceCalled: func() uint64 { return minGasPrice }} arguments.EnableEpochsHandler = &testscommon.EnableEpochsHandlerStub{} @@ -3637,7 +3637,7 @@ func TestSmartContractProcessor_computeTotalConsumedFeeAndDevRwd(t *testing.T) { shardCoordinator := &mock.CoordinatorStub{ComputeIdCalled: func(address []byte) uint32 { return 0 }} - feeHandler := &mock.FeeHandlerStub{ + feeHandler := &economicsmocks.EconomicsHandlerStub{ ComputeGasLimitCalled: func(tx data.TransactionWithFeeHandler) uint64 { return 0 }, @@ -3853,7 +3853,7 @@ func TestScProcessor_CreateRefundForRelayerFromAnotherShard(t *testing.T) { return 0 }} arguments.ShardCoordinator = shardCoordinator - arguments.EconomicsFee = &mock.FeeHandlerStub{ComputeFeeForProcessingCalled: func(tx data.TransactionWithFeeHandler, gasToUse uint64) *big.Int { + arguments.EconomicsFee = &economicsmocks.EconomicsHandlerStub{ComputeFeeForProcessingCalled: func(tx data.TransactionWithFeeHandler, gasToUse uint64) *big.Int { return big.NewInt(100) }} sc, _ := NewSmartContractProcessor(arguments) @@ -3943,7 +3943,7 @@ func TestProcessIfErrorCheckBackwardsCompatibilityProcessTransactionFeeCalledSho return 0 }} arguments.ShardCoordinator = shardCoordinator - arguments.EconomicsFee = &mock.FeeHandlerStub{ + arguments.EconomicsFee = &economicsmocks.EconomicsHandlerStub{ ComputeFeeForProcessingCalled: func(tx data.TransactionWithFeeHandler, gasToUse uint64) *big.Int { return big.NewInt(100) }, @@ -3982,7 +3982,7 @@ func TestProcessIfErrorCheckBackwardsCompatibilityProcessTransactionFeeCalledSho return 0 }} arguments.ShardCoordinator = shardCoordinator - 
arguments.EconomicsFee = &mock.FeeHandlerStub{ + arguments.EconomicsFee = &economicsmocks.EconomicsHandlerStub{ ComputeFeeForProcessingCalled: func(tx data.TransactionWithFeeHandler, gasToUse uint64) *big.Int { return big.NewInt(100) }, @@ -4208,11 +4208,13 @@ func createRealEconomicsDataArgs() *economics.ArgsNewEconomicsData { MaxGasLimitPerMetaMiniBlock: "15000000000", MaxGasLimitPerTx: "1500000000", MinGasLimit: "50000", + ExtraGasLimitGuardedTx: "50000", }, }, - GasPerDataByte: "1500", - MinGasPrice: "1000000000", - GasPriceModifier: 0.01, + GasPerDataByte: "1500", + MinGasPrice: "1000000000", + GasPriceModifier: 0.01, + MaxGasPriceSetGuardian: "100000", }, }, EpochNotifier: &epochNotifier.EpochNotifierStub{}, @@ -4220,6 +4222,7 @@ func createRealEconomicsDataArgs() *economics.ArgsNewEconomicsData { IsGasPriceModifierFlagEnabledField: true, }, BuiltInFunctionsCostHandler: &mock.BuiltInCostHandlerStub{}, + TxVersionChecker: &testscommon.TxVersionCheckerStub{}, } } diff --git a/process/smartContract/scQueryService_test.go b/process/smartContract/scQueryService_test.go index 75c30d8f773..61f7ddfd285 100644 --- a/process/smartContract/scQueryService_test.go +++ b/process/smartContract/scQueryService_test.go @@ -17,6 +17,7 @@ import ( "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/process/mock" "github.com/multiversx/mx-chain-go/testscommon" + "github.com/multiversx/mx-chain-go/testscommon/economicsmocks" vmcommon "github.com/multiversx/mx-chain-vm-common-go" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -27,7 +28,7 @@ const DummyScAddress = "00000000000000000500fabd9501b7e5353de57a4e319857c2fb9908 func createMockArgumentsForSCQuery() ArgsNewSCQueryService { return ArgsNewSCQueryService{ VmContainer: &mock.VMContainerMock{}, - EconomicsFee: &mock.FeeHandlerStub{}, + EconomicsFee: &economicsmocks.EconomicsHandlerStub{}, BlockChainHook: &testscommon.BlockChainHookStub{}, BlockChain: &testscommon.ChainHandlerStub{}, WasmVMChangeLocker: &sync.RWMutex{}, @@ -245,8 +246,8 @@ func TestExecuteQuery_ShouldReceiveQueryCorrectly(t *testing.T) { return mockVM, nil }, } - argsNewSCQuery.EconomicsFee = &mock.FeeHandlerStub{ - MaxGasLimitPerBlockCalled: func() uint64 { + argsNewSCQuery.EconomicsFee = &economicsmocks.EconomicsHandlerStub{ + MaxGasLimitPerBlockCalled: func(_ uint32) uint64 { return uint64(math.MaxUint64) }, } @@ -287,8 +288,8 @@ func TestExecuteQuery_ReturnsCorrectly(t *testing.T) { return mockVM, nil }, } - argsNewSCQuery.EconomicsFee = &mock.FeeHandlerStub{ - MaxGasLimitPerBlockCalled: func() uint64 { + argsNewSCQuery.EconomicsFee = &economicsmocks.EconomicsHandlerStub{ + MaxGasLimitPerBlockCalled: func(_ uint32) uint64 { return uint64(math.MaxUint64) }, } @@ -328,8 +329,8 @@ func TestExecuteQuery_GasProvidedShouldBeApplied(t *testing.T) { return mockVM, nil }, } - argsNewSCQuery.EconomicsFee = &mock.FeeHandlerStub{ - MaxGasLimitPerBlockCalled: func() uint64 { + argsNewSCQuery.EconomicsFee = &economicsmocks.EconomicsHandlerStub{ + MaxGasLimitPerBlockCalled: func(_ uint32) uint64 { return uint64(math.MaxUint64) }, } @@ -365,8 +366,8 @@ func TestExecuteQuery_GasProvidedShouldBeApplied(t *testing.T) { return mockVM, nil }, } - argsNewSCQuery.EconomicsFee = &mock.FeeHandlerStub{ - MaxGasLimitPerBlockCalled: func() uint64 { + argsNewSCQuery.EconomicsFee = &economicsmocks.EconomicsHandlerStub{ + MaxGasLimitPerBlockCalled: func(_ uint32) uint64 { return uint64(math.MaxUint64) }, } @@ -404,8 +405,8 @@ func 
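// The economics fixtures in this diff gain two guardian-related settings:
// ExtraGasLimitGuardedTx ("50000") and MaxGasPriceSetGuardian ("100000"). How
// economics.NewEconomicsData consumes them is outside this diff, so the sketch
// below is only an assumed illustration of their roles - a gas-limit surcharge
// for guarded transactions, and the SetGuardian price cap enforced by
// checkMaxGasPrice further down:
package economicssketch

type guardianEconomics struct {
	extraGasLimitGuardedTx uint64 // assumed: added on top of the base gas limit
	maxGasPriceSetGuardian uint64 // cap consulted by the tx interceptor
}

// computeGasLimit adds the guarded-transaction surcharge when the transaction
// carries a guardian co-signature (assumed wiring, for illustration only).
func (g *guardianEconomics) computeGasLimit(baseGasLimit uint64, isGuarded bool) uint64 {
	if isGuarded {
		return baseGasLimit + g.extraGasLimitGuardedTx
	}
	return baseGasLimit
}

// MaxGasPriceSetGuardian exposes the cap checked before accepting an unguarded
// SetGuardian call.
func (g *guardianEconomics) MaxGasPriceSetGuardian() uint64 {
	return g.maxGasPriceSetGuardian
}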
TestExecuteQuery_WhenNotOkCodeShouldNotErr(t *testing.T) { return mockVM, nil }, } - argsNewSCQuery.EconomicsFee = &mock.FeeHandlerStub{ - MaxGasLimitPerBlockCalled: func() uint64 { + argsNewSCQuery.EconomicsFee = &economicsmocks.EconomicsHandlerStub{ + MaxGasLimitPerBlockCalled: func(_ uint32) uint64 { return uint64(math.MaxUint64) }, } @@ -452,8 +453,8 @@ func TestExecuteQuery_ShouldCallRunScSequentially(t *testing.T) { return mockVM, nil }, } - argsNewSCQuery.EconomicsFee = &mock.FeeHandlerStub{ - MaxGasLimitPerBlockCalled: func() uint64 { + argsNewSCQuery.EconomicsFee = &economicsmocks.EconomicsHandlerStub{ + MaxGasLimitPerBlockCalled: func(_ uint32) uint64 { return uint64(math.MaxUint64) }, } @@ -500,8 +501,8 @@ func TestSCQueryService_ExecuteQueryShouldNotIncludeCallerAddressAndValue(t *tes return mockVM, nil }, } - argsNewSCQuery.EconomicsFee = &mock.FeeHandlerStub{ - MaxGasLimitPerBlockCalled: func() uint64 { + argsNewSCQuery.EconomicsFee = &economicsmocks.EconomicsHandlerStub{ + MaxGasLimitPerBlockCalled: func(_ uint32) uint64 { return uint64(math.MaxUint64) }, } @@ -542,8 +543,8 @@ func TestSCQueryService_ExecuteQueryShouldIncludeCallerAddressAndValue(t *testin return mockVM, nil }, } - argsNewSCQuery.EconomicsFee = &mock.FeeHandlerStub{ - MaxGasLimitPerBlockCalled: func() uint64 { + argsNewSCQuery.EconomicsFee = &economicsmocks.EconomicsHandlerStub{ + MaxGasLimitPerBlockCalled: func(_ uint32) uint64 { return uint64(math.MaxUint64) }, } @@ -676,8 +677,8 @@ func TestSCQueryService_ComputeTxCostScCall(t *testing.T) { return mockVM, nil }, } - argsNewSCQuery.EconomicsFee = &mock.FeeHandlerStub{ - MaxGasLimitPerBlockCalled: func() uint64 { + argsNewSCQuery.EconomicsFee = &economicsmocks.EconomicsHandlerStub{ + MaxGasLimitPerBlockCalled: func(_ uint32) uint64 { return uint64(math.MaxUint64) }, } @@ -713,8 +714,8 @@ func TestSCQueryService_ComputeScCallGasLimitRetCodeNotOK(t *testing.T) { return mockVM, nil }, } - argsNewSCQuery.EconomicsFee = &mock.FeeHandlerStub{ - MaxGasLimitPerBlockCalled: func() uint64 { + argsNewSCQuery.EconomicsFee = &economicsmocks.EconomicsHandlerStub{ + MaxGasLimitPerBlockCalled: func(_ uint32) uint64 { return uint64(math.MaxUint64) }, } @@ -739,7 +740,7 @@ func TestNewSCQueryService_CloseShouldWork(t *testing.T) { return nil }, }, - EconomicsFee: &mock.FeeHandlerStub{}, + EconomicsFee: &economicsmocks.EconomicsHandlerStub{}, BlockChainHook: &testscommon.BlockChainHookStub{}, BlockChain: &testscommon.ChainHandlerStub{}, WasmVMChangeLocker: &sync.RWMutex{}, diff --git a/process/smartContract/vmInput.go b/process/smartContract/vmInput.go index aaf9ce56e92..ad79469a2d0 100644 --- a/process/smartContract/vmInput.go +++ b/process/smartContract/vmInput.go @@ -20,7 +20,7 @@ func (sc *scProcessor) createVMDeployInput(tx data.TransactionHandler) (*vmcommo vmCreateInput := &vmcommon.ContractCreateInput{} vmCreateInput.ContractCode = deployData.Code // when executing SC deploys we should always apply the flags - codeMetadata := sc.blockChainHook.ApplyFiltersOnCodeMetadata(deployData.CodeMetadata) + codeMetadata := sc.blockChainHook.ApplyFiltersOnSCCodeMetadata(deployData.CodeMetadata) vmCreateInput.ContractCodeMetadata = codeMetadata.ToBytes() vmCreateInput.VMInput = vmcommon.VMInput{} err = sc.initializeVMInputFromTx(&vmCreateInput.VMInput, tx) @@ -95,6 +95,11 @@ func (sc *scProcessor) createVMCallInput( vmCallInput.CurrentTxHash = txHash vmCallInput.GasLocked = gasLocked + gtx, ok := tx.(data.GuardedTransactionHandler) + if ok { + vmCallInput.TxGuardian = 
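// In the createVMCallInput hunk at this exact point, the guardian address is
// forwarded to the VM only when the transaction implements
// data.GuardedTransactionHandler; the comma-ok assertion keeps legacy
// transaction types working unchanged. A reduced, self-contained sketch of the
// step (interface trimmed to the one method used here):
package vminputsketch

type guardedTransactionHandler interface {
	GetGuardianAddr() []byte
}

// txGuardian extracts the guardian address when the transaction carries one,
// and returns nil for every other transaction type.
func txGuardian(tx interface{}) []byte {
	gtx, ok := tx.(guardedTransactionHandler)
	if !ok {
		return nil
	}
	return gtx.GetGuardianAddr()
}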
gtx.GetGuardianAddr() + } + scr, isSCR := tx.(*smartContractResult.SmartContractResult) if isSCR { vmCallInput.OriginalTxHash = scr.GetOriginalTxHash() diff --git a/process/track/baseBlockTrack.go b/process/track/baseBlockTrack.go index f4a264e5086..22eb1c86cc1 100644 --- a/process/track/baseBlockTrack.go +++ b/process/track/baseBlockTrack.go @@ -787,6 +787,9 @@ func checkTrackerNilParameters(arguments ArgBaseTracker) error { if check.IfNil(arguments.FeeHandler) { return process.ErrNilEconomicsData } + if check.IfNil(arguments.WhitelistHandler) { + return process.ErrNilWhiteListHandler + } return nil } diff --git a/process/track/baseBlockTrack_test.go b/process/track/baseBlockTrack_test.go index 285c96fdbb5..8c919cd9ee7 100644 --- a/process/track/baseBlockTrack_test.go +++ b/process/track/baseBlockTrack_test.go @@ -21,6 +21,7 @@ import ( "github.com/multiversx/mx-chain-go/storage/storageunit" "github.com/multiversx/mx-chain-go/testscommon" dataRetrieverMock "github.com/multiversx/mx-chain-go/testscommon/dataRetriever" + "github.com/multiversx/mx-chain-go/testscommon/economicsmocks" "github.com/multiversx/mx-chain-go/testscommon/hashingMocks" logger "github.com/multiversx/mx-chain-logger-go" "github.com/stretchr/testify/assert" @@ -110,7 +111,7 @@ func CreateShardTrackerMockArguments() track.ArgShardTracker { } headerValidator, _ := processBlock.NewHeaderValidator(argsHeaderValidator) whitelistHandler := &testscommon.WhiteListHandlerStub{} - feeHandler := &mock.FeeHandlerStub{ + feeHandler := &economicsmocks.EconomicsHandlerStub{ MaxGasLimitPerBlockForSafeCrossShardCalled: func() uint64 { return maxGasLimitPerBlock }, @@ -148,7 +149,7 @@ func CreateMetaTrackerMockArguments() track.ArgMetaTracker { } headerValidator, _ := processBlock.NewHeaderValidator(argsHeaderValidator) whitelistHandler := &testscommon.WhiteListHandlerStub{} - feeHandler := &mock.FeeHandlerStub{ + feeHandler := &economicsmocks.EconomicsHandlerStub{ MaxGasLimitPerBlockForSafeCrossShardCalled: func() uint64 { return maxGasLimitPerBlock }, @@ -184,7 +185,7 @@ func CreateBaseTrackerMockArguments() track.ArgBaseTracker { Marshalizer: &mock.MarshalizerMock{}, } headerValidator, _ := processBlock.NewHeaderValidator(argsHeaderValidator) - feeHandler := &mock.FeeHandlerStub{ + feeHandler := &economicsmocks.EconomicsHandlerStub{ MaxGasLimitPerBlockForSafeCrossShardCalled: func() uint64 { return maxGasLimitPerBlock }, @@ -306,6 +307,24 @@ func TestNewBlockTrack_ShouldErrNotarizedHeadersSliceIsNil(t *testing.T) { assert.True(t, check.IfNil(mbt)) } +func TestNewBlockTrack_ShouldErrNilWhitelistHandler(t *testing.T) { + t.Parallel() + + shardArguments := CreateShardTrackerMockArguments() + shardArguments.WhitelistHandler = nil + sbt, err := track.NewShardBlockTrack(shardArguments) + + assert.Equal(t, process.ErrNilWhiteListHandler, err) + assert.Nil(t, sbt) + + metaArguments := CreateMetaTrackerMockArguments() + metaArguments.WhitelistHandler = nil + mbt, err := track.NewMetaBlockTrack(metaArguments) + + assert.Equal(t, process.ErrNilWhiteListHandler, err) + assert.True(t, check.IfNil(mbt)) +} + func TestNewBlockTrack_ShouldWork(t *testing.T) { t.Parallel() diff --git a/process/track/miniBlockTrack.go b/process/track/miniBlockTrack.go index 538dbdf0740..900846f67ff 100644 --- a/process/track/miniBlockTrack.go +++ b/process/track/miniBlockTrack.go @@ -73,7 +73,7 @@ func (mbt *miniBlockTrack) receivedMiniBlock(key []byte, value interface{}) { return } - log.Trace("miniBlockTrack.receivedMiniBlock", + log.Debug("received miniblock from 
network in block tracker", "hash", key, "sender", miniBlock.SenderShardID, "receiver", miniBlock.ReceiverShardID, diff --git a/process/transaction/baseProcess.go b/process/transaction/baseProcess.go index d38f7edd1be..5146f0fe1a4 100644 --- a/process/transaction/baseProcess.go +++ b/process/transaction/baseProcess.go @@ -14,6 +14,7 @@ import ( "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/sharding" "github.com/multiversx/mx-chain-go/state" + vmcommon "github.com/multiversx/mx-chain-vm-common-go" ) type baseTxProcessor struct { @@ -25,6 +26,8 @@ type baseTxProcessor struct { marshalizer marshal.Marshalizer scProcessor process.SmartContractProcessor enableEpochsHandler common.EnableEpochsHandler + txVersionChecker process.TxVersionCheckerHandler + guardianChecker process.GuardianChecker } func (txProc *baseTxProcessor) getAccounts( @@ -122,14 +125,12 @@ func (txProc *baseTxProcessor) checkTxValues( if check.IfNil(acntSnd) { return nil } - if acntSnd.GetNonce() < tx.Nonce { return process.ErrHigherNonceInTransaction } if acntSnd.GetNonce() > tx.Nonce { return process.ErrLowerNonceInTransaction } - err = txProc.economicsFee.CheckValidityTxValues(tx) if err != nil { return err @@ -164,6 +165,11 @@ func (txProc *baseTxProcessor) checkTxValues( return process.ErrInsufficientFunds } + err = txProc.verifyGuardian(tx, acntSnd) + if err != nil { + return err + } + return nil } @@ -202,7 +208,7 @@ func (txProc *baseTxProcessor) processIfTxErrorCrossShard(tx *transaction.Transa } // VerifyTransaction verifies the account states in respect with the transaction data -func (txProc *txProcessor) VerifyTransaction(tx *transaction.Transaction) error { +func (txProc *baseTxProcessor) VerifyTransaction(tx *transaction.Transaction) error { if check.IfNil(tx) { return process.ErrNilTransaction } @@ -214,3 +220,61 @@ func (txProc *txProcessor) VerifyTransaction(tx *transaction.Transaction) error return txProc.checkTxValues(tx, senderAccount, receiverAccount, false) } + +// Setting a guardian is allowed with regular transactions on a guarded account +// but in this case is set with the default epochs delay +func checkOperationAllowedToBypassGuardian(txData []byte) error { + if process.IsSetGuardianCall(txData) { + return nil + } + + return fmt.Errorf("%w, not allowed to bypass guardian", process.ErrTransactionNotExecutable) +} + +func (txProc *baseTxProcessor) checkGuardedAccountUnguardedTxPermission(txData []byte, account state.UserAccountHandler) error { + err := checkOperationAllowedToBypassGuardian(txData) + if err != nil { + return err + } + + // block non guarded setGuardian Txs if there is a pending guardian + hasPendingGuardian := txProc.guardianChecker.HasPendingGuardian(account) + if process.IsSetGuardianCall(txData) && hasPendingGuardian { + return fmt.Errorf("%w, %s", process.ErrTransactionNotExecutable, process.ErrCannotReplaceGuardedAccountPendingGuardian.Error()) + } + + return nil +} + +func (txProc *baseTxProcessor) verifyGuardian(tx *transaction.Transaction, account state.UserAccountHandler) error { + if check.IfNil(account) { + return nil + } + isTransactionGuarded := txProc.txVersionChecker.IsGuardedTransaction(tx) + if !account.IsGuarded() { + if isTransactionGuarded { + return fmt.Errorf("%w, %s", process.ErrTransactionNotExecutable, process.ErrGuardedTransactionNotExpected.Error()) + } + + return nil + } + if !isTransactionGuarded { + return txProc.checkGuardedAccountUnguardedTxPermission(tx.GetData(), account) + } + + acc, ok := 
account.(vmcommon.UserAccountHandler) + if !ok { + return fmt.Errorf("%w, %s", process.ErrTransactionNotExecutable, process.ErrWrongTypeAssertion.Error()) + } + + guardian, err := txProc.guardianChecker.GetActiveGuardian(acc) + if err != nil { + return fmt.Errorf("%w, %s", process.ErrTransactionNotExecutable, err.Error()) + } + + if !bytes.Equal(guardian, tx.GuardianAddr) { + return fmt.Errorf("%w, %s", process.ErrTransactionNotExecutable, process.ErrTransactionAndAccountGuardianMismatch.Error()) + } + + return nil +} diff --git a/process/transaction/baseProcess_test.go b/process/transaction/baseProcess_test.go new file mode 100644 index 00000000000..04f58184562 --- /dev/null +++ b/process/transaction/baseProcess_test.go @@ -0,0 +1,242 @@ +package transaction + +import ( + "errors" + "math/big" + "strings" + "testing" + + "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-core-go/data/transaction" + "github.com/multiversx/mx-chain-go/process" + "github.com/multiversx/mx-chain-go/process/mock" + "github.com/multiversx/mx-chain-go/state" + "github.com/multiversx/mx-chain-go/testscommon" + "github.com/multiversx/mx-chain-go/testscommon/economicsmocks" + "github.com/multiversx/mx-chain-go/testscommon/guardianMocks" + "github.com/multiversx/mx-chain-go/testscommon/hashingMocks" + stateMock "github.com/multiversx/mx-chain-go/testscommon/state" + vmcommon "github.com/multiversx/mx-chain-vm-common-go" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func Test_checkOperationAllowedToBypassGuardian(t *testing.T) { + t.Run("operations not allowed to bypass", func(t *testing.T) { + txData := []byte("#@!") + require.True(t, errors.Is(checkOperationAllowedToBypassGuardian(txData), process.ErrTransactionNotExecutable)) + txData = []byte(nil) + require.True(t, errors.Is(checkOperationAllowedToBypassGuardian(txData), process.ErrTransactionNotExecutable)) + txData = []byte("SomeOtherFunction@") + require.True(t, errors.Is(checkOperationAllowedToBypassGuardian(txData), process.ErrTransactionNotExecutable)) + }) + t.Run("setGuardian data field (non builtin call) not allowed", func(t *testing.T) { + txData := []byte("setGuardian") + require.True(t, errors.Is(checkOperationAllowedToBypassGuardian(txData), process.ErrTransactionNotExecutable)) + }) + t.Run("set guardian builtin call allowed to bypass", func(t *testing.T) { + txData := []byte("SetGuardian@") + require.Nil(t, checkOperationAllowedToBypassGuardian(txData)) + }) +} + +func Test_checkGuardedAccountUnguardedTxPermission(t *testing.T) { + + baseProc := baseTxProcessor{ + accounts: &stateMock.AccountsStub{}, + shardCoordinator: mock.NewOneShardCoordinatorMock(), + pubkeyConv: testscommon.NewPubkeyConverterMock(32), + economicsFee: &economicsmocks.EconomicsHandlerStub{ + CheckValidityTxValuesCalled: func(tx data.TransactionWithFeeHandler) error { + return nil + }, + ComputeMoveBalanceFeeCalled: func(tx data.TransactionWithFeeHandler) *big.Int { + return big.NewInt(0) + }, + }, + hasher: &hashingMocks.HasherMock{}, + marshalizer: &testscommon.MarshalizerMock{}, + scProcessor: &testscommon.SCProcessorMock{}, + enableEpochsHandler: &testscommon.EnableEpochsHandlerStub{ + IsPenalizedTooMuchGasFlagEnabledField: true, + }, + txVersionChecker: &testscommon.TxVersionCheckerStub{}, + guardianChecker: &guardianMocks.GuardedAccountHandlerStub{}, + } + + account := &stateMock.UserAccountStub{} + + t.Run("nil txData", func(t *testing.T) { + require.True(t, 
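// verifyGuardian, completed just above, is essentially a decision table over
// "is the account guarded" and "is the transaction guarded", with SetGuardian
// as the single escape hatch. Restated as a self-contained sketch - the real
// code wraps every failure in process.ErrTransactionNotExecutable and derives
// the inputs from the account state, the tx version options and the guardian
// checker:
package guardiansketch

import "errors"

var (
	errGuardedTxNotExpected = errors.New("guarded tx not expected")
	errBypassNotAllowed     = errors.New("not allowed to bypass guardian")
	errPendingGuardian      = errors.New("cannot replace pending guardian with an unguarded tx")
	errGuardianMismatch     = errors.New("transaction and account guardian mismatch")
)

func verifyGuardian(accountGuarded, txGuarded, isSetGuardianCall, hasPendingGuardian, guardianMatches bool) error {
	if !accountGuarded {
		if txGuarded {
			return errGuardedTxNotExpected // co-signature on a plain account
		}
		return nil
	}
	if !txGuarded {
		if !isSetGuardianCall {
			return errBypassNotAllowed // only SetGuardian may bypass the guardian
		}
		if hasPendingGuardian {
			return errPendingGuardian // and not while another guardian is pending
		}
		return nil // bypass allowed, applied with the default epochs delay
	}
	if !guardianMatches {
		return errGuardianMismatch // tx guardian differs from the active guardian
	}
	return nil
}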
errors.Is(baseProc.checkGuardedAccountUnguardedTxPermission(nil, account), process.ErrTransactionNotExecutable)) + }) + t.Run("empty txData", func(t *testing.T) { + require.True(t, errors.Is(baseProc.checkGuardedAccountUnguardedTxPermission([]byte(""), account), process.ErrTransactionNotExecutable)) + }) + t.Run("nil account", func(t *testing.T) { + txData := []byte("SetGuardian@") + require.Nil(t, baseProc.checkGuardedAccountUnguardedTxPermission(txData, nil)) + }) + t.Run("setGuardian data field (non builtin call) not allowed", func(t *testing.T) { + txData := []byte("setGuardian") + require.True(t, errors.Is(baseProc.checkGuardedAccountUnguardedTxPermission(txData, account), process.ErrTransactionNotExecutable)) + }) + t.Run("set guardian builtin call allowed to bypass", func(t *testing.T) { + txData := []byte("SetGuardian@") + require.Nil(t, baseProc.checkGuardedAccountUnguardedTxPermission(txData, account)) + }) + t.Run("set guardian builtin call with pending guardian not allowed", func(t *testing.T) { + txData := []byte("SetGuardian@") + baseProcLocal := baseProc + baseProcLocal.guardianChecker = &guardianMocks.GuardedAccountHandlerStub{ + HasPendingGuardianCalled: func(uah state.UserAccountHandler) bool { + return true + }, + } + + err := baseProcLocal.checkGuardedAccountUnguardedTxPermission(txData, account) + require.True(t, errors.Is(err, process.ErrTransactionNotExecutable)) + require.True(t, strings.Contains(err.Error(), process.ErrCannotReplaceGuardedAccountPendingGuardian.Error())) + }) +} + +func TestBaseTxProcessor_VerifyGuardian(t *testing.T) { + t.Parallel() + + baseProc := baseTxProcessor{ + accounts: &stateMock.AccountsStub{}, + shardCoordinator: mock.NewOneShardCoordinatorMock(), + pubkeyConv: testscommon.NewPubkeyConverterMock(32), + economicsFee: &economicsmocks.EconomicsHandlerStub{ + CheckValidityTxValuesCalled: func(tx data.TransactionWithFeeHandler) error { + return nil + }, + ComputeMoveBalanceFeeCalled: func(tx data.TransactionWithFeeHandler) *big.Int { + return big.NewInt(0) + }, + }, + hasher: &hashingMocks.HasherMock{}, + marshalizer: &testscommon.MarshalizerMock{}, + scProcessor: &testscommon.SCProcessorMock{}, + enableEpochsHandler: &testscommon.EnableEpochsHandlerStub{ + IsPenalizedTooMuchGasFlagEnabledField: true, + }, + txVersionChecker: &testscommon.TxVersionCheckerStub{}, + guardianChecker: &guardianMocks.GuardedAccountHandlerStub{}, + } + + notGuardedAccount := &stateMock.UserAccountStub{} + guardedAccount := &stateMock.UserAccountStub{ + IsGuardedCalled: func() bool { + return true + }, + } + expectedErr := errors.New("expected error") + tx := &transaction.Transaction{ + GuardianAddr: []byte("guardian"), + } + + t.Run("nil account should not error", func(t *testing.T) { + t.Parallel() + + localBaseProc := baseProc + err := localBaseProc.verifyGuardian(&transaction.Transaction{}, nil) + assert.Nil(t, err) + }) + t.Run("guarded account with a not guarded transaction should error", func(t *testing.T) { + t.Parallel() + + localBaseProc := baseProc + localBaseProc.txVersionChecker = &testscommon.TxVersionCheckerStub{ + IsGuardedTransactionCalled: func(tx *transaction.Transaction) bool { + return false + }, + } + + err := localBaseProc.verifyGuardian(&transaction.Transaction{}, guardedAccount) + assert.ErrorIs(t, err, process.ErrTransactionNotExecutable) + assert.Contains(t, err.Error(), "not allowed to bypass guardian") + }) + t.Run("not guarded account with guarded tx should error", func(t *testing.T) { + t.Parallel() + + localBaseProc := baseProc + 
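// The subtests around this point rely on baseTxProcessor being a value type:
// `localBaseProc := baseProc` copies the whole fixture, so each t.Run swaps in
// one stub (TxVersionCheckerStub, GuardedAccountHandlerStub) without leaking the
// override into sibling cases. The same shape, reduced to a hypothetical
// two-field example:
package copyfixturesketch

import "testing"

type checkerStub struct{ IsGuardedCalled func() bool }

func (c *checkerStub) IsGuarded() bool {
	if c.IsGuardedCalled != nil {
		return c.IsGuardedCalled()
	}
	return false
}

type processor struct {
	checker *checkerStub
}

func TestPerCaseOverrides(t *testing.T) {
	base := processor{checker: &checkerStub{}}

	t.Run("guarded path", func(t *testing.T) {
		local := base // value copy: mutating local never touches base
		local.checker = &checkerStub{IsGuardedCalled: func() bool { return true }}
		if !local.checker.IsGuarded() {
			t.Fatal("expected the overridden stub to report guarded")
		}
	})

	t.Run("default path still unguarded", func(t *testing.T) {
		if base.checker.IsGuarded() {
			t.Fatal("base fixture must be unaffected by the previous subtest")
		}
	})
}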
localBaseProc.txVersionChecker = &testscommon.TxVersionCheckerStub{ + IsGuardedTransactionCalled: func(tx *transaction.Transaction) bool { + return true + }, + } + + err := localBaseProc.verifyGuardian(&transaction.Transaction{}, notGuardedAccount) + assert.ErrorIs(t, err, process.ErrTransactionNotExecutable) + assert.Contains(t, err.Error(), process.ErrGuardedTransactionNotExpected.Error()) + }) + t.Run("not guarded account with not guarded tx should work", func(t *testing.T) { + t.Parallel() + + localBaseProc := baseProc + localBaseProc.txVersionChecker = &testscommon.TxVersionCheckerStub{ + IsGuardedTransactionCalled: func(tx *transaction.Transaction) bool { + return false + }, + } + + err := localBaseProc.verifyGuardian(&transaction.Transaction{}, notGuardedAccount) + assert.Nil(t, err) + }) + t.Run("get active guardian fails should error", func(t *testing.T) { + t.Parallel() + + localBaseProc := baseProc + localBaseProc.txVersionChecker = &testscommon.TxVersionCheckerStub{ + IsGuardedTransactionCalled: func(tx *transaction.Transaction) bool { + return true + }, + } + localBaseProc.guardianChecker = &guardianMocks.GuardedAccountHandlerStub{ + GetActiveGuardianCalled: func(uah vmcommon.UserAccountHandler) ([]byte, error) { + return nil, expectedErr + }, + } + + err := localBaseProc.verifyGuardian(&transaction.Transaction{}, guardedAccount) + assert.ErrorIs(t, err, process.ErrTransactionNotExecutable) + assert.Contains(t, err.Error(), expectedErr.Error()) + }) + t.Run("guardian address mismatch should error", func(t *testing.T) { + t.Parallel() + + localBaseProc := baseProc + localBaseProc.txVersionChecker = &testscommon.TxVersionCheckerStub{ + IsGuardedTransactionCalled: func(tx *transaction.Transaction) bool { + return true + }, + } + localBaseProc.guardianChecker = &guardianMocks.GuardedAccountHandlerStub{ + GetActiveGuardianCalled: func(uah vmcommon.UserAccountHandler) ([]byte, error) { + return []byte("account guardian"), nil + }, + } + + err := localBaseProc.verifyGuardian(tx, guardedAccount) + assert.ErrorIs(t, err, process.ErrTransactionNotExecutable) + assert.Contains(t, err.Error(), process.ErrTransactionAndAccountGuardianMismatch.Error()) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + localBaseProc := baseProc + localBaseProc.txVersionChecker = &testscommon.TxVersionCheckerStub{ + IsGuardedTransactionCalled: func(tx *transaction.Transaction) bool { + return true + }, + } + localBaseProc.guardianChecker = &guardianMocks.GuardedAccountHandlerStub{ + GetActiveGuardianCalled: func(uah vmcommon.UserAccountHandler) ([]byte, error) { + return []byte("guardian"), nil + }, + } + + err := localBaseProc.verifyGuardian(tx, guardedAccount) + assert.Nil(t, err) + }) +} diff --git a/process/transaction/export_test.go b/process/transaction/export_test.go index 742fc618b6d..e812f208580 100644 --- a/process/transaction/export_test.go +++ b/process/transaction/export_test.go @@ -79,3 +79,11 @@ func (txProc *txProcessor) ExecuteFailedRelayedTransaction( originalTxHash, errorMsg) } + +func (inTx *InterceptedTransaction) CheckMaxGasPrice() error { + return inTx.checkMaxGasPrice() +} + +func (txProc *txProcessor) VerifyGuardian(tx *transaction.Transaction, account state.UserAccountHandler) error { + return txProc.verifyGuardian(tx, account) +} diff --git a/process/transaction/interceptedTransaction.go b/process/transaction/interceptedTransaction.go index ce60bb89644..0aedf837d09 100644 --- a/process/transaction/interceptedTransaction.go +++ 
b/process/transaction/interceptedTransaction.go @@ -152,17 +152,19 @@ func createRelayedV2(relayedTx *transaction.Transaction, args [][]byte) (*transa return nil, process.ErrInvalidArguments } tx := &transaction.Transaction{ - Nonce: big.NewInt(0).SetBytes(args[1]).Uint64(), - Value: big.NewInt(0), - RcvAddr: args[0], - SndAddr: relayedTx.RcvAddr, - GasPrice: relayedTx.GasPrice, - GasLimit: 0, // the user had to sign a transaction with 0 gasLimit - as all gasLimit is coming from the relayer - Data: args[2], - ChainID: relayedTx.ChainID, - Version: relayedTx.Version, - Signature: args[3], - Options: relayedTx.Options, + Nonce: big.NewInt(0).SetBytes(args[1]).Uint64(), + Value: big.NewInt(0), + RcvAddr: args[0], + SndAddr: relayedTx.RcvAddr, + GasPrice: relayedTx.GasPrice, + GasLimit: 0, // the user had to sign a transaction with 0 gasLimit - as all gasLimit is coming from the relayer + Data: args[2], + ChainID: relayedTx.ChainID, + Version: relayedTx.Version, + Signature: args[3], + Options: relayedTx.Options, + GuardianAddr: nil, + GuardianSignature: nil, } return tx, nil @@ -182,6 +184,11 @@ func (inTx *InterceptedTransaction) CheckValidity() error { return err } + err = inTx.VerifyGuardianSig(inTx.tx) + if err != nil { + return err + } + err = inTx.verifyIfRelayedTx(inTx.tx) if err != nil { return err @@ -218,7 +225,12 @@ func (inTx *InterceptedTransaction) verifyIfRelayedTxV2(tx *transaction.Transact err = inTx.verifySig(userTx) if err != nil { - return err + return fmt.Errorf("inner transaction: %w", err) + } + + err = inTx.VerifyGuardianSig(userTx) + if err != nil { + return fmt.Errorf("inner transaction: %w", err) } funcName, _, err = inTx.argsParser.ParseCallData(string(userTx.Data)) @@ -249,7 +261,7 @@ func (inTx *InterceptedTransaction) verifyIfRelayedTx(tx *transaction.Transactio userTx, err := createTx(inTx.signMarshalizer, userTxArgs[0]) if err != nil { - return err + return fmt.Errorf("inner transaction: %w", err) } if !bytes.Equal(userTx.SndAddr, tx.RcvAddr) { @@ -258,12 +270,17 @@ func (inTx *InterceptedTransaction) verifyIfRelayedTx(tx *transaction.Transactio err = inTx.integrity(userTx) if err != nil { - return err + return fmt.Errorf("inner transaction: %w", err) } err = inTx.verifySig(userTx) if err != nil { - return err + return fmt.Errorf("inner transaction: %w", err) + } + + err = inTx.VerifyGuardianSig(userTx) + if err != nil { + return fmt.Errorf("inner transaction: %w", err) } if len(userTx.Data) == 0 { @@ -322,12 +339,36 @@ func (inTx *InterceptedTransaction) integrity(tx *transaction.Transaction) error return process.ErrInvalidSndAddr } + err = inTx.checkMaxGasPrice() + if err != nil { + return err + } + return inTx.feeHandler.CheckValidityTxValues(tx) } +func (inTx *InterceptedTransaction) checkMaxGasPrice() error { + tx := inTx.tx + // no need to check max gas for guarded transactions as they are co-signed + if inTx.txVersionChecker.IsGuardedTransaction(tx) { + return nil + } + + txData := tx.GetData() + if !process.IsSetGuardianCall(txData) { + return nil + } + + if tx.GetGasPrice() > inTx.feeHandler.MaxGasPriceSetGuardian() { + return process.ErrGasPriceTooHigh + } + + return nil +} + // verifySig checks if the tx is correctly signed func (inTx *InterceptedTransaction) verifySig(tx *transaction.Transaction) error { - buffCopiedTx, err := tx.GetDataForSigning(inTx.pubkeyConv, inTx.signMarshalizer) + txMessageForSigVerification, err := inTx.getTxMessageForGivenTx(tx) if err != nil { return err } @@ -337,17 +378,60 @@ func (inTx *InterceptedTransaction) 
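// checkMaxGasPrice, shown in full above, caps the gas price only for unguarded
// SetGuardian calls: a co-signed transaction is exempt, and so is any other data
// field - presumably to bound what the one operation allowed to bypass the
// guardian can cost. A condensed sketch (isSetGuardianCall is a simplified
// stand-in for process.IsSetGuardianCall, matching only the builtin-call form
// exercised by the tests below):
package gaspricesketch

import (
	"errors"
	"strings"
)

var errGasPriceTooHigh = errors.New("gas price too high")

func isSetGuardianCall(txData []byte) bool {
	return strings.HasPrefix(string(txData), "SetGuardian@")
}

func checkMaxGasPrice(isGuardedTx bool, txData []byte, gasPrice, maxGasPriceSetGuardian uint64) error {
	if isGuardedTx {
		return nil // co-signed by the guardian: no cap needed
	}
	if !isSetGuardianCall(txData) {
		return nil // the cap only applies to SetGuardian calls
	}
	if gasPrice > maxGasPriceSetGuardian {
		return errGasPriceTooHigh
	}
	return nil
}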
verifySig(tx *transaction.Transaction) error return err } - if !inTx.txVersionChecker.IsSignedWithHash(tx) { - return inTx.singleSigner.Verify(senderPubKey, buffCopiedTx, tx.Signature) + return inTx.singleSigner.Verify(senderPubKey, txMessageForSigVerification, tx.Signature) +} + +// VerifyGuardianSig verifies if the guardian signature is valid +func (inTx *InterceptedTransaction) VerifyGuardianSig(tx *transaction.Transaction) error { + txMessageForSigVerification, err := inTx.getTxMessageForGivenTx(tx) + if err != nil { + return err + } + + if !inTx.txVersionChecker.IsGuardedTransaction(tx) { + return verifyConsistencyForNotGuardedTx(tx) } - if !inTx.enableSignedTxWithHash { - return process.ErrTransactionSignedWithHashIsNotEnabled + guardianPubKey, err := inTx.keyGen.PublicKeyFromByteArray(tx.GuardianAddr) + if err != nil { + return err + } + + errVerifySig := inTx.singleSigner.Verify(guardianPubKey, txMessageForSigVerification, tx.GuardianSignature) + if errVerifySig != nil { + return fmt.Errorf("%w when checking the guardian's signature", errVerifySig) } - txHash := inTx.txSignHasher.Compute(string(buffCopiedTx)) + return nil +} + +func verifyConsistencyForNotGuardedTx(tx *transaction.Transaction) error { + if len(tx.GetGuardianAddr()) > 0 { + return process.ErrGuardianAddressNotExpected + } + if len(tx.GetGuardianSignature()) > 0 { + return process.ErrGuardianSignatureNotExpected + } + + return nil +} + +func (inTx *InterceptedTransaction) getTxMessageForGivenTx(tx *transaction.Transaction) ([]byte, error) { + if inTx.txVersionChecker.IsSignedWithHash(tx) && !inTx.enableSignedTxWithHash { + return nil, process.ErrTransactionSignedWithHashIsNotEnabled + } + + txSigningData, err := tx.GetDataForSigning(inTx.pubkeyConv, inTx.signMarshalizer, inTx.txSignHasher) + if err != nil { + return nil, err + } + + return txSigningData, nil +} - return inTx.singleSigner.Verify(senderPubKey, txHash, tx.Signature) +// GetTxMessageForSignatureVerification returns the transaction data that the signature needs to be verified on +func (inTx *InterceptedTransaction) GetTxMessageForSignatureVerification() ([]byte, error) { + return inTx.getTxMessageForGivenTx(inTx.tx) } // ReceiverShardId returns the receiver shard id diff --git a/process/transaction/interceptedTransaction_test.go b/process/transaction/interceptedTransaction_test.go index d1d525fde4b..74efd6716b0 100644 --- a/process/transaction/interceptedTransaction_test.go +++ b/process/transaction/interceptedTransaction_test.go @@ -20,6 +20,7 @@ import ( "github.com/multiversx/mx-chain-go/process/smartContract" "github.com/multiversx/mx-chain-go/process/transaction" "github.com/multiversx/mx-chain-go/testscommon" + "github.com/multiversx/mx-chain-go/testscommon/economicsmocks" "github.com/multiversx/mx-chain-go/testscommon/hashingMocks" logger "github.com/multiversx/mx-chain-logger-go" "github.com/stretchr/testify/assert" @@ -36,7 +37,7 @@ var recvAddress = []byte("23456789012345678901234567890123") var sigBad = []byte("bad-signature") var sigOk = []byte("signature") -func createMockPubkeyConverter() *testscommon.PubkeyConverterMock { +func createMockPubKeyConverter() *testscommon.PubkeyConverterMock { return testscommon.NewPubkeyConverterMock(32) } @@ -63,14 +64,57 @@ func createKeyGenMock() crypto.KeyGenerator { } } -func createFreeTxFeeHandler() *mock.FeeHandlerStub { - return &mock.FeeHandlerStub{ +func createFreeTxFeeHandler() *economicsmocks.EconomicsHandlerStub { + return &economicsmocks.EconomicsHandlerStub{ CheckValidityTxValuesCalled: func(tx 
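// VerifyGuardianSig above verifies the guardian's signature over the exact same
// message the sender signed (getTxMessageForGivenTx), and for transactions
// without the guarded option it degrades to a consistency check: no guardian
// address or guardian signature may be present at all. That consistency rule,
// as a self-contained sketch:
package guardiansigsketch

import "errors"

var (
	errGuardianAddressNotExpected   = errors.New("guardian address not expected")
	errGuardianSignatureNotExpected = errors.New("guardian signature not expected")
)

// verifyConsistencyForNotGuardedTx mirrors the helper above: an unguarded
// transaction must carry neither guardian field.
func verifyConsistencyForNotGuardedTx(guardianAddr, guardianSig []byte) error {
	if len(guardianAddr) > 0 {
		return errGuardianAddressNotExpected
	}
	if len(guardianSig) > 0 {
		return errGuardianSignatureNotExpected
	}
	return nil
}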
data.TransactionWithFeeHandler) error { return nil }, } } +func createInterceptedTxWithTxFeeHandlerAndVersionChecker(tx *dataTransaction.Transaction, txFeeHandler process.FeeHandler, txVerChecker *testscommon.TxVersionCheckerStub) (*transaction.InterceptedTransaction, error) { + marshaller := &testscommon.MarshalizerMock{} + txBuff, err := marshaller.Marshal(tx) + if err != nil { + return nil, err + } + + shardCoordinator := mock.NewMultipleShardsCoordinatorMock() + shardCoordinator.CurrentShard = 6 + shardCoordinator.ComputeIdCalled = func(address []byte) uint32 { + if bytes.Equal(address, senderAddress) { + return senderShard + } + if bytes.Equal(address, recvAddress) { + return recvShard + } + + return shardCoordinator.CurrentShard + } + + return transaction.NewInterceptedTransaction( + txBuff, + marshaller, + marshaller, + &hashingMocks.HasherMock{}, + createKeyGenMock(), + createDummySigner(), + &testscommon.PubkeyConverterStub{ + LenCalled: func() int { + return 32 + }, + }, + shardCoordinator, + txFeeHandler, + &testscommon.WhiteListHandlerStub{}, + &mock.ArgumentParserMock{}, + []byte("T"), + false, + &hashingMocks.HasherMock{}, + txVerChecker, + ) +} + func createInterceptedTxFromPlainTx(tx *dataTransaction.Transaction, txFeeHandler process.FeeHandler, chainID []byte, minTxVersion uint32) (*transaction.InterceptedTransaction, error) { marshalizer := &mock.MarshalizerMock{} txBuff, err := marshalizer.Marshal(tx) @@ -169,9 +213,9 @@ func TestNewInterceptedTransaction_NilBufferShouldErr(t *testing.T) { &hashingMocks.HasherMock{}, &mock.SingleSignKeyGenMock{}, &mock.SignerMock{}, - createMockPubkeyConverter(), + createMockPubKeyConverter(), mock.NewOneShardCoordinatorMock(), - &mock.FeeHandlerStub{}, + &economicsmocks.EconomicsHandlerStub{}, &testscommon.WhiteListHandlerStub{}, &mock.ArgumentParserMock{}, []byte("chainID"), @@ -194,9 +238,9 @@ func TestNewInterceptedTransaction_NilArgsParser(t *testing.T) { &hashingMocks.HasherMock{}, &mock.SingleSignKeyGenMock{}, &mock.SignerMock{}, - createMockPubkeyConverter(), + createMockPubKeyConverter(), mock.NewOneShardCoordinatorMock(), - &mock.FeeHandlerStub{}, + &economicsmocks.EconomicsHandlerStub{}, &testscommon.WhiteListHandlerStub{}, nil, []byte("chainID"), @@ -219,9 +263,9 @@ func TestNewInterceptedTransaction_NilVersionChecker(t *testing.T) { &hashingMocks.HasherMock{}, &mock.SingleSignKeyGenMock{}, &mock.SignerMock{}, - createMockPubkeyConverter(), + createMockPubKeyConverter(), mock.NewOneShardCoordinatorMock(), - &mock.FeeHandlerStub{}, + &economicsmocks.EconomicsHandlerStub{}, &testscommon.WhiteListHandlerStub{}, &mock.ArgumentParserMock{}, []byte("chainID"), @@ -244,9 +288,9 @@ func TestNewInterceptedTransaction_NilMarshalizerShouldErr(t *testing.T) { &hashingMocks.HasherMock{}, &mock.SingleSignKeyGenMock{}, &mock.SignerMock{}, - createMockPubkeyConverter(), + createMockPubKeyConverter(), mock.NewOneShardCoordinatorMock(), - &mock.FeeHandlerStub{}, + &economicsmocks.EconomicsHandlerStub{}, &testscommon.WhiteListHandlerStub{}, &mock.ArgumentParserMock{}, []byte("chainID"), @@ -269,9 +313,9 @@ func TestNewInterceptedTransaction_NilSignMarshalizerShouldErr(t *testing.T) { &hashingMocks.HasherMock{}, &mock.SingleSignKeyGenMock{}, &mock.SignerMock{}, - createMockPubkeyConverter(), + createMockPubKeyConverter(), mock.NewOneShardCoordinatorMock(), - &mock.FeeHandlerStub{}, + &economicsmocks.EconomicsHandlerStub{}, &testscommon.WhiteListHandlerStub{}, &mock.ArgumentParserMock{}, []byte("chainID"), @@ -294,9 +338,9 @@ func 
TestNewInterceptedTransaction_NilHasherShouldErr(t *testing.T) { nil, &mock.SingleSignKeyGenMock{}, &mock.SignerMock{}, - createMockPubkeyConverter(), + createMockPubKeyConverter(), mock.NewOneShardCoordinatorMock(), - &mock.FeeHandlerStub{}, + &economicsmocks.EconomicsHandlerStub{}, &testscommon.WhiteListHandlerStub{}, &mock.ArgumentParserMock{}, []byte("chainID"), @@ -319,9 +363,9 @@ func TestNewInterceptedTransaction_NilKeyGenShouldErr(t *testing.T) { &hashingMocks.HasherMock{}, nil, &mock.SignerMock{}, - createMockPubkeyConverter(), + createMockPubKeyConverter(), mock.NewOneShardCoordinatorMock(), - &mock.FeeHandlerStub{}, + &economicsmocks.EconomicsHandlerStub{}, &testscommon.WhiteListHandlerStub{}, &mock.ArgumentParserMock{}, []byte("chainID"), @@ -344,9 +388,9 @@ func TestNewInterceptedTransaction_NilSignerShouldErr(t *testing.T) { &hashingMocks.HasherMock{}, &mock.SingleSignKeyGenMock{}, nil, - createMockPubkeyConverter(), + createMockPubKeyConverter(), mock.NewOneShardCoordinatorMock(), - &mock.FeeHandlerStub{}, + &economicsmocks.EconomicsHandlerStub{}, &testscommon.WhiteListHandlerStub{}, &mock.ArgumentParserMock{}, []byte("chainID"), @@ -371,7 +415,7 @@ func TestNewInterceptedTransaction_NilPubkeyConverterShouldErr(t *testing.T) { &mock.SignerMock{}, nil, mock.NewOneShardCoordinatorMock(), - &mock.FeeHandlerStub{}, + &economicsmocks.EconomicsHandlerStub{}, &testscommon.WhiteListHandlerStub{}, &mock.ArgumentParserMock{}, []byte("chainID"), @@ -394,9 +438,9 @@ func TestNewInterceptedTransaction_NilCoordinatorShouldErr(t *testing.T) { &hashingMocks.HasherMock{}, &mock.SingleSignKeyGenMock{}, &mock.SignerMock{}, - createMockPubkeyConverter(), + createMockPubKeyConverter(), nil, - &mock.FeeHandlerStub{}, + &economicsmocks.EconomicsHandlerStub{}, &testscommon.WhiteListHandlerStub{}, &mock.ArgumentParserMock{}, []byte("chainID"), @@ -419,7 +463,7 @@ func TestNewInterceptedTransaction_NilFeeHandlerShouldErr(t *testing.T) { &hashingMocks.HasherMock{}, &mock.SingleSignKeyGenMock{}, &mock.SignerMock{}, - createMockPubkeyConverter(), + createMockPubKeyConverter(), mock.NewOneShardCoordinatorMock(), nil, &testscommon.WhiteListHandlerStub{}, @@ -444,9 +488,9 @@ func TestNewInterceptedTransaction_NilWhiteListerVerifiedTxsShouldErr(t *testing &hashingMocks.HasherMock{}, &mock.SingleSignKeyGenMock{}, &mock.SignerMock{}, - createMockPubkeyConverter(), + createMockPubKeyConverter(), mock.NewOneShardCoordinatorMock(), - &mock.FeeHandlerStub{}, + &economicsmocks.EconomicsHandlerStub{}, nil, &mock.ArgumentParserMock{}, []byte("chainID"), @@ -469,9 +513,9 @@ func TestNewInterceptedTransaction_InvalidChainIDShouldErr(t *testing.T) { &hashingMocks.HasherMock{}, &mock.SingleSignKeyGenMock{}, &mock.SignerMock{}, - createMockPubkeyConverter(), + createMockPubKeyConverter(), mock.NewOneShardCoordinatorMock(), - &mock.FeeHandlerStub{}, + &economicsmocks.EconomicsHandlerStub{}, &testscommon.WhiteListHandlerStub{}, &mock.ArgumentParserMock{}, nil, @@ -494,9 +538,9 @@ func TestNewInterceptedTransaction_NilTxSignHasherShouldErr(t *testing.T) { &hashingMocks.HasherMock{}, &mock.SingleSignKeyGenMock{}, &mock.SignerMock{}, - createMockPubkeyConverter(), + createMockPubKeyConverter(), mock.NewOneShardCoordinatorMock(), - &mock.FeeHandlerStub{}, + &economicsmocks.EconomicsHandlerStub{}, &testscommon.WhiteListHandlerStub{}, &mock.ArgumentParserMock{}, []byte("chainID"), @@ -525,9 +569,9 @@ func TestNewInterceptedTransaction_UnmarshalingTxFailsShouldErr(t *testing.T) { &hashingMocks.HasherMock{}, 
&mock.SingleSignKeyGenMock{}, &mock.SignerMock{}, - createMockPubkeyConverter(), + createMockPubKeyConverter(), mock.NewOneShardCoordinatorMock(), - &mock.FeeHandlerStub{}, + &economicsmocks.EconomicsHandlerStub{}, &testscommon.WhiteListHandlerStub{}, &mock.ArgumentParserMock{}, []byte("chainID"), @@ -792,7 +836,7 @@ func TestNewInterceptedTransaction_InsufficientFeeShouldErr(t *testing.T) { Version: minTxVersion, } errExpected := errors.New("insufficient fee") - feeHandler := &mock.FeeHandlerStub{ + feeHandler := &economicsmocks.EconomicsHandlerStub{ CheckValidityTxValuesCalled: func(tx data.TransactionWithFeeHandler) error { return errExpected }, @@ -966,7 +1010,7 @@ func TestInterceptedTransaction_CheckValiditySignedWithHashButNotEnabled(t *test Signature: sigOk, ChainID: chainID, Version: minTxVersion + 1, - Options: versioning.MaskSignedWithHash, + Options: dataTransaction.MaskSignedWithHash, } marshalizer := &mock.MarshalizerMock{} @@ -1010,7 +1054,7 @@ func TestInterceptedTransaction_CheckValiditySignedWithHashButNotEnabled(t *test assert.Equal(t, process.ErrTransactionSignedWithHashIsNotEnabled, err) } -func TestInterceptedTransaction_CheckValiditySignedWithHashShoudWork(t *testing.T) { +func TestInterceptedTransaction_CheckValiditySignedWithHashShouldWork(t *testing.T) { t.Parallel() minTxVersion := uint32(1) @@ -1026,7 +1070,7 @@ func TestInterceptedTransaction_CheckValiditySignedWithHashShoudWork(t *testing. Signature: sigOk, ChainID: chainID, Version: minTxVersion + 1, - Options: versioning.MaskSignedWithHash, + Options: dataTransaction.MaskSignedWithHash, } marshalizer := &mock.MarshalizerMock{} @@ -1350,7 +1394,7 @@ func TestInterceptedTransaction_CheckValidityOfRelayedTx(t *testing.T) { tx.Data = []byte(core.RelayedTransaction + "@" + hex.EncodeToString(userTxData)) txi, _ = createInterceptedTxFromPlainTxWithArgParser(tx) err = txi.CheckValidity() - assert.Equal(t, process.ErrNilValue, err) + assert.ErrorIs(t, err, data.ErrNilValue) userTx.Value = big.NewInt(0) userTxData, _ = marshalizer.Marshal(userTx) @@ -1364,7 +1408,8 @@ func TestInterceptedTransaction_CheckValidityOfRelayedTx(t *testing.T) { tx.Data = []byte(core.RelayedTransaction + "@" + hex.EncodeToString(userTxData)) txi, _ = createInterceptedTxFromPlainTxWithArgParser(tx) err = txi.CheckValidity() - assert.Equal(t, errSignerMockVerifySigFails, err) + assert.ErrorIs(t, err, errSignerMockVerifySigFails) + assert.Contains(t, err.Error(), "inner transaction") userTx.Signature = sigOk userTx.SndAddr = []byte("otherAddress") @@ -1433,7 +1478,8 @@ func TestInterceptedTransaction_CheckValidityOfRelayedTxV2(t *testing.T) { tx.Data = []byte(core.RelayedTransactionV2 + "@" + hex.EncodeToString(userTx.RcvAddr) + "@" + hex.EncodeToString(big.NewInt(0).SetUint64(userTx.Nonce).Bytes()) + "@" + hex.EncodeToString(userTx.Data) + "@" + hex.EncodeToString(userTx.Signature)) txi, _ = createInterceptedTxFromPlainTxWithArgParser(tx) err = txi.CheckValidity() - assert.Equal(t, errSignerMockVerifySigFails, err) + assert.ErrorIs(t, err, errSignerMockVerifySigFails) + assert.Contains(t, err.Error(), "inner transaction") userTx.Signature = sigOk userTx.SndAddr = []byte("otherAddress") @@ -1631,3 +1677,190 @@ func TestInterceptedTransaction_String(t *testing.T) { assert.Equal(t, expectedFormat, txin.String()) } + +func TestInterceptedTransaction_checkMaxGasPrice(t *testing.T) { + t.Parallel() + + maxAllowedGasPriceSetGuardian := uint64(2000000) + feeHandler := &economicsmocks.EconomicsHandlerStub{ + MaxGasPriceSetGuardianCalled: func() 
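// A compact restatement of the rule the cases below pin down, using a hypothetical
// isSetGuardianCall helper (the real detection lives in the intercepted transaction):
// guarded transactions bypass the cap entirely, while a plain SetGuardian call must
// not exceed MaxGasPriceSetGuardian.
func isSetGuardianCall(txData []byte) bool {
	return bytes.HasPrefix(txData, []byte("SetGuardian"))
}

func checkMaxGasPriceSketch(tx *dataTransaction.Transaction, guarded bool, maxSetGuardianGasPrice uint64) error {
	if guarded {
		// the guardian already co-signed, so no extra gas price cap applies
		return nil
	}
	if isSetGuardianCall(tx.Data) && tx.GasPrice > maxSetGuardianGasPrice {
		return process.ErrGasPriceTooHigh
	}
	return nil
}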
uint64 { + return maxAllowedGasPriceSetGuardian + }, + } + setGuardianBuiltinCallData := []byte("SetGuardian@xxxx") + testTx1 := &dataTransaction.Transaction{ + GasPrice: maxAllowedGasPriceSetGuardian / 2, + Data: setGuardianBuiltinCallData, + } + testTx2 := &dataTransaction.Transaction{ + GasPrice: maxAllowedGasPriceSetGuardian * 2, + Data: setGuardianBuiltinCallData, + } + + t.Run("guarded tx always returns OK no matter the gas price", func(t *testing.T) { + txVersionChecker := &testscommon.TxVersionCheckerStub{ + IsGuardedTransactionCalled: func(tx *dataTransaction.Transaction) bool { + return true + }, + } + inTx1, err := createInterceptedTxWithTxFeeHandlerAndVersionChecker(testTx1, feeHandler, txVersionChecker) + require.Nil(t, err) + inTx2, err := createInterceptedTxWithTxFeeHandlerAndVersionChecker(testTx2, feeHandler, txVersionChecker) + require.Nil(t, err) + + errMaxGasPrice := inTx1.CheckMaxGasPrice() + require.Nil(t, errMaxGasPrice) + + errMaxGasPrice = inTx2.CheckMaxGasPrice() + require.Nil(t, errMaxGasPrice) + }) + t.Run("not guarded tx, not setGuardian, always OK", func(t *testing.T) { + tx1 := *testTx1 + tx1.Data = []byte("dummy") + tx2 := *testTx2 + tx2.Data = []byte("dummy") + + txVersionChecker := &testscommon.TxVersionCheckerStub{ + IsGuardedTransactionCalled: func(tx *dataTransaction.Transaction) bool { + return false + }, + } + + inTx1, err := createInterceptedTxWithTxFeeHandlerAndVersionChecker(&tx1, feeHandler, txVersionChecker) + require.Nil(t, err) + inTx2, err := createInterceptedTxWithTxFeeHandlerAndVersionChecker(&tx2, feeHandler, txVersionChecker) + require.Nil(t, err) + + errMaxGasPrice := inTx1.CheckMaxGasPrice() + require.Nil(t, errMaxGasPrice) + + errMaxGasPrice = inTx2.CheckMaxGasPrice() + require.Nil(t, errMaxGasPrice) + }) + t.Run("not guarded tx with setGuardian call and price lower than or equal to max OK", func(t *testing.T) { + tx1 := *testTx1 + tx1.GasPrice = maxAllowedGasPriceSetGuardian + tx2 := *testTx2 + tx2.GasPrice = maxAllowedGasPriceSetGuardian / 2 + + txVersionChecker := &testscommon.TxVersionCheckerStub{ + IsGuardedTransactionCalled: func(tx *dataTransaction.Transaction) bool { + return false + }, + } + + inTx1, err := createInterceptedTxWithTxFeeHandlerAndVersionChecker(&tx1, feeHandler, txVersionChecker) + require.Nil(t, err) + inTx2, err := createInterceptedTxWithTxFeeHandlerAndVersionChecker(&tx2, feeHandler, txVersionChecker) + require.Nil(t, err) + + errMaxGasPrice := inTx1.CheckMaxGasPrice() + require.Nil(t, errMaxGasPrice) + + errMaxGasPrice = inTx2.CheckMaxGasPrice() + require.Nil(t, errMaxGasPrice) + }) + t.Run("not guarded tx with setGuardian call and price higher than max errors", func(t *testing.T) { + tx1 := *testTx1 + tx1.GasPrice = maxAllowedGasPriceSetGuardian * 2 + txVersionChecker := &testscommon.TxVersionCheckerStub{ + IsGuardedTransactionCalled: func(tx *dataTransaction.Transaction) bool { + return false + }, + } + + inTx1, err := createInterceptedTxWithTxFeeHandlerAndVersionChecker(&tx1, feeHandler, txVersionChecker) + require.Nil(t, err) + + errMaxGasPrice := inTx1.CheckMaxGasPrice() + require.Equal(t, process.ErrGasPriceTooHigh, errMaxGasPrice) + }) +} + +func TestInterceptedTransaction_VerifyGuardianSig(t *testing.T) { + t.Parallel() + + testTxVersionChecker := testscommon.TxVersionCheckerStub{ + IsGuardedTransactionCalled: func(tx *dataTransaction.Transaction) bool { + return true + }, + } + feeHandler := &economicsmocks.EconomicsHandlerStub{ + MaxGasPriceSetGuardianCalled: func() uint64 { + return 1000 + }, + } + testTx
:= dataTransaction.Transaction{ + Data: []byte("some data"), + GuardianAddr: []byte("guardian addr"), + GuardianSignature: []byte("guardian signature"), + } + + t.Run("get data for signing returns error", func(t *testing.T) { + tx := testTx + txVersionChecker := testTxVersionChecker + txVersionChecker.IsSignedWithHashCalled = func(tx *dataTransaction.Transaction) bool { + return true + } + inTx, err := createInterceptedTxWithTxFeeHandlerAndVersionChecker(&tx, feeHandler, &txVersionChecker) + require.Nil(t, err) + err = inTx.VerifyGuardianSig(&tx) + require.Equal(t, process.ErrTransactionSignedWithHashIsNotEnabled, err) + }) + t.Run("nil guardian sig", func(t *testing.T) { + tx := testTx + tx.GuardianSignature = nil + inTx, err := createInterceptedTxWithTxFeeHandlerAndVersionChecker(&tx, feeHandler, &testTxVersionChecker) + require.Nil(t, err) + + err = inTx.VerifyGuardianSig(&tx) + require.ErrorIs(t, err, errSignerMockVerifySigFails) + require.Contains(t, err.Error(), "guardian's signature") + }) + t.Run("normal tx with non-empty guardian address", func(t *testing.T) { + tx := testTx + tx.GuardianAddr = []byte("guardian addr") + txVersionChecker := testTxVersionChecker + txVersionChecker.IsGuardedTransactionCalled = func(tx *dataTransaction.Transaction) bool { + return false + } + inTx, err := createInterceptedTxWithTxFeeHandlerAndVersionChecker(&tx, feeHandler, &txVersionChecker) + require.Nil(t, err) + + err = inTx.VerifyGuardianSig(&tx) + require.True(t, errors.Is(err, process.ErrGuardianAddressNotExpected)) + }) + t.Run("normal tx with guardian sig", func(t *testing.T) { + tx := testTx + tx.GuardianAddr = nil + tx.GuardianSignature = []byte("guardian signature") + txVersionChecker := testTxVersionChecker + txVersionChecker.IsGuardedTransactionCalled = func(tx *dataTransaction.Transaction) bool { + return false + } + inTx, err := createInterceptedTxWithTxFeeHandlerAndVersionChecker(&tx, feeHandler, &txVersionChecker) + require.Nil(t, err) + + err = inTx.VerifyGuardianSig(&tx) + require.True(t, errors.Is(err, process.ErrGuardianSignatureNotExpected)) + }) + t.Run("wrong guardian sig", func(t *testing.T) { + tx := testTx + tx.GuardianSignature = sigBad + inTx, err := createInterceptedTxWithTxFeeHandlerAndVersionChecker(&tx, feeHandler, &testTxVersionChecker) + require.Nil(t, err) + + err = inTx.VerifyGuardianSig(&tx) + require.ErrorIs(t, err, errSignerMockVerifySigFails) + require.Contains(t, err.Error(), "guardian's signature") + }) + t.Run("correct guardian sig", func(t *testing.T) { + tx := testTx + tx.GuardianSignature = sigOk + inTx, err := createInterceptedTxWithTxFeeHandlerAndVersionChecker(&tx, feeHandler, &testTxVersionChecker) + require.Nil(t, err) + + err = inTx.VerifyGuardianSig(&tx) + require.Nil(t, err) + }) +} diff --git a/process/transaction/metaProcess.go b/process/transaction/metaProcess.go index 4724438b20d..51f2c721552 100644 --- a/process/transaction/metaProcess.go +++ b/process/transaction/metaProcess.go @@ -1,6 +1,8 @@ package transaction import ( + "errors" + "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-core-go/data/transaction" @@ -33,6 +35,8 @@ type ArgsNewMetaTxProcessor struct { TxTypeHandler process.TxTypeHandler EconomicsFee process.FeeHandler EnableEpochsHandler common.EnableEpochsHandler + TxVersionChecker process.TxVersionCheckerHandler + GuardianChecker process.GuardianChecker } // NewMetaTxProcessor creates a new txProcessor engine @@ -59,6 +63,12 @@ func 
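// A condensed view of the guardian-signature rules the subtests above encode, with
// hypothetical naming; the real logic lives in the intercepted transaction's
// VerifyGuardianSig. For a non-guarded tx, any guardian field present is an error;
// for a guarded tx, the guardian signature is verified over the signing data and a
// failure surfaces as a wrapped signer error mentioning "guardian's signature".
func verifyGuardianSketch(tx *dataTransaction.Transaction, guarded bool, verifySig func(*dataTransaction.Transaction) error) error {
	if !guarded {
		if len(tx.GuardianAddr) > 0 {
			return process.ErrGuardianAddressNotExpected
		}
		if len(tx.GuardianSignature) > 0 {
			return process.ErrGuardianSignatureNotExpected
		}
		return nil
	}
	return verifySig(tx)
}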
NewMetaTxProcessor(args ArgsNewMetaTxProcessor) (*metaTxProcessor, error) { if check.IfNil(args.EnableEpochsHandler) { return nil, process.ErrNilEnableEpochsHandler } + if check.IfNil(args.TxVersionChecker) { + return nil, process.ErrNilTransactionVersionChecker + } + if check.IfNil(args.GuardianChecker) { + return nil, process.ErrNilGuardianChecker + } baseTxProcess := &baseTxProcessor{ accounts: args.Accounts, @@ -69,6 +79,8 @@ func NewMetaTxProcessor(args ArgsNewMetaTxProcessor) (*metaTxProcessor, error) { marshalizer: args.Marshalizer, scProcessor: args.ScProcessor, enableEpochsHandler: args.EnableEpochsHandler, + txVersionChecker: args.TxVersionChecker, + guardianChecker: args.GuardianChecker, } // backwards compatibility baseTxProcess.enableEpochsHandler.ResetPenalizedTooMuchGasFlag() @@ -108,6 +120,13 @@ func (txProc *metaTxProcessor) ProcessTransaction(tx *transaction.Transaction) ( err = txProc.checkTxValues(tx, acntSnd, acntDst, false) if err != nil { + if errors.Is(err, process.ErrUserNameDoesNotMatchInCrossShardTx) { + errProcessIfErr := txProc.processIfTxErrorCrossShard(tx, err.Error()) + if errProcessIfErr != nil { + return 0, errProcessIfErr + } + return vmcommon.UserError, nil + } return 0, err } @@ -137,20 +156,6 @@ func (txProc *metaTxProcessor) ProcessTransaction(tx *transaction.Transaction) ( return vmcommon.UserError, nil } -// VerifyTransaction verifies the account states in respect with the transaction data -func (txProc *metaTxProcessor) VerifyTransaction(tx *transaction.Transaction) error { - if check.IfNil(tx) { - return process.ErrNilTransaction - } - - senderAccount, receiverAccount, err := txProc.getAccounts(tx.SndAddr, tx.RcvAddr) - if err != nil { - return err - } - - return txProc.checkTxValues(tx, senderAccount, receiverAccount, false) -} - func (txProc *metaTxProcessor) processSCDeployment( tx *transaction.Transaction, adrSrc []byte, diff --git a/process/transaction/metaProcess_test.go b/process/transaction/metaProcess_test.go index babe9ff0458..0db0be2af50 100644 --- a/process/transaction/metaProcess_test.go +++ b/process/transaction/metaProcess_test.go @@ -5,16 +5,20 @@ import ( "math/big" "testing" + "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-core-go/data/transaction" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/process/coordinator" "github.com/multiversx/mx-chain-go/process/mock" txproc "github.com/multiversx/mx-chain-go/process/transaction" + "github.com/multiversx/mx-chain-go/sharding" "github.com/multiversx/mx-chain-go/state" "github.com/multiversx/mx-chain-go/testscommon" + "github.com/multiversx/mx-chain-go/testscommon/guardianMocks" "github.com/multiversx/mx-chain-go/testscommon/hashingMocks" stateMock "github.com/multiversx/mx-chain-go/testscommon/state" + "github.com/multiversx/mx-chain-go/vm" vmcommon "github.com/multiversx/mx-chain-vm-common-go" "github.com/multiversx/mx-chain-vm-common-go/builtInFunctions" "github.com/multiversx/mx-chain-vm-common-go/parsers" @@ -26,12 +30,14 @@ func createMockNewMetaTxArgs() txproc.ArgsNewMetaTxProcessor { Hasher: &hashingMocks.HasherMock{}, Marshalizer: &mock.MarshalizerMock{}, Accounts: &stateMock.AccountsStub{}, - PubkeyConv: createMockPubkeyConverter(), + PubkeyConv: createMockPubKeyConverter(), ShardCoordinator: mock.NewOneShardCoordinatorMock(), ScProcessor: &testscommon.SCProcessorMock{}, TxTypeHandler: &testscommon.TxTypeHandlerMock{}, EconomicsFee: createFreeTxFeeHandler(), 
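// Caller-side effect of the ProcessTransaction change above: a cross-shard username
// mismatch no longer aborts processing, it is recorded via processIfTxErrorCrossShard
// and surfaces as vmcommon.UserError with a nil error. A small sketch of how a caller
// observes that (function name is illustrative):
func observeUserError(txProc process.TransactionProcessor, tx *transaction.Transaction) (bool, error) {
	returnCode, err := txProc.ProcessTransaction(tx)
	if err != nil {
		return false, err
	}
	// true when the tx was consumed as a user-level failure (e.g. username mismatch)
	return returnCode == vmcommon.UserError, nil
}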
EnableEpochsHandler: &testscommon.EnableEpochsHandlerStub{}, + GuardianChecker: &guardianMocks.GuardedAccountHandlerStub{}, + TxVersionChecker: &testscommon.TxVersionCheckerStub{}, } return args } @@ -215,7 +221,7 @@ func TestMetaTxProcessor_ProcessTransactionScTxShouldWork(t *testing.T) { tx := transaction.Transaction{} tx.Nonce = 0 tx.SndAddr = []byte("SRC") - tx.RcvAddr = generateRandomByteSlice(createMockPubkeyConverter().Len()) + tx.RcvAddr = generateRandomByteSlice(createMockPubKeyConverter().Len()) tx.Value = big.NewInt(45) tx.GasPrice = 1 tx.GasLimit = 1 @@ -266,7 +272,7 @@ func TestMetaTxProcessor_ProcessTransactionScTxShouldReturnErrWhenExecutionFails tx := transaction.Transaction{} tx.Nonce = 0 tx.SndAddr = []byte("SRC") - tx.RcvAddr = generateRandomByteSlice(createMockPubkeyConverter().Len()) + tx.RcvAddr = generateRandomByteSlice(createMockPubKeyConverter().Len()) tx.Value = big.NewInt(45) acntSrc, err := state.NewUserAccount(tx.SndAddr) @@ -316,7 +322,7 @@ func TestMetaTxProcessor_ProcessTransactionScTxShouldNotBeCalledWhenAdrDstIsNotI tx := transaction.Transaction{} tx.Nonce = 0 tx.SndAddr = []byte("SRC") - tx.RcvAddr = generateRandomByteSlice(createMockPubkeyConverter().Len()) + tx.RcvAddr = generateRandomByteSlice(createMockPubKeyConverter().Len()) tx.Value = big.NewInt(45) shardCoordinator.ComputeIdCalled = func(address []byte) uint32 { @@ -354,7 +360,7 @@ func TestMetaTxProcessor_ProcessTransactionScTxShouldNotBeCalledWhenAdrDstIsNotI esdtTransferParser, _ := parsers.NewESDTTransferParser(&mock.MarshalizerMock{}) argsTxTypeHandler := coordinator.ArgNewTxTypeHandler{ - PubkeyConverter: createMockPubkeyConverter(), + PubkeyConverter: createMockPubKeyConverter(), ShardCoordinator: shardCoordinator, BuiltInFunctions: builtInFunctions.NewBuiltInFunctionContainer(), ArgumentParser: parsers.NewCallArgsParser(), @@ -386,7 +392,7 @@ func TestMetaTxProcessor_ProcessTransactionBuiltInCallTxShouldWork(t *testing.T) tx := transaction.Transaction{} tx.Nonce = 0 tx.SndAddr = []byte("SRC") - tx.RcvAddr = generateRandomByteSlice(createMockPubkeyConverter().Len()) + tx.RcvAddr = generateRandomByteSlice(createMockPubKeyConverter().Len()) tx.Value = big.NewInt(45) tx.GasPrice = 1 tx.GasLimit = 1 @@ -446,3 +452,42 @@ func TestMetaTxProcessor_ProcessTransactionBuiltInCallTxShouldWork(t *testing.T) assert.True(t, builtInCalled) assert.Equal(t, 0, saveAccountCalled) } + +func TestMetaTxProcessor_ProcessTransactionWithInvalidUsernameShouldNotError(t *testing.T) { + t.Parallel() + + tx := &transaction.Transaction{} + tx.Nonce = 0 + tx.SndAddr = bytes.Repeat([]byte{1}, 32) + tx.RcvAddr = vm.GovernanceSCAddress + tx.RcvUserName = []byte("username") + tx.Value = big.NewInt(45) + tx.GasPrice = 1 + tx.GasLimit = 1 + + acntDst, err := state.NewUserAccount(tx.RcvAddr) + assert.Nil(t, err) + + called := false + adb := createAccountStub(tx.SndAddr, tx.RcvAddr, acntDst, acntDst) + scProcessor := &testscommon.SCProcessorMock{ + ProcessIfErrorCalled: func(acntSnd state.UserAccountHandler, txHash []byte, tx data.TransactionHandler, returnCode string, returnMessage []byte, snapshot int, gasLocked uint64) error { + called = true + return nil + }, + } + + args := createMockNewMetaTxArgs() + args.Accounts = adb + args.ScProcessor = scProcessor + args.ShardCoordinator, _ = sharding.NewMultiShardCoordinator(3, core.MetachainShardId) + txProc, _ := txproc.NewMetaTxProcessor(args) + + err = txProc.VerifyTransaction(tx) + assert.Equal(t, err, process.ErrUserNameDoesNotMatchInCrossShardTx) + + returnCode, err := 
txProc.ProcessTransaction(tx) + assert.Nil(t, err) + assert.Equal(t, vmcommon.UserError, returnCode) + assert.True(t, called) +} diff --git a/process/transaction/shardProcess.go b/process/transaction/shardProcess.go index 7ebb6faa014..d9709cd2bf5 100644 --- a/process/transaction/shardProcess.go +++ b/process/transaction/shardProcess.go @@ -64,6 +64,8 @@ type ArgsNewTxProcessor struct { ArgsParser process.ArgumentsParser ScrForwarder process.IntermediateTransactionHandler EnableEpochsHandler common.EnableEpochsHandler + TxVersionChecker process.TxVersionCheckerHandler + GuardianChecker process.GuardianChecker } // NewTxProcessor creates a new txProcessor engine @@ -113,6 +115,12 @@ func NewTxProcessor(args ArgsNewTxProcessor) (*txProcessor, error) { if check.IfNil(args.EnableEpochsHandler) { return nil, process.ErrNilEnableEpochsHandler } + if check.IfNil(args.TxVersionChecker) { + return nil, process.ErrNilTransactionVersionChecker + } + if check.IfNil(args.GuardianChecker) { + return nil, process.ErrNilGuardianChecker + } baseTxProcess := &baseTxProcessor{ accounts: args.Accounts, @@ -123,6 +131,8 @@ func NewTxProcessor(args ArgsNewTxProcessor) (*txProcessor, error) { marshalizer: args.Marshalizer, scProcessor: args.ScProcessor, enableEpochsHandler: args.EnableEpochsHandler, + txVersionChecker: args.TxVersionChecker, + guardianChecker: args.GuardianChecker, } txProc := &txProcessor{ diff --git a/process/transaction/shardProcess_test.go b/process/transaction/shardProcess_test.go index e90df69d19b..61c56203d6a 100644 --- a/process/transaction/shardProcess_test.go +++ b/process/transaction/shardProcess_test.go @@ -20,6 +20,8 @@ import ( "github.com/multiversx/mx-chain-go/sharding" "github.com/multiversx/mx-chain-go/state" "github.com/multiversx/mx-chain-go/testscommon" + "github.com/multiversx/mx-chain-go/testscommon/economicsmocks" + "github.com/multiversx/mx-chain-go/testscommon/guardianMocks" "github.com/multiversx/mx-chain-go/testscommon/hashingMocks" stateMock "github.com/multiversx/mx-chain-go/testscommon/state" "github.com/multiversx/mx-chain-go/vm" @@ -36,8 +38,8 @@ func generateRandomByteSlice(size int) []byte { return buff } -func feeHandlerMock() *mock.FeeHandlerStub { - return &mock.FeeHandlerStub{ +func feeHandlerMock() *economicsmocks.EconomicsHandlerStub { + return &economicsmocks.EconomicsHandlerStub{ CheckValidityTxValuesCalled: func(tx data.TransactionWithFeeHandler) error { return nil }, @@ -71,7 +73,7 @@ func createArgsForTxProcessor() txproc.ArgsNewTxProcessor { args := txproc.ArgsNewTxProcessor{ Accounts: &stateMock.AccountsStub{}, Hasher: &hashingMocks.HasherMock{}, - PubkeyConv: createMockPubkeyConverter(), + PubkeyConv: createMockPubKeyConverter(), Marshalizer: &mock.MarshalizerMock{}, SignMarshalizer: &mock.MarshalizerMock{}, ShardCoordinator: mock.NewOneShardCoordinatorMock(), @@ -86,6 +88,8 @@ func createArgsForTxProcessor() txproc.ArgsNewTxProcessor { EnableEpochsHandler: &testscommon.EnableEpochsHandlerStub{ IsPenalizedTooMuchGasFlagEnabledField: true, }, + GuardianChecker: &guardianMocks.GuardedAccountHandlerStub{}, + TxVersionChecker: &testscommon.TxVersionCheckerStub{}, } return args } @@ -676,7 +680,7 @@ func TestTxProcessor_ProcessWithTxFeeHandlerCheckErrorShouldErr(t *testing.T) { args.Accounts = adb expectedError := errors.New("validatity check failed") - args.EconomicsFee = &mock.FeeHandlerStub{ + args.EconomicsFee = &economicsmocks.EconomicsHandlerStub{ CheckValidityTxValuesCalled: func(tx data.TransactionWithFeeHandler) error { return expectedError }} 
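// Both shard and meta processors now require the two new guardian dependencies and
// fail fast with dedicated errors when either is missing; a short sketch using the
// createArgsForTxProcessor test factory defined in this file (test name illustrative):
func TestNilGuardianDepsSketch(t *testing.T) {
	args := createArgsForTxProcessor()
	args.TxVersionChecker = nil
	_, err := txproc.NewTxProcessor(args)
	assert.Equal(t, process.ErrNilTransactionVersionChecker, err)

	args = createArgsForTxProcessor()
	args.GuardianChecker = nil
	_, err = txproc.NewTxProcessor(args)
	assert.Equal(t, process.ErrNilGuardianChecker, err)
}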
@@ -729,7 +733,7 @@ func TestTxProcessor_ProcessWithTxFeeHandlerInsufficientFeeShouldErr(t *testing. args := createArgsForTxProcessor() args.Accounts = adb - args.EconomicsFee = &mock.FeeHandlerStub{ + args.EconomicsFee = &economicsmocks.EconomicsHandlerStub{ ComputeTxFeeCalled: func(tx data.TransactionWithFeeHandler) *big.Int { return big.NewInt(0).Add(acntSrc.Balance, big.NewInt(1)) }} @@ -761,7 +765,7 @@ func TestTxProcessor_ProcessWithInsufficientFundsShouldCreateReceiptErr(t *testi args := createArgsForTxProcessor() args.Accounts = adb - args.EconomicsFee = &mock.FeeHandlerStub{ + args.EconomicsFee = &economicsmocks.EconomicsHandlerStub{ CheckValidityTxValuesCalled: func(tx data.TransactionWithFeeHandler) error { return process.ErrInsufficientFunds }} @@ -794,7 +798,7 @@ func TestTxProcessor_ProcessWithUsernameMismatchCreateReceiptErr(t *testing.T) { args := createArgsForTxProcessor() args.Accounts = adb - args.EconomicsFee = &mock.FeeHandlerStub{ + args.EconomicsFee = &economicsmocks.EconomicsHandlerStub{ CheckValidityTxValuesCalled: func(tx data.TransactionWithFeeHandler) error { return process.ErrUserNameDoesNotMatchInCrossShardTx }} @@ -825,7 +829,7 @@ func TestTxProcessor_ProcessWithUsernameMismatchAndSCProcessErrorShouldError(t * args := createArgsForTxProcessor() args.Accounts = adb - args.EconomicsFee = &mock.FeeHandlerStub{ + args.EconomicsFee = &economicsmocks.EconomicsHandlerStub{ CheckValidityTxValuesCalled: func(tx data.TransactionWithFeeHandler) error { return process.ErrUserNameDoesNotMatchInCrossShardTx }} @@ -1019,7 +1023,7 @@ func TestTxProcessor_MoveBalanceWithFeesShouldWork(t *testing.T) { } txCost := big.NewInt(16) - feeHandler := &mock.FeeHandlerStub{ + feeHandler := &economicsmocks.EconomicsHandlerStub{ CheckValidityTxValuesCalled: func(tx data.TransactionWithFeeHandler) error { return nil }, @@ -1048,7 +1052,7 @@ func TestTxProcessor_ProcessTransactionScDeployTxShouldWork(t *testing.T) { tx := transaction.Transaction{} tx.Nonce = 0 tx.SndAddr = []byte("SRC") - tx.RcvAddr = generateRandomByteSlice(createMockPubkeyConverter().Len()) + tx.RcvAddr = generateRandomByteSlice(createMockPubKeyConverter().Len()) tx.Value = big.NewInt(45) tx.GasPrice = 1 tx.GasLimit = 1 @@ -1099,7 +1103,7 @@ func TestTxProcessor_ProcessTransactionBuiltInFunctionCallShouldWork(t *testing. 
tx := transaction.Transaction{} tx.Nonce = 0 tx.SndAddr = []byte("SRC") - tx.RcvAddr = generateRandomByteSlice(createMockPubkeyConverter().Len()) + tx.RcvAddr = generateRandomByteSlice(createMockPubKeyConverter().Len()) tx.Value = big.NewInt(45) tx.GasPrice = 1 tx.GasLimit = 1 @@ -1150,7 +1154,7 @@ func TestTxProcessor_ProcessTransactionScTxShouldWork(t *testing.T) { tx := transaction.Transaction{} tx.Nonce = 0 tx.SndAddr = []byte("SRC") - tx.RcvAddr = generateRandomByteSlice(createMockPubkeyConverter().Len()) + tx.RcvAddr = generateRandomByteSlice(createMockPubKeyConverter().Len()) tx.Value = big.NewInt(45) tx.GasPrice = 1 tx.GasLimit = 1 @@ -1202,7 +1206,7 @@ func TestTxProcessor_ProcessTransactionScTxShouldReturnErrWhenExecutionFails(t * tx := transaction.Transaction{} tx.Nonce = 0 tx.SndAddr = []byte("SRC") - tx.RcvAddr = generateRandomByteSlice(createMockPubkeyConverter().Len()) + tx.RcvAddr = generateRandomByteSlice(createMockPubKeyConverter().Len()) tx.Value = big.NewInt(45) acntSrc, err := state.NewUserAccount(tx.SndAddr) @@ -1251,7 +1255,7 @@ func TestTxProcessor_ProcessTransactionScTxShouldNotBeCalledWhenAdrDstIsNotInNod tx := transaction.Transaction{} tx.Nonce = 0 tx.SndAddr = []byte("SRC") - tx.RcvAddr = generateRandomByteSlice(createMockPubkeyConverter().Len()) + tx.RcvAddr = generateRandomByteSlice(createMockPubKeyConverter().Len()) tx.Value = big.NewInt(45) shardCoordinator.ComputeIdCalled = func(address []byte) uint32 { @@ -1315,7 +1319,7 @@ func TestTxProcessor_ProcessTxFeeIntraShard(t *testing.T) { negMoveBalanceFee := big.NewInt(0).Neg(moveBalanceFee) totalGiven := big.NewInt(100) args := createArgsForTxProcessor() - args.EconomicsFee = &mock.FeeHandlerStub{ + args.EconomicsFee = &economicsmocks.EconomicsHandlerStub{ ComputeMoveBalanceFeeCalled: func(tx data.TransactionWithFeeHandler) *big.Int { return moveBalanceFee }, @@ -1350,7 +1354,7 @@ func TestTxProcessor_ProcessTxFeeCrossShardMoveBalance(t *testing.T) { negMoveBalanceFee := big.NewInt(0).Neg(moveBalanceFee) totalGiven := big.NewInt(100) args := createArgsForTxProcessor() - args.EconomicsFee = &mock.FeeHandlerStub{ + args.EconomicsFee = &economicsmocks.EconomicsHandlerStub{ ComputeMoveBalanceFeeCalled: func(tx data.TransactionWithFeeHandler) *big.Int { return moveBalanceFee }, @@ -1409,7 +1413,7 @@ func TestTxProcessor_ProcessTxFeeCrossShardSCCall(t *testing.T) { moveBalanceFee := big.NewInt(50) args := createArgsForTxProcessor() - args.EconomicsFee = &mock.FeeHandlerStub{ + args.EconomicsFee = &economicsmocks.EconomicsHandlerStub{ ComputeMoveBalanceFeeCalled: func(tx data.TransactionWithFeeHandler) *big.Int { return moveBalanceFee }, @@ -1448,7 +1452,7 @@ func TestTxProcessor_ProcessTxFeeMoveBalanceUserTx(t *testing.T) { processingFee := big.NewInt(5) negMoveBalanceFee := big.NewInt(0).Neg(moveBalanceFee) args := createArgsForTxProcessor() - args.EconomicsFee = &mock.FeeHandlerStub{ + args.EconomicsFee = &economicsmocks.EconomicsHandlerStub{ ComputeMoveBalanceFeeCalled: func(tx data.TransactionWithFeeHandler) *big.Int { return moveBalanceFee }, @@ -1484,7 +1488,7 @@ func TestTxProcessor_ProcessTxFeeSCInvokeUserTx(t *testing.T) { negMoveBalanceFee := big.NewInt(0).Neg(moveBalanceFee) gasPerByte := uint64(1) args := createArgsForTxProcessor() - args.EconomicsFee = &mock.FeeHandlerStub{ + args.EconomicsFee = &economicsmocks.EconomicsHandlerStub{ ComputeMoveBalanceFeeCalled: func(tx data.TransactionWithFeeHandler) *big.Int { return moveBalanceFee }, @@ -1548,7 +1552,7 @@ func 
TestTxProcessor_ProcessTransactionShouldReturnErrForInvalidMetaTx(t *testin args.Accounts = adb args.ScProcessor = scProcessorMock args.ShardCoordinator = shardC - args.EconomicsFee = &mock.FeeHandlerStub{ + args.EconomicsFee = &economicsmocks.EconomicsHandlerStub{ ComputeMoveBalanceFeeCalled: func(tx data.TransactionWithFeeHandler) *big.Int { return big.NewInt(1) }, @@ -1599,7 +1603,7 @@ func TestTxProcessor_ProcessTransactionShouldTreatAsInvalidTxIfTxTypeIsWrong(t * args := createArgsForTxProcessor() args.Accounts = adb args.ShardCoordinator = shardC - args.EconomicsFee = &mock.FeeHandlerStub{ + args.EconomicsFee = &economicsmocks.EconomicsHandlerStub{ ComputeTxFeeCalled: func(tx data.TransactionWithFeeHandler) *big.Int { return big.NewInt(1) }, @@ -2659,7 +2663,7 @@ func TestTxProcessor_ConsumeMoveBalanceWithUserTx(t *testing.T) { t.Parallel() args := createArgsForTxProcessor() - args.EconomicsFee = &mock.FeeHandlerStub{ + args.EconomicsFee = &economicsmocks.EconomicsHandlerStub{ ComputeFeeForProcessingCalled: func(tx data.TransactionWithFeeHandler, gasToUse uint64) *big.Int { return big.NewInt(1) }, diff --git a/process/transaction/transactionCostEstimator_test.go b/process/transaction/transactionCostEstimator_test.go index 123e4200835..66338b83d9f 100644 --- a/process/transaction/transactionCostEstimator_test.go +++ b/process/transaction/transactionCostEstimator_test.go @@ -15,6 +15,7 @@ import ( "github.com/multiversx/mx-chain-go/process/txsimulator" txSimData "github.com/multiversx/mx-chain-go/process/txsimulator/data" "github.com/multiversx/mx-chain-go/testscommon" + "github.com/multiversx/mx-chain-go/testscommon/economicsmocks" stateMock "github.com/multiversx/mx-chain-go/testscommon/state" vmcommon "github.com/multiversx/mx-chain-vm-common-go" "github.com/stretchr/testify/require" @@ -25,7 +26,7 @@ func TestTransactionCostEstimator_NilTxTypeHandler(t *testing.T) { tce, err := NewTransactionCostEstimator( nil, - &mock.FeeHandlerStub{}, + &economicsmocks.EconomicsHandlerStub{}, &mock.TransactionSimulatorStub{}, &stateMock.AccountsStub{}, &mock.ShardCoordinatorStub{}, @@ -55,7 +56,7 @@ func TestTransactionCostEstimator_NilTransactionSimulatorShouldErr(t *testing.T) tce, err := NewTransactionCostEstimator( &testscommon.TxTypeHandlerMock{}, - &mock.FeeHandlerStub{}, + &economicsmocks.EconomicsHandlerStub{}, nil, &stateMock.AccountsStub{}, &mock.ShardCoordinatorStub{}, @@ -70,7 +71,7 @@ func TestTransactionCostEstimator_NilEnableEpochsHandlerShouldErr(t *testing.T) tce, err := NewTransactionCostEstimator( &testscommon.TxTypeHandlerMock{}, - &mock.FeeHandlerStub{}, + &economicsmocks.EconomicsHandlerStub{}, &mock.TransactionSimulatorStub{}, &stateMock.AccountsStub{}, &mock.ShardCoordinatorStub{}, @@ -85,7 +86,7 @@ func TestTransactionCostEstimator_Ok(t *testing.T) { tce, err := NewTransactionCostEstimator( &testscommon.TxTypeHandlerMock{}, - &mock.FeeHandlerStub{}, + &economicsmocks.EconomicsHandlerStub{}, &mock.TransactionSimulatorStub{}, &stateMock.AccountsStub{}, &mock.ShardCoordinatorStub{}, @@ -103,8 +104,8 @@ func TestComputeTransactionGasLimit_MoveBalance(t *testing.T) { ComputeTransactionTypeCalled: func(tx data.TransactionHandler) (process.TransactionType, process.TransactionType) { return process.MoveBalance, process.MoveBalance }, - }, &mock.FeeHandlerStub{ - MaxGasLimitPerBlockCalled: func() uint64 { + }, &economicsmocks.EconomicsHandlerStub{ + MaxGasLimitPerBlockCalled: func(_ uint32) uint64 { return math.MaxUint64 }, ComputeGasLimitCalled: func(tx 
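// The economics stub migration visible in these hunks also changes the callback shape:
// MaxGasLimitPerBlockCalled now takes a uint32 argument (presumably the shard ID),
// which these tests simply ignore. Minimal wiring:
var feeStubSketch = &economicsmocks.EconomicsHandlerStub{
	MaxGasLimitPerBlockCalled: func(_ uint32) uint64 {
		return math.MaxUint64 // effectively disables the per-block cap in tests
	},
}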
data.TransactionWithFeeHandler) uint64 { @@ -136,8 +137,8 @@ func TestComputeTransactionGasLimit_MoveBalanceInvalidNonceShouldStillComputeCos ComputeTransactionTypeCalled: func(tx data.TransactionHandler) (process.TransactionType, process.TransactionType) { return process.MoveBalance, process.MoveBalance }, - }, &mock.FeeHandlerStub{ - MaxGasLimitPerBlockCalled: func() uint64 { + }, &economicsmocks.EconomicsHandlerStub{ + MaxGasLimitPerBlockCalled: func(_ uint32) uint64 { return math.MaxUint64 }, ComputeGasLimitCalled: func(tx data.TransactionWithFeeHandler) uint64 { @@ -166,8 +167,8 @@ func TestComputeTransactionGasLimit_BuiltInFunction(t *testing.T) { ComputeTransactionTypeCalled: func(tx data.TransactionHandler) (process.TransactionType, process.TransactionType) { return process.BuiltInFunctionCall, process.BuiltInFunctionCall }, - }, &mock.FeeHandlerStub{ - MaxGasLimitPerBlockCalled: func() uint64 { + }, &economicsmocks.EconomicsHandlerStub{ + MaxGasLimitPerBlockCalled: func(_ uint32) uint64 { return math.MaxUint64 }, }, @@ -199,8 +200,8 @@ func TestComputeTransactionGasLimit_BuiltInFunctionShouldErr(t *testing.T) { ComputeTransactionTypeCalled: func(tx data.TransactionHandler) (process.TransactionType, process.TransactionType) { return process.BuiltInFunctionCall, process.BuiltInFunctionCall }, - }, &mock.FeeHandlerStub{ - MaxGasLimitPerBlockCalled: func() uint64 { + }, &economicsmocks.EconomicsHandlerStub{ + MaxGasLimitPerBlockCalled: func(_ uint32) uint64 { return math.MaxUint64 }, }, @@ -226,8 +227,8 @@ func TestComputeTransactionGasLimit_NilVMOutput(t *testing.T) { ComputeTransactionTypeCalled: func(tx data.TransactionHandler) (process.TransactionType, process.TransactionType) { return process.BuiltInFunctionCall, process.BuiltInFunctionCall }, - }, &mock.FeeHandlerStub{ - MaxGasLimitPerBlockCalled: func() uint64 { + }, &economicsmocks.EconomicsHandlerStub{ + MaxGasLimitPerBlockCalled: func(_ uint32) uint64 { return math.MaxUint64 }, }, @@ -253,8 +254,8 @@ func TestComputeTransactionGasLimit_RetCodeNotOk(t *testing.T) { ComputeTransactionTypeCalled: func(tx data.TransactionHandler) (process.TransactionType, process.TransactionType) { return process.BuiltInFunctionCall, process.BuiltInFunctionCall }, - }, &mock.FeeHandlerStub{ - MaxGasLimitPerBlockCalled: func() uint64 { + }, &economicsmocks.EconomicsHandlerStub{ + MaxGasLimitPerBlockCalled: func(_ uint32) uint64 { return math.MaxUint64 }, }, @@ -288,7 +289,7 @@ func TestTransactionCostEstimator_RelayedTxShouldErr(t *testing.T) { return process.RelayedTx, process.RelayedTx }, }, - &mock.FeeHandlerStub{}, + &economicsmocks.EconomicsHandlerStub{}, &mock.TransactionSimulatorStub{}, &stateMock.AccountsStub{}, &mock.ShardCoordinatorStub{}, diff --git a/process/txsimulator/wrappedAccountsDB_test.go b/process/txsimulator/wrappedAccountsDB_test.go index 1bf48e18531..e83fe6a0d58 100644 --- a/process/txsimulator/wrappedAccountsDB_test.go +++ b/process/txsimulator/wrappedAccountsDB_test.go @@ -7,6 +7,7 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/common/errChan" "github.com/multiversx/mx-chain-go/state" stateMock "github.com/multiversx/mx-chain-go/testscommon/state" vmcommon "github.com/multiversx/mx-chain-vm-common-go" @@ -150,11 +151,11 @@ func TestReadOnlyAccountsDB_ReadOperationsShouldWork(t *testing.T) { allLeaves := &common.TrieIteratorChannels{ LeavesChan: make(chan core.KeyValueHolder), - 
ErrChan: make(chan error, 1), + ErrChan: errChan.NewErrChanWrapper(), } err = roAccDb.GetAllLeaves(allLeaves, context.Background(), nil) require.NoError(t, err) - err = common.GetErrorFromChanNonBlocking(allLeaves.ErrChan) + err = allLeaves.ErrChan.ReadFromChanNonBlocking() require.NoError(t, err) } diff --git a/sharding/mock/enableEpochsHandlerMock.go b/sharding/mock/enableEpochsHandlerMock.go index 6173c091e32..cbe6fb10014 100644 --- a/sharding/mock/enableEpochsHandlerMock.go +++ b/sharding/mock/enableEpochsHandlerMock.go @@ -566,6 +566,21 @@ func (mock *EnableEpochsHandlerMock) IsAlwaysSaveTokenMetaDataEnabled() bool { return false } +// IsSetGuardianEnabled returns false +func (mock *EnableEpochsHandlerMock) IsSetGuardianEnabled() bool { + return false +} + +// IsKeepExecOrderOnCreatedSCRsEnabled - +func (mock *EnableEpochsHandlerMock) IsKeepExecOrderOnCreatedSCRsEnabled() bool { + return false +} + +// IsMultiClaimOnDelegationEnabled - +func (mock *EnableEpochsHandlerMock) IsMultiClaimOnDelegationEnabled() bool { + return false +} + // IsInterfaceNil returns true if there is no value under the interface func (mock *EnableEpochsHandlerMock) IsInterfaceNil() bool { return mock == nil diff --git a/state/accountsDB.go b/state/accountsDB.go index 295824b8c7f..5daea6408ab 100644 --- a/state/accountsDB.go +++ b/state/accountsDB.go @@ -16,6 +16,7 @@ import ( "github.com/multiversx/mx-chain-core-go/hashing" "github.com/multiversx/mx-chain-core-go/marshal" "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/common/errChan" "github.com/multiversx/mx-chain-go/common/holders" "github.com/multiversx/mx-chain-go/trie/keyBuilder" "github.com/multiversx/mx-chain-go/trie/statistics" @@ -509,12 +510,11 @@ func saveCodeEntry(codeHash []byte, entry *CodeEntry, trie Updater, marshalizer return nil } -// LoadDataTrie retrieves and saves the SC data inside accountHandler object. +// loadDataTrieConcurrentSafe retrieves and saves the SC data inside accountHandler object. 
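// The errChan.NewErrChanWrapper calls introduced throughout this diff replace raw
// make(chan error, 1) plumbing. A minimal sketch of such a wrapper (imports "sync"),
// assuming, as the tests here suggest, a buffered channel of one, non-blocking writes
// and reads, and an idempotent Close; the real type lives in common/errChan and
// implements common.BufferedErrChan.
type errChanWrapperSketch struct {
	ch        chan error
	closeOnce sync.Once
}

func newErrChanWrapperSketch() *errChanWrapperSketch {
	return &errChanWrapperSketch{ch: make(chan error, 1)}
}

// WriteInChanNonBlocking drops the error when the buffer already holds one,
// so reporting goroutines can never block on a full channel
func (e *errChanWrapperSketch) WriteInChanNonBlocking(err error) {
	select {
	case e.ch <- err:
	default:
	}
}

// ReadFromChanNonBlocking returns a pending error, or nil when none is buffered
func (e *errChanWrapperSketch) ReadFromChanNonBlocking() error {
	select {
	case err := <-e.ch:
		return err
	default:
		return nil
	}
}

// Close closes the underlying channel exactly once
func (e *errChanWrapperSketch) Close() {
	e.closeOnce.Do(func() { close(e.ch) })
}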
// Errors if something went wrong -func (adb *AccountsDB) loadDataTrie(accountHandler baseAccountHandler, mainTrie common.Trie) error { - if len(accountHandler.GetRootHash()) == 0 { - return nil - } +func (adb *AccountsDB) loadDataTrieConcurrentSafe(accountHandler baseAccountHandler, mainTrie common.Trie) error { + adb.mutOp.Lock() + defer adb.mutOp.Unlock() dataTrie := adb.dataTries.Get(accountHandler.AddressBytes()) if dataTrie != nil { @@ -522,6 +522,10 @@ func (adb *AccountsDB) loadDataTrie(accountHandler baseAccountHandler, mainTrie return nil } + if len(accountHandler.GetRootHash()) == 0 { + return nil + } + dataTrie, err := mainTrie.Recreate(accountHandler.GetRootHash()) if err != nil { return fmt.Errorf("trie was not found for hash, rootHash = %s, err = %w", hex.EncodeToString(accountHandler.GetRootHash()), err) @@ -702,7 +706,7 @@ func (adb *AccountsDB) LoadAccount(address []byte) (vmcommon.AccountHandler, err baseAcc, ok := acnt.(baseAccountHandler) if ok { - err = adb.loadDataTrie(baseAcc, mainTrie) + err = adb.loadDataTrieConcurrentSafe(baseAcc, mainTrie) if err != nil { return nil, err } @@ -754,7 +758,7 @@ func (adb *AccountsDB) GetExistingAccount(address []byte) (vmcommon.AccountHandl baseAcc, ok := acnt.(baseAccountHandler) if ok { - err = adb.loadDataTrie(baseAcc, mainTrie) + err = adb.loadDataTrieConcurrentSafe(baseAcc, mainTrie) if err != nil { return nil, err } @@ -784,7 +788,7 @@ func (adb *AccountsDB) GetAccountFromBytes(address []byte, accountBytes []byte) return acnt, nil } - err = adb.loadDataTrie(baseAcc, adb.getMainTrie()) + err = adb.loadDataTrieConcurrentSafe(baseAcc, adb.getMainTrie()) if err != nil { return nil, err } @@ -1036,7 +1040,7 @@ func (adb *AccountsDB) recreateTrie(options common.RootHashHolder) error { func (adb *AccountsDB) RecreateAllTries(rootHash []byte) (map[string]common.Trie, error) { leavesChannels := &common.TrieIteratorChannels{ LeavesChan: make(chan core.KeyValueHolder, leavesChannelSize), - ErrChan: make(chan error, 1), + ErrChan: errChan.NewErrChanWrapper(), } mainTrie := adb.getMainTrie() err := mainTrie.GetAllLeavesOnChannel(leavesChannels, context.Background(), rootHash, keyBuilder.NewDisabledKeyBuilder()) @@ -1067,7 +1071,7 @@ func (adb *AccountsDB) RecreateAllTries(rootHash []byte) (map[string]common.Trie } } - err = common.GetErrorFromChanNonBlocking(leavesChannels.ErrChan) + err = leavesChannels.ErrChan.ReadFromChanNonBlocking() if err != nil { return nil, err } @@ -1099,7 +1103,10 @@ func (adb *AccountsDB) journalize(entry JournalEntry) { } adb.entries = append(adb.entries, entry) - log.Trace("accountsDB.Journalize", "new length", len(adb.entries)) + log.Trace("accountsDB.Journalize", + "new length", len(adb.entries), + "entry type", fmt.Sprintf("%T", entry), + ) if len(adb.entries) == 1 { adb.stackDebug = debug.Stack() @@ -1142,16 +1149,16 @@ func (adb *AccountsDB) SnapshotState(rootHash []byte) { missingNodesChannel := make(chan []byte, missingNodesChannelSize) iteratorChannels := &common.TrieIteratorChannels{ LeavesChan: make(chan core.KeyValueHolder, leavesChannelSize), - ErrChan: make(chan error, 1), + ErrChan: errChan.NewErrChanWrapper(), } stats := newSnapshotStatistics(1, 1) - accountMetrics := &accountMetrics{ + accountMetricsInstance := &accountMetrics{ snapshotInProgressKey: common.MetricAccountsSnapshotInProgress, lastSnapshotDurationKey: common.MetricLastAccountsSnapshotDurationSec, snapshotMessage: userTrieSnapshotMsg, } - adb.updateMetricsOnSnapshotStart(accountMetrics) + 
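// Consumer-side pattern for the wrapper, as RecreateAllTries above now uses it: drain
// the leaves channel first, then do a single non-blocking read to pick up any error
// reported during iteration (function name illustrative, all other identifiers from
// this file and diff):
func iterateLeavesSketch(tr common.Trie, rootHash []byte) error {
	channels := &common.TrieIteratorChannels{
		LeavesChan: make(chan core.KeyValueHolder, leavesChannelSize),
		ErrChan:    errChan.NewErrChanWrapper(),
	}
	err := tr.GetAllLeavesOnChannel(channels, context.Background(), rootHash, keyBuilder.NewDisabledKeyBuilder())
	if err != nil {
		return err
	}
	for leaf := range channels.LeavesChan {
		_ = leaf // consume every leaf before checking for a late error
	}
	return channels.ErrChan.ReadFromChanNonBlocking()
}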
adb.updateMetricsOnSnapshotStart(accountMetricsInstance) go func() { stats.NewSnapshotStarted() @@ -1164,7 +1171,7 @@ func (adb *AccountsDB) SnapshotState(rootHash []byte) { go adb.syncMissingNodes(missingNodesChannel, iteratorChannels.ErrChan, stats, adb.trieSyncer) - go adb.processSnapshotCompletion(stats, trieStorageManager, missingNodesChannel, iteratorChannels.ErrChan, rootHash, accountMetrics, epoch) + go adb.processSnapshotCompletion(stats, trieStorageManager, missingNodesChannel, iteratorChannels.ErrChan, rootHash, accountMetricsInstance, epoch) adb.waitForCompletionIfAppropriate(stats) } @@ -1253,7 +1260,7 @@ func (adb *AccountsDB) processSnapshotCompletion( stats *snapshotStatistics, trieStorageManager common.StorageManager, missingNodesCh chan []byte, - errChan chan error, + errChan common.BufferedErrChan, rootHash []byte, metrics *accountMetrics, epoch uint32, @@ -1263,15 +1270,15 @@ func (adb *AccountsDB) processSnapshotCompletion( defer func() { adb.isSnapshotInProgress.Reset() adb.updateMetricsOnSnapshotCompletion(metrics, stats) - close(errChan) + errChan.Close() }() - containsErrorDuringSnapshot := emptyErrChanReturningHadContained(errChan) - shouldNotMarkActive := trieStorageManager.IsClosed() || containsErrorDuringSnapshot + errorDuringSnapshot := errChan.ReadFromChanNonBlocking() + shouldNotMarkActive := trieStorageManager.IsClosed() || errorDuringSnapshot != nil if shouldNotMarkActive { log.Debug("will not set activeDB in epoch as the snapshot might be incomplete", "epoch", epoch, "trie storage manager closed", trieStorageManager.IsClosed(), - "errors during snapshot found", containsErrorDuringSnapshot) + "errors during snapshot found", errorDuringSnapshot) return } @@ -1283,7 +1290,7 @@ func (adb *AccountsDB) processSnapshotCompletion( handleLoggingWhenError("error while putting active DB value into main storer", errPut) } -func (adb *AccountsDB) syncMissingNodes(missingNodesChan chan []byte, errChan chan error, stats *snapshotStatistics, syncer AccountsDBSyncer) { +func (adb *AccountsDB) syncMissingNodes(missingNodesChan chan []byte, errChan common.BufferedErrChan, stats *snapshotStatistics, syncer AccountsDBSyncer) { defer stats.SyncFinished() if check.IfNil(syncer) { @@ -1291,7 +1298,7 @@ func (adb *AccountsDB) syncMissingNodes(missingNodesChan chan []byte, errChan ch for missingNode := range missingNodesChan { log.Warn("could not sync node", "hash", missingNode) } - errChan <- ErrNilTrieSyncer + errChan.WriteInChanNonBlocking(ErrNilTrieSyncer) return } @@ -1302,7 +1309,7 @@ func (adb *AccountsDB) syncMissingNodes(missingNodesChan chan []byte, errChan ch "missing node hash", missingNode, "error", err, ) - errChan <- err + errChan.WriteInChanNonBlocking(err) } } } @@ -1370,7 +1377,7 @@ func (adb *AccountsDB) setStateCheckpoint(rootHash []byte) { iteratorChannels := &common.TrieIteratorChannels{ LeavesChan: make(chan core.KeyValueHolder, leavesChannelSize), - ErrChan: make(chan error, 1), + ErrChan: errChan.NewErrChanWrapper(), } missingNodesChannel := make(chan []byte, missingNodesChannelSize) stats := newSnapshotStatistics(1, 1) @@ -1436,7 +1443,7 @@ func (adb *AccountsDB) GetStatsForRootHash(rootHash []byte) (common.TriesStatist iteratorChannels := &common.TrieIteratorChannels{ LeavesChan: make(chan core.KeyValueHolder, leavesChannelSize), - ErrChan: make(chan error, 1), + ErrChan: errChan.NewErrChanWrapper(), } err := mainTrie.GetAllLeavesOnChannel(iteratorChannels, context.Background(), rootHash, keyBuilder.NewDisabledKeyBuilder()) if err != nil { @@ -1463,7 
+1470,7 @@ func (adb *AccountsDB) GetStatsForRootHash(rootHash []byte) (common.TriesStatist collectStats(tr, stats, account.RootHash, accountAddress) } - err = common.GetErrorFromChanNonBlocking(iteratorChannels.ErrChan) + err = iteratorChannels.ErrChan.ReadFromChanNonBlocking() if err != nil { return nil, err } diff --git a/state/accountsDB_test.go b/state/accountsDB_test.go index 4e1f98ea294..2bb43b499b1 100644 --- a/state/accountsDB_test.go +++ b/state/accountsDB_test.go @@ -18,6 +18,7 @@ import ( "github.com/multiversx/mx-chain-core-go/core/keyValStorage" "github.com/multiversx/mx-chain-core-go/marshal" "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/common/errChan" "github.com/multiversx/mx-chain-go/common/holders" "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/process/mock" @@ -706,7 +707,7 @@ func TestAccountsDB_LoadDataNilRootShouldRetNil(t *testing.T) { _, account, adb := generateAddressAccountAccountsDB(tr) // since root is nil, result should be nil and data trie should be nil - err := adb.LoadDataTrie(account) + err := adb.LoadDataTrieConcurrentSafe(account) assert.Nil(t, err) assert.Nil(t, account.DataTrie()) } @@ -723,7 +724,7 @@ func TestAccountsDB_LoadDataBadLengthShouldErr(t *testing.T) { account.SetRootHash([]byte("12345")) // should return error - err := adb.LoadDataTrie(account) + err := adb.LoadDataTrieConcurrentSafe(account) assert.NotNil(t, err) } @@ -741,7 +742,7 @@ func TestAccountsDB_LoadDataMalfunctionTrieShouldErr(t *testing.T) { adb := generateAccountDBFromTrie(mockTrie) // should return error - err := adb.LoadDataTrie(account) + err := adb.LoadDataTrieConcurrentSafe(account) assert.NotNil(t, err) } @@ -759,7 +760,7 @@ func TestAccountsDB_LoadDataNotFoundRootShouldReturnErr(t *testing.T) { account.SetRootHash(rootHash) // should return error - err := adb.LoadDataTrie(account) + err := adb.LoadDataTrieConcurrentSafe(account) assert.NotNil(t, err) fmt.Println(err.Error()) } @@ -803,7 +804,7 @@ func TestAccountsDB_LoadDataWithSomeValuesShouldWork(t *testing.T) { account.SetRootHash(rootHash) // should not return error - err := adb.LoadDataTrie(account) + err := adb.LoadDataTrieConcurrentSafe(account) assert.Nil(t, err) // verify data @@ -1028,7 +1029,7 @@ func TestAccountsDB_SnapshotStateWithErrorsShouldNotMarkActiveDB(t *testing.T) { return true }, TakeSnapshotCalled: func(_ string, _ []byte, _ []byte, iteratorChannels *common.TrieIteratorChannels, _ chan []byte, stats common.SnapshotStatisticsHandler, _ uint32) { - iteratorChannels.ErrChan <- expectedErr + iteratorChannels.ErrChan.WriteInChanNonBlocking(expectedErr) close(iteratorChannels.LeavesChan) stats.SnapshotFinished() }, @@ -1429,7 +1430,7 @@ func TestAccountsDB_GetAllLeaves(t *testing.T) { GetAllLeavesOnChannelCalled: func(channels *common.TrieIteratorChannels, ctx context.Context, rootHash []byte, builder common.KeyBuilder) error { getAllLeavesCalled = true close(channels.LeavesChan) - close(channels.ErrChan) + channels.ErrChan.Close() return nil }, @@ -1442,13 +1443,13 @@ func TestAccountsDB_GetAllLeaves(t *testing.T) { leavesChannel := &common.TrieIteratorChannels{ LeavesChan: make(chan core.KeyValueHolder, common.TrieLeavesChannelDefaultCapacity), - ErrChan: make(chan error, 1), + ErrChan: errChan.NewErrChanWrapper(), } err := adb.GetAllLeaves(leavesChannel, context.Background(), []byte("root hash")) assert.Nil(t, err) assert.True(t, getAllLeavesCalled) - err = common.GetErrorFromChanNonBlocking(leavesChannel.ErrChan) + err = 
leavesChannel.ErrChan.ReadFromChanNonBlocking() assert.Nil(t, err) } @@ -2325,10 +2326,10 @@ func TestAccountsDB_RecreateAllTries(t *testing.T) { GetAllLeavesOnChannelCalled: func(leavesChannels *common.TrieIteratorChannels, ctx context.Context, rootHash []byte, keyBuilder common.KeyBuilder) error { go func() { leavesChannels.LeavesChan <- keyValStorage.NewKeyValStorage([]byte("key"), []byte("val")) - leavesChannels.ErrChan <- expectedErr + leavesChannels.ErrChan.WriteInChanNonBlocking(expectedErr) close(leavesChannels.LeavesChan) - close(leavesChannels.ErrChan) + leavesChannels.ErrChan.Close() }() return nil @@ -2356,7 +2357,7 @@ func TestAccountsDB_RecreateAllTries(t *testing.T) { leavesChannels.LeavesChan <- keyValStorage.NewKeyValStorage([]byte("key"), []byte("val")) close(leavesChannels.LeavesChan) - close(leavesChannels.ErrChan) + leavesChannels.ErrChan.Close() }() return nil @@ -2734,17 +2735,17 @@ func TestEmptyErrChanReturningHadContained(t *testing.T) { t.Run("unbuffered chan", func(t *testing.T) { t.Parallel() - errChan := make(chan error) - assert.False(t, state.EmptyErrChanReturningHadContained(errChan)) - assert.Equal(t, 0, len(errChan)) + errChannel := make(chan error) + assert.False(t, state.EmptyErrChanReturningHadContained(errChannel)) + assert.Equal(t, 0, len(errChannel)) }) t.Run("buffered chan", func(t *testing.T) { t.Parallel() for i := 1; i < 10; i++ { - errChan := make(chan error, i) - assert.False(t, state.EmptyErrChanReturningHadContained(errChan)) - assert.Equal(t, 0, len(errChan)) + errChannel := make(chan error, i) + assert.False(t, state.EmptyErrChanReturningHadContained(errChannel)) + assert.Equal(t, 0, len(errChannel)) } }) }) @@ -2754,27 +2755,27 @@ func TestEmptyErrChanReturningHadContained(t *testing.T) { t.Run("unbuffered chan", func(t *testing.T) { t.Parallel() - errChan := make(chan error) + errChannel := make(chan error) go func() { - errChan <- errors.New("test") + errChannel <- errors.New("test") }() time.Sleep(time.Second) // allow the go routine to start - assert.True(t, state.EmptyErrChanReturningHadContained(errChan)) - assert.Equal(t, 0, len(errChan)) + assert.True(t, state.EmptyErrChanReturningHadContained(errChannel)) + assert.Equal(t, 0, len(errChannel)) }) t.Run("buffered chan", func(t *testing.T) { t.Parallel() for i := 1; i < 10; i++ { - errChan := make(chan error, i) + errChannel := make(chan error, i) for j := 0; j < i; j++ { - errChan <- errors.New("test") + errChannel <- errors.New("test") } - assert.True(t, state.EmptyErrChanReturningHadContained(errChan)) - assert.Equal(t, 0, len(errChan)) + assert.True(t, state.EmptyErrChanReturningHadContained(errChannel)) + assert.Equal(t, 0, len(errChannel)) } }) }) @@ -2901,6 +2902,39 @@ func TestAccountsDB_SyncMissingSnapshotNodes(t *testing.T) { assert.True(t, isMissingNodeCalled) }) + + t.Run("should not deadlock if sync err after another err", func(t *testing.T) { + t.Parallel() + + missingNodeError := errors.New("missing trie node") + isMissingNodeCalled := false + + memDbMock := testscommon.NewMemDbMock() + memDbMock.PutCalled = func(key, val []byte) error { + return fmt.Errorf("put error") + } + memDbMock.GetCalled = func(key []byte) ([]byte, error) { + if bytes.Equal(key, []byte(common.ActiveDBKey)) { + return []byte(common.ActiveDBVal), nil + } + + isMissingNodeCalled = true + return nil, missingNodeError + } + + tr, adb := getDefaultTrieAndAccountsDbWithCustomDB(&testscommon.SnapshotPruningStorerMock{MemDbMock: memDbMock}) + prepareTrie(tr, 3) + + rootHash, _ := tr.RootHash() + + 
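// What the new "should not deadlock" test here relies on: with non-blocking writes, a
// second error reported while the first is still buffered is dropped instead of
// blocking the reporting goroutine (a buffer of one is assumed, matching the channels
// the wrapper replaces):
func secondErrorIsDroppedSketch() error {
	ec := errChan.NewErrChanWrapper()
	ec.WriteInChanNonBlocking(errors.New("first error"))
	ec.WriteInChanNonBlocking(errors.New("second error")) // dropped, does not block
	return ec.ReadFromChanNonBlocking()                   // yields the first error
}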
adb.SnapshotState(rootHash) + + for tr.GetStorageManager().IsPruningBlocked() { + time.Sleep(time.Millisecond * 100) + } + + assert.True(t, isMissingNodeCalled) + }) } func prepareTrie(tr common.Trie, numKeys int) { @@ -2935,6 +2969,35 @@ func TestAccountsDb_Concurrent(t *testing.T) { testAccountMethodsConcurrency(t, adb, accountsAddresses, rootHash) } +func TestAccountsDB_SaveKeyValAfterAccountIsReverted(t *testing.T) { + t.Parallel() + + _, adb := getDefaultTrieAndAccountsDb() + addr := generateRandomByteArray(32) + + acc, _ := adb.LoadAccount(addr) + _ = adb.SaveAccount(acc) + + acc, _ = adb.LoadAccount(addr) + acc.(state.UserAccountHandler).IncreaseNonce(1) + _ = acc.(state.UserAccountHandler).SaveKeyValue([]byte("key"), []byte("value")) + _ = adb.SaveAccount(acc) + + err := adb.RevertToSnapshot(1) + require.Nil(t, err) + + acc, _ = adb.LoadAccount(addr) + _ = acc.(state.UserAccountHandler).SaveKeyValue([]byte("key"), []byte("value")) + _ = adb.SaveAccount(acc) + + _, err = adb.Commit() + require.Nil(t, err) + + acc, err = adb.LoadAccount(addr) + require.Nil(t, err) + require.NotNil(t, acc) +} + func testAccountMethodsConcurrency( t *testing.T, adb state.AccountsAdapter, diff --git a/state/dataTriesHolder.go b/state/dataTriesHolder.go index 4cf51bd9a3d..8333b875fce 100644 --- a/state/dataTriesHolder.go +++ b/state/dataTriesHolder.go @@ -4,6 +4,7 @@ import ( "sync" "github.com/multiversx/mx-chain-go/common" + logger "github.com/multiversx/mx-chain-logger-go" ) type dataTriesHolder struct { @@ -20,6 +21,8 @@ func NewDataTriesHolder() *dataTriesHolder { // Put adds a trie pointer to the tries map func (dth *dataTriesHolder) Put(key []byte, tr common.Trie) { + log.Trace("put trie in data tries holder", "key", key) + dth.mutex.Lock() dth.tries[string(key)] = tr dth.mutex.Unlock() @@ -67,6 +70,13 @@ func (dth *dataTriesHolder) GetAllTries() map[string]common.Trie { // Reset clears the tries map func (dth *dataTriesHolder) Reset() { dth.mutex.Lock() + + if log.GetLevel() == logger.LogTrace { + for key := range dth.tries { + log.Trace("reset data tries holder", "key", key) + } + } + dth.tries = make(map[string]common.Trie) dth.mutex.Unlock() } diff --git a/state/export_test.go b/state/export_test.go index d7b956dd0ec..3ff10d977b2 100644 --- a/state/export_test.go +++ b/state/export_test.go @@ -22,9 +22,9 @@ func (adb *AccountsDB) LoadCode(accountHandler baseAccountHandler) error { return adb.loadCode(accountHandler) } -// LoadDataTrie - -func (adb *AccountsDB) LoadDataTrie(accountHandler baseAccountHandler) error { - return adb.loadDataTrie(accountHandler, adb.getMainTrie()) +// LoadDataTrieConcurrentSafe - +func (adb *AccountsDB) LoadDataTrieConcurrentSafe(accountHandler baseAccountHandler) error { + return adb.loadDataTrieConcurrentSafe(accountHandler, adb.getMainTrie()) } // GetAccount - diff --git a/state/interface.go b/state/interface.go index 83419de7d8e..8071418796c 100644 --- a/state/interface.go +++ b/state/interface.go @@ -85,6 +85,7 @@ type UserAccountHandler interface { GetOwnerAddress() []byte SetUserName(userName []byte) GetUserName() []byte + IsGuarded() bool vmcommon.AccountHandler } diff --git a/state/peerAccountsDB.go b/state/peerAccountsDB.go index ed1f080069e..171ab6e3d06 100644 --- a/state/peerAccountsDB.go +++ b/state/peerAccountsDB.go @@ -2,6 +2,7 @@ package state import ( "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/common/errChan" ) // PeerAccountsDB will save and synchronize data from peer processor, plus will synchronize with 
nodesCoordinator @@ -56,7 +57,7 @@ func (adb *PeerAccountsDB) SnapshotState(rootHash []byte) { missingNodesChannel := make(chan []byte, missingNodesChannelSize) iteratorChannels := &common.TrieIteratorChannels{ LeavesChan: nil, - ErrChan: make(chan error, 1), + ErrChan: errChan.NewErrChanWrapper(), } stats := newSnapshotStatistics(0, 1) stats.NewSnapshotStarted() @@ -92,7 +93,7 @@ func (adb *PeerAccountsDB) SetStateCheckpoint(rootHash []byte) { stats.NewSnapshotStarted() iteratorChannels := &common.TrieIteratorChannels{ LeavesChan: nil, - ErrChan: make(chan error, 1), + ErrChan: errChan.NewErrChanWrapper(), } trieStorageManager.SetCheckpoint(rootHash, rootHash, iteratorChannels, missingNodesChannel, stats) diff --git a/state/storagePruningManager/evictionWaitingList/memoryEvictionWaitingList.go b/state/storagePruningManager/evictionWaitingList/memoryEvictionWaitingList.go index ae67f262ce8..c1515eabb56 100644 --- a/state/storagePruningManager/evictionWaitingList/memoryEvictionWaitingList.go +++ b/state/storagePruningManager/evictionWaitingList/memoryEvictionWaitingList.go @@ -6,9 +6,9 @@ import ( "sync" "github.com/multiversx/mx-chain-core-go/data" - logger "github.com/multiversx/mx-chain-logger-go" "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/state" + logger "github.com/multiversx/mx-chain-logger-go" ) var log = logger.GetOrCreate("state/evictionWaitingList") diff --git a/state/storagePruningManager/storagePruningManager.go b/state/storagePruningManager/storagePruningManager.go index c985a5378ab..757d04cc9ed 100644 --- a/state/storagePruningManager/storagePruningManager.go +++ b/state/storagePruningManager/storagePruningManager.go @@ -3,6 +3,7 @@ package storagePruningManager import ( "bytes" "encoding/hex" + "fmt" "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" @@ -80,7 +81,7 @@ func (spm *storagePruningManager) markForEviction( return err } - logMapWithTrace("MarkForEviction "+string(identifier), "hash", hashes) + logMapWithTrace(fmt.Sprintf("MarkForEviction %d", identifier), "hash", hashes) return nil } diff --git a/state/syncer/userAccountsSyncer.go b/state/syncer/userAccountsSyncer.go index 75dabb6f319..d0343df4628 100644 --- a/state/syncer/userAccountsSyncer.go +++ b/state/syncer/userAccountsSyncer.go @@ -12,6 +12,7 @@ import ( "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/common/errChan" "github.com/multiversx/mx-chain-go/process/factory" "github.com/multiversx/mx-chain-go/state" "github.com/multiversx/mx-chain-go/trie" @@ -227,7 +228,7 @@ func (u *userAccountsSyncer) syncAccountDataTries( leavesChannels := &common.TrieIteratorChannels{ LeavesChan: make(chan core.KeyValueHolder, common.TrieLeavesChannelDefaultCapacity), - ErrChan: make(chan error, 1), + ErrChan: errChan.NewErrChanWrapper(), } err = mainTrie.GetAllLeavesOnChannel(leavesChannels, context.Background(), mainRootHash, keyBuilder.NewDisabledKeyBuilder()) if err != nil { @@ -279,7 +280,7 @@ func (u *userAccountsSyncer) syncAccountDataTries( wg.Wait() - err = common.GetErrorFromChanNonBlocking(leavesChannels.ErrChan) + err = leavesChannels.ErrChan.ReadFromChanNonBlocking() if err != nil { return err } @@ -288,8 +289,8 @@ func (u *userAccountsSyncer) syncAccountDataTries( } func (u *userAccountsSyncer) printDataTrieStatistics() { - u.mutStatistics.RLock() - defer u.mutStatistics.RUnlock() + u.mutStatistics.Lock() 
+ defer u.mutStatistics.Unlock() log.Debug("user accounts tries sync has finished", "num small data tries", u.numSmallTries, "threshold", core.ConvertBytes(uint64(smallTrieThreshold))) diff --git a/state/userAccount.go b/state/userAccount.go index 1ad68badb88..72ff86cd306 100644 --- a/state/userAccount.go +++ b/state/userAccount.go @@ -4,6 +4,8 @@ package state import ( "bytes" "math/big" + + vmcommon "github.com/multiversx/mx-chain-vm-common-go" ) var _ UserAccountHandler = (*userAccount)(nil) @@ -140,6 +142,13 @@ func (a *userAccount) SetCodeMetadata(codeMetadata []byte) { a.CodeMetadata = codeMetadata } +// IsGuarded returns true if the account is in guarded state +func (a *userAccount) IsGuarded() bool { + codeMetaDataBytes := a.GetCodeMetadata() + codeMetaData := vmcommon.CodeMetadataFromBytes(codeMetaDataBytes) + return codeMetaData.Guarded +} + // IsInterfaceNil returns true if there is no value under the interface func (a *userAccount) IsInterfaceNil() bool { return a == nil diff --git a/statusHandler/errors.go b/statusHandler/errors.go index b90cd6b6539..d3f25cd3a4d 100644 --- a/statusHandler/errors.go +++ b/statusHandler/errors.go @@ -15,7 +15,7 @@ var ErrNilAppStatusHandler = errors.New("appStatusHandler is nil") var ErrNilMarshalizer = errors.New("nil Marshalizer") // ErrNilUint64Converter signals that uint64converter is nil -var ErrNilUint64Converter = errors.New("unit64converter is nil") +var ErrNilUint64Converter = errors.New("uint64converter is nil") // ErrNilStorage signals that a nil storage has been provided var ErrNilStorage = errors.New("nil storage") diff --git a/statusHandler/statusMetricsProvider.go b/statusHandler/statusMetricsProvider.go index 00f536da84e..a6ce71a75e9 100644 --- a/statusHandler/statusMetricsProvider.go +++ b/statusHandler/statusMetricsProvider.go @@ -245,6 +245,7 @@ func (sm *statusMetrics) ConfigMetrics() (map[string]interface{}, error) { configMetrics[common.MetricMetaConsensusGroupSize] = sm.uint64Metrics[common.MetricMetaConsensusGroupSize] configMetrics[common.MetricMinGasPrice] = sm.uint64Metrics[common.MetricMinGasPrice] configMetrics[common.MetricMinGasLimit] = sm.uint64Metrics[common.MetricMinGasLimit] + configMetrics[common.MetricExtraGasLimitGuardedTx] = sm.uint64Metrics[common.MetricExtraGasLimitGuardedTx] configMetrics[common.MetricMaxGasPerTransaction] = sm.uint64Metrics[common.MetricMaxGasPerTransaction] configMetrics[common.MetricRoundDuration] = sm.uint64Metrics[common.MetricRoundDuration] configMetrics[common.MetricStartTime] = sm.uint64Metrics[common.MetricStartTime] @@ -295,6 +296,7 @@ func (sm *statusMetrics) EnableEpochsMetrics() (map[string]interface{}, error) { enableEpochsMetrics[common.MetricIncrementSCRNonceInMultiTransferEnableEpoch] = sm.uint64Metrics[common.MetricIncrementSCRNonceInMultiTransferEnableEpoch] enableEpochsMetrics[common.MetricBalanceWaitingListsEnableEpoch] = sm.uint64Metrics[common.MetricBalanceWaitingListsEnableEpoch] enableEpochsMetrics[common.MetricWaitingListFixEnableEpoch] = sm.uint64Metrics[common.MetricWaitingListFixEnableEpoch] + enableEpochsMetrics[common.MetricSetGuardianEnableEpoch] = sm.uint64Metrics[common.MetricSetGuardianEnableEpoch] numNodesChangeConfig := sm.uint64Metrics[common.MetricMaxNodesChangeEnableEpoch+"_count"] diff --git a/statusHandler/statusMetricsProvider_test.go b/statusHandler/statusMetricsProvider_test.go index 5d2c2ab664a..cd399259e08 100644 --- a/statusHandler/statusMetricsProvider_test.go +++ b/statusHandler/statusMetricsProvider_test.go @@ -178,6 +178,7 @@ func 
TestStatusMetrics_NetworkConfig(t *testing.T) { sm.SetUInt64Value(common.MetricMetaConsensusGroupSize, 25) sm.SetUInt64Value(common.MetricMinGasPrice, 1000) sm.SetUInt64Value(common.MetricMinGasLimit, 50000) + sm.SetUInt64Value(common.MetricExtraGasLimitGuardedTx, 50000) sm.SetStringValue(common.MetricRewardsTopUpGradientPoint, "12345") sm.SetUInt64Value(common.MetricGasPerDataByte, 1500) sm.SetStringValue(common.MetricChainId, "local-id") @@ -200,6 +201,7 @@ func TestStatusMetrics_NetworkConfig(t *testing.T) { "erd_latest_tag_software_version": "version1.0", "erd_meta_consensus_group_size": uint64(25), "erd_min_gas_limit": uint64(50000), + "erd_extra_gas_limit_guarded_tx": uint64(50000), "erd_min_gas_price": uint64(1000), "erd_min_transaction_version": uint64(2), "erd_num_metachain_nodes": uint64(50), @@ -314,6 +316,7 @@ func TestStatusMetrics_EnableEpochMetrics(t *testing.T) { sm.SetUInt64Value(common.MetricIncrementSCRNonceInMultiTransferEnableEpoch, 3) sm.SetUInt64Value(common.MetricBalanceWaitingListsEnableEpoch, 4) sm.SetUInt64Value(common.MetricWaitingListFixEnableEpoch, 1) + sm.SetUInt64Value(common.MetricSetGuardianEnableEpoch, 3) maxNodesChangeConfig := []map[string]uint64{ { @@ -363,6 +366,7 @@ func TestStatusMetrics_EnableEpochMetrics(t *testing.T) { common.MetricIncrementSCRNonceInMultiTransferEnableEpoch: uint64(3), common.MetricBalanceWaitingListsEnableEpoch: uint64(4), common.MetricWaitingListFixEnableEpoch: uint64(1), + common.MetricSetGuardianEnableEpoch: uint64(3), common.MetricMaxNodesChangeEnableEpoch: []map[string]interface{}{ { diff --git a/storage/cache/cache_test.go b/storage/cache/cache_test.go new file mode 100644 index 00000000000..99a1731e5cb --- /dev/null +++ b/storage/cache/cache_test.go @@ -0,0 +1,165 @@ +package cache + +import ( + "testing" + "time" + + "github.com/multiversx/mx-chain-go/testscommon" + "github.com/stretchr/testify/assert" +) + +func TestNewTimeCache_ShouldWork(t *testing.T) { + t.Parallel() + + instance := NewTimeCache(0) + assert.NotNil(t, instance) +} + +func TestNewTimeCacher(t *testing.T) { + t.Parallel() + + t.Run("invalid argument should error", func(t *testing.T) { + t.Parallel() + + args := ArgTimeCacher{ + DefaultSpan: time.Second - time.Nanosecond, + CacheExpiry: time.Second, + } + + instance, err := NewTimeCacher(args) + assert.Nil(t, instance) + assert.NotNil(t, err) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + args := ArgTimeCacher{ + DefaultSpan: time.Second, + CacheExpiry: time.Second, + } + + instance, err := NewTimeCacher(args) + assert.NotNil(t, instance) + assert.Nil(t, err) + }) +} + +func TestNewLRUCache(t *testing.T) { + t.Parallel() + + t.Run("invalid argument should error", func(t *testing.T) { + t.Parallel() + + instance, err := NewLRUCache(0) + assert.Nil(t, instance) + assert.NotNil(t, err) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + instance, err := NewLRUCache(1) + assert.NotNil(t, instance) + assert.Nil(t, err) + }) +} + +func TestNewPeerTimeCache(t *testing.T) { + t.Parallel() + + t.Run("invalid argument should error", func(t *testing.T) { + t.Parallel() + + instance, err := NewPeerTimeCache(nil) + assert.Nil(t, instance) + assert.NotNil(t, err) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + instance, err := NewPeerTimeCache(&testscommon.TimeCacheStub{}) + assert.NotNil(t, instance) + assert.Nil(t, err) + }) +} + +func TestNewCapacityLRU(t *testing.T) { + t.Parallel() + + t.Run("invalid argument should error", func(t *testing.T) { + 
t.Parallel() + + instance, err := NewCapacityLRU(0, 1) + assert.Nil(t, instance) + assert.NotNil(t, err) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + instance, err := NewCapacityLRU(1, 1) + assert.NotNil(t, instance) + assert.Nil(t, err) + }) +} + +func TestNewLRUCacheWithEviction(t *testing.T) { + t.Parallel() + + t.Run("invalid argument should error", func(t *testing.T) { + t.Parallel() + + instance, err := NewLRUCacheWithEviction(0, nil) + assert.Nil(t, instance) + assert.NotNil(t, err) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + t.Run("nil handler should work", func(t *testing.T) { + t.Parallel() + + instance, err := NewLRUCacheWithEviction(1, nil) + assert.NotNil(t, instance) + assert.Nil(t, err) + }) + t.Run("with handler should work", func(t *testing.T) { + t.Parallel() + + instance, err := NewLRUCacheWithEviction(1, func(key interface{}, value interface{}) {}) + assert.NotNil(t, instance) + assert.Nil(t, err) + }) + }) +} + +func TestNewImmunityCache(t *testing.T) { + t.Parallel() + + t.Run("invalid argument should error", func(t *testing.T) { + t.Parallel() + + config := CacheConfig{ + MaxNumBytes: 0, + MaxNumItems: 0, + NumChunks: 0, + Name: "test", + NumItemsToPreemptivelyEvict: 0, + } + instance, err := NewImmunityCache(config) + assert.Nil(t, instance) + assert.NotNil(t, err) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + config := CacheConfig{ + MaxNumBytes: 4, + MaxNumItems: 4, + NumChunks: 1, + Name: "test", + NumItemsToPreemptivelyEvict: 1, + } + instance, err := NewImmunityCache(config) + assert.NotNil(t, instance) + assert.Nil(t, err) + }) +} diff --git a/storage/clean/oldDataCleanerProvider_test.go b/storage/clean/oldDataCleanerProvider_test.go index efdec71a9da..85cbd3a47e9 100644 --- a/storage/clean/oldDataCleanerProvider_test.go +++ b/storage/clean/oldDataCleanerProvider_test.go @@ -4,7 +4,6 @@ import ( "testing" "github.com/multiversx/mx-chain-core-go/core" - "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/storage" "github.com/multiversx/mx-chain-go/testscommon" @@ -29,7 +28,7 @@ func TestNewOldDataCleanerProvider(t *testing.T) { args := createMockArgOldDataCleanerProvider() args.NodeTypeProvider = nil odcp, err := NewOldDataCleanerProvider(args) - require.True(t, check.IfNil(odcp)) + require.Nil(t, odcp) require.Equal(t, storage.ErrNilNodeTypeProvider, err) }) t.Run("nil ManagedPeersHolder should error", func(t *testing.T) { @@ -38,7 +37,7 @@ func TestNewOldDataCleanerProvider(t *testing.T) { args := createMockArgOldDataCleanerProvider() args.ManagedPeersHolder = nil odcp, err := NewOldDataCleanerProvider(args) - require.True(t, check.IfNil(odcp)) + require.Nil(t, odcp) require.Equal(t, storage.ErrNilManagedPeersHolder, err) }) t.Run("should work", func(t *testing.T) { @@ -46,7 +45,7 @@ func TestNewOldDataCleanerProvider(t *testing.T) { odcp, err := NewOldDataCleanerProvider(createMockArgOldDataCleanerProvider()) require.NoError(t, err) - require.False(t, check.IfNil(odcp)) + require.NotNil(t, odcp) }) } @@ -136,3 +135,14 @@ func TestOldDataCleanerProvider_ShouldClean(t *testing.T) { require.True(t, odcp.ShouldClean()) }) } + +func TestOldDataCleanerProvider_IsInterfaceNil(t *testing.T) { + t.Parallel() + + var odcp *oldDataCleanerProvider + require.True(t, odcp.IsInterfaceNil()) + + args := createMockArgOldDataCleanerProvider() + odcp, _ = NewOldDataCleanerProvider(args) + require.False(t, odcp.IsInterfaceNil()) +} 
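Editor's note on the IsInterfaceNil tests added across these files: they guard against Go's typed-nil pitfall, where a nil concrete pointer stored inside an interface makes the interface value compare as non-nil, so only a method on the (possibly nil) receiver can report the real state. Below is a minimal, self-contained sketch of the convention; the Checker and component names are illustrative only, not types from this repository.

	package main

	import "fmt"

	// Checker mirrors the repository-wide convention of exposing IsInterfaceNil.
	type Checker interface {
		IsInterfaceNil() bool
	}

	type component struct{}

	// IsInterfaceNil returns true if there is no value under the interface.
	func (c *component) IsInterfaceNil() bool {
		return c == nil
	}

	func main() {
		var c *component      // a typed nil pointer
		var iface Checker = c // the interface now holds (*component)(nil)

		fmt.Println(iface == nil)           // false: the interface carries a type, so it is not nil
		fmt.Println(iface.IsInterfaceNil()) // true: the receiver itself is nil
	}

This is why each new test asserts both branches: a typed-nil receiver must report true, and a properly constructed instance must report false.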
diff --git a/storage/clean/oldDatabaseCleaner_test.go b/storage/clean/oldDatabaseCleaner_test.go index b839bf3dca3..85736c33b18 100644 --- a/storage/clean/oldDatabaseCleaner_test.go +++ b/storage/clean/oldDatabaseCleaner_test.go @@ -4,7 +4,6 @@ import ( "errors" "testing" - "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-core-go/data/block" "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/epochStart" @@ -12,6 +11,7 @@ import ( "github.com/multiversx/mx-chain-go/storage/mock" "github.com/multiversx/mx-chain-go/testscommon" storageStubs "github.com/multiversx/mx-chain-go/testscommon/storage" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -68,9 +68,9 @@ func TestNewOldDatabaseCleaner(t *testing.T) { odc, err := NewOldDatabaseCleaner(args) require.Equal(t1, tt.expectedErr, err) if err == nil { - require.False(t1, check.IfNil(odc)) + assert.NotNil(t1, odc) } else { - require.True(t1, check.IfNil(odc)) + assert.Nil(t1, odc) } }) } @@ -100,7 +100,6 @@ func TestOldDatabaseCleaner_EpochChangeShouldErrIfOldestEpochComputationFails(t odc, _ := NewOldDatabaseCleaner(args) odc.pathRemover = fileRemover odc.directoryReader = directoryReader - require.False(t, check.IfNil(odc)) handlerFunc.EpochStartAction(&block.Header{Epoch: 5}) require.False(t, fileRemoverWasCalled) @@ -132,7 +131,6 @@ func TestOldDatabaseCleaner_EpochChangeDirectoryReadFailsShouldNotRemove(t *test odc, _ := NewOldDatabaseCleaner(args) odc.pathRemover = fileRemover odc.directoryReader = directoryReader - require.False(t, check.IfNil(odc)) handlerFunc.EpochStartAction(&block.Header{Epoch: 5}) require.False(t, fileRemoverWasCalled) @@ -164,7 +162,6 @@ func TestOldDatabaseCleaner_EpochChangeNoEpochDirectory(t *testing.T) { odc, _ := NewOldDatabaseCleaner(args) odc.pathRemover = fileRemover odc.directoryReader = directoryReader - require.False(t, check.IfNil(odc)) handlerFunc.EpochStartAction(&block.Header{Epoch: 5}) require.False(t, fileRemoverWasCalled) @@ -199,7 +196,6 @@ func TestOldDatabaseCleaner_EpochChangeShouldNotRemoveIfNewOldestEpochIsOlder(t odc, _ := NewOldDatabaseCleaner(args) odc.pathRemover = fileRemover odc.directoryReader = directoryReader - require.False(t, check.IfNil(odc)) handlerFunc.EpochStartAction(&block.Header{Epoch: 5}) require.Empty(t, removedFiles) @@ -243,7 +239,6 @@ func TestOldDatabaseCleaner_EpochChange(t *testing.T) { odc, _ := NewOldDatabaseCleaner(args) odc.pathRemover = fileRemover odc.directoryReader = directoryReader - require.False(t, check.IfNil(odc)) handlerFunc.EpochStartAction(&block.Header{Epoch: 5}) require.Empty(t, removedFiles) @@ -283,3 +278,14 @@ func createMockArgs() ArgsOldDatabaseCleaner { OldDataCleanerProvider: &testscommon.OldDataCleanerProviderStub{}, } } + +func TestOldDatabaseCleaner_IsInterfaceNil(t *testing.T) { + t.Parallel() + + var odc *oldDatabaseCleaner + require.True(t, odc.IsInterfaceNil()) + + args := createMockArgs() + odc, _ = NewOldDatabaseCleaner(args) + require.False(t, odc.IsInterfaceNil()) +} diff --git a/storage/database/db_test.go b/storage/database/db_test.go new file mode 100644 index 00000000000..d04aaa2a78d --- /dev/null +++ b/storage/database/db_test.go @@ -0,0 +1,73 @@ +package database + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestNewMemDB(t *testing.T) { + t.Parallel() + + instance := NewMemDB() + assert.NotNil(t, instance) +} + +func TestNewlruDB(t *testing.T) { + t.Parallel() + + t.Run("invalid argument should 
error", func(t *testing.T) { + t.Parallel() + + instance, err := NewlruDB(0) + assert.Nil(t, instance) + assert.NotNil(t, err) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + instance, err := NewlruDB(1) + assert.NotNil(t, instance) + assert.Nil(t, err) + }) +} + +func TestNewLevelDB(t *testing.T) { + t.Parallel() + + t.Run("invalid argument should error", func(t *testing.T) { + t.Parallel() + + instance, err := NewLevelDB(t.TempDir(), 0, 0, 0) + assert.Nil(t, instance) + assert.NotNil(t, err) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + instance, err := NewLevelDB(t.TempDir(), 1, 1, 1) + assert.NotNil(t, instance) + assert.Nil(t, err) + _ = instance.Close() + }) +} + +func TestNewSerialDB(t *testing.T) { + t.Parallel() + + t.Run("invalid argument should error", func(t *testing.T) { + t.Parallel() + + instance, err := NewSerialDB(t.TempDir(), 0, 0, 0) + assert.Nil(t, instance) + assert.NotNil(t, err) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + instance, err := NewSerialDB(t.TempDir(), 1, 1, 1) + assert.NotNil(t, instance) + assert.Nil(t, err) + _ = instance.Close() + }) +} diff --git a/storage/databaseremover/customDatabaseRemover_test.go b/storage/databaseremover/customDatabaseRemover_test.go index 0ff730c4583..907508d6a89 100644 --- a/storage/databaseremover/customDatabaseRemover_test.go +++ b/storage/databaseremover/customDatabaseRemover_test.go @@ -4,7 +4,6 @@ import ( "errors" "testing" - "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-go/config" "github.com/stretchr/testify/require" ) @@ -26,7 +25,7 @@ func TestCustomDatabaseRemover(t *testing.T) { cdr, err := NewCustomDatabaseRemover(createCfgWithPattern(",")) require.True(t, errors.Is(err, errEmptyPatternArgument)) - require.True(t, check.IfNil(cdr)) + require.Nil(t, cdr) }) t.Run("invalid pattern argument, should error", func(t *testing.T) { @@ -89,3 +88,13 @@ func createCfgWithPattern(pattern string) config.StoragePruningConfig { AccountsTrieSkipRemovalCustomPattern: pattern, } } + +func TestCustomDatabaseRemover_IsInterfaceNil(t *testing.T) { + t.Parallel() + + var cdr *customDatabaseRemover + require.True(t, cdr.IsInterfaceNil()) + + cdr, _ = NewCustomDatabaseRemover(createCfgWithPattern("%2,%3")) + require.False(t, cdr.IsInterfaceNil()) +} diff --git a/storage/directoryhandler/directoryReader_test.go b/storage/directoryhandler/directoryReader_test.go index 36be200c69d..fb4a6a077c6 100644 --- a/storage/directoryhandler/directoryReader_test.go +++ b/storage/directoryhandler/directoryReader_test.go @@ -1,74 +1,168 @@ package directoryhandler import ( + "fmt" "os" "path/filepath" "testing" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) -func TestDirectoryReaderListFilesAsString(t *testing.T) { - t.Parallel() +const invalidPath = "\\/\\/\\/\\/" - dirName := t.TempDir() +func TestNewDirectoryReader(t *testing.T) { + t.Parallel() - file1 := "file1" - file2 := "file2" - dir1 := "dir1" - _, _ = os.Create(filepath.Join(dirName, file1)) - _, _ = os.Create(filepath.Join(dirName, file2)) - _ = os.Mkdir(filepath.Join(dirName, dir1), os.ModePerm) + instance := NewDirectoryReader() + assert.NotNil(t, instance) + assert.NotNil(t, instance) +} - dirReader := NewDirectoryReader() +func TestDirectoryReaderListFilesAsString(t *testing.T) { + t.Parallel() - filesName, err := dirReader.ListFilesAsString(dirName) - assert.Nil(t, err) - assert.Equal(t, 2, len(filesName)) - assert.True(t, contains(filesName, file1)) - 
assert.True(t, contains(filesName, file2)) + t.Run("invalid path should error", func(t *testing.T) { + t.Parallel() + + dirReader := NewDirectoryReader() + + filesName, err := dirReader.ListFilesAsString(invalidPath) + assert.NotNil(t, err) + assert.Equal(t, "*fs.PathError", fmt.Sprintf("%T", err)) + assert.Nil(t, filesName) + }) + t.Run("empty directory should error", func(t *testing.T) { + t.Parallel() + + dirReader := NewDirectoryReader() + + filesName, err := dirReader.ListFilesAsString(t.TempDir()) + expectedErrorString := "no file in provided directory" + assert.Equal(t, expectedErrorString, err.Error()) + assert.Nil(t, filesName) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + dirName := t.TempDir() + + file1 := "file1" + file2 := "file2" + dir1 := "dir1" + _, _ = os.Create(filepath.Join(dirName, file1)) + _, _ = os.Create(filepath.Join(dirName, file2)) + _ = os.Mkdir(filepath.Join(dirName, dir1), os.ModePerm) + + dirReader := NewDirectoryReader() + + filesName, err := dirReader.ListFilesAsString(dirName) + assert.Nil(t, err) + assert.Equal(t, 2, len(filesName)) + assert.True(t, contains(filesName, file1)) + assert.True(t, contains(filesName, file2)) + }) } func TestDirectoryReaderListDirectoriesAsString(t *testing.T) { t.Parallel() - dirName := t.TempDir() - - file1 := "file1" - file2 := "file2" - dir1 := "dir1" - _, _ = os.Create(filepath.Join(dirName, file1)) - _, _ = os.Create(filepath.Join(dirName, file2)) - _ = os.Mkdir(filepath.Join(dirName, dir1), os.ModePerm) - - dirReader := NewDirectoryReader() - - directoriesNames, err := dirReader.ListDirectoriesAsString(dirName) - assert.Nil(t, err) - assert.Equal(t, 1, len(directoriesNames)) - assert.True(t, contains(directoriesNames, dir1)) + t.Run("invalid path should error", func(t *testing.T) { + t.Parallel() + + dirReader := NewDirectoryReader() + + directoriesNames, err := dirReader.ListDirectoriesAsString(invalidPath) + assert.NotNil(t, err) + assert.Equal(t, "*fs.PathError", fmt.Sprintf("%T", err)) + assert.Nil(t, directoriesNames) + }) + t.Run("empty directory should error", func(t *testing.T) { + t.Parallel() + + dirReader := NewDirectoryReader() + + directoriesNames, err := dirReader.ListDirectoriesAsString(t.TempDir()) + expectedErrorString := "no sub-directory in provided directory" + assert.Equal(t, expectedErrorString, err.Error()) + assert.Nil(t, directoriesNames) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + dirName := t.TempDir() + + file1 := "file1" + file2 := "file2" + dir1 := "dir1" + _, _ = os.Create(filepath.Join(dirName, file1)) + _, _ = os.Create(filepath.Join(dirName, file2)) + _ = os.Mkdir(filepath.Join(dirName, dir1), os.ModePerm) + + dirReader := NewDirectoryReader() + + directoriesNames, err := dirReader.ListDirectoriesAsString(dirName) + assert.Nil(t, err) + assert.Equal(t, 1, len(directoriesNames)) + assert.True(t, contains(directoriesNames, dir1)) + }) } func TestDirectoryReaderListAllAsString(t *testing.T) { t.Parallel() - dirName := t.TempDir() + t.Run("invalid path should error", func(t *testing.T) { + t.Parallel() + + dirReader := NewDirectoryReader() + + allNames, err := dirReader.ListAllAsString(invalidPath) + assert.NotNil(t, err) + assert.Equal(t, "*fs.PathError", fmt.Sprintf("%T", err)) + assert.Nil(t, allNames) + }) + t.Run("empty directory should error", func(t *testing.T) { + t.Parallel() + + dirReader := NewDirectoryReader() + + allNames, err := dirReader.ListAllAsString(t.TempDir()) + expectedErrorString := "no file or directory in provided 
directory" + assert.Equal(t, expectedErrorString, err.Error()) + assert.Nil(t, allNames) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + dirName := t.TempDir() + + file1 := "file1" + file2 := "file2" + dir1 := "dir1" + _, _ = os.Create(filepath.Join(dirName, file1)) + _, _ = os.Create(filepath.Join(dirName, file2)) + _ = os.Mkdir(filepath.Join(dirName, dir1), os.ModePerm) + + dirReader := NewDirectoryReader() + + allNames, err := dirReader.ListAllAsString(dirName) + assert.Nil(t, err) + assert.Equal(t, 3, len(allNames)) + assert.True(t, contains(allNames, file1)) + assert.True(t, contains(allNames, file2)) + assert.True(t, contains(allNames, dir1)) + }) +} - file1 := "file1" - file2 := "file2" - dir1 := "dir1" - _, _ = os.Create(filepath.Join(dirName, file1)) - _, _ = os.Create(filepath.Join(dirName, file2)) - _ = os.Mkdir(filepath.Join(dirName, dir1), os.ModePerm) +func TestDirectoryReader_IsInterfaceNil(t *testing.T) { + t.Parallel() - dirReader := NewDirectoryReader() + var dr *directoryReader + require.True(t, dr.IsInterfaceNil()) - allNames, err := dirReader.ListAllAsString(dirName) - assert.Nil(t, err) - assert.Equal(t, 3, len(allNames)) - assert.True(t, contains(allNames, file1)) - assert.True(t, contains(allNames, file2)) - assert.True(t, contains(allNames, dir1)) + dr = NewDirectoryReader() + require.False(t, dr.IsInterfaceNil()) } func contains(s []string, e string) bool { diff --git a/storage/disabled/errorDisabledPersister.go b/storage/disabled/errorDisabledPersister.go new file mode 100644 index 00000000000..9877bc01899 --- /dev/null +++ b/storage/disabled/errorDisabledPersister.go @@ -0,0 +1,57 @@ +package disabled + +import ( + "fmt" +) + +type errorDisabledPersister struct { +} + +// NewErrorDisabledPersister returns a new instance of this disabled persister that errors on all operations +func NewErrorDisabledPersister() *errorDisabledPersister { + return &errorDisabledPersister{} +} + +// Put returns error +func (disabled *errorDisabledPersister) Put(_, _ []byte) error { + return fmt.Errorf("disabledPersister.Put") +} + +// Get returns error +func (disabled *errorDisabledPersister) Get(_ []byte) ([]byte, error) { + return nil, fmt.Errorf("disabledPersister.Get") +} + +// Has returns error +func (disabled *errorDisabledPersister) Has(_ []byte) error { + return fmt.Errorf("disabledPersister.Has") +} + +// Close returns error +func (disabled *errorDisabledPersister) Close() error { + return fmt.Errorf("disabledPersister.Close") +} + +// Remove returns error +func (disabled *errorDisabledPersister) Remove(_ []byte) error { + return fmt.Errorf("disabledPersister.Remove") +} + +// Destroy returns error +func (disabled *errorDisabledPersister) Destroy() error { + return fmt.Errorf("disabledPersister.Destroy") +} + +// DestroyClosed returns error +func (disabled *errorDisabledPersister) DestroyClosed() error { + return fmt.Errorf("disabledPersister.DestroyClosed") +} + +// RangeKeys does nothing +func (disabled *errorDisabledPersister) RangeKeys(_ func(key []byte, val []byte) bool) { +} + +// IsInterfaceNil returns true if there is no value under the interface +func (disabled *errorDisabledPersister) IsInterfaceNil() bool { + return disabled == nil +} diff --git a/storage/disabled/errorDisabledPersister_test.go b/storage/disabled/errorDisabledPersister_test.go new file mode 100644 index 00000000000..407f7ea4ddd --- /dev/null +++ b/storage/disabled/errorDisabledPersister_test.go @@ -0,0 +1,102 @@ +package disabled + +import ( + "testing" + + 
"github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestNewErrorDisabledPersister(t *testing.T) { + t.Parallel() + + disabled := NewErrorDisabledPersister() + assert.NotNil(t, disabled) +} + +func TestErrorDisabledPersister_MethodsShouldError(t *testing.T) { + t.Parallel() + + disabled := NewErrorDisabledPersister() + t.Run("Put should error", func(t *testing.T) { + t.Parallel() + + expectedErrorString := "disabledPersister.Put" + err := disabled.Put(nil, nil) + assert.Equal(t, expectedErrorString, err.Error()) + }) + t.Run("Get should error", func(t *testing.T) { + t.Parallel() + + expectedErrorString := "disabledPersister.Get" + value, err := disabled.Get(nil) + assert.Equal(t, expectedErrorString, err.Error()) + assert.Nil(t, value) + }) + t.Run("Has should error", func(t *testing.T) { + t.Parallel() + + expectedErrorString := "disabledPersister.Has" + err := disabled.Has(nil) + assert.Equal(t, expectedErrorString, err.Error()) + }) + t.Run("Close should error", func(t *testing.T) { + t.Parallel() + + expectedErrorString := "disabledPersister.Close" + err := disabled.Close() + assert.Equal(t, expectedErrorString, err.Error()) + }) + t.Run("Remove should error", func(t *testing.T) { + t.Parallel() + + expectedErrorString := "disabledPersister.Remove" + err := disabled.Remove(nil) + assert.Equal(t, expectedErrorString, err.Error()) + }) + t.Run("Destroy should error", func(t *testing.T) { + t.Parallel() + + expectedErrorString := "disabledPersister.Destroy" + err := disabled.Destroy() + assert.Equal(t, expectedErrorString, err.Error()) + }) + t.Run("DestroyClosed should error", func(t *testing.T) { + t.Parallel() + + expectedErrorString := "disabledPersister.DestroyClosed" + err := disabled.DestroyClosed() + assert.Equal(t, expectedErrorString, err.Error()) + }) +} + +func TestErrorDisabledPersister_RangeKeys(t *testing.T) { + t.Parallel() + + disabled := NewErrorDisabledPersister() + t.Run("nil handler should not panic", func(t *testing.T) { + t.Parallel() + + assert.NotPanics(t, func() { + disabled.RangeKeys(nil) + }) + }) + t.Run("handler should not be called", func(t *testing.T) { + t.Parallel() + + disabled.RangeKeys(func(key []byte, val []byte) bool { + assert.Fail(t, "should have not called the handler") + return false + }) + }) +} + +func TestErrorDisabledPersister_IsInterfaceNil(t *testing.T) { + t.Parallel() + + var edp *errorDisabledPersister + require.True(t, edp.IsInterfaceNil()) + + edp = NewErrorDisabledPersister() + require.False(t, edp.IsInterfaceNil()) +} diff --git a/storage/disabled/persister_test.go b/storage/disabled/persister_test.go index aee9666f98e..2b8e53d5ef1 100644 --- a/storage/disabled/persister_test.go +++ b/storage/disabled/persister_test.go @@ -4,7 +4,6 @@ import ( "fmt" "testing" - "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-go/storage" "github.com/stretchr/testify/assert" ) @@ -20,7 +19,7 @@ func TestPersister_MethodsDoNotPanic(t *testing.T) { }() p := NewPersister() - assert.False(t, check.IfNil(p)) + assert.False(t, p.IsInterfaceNil()) assert.Nil(t, p.Put(nil, nil)) assert.Equal(t, storage.ErrKeyNotFound, p.Has(nil)) assert.Nil(t, p.Close()) diff --git a/storage/disabled/storer_test.go b/storage/disabled/storer_test.go index f7dd2a8a840..95fb7811fad 100644 --- a/storage/disabled/storer_test.go +++ b/storage/disabled/storer_test.go @@ -4,7 +4,6 @@ import ( "fmt" "testing" - "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-storage-go/common" 
"github.com/stretchr/testify/assert" ) @@ -20,7 +19,7 @@ func TestStorer_MethodsDoNotPanic(t *testing.T) { }() s := NewStorer() - assert.False(t, check.IfNil(s)) + assert.False(t, s.IsInterfaceNil()) assert.Nil(t, s.Put(nil, nil)) assert.Nil(t, s.PutInEpoch(nil, nil, 0)) assert.Nil(t, s.Has(nil)) diff --git a/storage/errors.go b/storage/errors.go index f33ace22458..14c62db42ac 100644 --- a/storage/errors.go +++ b/storage/errors.go @@ -91,6 +91,15 @@ var ErrNilPersistersTracker = errors.New("nil persisters tracker provided") // ErrNilManagedPeersHolder signals that a nil managed peers holder has been provided var ErrNilManagedPeersHolder = errors.New("nil managed peers holder") +// ErrNilLatestStorageDataProvider signals that a nil latest storage data provider has been provided +var ErrNilLatestStorageDataProvider = errors.New("nil latest storage data provider") + +// ErrNilBootstrapDataProvider signals that a nil bootstrap data provider has been provided +var ErrNilBootstrapDataProvider = errors.New("nil bootstrap data provider") + +// ErrNilDirectoryReader signals that a nil directory reader has been provided +var ErrNilDirectoryReader = errors.New("nil directory reader") + // IsNotFoundInStorageErr returns whether an error is a "not found in storage" error. // Currently, "item not found" storage errors are untyped (thus not distinguishable from others). E.g. see "pruningStorer.go". // As a workaround, we test the error message for a match. diff --git a/storage/factory/bootstrapDataProvider_test.go b/storage/factory/bootstrapDataProvider_test.go index 22564130082..46e622fa980 100644 --- a/storage/factory/bootstrapDataProvider_test.go +++ b/storage/factory/bootstrapDataProvider_test.go @@ -5,7 +5,6 @@ import ( "strconv" "testing" - "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/process/block/bootstrapStorage" "github.com/multiversx/mx-chain-go/storage" @@ -18,7 +17,7 @@ func TestNewBootstrapDataProvider_NilMarshalizerShouldErr(t *testing.T) { t.Parallel() bdp, err := NewBootstrapDataProvider(nil) - require.True(t, check.IfNil(bdp)) + require.Nil(t, bdp) require.Equal(t, storage.ErrNilMarshalizer, err) } @@ -26,7 +25,7 @@ func TestNewBootstrapDataProvider_OkValuesShouldWork(t *testing.T) { t.Parallel() bdp, err := NewBootstrapDataProvider(&mock.MarshalizerMock{}) - require.False(t, check.IfNil(bdp)) + require.NotNil(t, bdp) require.NoError(t, err) } @@ -93,3 +92,13 @@ func TestBootstrapDataProvider_LoadForPath_ShouldWork(t *testing.T) { require.NotNil(t, storer) require.Equal(t, expectedBD, bootstrapData) } + +func TestBootstrapDataProvider_IsInterfaceNil(t *testing.T) { + t.Parallel() + + var bdp *bootstrapDataProvider + require.True(t, bdp.IsInterfaceNil()) + + bdp, _ = NewBootstrapDataProvider(&mock.MarshalizerMock{}) + require.False(t, bdp.IsInterfaceNil()) +} diff --git a/storage/factory/disabledPersister.go b/storage/factory/disabledPersister.go deleted file mode 100644 index 66a6390387b..00000000000 --- a/storage/factory/disabledPersister.go +++ /dev/null @@ -1,52 +0,0 @@ -package factory - -import ( - "fmt" -) - -type disabledPersister struct { -} - -// Put returns error -func (dp *disabledPersister) Put(_, _ []byte) error { - return fmt.Errorf("disabledPersister.Put") -} - -// Get returns error -func (dp *disabledPersister) Get(_ []byte) ([]byte, error) { - return nil, fmt.Errorf("disabledPersister.Get") -} - -// Has returns error -func (dp *disabledPersister) Has(_ []byte) error { - return 
fmt.Errorf("disabledPersister.Has") -} - -// Close returns error -func (dp *disabledPersister) Close() error { - return fmt.Errorf("disabledPersister.Close") -} - -// Remove returns error -func (dp *disabledPersister) Remove(_ []byte) error { - return fmt.Errorf("disabledPersister.Remove") -} - -// Destroy does nothing -func (dp *disabledPersister) Destroy() error { - return fmt.Errorf("disabledPersister.Destroy") -} - -// DestroyClosed returns error -func (dp *disabledPersister) DestroyClosed() error { - return fmt.Errorf("disabledPersister.DestroyClosed") -} - -// RangeKeys does nothing -func (dp *disabledPersister) RangeKeys(_ func(key []byte, val []byte) bool) { -} - -// IsInterfaceNil returns true if there is no value under the interface -func (dp *disabledPersister) IsInterfaceNil() bool { - return dp == nil -} diff --git a/storage/factory/openStorage.go b/storage/factory/openStorage.go index cfd0e9e3c5b..2f02327cc02 100644 --- a/storage/factory/openStorage.go +++ b/storage/factory/openStorage.go @@ -5,6 +5,7 @@ import ( "path/filepath" "time" + "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/process/block/bootstrapStorage" "github.com/multiversx/mx-chain-go/storage" @@ -31,6 +32,13 @@ type openStorageUnits struct { // NewStorageUnitOpenHandler creates an openStorageUnits component func NewStorageUnitOpenHandler(args ArgsNewOpenStorageUnits) (*openStorageUnits, error) { + if check.IfNil(args.BootstrapDataProvider) { + return nil, storage.ErrNilBootstrapDataProvider + } + if check.IfNil(args.LatestStorageDataProvider) { + return nil, storage.ErrNilLatestStorageDataProvider + } + o := &openStorageUnits{ defaultEpochString: args.DefaultEpochString, defaultShardString: args.DefaultShardString, diff --git a/storage/factory/openStorage_test.go b/storage/factory/openStorage_test.go index 8340db9c563..69a81bc1f67 100644 --- a/storage/factory/openStorage_test.go +++ b/storage/factory/openStorage_test.go @@ -5,12 +5,12 @@ import ( "strings" "testing" - "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/process/block/bootstrapStorage" "github.com/multiversx/mx-chain-go/storage" "github.com/multiversx/mx-chain-go/storage/mock" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func createMockArgsOpenStorageUnits() ArgsNewOpenStorageUnits { @@ -25,10 +25,31 @@ func createMockArgsOpenStorageUnits() ArgsNewOpenStorageUnits { func TestNewStorageUnitOpenHandler(t *testing.T) { t.Parallel() - suoh, err := NewStorageUnitOpenHandler(createMockArgsOpenStorageUnits()) - - assert.NoError(t, err) - assert.False(t, check.IfNil(suoh)) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + suoh, err := NewStorageUnitOpenHandler(createMockArgsOpenStorageUnits()) + assert.NoError(t, err) + assert.NotNil(t, suoh) + }) + t.Run("nil BootstrapDataProvider should error", func(t *testing.T) { + t.Parallel() + + args := createMockArgsOpenStorageUnits() + args.BootstrapDataProvider = nil + suoh, err := NewStorageUnitOpenHandler(args) + assert.Equal(t, storage.ErrNilBootstrapDataProvider, err) + assert.Nil(t, suoh) + }) + t.Run("nil LatestStorageDataProvider should error", func(t *testing.T) { + t.Parallel() + + args := createMockArgsOpenStorageUnits() + args.LatestStorageDataProvider = nil + suoh, err := NewStorageUnitOpenHandler(args) + assert.Equal(t, storage.ErrNilLatestStorageDataProvider, err) + assert.Nil(t, suoh) + }) } 
func TestGetMostUpToDateDirectory(t *testing.T) { @@ -56,6 +77,23 @@ func TestGetMostUpToDateDirectory(t *testing.T) { assert.Equal(t, shardIDsStr[1], dirName) } +func TestGetMostRecentBootstrapStorageUnit_GetParentDirAndLastEpochErr(t *testing.T) { + t.Parallel() + + localErr := errors.New("localErr") + args := createMockArgsOpenStorageUnits() + args.LatestStorageDataProvider = &mock.LatestStorageDataProviderStub{ + GetParentDirAndLastEpochCalled: func() (string, uint32, error) { + return "", 0, localErr + }, + } + suoh, _ := NewStorageUnitOpenHandler(args) + + storer, err := suoh.GetMostRecentStorageUnit(config.DBConfig{}) + assert.Nil(t, storer) + assert.Equal(t, localErr, err) +} + func TestGetMostRecentBootstrapStorageUnit_GetShardsFromDirectoryErr(t *testing.T) { t.Parallel() @@ -140,5 +178,62 @@ func TestGetMostRecentBootstrapStorageUnit(t *testing.T) { storer, err := suoh.GetMostRecentStorageUnit(generalConfig.BootstrapStorage.DB) assert.NoError(t, err) assert.NotNil(t, storer) +} + +func TestStorageUnitOpenHandler_OpenDB(t *testing.T) { + t.Parallel() + + tempDir := t.TempDir() + args := createMockArgsOpenStorageUnits() + args.LatestStorageDataProvider = &mock.LatestStorageDataProviderStub{ + GetParentDirectoryCalled: func() string { + return tempDir + }, + } + suoh, _ := NewStorageUnitOpenHandler(args) + + // do not run these in parallel as they are using the same temp dir + t.Run("create DB fails, should error", func(t *testing.T) { + dbConfig := config.DBConfig{ + FilePath: "Test", + Type: "invalid DB type", + BatchDelaySeconds: 5, + MaxBatchSize: 100, + MaxOpenFiles: 10, + UseTmpAsFilePath: false, + } + + storerInstance, err := suoh.OpenDB(dbConfig, 0, 0) + assert.NotNil(t, err) + expectedErrorString := "not supported db type" + assert.Equal(t, expectedErrorString, err.Error()) + assert.Nil(t, storerInstance) + }) + t.Run("should work", func(t *testing.T) { + dbConfig := config.DBConfig{ + FilePath: "Test", + Type: "LvlDBSerial", + BatchDelaySeconds: 5, + MaxBatchSize: 100, + MaxOpenFiles: 10, + UseTmpAsFilePath: false, + } + + storerInstance, err := suoh.OpenDB(dbConfig, 0, 0) + assert.Nil(t, err) + assert.NotNil(t, storerInstance) + + _ = storerInstance.Close() + }) +} + +func TestStorageUnitOpenHandler_IsInterfaceNil(t *testing.T) { + t.Parallel() + + var osu *openStorageUnits + require.True(t, osu.IsInterfaceNil()) + + osu, _ = NewStorageUnitOpenHandler(createMockArgsOpenStorageUnits()) + require.False(t, osu.IsInterfaceNil()) +} diff --git a/storage/factory/persisterFactory.go b/storage/factory/persisterFactory.go index 7bfb8cee594..55b3d45806a 100644 --- a/storage/factory/persisterFactory.go +++ b/storage/factory/persisterFactory.go @@ -6,6 +6,7 @@ import ( "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/storage" "github.com/multiversx/mx-chain-go/storage/database" + "github.com/multiversx/mx-chain-go/storage/disabled" "github.com/multiversx/mx-chain-go/storage/storageunit" ) @@ -47,7 +48,7 @@ func (pf *PersisterFactory) Create(path string) (storage.Persister, error) { // CreateDisabled will return a new disabled persister func (pf *PersisterFactory) CreateDisabled() storage.Persister { - return &disabledPersister{} + return disabled.NewErrorDisabledPersister() } // IsInterfaceNil returns true if there is no value under the interface diff --git a/storage/factory/persisterFactory_test.go b/storage/factory/persisterFactory_test.go new file mode 100644 index 00000000000..3aee6a0132e --- /dev/null +++ b/storage/factory/persisterFactory_test.go @@
-0,0 +1,101 @@ +package factory + +import ( + "fmt" + "testing" + + "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/storage" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func createDBConfig(dbType string) config.DBConfig { + return config.DBConfig{ + FilePath: "TEST", + Type: dbType, + BatchDelaySeconds: 5, + MaxBatchSize: 100, + MaxOpenFiles: 10, + UseTmpAsFilePath: false, + } +} + +func TestNewPersisterFactory(t *testing.T) { + t.Parallel() + + factoryInstance := NewPersisterFactory(createDBConfig("LvlDB")) + assert.NotNil(t, factoryInstance) +} + +func TestPersisterFactory_Create(t *testing.T) { + t.Parallel() + + t.Run("empty path should error", func(t *testing.T) { + t.Parallel() + + factoryInstance := NewPersisterFactory(createDBConfig("LvlDB")) + persisterInstance, err := factoryInstance.Create("") + assert.True(t, check.IfNil(persisterInstance)) + expectedErrString := "invalid file path" + assert.Equal(t, expectedErrString, err.Error()) + }) + t.Run("unknown type should error", func(t *testing.T) { + t.Parallel() + + factoryInstance := NewPersisterFactory(createDBConfig("invalid type")) + persisterInstance, err := factoryInstance.Create(t.TempDir()) + assert.True(t, check.IfNil(persisterInstance)) + assert.Equal(t, storage.ErrNotSupportedDBType, err) + }) + t.Run("for LvlDB should work", func(t *testing.T) { + t.Parallel() + + factoryInstance := NewPersisterFactory(createDBConfig("LvlDB")) + persisterInstance, err := factoryInstance.Create(t.TempDir()) + assert.Nil(t, err) + assert.False(t, check.IfNil(persisterInstance)) + assert.Equal(t, "*leveldb.DB", fmt.Sprintf("%T", persisterInstance)) + _ = persisterInstance.Close() + }) + t.Run("for LvlDBSerial should work", func(t *testing.T) { + t.Parallel() + + factoryInstance := NewPersisterFactory(createDBConfig("LvlDBSerial")) + persisterInstance, err := factoryInstance.Create(t.TempDir()) + assert.Nil(t, err) + assert.False(t, check.IfNil(persisterInstance)) + assert.Equal(t, "*leveldb.SerialDB", fmt.Sprintf("%T", persisterInstance)) + _ = persisterInstance.Close() + }) + t.Run("for MemoryDB should work", func(t *testing.T) { + t.Parallel() + + factoryInstance := NewPersisterFactory(createDBConfig("MemoryDB")) + persisterInstance, err := factoryInstance.Create(t.TempDir()) + assert.Nil(t, err) + assert.False(t, check.IfNil(persisterInstance)) + assert.Equal(t, "*memorydb.DB", fmt.Sprintf("%T", persisterInstance)) + _ = persisterInstance.Close() + }) +} + +func TestPersisterFactory_CreateDisabled(t *testing.T) { + t.Parallel() + + factoryInstance := NewPersisterFactory(createDBConfig("LvlDB")) + persisterInstance := factoryInstance.CreateDisabled() + assert.NotNil(t, persisterInstance) + assert.Equal(t, "*disabled.errorDisabledPersister", fmt.Sprintf("%T", persisterInstance)) +} + +func TestPersisterFactory_IsInterfaceNil(t *testing.T) { + t.Parallel() + + var pf *PersisterFactory + require.True(t, pf.IsInterfaceNil()) + + pf = NewPersisterFactory(config.DBConfig{}) + require.False(t, pf.IsInterfaceNil()) +} diff --git a/storage/factory/pruningStorerFactory.go b/storage/factory/storageServiceFactory.go similarity index 92% rename from storage/factory/pruningStorerFactory.go rename to storage/factory/storageServiceFactory.go index d42f8db6ce6..df34577ceb1 100644 --- a/storage/factory/pruningStorerFactory.go +++ b/storage/factory/storageServiceFactory.go @@ -6,6 +6,7 @@ import ( 
"github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/epochStart" @@ -48,6 +49,7 @@ type StorageServiceFactory struct { createTrieEpochRootHashStorer bool currentEpoch uint32 storageType StorageServiceType + nodeProcessingMode common.NodeProcessingMode snapshotsEnabled bool } @@ -63,6 +65,7 @@ type StorageServiceFactoryArgs struct { ManagedPeersHolder storage.ManagedPeersHolder CurrentEpoch uint32 CreateTrieEpochRootHashStorer bool + NodeProcessingMode common.NodeProcessingMode SnapshotsEnabled bool } @@ -96,6 +99,7 @@ func NewStorageServiceFactory(args StorageServiceFactoryArgs) (*StorageServiceFa createTrieEpochRootHashStorer: args.CreateTrieEpochRootHashStorer, oldDataCleanerProvider: oldDataCleanProvider, storageType: args.StorageType, + nodeProcessingMode: args.NodeProcessingMode, snapshotsEnabled: args.SnapshotsEnabled, }, nil } @@ -127,56 +131,56 @@ func (psf *StorageServiceFactory) createAndAddBaseStorageUnits( txUnitArgs := psf.createPruningStorerArgs(psf.generalConfig.TxStorage, disabledCustomDatabaseRemover) txUnit, err := psf.createPruningPersister(txUnitArgs) if err != nil { - return err + return fmt.Errorf("%w for TxStorage", err) } store.AddStorer(dataRetriever.TransactionUnit, txUnit) unsignedTxUnitArgs := psf.createPruningStorerArgs(psf.generalConfig.UnsignedTransactionStorage, disabledCustomDatabaseRemover) unsignedTxUnit, err := psf.createPruningPersister(unsignedTxUnitArgs) if err != nil { - return err + return fmt.Errorf("%w for UnsignedTransactionStorage", err) } store.AddStorer(dataRetriever.UnsignedTransactionUnit, unsignedTxUnit) rewardTxUnitArgs := psf.createPruningStorerArgs(psf.generalConfig.RewardTxStorage, disabledCustomDatabaseRemover) rewardTxUnit, err := psf.createPruningPersister(rewardTxUnitArgs) if err != nil { - return err + return fmt.Errorf("%w for RewardTxStorage", err) } store.AddStorer(dataRetriever.RewardTransactionUnit, rewardTxUnit) receiptsUnitArgs := psf.createPruningStorerArgs(psf.generalConfig.ReceiptsStorage, disabledCustomDatabaseRemover) receiptsUnit, err := psf.createPruningPersister(receiptsUnitArgs) if err != nil { - return err + return fmt.Errorf("%w for ReceiptsStorage", err) } store.AddStorer(dataRetriever.ReceiptsUnit, receiptsUnit) scheduledSCRsUnitArgs := psf.createPruningStorerArgs(psf.generalConfig.ScheduledSCRsStorage, disabledCustomDatabaseRemover) scheduledSCRsUnit, err := psf.createPruningPersister(scheduledSCRsUnitArgs) if err != nil { - return err + return fmt.Errorf("%w for ScheduledSCRsStorage", err) } store.AddStorer(dataRetriever.ScheduledSCRsUnit, scheduledSCRsUnit) bootstrapUnitArgs := psf.createPruningStorerArgs(psf.generalConfig.BootstrapStorage, disabledCustomDatabaseRemover) bootstrapUnit, err := psf.createPruningPersister(bootstrapUnitArgs) if err != nil { - return err + return fmt.Errorf("%w for BootstrapStorage", err) } store.AddStorer(dataRetriever.BootstrapUnit, bootstrapUnit) miniBlockUnitArgs := psf.createPruningStorerArgs(psf.generalConfig.MiniBlocksStorage, disabledCustomDatabaseRemover) miniBlockUnit, err := psf.createPruningPersister(miniBlockUnitArgs) if err != nil { - return err + return fmt.Errorf("%w for MiniBlocksStorage", err) } store.AddStorer(dataRetriever.MiniBlockUnit, miniBlockUnit) metaBlockUnitArgs := psf.createPruningStorerArgs(psf.generalConfig.MetaBlockStorage, 
disabledCustomDatabaseRemover) metaBlockUnit, err := psf.createPruningPersister(metaBlockUnitArgs) if err != nil { - return err + return fmt.Errorf("%w for MetaBlockStorage", err) } store.AddStorer(dataRetriever.MetaBlockUnit, metaBlockUnit) @@ -188,34 +192,34 @@ func (psf *StorageServiceFactory) createAndAddBaseStorageUnits( GetCacherFromConfig(psf.generalConfig.MetaHdrNonceHashStorage.Cache), metaHdrHashNonceUnitConfig) if err != nil { - return err + return fmt.Errorf("%w for MetaHdrNonceHashStorage", err) } store.AddStorer(dataRetriever.MetaHdrNonceHashDataUnit, metaHdrHashNonceUnit) headerUnitArgs := psf.createPruningStorerArgs(psf.generalConfig.BlockHeaderStorage, disabledCustomDatabaseRemover) headerUnit, err := psf.createPruningPersister(headerUnitArgs) if err != nil { - return err + return fmt.Errorf("%w for BlockHeaderStorage", err) } store.AddStorer(dataRetriever.BlockHeaderUnit, headerUnit) userAccountsUnit, err := psf.createTriePruningStorer(psf.generalConfig.AccountsTrieStorage, customDatabaseRemover) if err != nil { - return err + return fmt.Errorf("%w for AccountsTrieStorage", err) } store.AddStorer(dataRetriever.UserAccountsUnit, userAccountsUnit) userAccountsCheckpointsUnitArgs := psf.createPruningStorerArgs(psf.generalConfig.AccountsTrieCheckpointsStorage, disabledCustomDatabaseRemover) userAccountsCheckpointsUnit, err := psf.createPruningPersister(userAccountsCheckpointsUnitArgs) if err != nil { - return err + return fmt.Errorf("%w for AccountsTrieCheckpointsStorage", err) } store.AddStorer(dataRetriever.UserAccountsCheckpointsUnit, userAccountsCheckpointsUnit) peerAccountsCheckpointsUnitArgs := psf.createPruningStorerArgs(psf.generalConfig.PeerAccountsTrieCheckpointsStorage, disabledCustomDatabaseRemover) peerAccountsCheckpointsUnit, err := psf.createPruningPersister(peerAccountsCheckpointsUnitArgs) if err != nil { - return err + return fmt.Errorf("%w for PeerAccountsTrieCheckpointsStorage", err) } store.AddStorer(dataRetriever.PeerAccountsCheckpointsUnit, peerAccountsCheckpointsUnit) @@ -227,7 +231,7 @@ func (psf *StorageServiceFactory) createAndAddBaseStorageUnits( GetCacherFromConfig(psf.generalConfig.StatusMetricsStorage.Cache), statusMetricsDbConfig) if err != nil { - return err + return fmt.Errorf("%w for StatusMetricsStorage", err) } store.AddStorer(dataRetriever.StatusMetricsUnit, statusMetricsStorageUnit) @@ -261,7 +265,7 @@ func (psf *StorageServiceFactory) CreateForShard() (dataRetriever.StorageService GetCacherFromConfig(psf.generalConfig.ShardHdrNonceHashStorage.Cache), shardHdrHashNonceConfig) if err != nil { - return nil, err + return nil, fmt.Errorf("%w for ShardHdrNonceHashStorage", err) } store := dataRetriever.NewChainStorer() @@ -273,14 +277,14 @@ func (psf *StorageServiceFactory) CreateForShard() (dataRetriever.StorageService peerAccountsUnitArgs := psf.createPruningStorerArgs(psf.generalConfig.PeerAccountsTrieStorage, customDatabaseRemover) peerAccountsUnit, err := psf.createTrieUnit(psf.generalConfig.PeerAccountsTrieStorage, peerAccountsUnitArgs) if err != nil { - return nil, err + return nil, fmt.Errorf("%w for PeerAccountsTrieStorage", err) } store.AddStorer(dataRetriever.PeerAccountsUnit, peerAccountsUnit) peerBlockUnitArgs := psf.createPruningStorerArgs(psf.generalConfig.PeerBlockBodyStorage, disabledCustomDatabaseRemover) peerBlockUnit, err := psf.createPruningPersister(peerBlockUnitArgs) if err != nil { - return nil, err + return nil, fmt.Errorf("%w for PeerBlockBodyStorage", err) } store.AddStorer(dataRetriever.PeerChangesUnit, 
peerBlockUnit) @@ -326,7 +330,7 @@ func (psf *StorageServiceFactory) CreateForMeta() (dataRetriever.StorageService, GetCacherFromConfig(psf.generalConfig.ShardHdrNonceHashStorage.Cache), shardHdrHashNonceConfig) if err != nil { - return nil, err + return nil, fmt.Errorf("%w for ShardHdrNonceHashStorage on shard %d", err, i) } } @@ -370,7 +374,7 @@ func (psf *StorageServiceFactory) createTriePruningStorer( customDatabaseRemover storage.CustomDatabaseRemoverHandler, ) (storage.Storer, error) { accountsUnitArgs := psf.createPruningStorerArgs(storageConfig, customDatabaseRemover) - if psf.storageType == ProcessStorageService { + if psf.storageType == ProcessStorageService && psf.nodeProcessingMode == common.Normal { accountsUnitArgs.PersistersTracker = pruning.NewTriePersisterTracker(accountsUnitArgs.EpochsData) } @@ -401,7 +405,7 @@ func (psf *StorageServiceFactory) setupLogsAndEventsStorer(chainStorer *dataRetr txLogsUnitArgs := psf.createPruningStorerArgs(psf.generalConfig.LogsAndEvents.TxLogsStorage, disabled.NewDisabledCustomDatabaseRemover()) txLogsUnit, err = psf.createPruningPersister(txLogsUnitArgs) if err != nil { - return err + return fmt.Errorf("%w for LogsAndEvents.TxLogsStorage", err) } } @@ -422,7 +426,7 @@ func (psf *StorageServiceFactory) setupDbLookupExtensions(chainStorer *dataRetri eventsHashesByTxHashStorerArgs := psf.createPruningStorerArgs(eventsHashesByTxHashConfig, disabled.NewDisabledCustomDatabaseRemover()) eventsHashesByTxHashPruningStorer, err := psf.createPruningPersister(eventsHashesByTxHashStorerArgs) if err != nil { - return err + return fmt.Errorf("%w for DbLookupExtensions.ResultsHashesByTxHashStorageConfig", err) } chainStorer.AddStorer(dataRetriever.ResultsHashesByTxHashUnit, eventsHashesByTxHashPruningStorer) @@ -432,7 +436,7 @@ func (psf *StorageServiceFactory) setupDbLookupExtensions(chainStorer *dataRetri miniblocksMetadataPruningStorerArgs := psf.createPruningStorerArgs(miniblocksMetadataConfig, disabled.NewDisabledCustomDatabaseRemover()) miniblocksMetadataPruningStorer, err := psf.createPruningPersister(miniblocksMetadataPruningStorerArgs) if err != nil { - return err + return fmt.Errorf("%w for DbLookupExtensions.MiniblocksMetadataStorageConfig", err) } chainStorer.AddStorer(dataRetriever.MiniblocksMetadataUnit, miniblocksMetadataPruningStorer) @@ -444,7 +448,7 @@ func (psf *StorageServiceFactory) setupDbLookupExtensions(chainStorer *dataRetri miniblockHashByTxHashCacherConfig := GetCacherFromConfig(miniblockHashByTxHashConfig.Cache) miniblockHashByTxHashUnit, err := storageunit.NewStorageUnitFromConf(miniblockHashByTxHashCacherConfig, miniblockHashByTxHashDbConfig) if err != nil { - return err + return fmt.Errorf("%w for DbLookupExtensions.MiniblockHashByTxHashStorageConfig", err) } chainStorer.AddStorer(dataRetriever.MiniblockHashByTxHashUnit, miniblockHashByTxHashUnit) @@ -456,7 +460,7 @@ func (psf *StorageServiceFactory) setupDbLookupExtensions(chainStorer *dataRetri blockHashByRoundCacherConfig := GetCacherFromConfig(blockHashByRoundConfig.Cache) blockHashByRoundUnit, err := storageunit.NewStorageUnitFromConf(blockHashByRoundCacherConfig, blockHashByRoundDBConfig) if err != nil { - return err + return fmt.Errorf("%w for DbLookupExtensions.RoundHashStorageConfig", err) } chainStorer.AddStorer(dataRetriever.RoundHdrHashDataUnit, blockHashByRoundUnit) @@ -468,7 +472,7 @@ func (psf *StorageServiceFactory) setupDbLookupExtensions(chainStorer *dataRetri epochByHashCacherConfig := GetCacherFromConfig(epochByHashConfig.Cache) epochByHashUnit, err := 
storageunit.NewStorageUnitFromConf(epochByHashCacherConfig, epochByHashDbConfig) if err != nil { - return err + return fmt.Errorf("%w for DbLookupExtensions.EpochByHashStorageConfig", err) } chainStorer.AddStorer(dataRetriever.EpochByHashUnit, epochByHashUnit) @@ -479,7 +483,7 @@ func (psf *StorageServiceFactory) setupDbLookupExtensions(chainStorer *dataRetri esdtSuppliesCacherConfig := GetCacherFromConfig(esdtSuppliesConfig.Cache) esdtSuppliesUnit, err := storageunit.NewStorageUnitFromConf(esdtSuppliesCacherConfig, esdtSuppliesDbConfig) if err != nil { - return err + return fmt.Errorf("%w for DbLookupExtensions.ESDTSuppliesStorageConfig", err) } chainStorer.AddStorer(dataRetriever.ESDTSuppliesUnit, esdtSuppliesUnit) @@ -534,7 +538,7 @@ func (psf *StorageServiceFactory) createTrieEpochRootHashStorerIfNeeded() (stora GetCacherFromConfig(psf.generalConfig.TrieEpochRootHashStorage.Cache), trieEpochRootHashDbConfig) if err != nil { - return nil, err + return nil, fmt.Errorf("%w for TrieEpochRootHashStorage", err) } return trieEpochRootHashStorageUnit, nil diff --git a/storage/factory/storageServiceFactory_test.go b/storage/factory/storageServiceFactory_test.go new file mode 100644 index 00000000000..2d5cf95522a --- /dev/null +++ b/storage/factory/storageServiceFactory_test.go @@ -0,0 +1,514 @@ +package factory + +import ( + "testing" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/storage" + "github.com/multiversx/mx-chain-go/storage/mock" + "github.com/multiversx/mx-chain-go/testscommon" + "github.com/multiversx/mx-chain-go/testscommon/nodeTypeProviderMock" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func createMockArgument(t *testing.T) StorageServiceFactoryArgs { + pathMan, err := CreatePathManagerFromSinglePathString(t.TempDir()) + require.Nil(t, err) + + return StorageServiceFactoryArgs{ + Config: config.Config{ + StateTriesConfig: config.StateTriesConfig{}, + StoragePruning: config.StoragePruningConfig{ + Enabled: true, + NumActivePersisters: 3, + NumEpochsToKeep: 4, + ObserverCleanOldEpochsData: true, + }, + ShardHdrNonceHashStorage: createMockStorageConfig("ShardHdrNonceHashStorage"), + TxStorage: createMockStorageConfig("TxStorage"), + UnsignedTransactionStorage: createMockStorageConfig("UnsignedTransactionStorage"), + RewardTxStorage: createMockStorageConfig("RewardTxStorage"), + ReceiptsStorage: createMockStorageConfig("ReceiptsStorage"), + ScheduledSCRsStorage: createMockStorageConfig("ScheduledSCRsStorage"), + BootstrapStorage: createMockStorageConfig("BootstrapStorage"), + MiniBlocksStorage: createMockStorageConfig("MiniBlocksStorage"), + MetaBlockStorage: createMockStorageConfig("MetaBlockStorage"), + MetaHdrNonceHashStorage: createMockStorageConfig("MetaHdrNonceHashStorage"), + BlockHeaderStorage: createMockStorageConfig("BlockHeaderStorage"), + AccountsTrieStorage: createMockStorageConfig("AccountsTrieStorage"), + AccountsTrieCheckpointsStorage: createMockStorageConfig("AccountsTrieCheckpointsStorage"), + PeerAccountsTrieStorage: createMockStorageConfig("PeerAccountsTrieStorage"), + PeerAccountsTrieCheckpointsStorage: createMockStorageConfig("PeerAccountsTrieCheckpointsStorage"), + StatusMetricsStorage: createMockStorageConfig("StatusMetricsStorage"), + PeerBlockBodyStorage: createMockStorageConfig("PeerBlockBodyStorage"), + TrieEpochRootHashStorage: 
createMockStorageConfig("TrieEpochRootHashStorage"), + DbLookupExtensions: config.DbLookupExtensionsConfig{ + Enabled: true, + DbLookupMaxActivePersisters: 10, + MiniblocksMetadataStorageConfig: createMockStorageConfig("MiniblocksMetadataStorage"), + MiniblockHashByTxHashStorageConfig: createMockStorageConfig("MiniblockHashByTxHashStorage"), + EpochByHashStorageConfig: createMockStorageConfig("EpochByHashStorage"), + ResultsHashesByTxHashStorageConfig: createMockStorageConfig("ResultsHashesByTxHashStorage"), + ESDTSuppliesStorageConfig: createMockStorageConfig("ESDTSuppliesStorage"), + RoundHashStorageConfig: createMockStorageConfig("RoundHashStorage"), + }, + LogsAndEvents: config.LogsAndEventsConfig{ + SaveInStorageEnabled: true, + TxLogsStorage: createMockStorageConfig("TxLogsStorage"), + }, + }, + PrefsConfig: config.PreferencesConfig{}, + ShardCoordinator: &mock.ShardCoordinatorMock{ + NumShards: 3, + }, + PathManager: pathMan, + EpochStartNotifier: &mock.EpochStartNotifierStub{}, + NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{ + GetTypeCalled: func() core.NodeType { + return core.NodeTypeObserver + }, + }, + StorageType: ProcessStorageService, + CurrentEpoch: 0, + CreateTrieEpochRootHashStorer: true, + ManagedPeersHolder: &testscommon.ManagedPeersHolderStub{}, + } +} + +func createMockStorageConfig(dbName string) config.StorageConfig { + return config.StorageConfig{ + Cache: config.CacheConfig{ + Type: "LRU", + Capacity: 1000, + }, + DB: config.DBConfig{ + FilePath: dbName, + Type: "LvlDBSerial", + BatchDelaySeconds: 5, + MaxBatchSize: 100, + MaxOpenFiles: 10, + }, + } +} + +func TestNewStorageServiceFactory(t *testing.T) { + t.Parallel() + + t.Run("invalid StoragePruning.NumActivePersisters should error", func(t *testing.T) { + t.Parallel() + + args := createMockArgument(t) + args.Config.StoragePruning.NumActivePersisters = 0 + storageServiceFactory, err := NewStorageServiceFactory(args) + assert.Equal(t, storage.ErrInvalidNumberOfActivePersisters, err) + assert.Nil(t, storageServiceFactory) + }) + t.Run("nil shard coordinator should error", func(t *testing.T) { + t.Parallel() + + args := createMockArgument(t) + args.ShardCoordinator = nil + storageServiceFactory, err := NewStorageServiceFactory(args) + assert.Equal(t, storage.ErrNilShardCoordinator, err) + assert.Nil(t, storageServiceFactory) + }) + t.Run("nil path manager should error", func(t *testing.T) { + t.Parallel() + + args := createMockArgument(t) + args.PathManager = nil + storageServiceFactory, err := NewStorageServiceFactory(args) + assert.Equal(t, storage.ErrNilPathManager, err) + assert.Nil(t, storageServiceFactory) + }) + t.Run("nil epoch start notifier should error", func(t *testing.T) { + t.Parallel() + + args := createMockArgument(t) + args.EpochStartNotifier = nil + storageServiceFactory, err := NewStorageServiceFactory(args) + assert.Equal(t, storage.ErrNilEpochStartNotifier, err) + assert.Nil(t, storageServiceFactory) + }) + t.Run("invalid number of epochs to save should error", func(t *testing.T) { + t.Parallel() + + args := createMockArgument(t) + args.Config.StoragePruning.NumEpochsToKeep = 1 + storageServiceFactory, err := NewStorageServiceFactory(args) + assert.Equal(t, storage.ErrInvalidNumberOfEpochsToSave, err) + assert.Nil(t, storageServiceFactory) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + args := createMockArgument(t) + storageServiceFactory, err := NewStorageServiceFactory(args) + assert.Nil(t, err) + assert.NotNil(t, storageServiceFactory) + }) +} + +func 
TestStorageServiceFactory_CreateForShard(t *testing.T) { + t.Parallel() + + expectedErrForCacheString := "not supported cache type" + + t.Run("wrong config for ShardHdrNonceHashStorage should error", func(t *testing.T) { + t.Parallel() + + args := createMockArgument(t) + args.Config.ShardHdrNonceHashStorage.Cache.Type = "" + storageServiceFactory, _ := NewStorageServiceFactory(args) + storageService, err := storageServiceFactory.CreateForShard() + assert.Equal(t, expectedErrForCacheString+" for ShardHdrNonceHashStorage", err.Error()) + assert.True(t, check.IfNil(storageService)) + }) + t.Run("wrong config for TxStorage should error", func(t *testing.T) { + t.Parallel() + + args := createMockArgument(t) + args.Config.TxStorage.Cache.Type = "" + storageServiceFactory, _ := NewStorageServiceFactory(args) + storageService, err := storageServiceFactory.CreateForShard() + assert.Equal(t, expectedErrForCacheString+" for TxStorage", err.Error()) + assert.True(t, check.IfNil(storageService)) + }) + t.Run("wrong config for UnsignedTransactionStorage should error", func(t *testing.T) { + t.Parallel() + + args := createMockArgument(t) + args.Config.UnsignedTransactionStorage.Cache.Type = "" + storageServiceFactory, _ := NewStorageServiceFactory(args) + storageService, err := storageServiceFactory.CreateForShard() + assert.Equal(t, expectedErrForCacheString+" for UnsignedTransactionStorage", err.Error()) + assert.True(t, check.IfNil(storageService)) + }) + t.Run("wrong config for RewardTxStorage should error", func(t *testing.T) { + t.Parallel() + + args := createMockArgument(t) + args.Config.RewardTxStorage.Cache.Type = "" + storageServiceFactory, _ := NewStorageServiceFactory(args) + storageService, err := storageServiceFactory.CreateForShard() + assert.Equal(t, expectedErrForCacheString+" for RewardTxStorage", err.Error()) + assert.True(t, check.IfNil(storageService)) + }) + t.Run("wrong config for ReceiptsStorage should error", func(t *testing.T) { + t.Parallel() + + args := createMockArgument(t) + args.Config.ReceiptsStorage.Cache.Type = "" + storageServiceFactory, _ := NewStorageServiceFactory(args) + storageService, err := storageServiceFactory.CreateForShard() + assert.Equal(t, expectedErrForCacheString+" for ReceiptsStorage", err.Error()) + assert.True(t, check.IfNil(storageService)) + }) + t.Run("wrong config for ScheduledSCRsStorage should error", func(t *testing.T) { + t.Parallel() + + args := createMockArgument(t) + args.Config.ScheduledSCRsStorage.Cache.Type = "" + storageServiceFactory, _ := NewStorageServiceFactory(args) + storageService, err := storageServiceFactory.CreateForShard() + assert.Equal(t, expectedErrForCacheString+" for ScheduledSCRsStorage", err.Error()) + assert.True(t, check.IfNil(storageService)) + }) + t.Run("wrong config for BootstrapStorage should error", func(t *testing.T) { + t.Parallel() + + args := createMockArgument(t) + args.Config.BootstrapStorage.Cache.Type = "" + storageServiceFactory, _ := NewStorageServiceFactory(args) + storageService, err := storageServiceFactory.CreateForShard() + assert.Equal(t, expectedErrForCacheString+" for BootstrapStorage", err.Error()) + assert.True(t, check.IfNil(storageService)) + }) + t.Run("wrong config for MiniBlocksStorage should error", func(t *testing.T) { + t.Parallel() + + args := createMockArgument(t) + args.Config.MiniBlocksStorage.Cache.Type = "" + storageServiceFactory, _ := NewStorageServiceFactory(args) + storageService, err := storageServiceFactory.CreateForShard() + assert.Equal(t, expectedErrForCacheString+" 
for MiniBlocksStorage", err.Error()) + assert.True(t, check.IfNil(storageService)) + }) + t.Run("wrong config for MetaBlockStorage should error", func(t *testing.T) { + t.Parallel() + + args := createMockArgument(t) + args.Config.MetaBlockStorage.Cache.Type = "" + storageServiceFactory, _ := NewStorageServiceFactory(args) + storageService, err := storageServiceFactory.CreateForShard() + assert.Equal(t, expectedErrForCacheString+" for MetaBlockStorage", err.Error()) + assert.True(t, check.IfNil(storageService)) + }) + t.Run("wrong config for MetaHdrNonceHashStorage should error", func(t *testing.T) { + t.Parallel() + + args := createMockArgument(t) + args.Config.MetaHdrNonceHashStorage.Cache.Type = "" + storageServiceFactory, _ := NewStorageServiceFactory(args) + storageService, err := storageServiceFactory.CreateForShard() + assert.Equal(t, expectedErrForCacheString+" for MetaHdrNonceHashStorage", err.Error()) + assert.True(t, check.IfNil(storageService)) + }) + t.Run("wrong config for BlockHeaderStorage should error", func(t *testing.T) { + t.Parallel() + + args := createMockArgument(t) + args.Config.BlockHeaderStorage.Cache.Type = "" + storageServiceFactory, _ := NewStorageServiceFactory(args) + storageService, err := storageServiceFactory.CreateForShard() + assert.Equal(t, expectedErrForCacheString+" for BlockHeaderStorage", err.Error()) + assert.True(t, check.IfNil(storageService)) + }) + t.Run("wrong config for AccountsTrieStorage should error", func(t *testing.T) { + t.Parallel() + + args := createMockArgument(t) + args.Config.AccountsTrieStorage.Cache.Type = "" + storageServiceFactory, _ := NewStorageServiceFactory(args) + storageService, err := storageServiceFactory.CreateForShard() + assert.Equal(t, expectedErrForCacheString+" for AccountsTrieStorage", err.Error()) + assert.True(t, check.IfNil(storageService)) + }) + t.Run("wrong config for AccountsTrieCheckpointsStorage should error", func(t *testing.T) { + t.Parallel() + + args := createMockArgument(t) + args.Config.AccountsTrieCheckpointsStorage.Cache.Type = "" + storageServiceFactory, _ := NewStorageServiceFactory(args) + storageService, err := storageServiceFactory.CreateForShard() + assert.Equal(t, expectedErrForCacheString+" for AccountsTrieCheckpointsStorage", err.Error()) + assert.True(t, check.IfNil(storageService)) + }) + t.Run("wrong config for PeerAccountsTrieStorage should error", func(t *testing.T) { + t.Parallel() + + args := createMockArgument(t) + args.Config.PeerAccountsTrieStorage.Cache.Type = "" + storageServiceFactory, _ := NewStorageServiceFactory(args) + storageService, err := storageServiceFactory.CreateForShard() + assert.Equal(t, expectedErrForCacheString+" for PeerAccountsTrieStorage", err.Error()) + assert.True(t, check.IfNil(storageService)) + }) + t.Run("wrong config for PeerAccountsTrieCheckpointsStorage should error", func(t *testing.T) { + t.Parallel() + + args := createMockArgument(t) + args.Config.PeerAccountsTrieCheckpointsStorage.Cache.Type = "" + storageServiceFactory, _ := NewStorageServiceFactory(args) + storageService, err := storageServiceFactory.CreateForShard() + assert.Equal(t, expectedErrForCacheString+" for PeerAccountsTrieCheckpointsStorage", err.Error()) + assert.True(t, check.IfNil(storageService)) + }) + t.Run("wrong config for StatusMetricsStorage should error", func(t *testing.T) { + t.Parallel() + + args := createMockArgument(t) + args.Config.StatusMetricsStorage.Cache.Type = "" + storageServiceFactory, _ := NewStorageServiceFactory(args) + storageService, err := 
storageServiceFactory.CreateForShard() + assert.Equal(t, expectedErrForCacheString+" for StatusMetricsStorage", err.Error()) + assert.True(t, check.IfNil(storageService)) + }) + t.Run("wrong config for PeerBlockBodyStorage should error", func(t *testing.T) { + t.Parallel() + + args := createMockArgument(t) + args.Config.PeerBlockBodyStorage.Cache.Type = "" + storageServiceFactory, _ := NewStorageServiceFactory(args) + storageService, err := storageServiceFactory.CreateForShard() + assert.Equal(t, expectedErrForCacheString+" for PeerBlockBodyStorage", err.Error()) + assert.True(t, check.IfNil(storageService)) + }) + t.Run("wrong config for TrieEpochRootHashStorage should error", func(t *testing.T) { + t.Parallel() + + args := createMockArgument(t) + args.Config.TrieEpochRootHashStorage.Cache.Type = "" + storageServiceFactory, _ := NewStorageServiceFactory(args) + storageService, err := storageServiceFactory.CreateForShard() + assert.Equal(t, expectedErrForCacheString+" for TrieEpochRootHashStorage", err.Error()) + assert.True(t, check.IfNil(storageService)) + }) + t.Run("wrong config for DbLookupExtensions.MiniblocksMetadataStorageConfig should error", func(t *testing.T) { + t.Parallel() + + args := createMockArgument(t) + args.Config.DbLookupExtensions.MiniblocksMetadataStorageConfig.Cache.Type = "" + storageServiceFactory, _ := NewStorageServiceFactory(args) + storageService, err := storageServiceFactory.CreateForShard() + assert.Equal(t, expectedErrForCacheString+" for DbLookupExtensions.MiniblocksMetadataStorageConfig", err.Error()) + assert.True(t, check.IfNil(storageService)) + }) + t.Run("wrong config for DbLookupExtensions.MiniblockHashByTxHashStorageConfig should error", func(t *testing.T) { + t.Parallel() + + args := createMockArgument(t) + args.Config.DbLookupExtensions.MiniblockHashByTxHashStorageConfig.Cache.Type = "" + storageServiceFactory, _ := NewStorageServiceFactory(args) + storageService, err := storageServiceFactory.CreateForShard() + assert.Equal(t, expectedErrForCacheString+" for DbLookupExtensions.MiniblockHashByTxHashStorageConfig", err.Error()) + assert.True(t, check.IfNil(storageService)) + }) + t.Run("wrong config for DbLookupExtensions.EpochByHashStorageConfig should error", func(t *testing.T) { + t.Parallel() + + args := createMockArgument(t) + args.Config.DbLookupExtensions.EpochByHashStorageConfig.Cache.Type = "" + storageServiceFactory, _ := NewStorageServiceFactory(args) + storageService, err := storageServiceFactory.CreateForShard() + assert.Equal(t, expectedErrForCacheString+" for DbLookupExtensions.EpochByHashStorageConfig", err.Error()) + assert.True(t, check.IfNil(storageService)) + }) + t.Run("wrong config for DbLookupExtensions.ResultsHashesByTxHashStorageConfig should error", func(t *testing.T) { + t.Parallel() + + args := createMockArgument(t) + args.Config.DbLookupExtensions.ResultsHashesByTxHashStorageConfig.Cache.Type = "" + storageServiceFactory, _ := NewStorageServiceFactory(args) + storageService, err := storageServiceFactory.CreateForShard() + assert.Equal(t, expectedErrForCacheString+" for DbLookupExtensions.ResultsHashesByTxHashStorageConfig", err.Error()) + assert.True(t, check.IfNil(storageService)) + }) + t.Run("wrong config for DbLookupExtensions.ESDTSuppliesStorageConfig should error", func(t *testing.T) { + t.Parallel() + + args := createMockArgument(t) + args.Config.DbLookupExtensions.ESDTSuppliesStorageConfig.Cache.Type = "" + storageServiceFactory, _ := NewStorageServiceFactory(args) + storageService, err := 
storageServiceFactory.CreateForShard() + assert.Equal(t, expectedErrForCacheString+" for DbLookupExtensions.ESDTSuppliesStorageConfig", err.Error()) + assert.True(t, check.IfNil(storageService)) + }) + t.Run("wrong config for DbLookupExtensions.RoundHashStorageConfig should error", func(t *testing.T) { + t.Parallel() + + args := createMockArgument(t) + args.Config.DbLookupExtensions.RoundHashStorageConfig.Cache.Type = "" + storageServiceFactory, _ := NewStorageServiceFactory(args) + storageService, err := storageServiceFactory.CreateForShard() + assert.Equal(t, expectedErrForCacheString+" for DbLookupExtensions.RoundHashStorageConfig", err.Error()) + assert.True(t, check.IfNil(storageService)) + }) + t.Run("wrong config for LogsAndEvents.TxLogsStorage should error", func(t *testing.T) { + t.Parallel() + + args := createMockArgument(t) + args.Config.LogsAndEvents.TxLogsStorage.Cache.Type = "" + storageServiceFactory, _ := NewStorageServiceFactory(args) + storageService, err := storageServiceFactory.CreateForShard() + assert.Equal(t, expectedErrForCacheString+" for LogsAndEvents.TxLogsStorage", err.Error()) + assert.True(t, check.IfNil(storageService)) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + args := createMockArgument(t) + storageServiceFactory, _ := NewStorageServiceFactory(args) + storageService, err := storageServiceFactory.CreateForShard() + assert.Nil(t, err) + assert.False(t, check.IfNil(storageService)) + allStorers := storageService.GetAllStorers() + expectedStorers := 25 + assert.Equal(t, expectedStorers, len(allStorers)) + _ = storageService.CloseAll() + }) + t.Run("should work without DbLookupExtensions", func(t *testing.T) { + t.Parallel() + + args := createMockArgument(t) + args.Config.DbLookupExtensions.Enabled = false + storageServiceFactory, _ := NewStorageServiceFactory(args) + storageService, err := storageServiceFactory.CreateForShard() + assert.Nil(t, err) + assert.False(t, check.IfNil(storageService)) + allStorers := storageService.GetAllStorers() + numDBLookupExtensionUnits := 6 + expectedStorers := 25 - numDBLookupExtensionUnits + assert.Equal(t, expectedStorers, len(allStorers)) + _ = storageService.CloseAll() + }) + t.Run("should work without TrieEpochRootHashStorage", func(t *testing.T) { + t.Parallel() + + args := createMockArgument(t) + args.CreateTrieEpochRootHashStorer = false + storageServiceFactory, _ := NewStorageServiceFactory(args) + storageService, err := storageServiceFactory.CreateForShard() + assert.Nil(t, err) + assert.False(t, check.IfNil(storageService)) + allStorers := storageService.GetAllStorers() + expectedStorers := 25 // we still have a storer for trie epoch root hash + assert.Equal(t, expectedStorers, len(allStorers)) + _ = storageService.CloseAll() + }) +} + +func TestStorageServiceFactory_CreateForMeta(t *testing.T) { + t.Parallel() + + expectedErrForCacheString := "not supported cache type" + + t.Run("wrong config for ShardHdrNonceHashStorage should error", func(t *testing.T) { + t.Parallel() + + args := createMockArgument(t) + args.Config.ShardHdrNonceHashStorage.Cache.Type = "" + storageServiceFactory, _ := NewStorageServiceFactory(args) + storageService, err := storageServiceFactory.CreateForMeta() + assert.Equal(t, expectedErrForCacheString+" for ShardHdrNonceHashStorage on shard 0", err.Error()) + assert.True(t, check.IfNil(storageService)) + }) + t.Run("wrong config for AccountsTrieStorage should error", func(t *testing.T) { + t.Parallel() + + args := createMockArgument(t) + 
args.Config.AccountsTrieStorage.Cache.Type = "" + storageServiceFactory, _ := NewStorageServiceFactory(args) + storageService, err := storageServiceFactory.CreateForMeta() + assert.Equal(t, expectedErrForCacheString+" for AccountsTrieStorage", err.Error()) + assert.True(t, check.IfNil(storageService)) + }) + t.Run("wrong config for DbLookupExtensions.RoundHashStorageConfig should error", func(t *testing.T) { + t.Parallel() + + args := createMockArgument(t) + args.Config.DbLookupExtensions.RoundHashStorageConfig.Cache.Type = "" + storageServiceFactory, _ := NewStorageServiceFactory(args) + storageService, err := storageServiceFactory.CreateForMeta() + assert.Equal(t, expectedErrForCacheString+" for DbLookupExtensions.RoundHashStorageConfig", err.Error()) + assert.True(t, check.IfNil(storageService)) + }) + t.Run("wrong config for LogsAndEvents.TxLogsStorage should error", func(t *testing.T) { + t.Parallel() + + args := createMockArgument(t) + args.Config.LogsAndEvents.TxLogsStorage.Cache.Type = "" + storageServiceFactory, _ := NewStorageServiceFactory(args) + storageService, err := storageServiceFactory.CreateForMeta() + assert.Equal(t, expectedErrForCacheString+" for LogsAndEvents.TxLogsStorage", err.Error()) + assert.True(t, check.IfNil(storageService)) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + args := createMockArgument(t) + storageServiceFactory, _ := NewStorageServiceFactory(args) + storageService, err := storageServiceFactory.CreateForMeta() + assert.Nil(t, err) + assert.False(t, check.IfNil(storageService)) + allStorers := storageService.GetAllStorers() + missingStorers := 2 // PeerChangesUnit and ShardHdrNonceHashDataUnit + numShardHdrStorage := 3 + expectedStorers := 25 - missingStorers + numShardHdrStorage + assert.Equal(t, expectedStorers, len(allStorers)) + _ = storageService.CloseAll() + }) +} diff --git a/storage/latestData/latestDataProvider.go b/storage/latestData/latestDataProvider.go index 44902dc9ae1..d372f81b43c 100644 --- a/storage/latestData/latestDataProvider.go +++ b/storage/latestData/latestDataProvider.go @@ -9,6 +9,7 @@ import ( "strings" "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-core-go/data/block" "github.com/multiversx/mx-chain-core-go/marshal" @@ -53,6 +54,13 @@ type latestDataProvider struct { // NewLatestDataProvider returns a new instance of latestDataProvider func NewLatestDataProvider(args ArgsLatestDataProvider) (*latestDataProvider, error) { + if check.IfNil(args.DirectoryReader) { + return nil, storage.ErrNilDirectoryReader + } + if check.IfNil(args.BootstrapDataProvider) { + return nil, storage.ErrNilBootstrapDataProvider + } + return &latestDataProvider{ generalConfig: args.GeneralConfig, parentDir: args.ParentDir, diff --git a/storage/latestData/latestDataProvider_test.go b/storage/latestData/latestDataProvider_test.go index ebdbc31d178..e2d4c561ae0 100644 --- a/storage/latestData/latestDataProvider_test.go +++ b/storage/latestData/latestDataProvider_test.go @@ -8,7 +8,6 @@ import ( "testing" "github.com/multiversx/mx-chain-core-go/core" - "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-core-go/data/block" "github.com/multiversx/mx-chain-core-go/marshal" "github.com/multiversx/mx-chain-go/config" @@ -23,9 +22,39 @@ import ( func TestNewLatestDataProvider_ShouldWork(t *testing.T) { t.Parallel() - ldp, err := 
NewLatestDataProvider(getLatestDataProviderArgs()) - require.False(t, check.IfNil(ldp)) - require.NoError(t, err) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + ldp, err := NewLatestDataProvider(getLatestDataProviderArgs()) + require.NotNil(t, ldp) + require.NoError(t, err) + }) + t.Run("nil DirectoryReader should error", func(t *testing.T) { + t.Parallel() + + args := getLatestDataProviderArgs() + args.DirectoryReader = nil + ldp, err := NewLatestDataProvider(args) + require.Nil(t, ldp) + require.Equal(t, storage.ErrNilDirectoryReader, err) + }) + t.Run("nil BootstrapDataProvider should error", func(t *testing.T) { + t.Parallel() + + args := getLatestDataProviderArgs() + args.BootstrapDataProvider = nil + ldp, err := NewLatestDataProvider(args) + require.Nil(t, ldp) + require.Equal(t, storage.ErrNilBootstrapDataProvider, err) + }) +} + +func TestLatestDataProvider_GetParentDirectory(t *testing.T) { + t.Parallel() + + args := getLatestDataProviderArgs() + ldp, _ := NewLatestDataProvider(args) + require.Equal(t, args.ParentDir, ldp.GetParentDirectory()) } func TestGetShardsFromDirectory(t *testing.T) { @@ -366,3 +395,13 @@ func TestFullHistoryLoadEpochStartRoundMetachain(t *testing.T) { assert.NoError(t, err) assert.Equal(t, startRound, round) } + +func TestLatestDataProvider_IsInterfaceNil(t *testing.T) { + t.Parallel() + + var ldp *latestDataProvider + require.True(t, ldp.IsInterfaceNil()) + + ldp, _ = NewLatestDataProvider(getLatestDataProviderArgs()) + require.False(t, ldp.IsInterfaceNil()) +} diff --git a/storage/pathmanager/pathManager_test.go b/storage/pathmanager/pathManager_test.go index 061b536ffd1..c29c9f01b2c 100644 --- a/storage/pathmanager/pathManager_test.go +++ b/storage/pathmanager/pathManager_test.go @@ -5,6 +5,7 @@ import ( "github.com/multiversx/mx-chain-go/storage/pathmanager" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestNewPathManager_EmptyPruningPathTemplateShouldErr(t *testing.T) { @@ -23,6 +24,14 @@ func TestNewPathManager_EmptyStaticPathTemplateShouldErr(t *testing.T) { assert.Equal(t, pathmanager.ErrEmptyStaticPathTemplate, err) } +func TestNewPathManager_EmptyDBPathTemplateShouldErr(t *testing.T) { + t.Parallel() + + pm, err := pathmanager.NewPathManager("epoch_[E]/shard_[S]/[I]", "shard_[S]/[I]", "") + assert.Nil(t, pm) + assert.Equal(t, pathmanager.ErrInvalidDatabasePath, err) +} + func TestNewPathManager_InvalidPruningPathTemplate_NoShardPlaceholder_ShouldErr(t *testing.T) { t.Parallel() @@ -71,6 +80,14 @@ func TestNewPathManager_OkValsShouldWork(t *testing.T) { assert.Nil(t, err) } +func TestPathManager_DatabasePath(t *testing.T) { + t.Parallel() + + dbPath := "db" + pm, _ := pathmanager.NewPathManager("epoch_[E]/shard_[S]/[I]", "shard_[S]/[I]", dbPath) + assert.Equal(t, dbPath, pm.DatabasePath()) +} + func TestPathManager_PathForEpoch(t *testing.T) { t.Parallel() @@ -151,3 +168,13 @@ func TestPathManager_PathForStatic(t *testing.T) { }) } } + +func TestPathManager_IsInterfaceNil(t *testing.T) { + t.Parallel() + + var pm *pathmanager.PathManager + require.True(t, pm.IsInterfaceNil()) + + pm, _ = pathmanager.NewPathManager("epoch_[E]/shard_[S]/[I]", "shard_[S]/[I]", "db") + require.False(t, pm.IsInterfaceNil()) +} diff --git a/storage/pruning/fullHistoryPruningStorer_test.go b/storage/pruning/fullHistoryPruningStorer_test.go index 24adc389f9b..62c2d0c3b8c 100644 --- a/storage/pruning/fullHistoryPruningStorer_test.go +++ b/storage/pruning/fullHistoryPruningStorer_test.go @@ -10,7 +10,6 @@ import ( 
"testing" "time" - "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-core-go/core/random" storageCore "github.com/multiversx/mx-chain-core-go/storage" "github.com/multiversx/mx-chain-go/config" @@ -34,7 +33,7 @@ func TestNewFullHistoryPruningStorer_OkValsShouldWork(t *testing.T) { } fhps, err := pruning.NewFullHistoryPruningStorer(fhArgs) - assert.False(t, check.IfNil(fhps)) + assert.NotNil(t, fhps) assert.Nil(t, err) } @@ -383,3 +382,18 @@ func TestFullHistoryPruningStorer_ConcurrentOperations(t *testing.T) { // if the "resource temporary unavailable" occurs, this test will take longer than this to execute require.True(t, elapsedTime < 100*time.Second) } + +func TestFullHistoryPruningStorer_IsInterfaceNil(t *testing.T) { + t.Parallel() + + var fhps *pruning.FullHistoryPruningStorer + require.True(t, fhps.IsInterfaceNil()) + + args := getDefaultArgs() + fhArgs := pruning.FullHistoryStorerArgs{ + StorerArgs: args, + NumOfOldActivePersisters: 10, + } + fhps, _ = pruning.NewFullHistoryPruningStorer(fhArgs) + require.False(t, fhps.IsInterfaceNil()) +} diff --git a/storage/pruning/fullHistoryTriePruningStorer.go b/storage/pruning/fullHistoryTriePruningStorer.go index 092e9aaa1b1..63a0d9f1ba6 100644 --- a/storage/pruning/fullHistoryTriePruningStorer.go +++ b/storage/pruning/fullHistoryTriePruningStorer.go @@ -55,3 +55,8 @@ func (fhtps *fullHistoryTriePruningStorer) PutInEpoch(key []byte, data []byte, e func (fhtps *fullHistoryTriePruningStorer) Close() error { return fhtps.storerWithEpochOperations.Close() } + +// IsInterfaceNil returns true if there is no value under the interface +func (fhtps *fullHistoryTriePruningStorer) IsInterfaceNil() bool { + return fhtps == nil +} diff --git a/storage/pruning/fullHistoryTriePruningStorer_test.go b/storage/pruning/fullHistoryTriePruningStorer_test.go index e9f2196c93d..9994c35c464 100644 --- a/storage/pruning/fullHistoryTriePruningStorer_test.go +++ b/storage/pruning/fullHistoryTriePruningStorer_test.go @@ -3,11 +3,11 @@ package pruning_test import ( "testing" - "github.com/multiversx/mx-chain-core-go/core/check" storageCore "github.com/multiversx/mx-chain-core-go/storage" "github.com/multiversx/mx-chain-go/storage/pruning" "github.com/multiversx/mx-chain-go/testscommon/storage" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestNewFullHistoryTriePruningStorer(t *testing.T) { @@ -20,7 +20,7 @@ func TestNewFullHistoryTriePruningStorer(t *testing.T) { } fhps, err := pruning.NewFullHistoryTriePruningStorer(fhArgs) assert.Nil(t, err) - assert.False(t, check.IfNil(fhps)) + assert.NotNil(t, fhps) } func TestFullHistoryTriePruningStorer_CallsMethodsFromUndelyingFHPS(t *testing.T) { @@ -118,3 +118,18 @@ func TestFullHistoryTriePruningStorer_CallsMethodsFromUndelyingFHPS(t *testing.T assert.True(t, closeCalled) }) } + +func TestFullHistoryTriePruningStorer_IsInterfaceNil(t *testing.T) { + t.Parallel() + + fhtps, _ := pruning.NewFullHistoryTriePruningStorer(pruning.FullHistoryStorerArgs{}) + require.True(t, fhtps.IsInterfaceNil()) + + args := getDefaultArgs() + fhArgs := pruning.FullHistoryStorerArgs{ + StorerArgs: args, + NumOfOldActivePersisters: 10, + } + fhtps, _ = pruning.NewFullHistoryTriePruningStorer(fhArgs) + require.False(t, fhtps.IsInterfaceNil()) +} diff --git a/storage/pruning/persistersTracker_test.go b/storage/pruning/persistersTracker_test.go index 7c19c61f8a1..0c19e51ce8a 100644 --- a/storage/pruning/persistersTracker_test.go +++ b/storage/pruning/persistersTracker_test.go @@ 
-3,8 +3,8 @@ package pruning import ( "testing" - "github.com/multiversx/mx-chain-core-go/core/check" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func getArgs() EpochArgs { @@ -19,7 +19,7 @@ func TestNewPersistersTracker(t *testing.T) { t.Parallel() pt := NewPersistersTracker(getArgs()) - assert.False(t, check.IfNil(pt)) + assert.NotNil(t, pt) assert.Equal(t, int64(7), pt.oldestEpochKeep) assert.Equal(t, int64(8), pt.oldestEpochActive) } @@ -43,3 +43,13 @@ func TestPersistersTracker_ShouldClosePersister(t *testing.T) { assert.False(t, pt.ShouldClosePersister(8)) assert.True(t, pt.ShouldClosePersister(7)) } + +func TestPersistersTracker_IsInterfaceNil(t *testing.T) { + t.Parallel() + + var npt *normalPersistersTracker + require.True(t, npt.IsInterfaceNil()) + + npt = NewPersistersTracker(getArgs()) + require.False(t, npt.IsInterfaceNil()) +} diff --git a/storage/pruning/pruningStorer_test.go b/storage/pruning/pruningStorer_test.go index 0ac4f7cf71f..113eaf6ab26 100644 --- a/storage/pruning/pruningStorer_test.go +++ b/storage/pruning/pruningStorer_test.go @@ -15,7 +15,6 @@ import ( "testing" "time" - "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-core-go/core/random" "github.com/multiversx/mx-chain-core-go/data/block" "github.com/multiversx/mx-chain-go/config" @@ -132,7 +131,7 @@ func TestNewPruningStorer_InvalidNumberOfActivePersistersShouldErr(t *testing.T) ps, err := pruning.NewPruningStorer(args) - assert.True(t, check.IfNil(ps)) + assert.Nil(t, ps) assert.Equal(t, storage.ErrInvalidNumberOfPersisters, err) } @@ -144,7 +143,7 @@ func TestNewPruningStorer_NilPersistersTrackerShouldErr(t *testing.T) { ps, err := pruning.NewPruningStorer(args) - assert.True(t, check.IfNil(ps)) + assert.Nil(t, ps) assert.Equal(t, storage.ErrNilPersistersTracker, err) } @@ -157,7 +156,7 @@ func TestNewPruningStorer_NumEpochKeepLowerThanNumActiveShouldErr(t *testing.T) ps, err := pruning.NewPruningStorer(args) - assert.True(t, check.IfNil(ps)) + assert.Nil(t, ps) assert.Equal(t, storage.ErrEpochKeepIsLowerThanNumActive, err) } @@ -168,7 +167,7 @@ func TestNewPruningStorer_NilEpochStartHandlerShouldErr(t *testing.T) { args.Notifier = nil ps, err := pruning.NewPruningStorer(args) - assert.True(t, check.IfNil(ps)) + assert.Nil(t, ps) assert.Equal(t, storage.ErrNilEpochStartNotifier, err) } @@ -179,7 +178,7 @@ func TestNewPruningStorer_NilShardCoordinatorShouldErr(t *testing.T) { args.ShardCoordinator = nil ps, err := pruning.NewPruningStorer(args) - assert.True(t, check.IfNil(ps)) + assert.Nil(t, ps) assert.Equal(t, storage.ErrNilShardCoordinator, err) } @@ -190,7 +189,7 @@ func TestNewPruningStorer_NilPathManagerShouldErr(t *testing.T) { args.PathManager = nil ps, err := pruning.NewPruningStorer(args) - assert.True(t, check.IfNil(ps)) + assert.Nil(t, ps) assert.Equal(t, storage.ErrNilPathManager, err) } @@ -201,7 +200,7 @@ func TestNewPruningStorer_NilOldDataCleanerProviderShouldErr(t *testing.T) { args.OldDataCleanerProvider = nil ps, err := pruning.NewPruningStorer(args) - assert.True(t, check.IfNil(ps)) + assert.Nil(t, ps) assert.Equal(t, storage.ErrNilOldDataCleanerProvider, err) } @@ -212,7 +211,7 @@ func TestNewPruningStorer_NilCustomDatabaseRemoverProviderShouldErr(t *testing.T args.CustomDatabaseRemover = nil ps, err := pruning.NewPruningStorer(args) - assert.True(t, check.IfNil(ps)) + assert.Nil(t, ps) assert.Equal(t, storage.ErrNilCustomDatabaseRemover, err) } @@ -223,7 +222,7 @@ func 
TestNewPruningStorer_NilPersisterFactoryShouldErr(t *testing.T) { args.PersisterFactory = nil ps, err := pruning.NewPruningStorer(args) - assert.True(t, check.IfNil(ps)) + assert.Nil(t, ps) assert.Equal(t, storage.ErrNilPersisterFactory, err) } @@ -234,7 +233,7 @@ func TestNewPruningStorer_CacheSizeLowerThanBatchSizeShouldErr(t *testing.T) { args.MaxBatchSize = 11 ps, err := pruning.NewPruningStorer(args) - assert.True(t, check.IfNil(ps)) + assert.Nil(t, ps) assert.Equal(t, storage.ErrCacheSizeIsLowerThanBatchSize, err) } @@ -244,7 +243,7 @@ func TestNewPruningStorer_OkValsShouldWork(t *testing.T) { args := getDefaultArgs() ps, err := pruning.NewPruningStorer(args) - assert.False(t, check.IfNil(ps)) + assert.NotNil(t, ps) assert.Nil(t, err) assert.False(t, ps.IsInterfaceNil()) } @@ -1148,3 +1147,178 @@ func TestPruningStorer_ConcurrentOperations(t *testing.T) { // if the "resource temporary unavailable" occurs, this test will take longer than this to execute require.True(t, elapsedTime < 100*time.Second) } + +func TestPruningStorer_RangeKeys(t *testing.T) { + t.Parallel() + + args := getDefaultArgs() + ps, _ := pruning.NewPruningStorer(args) + + t.Run("should not panic with nil handler", func(t *testing.T) { + t.Parallel() + + assert.NotPanics(t, func() { + ps.RangeKeys(nil) + }) + }) + t.Run("should not call handler", func(t *testing.T) { + t.Parallel() + + ps.RangeKeys(func(key []byte, val []byte) bool { + assert.Fail(t, "should not have called handler") + return false + }) + }) +} + +func TestPruningStorer_GetOldestEpoch(t *testing.T) { + t.Parallel() + + t.Run("should return error if no persisters are found", func(t *testing.T) { + t.Parallel() + + epochsData := pruning.EpochArgs{ + NumOfEpochsToKeep: 0, + NumOfActivePersisters: 0, + } + + args := getDefaultArgs() + args.PersistersTracker = pruning.NewPersistersTracker(epochsData) + ps, _ := pruning.NewPruningStorer(args) + + epoch, err := ps.GetOldestEpoch() + assert.NotNil(t, err) + assert.Zero(t, epoch) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + epochsData := pruning.EpochArgs{ + NumOfEpochsToKeep: 2, + NumOfActivePersisters: 2, + StartingEpoch: 5, + } + + args := getDefaultArgs() + args.PersistersTracker = pruning.NewPersistersTracker(epochsData) + args.EpochsData = epochsData + ps, _ := pruning.NewPruningStorer(args) + + epoch, err := ps.GetOldestEpoch() + assert.Nil(t, err) + expectedEpoch := uint32(4) // 5 and 4 are the active epochs + assert.Equal(t, expectedEpoch, epoch) + }) +} + +func TestPruningStorer_PutInEpoch(t *testing.T) { + t.Parallel() + + epochsData := pruning.EpochArgs{ + NumOfEpochsToKeep: 2, + NumOfActivePersisters: 2, + StartingEpoch: 5, + } + args := getDefaultArgs() + args.PersistersTracker = pruning.NewPersistersTracker(epochsData) + args.EpochsData = epochsData + ps, _ := pruning.NewPruningStorer(args) + + t.Run("if the epoch is not handled, should error", func(t *testing.T) { + t.Parallel() + + err := ps.PutInEpoch([]byte("key"), []byte("value"), 3) // only 4 and 5 are handled + expectedErrorString := "put in epoch: persister for epoch 3 not found" + assert.Equal(t, expectedErrorString, err.Error()) + }) + t.Run("put in existing epochs", func(t *testing.T) { + t.Parallel() + + key4 := []byte("key4") + value4 := []byte("value4") + key5 := []byte("key5") + value5 := []byte("value5") + + err := ps.PutInEpoch(key4, value4, 4) + assert.Nil(t, err) + + err = ps.PutInEpoch(key5, value5, 5) + assert.Nil(t, err) + + t.Run("get from their respective epochs should work", func(t *testing.T) { + 
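// clear the cache before each read so GetFromEpoch is forced to load the value from the epoch persister, not the in-memory cache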
ps.ClearCache() + recovered4, errGet := ps.GetFromEpoch(key4, 4) + assert.Nil(t, errGet) + assert.Equal(t, value4, recovered4) + + ps.ClearCache() + recovered5, errGet := ps.GetFromEpoch(key5, 5) + assert.Nil(t, errGet) + assert.Equal(t, value5, recovered5) + }) + t.Run("get from wrong epochs should error", func(t *testing.T) { + ps.ClearCache() + result, errGet := ps.GetFromEpoch(key4, 3) + expectedErrorString := fmt.Sprintf("key %x not found in id", key4) + assert.Equal(t, expectedErrorString, errGet.Error()) + assert.Nil(t, result) + + ps.ClearCache() + result, errGet = ps.GetFromEpoch(key4, 5) + expectedErrorString = fmt.Sprintf("key %x not found in id", key4) + assert.Equal(t, expectedErrorString, errGet.Error()) + assert.Nil(t, result) + }) + }) +} + +func TestPruningStorer_RemoveFromCurrentEpoch(t *testing.T) { + t.Parallel() + + epochsData := pruning.EpochArgs{ + NumOfEpochsToKeep: 2, + NumOfActivePersisters: 2, + StartingEpoch: 5, + } + args := getDefaultArgs() + args.PersistersTracker = pruning.NewPersistersTracker(epochsData) + args.EpochsData = epochsData + ps, _ := pruning.NewPruningStorer(args) + + // current epoch is 5 + key := []byte("key") + value := []byte("value") + + // put in epoch 4 + _ = ps.PutInEpoch(key, value, 4) + // put in epoch 5 + _ = ps.PutInEpoch(key, value, 5) + + // remove from epoch 5 + err := ps.RemoveFromCurrentEpoch(key) + assert.Nil(t, err) + + // get from epoch 5 should error + ps.ClearCache() + result, errGet := ps.GetFromEpoch(key, 5) + expectedErrorString := fmt.Sprintf("key %x not found in id", key) + assert.Equal(t, expectedErrorString, errGet.Error()) + assert.Nil(t, result) + + // get from epoch 4 should work + ps.ClearCache() + recovered, errGet := ps.GetFromEpoch(key, 4) + assert.Nil(t, errGet) + assert.Equal(t, value, recovered) +} + +func TestPruningStorer_IsInterfaceNil(t *testing.T) { + t.Parallel() + + var ps *pruning.PruningStorer + require.True(t, ps.IsInterfaceNil()) + + args := getDefaultArgs() + ps, _ = pruning.NewPruningStorer(args) + require.False(t, ps.IsInterfaceNil()) +} diff --git a/storage/pruning/triePersistersTracker_test.go b/storage/pruning/triePersistersTracker_test.go index 08b379188ca..a813b4c4d77 100644 --- a/storage/pruning/triePersistersTracker_test.go +++ b/storage/pruning/triePersistersTracker_test.go @@ -4,17 +4,17 @@ import ( "bytes" "testing" - "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/storage/mock" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestNewTriePersistersTracker(t *testing.T) { t.Parallel() pt := NewTriePersisterTracker(getArgs()) - assert.False(t, check.IfNil(pt)) + assert.NotNil(t, pt) assert.Equal(t, int64(7), pt.oldestEpochKeep) assert.Equal(t, int64(8), pt.oldestEpochActive) assert.Equal(t, 0, pt.numDbsMarkedAsActive) @@ -137,3 +137,13 @@ func TestTriePersistersTracker_ShouldClosePersister(t *testing.T) { assert.False(t, pt.ShouldClosePersister(8)) } + +func TestTriePersistersTracker_IsInterfaceNil(t *testing.T) { + t.Parallel() + + var tpt *triePersistersTracker + require.True(t, tpt.IsInterfaceNil()) + + tpt = NewTriePersisterTracker(getArgs()) + require.False(t, tpt.IsInterfaceNil()) +} diff --git a/storage/pruning/triePruningStorer_test.go b/storage/pruning/triePruningStorer_test.go index dad280f364d..b3deebb98c2 100644 --- a/storage/pruning/triePruningStorer_test.go +++ b/storage/pruning/triePruningStorer_test.go @@ -4,7 +4,6 @@ import ( "strings" "testing" - 
"github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/storage" "github.com/multiversx/mx-chain-go/storage/mock" @@ -27,7 +26,7 @@ func TestNewTriePruningStorer(t *testing.T) { emptyAndInvalidConfig := pruning.StorerArgs{} tps, err := pruning.NewTriePruningStorer(emptyAndInvalidConfig) require.Error(t, err) - require.True(t, check.IfNil(tps)) + require.Nil(t, tps) }) t.Run("should work", func(t *testing.T) { @@ -36,7 +35,7 @@ func TestNewTriePruningStorer(t *testing.T) { args := getDefaultArgs() ps, err := pruning.NewTriePruningStorer(args) require.NoError(t, err) - require.False(t, check.IfNil(ps)) + require.NotNil(t, ps) }) } @@ -350,3 +349,45 @@ func TestTriePruningStorer_KeepMoreDbsOpenIfNecessary(t *testing.T) { err = tps.Close() assert.Nil(t, err) } + +func TestTriePruningStorer_GetLatestStorageEpoch(t *testing.T) { + t.Parallel() + + epochsData := pruning.EpochArgs{ + NumOfEpochsToKeep: 2, + NumOfActivePersisters: 2, + StartingEpoch: 5, + } + args := getDefaultArgs() + args.PersistersTracker = pruning.NewPersistersTracker(epochsData) + + t.Run("no active db should error", func(t *testing.T) { + t.Parallel() + + tps, _ := pruning.NewTriePruningStorer(args) + latestEpoch, err := tps.GetLatestStorageEpoch() + expectedErrString := "there are no active persisters" + assert.Equal(t, expectedErrString, err.Error()) + assert.Zero(t, latestEpoch) + }) + t.Run("with at least one active DB should work", func(t *testing.T) { + t.Parallel() + + tps, _ := pruning.NewTriePruningStorer(args) + _ = tps.ChangeEpochSimple(5) + latestEpoch, err := tps.GetLatestStorageEpoch() + assert.Nil(t, err) + assert.Equal(t, uint32(5), latestEpoch) + }) +} + +func TestTriePruningStorer_IsInterfaceNil(t *testing.T) { + t.Parallel() + + tps, _ := pruning.NewTriePruningStorer(pruning.StorerArgs{}) + require.True(t, tps.IsInterfaceNil()) + + args := getDefaultArgs() + tps, _ = pruning.NewTriePruningStorer(args) + require.False(t, tps.IsInterfaceNil()) +} diff --git a/storage/storageunit/storageunit_test.go b/storage/storageunit/storageunit_test.go new file mode 100644 index 00000000000..9b9b125fa7e --- /dev/null +++ b/storage/storageunit/storageunit_test.go @@ -0,0 +1,174 @@ +package storageunit_test + +import ( + "path" + "testing" + + "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-go/storage/mock" + "github.com/multiversx/mx-chain-go/storage/storageunit" + "github.com/multiversx/mx-chain-go/testscommon/storage" + "github.com/multiversx/mx-chain-storage-go/common" + + "github.com/multiversx/mx-chain-go/testscommon" + "github.com/stretchr/testify/assert" +) + +func TestNewStorageUnit(t *testing.T) { + t.Parallel() + + cacher := &testscommon.CacherStub{} + persister := &mock.PersisterStub{} + + t.Run("nil cacher should error", func(t *testing.T) { + t.Parallel() + + unit, err := storageunit.NewStorageUnit(nil, persister) + assert.Nil(t, unit) + assert.Equal(t, common.ErrNilCacher, err) + }) + t.Run("nil persister should error", func(t *testing.T) { + t.Parallel() + + unit, err := storageunit.NewStorageUnit(cacher, nil) + assert.Nil(t, unit) + assert.Equal(t, common.ErrNilPersister, err) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + unit, err := storageunit.NewStorageUnit(cacher, persister) + assert.NotNil(t, unit) + assert.Nil(t, err) + }) +} + +func TestNewCache(t *testing.T) { + t.Parallel() + + t.Run("wrong config should error", func(t *testing.T) { + t.Parallel() + + cfg := 
storageunit.CacheConfig{ + Type: "invalid type", + Capacity: 100, + } + cache, err := storageunit.NewCache(cfg) + assert.True(t, check.IfNil(cache)) + assert.Equal(t, common.ErrNotSupportedCacheType, err) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + cfg := storageunit.CacheConfig{ + Type: "LRU", + Capacity: 100, + } + cache, err := storageunit.NewCache(cfg) + assert.False(t, check.IfNil(cache)) + assert.Nil(t, err) + }) +} + +func TestNewDB(t *testing.T) { + t.Parallel() + + t.Run("wrong config should error", func(t *testing.T) { + t.Parallel() + + args := storageunit.ArgDB{ + DBType: "invalid type", + Path: "TEST", + BatchDelaySeconds: 5, + MaxBatchSize: 10, + MaxOpenFiles: 10, + } + db, err := storageunit.NewDB(args) + assert.True(t, check.IfNil(db)) + assert.Equal(t, common.ErrNotSupportedDBType, err) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + args := storageunit.ArgDB{ + DBType: "LvlDBSerial", + Path: path.Join(t.TempDir(), "TEST"), + BatchDelaySeconds: 5, + MaxBatchSize: 10, + MaxOpenFiles: 10, + } + db, err := storageunit.NewDB(args) + assert.False(t, check.IfNil(db)) + assert.Nil(t, err) + _ = db.Close() + }) +} + +func TestNewStorageUnitFromConf(t *testing.T) { + t.Parallel() + + dbConfig := storageunit.DBConfig{ + FilePath: path.Join(t.TempDir(), "TEST"), + Type: "LvlDBSerial", + BatchDelaySeconds: 5, + MaxBatchSize: 10, + MaxOpenFiles: 10, + } + + t.Run("invalid config should error", func(t *testing.T) { + t.Parallel() + + cacheConfig := storageunit.CacheConfig{ + Type: "invalid type", + Capacity: 100, + } + + unit, err := storageunit.NewStorageUnitFromConf(cacheConfig, dbConfig) + assert.Nil(t, unit) + assert.Equal(t, common.ErrNotSupportedCacheType, err) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + cacheConfig := storageunit.CacheConfig{ + Type: "LRU", + Capacity: 100, + } + + unit, err := storageunit.NewStorageUnitFromConf(cacheConfig, dbConfig) + assert.NotNil(t, unit) + assert.Nil(t, err) + _ = unit.Close() + }) +} + +func TestNewNilStorer(t *testing.T) { + t.Parallel() + + unit := storageunit.NewNilStorer() + assert.NotNil(t, unit) +} + +func TestNewStorageCacherAdapter(t *testing.T) { + t.Parallel() + + cacher := &mock.AdaptedSizedLruCacheStub{} + db := &mock.PersisterStub{} + storedDataFactory := &storage.StoredDataFactoryStub{} + marshaller := &testscommon.MarshalizerStub{} + + t.Run("nil parameter should error", func(t *testing.T) { + t.Parallel() + + adaptor, err := storageunit.NewStorageCacherAdapter(nil, db, storedDataFactory, marshaller) + assert.True(t, check.IfNil(adaptor)) + assert.Equal(t, common.ErrNilCacher, err) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + adaptor, err := storageunit.NewStorageCacherAdapter(cacher, db, storedDataFactory, marshaller) + assert.False(t, check.IfNil(adaptor)) + assert.Nil(t, err) + }) +} diff --git a/storage/txcache/txcache_test.go b/storage/txcache/txcache_test.go new file mode 100644 index 00000000000..cd0ded4f133 --- /dev/null +++ b/storage/txcache/txcache_test.go @@ -0,0 +1,96 @@ +package txcache + +import ( + "strings" + "testing" + + "github.com/multiversx/mx-chain-go/testscommon/txcachemocks" + "github.com/multiversx/mx-chain-storage-go/common" + "github.com/stretchr/testify/assert" +) + +func TestNewTxCache(t *testing.T) { + t.Parallel() + + t.Run("nil parameter should error", func(t *testing.T) { + t.Parallel() + + cfg := ConfigSourceMe{ + Name: "test", + NumChunks: 1, + NumBytesThreshold: 1000, + NumBytesPerSenderThreshold: 
100, + CountThreshold: 10, + CountPerSenderThreshold: 100, + NumSendersToPreemptivelyEvict: 1, + } + + cache, err := NewTxCache(cfg, nil) + assert.Nil(t, cache) + assert.Equal(t, common.ErrNilTxGasHandler, err) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + cfg := ConfigSourceMe{ + Name: "test", + NumChunks: 1, + NumBytesThreshold: 1000, + NumBytesPerSenderThreshold: 100, + CountThreshold: 10, + CountPerSenderThreshold: 100, + NumSendersToPreemptivelyEvict: 1, + } + + cache, err := NewTxCache(cfg, &txcachemocks.TxGasHandlerMock{ + GasProcessingDivisor: 1, + MinimumGasPrice: 1, + MinimumGasMove: 1, + }) + assert.NotNil(t, cache) + assert.Nil(t, err) + }) +} + +func TestNewDisabledCache(t *testing.T) { + t.Parallel() + + cache := NewDisabledCache() + assert.NotNil(t, cache) +} + +func TestNewCrossTxCache(t *testing.T) { + t.Parallel() + + t.Run("invalid config should error", func(t *testing.T) { + t.Parallel() + + cfg := ConfigDestinationMe{ + Name: "", + NumChunks: 1, + MaxNumItems: 100, + MaxNumBytes: 1000, + NumItemsToPreemptivelyEvict: 1, + } + + cache, err := NewCrossTxCache(cfg) + assert.Nil(t, cache) + assert.ErrorIs(t, err, common.ErrInvalidConfig) + assert.True(t, strings.Contains(err.Error(), "config.Name is invalid")) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + cfg := ConfigDestinationMe{ + Name: "test", + NumChunks: 1, + MaxNumItems: 100, + MaxNumBytes: 1000, + NumItemsToPreemptivelyEvict: 1, + } + + cache, err := NewCrossTxCache(cfg) + assert.NotNil(t, cache) + assert.Nil(t, err) + }) +} diff --git a/testscommon/TxVersionCheckerStub.go b/testscommon/TxVersionCheckerStub.go new file mode 100644 index 00000000000..5616bca3079 --- /dev/null +++ b/testscommon/TxVersionCheckerStub.go @@ -0,0 +1,41 @@ +package testscommon + +import ( + "github.com/multiversx/mx-chain-core-go/data/transaction" +) + +// TxVersionCheckerStub - +type TxVersionCheckerStub struct { + IsSignedWithHashCalled func(tx *transaction.Transaction) bool + IsGuardedTransactionCalled func(tx *transaction.Transaction) bool + CheckTxVersionCalled func(tx *transaction.Transaction) error +} + +// IsSignedWithHash will return true if transaction is signed with hash +func (tvcs *TxVersionCheckerStub) IsSignedWithHash(tx *transaction.Transaction) bool { + if tvcs.IsSignedWithHashCalled != nil { + return tvcs.IsSignedWithHashCalled(tx) + } + return false +} + +// IsGuardedTransaction will return true if transaction also holds a guardian signature +func (tvcs *TxVersionCheckerStub) IsGuardedTransaction(tx *transaction.Transaction) bool { + if tvcs.IsGuardedTransactionCalled != nil { + return tvcs.IsGuardedTransactionCalled(tx) + } + return false +} + +// CheckTxVersion will check transaction version +func (tvcs *TxVersionCheckerStub) CheckTxVersion(tx *transaction.Transaction) error { + if tvcs.CheckTxVersionCalled != nil { + return tvcs.CheckTxVersionCalled(tx) + } + return nil +} + +// IsInterfaceNil returns true if there is no value under the interface +func (tvcs *TxVersionCheckerStub) IsInterfaceNil() bool { + return tvcs == nil +} diff --git a/testscommon/api/groupHandlerStub.go b/testscommon/api/groupHandlerStub.go new file mode 100644 index 00000000000..af61639d254 --- /dev/null +++ b/testscommon/api/groupHandlerStub.go @@ -0,0 +1,32 @@ +package api + +import ( + "github.com/gin-gonic/gin" + "github.com/multiversx/mx-chain-go/config" +) + +// GroupHandlerStub - +type GroupHandlerStub struct { + UpdateFacadeCalled func(facade interface{}) error + RegisterRoutesCalled func(ws 
*gin.RouterGroup, apiConfig config.ApiRoutesConfig) +} + +// UpdateFacade - +func (stub *GroupHandlerStub) UpdateFacade(facade interface{}) error { + if stub.UpdateFacadeCalled != nil { + return stub.UpdateFacadeCalled(facade) + } + return nil +} + +// RegisterRoutes - +func (stub *GroupHandlerStub) RegisterRoutes(ws *gin.RouterGroup, apiConfig config.ApiRoutesConfig) { + if stub.RegisterRoutesCalled != nil { + stub.RegisterRoutesCalled(ws, apiConfig) + } +} + +// IsInterfaceNil - +func (stub *GroupHandlerStub) IsInterfaceNil() bool { + return stub == nil +} diff --git a/testscommon/api/serverStub.go b/testscommon/api/serverStub.go new file mode 100644 index 00000000000..91ce7dd85ae --- /dev/null +++ b/testscommon/api/serverStub.go @@ -0,0 +1,25 @@ +package api + +import "context" + +// ServerStub - +type ServerStub struct { + ListenAndServeCalled func() error + ShutdownCalled func(ctx context.Context) error +} + +// ListenAndServe - +func (stub *ServerStub) ListenAndServe() error { + if stub.ListenAndServeCalled != nil { + return stub.ListenAndServeCalled() + } + return nil +} + +// Shutdown - +func (stub *ServerStub) Shutdown(ctx context.Context) error { + if stub.ShutdownCalled != nil { + return stub.ShutdownCalled(ctx) + } + return nil +} diff --git a/testscommon/api/upgradeableHttpServerHandlerStub.go b/testscommon/api/upgradeableHttpServerHandlerStub.go new file mode 100644 index 00000000000..24eb753bb45 --- /dev/null +++ b/testscommon/api/upgradeableHttpServerHandlerStub.go @@ -0,0 +1,42 @@ +package api + +import "github.com/multiversx/mx-chain-go/api/shared" + +// UpgradeableHttpServerHandlerStub - +type UpgradeableHttpServerHandlerStub struct { + StartHttpServerCalled func() error + UpdateFacadeCalled func(facade shared.FacadeHandler) error + CloseCalled func() error +} + +// StartHttpServer - +func (stub *UpgradeableHttpServerHandlerStub) StartHttpServer() error { + if stub.StartHttpServerCalled != nil { + return stub.StartHttpServerCalled() + } + + return nil +} + +// UpdateFacade - +func (stub *UpgradeableHttpServerHandlerStub) UpdateFacade(facade shared.FacadeHandler) error { + if stub.UpdateFacadeCalled != nil { + return stub.UpdateFacadeCalled(facade) + } + + return nil +} + +// Close - +func (stub *UpgradeableHttpServerHandlerStub) Close() error { + if stub.CloseCalled != nil { + return stub.CloseCalled() + } + + return nil +} + +// IsInterfaceNil - +func (stub *UpgradeableHttpServerHandlerStub) IsInterfaceNil() bool { + return stub == nil +} diff --git a/testscommon/blockChainHookStub.go b/testscommon/blockChainHookStub.go index bceeff704f1..36903f4a473 100644 --- a/testscommon/blockChainHookStub.go +++ b/testscommon/blockChainHookStub.go @@ -346,8 +346,8 @@ func (stub *BlockChainHookStub) FilterCodeMetadataForUpgrade(input []byte) ([]by return input, nil } -// ApplyFiltersOnCodeMetadata - -func (stub *BlockChainHookStub) ApplyFiltersOnCodeMetadata(codeMetadata vmcommon.CodeMetadata) vmcommon.CodeMetadata { +// ApplyFiltersOnSCCodeMetadata - +func (stub *BlockChainHookStub) ApplyFiltersOnSCCodeMetadata(codeMetadata vmcommon.CodeMetadata) vmcommon.CodeMetadata { if stub.ApplyFiltersOnCodeMetadataCalled != nil { stub.ApplyFiltersOnCodeMetadataCalled(codeMetadata) } diff --git a/testscommon/components/components.go b/testscommon/components/components.go index b810cce6140..479745c679c 100644 --- a/testscommon/components/components.go +++ b/testscommon/components/components.go @@ -134,7 +134,7 @@ func GetConsensusArgs(shardCoordinator sharding.Coordinator) consensusComp.Conse 
coreComponents := GetCoreComponents() cryptoComponents := GetCryptoComponents(coreComponents) networkComponents := GetNetworkComponents(cryptoComponents) - stateComponents := GetStateComponents(coreComponents, shardCoordinator) + stateComponents := GetStateComponents(coreComponents) dataComponents := GetDataComponents(coreComponents, shardCoordinator) processComponents := GetProcessComponents( shardCoordinator, @@ -147,7 +147,6 @@ func GetConsensusArgs(shardCoordinator sharding.Coordinator) consensusComp.Conse statusComponents := GetStatusComponents( coreComponents, networkComponents, - dataComponents, stateComponents, shardCoordinator, processComponents.NodesCoordinator(), @@ -218,6 +217,7 @@ func GetDataArgs(coreComponents factory.CoreComponentsHolder, shardCoordinator s Crypto: GetCryptoComponents(coreComponents), CurrentEpoch: 0, CreateTrieEpochRootHashStorer: false, + NodeProcessingMode: common.Normal, SnapshotsEnabled: false, } } @@ -331,7 +331,7 @@ func getNewTrieStorageManagerArgs() trie.NewTrieStorageManagerArgs { } // GetStateFactoryArgs - -func GetStateFactoryArgs(coreComponents factory.CoreComponentsHolder, shardCoordinator sharding.Coordinator) stateComp.StateComponentsFactoryArgs { +func GetStateFactoryArgs(coreComponents factory.CoreComponentsHolder) stateComp.StateComponentsFactoryArgs { tsm, _ := trie.NewTrieStorageManager(getNewTrieStorageManagerArgs()) storageManagerUser, _ := trie.NewTrieStorageManagerWithoutPruning(tsm) tsm, _ = trie.NewTrieStorageManager(getNewTrieStorageManagerArgs()) @@ -348,13 +348,12 @@ func GetStateFactoryArgs(coreComponents factory.CoreComponentsHolder, shardCoord triesHolder.Put([]byte(dataRetriever.PeerAccountsUnit.String()), triePeers) stateComponentsFactoryArgs := stateComp.StateComponentsFactoryArgs{ - Config: GetGeneralConfig(), - ShardCoordinator: shardCoordinator, - Core: coreComponents, - StatusCore: GetStatusCoreComponents(), - StorageService: disabled.NewChainStorer(), - ProcessingMode: common.Normal, - ChainHandler: &testscommon.ChainHandlerStub{}, + Config: GetGeneralConfig(), + Core: coreComponents, + StatusCore: GetStatusCoreComponents(), + StorageService: disabled.NewChainStorer(), + ProcessingMode: common.Normal, + ChainHandler: &testscommon.ChainHandlerStub{}, } return stateComponentsFactoryArgs @@ -366,7 +365,7 @@ func GetProcessComponentsFactoryArgs(shardCoordinator sharding.Coordinator) proc cryptoComponents := GetCryptoComponents(coreComponents) networkComponents := GetNetworkComponents(cryptoComponents) dataComponents := GetDataComponents(coreComponents, shardCoordinator) - stateComponents := GetStateComponents(coreComponents, shardCoordinator) + stateComponents := GetStateComponents(coreComponents) processArgs := GetProcessArgs( shardCoordinator, coreComponents, @@ -433,7 +432,6 @@ func GetProcessArgs( statusComponents := GetStatusComponents( coreComponents, networkComponents, - dataComponents, stateComponents, shardCoordinator, nc, @@ -537,11 +535,11 @@ func GetProcessArgs( }, Active: config.GovernanceSystemSCConfigActive{ ProposalCost: "500", - MinQuorum: "50", - MinPassThreshold: "50", - MinVetoThreshold: "50", + MinQuorum: 0.5, + MinPassThreshold: 0.5, + MinVetoThreshold: 0.5, }, - FirstWhitelistedAddress: "erd1vxy22x0fj4zv6hktmydg8vpfh6euv02cz4yg0aaws6rrad5a5awqgqky80", + ChangeConfigAddress: "erd1vxy22x0fj4zv6hktmydg8vpfh6euv02cz4yg0aaws6rrad5a5awqgqky80", }, StakingSystemSCConfig: config.StakingSystemSCConfig{ GenesisNodePrice: "2500000000000000000000", @@ -576,7 +574,6 @@ func GetProcessArgs( func 
GetStatusComponents( coreComponents factory.CoreComponentsHolder, networkComponents factory.NetworkComponentsHolder, - dataComponents factory.DataComponentsHolder, stateComponents factory.StateComponentsHolder, shardCoordinator sharding.Coordinator, nodesCoordinator nodesCoordinator.NodesCoordinator, @@ -600,7 +597,6 @@ func GetStatusComponents( NodesCoordinator: nodesCoordinator, EpochStartNotifier: coreComponents.EpochStartNotifierWithConfirm(), CoreComponents: coreComponents, - DataComponents: dataComponents, NetworkComponents: networkComponents, StateComponents: stateComponents, IsInImportMode: false, @@ -627,7 +623,7 @@ func GetStatusComponentsFactoryArgsAndProcessComponents(shardCoordinator shardin cryptoComponents := GetCryptoComponents(coreComponents) networkComponents := GetNetworkComponents(cryptoComponents) dataComponents := GetDataComponents(coreComponents, shardCoordinator) - stateComponents := GetStateComponents(coreComponents, shardCoordinator) + stateComponents := GetStateComponents(coreComponents) processComponents := GetProcessComponents( shardCoordinator, coreComponents, @@ -651,13 +647,15 @@ func GetStatusComponentsFactoryArgsAndProcessComponents(shardCoordinator shardin Password: elasticPassword, EnabledIndexes: []string{"transactions", "blocks"}, }, + WebSocketConnector: config.WebSocketDriverConfig{ + MarshallerType: "json", + }, }, EconomicsConfig: config.EconomicsConfig{}, ShardCoordinator: mock.NewMultiShardsCoordinatorMock(2), NodesCoordinator: &shardingMocks.NodesCoordinatorMock{}, EpochStartNotifier: &mock.EpochStartNotifierStub{}, CoreComponents: coreComponents, - DataComponents: dataComponents, NetworkComponents: networkComponents, StateComponents: stateComponents, StatusCoreComponents: statusCoreComponents, @@ -705,8 +703,8 @@ func GetCryptoComponents(coreComponents factory.CoreComponentsHolder) factory.Cr } // GetStateComponents - -func GetStateComponents(coreComponents factory.CoreComponentsHolder, shardCoordinator sharding.Coordinator) factory.StateComponentsHolder { - stateArgs := GetStateFactoryArgs(coreComponents, shardCoordinator) +func GetStateComponents(coreComponents factory.CoreComponentsHolder) factory.StateComponentsHolder { + stateArgs := GetStateFactoryArgs(coreComponents) stateComponentsFactory, err := stateComp.NewStateComponentsFactory(stateArgs) if err != nil { log.Error("getStateComponents NewStateComponentsFactory", "error", err.Error()) @@ -812,6 +810,7 @@ func FillGasMapMetaChainSystemSCsCosts(value uint64) map[string]uint64 { gasMap["DelegationMgrOps"] = value gasMap["GetAllNodeStates"] = value gasMap["ValidatorToDelegation"] = value + gasMap["GetActiveFund"] = value gasMap["FixWaitingListSize"] = value return gasMap diff --git a/testscommon/components/configs.go b/testscommon/components/configs.go index f07b8b21bb3..6d44e383818 100644 --- a/testscommon/components/configs.go +++ b/testscommon/components/configs.go @@ -169,6 +169,7 @@ func GetGeneralConfig() config.Config { ChainID: "undefined", MinTransactionVersion: 1, GenesisMaxNumberOfShards: 3, + SetGuardianEpochsDelay: 20, }, Marshalizer: config.MarshalizerConfig{ Type: TestMarshalizer, @@ -261,11 +262,13 @@ func CreateDummyEconomicsConfig() config.EconomicsConfig { MaxGasLimitPerMetaMiniBlock: "15000000000", MaxGasLimitPerTx: "1500000000", MinGasLimit: "50000", + ExtraGasLimitGuardedTx: "50000", }, }, MinGasPrice: "1000000000", GasPerDataByte: "1500", GasPriceModifier: 1, + MaxGasPriceSetGuardian: "100000", }, } } diff --git a/testscommon/components/default.go 
b/testscommon/components/default.go index 474093d000e..d90406199db 100644 --- a/testscommon/components/default.go +++ b/testscommon/components/default.go @@ -5,14 +5,16 @@ import ( crypto "github.com/multiversx/mx-chain-crypto-go" "github.com/multiversx/mx-chain-go/common" - consensusMocks "github.com/multiversx/mx-chain-go/consensus/mock" "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/factory/mock" "github.com/multiversx/mx-chain-go/sharding" "github.com/multiversx/mx-chain-go/testscommon" + "github.com/multiversx/mx-chain-go/testscommon/consensus" "github.com/multiversx/mx-chain-go/testscommon/cryptoMocks" dataRetrieverTests "github.com/multiversx/mx-chain-go/testscommon/dataRetriever" "github.com/multiversx/mx-chain-go/testscommon/economicsmocks" + epochNotifierMock "github.com/multiversx/mx-chain-go/testscommon/epochNotifier" + "github.com/multiversx/mx-chain-go/testscommon/factory" "github.com/multiversx/mx-chain-go/testscommon/nodeTypeProviderMock" "github.com/multiversx/mx-chain-go/testscommon/p2pmocks" "github.com/multiversx/mx-chain-go/testscommon/shardingMocks" @@ -49,6 +51,7 @@ func GetDefaultCoreComponents() *mock.CoreComponentsMock { NodesConfig: &testscommon.NodesSetupStub{}, StartTime: time.Time{}, NodeTypeProviderField: &nodeTypeProviderMock.NodeTypeProviderStub{}, + EpochChangeNotifier: &epochNotifierMock.EpochNotifierStub{}, } } @@ -61,7 +64,6 @@ func GetDefaultCryptoComponents() *mock.CryptoComponentsMock { P2pPrivKey: mock.NewP2pPrivateKeyMock(), P2pSig: &mock.SinglesignMock{}, PubKeyString: "pubKey", - PrivKeyBytes: []byte("privKey"), PubKeyBytes: []byte("pubKey"), BlockSig: &mock.SinglesignMock{}, TxSig: &mock.SinglesignMock{}, @@ -71,7 +73,7 @@ func GetDefaultCryptoComponents() *mock.CryptoComponentsMock { TxKeyGen: &mock.KeyGenMock{}, P2PKeyGen: &mock.KeyGenMock{}, MsgSigVerifier: &testscommon.MessageSignVerifierMock{}, - SigHandler: &consensusMocks.SigningHandlerStub{}, + SigHandler: &consensus.SigningHandlerStub{}, } } @@ -86,8 +88,8 @@ func GetDefaultNetworkComponents() *mock.NetworkComponentsMock { } // GetDefaultStateComponents - -func GetDefaultStateComponents() *testscommon.StateComponentsMock { - return &testscommon.StateComponentsMock{ +func GetDefaultStateComponents() *factory.StateComponentsMock { + return &factory.StateComponentsMock{ PeersAcc: &stateMock.AccountsStub{}, Accounts: &stateMock.AccountsStub{}, Tries: &trieMock.TriesHolderStub{}, diff --git a/consensus/mock/signingHandlerStub.go b/testscommon/consensus/signingHandlerStub.go similarity index 99% rename from consensus/mock/signingHandlerStub.go rename to testscommon/consensus/signingHandlerStub.go index 33c4121d74c..e389ce864b3 100644 --- a/consensus/mock/signingHandlerStub.go +++ b/testscommon/consensus/signingHandlerStub.go @@ -1,4 +1,4 @@ -package mock +package consensus // SigningHandlerStub implements SigningHandler interface type SigningHandlerStub struct { diff --git a/testscommon/economicsConfig.go b/testscommon/economicsConfig.go index cedfbf59c92..59ec0088240 100644 --- a/testscommon/economicsConfig.go +++ b/testscommon/economicsConfig.go @@ -38,11 +38,13 @@ func GetEconomicsConfig() config.EconomicsConfig { MaxGasLimitPerMetaMiniBlock: "1500000000", MaxGasLimitPerTx: "600000000", MinGasLimit: "50000", + ExtraGasLimitGuardedTx: "50000", }, }, - MinGasPrice: "1000000000", - GasPerDataByte: "1500", - GasPriceModifier: 0.01, + MinGasPrice: "1000000000", + GasPerDataByte: "1500", + GasPriceModifier: 0.01, + MaxGasPriceSetGuardian: "2000000000", }, } 
diff --git a/testscommon/economicsmocks/economicsDataHandlerStub.go b/testscommon/economicsmocks/economicsDataHandlerStub.go index d8476f475c5..9eb2847ca16 100644 --- a/testscommon/economicsmocks/economicsDataHandlerStub.go +++ b/testscommon/economicsmocks/economicsDataHandlerStub.go @@ -28,6 +28,8 @@ type EconomicsHandlerStub struct { MaxInflationRateCalled func(year uint32) float64 GasPerDataByteCalled func() uint64 MinGasLimitCalled func() uint64 + ExtraGasLimitGuardedTxCalled func() uint64 + MaxGasPriceSetGuardianCalled func() uint64 GenesisTotalSupplyCalled func() *big.Int ComputeFeeForProcessingCalled func(tx data.TransactionWithFeeHandler, gasToUse uint64) *big.Int RewardsTopUpGradientPointCalled func() *big.Int @@ -114,6 +116,22 @@ func (e *EconomicsHandlerStub) MinGasLimit() uint64 { return 0 } +// ExtraGasLimitGuardedTx - +func (e *EconomicsHandlerStub) ExtraGasLimitGuardedTx() uint64 { + if e.ExtraGasLimitGuardedTxCalled != nil { + return e.ExtraGasLimitGuardedTxCalled() + } + return 0 +} + +// MaxGasPriceSetGuardian - +func (e *EconomicsHandlerStub) MaxGasPriceSetGuardian() uint64 { + if e.MaxGasPriceSetGuardianCalled != nil { + return e.MaxGasPriceSetGuardianCalled() + } + return 0 +} + // GenesisTotalSupply - func (e *EconomicsHandlerStub) GenesisTotalSupply() *big.Int { if e.GenesisTotalSupplyCalled != nil { diff --git a/testscommon/economicsmocks/economicsHandlerMock.go b/testscommon/economicsmocks/economicsHandlerMock.go index c5f1b4bbd4e..0c4303ce6ec 100644 --- a/testscommon/economicsmocks/economicsHandlerMock.go +++ b/testscommon/economicsmocks/economicsHandlerMock.go @@ -82,6 +82,16 @@ func (ehm *EconomicsHandlerMock) MinGasLimit() uint64 { return 0 } +// ExtraGasLimitGuardedTx - +func (ehm *EconomicsHandlerMock) ExtraGasLimitGuardedTx() uint64 { + return 0 +} + +// MaxGasPriceSetGuardian - +func (ehm *EconomicsHandlerMock) MaxGasPriceSetGuardian() uint64 { + return 0 +} + // GasPerDataByte - func (ehm *EconomicsHandlerMock) GasPerDataByte() uint64 { return 0 diff --git a/testscommon/enableEpochsHandlerStub.go b/testscommon/enableEpochsHandlerStub.go index 092131f8ebc..d076a3676d3 100644 --- a/testscommon/enableEpochsHandlerStub.go +++ b/testscommon/enableEpochsHandlerStub.go @@ -117,6 +117,9 @@ type EnableEpochsHandlerStub struct { IsMaxBlockchainHookCountersFlagEnabledField bool IsWipeSingleNFTLiquidityDecreaseEnabledField bool IsAlwaysSaveTokenMetaDataEnabledField bool + IsSetGuardianEnabledField bool + IsKeepExecOrderOnCreatedSCRsEnabledField bool + IsMultiClaimOnDelegationEnabledField bool } // ResetPenalizedTooMuchGasFlag - @@ -1014,6 +1017,30 @@ func (stub *EnableEpochsHandlerStub) IsAlwaysSaveTokenMetaDataEnabled() bool { return stub.IsAlwaysSaveTokenMetaDataEnabledField } +// IsSetGuardianEnabled - +func (stub *EnableEpochsHandlerStub) IsSetGuardianEnabled() bool { + stub.RLock() + defer stub.RUnlock() + + return stub.IsSetGuardianEnabledField +} + +// IsKeepExecOrderOnCreatedSCRsEnabled - +func (stub *EnableEpochsHandlerStub) IsKeepExecOrderOnCreatedSCRsEnabled() bool { + stub.RLock() + defer stub.RUnlock() + + return stub.IsKeepExecOrderOnCreatedSCRsEnabledField +} + +// IsMultiClaimOnDelegationEnabled - +func (stub *EnableEpochsHandlerStub) IsMultiClaimOnDelegationEnabled() bool { + stub.RLock() + defer stub.RUnlock() + + return stub.IsMultiClaimOnDelegationEnabledField +} + // IsInterfaceNil - func (stub *EnableEpochsHandlerStub) IsInterfaceNil() bool { return stub == nil }
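Note: every test double in this change set follows the same convention: one exported XxxCalled function field per method, a nil check, a harmless zero-value fallback, and IsInterfaceNil for typed-nil detection. A distilled, standalone example of the pattern (FeeHandlerStub is a made-up name for illustration, not a type from the repository):

package main

import "fmt"

// FeeHandlerStub illustrates the stub pattern used throughout testscommon:
// each method delegates to an optional XxxCalled field when set.
type FeeHandlerStub struct {
	MinGasLimitCalled func() uint64
}

// MinGasLimit returns the test-provided value, or 0 when unset.
func (stub *FeeHandlerStub) MinGasLimit() uint64 {
	if stub.MinGasLimitCalled != nil {
		return stub.MinGasLimitCalled()
	}
	return 0
}

// IsInterfaceNil follows the repository convention, letting check.IfNil-style
// helpers detect a typed-nil pointer hidden behind an interface value.
func (stub *FeeHandlerStub) IsInterfaceNil() bool {
	return stub == nil
}

func main() {
	stub := &FeeHandlerStub{
		MinGasLimitCalled: func() uint64 { return 50000 },
	}
	fmt.Println(stub.MinGasLimit())                // 50000
	fmt.Println((&FeeHandlerStub{}).MinGasLimit()) // 0, safe fallback
}

The nil-check fallback lets a test override only the single method it cares about while every other method keeps a safe default.

diff --git a/testscommon/factory/coreComponentsHolderStub.go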
b/testscommon/factory/coreComponentsHolderStub.go new file mode 100644 index 00000000000..557a7f44c7c --- /dev/null +++ b/testscommon/factory/coreComponentsHolderStub.go @@ -0,0 +1,374 @@ +package factory + +import ( + "time" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/data/endProcess" + "github.com/multiversx/mx-chain-core-go/data/typeConverters" + "github.com/multiversx/mx-chain-core-go/hashing" + "github.com/multiversx/mx-chain-core-go/marshal" + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/consensus" + "github.com/multiversx/mx-chain-go/factory" + "github.com/multiversx/mx-chain-go/ntp" + "github.com/multiversx/mx-chain-go/process" + "github.com/multiversx/mx-chain-go/sharding" + "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" + "github.com/multiversx/mx-chain-go/storage" +) + +// CoreComponentsHolderStub - +type CoreComponentsHolderStub struct { + InternalMarshalizerCalled func() marshal.Marshalizer + SetInternalMarshalizerCalled func(marshalizer marshal.Marshalizer) error + TxMarshalizerCalled func() marshal.Marshalizer + VmMarshalizerCalled func() marshal.Marshalizer + HasherCalled func() hashing.Hasher + TxSignHasherCalled func() hashing.Hasher + Uint64ByteSliceConverterCalled func() typeConverters.Uint64ByteSliceConverter + AddressPubKeyConverterCalled func() core.PubkeyConverter + ValidatorPubKeyConverterCalled func() core.PubkeyConverter + PathHandlerCalled func() storage.PathManagerHandler + WatchdogCalled func() core.WatchdogTimer + AlarmSchedulerCalled func() core.TimersScheduler + SyncTimerCalled func() ntp.SyncTimer + RoundHandlerCalled func() consensus.RoundHandler + EconomicsDataCalled func() process.EconomicsDataHandler + APIEconomicsDataCalled func() process.EconomicsDataHandler + RatingsDataCalled func() process.RatingsInfoHandler + RaterCalled func() sharding.PeerAccountListAndRatingHandler + GenesisNodesSetupCalled func() sharding.GenesisNodesSetupHandler + NodesShufflerCalled func() nodesCoordinator.NodesShuffler + EpochNotifierCalled func() process.EpochNotifier + EnableRoundsHandlerCalled func() process.EnableRoundsHandler + EpochStartNotifierWithConfirmCalled func() factory.EpochStartNotifierWithConfirm + ChanStopNodeProcessCalled func() chan endProcess.ArgEndProcess + GenesisTimeCalled func() time.Time + ChainIDCalled func() string + MinTransactionVersionCalled func() uint32 + TxVersionCheckerCalled func() process.TxVersionCheckerHandler + EncodedAddressLenCalled func() uint32 + NodeTypeProviderCalled func() core.NodeTypeProviderHandler + WasmVMChangeLockerCalled func() common.Locker + ProcessStatusHandlerCalled func() common.ProcessStatusHandler + HardforkTriggerPubKeyCalled func() []byte + EnableEpochsHandlerCalled func() common.EnableEpochsHandler +} + +// NewCoreComponentsHolderStubFromRealComponent - +func NewCoreComponentsHolderStubFromRealComponent(coreComponents factory.CoreComponentsHolder) *CoreComponentsHolderStub { + return &CoreComponentsHolderStub{ + InternalMarshalizerCalled: coreComponents.InternalMarshalizer, + SetInternalMarshalizerCalled: coreComponents.SetInternalMarshalizer, + TxMarshalizerCalled: coreComponents.TxMarshalizer, + VmMarshalizerCalled: coreComponents.VmMarshalizer, + HasherCalled: coreComponents.Hasher, + TxSignHasherCalled: coreComponents.TxSignHasher, + Uint64ByteSliceConverterCalled: coreComponents.Uint64ByteSliceConverter, + AddressPubKeyConverterCalled: coreComponents.AddressPubKeyConverter, + ValidatorPubKeyConverterCalled: 
coreComponents.ValidatorPubKeyConverter, + PathHandlerCalled: coreComponents.PathHandler, + WatchdogCalled: coreComponents.Watchdog, + AlarmSchedulerCalled: coreComponents.AlarmScheduler, + SyncTimerCalled: coreComponents.SyncTimer, + RoundHandlerCalled: coreComponents.RoundHandler, + EconomicsDataCalled: coreComponents.EconomicsData, + APIEconomicsDataCalled: coreComponents.APIEconomicsData, + RatingsDataCalled: coreComponents.RatingsData, + RaterCalled: coreComponents.Rater, + GenesisNodesSetupCalled: coreComponents.GenesisNodesSetup, + NodesShufflerCalled: coreComponents.NodesShuffler, + EpochNotifierCalled: coreComponents.EpochNotifier, + EnableRoundsHandlerCalled: coreComponents.EnableRoundsHandler, + EpochStartNotifierWithConfirmCalled: coreComponents.EpochStartNotifierWithConfirm, + ChanStopNodeProcessCalled: coreComponents.ChanStopNodeProcess, + GenesisTimeCalled: coreComponents.GenesisTime, + ChainIDCalled: coreComponents.ChainID, + MinTransactionVersionCalled: coreComponents.MinTransactionVersion, + TxVersionCheckerCalled: coreComponents.TxVersionChecker, + EncodedAddressLenCalled: coreComponents.EncodedAddressLen, + NodeTypeProviderCalled: coreComponents.NodeTypeProvider, + WasmVMChangeLockerCalled: coreComponents.WasmVMChangeLocker, + ProcessStatusHandlerCalled: coreComponents.ProcessStatusHandler, + HardforkTriggerPubKeyCalled: coreComponents.HardforkTriggerPubKey, + EnableEpochsHandlerCalled: coreComponents.EnableEpochsHandler, + } +} + +// InternalMarshalizer - +func (stub *CoreComponentsHolderStub) InternalMarshalizer() marshal.Marshalizer { + if stub.InternalMarshalizerCalled != nil { + return stub.InternalMarshalizerCalled() + } + return nil +} + +// SetInternalMarshalizer - +func (stub *CoreComponentsHolderStub) SetInternalMarshalizer(marshalizer marshal.Marshalizer) error { + if stub.SetInternalMarshalizerCalled != nil { + return stub.SetInternalMarshalizerCalled(marshalizer) + } + return nil +} + +// TxMarshalizer - +func (stub *CoreComponentsHolderStub) TxMarshalizer() marshal.Marshalizer { + if stub.TxMarshalizerCalled != nil { + return stub.TxMarshalizerCalled() + } + return nil +} + +// VmMarshalizer - +func (stub *CoreComponentsHolderStub) VmMarshalizer() marshal.Marshalizer { + if stub.VmMarshalizerCalled != nil { + return stub.VmMarshalizerCalled() + } + return nil +} + +// Hasher - +func (stub *CoreComponentsHolderStub) Hasher() hashing.Hasher { + if stub.HasherCalled != nil { + return stub.HasherCalled() + } + return nil +} + +// TxSignHasher - +func (stub *CoreComponentsHolderStub) TxSignHasher() hashing.Hasher { + if stub.TxSignHasherCalled != nil { + return stub.TxSignHasherCalled() + } + return nil +} + +// Uint64ByteSliceConverter - +func (stub *CoreComponentsHolderStub) Uint64ByteSliceConverter() typeConverters.Uint64ByteSliceConverter { + if stub.Uint64ByteSliceConverterCalled != nil { + return stub.Uint64ByteSliceConverterCalled() + } + return nil +} + +// AddressPubKeyConverter - +func (stub *CoreComponentsHolderStub) AddressPubKeyConverter() core.PubkeyConverter { + if stub.AddressPubKeyConverterCalled != nil { + return stub.AddressPubKeyConverterCalled() + } + return nil +} + +// ValidatorPubKeyConverter - +func (stub *CoreComponentsHolderStub) ValidatorPubKeyConverter() core.PubkeyConverter { + if stub.ValidatorPubKeyConverterCalled != nil { + return stub.ValidatorPubKeyConverterCalled() + } + return nil +} + +// PathHandler - +func (stub *CoreComponentsHolderStub) PathHandler() storage.PathManagerHandler { + if stub.PathHandlerCalled != nil { + 
return stub.PathHandlerCalled() + } + return nil +} + +// Watchdog - +func (stub *CoreComponentsHolderStub) Watchdog() core.WatchdogTimer { + if stub.WatchdogCalled != nil { + return stub.WatchdogCalled() + } + return nil +} + +// AlarmScheduler - +func (stub *CoreComponentsHolderStub) AlarmScheduler() core.TimersScheduler { + if stub.AlarmSchedulerCalled != nil { + return stub.AlarmSchedulerCalled() + } + return nil +} + +// SyncTimer - +func (stub *CoreComponentsHolderStub) SyncTimer() ntp.SyncTimer { + if stub.SyncTimerCalled != nil { + return stub.SyncTimerCalled() + } + return nil +} + +// RoundHandler - +func (stub *CoreComponentsHolderStub) RoundHandler() consensus.RoundHandler { + if stub.RoundHandlerCalled != nil { + return stub.RoundHandlerCalled() + } + return nil +} + +// EconomicsData - +func (stub *CoreComponentsHolderStub) EconomicsData() process.EconomicsDataHandler { + if stub.EconomicsDataCalled != nil { + return stub.EconomicsDataCalled() + } + return nil +} + +// APIEconomicsData - +func (stub *CoreComponentsHolderStub) APIEconomicsData() process.EconomicsDataHandler { + if stub.APIEconomicsDataCalled != nil { + return stub.APIEconomicsDataCalled() + } + return nil +} + +// RatingsData - +func (stub *CoreComponentsHolderStub) RatingsData() process.RatingsInfoHandler { + if stub.RatingsDataCalled != nil { + return stub.RatingsDataCalled() + } + return nil +} + +// Rater - +func (stub *CoreComponentsHolderStub) Rater() sharding.PeerAccountListAndRatingHandler { + if stub.RaterCalled != nil { + return stub.RaterCalled() + } + return nil +} + +// GenesisNodesSetup - +func (stub *CoreComponentsHolderStub) GenesisNodesSetup() sharding.GenesisNodesSetupHandler { + if stub.GenesisNodesSetupCalled != nil { + return stub.GenesisNodesSetupCalled() + } + return nil +} + +// NodesShuffler - +func (stub *CoreComponentsHolderStub) NodesShuffler() nodesCoordinator.NodesShuffler { + if stub.NodesShufflerCalled != nil { + return stub.NodesShufflerCalled() + } + return nil +} + +// EpochNotifier - +func (stub *CoreComponentsHolderStub) EpochNotifier() process.EpochNotifier { + if stub.EpochNotifierCalled != nil { + return stub.EpochNotifierCalled() + } + return nil +} + +// EnableRoundsHandler - +func (stub *CoreComponentsHolderStub) EnableRoundsHandler() process.EnableRoundsHandler { + if stub.EnableRoundsHandlerCalled != nil { + return stub.EnableRoundsHandlerCalled() + } + return nil +} + +// EpochStartNotifierWithConfirm - +func (stub *CoreComponentsHolderStub) EpochStartNotifierWithConfirm() factory.EpochStartNotifierWithConfirm { + if stub.EpochStartNotifierWithConfirmCalled != nil { + return stub.EpochStartNotifierWithConfirmCalled() + } + return nil +} + +// ChanStopNodeProcess - +func (stub *CoreComponentsHolderStub) ChanStopNodeProcess() chan endProcess.ArgEndProcess { + if stub.ChanStopNodeProcessCalled != nil { + return stub.ChanStopNodeProcessCalled() + } + return nil +} + +// GenesisTime - +func (stub *CoreComponentsHolderStub) GenesisTime() time.Time { + if stub.GenesisTimeCalled != nil { + return stub.GenesisTimeCalled() + } + return time.Unix(0, 0) +} + +// ChainID - +func (stub *CoreComponentsHolderStub) ChainID() string { + if stub.ChainIDCalled != nil { + return stub.ChainIDCalled() + } + return "" +} + +// MinTransactionVersion - +func (stub *CoreComponentsHolderStub) MinTransactionVersion() uint32 { + if stub.MinTransactionVersionCalled != nil { + return stub.MinTransactionVersionCalled() + } + return 0 +} + +// TxVersionChecker - +func (stub 
*CoreComponentsHolderStub) TxVersionChecker() process.TxVersionCheckerHandler { + if stub.TxVersionCheckerCalled != nil { + return stub.TxVersionCheckerCalled() + } + return nil +} + +// EncodedAddressLen - +func (stub *CoreComponentsHolderStub) EncodedAddressLen() uint32 { + if stub.EncodedAddressLenCalled != nil { + return stub.EncodedAddressLenCalled() + } + return 0 +} + +// NodeTypeProvider - +func (stub *CoreComponentsHolderStub) NodeTypeProvider() core.NodeTypeProviderHandler { + if stub.NodeTypeProviderCalled != nil { + return stub.NodeTypeProviderCalled() + } + return nil +} + +// WasmVMChangeLocker - +func (stub *CoreComponentsHolderStub) WasmVMChangeLocker() common.Locker { + if stub.WasmVMChangeLockerCalled != nil { + return stub.WasmVMChangeLockerCalled() + } + return nil +} + +// ProcessStatusHandler - +func (stub *CoreComponentsHolderStub) ProcessStatusHandler() common.ProcessStatusHandler { + if stub.ProcessStatusHandlerCalled != nil { + return stub.ProcessStatusHandlerCalled() + } + return nil +} + +// HardforkTriggerPubKey - +func (stub *CoreComponentsHolderStub) HardforkTriggerPubKey() []byte { + if stub.HardforkTriggerPubKeyCalled != nil { + return stub.HardforkTriggerPubKeyCalled() + } + return nil +} + +// EnableEpochsHandler - +func (stub *CoreComponentsHolderStub) EnableEpochsHandler() common.EnableEpochsHandler { + if stub.EnableEpochsHandlerCalled != nil { + return stub.EnableEpochsHandlerCalled() + } + return nil +} + +// IsInterfaceNil - +func (stub *CoreComponentsHolderStub) IsInterfaceNil() bool { + return stub == nil +} diff --git a/testscommon/stateComponentsMock.go b/testscommon/factory/stateComponentsMock.go similarity index 56% rename from testscommon/stateComponentsMock.go rename to testscommon/factory/stateComponentsMock.go index a77f4a96b95..5aa541dffa0 100644 --- a/testscommon/stateComponentsMock.go +++ b/testscommon/factory/stateComponentsMock.go @@ -1,19 +1,34 @@ -package testscommon +package factory import ( "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/factory" "github.com/multiversx/mx-chain-go/state" ) // StateComponentsMock - type StateComponentsMock struct { - PeersAcc state.AccountsAdapter - Accounts state.AccountsAdapter - AccountsAPI state.AccountsAdapter - AccountsRepo state.AccountsRepository - Tries common.TriesHolder - StorageManagers map[string]common.StorageManager - MissingNodesNotifier common.MissingTrieNodesNotifier + PeersAcc state.AccountsAdapter + Accounts state.AccountsAdapter + AccountsAPI state.AccountsAdapter + AccountsAdapterAPICalled func() state.AccountsAdapter + AccountsRepo state.AccountsRepository + Tries common.TriesHolder + StorageManagers map[string]common.StorageManager + MissingNodesNotifier common.MissingTrieNodesNotifier +} + +// NewStateComponentsMockFromRealComponent - +func NewStateComponentsMockFromRealComponent(stateComponents factory.StateComponentsHolder) *StateComponentsMock { + return &StateComponentsMock{ + PeersAcc: stateComponents.PeerAccounts(), + Accounts: stateComponents.AccountsAdapter(), + AccountsAPI: stateComponents.AccountsAdapterAPI(), + AccountsRepo: stateComponents.AccountsRepository(), + Tries: stateComponents.TriesContainer(), + StorageManagers: stateComponents.TrieStorageManagers(), + MissingNodesNotifier: stateComponents.MissingTrieNodesNotifier(), + } } // Create - @@ -43,6 +58,9 @@ func (scm *StateComponentsMock) AccountsAdapter() state.AccountsAdapter { // AccountsAdapterAPI - func (scm *StateComponentsMock) AccountsAdapterAPI() 
state.AccountsAdapter { + if scm.AccountsAdapterAPICalled != nil { + return scm.AccountsAdapterAPICalled() + } return scm.AccountsAPI } diff --git a/testscommon/factory/statusCoreComponentsStub.go b/testscommon/factory/statusCoreComponentsStub.go index 8698a56a11b..a5371408f66 100644 --- a/testscommon/factory/statusCoreComponentsStub.go +++ b/testscommon/factory/statusCoreComponentsStub.go @@ -12,6 +12,7 @@ type StatusCoreComponentsStub struct { NetworkStatisticsField factory.NetworkStatisticsProvider TrieSyncStatisticsField factory.TrieSyncStatisticsProvider AppStatusHandlerField core.AppStatusHandler + AppStatusHandlerCalled func() core.AppStatusHandler StatusMetricsField external.StatusMetricsHandler PersistentStatusHandlerField factory.PersistentStatusHandler } @@ -53,6 +54,9 @@ func (stub *StatusCoreComponentsStub) TrieSyncStatistics() factory.TrieSyncStati // AppStatusHandler - func (stub *StatusCoreComponentsStub) AppStatusHandler() core.AppStatusHandler { + if stub.AppStatusHandlerCalled != nil { + return stub.AppStatusHandlerCalled() + } return stub.AppStatusHandlerField } diff --git a/testscommon/generalConfig.go b/testscommon/generalConfig.go index 132effecc4e..5c246138c50 100644 --- a/testscommon/generalConfig.go +++ b/testscommon/generalConfig.go @@ -55,6 +55,8 @@ func GetGeneralConfig() config.Config { MaxComputableRounds: 1000, MaxConsecutiveRoundsOfRatingDecrease: 2000, SyncProcessTimeInMillis: 6000, + SetGuardianEpochsDelay: 20, + StatusPollingIntervalSec: 10, }, EpochStartConfig: config.EpochStartConfig{ MinRoundsBetweenEpochs: 5, diff --git a/testscommon/guardianMocks/guardianAccountHandlerStub.go b/testscommon/guardianMocks/guardianAccountHandlerStub.go new file mode 100644 index 00000000000..c2fb2319a5f --- /dev/null +++ b/testscommon/guardianMocks/guardianAccountHandlerStub.go @@ -0,0 +1,69 @@ +package guardianMocks + +import ( + "github.com/multiversx/mx-chain-core-go/data/guardians" + "github.com/multiversx/mx-chain-go/state" + vmcommon "github.com/multiversx/mx-chain-vm-common-go" +) + +// GuardedAccountHandlerStub - +type GuardedAccountHandlerStub struct { + GetActiveGuardianCalled func(handler vmcommon.UserAccountHandler) ([]byte, error) + SetGuardianCalled func(uah vmcommon.UserAccountHandler, guardianAddress []byte, txGuardianAddress []byte, guardianServiceUID []byte) error + HasPendingGuardianCalled func(uah state.UserAccountHandler) bool + HasActiveGuardianCalled func(uah state.UserAccountHandler) bool + CleanOtherThanActiveCalled func(uah vmcommon.UserAccountHandler) + GetConfiguredGuardiansCalled func(uah state.UserAccountHandler) (active *guardians.Guardian, pending *guardians.Guardian, err error) +} + +// GetActiveGuardian - +func (gahs *GuardedAccountHandlerStub) GetActiveGuardian(handler vmcommon.UserAccountHandler) ([]byte, error) { + if gahs.GetActiveGuardianCalled != nil { + return gahs.GetActiveGuardianCalled(handler) + } + return nil, nil +} + +// HasActiveGuardian - +func (gahs *GuardedAccountHandlerStub) HasActiveGuardian(uah state.UserAccountHandler) bool { + if gahs.HasActiveGuardianCalled != nil { + return gahs.HasActiveGuardianCalled(uah) + } + return false +} + +// HasPendingGuardian - +func (gahs *GuardedAccountHandlerStub) HasPendingGuardian(uah state.UserAccountHandler) bool { + if gahs.HasPendingGuardianCalled != nil { + return gahs.HasPendingGuardianCalled(uah) + } + return false +} + +// SetGuardian - +func (gahs *GuardedAccountHandlerStub) SetGuardian(uah vmcommon.UserAccountHandler, guardianAddress []byte, txGuardianAddress []byte, 
guardianServiceUID []byte) error { + if gahs.SetGuardianCalled != nil { + return gahs.SetGuardianCalled(uah, guardianAddress, txGuardianAddress, guardianServiceUID) + } + return nil +} + +// CleanOtherThanActive - +func (gahs *GuardedAccountHandlerStub) CleanOtherThanActive(uah vmcommon.UserAccountHandler) { + if gahs.CleanOtherThanActiveCalled != nil { + gahs.CleanOtherThanActiveCalled(uah) + } +} + +// GetConfiguredGuardians - +func (gahs *GuardedAccountHandlerStub) GetConfiguredGuardians(uah state.UserAccountHandler) (active *guardians.Guardian, pending *guardians.Guardian, err error) { + if gahs.GetConfiguredGuardiansCalled != nil { + return gahs.GetConfiguredGuardiansCalled(uah) + } + return nil, nil, nil +} + +// IsInterfaceNil - +func (gahs *GuardedAccountHandlerStub) IsInterfaceNil() bool { + return gahs == nil +} diff --git a/testscommon/interceptedTxMocks/InterceptedUnsignedTxHandlerStub.go b/testscommon/interceptedTxMocks/InterceptedUnsignedTxHandlerStub.go new file mode 100644 index 00000000000..06eaf6c4fcb --- /dev/null +++ b/testscommon/interceptedTxMocks/InterceptedUnsignedTxHandlerStub.go @@ -0,0 +1,65 @@ +package interceptedTxMocks + +import ( + "math/big" + + "github.com/multiversx/mx-chain-core-go/data" +) + +// InterceptedUnsignedTxHandlerStub - +type InterceptedUnsignedTxHandlerStub struct { + SenderShardIdCalled func() uint32 + ReceiverShardIdCalled func() uint32 + NonceCalled func() uint64 + SenderAddressCalled func() []byte + FeeCalled func() *big.Int + TransactionCalled func() data.TransactionHandler +} + +// SenderShardId - +func (iths *InterceptedUnsignedTxHandlerStub) SenderShardId() uint32 { + if iths.SenderShardIdCalled != nil { + return iths.SenderShardIdCalled() + } + return 0 +} + +// ReceiverShardId - +func (iths *InterceptedUnsignedTxHandlerStub) ReceiverShardId() uint32 { + if iths.ReceiverShardIdCalled != nil { + return iths.ReceiverShardIdCalled() + } + return 0 +} + +// Nonce - +func (iths *InterceptedUnsignedTxHandlerStub) Nonce() uint64 { + if iths.NonceCalled != nil { + return iths.NonceCalled() + } + return 0 +} + +// SenderAddress - +func (iths *InterceptedUnsignedTxHandlerStub) SenderAddress() []byte { + if iths.SenderAddressCalled != nil { + return iths.SenderAddressCalled() + } + return nil +} + +// Fee - +func (iths *InterceptedUnsignedTxHandlerStub) Fee() *big.Int { + if iths.FeeCalled != nil { + return iths.FeeCalled() + } + return nil +} + +// Transaction - +func (iths *InterceptedUnsignedTxHandlerStub) Transaction() data.TransactionHandler { + if iths.TransactionCalled != nil { + return iths.TransactionCalled() + } + return nil +} diff --git a/testscommon/interceptedTxMocks/interceptedTxStub.go b/testscommon/interceptedTxMocks/interceptedTxStub.go new file mode 100644 index 00000000000..7e900aa3fba --- /dev/null +++ b/testscommon/interceptedTxMocks/interceptedTxStub.go @@ -0,0 +1,74 @@ +package interceptedTxMocks + +import ( + "math/big" + + "github.com/multiversx/mx-chain-core-go/data" +) + +// InterceptedTxHandlerStub - +type InterceptedTxHandlerStub struct { + SenderShardIdCalled func() uint32 + ReceiverShardIdCalled func() uint32 + NonceCalled func() uint64 + SenderAddressCalled func() []byte + FeeCalled func() *big.Int + TransactionCalled func() data.TransactionHandler + GetTxMessageForSignatureVerificationCalled func() ([]byte, error) +} + +// SenderShardId - +func (iths *InterceptedTxHandlerStub) SenderShardId() uint32 { + if iths.SenderShardIdCalled != nil { + return iths.SenderShardIdCalled() + } + return 0 +} + +// 
ReceiverShardId - +func (iths *InterceptedTxHandlerStub) ReceiverShardId() uint32 { + if iths.ReceiverShardIdCalled != nil { + return iths.ReceiverShardIdCalled() + } + return 0 +} + +// Nonce - +func (iths *InterceptedTxHandlerStub) Nonce() uint64 { + if iths.NonceCalled != nil { + return iths.NonceCalled() + } + return 0 +} + +// SenderAddress - +func (iths *InterceptedTxHandlerStub) SenderAddress() []byte { + if iths.SenderAddressCalled != nil { + return iths.SenderAddressCalled() + } + return nil +} + +// Fee - +func (iths *InterceptedTxHandlerStub) Fee() *big.Int { + if iths.FeeCalled != nil { + return iths.FeeCalled() + } + return nil +} + +// Transaction - +func (iths *InterceptedTxHandlerStub) Transaction() data.TransactionHandler { + if iths.TransactionCalled != nil { + return iths.TransactionCalled() + } + return nil +} + +// GetTxMessageForSignatureVerification - +func (iths *InterceptedTxHandlerStub) GetTxMessageForSignatureVerification() ([]byte, error) { + if iths.GetTxMessageForSignatureVerificationCalled != nil { + return iths.GetTxMessageForSignatureVerificationCalled() + } + return nil, nil +} diff --git a/testscommon/mainFactoryMocks/bootstrapComponentsStub.go b/testscommon/mainFactoryMocks/bootstrapComponentsStub.go index edba12a72ea..8c9d56dca7b 100644 --- a/testscommon/mainFactoryMocks/bootstrapComponentsStub.go +++ b/testscommon/mainFactoryMocks/bootstrapComponentsStub.go @@ -4,18 +4,21 @@ import ( "github.com/multiversx/mx-chain-core-go/core" nodeFactory "github.com/multiversx/mx-chain-go/cmd/node/factory" "github.com/multiversx/mx-chain-go/factory" + "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/sharding" ) // BootstrapComponentsStub - type BootstrapComponentsStub struct { - Bootstrapper factory.EpochStartBootstrapper - BootstrapParams factory.BootstrapParamsHolder - NodeRole core.NodeType - ShCoordinator sharding.Coordinator - HdrVersionHandler nodeFactory.HeaderVersionHandler - VersionedHdrFactory nodeFactory.VersionedHeaderFactory - HdrIntegrityVerifier nodeFactory.HeaderIntegrityVerifierHandler + Bootstrapper factory.EpochStartBootstrapper + BootstrapParams factory.BootstrapParamsHolder + NodeRole core.NodeType + ShCoordinator sharding.Coordinator + ShardCoordinatorCalled func() sharding.Coordinator + HdrVersionHandler nodeFactory.HeaderVersionHandler + VersionedHdrFactory nodeFactory.VersionedHeaderFactory + HdrIntegrityVerifier nodeFactory.HeaderIntegrityVerifierHandler + GuardedAccountHandlerField process.GuardedAccountHandler } // Create - @@ -50,6 +53,9 @@ func (bcs *BootstrapComponentsStub) NodeType() core.NodeType { // ShardCoordinator - func (bcs *BootstrapComponentsStub) ShardCoordinator() sharding.Coordinator { + if bcs.ShardCoordinatorCalled != nil { + return bcs.ShardCoordinatorCalled() + } return bcs.ShCoordinator } @@ -74,6 +80,11 @@ func (bcs *BootstrapComponentsStub) SetShardCoordinator(shardCoordinator shardin return nil } +// GuardedAccountHandler - +func (bcs *BootstrapComponentsStub) GuardedAccountHandler() process.GuardedAccountHandler { + return bcs.GuardedAccountHandlerField +} + // String - func (bcs *BootstrapComponentsStub) String() string { return "BootstrapComponentsStub" diff --git a/testscommon/networkStatisticsProviderStub.go b/testscommon/networkStatisticsProviderStub.go new file mode 100644 index 00000000000..a8d4de1b7d8 --- /dev/null +++ b/testscommon/networkStatisticsProviderStub.go @@ -0,0 +1,117 @@ +package testscommon + +// NetworkStatisticsProviderStub - +type NetworkStatisticsProviderStub 
struct { + BpsSentCalled func() uint64 + BpsRecvCalled func() uint64 + BpsSentPeakCalled func() uint64 + BpsRecvPeakCalled func() uint64 + PercentSentCalled func() uint64 + PercentRecvCalled func() uint64 + TotalBytesSentInCurrentEpochCalled func() uint64 + TotalBytesReceivedInCurrentEpochCalled func() uint64 + TotalSentInCurrentEpochCalled func() string + TotalReceivedInCurrentEpochCalled func() string + EpochConfirmedCalled func(epoch uint32, timestamp uint64) + CloseCalled func() error +} + +// BpsSent - +func (stub *NetworkStatisticsProviderStub) BpsSent() uint64 { + if stub.BpsSentCalled != nil { + return stub.BpsSentCalled() + } + return 0 +} + +// BpsRecv - +func (stub *NetworkStatisticsProviderStub) BpsRecv() uint64 { + if stub.BpsRecvCalled != nil { + return stub.BpsRecvCalled() + } + return 0 +} + +// BpsSentPeak - +func (stub *NetworkStatisticsProviderStub) BpsSentPeak() uint64 { + if stub.BpsSentPeakCalled != nil { + return stub.BpsSentPeakCalled() + } + return 0 +} + +// BpsRecvPeak - +func (stub *NetworkStatisticsProviderStub) BpsRecvPeak() uint64 { + if stub.BpsRecvPeakCalled != nil { + return stub.BpsRecvPeakCalled() + } + return 0 +} + +// PercentSent - +func (stub *NetworkStatisticsProviderStub) PercentSent() uint64 { + if stub.PercentSentCalled != nil { + return stub.PercentSentCalled() + } + return 0 +} + +// PercentRecv - +func (stub *NetworkStatisticsProviderStub) PercentRecv() uint64 { + if stub.PercentRecvCalled != nil { + return stub.PercentRecvCalled() + } + return 0 +} + +// TotalBytesSentInCurrentEpoch - +func (stub *NetworkStatisticsProviderStub) TotalBytesSentInCurrentEpoch() uint64 { + if stub.TotalBytesSentInCurrentEpochCalled != nil { + return stub.TotalBytesSentInCurrentEpochCalled() + } + return 0 +} + +// TotalBytesReceivedInCurrentEpoch - +func (stub *NetworkStatisticsProviderStub) TotalBytesReceivedInCurrentEpoch() uint64 { + if stub.TotalBytesReceivedInCurrentEpochCalled != nil { + return stub.TotalBytesReceivedInCurrentEpochCalled() + } + return 0 +} + +// TotalSentInCurrentEpoch - +func (stub *NetworkStatisticsProviderStub) TotalSentInCurrentEpoch() string { + if stub.TotalSentInCurrentEpochCalled != nil { + return stub.TotalSentInCurrentEpochCalled() + } + return "" +} + +// TotalReceivedInCurrentEpoch - +func (stub *NetworkStatisticsProviderStub) TotalReceivedInCurrentEpoch() string { + if stub.TotalReceivedInCurrentEpochCalled != nil { + return stub.TotalReceivedInCurrentEpochCalled() + } + return "" +} + +// EpochConfirmed - +func (stub *NetworkStatisticsProviderStub) EpochConfirmed(epoch uint32, timestamp uint64) { + if stub.EpochConfirmedCalled != nil { + stub.EpochConfirmedCalled(epoch, timestamp) + } +} + +// Close - +func (stub *NetworkStatisticsProviderStub) Close() error { + if stub.CloseCalled != nil { + return stub.CloseCalled() + } + return nil +} + +// IsInterfaceNil - +func (stub *NetworkStatisticsProviderStub) IsInterfaceNil() bool { + return stub == nil +} diff --git a/testscommon/rounderMock.go b/testscommon/roundHandlerMock.go similarity index 91% rename from testscommon/rounderMock.go rename to testscommon/roundHandlerMock.go index e9d2639eff3..976e8a55181 100644 --- a/testscommon/rounderMock.go +++ b/testscommon/roundHandlerMock.go @@ -1,12 +1,14 @@ package testscommon import ( + "sync" "time" ) // RoundHandlerMock - type RoundHandlerMock struct { - index int64 + indexMut sync.RWMutex + index int64 IndexCalled func() int64 TimeDurationCalled func() time.Duration @@ -30,6 +32,9 @@ func (rndm *RoundHandlerMock) Index() int64 { 
return rndm.IndexCalled() } + rndm.indexMut.RLock() + defer rndm.indexMut.RUnlock() + return rndm.index } @@ -58,7 +63,9 @@ func (rndm *RoundHandlerMock) UpdateRound(genesisRoundTimeStamp time.Time, timeS return } + rndm.indexMut.Lock() rndm.index++ + rndm.indexMut.Unlock() } // RemainingTime - diff --git a/testscommon/shardingMocks/nodesCoordinatorStub.go b/testscommon/shardingMocks/nodesCoordinatorStub.go index f4d8be2a53a..caf833b1a18 100644 --- a/testscommon/shardingMocks/nodesCoordinatorStub.go +++ b/testscommon/shardingMocks/nodesCoordinatorStub.go @@ -18,6 +18,7 @@ type NodesCoordinatorStub struct { ConsensusGroupSizeCalled func(shardID uint32) int ComputeConsensusGroupCalled func(randomness []byte, round uint64, shardId uint32, epoch uint32) (validatorsGroup []nodesCoordinator.Validator, err error) EpochStartPrepareCalled func(metaHdr data.HeaderHandler, body data.BodyHandler) + GetConsensusWhitelistedNodesCalled func(epoch uint32) (map[string]struct{}, error) } // NodesCoordinatorToRegistry - @@ -157,8 +158,11 @@ func (ncm *NodesCoordinatorStub) ShuffleOutForEpoch(_ uint32) { } // GetConsensusWhitelistedNodes return the whitelisted nodes allowed to send consensus messages, for each of the shards -func (ncm *NodesCoordinatorStub) GetConsensusWhitelistedNodes(_ uint32) (map[string]struct{}, error) { - panic("not implemented") +func (ncm *NodesCoordinatorStub) GetConsensusWhitelistedNodes(epoch uint32) (map[string]struct{}, error) { + if ncm.GetConsensusWhitelistedNodesCalled != nil { + return ncm.GetConsensusWhitelistedNodesCalled(epoch) + } + return nil, nil } // GetSelectedPublicKeys - diff --git a/testscommon/shuffleOutCloserStub.go b/testscommon/shuffleOutCloserStub.go new file mode 100644 index 00000000000..b2b82e31aec --- /dev/null +++ b/testscommon/shuffleOutCloserStub.go @@ -0,0 +1,30 @@ +package testscommon + +import "github.com/multiversx/mx-chain-core-go/data/endProcess" + +// ShuffleOutCloserStub - +type ShuffleOutCloserStub struct { + EndOfProcessingHandlerCalled func(event endProcess.ArgEndProcess) error + CloseCalled func() error +} + +// EndOfProcessingHandler - +func (stub *ShuffleOutCloserStub) EndOfProcessingHandler(event endProcess.ArgEndProcess) error { + if stub.EndOfProcessingHandlerCalled != nil { + return stub.EndOfProcessingHandlerCalled(event) + } + return nil +} + +// IsInterfaceNil - +func (stub *ShuffleOutCloserStub) IsInterfaceNil() bool { + return stub == nil +} + +// Close - +func (stub *ShuffleOutCloserStub) Close() error { + if stub.CloseCalled != nil { + return stub.CloseCalled() + } + return nil +} diff --git a/testscommon/state/accountAdapterStub.go b/testscommon/state/accountAdapterStub.go new file mode 100644 index 00000000000..8e9ec352a36 --- /dev/null +++ b/testscommon/state/accountAdapterStub.go @@ -0,0 +1,242 @@ +package state + +import ( + "math/big" + + "github.com/multiversx/mx-chain-go/common" +) + +// StateUserAccountHandlerStub - +type StateUserAccountHandlerStub struct { + AddressBytesCalled func() []byte + IncreaseNonceCalled func(nonce uint64) + GetNonceCalled func() uint64 + SetCodeCalled func(code []byte) + SetCodeMetadataCalled func(codeMetadata []byte) + GetCodeMetadataCalled func() []byte + SetCodeHashCalled func([]byte) + GetCodeHashCalled func() []byte + SetRootHashCalled func([]byte) + GetRootHashCalled func() []byte + SetDataTrieCalled func(trie common.Trie) + DataTrieCalled func() common.DataTrieHandler + RetrieveValueCalled func(key []byte) ([]byte, uint32, error) + SaveKeyValueCalled func(key []byte, value []byte) 
error + AddToBalanceCalled func(value *big.Int) error + SubFromBalanceCalled func(value *big.Int) error + GetBalanceCalled func() *big.Int + ClaimDeveloperRewardsCalled func([]byte) (*big.Int, error) + AddToDeveloperRewardCalled func(*big.Int) + GetDeveloperRewardCalled func() *big.Int + ChangeOwnerAddressCalled func([]byte, []byte) error + SetOwnerAddressCalled func([]byte) + GetOwnerAddressCalled func() []byte + SetUserNameCalled func(userName []byte) + GetUserNameCalled func() []byte + IsGuardedCalled func() bool +} + +// AddressBytes - +func (aas *StateUserAccountHandlerStub) AddressBytes() []byte { + if aas.AddressBytesCalled != nil { + return aas.AddressBytesCalled() + } + + return nil +} + +// IncreaseNonce - +func (aas *StateUserAccountHandlerStub) IncreaseNonce(nonce uint64) { + if aas.IncreaseNonceCalled != nil { + aas.IncreaseNonceCalled(nonce) + } +} + +// GetNonce - +func (aas *StateUserAccountHandlerStub) GetNonce() uint64 { + if aas.GetNonceCalled != nil { + return aas.GetNonceCalled() + } + return 0 +} + +// SetCode - +func (aas *StateUserAccountHandlerStub) SetCode(code []byte) { + if aas.SetCodeCalled != nil { + aas.SetCodeCalled(code) + } +} + +// SetCodeMetadata - +func (aas *StateUserAccountHandlerStub) SetCodeMetadata(codeMetadata []byte) { + if aas.SetCodeMetadataCalled != nil { + aas.SetCodeMetadataCalled(codeMetadata) + } +} + +// GetCodeMetadata - +func (aas *StateUserAccountHandlerStub) GetCodeMetadata() []byte { + if aas.GetCodeMetadataCalled != nil { + return aas.GetCodeMetadataCalled() + } + return nil +} + +// SetCodeHash - +func (aas *StateUserAccountHandlerStub) SetCodeHash(codeHash []byte) { + if aas.SetCodeHashCalled != nil { + aas.SetCodeHashCalled(codeHash) + } +} + +// GetCodeHash - +func (aas *StateUserAccountHandlerStub) GetCodeHash() []byte { + if aas.GetCodeHashCalled != nil { + return aas.GetCodeHashCalled() + } + return nil +} + +// SetRootHash - +func (aas *StateUserAccountHandlerStub) SetRootHash(rootHash []byte) { + if aas.SetRootHashCalled != nil { + aas.SetRootHashCalled(rootHash) + } +} + +// GetRootHash - +func (aas *StateUserAccountHandlerStub) GetRootHash() []byte { + if aas.GetRootHashCalled != nil { + return aas.GetRootHashCalled() + } + return nil +} + +// SetDataTrie - +func (aas *StateUserAccountHandlerStub) SetDataTrie(trie common.Trie) { + if aas.SetDataTrieCalled != nil { + aas.SetDataTrieCalled(trie) + } +} + +// DataTrie - +func (aas *StateUserAccountHandlerStub) DataTrie() common.DataTrieHandler { + if aas.DataTrieCalled != nil { + return aas.DataTrieCalled() + } + return nil +} + +// RetrieveValue - +func (aas *StateUserAccountHandlerStub) RetrieveValue(key []byte) ([]byte, uint32, error) { + if aas.RetrieveValueCalled != nil { + return aas.RetrieveValueCalled(key) + } + return nil, 0, nil +} + +// SaveKeyValue - +func (aas *StateUserAccountHandlerStub) SaveKeyValue(key []byte, value []byte) error { + if aas.SaveKeyValueCalled != nil { + return aas.SaveKeyValueCalled(key, value) + } + return nil +} + +// AddToBalance - +func (aas *StateUserAccountHandlerStub) AddToBalance(value *big.Int) error { + if aas.AddToBalanceCalled != nil { + return aas.AddToBalanceCalled(value) + } + return nil +} + +// SubFromBalance - +func (aas *StateUserAccountHandlerStub) SubFromBalance(value *big.Int) error { + if aas.SubFromBalanceCalled != nil { + return aas.SubFromBalanceCalled(value) + } + return nil +} + +// GetBalance - +func (aas *StateUserAccountHandlerStub) GetBalance() *big.Int { + if aas.GetBalanceCalled != nil { + return 
aas.GetBalanceCalled() + } + return nil +} + +// ClaimDeveloperRewards - +func (aas *StateUserAccountHandlerStub) ClaimDeveloperRewards(senderAddr []byte) (*big.Int, error) { + if aas.ClaimDeveloperRewardsCalled != nil { + return aas.ClaimDeveloperRewardsCalled(senderAddr) + } + return nil, nil +} + +// AddToDeveloperReward - +func (aas *StateUserAccountHandlerStub) AddToDeveloperReward(val *big.Int) { + if aas.AddToDeveloperRewardCalled != nil { + aas.AddToDeveloperRewardCalled(val) + } +} + +// GetDeveloperReward - +func (aas *StateUserAccountHandlerStub) GetDeveloperReward() *big.Int { + if aas.GetDeveloperRewardCalled != nil { + return aas.GetDeveloperRewardCalled() + } + return nil +} + +// ChangeOwnerAddress - +func (aas *StateUserAccountHandlerStub) ChangeOwnerAddress(senderAddr []byte, newOwnerAddr []byte) error { + if aas.ChangeOwnerAddressCalled != nil { + return aas.ChangeOwnerAddressCalled(senderAddr, newOwnerAddr) + } + return nil +} + +// SetOwnerAddress - +func (aas *StateUserAccountHandlerStub) SetOwnerAddress(address []byte) { + if aas.SetOwnerAddressCalled != nil { + aas.SetOwnerAddressCalled(address) + } +} + +// GetOwnerAddress - +func (aas *StateUserAccountHandlerStub) GetOwnerAddress() []byte { + if aas.GetOwnerAddressCalled != nil { + return aas.GetOwnerAddressCalled() + } + return nil +} + +// SetUserName - +func (aas *StateUserAccountHandlerStub) SetUserName(userName []byte) { + if aas.SetUserNameCalled != nil { + aas.SetUserNameCalled(userName) + } +} + +// GetUserName - +func (aas *StateUserAccountHandlerStub) GetUserName() []byte { + if aas.GetUserNameCalled != nil { + return aas.GetUserNameCalled() + } + return nil +} + +// IsGuarded - +func (aas *StateUserAccountHandlerStub) IsGuarded() bool { + if aas.IsGuardedCalled != nil { + return aas.IsGuardedCalled() + } + return false +} + +// IsInterfaceNil - +func (aas *StateUserAccountHandlerStub) IsInterfaceNil() bool { + return aas == nil +} diff --git a/testscommon/state/accountWrapperMock.go b/testscommon/state/accountWrapperMock.go index 4c28ba83d21..2e717f410ad 100644 --- a/testscommon/state/accountWrapperMock.go +++ b/testscommon/state/accountWrapperMock.go @@ -22,6 +22,7 @@ type AccountWrapMock struct { address []byte trackableDataTrie state.DataTrieTracker Balance *big.Int + guarded bool SetNonceWithJournalCalled func(nonce uint64) error `json:"-"` SetCodeHashWithJournalCalled func(codeHash []byte) error `json:"-"` @@ -188,3 +189,8 @@ func (awm *AccountWrapMock) AccountDataHandler() vmcommon.AccountDataHandler { func (awm *AccountWrapMock) GetNonce() uint64 { return awm.nonce } + +// IsGuarded - +func (awm *AccountWrapMock) IsGuarded() bool { + return awm.guarded +}
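Note: IsGuarded is being threaded through all the account test doubles above because guarded accounts impose an extra validation rule on transactions. A standalone sketch of the kind of check this enables, assuming a guarded sender must also carry a guardian co-signature (the rule, names, and error text are illustrative assumptions, not the node's actual validation code):

package main

import (
	"errors"
	"fmt"
)

// account is a pared-down stand-in for the UserAccountHandler seen above;
// only IsGuarded matters for this sketch.
type account interface {
	IsGuarded() bool
}

type plainAccount struct{ guarded bool }

func (a *plainAccount) IsGuarded() bool { return a.guarded }

var errGuardianSignatureMissing = errors.New("guarded account: guardian co-signature required")

// checkGuardedTx sketches the rule the guardian feature implies: a transaction
// from a guarded account must also carry a guardian signature.
func checkGuardedTx(sender account, hasGuardianSig bool) error {
	if sender.IsGuarded() && !hasGuardianSig {
		return errGuardianSignatureMissing
	}
	return nil
}

func main() {
	fmt.Println(checkGuardedTx(&plainAccount{guarded: true}, false))  // error
	fmt.Println(checkGuardedTx(&plainAccount{guarded: false}, false)) // <nil>
}
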
diff --git a/testscommon/state/userAccountStub.go b/testscommon/state/userAccountStub.go index 085ee37502d..538316f0acd 100644 --- a/testscommon/state/userAccountStub.go +++ b/testscommon/state/userAccountStub.go @@ -6,19 +6,23 @@ import ( "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/state" + vmcommon "github.com/multiversx/mx-chain-vm-common-go" ) var _ state.UserAccountHandler = (*UserAccountStub)(nil) // UserAccountStub - type UserAccountStub struct { - Balance *big.Int - DeveloperRewards *big.Int - UserName []byte - Owner []byte - Address []byte - AddToBalanceCalled func(value *big.Int) error - RetrieveValueCalled func(_ []byte) ([]byte, uint32, error) + Balance *big.Int + DeveloperRewards *big.Int + UserName []byte + Owner []byte + Address []byte + AddToBalanceCalled func(value *big.Int) error + DataTrieTrackerCalled func() state.DataTrieTracker + IsGuardedCalled func() bool + AccountDataHandlerCalled func() vmcommon.AccountDataHandler + RetrieveValueCalled func(_ []byte) ([]byte, uint32, error) } // HasNewCode - @@ -155,6 +159,14 @@ func (u *UserAccountStub) SaveKeyValue(_ []byte, _ []byte) error { return nil } +// IsGuarded - +func (u *UserAccountStub) IsGuarded() bool { + if u.IsGuardedCalled != nil { + return u.IsGuardedCalled() + } + return false +} + // SaveDirtyData - func (u *UserAccountStub) SaveDirtyData(_ common.Trie) (map[string][]byte, error) { return nil, nil @@ -162,5 +174,13 @@ func (u *UserAccountStub) SaveDirtyData(_ common.Trie) (map[string][]byte, error // IsInterfaceNil - func (u *UserAccountStub) IsInterfaceNil() bool { - return false + return u == nil +} + +// AccountDataHandler - +func (u *UserAccountStub) AccountDataHandler() vmcommon.AccountDataHandler { + if u.AccountDataHandlerCalled != nil { + return u.AccountDataHandlerCalled() + } + return nil } diff --git a/testscommon/storage/storedDataFactoryStub.go b/testscommon/storage/storedDataFactoryStub.go new file mode 100644 index 00000000000..ef4d7f91902 --- /dev/null +++ b/testscommon/storage/storedDataFactoryStub.go @@ -0,0 +1,20 @@ +package storage + +// StoredDataFactoryStub - +type StoredDataFactoryStub struct { + CreateEmptyCalled func() interface{} +} + +// CreateEmpty - +func (sdf *StoredDataFactoryStub) CreateEmpty() interface{} { + if sdf.CreateEmptyCalled != nil { + return sdf.CreateEmptyCalled() + } + + return nil +} + +// IsInterfaceNil - +func (sdf *StoredDataFactoryStub) IsInterfaceNil() bool { + return sdf == nil +} diff --git a/testscommon/vmcommonMocks/userAccountStub.go b/testscommon/vmcommonMocks/userAccountStub.go new file mode 100644 index 00000000000..6fb0b1f4d85 --- /dev/null +++ b/testscommon/vmcommonMocks/userAccountStub.go @@ -0,0 +1,165 @@ +package vmcommonMocks + +import ( + "math/big" + + vmcommon "github.com/multiversx/mx-chain-vm-common-go" +) + +// UserAccountStub - +type UserAccountStub struct { + GetCodeMetadataCalled func() []byte + SetCodeMetadataCalled func(codeMetadata []byte) + GetCodeHashCalled func() []byte + GetRootHashCalled func() []byte + AccountDataHandlerCalled func() vmcommon.AccountDataHandler + AddToBalanceCalled func(value *big.Int) error + GetBalanceCalled func() *big.Int + ClaimDeveloperRewardsCalled func([]byte) (*big.Int, error) + GetDeveloperRewardCalled func() *big.Int + ChangeOwnerAddressCalled func([]byte, []byte) error + SetOwnerAddressCalled func([]byte) + GetOwnerAddressCalled func() []byte + SetUserNameCalled func(userName []byte) + GetUserNameCalled func() []byte + AddressBytesCalled func() []byte + IncreaseNonceCalled func(nonce uint64) + GetNonceCalled func() uint64 +} + +// GetCodeMetadata - +func (uas *UserAccountStub) GetCodeMetadata() []byte { + if uas.GetCodeMetadataCalled != nil { + return uas.GetCodeMetadataCalled() + } + return nil +} + +// SetCodeMetadata - +func (uas *UserAccountStub) SetCodeMetadata(codeMetaData []byte) { + if uas.SetCodeMetadataCalled != nil { + uas.SetCodeMetadataCalled(codeMetaData) + } +} + +// GetCodeHash - +func (uas *UserAccountStub) GetCodeHash() []byte { + if uas.GetCodeHashCalled != nil { + return uas.GetCodeHashCalled() + } + return nil +} + +// GetRootHash - +func (uas *UserAccountStub) GetRootHash() []byte { + if uas.GetRootHashCalled != nil { + return uas.GetRootHashCalled() + } + return nil +} + +// AccountDataHandler - +func (uas *UserAccountStub) AccountDataHandler() vmcommon.AccountDataHandler { + if 
uas.AccountDataHandlerCalled != nil { + return uas.AccountDataHandlerCalled() + } + return nil +} + +// AddToBalance - +func (uas *UserAccountStub) AddToBalance(value *big.Int) error { + if uas.AddToBalanceCalled != nil { + return uas.AddToBalanceCalled(value) + } + return nil +} + +// GetBalance - +func (uas *UserAccountStub) GetBalance() *big.Int { + if uas.GetBalanceCalled != nil { + return uas.GetBalanceCalled() + } + return nil +} + +// ClaimDeveloperRewards - +func (uas *UserAccountStub) ClaimDeveloperRewards(sndAddress []byte) (*big.Int, error) { + if uas.ClaimDeveloperRewardsCalled != nil { + return uas.ClaimDeveloperRewardsCalled(sndAddress) + } + return nil, nil +} + +// GetDeveloperReward - +func (uas *UserAccountStub) GetDeveloperReward() *big.Int { + if uas.GetDeveloperRewardCalled != nil { + return uas.GetDeveloperRewardCalled() + } + return nil +} + +// ChangeOwnerAddress - +func (uas *UserAccountStub) ChangeOwnerAddress(sndAddress []byte, newAddress []byte) error { + if uas.ChangeOwnerAddressCalled != nil { + return uas.ChangeOwnerAddressCalled(sndAddress, newAddress) + } + return nil +} + +// SetOwnerAddress - +func (uas *UserAccountStub) SetOwnerAddress(address []byte) { + if uas.SetOwnerAddressCalled != nil { + uas.SetOwnerAddressCalled(address) + } +} + +// GetOwnerAddress - +func (uas *UserAccountStub) GetOwnerAddress() []byte { + if uas.GetOwnerAddressCalled != nil { + return uas.GetOwnerAddressCalled() + } + return nil +} + +// SetUserName - +func (uas *UserAccountStub) SetUserName(userName []byte) { + if uas.SetUserNameCalled != nil { + uas.SetUserNameCalled(userName) + } +} + +// GetUserName - +func (uas *UserAccountStub) GetUserName() []byte { + if uas.GetUserNameCalled != nil { + return uas.GetUserNameCalled() + } + return nil +} + +// AddressBytes - +func (uas *UserAccountStub) AddressBytes() []byte { + if uas.AddressBytesCalled != nil { + return uas.AddressBytesCalled() + } + return nil +} + +// IncreaseNonce - +func (uas *UserAccountStub) IncreaseNonce(nonce uint64) { + if uas.IncreaseNonceCalled != nil { + uas.IncreaseNonceCalled(nonce) + } +} + +// GetNonce - +func (uas *UserAccountStub) GetNonce() uint64 { + if uas.GetNonceCalled != nil { + return uas.GetNonceCalled() + } + return 0 +} + +// IsInterfaceNil - +func (uas *UserAccountStub) IsInterfaceNil() bool { + return uas == nil +} diff --git a/trie/export_test.go b/trie/export_test.go index 457168d4a15..e91f2378919 100644 --- a/trie/export_test.go +++ b/trie/export_test.go @@ -75,7 +75,10 @@ func GetDirtyHashes(tr common.Trie) common.ModifiedHashes { // WriteInChanNonBlocking - func WriteInChanNonBlocking(errChan chan error, err error) { - writeInChanNonBlocking(errChan, err) + select { + case errChan <- err: + default: + } } type StorageManagerExtensionStub struct {
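Note: the exported WriteInChanNonBlocking test helper above re-implements the select/default idiom so that a full error channel is skipped instead of blocking the writing goroutine. A standalone demo of the idiom (all names are local to this sketch):

package main

import (
	"errors"
	"fmt"
)

// writeNonBlocking mirrors the select/default pattern from the diff: when the
// buffered channel is full, the error is dropped rather than blocking.
func writeNonBlocking(errChan chan error, err error) {
	select {
	case errChan <- err:
	default: // channel full: drop the error instead of blocking
	}
}

func main() {
	ch := make(chan error, 1)
	writeNonBlocking(ch, errors.New("first"))  // stored
	writeNonBlocking(ch, errors.New("second")) // dropped, buffer is full
	fmt.Println(len(ch), <-ch)                 // 1 first
}

Dropping the surplus error is deliberate: one buffered error is enough to fail the whole operation, and blocking a trie goroutine on a full channel would risk deadlock.
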
diff --git a/trie/node.go b/trie/node.go index 52ff22bacc4..67c7f95d8c3 100644 --- a/trie/node.go +++ b/trie/node.go @@ -12,6 +12,7 @@ import ( "github.com/multiversx/mx-chain-core-go/marshal" "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/trie/keyBuilder" + logger "github.com/multiversx/mx-chain-logger-go" ) const ( @@ -119,7 +120,7 @@ func computeAndSetNodeHash(n node) ([]byte, error) { func getNodeFromDBAndDecode(n []byte, db common.DBWriteCacher, marshalizer marshal.Marshalizer, hasher hashing.Hasher) (node, error) { encChild, err := db.Get(n) if err != nil { - log.Trace(core.GetNodeFromDBErrorString, "error", err, "key", n, "stack trace", string(debug.Stack())) + treatLogError(log, err, n) dbWithID, ok := db.(dbWriteCacherWithIdentifier) if !ok { @@ -133,6 +134,14 @@ return decodeNode(encChild, marshalizer, hasher) } +func treatLogError(logInstance logger.Logger, err error, key []byte) { + if logInstance.GetLevel() != logger.LogTrace { + return + } + + logInstance.Trace(core.GetNodeFromDBErrorString, "error", err, "key", key, "stack trace", string(debug.Stack())) +} + func resolveIfCollapsed(n node, pos byte, db common.DBWriteCacher) error { err := n.isEmptyOrNil() if err != nil { diff --git a/trie/node_test.go b/trie/node_test.go index 8c58419be7b..0b6e850ee63 100644 --- a/trie/node_test.go +++ b/trie/node_test.go @@ -2,6 +2,7 @@ package trie import ( "context" + "errors" "strings" "testing" "time" @@ -9,10 +10,13 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/atomic" "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/common/errChan" dataMock "github.com/multiversx/mx-chain-go/dataRetriever/mock" "github.com/multiversx/mx-chain-go/testscommon" "github.com/multiversx/mx-chain-go/trie/keyBuilder" + logger "github.com/multiversx/mx-chain-logger-go" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestNode_hashChildrenAndNodeBranchNode(t *testing.T) { @@ -519,7 +523,7 @@ func TestPatriciaMerkleTrie_GetAllLeavesCollapsedTrie(t *testing.T) { leavesChannel := &common.TrieIteratorChannels{ LeavesChan: make(chan core.KeyValueHolder, common.TrieLeavesChannelDefaultCapacity), - ErrChan: make(chan error, 1), + ErrChan: errChan.NewErrChanWrapper(), } err := tr.GetAllLeavesOnChannel(leavesChannel, context.Background(), tr.root.getHash(), keyBuilder.NewKeyBuilder()) assert.Nil(t, err) @@ -529,7 +533,7 @@ func TestPatriciaMerkleTrie_GetAllLeavesCollapsedTrie(t *testing.T) { leaves[string(l.Key())] = l.Value() } - err = common.GetErrorFromChanNonBlocking(leavesChannel.ErrChan) + err = leavesChannel.ErrChan.ReadFromChanNonBlocking() assert.Nil(t, err) assert.Equal(t, 3, len(leaves)) @@ -631,6 +635,50 @@ func TestShouldStopIfContextDoneBlockingIfBusy(t *testing.T) { }) } +func TestTreatLogError(t *testing.T) { + t.Parallel() + + t.Run("logger instance is not in Trace mode, should not call", func(t *testing.T) { + t.Parallel() + + key := []byte("key") + err := errors.New("trie was not found") + logInstance := &testscommon.LoggerStub{ + GetLevelCalled: func() logger.LogLevel { + return logger.LogDebug + }, + TraceCalled: func(message string, args ...interface{}) { + assert.Fail(t, "should not have called Log") + }, + } + + treatLogError(logInstance, err, key) + treatLogError(log, err, key) // display only + }) + t.Run("logger instance is in Trace mode, should call", func(t *testing.T) { + t.Parallel() + + key := []byte("key") + wasCalled := false + err := errors.New("error") + logInstance := &testscommon.LoggerStub{ + GetLevelCalled: func() logger.LogLevel { + return logger.LogTrace + }, + TraceCalled: func(message string, args ...interface{}) { + wasCalled = true + require.Equal(t, core.GetNodeFromDBErrorString, message) + require.Equal(t, 6, len(args)) + expectedFirst5Args := []interface{}{"error", err, "key", key, "stack trace"} + require.Equal(t, expectedFirst5Args, args[:5]) + }, + } + + treatLogError(logInstance, err, key) + assert.True(t, wasCalled) + }) +} + func Benchmark_ShouldStopIfContextDoneBlockingIfBusy(b *testing.B) { ctx := context.Background() b.ResetTimer()
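Note: the treatLogError helper introduced in trie/node.go exists because string(debug.Stack()) is expensive and was previously evaluated on every missing-node error even when trace logging was disabled; checking GetLevel first skips that work entirely. A standalone sketch of the guard (the logger types here are local stand-ins, only the level-check pattern mirrors the diff):

package main

import "fmt"

// Minimal stand-ins for the logger used by treatLogError above.
type logLevel int

const (
	levelTrace logLevel = iota
	levelDebug
)

type traceLogger struct{ level logLevel }

func (l *traceLogger) GetLevel() logLevel { return l.level }
func (l *traceLogger) Trace(msg string, args ...interface{}) {
	fmt.Println(append([]interface{}{msg}, args...)...)
}

// expensiveStack stands in for string(debug.Stack()), the costly argument the
// real code avoids computing when tracing is disabled.
func expensiveStack() string { return "goroutine 1 [running]: ..." }

// treatLogError mirrors the shape of the new helper: bail out before building
// expensive arguments unless the logger is at trace level.
func treatLogError(log *traceLogger, err error, key []byte) {
	if log.GetLevel() != levelTrace {
		return
	}
	log.Trace("getNodeFromDB error", "error", err, "key", key, "stack trace", expensiveStack())
}

func main() {
	treatLogError(&traceLogger{level: levelDebug}, fmt.Errorf("missing node"), []byte("key")) // prints nothing
	treatLogError(&traceLogger{level: levelTrace}, fmt.Errorf("missing node"), []byte("key")) // prints the trace line
}

diff --git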
a/trie/patriciaMerkleTrie.go b/trie/patriciaMerkleTrie.go index 75b035966af..fd1e41aca66 100644 --- a/trie/patriciaMerkleTrie.go +++ b/trie/patriciaMerkleTrie.go @@ -216,9 +216,11 @@ func (tr *patriciaMerkleTrie) Commit() error { defer tr.mutOperation.Unlock() if tr.root == nil { + log.Trace("trying to commit empty trie") return nil } if !tr.root.isDirty() { + log.Trace("trying to commit clean trie", "root", tr.root.getHash()) return nil } err := tr.root.setRootHash() @@ -441,13 +443,13 @@ func (tr *patriciaMerkleTrie) GetAllLeavesOnChannel( newTrie, err := tr.recreate(rootHash, tr.trieStorage) if err != nil { close(leavesChannels.LeavesChan) - close(leavesChannels.ErrChan) + leavesChannels.ErrChan.Close() return err } if check.IfNil(newTrie) || newTrie.root == nil { close(leavesChannels.LeavesChan) - close(leavesChannels.ErrChan) + leavesChannels.ErrChan.Close() return nil } @@ -463,14 +465,14 @@ func (tr *patriciaMerkleTrie) GetAllLeavesOnChannel( ctx, ) if err != nil { - writeInChanNonBlocking(leavesChannels.ErrChan, err) + leavesChannels.ErrChan.WriteInChanNonBlocking(err) log.Error("could not get all trie leaves: ", "error", err) } tr.trieStorage.ExitPruningBufferingMode() close(leavesChannels.LeavesChan) - close(leavesChannels.ErrChan) + leavesChannels.ErrChan.Close() }() return nil diff --git a/trie/patriciaMerkleTrie_test.go b/trie/patriciaMerkleTrie_test.go index fc9a23a1843..ae93bf933e4 100644 --- a/trie/patriciaMerkleTrie_test.go +++ b/trie/patriciaMerkleTrie_test.go @@ -16,6 +16,7 @@ import ( "github.com/multiversx/mx-chain-core-go/hashing/keccak" "github.com/multiversx/mx-chain-core-go/marshal" "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/common/errChan" "github.com/multiversx/mx-chain-go/common/holders" "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/testscommon" @@ -482,7 +483,7 @@ func TestPatriciaMerkleTrie_GetSerializedNodesGetFromCheckpoint(t *testing.T) { storageManager.AddDirtyCheckpointHashes(rootHash, dirtyHashes) iteratorChannels := &common.TrieIteratorChannels{ LeavesChan: nil, - ErrChan: make(chan error, 1), + ErrChan: errChan.NewErrChanWrapper(), } storageManager.SetCheckpoint(rootHash, make([]byte, 0), iteratorChannels, nil, &trieMock.MockStatistics{}) trie.WaitForOperationToComplete(storageManager) @@ -569,7 +570,7 @@ func TestPatriciaMerkleTrie_GetAllLeavesOnChannel(t *testing.T) { iteratorChannels := &common.TrieIteratorChannels{ LeavesChan: nil, - ErrChan: make(chan error, 1), + ErrChan: errChan.NewErrChanWrapper(), } err := tr.GetAllLeavesOnChannel(iteratorChannels, context.Background(), []byte{}, keyBuilder.NewDisabledKeyBuilder()) assert.Equal(t, trie.ErrNilTrieIteratorLeavesChannel, err) @@ -595,7 +596,7 @@ func TestPatriciaMerkleTrie_GetAllLeavesOnChannel(t *testing.T) { leavesChannel := &common.TrieIteratorChannels{ LeavesChan: make(chan core.KeyValueHolder, common.TrieLeavesChannelDefaultCapacity), - ErrChan: make(chan error, 1), + ErrChan: errChan.NewErrChanWrapper(), } err := tr.GetAllLeavesOnChannel(leavesChannel, context.Background(), []byte{}, keyBuilder.NewDisabledKeyBuilder()) assert.Nil(t, err) @@ -604,7 +605,7 @@ func TestPatriciaMerkleTrie_GetAllLeavesOnChannel(t *testing.T) { _, ok := <-leavesChannel.LeavesChan assert.False(t, ok) - err = common.GetErrorFromChanNonBlocking(leavesChannel.ErrChan) + err = leavesChannel.ErrChan.ReadFromChanNonBlocking() assert.Nil(t, err) }) @@ -617,7 +618,7 @@ func TestPatriciaMerkleTrie_GetAllLeavesOnChannel(t *testing.T) { leavesChannel := 
&common.TrieIteratorChannels{ LeavesChan: make(chan core.KeyValueHolder, common.TrieLeavesChannelDefaultCapacity), - ErrChan: make(chan error, 1), + ErrChan: errChan.NewErrChanWrapper(), } expectedErr := errors.New("expected error") @@ -637,7 +638,7 @@ func TestPatriciaMerkleTrie_GetAllLeavesOnChannel(t *testing.T) { for leaf := range leavesChannel.LeavesChan { recovered[string(leaf.Key())] = leaf.Value() } - err = common.GetErrorFromChanNonBlocking(leavesChannel.ErrChan) + err = leavesChannel.ErrChan.ReadFromChanNonBlocking() assert.Equal(t, expectedErr, err) assert.Equal(t, 0, len(recovered)) }) @@ -653,7 +654,7 @@ func TestPatriciaMerkleTrie_GetAllLeavesOnChannel(t *testing.T) { leavesChannel := &common.TrieIteratorChannels{ LeavesChan: make(chan core.KeyValueHolder, common.TrieLeavesChannelDefaultCapacity), - ErrChan: make(chan error, 1), + ErrChan: errChan.NewErrChanWrapper(), } expectedErr := errors.New("expected error") @@ -679,7 +680,7 @@ func TestPatriciaMerkleTrie_GetAllLeavesOnChannel(t *testing.T) { for leaf := range leavesChannel.LeavesChan { recovered[string(leaf.Key())] = leaf.Value() } - err = common.GetErrorFromChanNonBlocking(leavesChannel.ErrChan) + err = leavesChannel.ErrChan.ReadFromChanNonBlocking() assert.Equal(t, expectedErr, err) expectedLeaves := map[string][]byte{ @@ -702,7 +703,7 @@ func TestPatriciaMerkleTrie_GetAllLeavesOnChannel(t *testing.T) { leavesChannel := &common.TrieIteratorChannels{ LeavesChan: make(chan core.KeyValueHolder, common.TrieLeavesChannelDefaultCapacity), - ErrChan: make(chan error, 1), + ErrChan: errChan.NewErrChanWrapper(), } err := tr.GetAllLeavesOnChannel(leavesChannel, context.Background(), rootHash, keyBuilder.NewKeyBuilder()) assert.Nil(t, err) @@ -712,7 +713,7 @@ func TestPatriciaMerkleTrie_GetAllLeavesOnChannel(t *testing.T) { for leaf := range leavesChannel.LeavesChan { recovered[string(leaf.Key())] = leaf.Value() } - err = common.GetErrorFromChanNonBlocking(leavesChannel.ErrChan) + err = leavesChannel.ErrChan.ReadFromChanNonBlocking() assert.Nil(t, err) assert.Equal(t, leaves, recovered) }) @@ -1008,7 +1009,7 @@ func TestPatriciaMerkleTrie_ConcurrentOperations(t *testing.T) { case 12: trieIteratorChannels := &common.TrieIteratorChannels{ LeavesChan: make(chan core.KeyValueHolder, 1000), - ErrChan: make(chan error, 1000), + ErrChan: errChan.NewErrChanWrapper(), } err := tr.GetAllLeavesOnChannel( diff --git a/trie/trieStorageManager.go b/trie/trieStorageManager.go index 45c251d4983..dfbce363d38 100644 --- a/trie/trieStorageManager.go +++ b/trie/trieStorageManager.go @@ -437,7 +437,7 @@ func (tsm *trieStorageManager) takeSnapshot(snapshotEntry *snapshotsQueueEntry, stsm, err := newSnapshotTrieStorageManager(tsm, snapshotEntry.epoch) if err != nil { - writeInChanNonBlocking(snapshotEntry.iteratorChannels.ErrChan, err) + snapshotEntry.iteratorChannels.ErrChan.WriteInChanNonBlocking(err) log.Error("takeSnapshot: trie storage manager: newSnapshotTrieStorageManager", "rootHash", snapshotEntry.rootHash, "main trie rootHash", snapshotEntry.mainTrieRootHash, @@ -447,7 +447,7 @@ func (tsm *trieStorageManager) takeSnapshot(snapshotEntry *snapshotsQueueEntry, newRoot, err := newSnapshotNode(stsm, msh, hsh, snapshotEntry.rootHash, snapshotEntry.missingNodesChan) if err != nil { - writeInChanNonBlocking(snapshotEntry.iteratorChannels.ErrChan, err) + snapshotEntry.iteratorChannels.ErrChan.WriteInChanNonBlocking(err) treatSnapshotError(err, "trie storage manager: newSnapshotNode takeSnapshot", snapshotEntry.rootHash, @@ -459,7 +459,7 @@ func (tsm 
*trieStorageManager) takeSnapshot(snapshotEntry *snapshotsQueueEntry, stats := statistics.NewTrieStatistics() err = newRoot.commitSnapshot(stsm, snapshotEntry.iteratorChannels.LeavesChan, snapshotEntry.missingNodesChan, ctx, stats, tsm.idleProvider, rootDepthLevel) if err != nil { - writeInChanNonBlocking(snapshotEntry.iteratorChannels.ErrChan, err) + snapshotEntry.iteratorChannels.ErrChan.WriteInChanNonBlocking(err) treatSnapshotError(err, "trie storage manager: takeSnapshot commit", snapshotEntry.rootHash, @@ -472,13 +472,6 @@ func (tsm *trieStorageManager) takeSnapshot(snapshotEntry *snapshotsQueueEntry, snapshotEntry.stats.AddTrieStats(stats.GetTrieStats()) } -func writeInChanNonBlocking(errChan chan error, err error) { - select { - case errChan <- err: - default: - } -} - func (tsm *trieStorageManager) takeCheckpoint(checkpointEntry *snapshotsQueueEntry, msh marshal.Marshalizer, hsh hashing.Hasher, ctx context.Context, goRoutinesThrottler core.Throttler) { defer func() { tsm.finishOperation(checkpointEntry, "trie checkpoint finished") @@ -489,7 +482,7 @@ func (tsm *trieStorageManager) takeCheckpoint(checkpointEntry *snapshotsQueueEnt newRoot, err := newSnapshotNode(tsm, msh, hsh, checkpointEntry.rootHash, checkpointEntry.missingNodesChan) if err != nil { - writeInChanNonBlocking(checkpointEntry.iteratorChannels.ErrChan, err) + checkpointEntry.iteratorChannels.ErrChan.WriteInChanNonBlocking(err) treatSnapshotError(err, "trie storage manager: newSnapshotNode takeCheckpoint", checkpointEntry.rootHash, @@ -501,7 +494,7 @@ func (tsm *trieStorageManager) takeCheckpoint(checkpointEntry *snapshotsQueueEnt stats := statistics.NewTrieStatistics() err = newRoot.commitCheckpoint(tsm, tsm.checkpointsStorer, tsm.checkpointHashesHolder, checkpointEntry.iteratorChannels.LeavesChan, ctx, stats, tsm.idleProvider, rootDepthLevel) if err != nil { - writeInChanNonBlocking(checkpointEntry.iteratorChannels.ErrChan, err) + checkpointEntry.iteratorChannels.ErrChan.WriteInChanNonBlocking(err) treatSnapshotError(err, "trie storage manager: takeCheckpoint commit", checkpointEntry.rootHash, diff --git a/trie/trieStorageManagerFactory_test.go b/trie/trieStorageManagerFactory_test.go index d1afb5c1737..26d679e157a 100644 --- a/trie/trieStorageManagerFactory_test.go +++ b/trie/trieStorageManagerFactory_test.go @@ -6,6 +6,7 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/common/errChan" "github.com/multiversx/mx-chain-go/testscommon/storageManager" trieMock "github.com/multiversx/mx-chain-go/testscommon/trie" "github.com/multiversx/mx-chain-go/trie" @@ -134,7 +135,7 @@ func TestTrieStorageManager_SerialFuncShadowingCallsExpectedImpl(t *testing.T) { iteratorChannels := &common.TrieIteratorChannels{ LeavesChan: make(chan core.KeyValueHolder), - ErrChan: make(chan error, 1), + ErrChan: errChan.NewErrChanWrapper(), } tsm.SetCheckpoint(nil, nil, iteratorChannels, nil, &trieMock.MockStatistics{}) @@ -167,7 +168,7 @@ func testTsmWithoutSnapshot( iteratorChannels := &common.TrieIteratorChannels{ LeavesChan: make(chan core.KeyValueHolder), - ErrChan: make(chan error, 1), + ErrChan: errChan.NewErrChanWrapper(), } tsm.TakeSnapshot("", nil, nil, iteratorChannels, nil, &trieMock.MockStatistics{}, 10) diff --git a/trie/trieStorageManagerWithoutCheckpoints_test.go b/trie/trieStorageManagerWithoutCheckpoints_test.go index 0f3cf254a77..891a14a392e 100644 --- a/trie/trieStorageManagerWithoutCheckpoints_test.go +++ 
b/trie/trieStorageManagerWithoutCheckpoints_test.go @@ -5,6 +5,7 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/common/errChan" trieMock "github.com/multiversx/mx-chain-go/testscommon/trie" "github.com/multiversx/mx-chain-go/trie" "github.com/stretchr/testify/assert" @@ -27,14 +28,14 @@ func TestTrieStorageManagerWithoutCheckpoints_SetCheckpoint(t *testing.T) { iteratorChannels := &common.TrieIteratorChannels{ LeavesChan: nil, - ErrChan: make(chan error, 1), + ErrChan: errChan.NewErrChanWrapper(), } ts.SetCheckpoint([]byte("rootHash"), make([]byte, 0), iteratorChannels, nil, &trieMock.MockStatistics{}) assert.Equal(t, uint32(0), ts.PruningBlockingOperations()) iteratorChannels = &common.TrieIteratorChannels{ LeavesChan: make(chan core.KeyValueHolder), - ErrChan: make(chan error, 1), + ErrChan: errChan.NewErrChanWrapper(), } ts.SetCheckpoint([]byte("rootHash"), make([]byte, 0), iteratorChannels, nil, &trieMock.MockStatistics{}) assert.Equal(t, uint32(0), ts.PruningBlockingOperations()) diff --git a/trie/trieStorageManagerWithoutSnapshot_test.go b/trie/trieStorageManagerWithoutSnapshot_test.go index 4077c71978a..309e328433f 100644 --- a/trie/trieStorageManagerWithoutSnapshot_test.go +++ b/trie/trieStorageManagerWithoutSnapshot_test.go @@ -6,6 +6,7 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/common/errChan" trieMock "github.com/multiversx/mx-chain-go/testscommon/trie" "github.com/multiversx/mx-chain-go/trie" "github.com/stretchr/testify/assert" @@ -79,7 +80,7 @@ func TestTrieStorageManagerWithoutSnapshot_TakeSnapshot(t *testing.T) { iteratorChannels := &common.TrieIteratorChannels{ LeavesChan: make(chan core.KeyValueHolder), - ErrChan: make(chan error, 1), + ErrChan: errChan.NewErrChanWrapper(), } ts.TakeSnapshot("", nil, nil, iteratorChannels, nil, &trieMock.MockStatistics{}, 10) diff --git a/trie/trieStorageManager_test.go b/trie/trieStorageManager_test.go index dc742c03afc..9f37a27874c 100644 --- a/trie/trieStorageManager_test.go +++ b/trie/trieStorageManager_test.go @@ -9,6 +9,7 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/common/errChan" "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/testscommon" "github.com/multiversx/mx-chain-go/testscommon/hashingMocks" @@ -36,6 +37,12 @@ func getNewTrieStorageManagerArgs() trie.NewTrieStorageManagerArgs { } } +// errChanWithLen extends the BufferedErrChan interface with a Len method +type errChanWithLen interface { + common.BufferedErrChan + Len() int +} + func TestNewTrieStorageManager(t *testing.T) { t.Parallel() @@ -91,7 +98,7 @@ func TestTrieCheckpoint(t *testing.T) { trieStorage.AddDirtyCheckpointHashes(rootHash, dirtyHashes) iteratorChannels := &common.TrieIteratorChannels{ LeavesChan: nil, - ErrChan: make(chan error, 1), + ErrChan: errChan.NewErrChanWrapper(), } trieStorage.SetCheckpoint(rootHash, []byte{}, iteratorChannels, nil, &trieMock.MockStatistics{}) trie.WaitForOperationToComplete(trieStorage) @@ -99,7 +106,10 @@ func TestTrieCheckpoint(t *testing.T) { val, err = trieStorage.GetFromCheckpoint(rootHash) assert.Nil(t, err) assert.NotNil(t, val) - assert.Equal(t, 0, len(iteratorChannels.ErrChan)) + + ch, ok := iteratorChannels.ErrChan.(errChanWithLen) + assert.True(t, ok) + 
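The tests above downcast the iterator's ErrChan to a local errChanWithLen interface so they can still assert on buffered length after the move away from raw error channels. A minimal sketch of a wrapper satisfying the calls exercised here (WriteInChanNonBlocking, ReadFromChanNonBlocking, Close, Len), assuming a one-slot buffer; the real type lives in common/errChan and may differ in detail:

package errChan

type errChanWrapper struct {
	ch chan error
}

// NewErrChanWrapper creates a wrapper over a 1-buffered error channel
func NewErrChanWrapper() *errChanWrapper {
	return &errChanWrapper{
		ch: make(chan error, 1),
	}
}

// WriteInChanNonBlocking stores the error if there is room, otherwise drops it
func (e *errChanWrapper) WriteInChanNonBlocking(err error) {
	select {
	case e.ch <- err:
	default:
	}
}

// ReadFromChanNonBlocking returns the stored error, or nil if the channel is empty
func (e *errChanWrapper) ReadFromChanNonBlocking() error {
	select {
	case err := <-e.ch:
		return err
	default:
		return nil
	}
}

// Close closes the underlying channel
func (e *errChanWrapper) Close() {
	close(e.ch)
}

// Len returns the number of buffered errors
func (e *errChanWrapper) Len() int {
	return len(e.ch)
}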
assert.Equal(t, 0, ch.Len()) } func TestTrieStorageManager_SetCheckpointNilErrorChan(t *testing.T) { @@ -131,13 +141,15 @@ func TestTrieStorageManager_SetCheckpointClosedDb(t *testing.T) { rootHash := []byte("rootHash") iteratorChannels := &common.TrieIteratorChannels{ LeavesChan: make(chan core.KeyValueHolder), - ErrChan: make(chan error, 1), + ErrChan: errChan.NewErrChanWrapper(), } ts.SetCheckpoint(rootHash, rootHash, iteratorChannels, nil, &trieMock.MockStatistics{}) _, ok := <-iteratorChannels.LeavesChan assert.False(t, ok) - assert.Equal(t, 0, len(iteratorChannels.ErrChan)) + ch, ok := iteratorChannels.ErrChan.(errChanWithLen) + assert.True(t, ok) + assert.Equal(t, 0, ch.Len()) } func TestTrieStorageManager_SetCheckpointEmptyTrieRootHash(t *testing.T) { @@ -149,13 +161,15 @@ func TestTrieStorageManager_SetCheckpointEmptyTrieRootHash(t *testing.T) { rootHash := make([]byte, 32) iteratorChannels := &common.TrieIteratorChannels{ LeavesChan: make(chan core.KeyValueHolder), - ErrChan: make(chan error, 1), + ErrChan: errChan.NewErrChanWrapper(), } ts.SetCheckpoint(rootHash, rootHash, iteratorChannels, nil, &trieMock.MockStatistics{}) _, ok := <-iteratorChannels.LeavesChan assert.False(t, ok) - assert.Equal(t, 0, len(iteratorChannels.ErrChan)) + ch, ok := iteratorChannels.ErrChan.(errChanWithLen) + assert.True(t, ok) + assert.Equal(t, 0, ch.Len()) } func TestTrieCheckpoint_DoesNotSaveToCheckpointStorageIfNotDirty(t *testing.T) { @@ -170,7 +184,7 @@ func TestTrieCheckpoint_DoesNotSaveToCheckpointStorageIfNotDirty(t *testing.T) { iteratorChannels := &common.TrieIteratorChannels{ LeavesChan: nil, - ErrChan: make(chan error, 1), + ErrChan: errChan.NewErrChanWrapper(), } trieStorage.SetCheckpoint(rootHash, []byte{}, iteratorChannels, nil, &trieMock.MockStatistics{}) trie.WaitForOperationToComplete(trieStorage) @@ -178,7 +192,9 @@ func TestTrieCheckpoint_DoesNotSaveToCheckpointStorageIfNotDirty(t *testing.T) { val, err = trieStorage.GetFromCheckpoint(rootHash) assert.NotNil(t, err) assert.Nil(t, val) - assert.Equal(t, 0, len(iteratorChannels.ErrChan)) + ch, ok := iteratorChannels.ErrChan.(errChanWithLen) + assert.True(t, ok) + assert.Equal(t, 0, ch.Len()) } func TestTrieStorageManager_IsPruningEnabled(t *testing.T) { @@ -344,13 +360,15 @@ func TestTrieStorageManager_TakeSnapshotClosedDb(t *testing.T) { rootHash := []byte("rootHash") iteratorChannels := &common.TrieIteratorChannels{ LeavesChan: make(chan core.KeyValueHolder), - ErrChan: make(chan error, 1), + ErrChan: errChan.NewErrChanWrapper(), } ts.TakeSnapshot("", rootHash, rootHash, iteratorChannels, nil, &trieMock.MockStatistics{}, 0) _, ok := <-iteratorChannels.LeavesChan assert.False(t, ok) - assert.Equal(t, 0, len(iteratorChannels.ErrChan)) + ch, ok := iteratorChannels.ErrChan.(errChanWithLen) + assert.True(t, ok) + assert.Equal(t, 0, ch.Len()) } func TestTrieStorageManager_TakeSnapshotEmptyTrieRootHash(t *testing.T) { @@ -362,13 +380,15 @@ func TestTrieStorageManager_TakeSnapshotEmptyTrieRootHash(t *testing.T) { rootHash := make([]byte, 32) iteratorChannels := &common.TrieIteratorChannels{ LeavesChan: make(chan core.KeyValueHolder), - ErrChan: make(chan error, 1), + ErrChan: errChan.NewErrChanWrapper(), } ts.TakeSnapshot("", rootHash, rootHash, iteratorChannels, nil, &trieMock.MockStatistics{}, 0) _, ok := <-iteratorChannels.LeavesChan assert.False(t, ok) - assert.Equal(t, 0, len(iteratorChannels.ErrChan)) + ch, ok := iteratorChannels.ErrChan.(errChanWithLen) + assert.True(t, ok) + assert.Equal(t, 0, ch.Len()) } func 
TestTrieStorageManager_TakeSnapshotWithGetNodeFromDBError(t *testing.T) { @@ -381,15 +401,17 @@ func TestTrieStorageManager_TakeSnapshotWithGetNodeFromDBError(t *testing.T) { rootHash := []byte("rootHash") iteratorChannels := &common.TrieIteratorChannels{ LeavesChan: make(chan core.KeyValueHolder), - ErrChan: make(chan error, 1), + ErrChan: errChan.NewErrChanWrapper(), } missingNodesChan := make(chan []byte, 2) ts.TakeSnapshot("", rootHash, rootHash, iteratorChannels, missingNodesChan, &trieMock.MockStatistics{}, 0) _, ok := <-iteratorChannels.LeavesChan assert.False(t, ok) - require.Equal(t, 1, len(iteratorChannels.ErrChan)) - errRecovered := <-iteratorChannels.ErrChan + ch, ok := iteratorChannels.ErrChan.(errChanWithLen) + assert.True(t, ok) + assert.Equal(t, 1, ch.Len()) + errRecovered := iteratorChannels.ErrChan.ReadFromChanNonBlocking() assert.True(t, strings.Contains(errRecovered.Error(), core.GetNodeFromDBErrorString)) } @@ -430,20 +452,20 @@ func TestWriteInChanNonBlocking(t *testing.T) { t.Run("unbuffered, reader has been set up, should add", func(t *testing.T) { t.Parallel() - errChan := make(chan error) + errChannel := make(chan error) var recovered error wg := sync.WaitGroup{} wg.Add(1) // set up the consumer that will be blocked until writing is done go func() { - recovered = <-errChan + recovered = <-errChannel wg.Done() }() time.Sleep(time.Second) // allow the go routine to start - trie.WriteInChanNonBlocking(errChan, err1) + trie.WriteInChanNonBlocking(errChannel, err1) wg.Wait() assert.Equal(t, err1, recovered) @@ -453,8 +475,8 @@ func TestWriteInChanNonBlocking(t *testing.T) { chanFinish := make(chan struct{}) go func() { - errChan := make(chan error) - trie.WriteInChanNonBlocking(errChan, err1) + errChannel := make(chan error) + trie.WriteInChanNonBlocking(errChannel, err1) close(chanFinish) }() @@ -468,53 +490,54 @@ func TestWriteInChanNonBlocking(t *testing.T) { t.Run("buffered (one element), empty chan should add", func(t *testing.T) { t.Parallel() - errChan := make(chan error, 1) - trie.WriteInChanNonBlocking(errChan, err1) - require.Equal(t, 1, len(errChan)) - recovered := <-errChan + errChannel := errChan.NewErrChanWrapper() + errChannel.WriteInChanNonBlocking(err1) + + require.Equal(t, 1, errChannel.Len()) + recovered := errChannel.ReadFromChanNonBlocking() assert.Equal(t, err1, recovered) }) t.Run("buffered (1 element), full chan should not add, but should finish", func(t *testing.T) { t.Parallel() - errChan := make(chan error, 1) - trie.WriteInChanNonBlocking(errChan, err1) - trie.WriteInChanNonBlocking(errChan, err2) + errChannel := errChan.NewErrChanWrapper() + errChannel.WriteInChanNonBlocking(err1) + errChannel.WriteInChanNonBlocking(err2) - require.Equal(t, 1, len(errChan)) - recovered := <-errChan + require.Equal(t, 1, errChannel.Len()) + recovered := errChannel.ReadFromChanNonBlocking() assert.Equal(t, err1, recovered) }) t.Run("buffered (two elements), empty chan should add", func(t *testing.T) { t.Parallel() - errChan := make(chan error, 2) - trie.WriteInChanNonBlocking(errChan, err1) - require.Equal(t, 1, len(errChan)) - recovered := <-errChan + errChannel := make(chan error, 2) + trie.WriteInChanNonBlocking(errChannel, err1) + require.Equal(t, 1, len(errChannel)) + recovered := <-errChannel assert.Equal(t, err1, recovered) - trie.WriteInChanNonBlocking(errChan, err1) - trie.WriteInChanNonBlocking(errChan, err2) - require.Equal(t, 2, len(errChan)) + trie.WriteInChanNonBlocking(errChannel, err1) + trie.WriteInChanNonBlocking(errChannel, err2) + 
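The renamed test cases pin down the non-blocking write contract: a write never blocks, and when the buffer is full the value is silently dropped while the call still returns. A self-contained illustration of that select/default idiom, mirroring the helper deleted from trieStorageManager.go (writeNonBlocking is a local stand-in):

package main

import (
	"errors"
	"fmt"
)

// writeNonBlocking mirrors the select/default idiom: it never blocks,
// silently dropping err when nobody can receive it
func writeNonBlocking(ch chan error, err error) {
	select {
	case ch <- err:
	default:
	}
}

func main() {
	ch := make(chan error, 1)
	writeNonBlocking(ch, errors.New("first"))  // stored, buffer has room
	writeNonBlocking(ch, errors.New("second")) // dropped, buffer is full

	fmt.Println(len(ch)) // 1
	fmt.Println(<-ch)    // first
}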
require.Equal(t, 2, len(errChannel)) - recovered = <-errChan + recovered = <-errChannel assert.Equal(t, err1, recovered) - recovered = <-errChan + recovered = <-errChannel assert.Equal(t, err2, recovered) }) t.Run("buffered (2 elements), full chan should not add, but should finish", func(t *testing.T) { t.Parallel() - errChan := make(chan error, 2) - trie.WriteInChanNonBlocking(errChan, err1) - trie.WriteInChanNonBlocking(errChan, err2) - trie.WriteInChanNonBlocking(errChan, err3) + errChannel := make(chan error, 2) + trie.WriteInChanNonBlocking(errChannel, err1) + trie.WriteInChanNonBlocking(errChannel, err2) + trie.WriteInChanNonBlocking(errChannel, err3) - require.Equal(t, 2, len(errChan)) - recovered := <-errChan + require.Equal(t, 2, len(errChannel)) + recovered := <-errChannel assert.Equal(t, err1, recovered) - recovered = <-errChan + recovered = <-errChannel assert.Equal(t, err2, recovered) }) } diff --git a/update/errors.go b/update/errors.go index b2100dd2b85..dc94a334854 100644 --- a/update/errors.go +++ b/update/errors.go @@ -63,7 +63,7 @@ var ErrNilMarshalizer = errors.New("nil Marshalizer") var ErrNilHeaderValidator = errors.New("nil header validator") // ErrNilUint64Converter signals that uint64converter is nil -var ErrNilUint64Converter = errors.New("unit64converter is nil") +var ErrNilUint64Converter = errors.New("uint64converter is nil") // ErrNilDataPoolHolder signals that the data pool holder is nil var ErrNilDataPoolHolder = errors.New("nil data pool holder") diff --git a/update/factory/fullSyncInterceptors.go b/update/factory/fullSyncInterceptors.go index 357ecc1ccfc..545b4114f51 100644 --- a/update/factory/fullSyncInterceptors.go +++ b/update/factory/fullSyncInterceptors.go @@ -489,6 +489,7 @@ func (ficf *fullSyncInterceptorsContainerFactory) createOneTxInterceptor(topic s ficf.shardCoordinator, ficf.whiteListHandler, ficf.addressPubkeyConv, + ficf.argInterceptorFactory.CoreComponents.TxVersionChecker(), ficf.maxTxNonceDeltaAllowed, ) if err != nil { diff --git a/update/genesis/common.go b/update/genesis/common.go index 8c62a78ef61..023fe6d7c8d 100644 --- a/update/genesis/common.go +++ b/update/genesis/common.go @@ -34,7 +34,7 @@ func getValidatorDataFromLeaves( validators[currentShardId] = append(validators[currentShardId], validatorInfoData) } - err := common.GetErrorFromChanNonBlocking(leavesChannels.ErrChan) + err := leavesChannels.ErrChan.ReadFromChanNonBlocking() if err != nil { return nil, err } diff --git a/update/genesis/export.go b/update/genesis/export.go index 115b6f92a91..51a6fc237b6 100644 --- a/update/genesis/export.go +++ b/update/genesis/export.go @@ -16,6 +16,7 @@ import ( "github.com/multiversx/mx-chain-core-go/hashing" "github.com/multiversx/mx-chain-core-go/marshal" "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/common/errChan" "github.com/multiversx/mx-chain-go/sharding" "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" "github.com/multiversx/mx-chain-go/state" @@ -295,7 +296,7 @@ func (se *stateExport) exportTrie(key string, trie common.Trie) error { leavesChannels := &common.TrieIteratorChannels{ LeavesChan: make(chan core.KeyValueHolder, common.TrieLeavesChannelDefaultCapacity), - ErrChan: make(chan error, 1), + ErrChan: errChan.NewErrChanWrapper(), } err = trie.GetAllLeavesOnChannel(leavesChannels, context.Background(), rootHash, keyBuilder.NewKeyBuilder()) if err != nil { @@ -357,7 +358,7 @@ func (se *stateExport) exportDataTries( } } - err := 
common.GetErrorFromChanNonBlocking(leavesChannels.ErrChan) + err := leavesChannels.ErrChan.ReadFromChanNonBlocking() if err != nil { return err } @@ -379,7 +380,7 @@ func (se *stateExport) exportAccountLeaves( } } - err := common.GetErrorFromChanNonBlocking(leavesChannels.ErrChan) + err := leavesChannels.ErrChan.ReadFromChanNonBlocking() if err != nil { return err } diff --git a/update/genesis/export_test.go b/update/genesis/export_test.go index ec8fbca7a02..d5587d031ae 100644 --- a/update/genesis/export_test.go +++ b/update/genesis/export_test.go @@ -295,7 +295,7 @@ func TestStateExport_ExportTrieShouldExportNodesSetupJson(t *testing.T) { go func() { channels.LeavesChan <- keyValStorage.NewKeyValStorage([]byte("test"), pacB) - channels.ErrChan <- expectedErr + channels.ErrChan.WriteInChanNonBlocking(expectedErr) close(channels.LeavesChan) }() @@ -345,7 +345,7 @@ func TestStateExport_ExportTrieShouldExportNodesSetupJson(t *testing.T) { go func() { channels.LeavesChan <- keyValStorage.NewKeyValStorage([]byte("test"), pacB) close(channels.LeavesChan) - close(channels.ErrChan) + channels.ErrChan.Close() }() return nil diff --git a/vm/errors.go b/vm/errors.go index 887543eeac9..5d54eda1e5d 100644 --- a/vm/errors.go +++ b/vm/errors.go @@ -1,6 +1,8 @@ package vm -import "errors" +import ( + "errors" +) // ErrUnknownSystemSmartContract signals that there is no system smart contract on the provided address var ErrUnknownSystemSmartContract = errors.New("missing system smart contract on selected address") @@ -128,8 +130,8 @@ var ErrNilSystemSCConfig = errors.New("nil system sc config") // ErrNilValidatorAccountsDB signals that nil validator accounts DB was provided var ErrNilValidatorAccountsDB = errors.New("nil validator accounts DB") -// ErrInvalidStartEndVoteNonce signals that invalid arguments where passed for start or end vote nonce -var ErrInvalidStartEndVoteNonce = errors.New("invalid start/end vote nonce") +// ErrInvalidStartEndVoteEpoch signals that invalid arguments were passed for start or end vote epoch +var ErrInvalidStartEndVoteEpoch = errors.New("invalid start/end vote epoch") // ErrEmptyStorage signals that the storage is empty for given key var ErrEmptyStorage = errors.New("storage is nil for given key") @@ -137,6 +139,9 @@ var ErrEmptyStorage = errors.New("storage is nil for given key") // ErrVotedForAnExpiredProposal signals that voting was done for an expired proposal var ErrVotedForAnExpiredProposal = errors.New("voting period is over for this proposal") +// ErrDoubleVote signals that a user is voting for the second time for the same proposal +var ErrDoubleVote = errors.New("double vote is not allowed") + // ErrVotingNotStartedForProposal signals that voting was done for a proposal that not begins yet var ErrVotingNotStartedForProposal = errors.New("voting has not yet started for this proposal") @@ -239,8 +244,11 @@ var ErrNilShardCoordinator = errors.New("nil shard coordinator") // ErrProposalNotFound signals that the storage is empty for given key var ErrProposalNotFound = errors.New("proposal was not found in storage") -// ErrInvalidNumOfInitialWhiteListedAddress signals that 0 initial whiteListed addresses were provided to the governance contract -var ErrInvalidNumOfInitialWhiteListedAddress = errors.New("0 initial whiteListed addresses provided to the governance contract") - // ErrNilEnableEpochsHandler signals that a nil enable epochs handler has been provided var ErrNilEnableEpochsHandler = errors.New("nil enable epochs handler") + +// ErrNotEnoughStakeToVote signals that
the stake/delegation is not enough to vote +var ErrNotEnoughStakeToVote = errors.New("not enough stake/delegate to vote") + +// ErrNotEnoughVotingPower signals that there is not enough voting power to cast the vote +var ErrNotEnoughVotingPower = errors.New("not enough voting power to cast this vote") diff --git a/vm/factory/systemSCFactory.go b/vm/factory/systemSCFactory.go index a37ac4383ec..d5272aed81a 100644 --- a/vm/factory/systemSCFactory.go +++ b/vm/factory/systemSCFactory.go @@ -219,22 +219,23 @@ func (scf *systemSCFactory) createESDTContract() (vm.SystemSmartContract, error) } func (scf *systemSCFactory) createGovernanceContract() (vm.SystemSmartContract, error) { - firstWhitelistAddress, err := scf.addressPubKeyConverter.Decode(scf.systemSCConfig.GovernanceSystemSCConfig.FirstWhitelistedAddress) + configChangeAddress, err := scf.addressPubKeyConverter.Decode(scf.systemSCConfig.GovernanceSystemSCConfig.ChangeConfigAddress) if err != nil { - return nil, fmt.Errorf("%w for GovernanceSystemSCConfig.FirstWhitelistedAddress in systemSCFactory", vm.ErrInvalidAddress) + return nil, fmt.Errorf("%w for GovernanceSystemSCConfig.ChangeConfigAddress in systemSCFactory", vm.ErrInvalidAddress) } argsGovernance := systemSmartContracts.ArgsNewGovernanceContract{ - Eei: scf.systemEI, - GasCost: scf.gasCost, - GovernanceConfig: scf.systemSCConfig.GovernanceSystemSCConfig, - Marshalizer: scf.marshalizer, - Hasher: scf.hasher, - GovernanceSCAddress: vm.GovernanceSCAddress, - DelegationMgrSCAddress: vm.DelegationManagerSCAddress, - ValidatorSCAddress: vm.ValidatorSCAddress, - EnableEpochsHandler: scf.enableEpochsHandler, - InitialWhiteListedAddresses: [][]byte{firstWhitelistAddress}, + Eei: scf.systemEI, + GasCost: scf.gasCost, + GovernanceConfig: scf.systemSCConfig.GovernanceSystemSCConfig, + Marshalizer: scf.marshalizer, + Hasher: scf.hasher, + GovernanceSCAddress: vm.GovernanceSCAddress, + DelegationMgrSCAddress: vm.DelegationManagerSCAddress, + ValidatorSCAddress: vm.ValidatorSCAddress, + EnableEpochsHandler: scf.enableEpochsHandler, + UnBondPeriodInEpochs: scf.systemSCConfig.StakingSystemSCConfig.UnBondPeriodInEpochs, + ConfigChangeAddress: configChangeAddress, } governance, err := systemSmartContracts.NewGovernanceContract(argsGovernance) return governance, err diff --git a/vm/factory/systemSCFactory_test.go b/vm/factory/systemSCFactory_test.go index 921931f1b5d..2d52a260a18 100644 --- a/vm/factory/systemSCFactory_test.go +++ b/vm/factory/systemSCFactory_test.go @@ -36,13 +36,20 @@ func createMockNewSystemScFactoryArgs() ArgsNewSystemSCFactory { OwnerAddress: "aaaaaa", }, GovernanceSystemSCConfig: config.GovernanceSystemSCConfig{ + V1: config.GovernanceSystemSCConfigV1{ + NumNodes: 3, + MinPassThreshold: 1, + MinQuorum: 2, + MinVetoThreshold: 2, + ProposalCost: "100", + }, Active: config.GovernanceSystemSCConfigActive{ ProposalCost: "500", - MinQuorum: "50", - MinPassThreshold: "50", - MinVetoThreshold: "50", + MinQuorum: 0.5, + MinPassThreshold: 0.5, + MinVetoThreshold: 0.5, }, - FirstWhitelistedAddress: "3132333435363738393031323334353637383930313233343536373839303234", + ChangeConfigAddress: "3132333435363738393031323334353637383930313233343536373839303234", }, StakingSystemSCConfig: config.StakingSystemSCConfig{ GenesisNodePrice: "1000", @@ -238,19 +245,6 @@ func TestSystemSCFactory_CreateWithBadDelegationManagerConfigChangeAddressShould assert.True(t, errors.Is(err, vm.ErrInvalidAddress)) } -func TestSystemSCFactory_CreateWithFirstWhiteListAddressShouldError(t *testing.T) { - t.Parallel() - 
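The factory change above wraps the vm.ErrInvalidAddress sentinel with %w, so callers can match it via errors.Is no matter what context is appended. A small sketch of the pattern, with a plain hex decode standing in for the pubkey converter:

package main

import (
	"encoding/hex"
	"errors"
	"fmt"
)

// ErrInvalidAddress stands in for the vm package sentinel
var ErrInvalidAddress = errors.New("invalid address")

// decodeConfigAddress wraps the sentinel with %w and adds context, the same
// shape used for ChangeConfigAddress in systemSCFactory
func decodeConfigAddress(encoded string) ([]byte, error) {
	decoded, err := hex.DecodeString(encoded)
	if err != nil {
		return nil, fmt.Errorf("%w for GovernanceSystemSCConfig.ChangeConfigAddress", ErrInvalidAddress)
	}
	return decoded, nil
}

func main() {
	_, err := decodeConfigAddress("not a hex string")
	fmt.Println(errors.Is(err, ErrInvalidAddress)) // true, despite the added context
}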
- arguments := createMockNewSystemScFactoryArgs() - arguments.SystemSCConfig.GovernanceSystemSCConfig.FirstWhitelistedAddress = "not a hex string" - scFactory, _ := NewSystemSCFactory(arguments) - - container, err := scFactory.Create() - - assert.True(t, check.IfNil(container)) - assert.True(t, errors.Is(err, vm.ErrInvalidAddress)) -} - func TestSystemSCFactory_Create(t *testing.T) { t.Parallel() diff --git a/vm/gasCost.go b/vm/gasCost.go index 57762655960..33254a1a204 100644 --- a/vm/gasCost.go +++ b/vm/gasCost.go @@ -34,6 +34,7 @@ type MetaChainSystemSCsCost struct { DelegationMgrOps uint64 ValidatorToDelegation uint64 GetAllNodeStates uint64 + GetActiveFund uint64 FixWaitingListSize uint64 } diff --git a/vm/interface.go b/vm/interface.go index 64891d642ec..4436f1e1986 100644 --- a/vm/interface.go +++ b/vm/interface.go @@ -56,6 +56,8 @@ type SystemEI interface { CanUnJail(blsKey []byte) bool IsBadRating(blsKey []byte) bool CleanStorageUpdates() + GetTotalSentToUser(dest []byte) *big.Int + GetLogs() []*vmcommon.LogEntry IsInterfaceNil() bool } diff --git a/vm/mock/systemEIStub.go b/vm/mock/systemEIStub.go index ab4e953845b..271e36214e3 100644 --- a/vm/mock/systemEIStub.go +++ b/vm/mock/systemEIStub.go @@ -104,6 +104,16 @@ func (s *SystemEIStub) UseGas(gas uint64) error { return nil } +// GetTotalSentToUser - +func (s *SystemEIStub) GetTotalSentToUser(_ []byte) *big.Int { + return big.NewInt(0) +} + +// GetLogs - +func (s *SystemEIStub) GetLogs() []*vmcommon.LogEntry { + return make([]*vmcommon.LogEntry, 0) +} + // SetGasProvided - func (s *SystemEIStub) SetGasProvided(_ uint64) { } diff --git a/vm/systemSmartContracts/defaults/gasMap.go b/vm/systemSmartContracts/defaults/gasMap.go index 9137f03cc35..98a1ce483d9 100644 --- a/vm/systemSmartContracts/defaults/gasMap.go +++ b/vm/systemSmartContracts/defaults/gasMap.go @@ -47,6 +47,9 @@ func FillGasMapBuiltInCosts(value uint64) map[string]uint64 { gasMap["ESDTNFTAddUri"] = value gasMap["ESDTNFTUpdateAttributes"] = value gasMap["ESDTNFTMultiTransfer"] = value + gasMap["SetGuardian"] = value + gasMap["GuardAccount"] = value + gasMap["UnGuardAccount"] = value return gasMap } @@ -75,6 +78,7 @@ func FillGasMapMetaChainSystemSCsCosts(value uint64) map[string]uint64 { gasMap["DelegationMgrOps"] = value gasMap["GetAllNodeStates"] = value gasMap["ValidatorToDelegation"] = value + gasMap["GetActiveFund"] = value gasMap["FixWaitingListSize"] = value return gasMap diff --git a/vm/systemSmartContracts/delegation.go b/vm/systemSmartContracts/delegation.go index 69866adb3f8..1ef2d493bf8 100644 --- a/vm/systemSmartContracts/delegation.go +++ b/vm/systemSmartContracts/delegation.go @@ -34,6 +34,9 @@ const deleteWhitelistForMerge = "deleteWhitelistForMerge" const whitelistedAddress = "whitelistedAddress" const changeOwner = "changeOwner" const withdraw = "withdraw" +const claimRewards = "claimRewards" +const reDelegateRewards = "reDelegateRewards" +const delegate = "delegate" const ( active = uint32(0) @@ -199,7 +202,7 @@ func (d *delegation) Execute(args *vmcommon.ContractCallInput) vmcommon.ReturnCo return d.unBondNodes(args) case "unJailNodes": return d.unJailNodes(args) - case "delegate": + case delegate: return d.delegate(args) case "unDelegate": return d.unDelegate(args) @@ -215,7 +218,7 @@ func (d *delegation) Execute(args *vmcommon.ContractCallInput) vmcommon.ReturnCo return d.modifyTotalDelegationCap(args) case "updateRewards": return d.updateRewards(args) - case "claimRewards": + case claimRewards: return d.claimRewards(args) case "getRewardData": 
return d.getRewardData(args) @@ -245,7 +248,7 @@ func (d *delegation) Execute(args *vmcommon.ContractCallInput) vmcommon.ReturnCo return d.getContractConfig(args) case "unStakeAtEndOfEpoch": return d.unStakeAtEndOfEpoch(args) - case "reDelegateRewards": + case reDelegateRewards: return d.reDelegateRewards(args) case "reStakeUnStakedNodes": return d.reStakeUnStakedNodes(args) @@ -1685,11 +1688,6 @@ func (d *delegation) unDelegate(args *vmcommon.ContractCallInput) vmcommon.Retur return vmcommon.UserError } - if isStakeLocked(d.eei, d.governanceSCAddr, args.CallerAddr) { - d.eei.AddReturnMessage("stake is locked for voting") - return vmcommon.UserError - } - delegationManagement, err := getDelegationManagement(d.eei, d.marshalizer, d.delegationMgrSCAddress) if err != nil { d.eei.AddReturnMessage("error getting minimum delegation amount " + err.Error()) @@ -2085,6 +2083,9 @@ func (d *delegation) withdraw(args *vmcommon.ContractCallInput) vmcommon.ReturnC } if totalUnBondable.Cmp(zero) == 0 { d.eei.AddReturnMessage("nothing to unBond") + if d.enableEpochsHandler.IsMultiClaimOnDelegationEnabled() { + return vmcommon.UserError + } return vmcommon.Ok } diff --git a/vm/systemSmartContracts/delegationManager.go b/vm/systemSmartContracts/delegationManager.go index cc453d36792..3d7756bb786 100644 --- a/vm/systemSmartContracts/delegationManager.go +++ b/vm/systemSmartContracts/delegationManager.go @@ -144,6 +144,10 @@ func (d *delegationManager) Execute(args *vmcommon.ContractCallInput) vmcommon.R return d.mergeValidatorToDelegation(args, d.checkCallerIsOwnerOfContract) case "mergeValidatorToDelegationWithWhitelist": return d.mergeValidatorToDelegation(args, d.isAddressWhiteListedForMerge) + case "claimMulti": + return d.claimMulti(args) + case "reDelegateMulti": + return d.reDelegateMulti(args) } d.eei.AddReturnMessage("invalid function to call") @@ -495,6 +499,92 @@ func (d *delegationManager) getContractConfig(args *vmcommon.ContractCallInput) return vmcommon.Ok } +func (d *delegationManager) claimMulti(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { + returnCode := d.executeFuncOnListAddresses(args, claimRewards) + if returnCode != vmcommon.Ok { + return returnCode + } + totalSent := d.eei.GetTotalSentToUser(args.CallerAddr) + d.eei.Finish(totalSent.Bytes()) + + return vmcommon.Ok +} + +func (d *delegationManager) reDelegateMulti(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { + returnCode := d.executeFuncOnListAddresses(args, reDelegateRewards) + if returnCode != vmcommon.Ok { + return returnCode + } + logs := d.eei.GetLogs() + totalReDelegated := getTotalReDelegatedFromLogs(logs) + d.eei.Finish(totalReDelegated.Bytes()) + + return vmcommon.Ok +} + +func getTotalReDelegatedFromLogs(logs []*vmcommon.LogEntry) *big.Int { + totalReDelegated := big.NewInt(0) + for _, reDelegateLog := range logs { + if len(reDelegateLog.Topics) < 1 { + continue + } + if !bytes.Equal(reDelegateLog.Identifier, []byte(delegate)) { + continue + } + valueFromFirstTopic := big.NewInt(0).SetBytes(reDelegateLog.Topics[0]) + totalReDelegated.Add(totalReDelegated, valueFromFirstTopic) + } + + return totalReDelegated +} + +func (d *delegationManager) executeFuncOnListAddresses( + args *vmcommon.ContractCallInput, + funcName string, +) vmcommon.ReturnCode { + if !d.enableEpochsHandler.IsMultiClaimOnDelegationEnabled() { + d.eei.AddReturnMessage("invalid function to call") + return vmcommon.UserError + } + if len(args.Arguments) < 1 { + d.eei.AddReturnMessage(vm.ErrInvalidNumOfArguments.Error()) + return 
vmcommon.UserError + } + err := d.eei.UseGas(d.gasCost.MetaChainSystemSCsCost.DelegationOps) + if err != nil { + d.eei.AddReturnMessage(err.Error()) + return vmcommon.UserError + } + + mapAddresses := make(map[string]struct{}) + var vmOutput *vmcommon.VMOutput + var found bool + for _, address := range args.Arguments { + if len(address) != len(args.CallerAddr) { + d.eei.AddReturnMessage(vm.ErrInvalidArgument.Error()) + return vmcommon.UserError + } + _, found = mapAddresses[string(address)] + if found { + d.eei.AddReturnMessage("duplicated input") + return vmcommon.UserError + } + + mapAddresses[string(address)] = struct{}{} + vmOutput, err = d.eei.ExecuteOnDestContext(address, args.CallerAddr, big.NewInt(0), []byte(funcName)) + if err != nil { + d.eei.AddReturnMessage(err.Error()) + return vmcommon.UserError + } + + if vmOutput.ReturnCode != vmcommon.Ok { + return vmOutput.ReturnCode + } + } + + return vmcommon.Ok +} + func createNewAddress(lastAddress []byte) []byte { i := 0 for ; i < len(lastAddress) && lastAddress[i] == 0; i++ { diff --git a/vm/systemSmartContracts/delegationManager_test.go b/vm/systemSmartContracts/delegationManager_test.go index 363199dda18..ed374a69b24 100644 --- a/vm/systemSmartContracts/delegationManager_test.go +++ b/vm/systemSmartContracts/delegationManager_test.go @@ -40,6 +40,7 @@ func createMockArgumentsForDelegationManager() ArgsNewDelegationManager { EnableEpochsHandler: &testscommon.EnableEpochsHandlerStub{ IsDelegationManagerFlagEnabledField: true, IsValidatorToDelegationFlagEnabledField: true, + IsMultiClaimOnDelegationEnabledField: true, }, } } @@ -1085,3 +1086,166 @@ func TestDelegationManagerSystemSC_MakeNewContractFromValidatorDataCallerAlready assert.Equal(t, vmcommon.UserError, returnCode) assert.Equal(t, eei.returnMessage, "caller already deployed a delegation sc") } + +func TestDelegationManagerSystemSC_ClaimMultipleDelegationFails(t *testing.T) { + t.Parallel() + + args := createMockArgumentsForDelegationManager() + eei := createDefaultEei() + _ = eei.SetSystemSCContainer( + createSystemSCContainer(eei), + ) + + enableHandlerStub := &testscommon.EnableEpochsHandlerStub{ + IsMultiClaimOnDelegationEnabledField: false, + IsDelegationManagerFlagEnabledField: true, + } + args.EnableEpochsHandler = enableHandlerStub + args.Eei = eei + createDelegationManagerConfig(eei, args.Marshalizer, big.NewInt(20)) + + dm, _ := NewDelegationManagerSystemSC(args) + eei.SetSCAddress(dm.delegationMgrSCAddress) + + vmInput := getDefaultVmInputForDelegationManager("claimMulti", [][]byte{}) + returnCode := dm.Execute(vmInput) + assert.Equal(t, returnCode, vmcommon.UserError) + assert.Equal(t, eei.GetReturnMessage(), "invalid function to call") + + eei.returnMessage = "" + enableHandlerStub.IsMultiClaimOnDelegationEnabledField = true + returnCode = dm.Execute(vmInput) + assert.Equal(t, returnCode, vmcommon.UserError) + assert.Equal(t, eei.GetReturnMessage(), vm.ErrInvalidNumOfArguments.Error()) + + dm.gasCost.MetaChainSystemSCsCost.DelegationOps = 10 + eei.returnMessage = "" + eei.gasRemaining = 5 + vmInput.Arguments = [][]byte{{1}} + returnCode = dm.Execute(vmInput) + assert.Equal(t, returnCode, vmcommon.UserError) + assert.Equal(t, eei.GetReturnMessage(), vm.ErrNotEnoughGas.Error()) + + eei.returnMessage = "" + eei.gasRemaining = 20 + returnCode = dm.Execute(vmInput) + assert.Equal(t, returnCode, vmcommon.UserError) + assert.Equal(t, eei.GetReturnMessage(), vm.ErrInvalidArgument.Error()) + + eei.returnMessage = "" + eei.gasRemaining = 20 + vmInput.Arguments[0] = 
vmInput.CallerAddr + returnCode = dm.Execute(vmInput) + assert.Equal(t, returnCode, vmcommon.UserError) + assert.Equal(t, eei.GetReturnMessage(), "missing system smart contract on selected address") + + vmInput.CallerAddr = bytes.Repeat([]byte{1}, 32) + vmInput.Arguments[0] = vm.FirstDelegationSCAddress + eei.returnMessage = "" + eei.gasRemaining = 20 + returnCode = dm.Execute(vmInput) + assert.Equal(t, returnCode, vmcommon.UserError) + assert.Equal(t, eei.GetReturnMessage(), "first delegation sc address cannot be called") + + vmInput.Function = "reDelegateMulti" + eei.returnMessage = "" + eei.gasRemaining = 20 + returnCode = dm.Execute(vmInput) + assert.Equal(t, returnCode, vmcommon.UserError) + assert.Equal(t, eei.GetReturnMessage(), "first delegation sc address cannot be called") +} + +func TestDelegationManagerSystemSC_ClaimMultipleDelegationDuplicatedInput(t *testing.T) { + t.Parallel() + + d, eei := createTestEEIAndDelegationFormMergeValidator() + _ = prepareVmInputContextAndDelegationManager(d, eei) + + _ = eei.SetSystemSCContainer( + &mock.SystemSCContainerStub{ + GetCalled: func(key []byte) (vm.SystemSmartContract, error) { + return &mock.SystemSCStub{ + ExecuteCalled: func(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { + _ = d.eei.Transfer(args.RecipientAddr, args.CallerAddr, big.NewInt(10), nil, 0) + return vmcommon.Ok + }, + }, nil + }}) + + vmInput := getDefaultVmInputForDelegationManager("claimMulti", [][]byte{}) + vmInput.CallerAddr = bytes.Repeat([]byte{1}, 32) + vmInput.RecipientAddr = vm.DelegationManagerSCAddress + vmInput.Arguments = [][]byte{bytes.Repeat([]byte{2}, 32), bytes.Repeat([]byte{2}, 32)} + returnCode := d.Execute(vmInput) + assert.Equal(t, vmcommon.UserError, returnCode) + assert.Equal(t, eei.GetReturnMessage(), "duplicated input") +} + +func TestDelegationManagerSystemSC_ClaimMultipleDelegation(t *testing.T) { + t.Parallel() + + d, eei := createTestEEIAndDelegationFormMergeValidator() + _ = prepareVmInputContextAndDelegationManager(d, eei) + + _ = eei.SetSystemSCContainer( + &mock.SystemSCContainerStub{ + GetCalled: func(key []byte) (vm.SystemSmartContract, error) { + return &mock.SystemSCStub{ + ExecuteCalled: func(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { + _ = d.eei.Transfer(args.CallerAddr, args.RecipientAddr, big.NewInt(10), nil, 0) + return vmcommon.Ok + }, + }, nil + }}) + + vmInput := getDefaultVmInputForDelegationManager("claimMulti", [][]byte{}) + vmInput.CallerAddr = bytes.Repeat([]byte{1}, 32) + vmInput.RecipientAddr = vm.DelegationManagerSCAddress + vmInput.Arguments = [][]byte{bytes.Repeat([]byte{2}, 32), bytes.Repeat([]byte{3}, 32)} + returnCode := d.Execute(vmInput) + require.Equal(t, vmcommon.Ok, returnCode) + require.Equal(t, len(eei.output), 1) + require.Equal(t, eei.output[0], big.NewInt(20).Bytes()) + require.Equal(t, len(eei.outputAccounts[string(vmInput.CallerAddr)].OutputTransfers), 2) +} + +func TestDelegationManagerSystemSC_ReDelegateMulti(t *testing.T) { + t.Parallel() + + d, eei := createTestEEIAndDelegationFormMergeValidator() + _ = prepareVmInputContextAndDelegationManager(d, eei) + + _ = eei.SetSystemSCContainer( + &mock.SystemSCContainerStub{ + GetCalled: func(key []byte) (vm.SystemSmartContract, error) { + return &mock.SystemSCStub{ + ExecuteCalled: func(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { + entry := &vmcommon.LogEntry{ + Identifier: []byte("delegate"), + Address: args.CallerAddr, + Topics: [][]byte{big.NewInt(10).Bytes()}, + Data: nil, + } + d.eei.AddLogEntry(entry) + + wrongEntry 
:= &vmcommon.LogEntry{} + d.eei.AddLogEntry(wrongEntry) + + wrongEntry2 := &vmcommon.LogEntry{ + Topics: [][]byte{big.NewInt(10).Bytes()}, + } + d.eei.AddLogEntry(wrongEntry2) + return vmcommon.Ok + }, + }, nil + }}) + + vmInput := getDefaultVmInputForDelegationManager("reDelegateMulti", [][]byte{}) + vmInput.CallerAddr = bytes.Repeat([]byte{1}, 32) + vmInput.RecipientAddr = vm.DelegationManagerSCAddress + vmInput.Arguments = [][]byte{bytes.Repeat([]byte{2}, 32), bytes.Repeat([]byte{3}, 32)} + returnCode := d.Execute(vmInput) + require.Equal(t, vmcommon.Ok, returnCode) + require.Equal(t, len(eei.output), 1) + require.Equal(t, eei.output[0], big.NewInt(20).Bytes()) +} diff --git a/vm/systemSmartContracts/delegation_test.go b/vm/systemSmartContracts/delegation_test.go index 29821fcf027..86d93954064 100644 --- a/vm/systemSmartContracts/delegation_test.go +++ b/vm/systemSmartContracts/delegation_test.go @@ -53,6 +53,7 @@ func createMockArgumentsForDelegation() ArgsNewDelegation { IsComputeRewardCheckpointFlagEnabledField: true, IsValidatorToDelegationFlagEnabledField: true, IsReDelegateBelowMinCheckFlagEnabledField: true, + IsMultiClaimOnDelegationEnabledField: true, }, } } @@ -919,7 +920,6 @@ func TestDelegationSystemSC_ExecuteDelegateStakeNodes(t *testing.T) { vmOutput := eei.CreateVMOutput() assert.Equal(t, 6, len(vmOutput.OutputAccounts)) - assert.Equal(t, 2, len(vmOutput.OutputAccounts[string(vm.StakingSCAddress)].OutputTransfers)) output = d.Execute(vmInput) eei.gasRemaining = vmInput.GasProvided @@ -1723,41 +1723,6 @@ func TestDelegationSystemSC_ExecuteUnDelegatePartOfFunds(t *testing.T) { assert.Equal(t, eei.output[3], []byte{50}) } -func TestDelegationSystemSC_ExecuteUnDelegateFailsAsLockedForVoting(t *testing.T) { - t.Parallel() - - fundKey := append([]byte(fundKeyPrefix), []byte{1}...) - args := createMockArgumentsForDelegation() - eei := createDefaultEei() - args.Eei = eei - addValidatorAndStakingScToVmContext(eei) - createDelegationManagerConfig(eei, args.Marshalizer, big.NewInt(10)) - - vmInput := getDefaultVmInputForFunc("unDelegate", [][]byte{{100}}) - d, _ := NewDelegationSystemSC(args) - - _ = d.saveDelegatorData(vmInput.CallerAddr, &DelegatorData{ - ActiveFund: fundKey, - UnStakedFunds: [][]byte{}, - UnClaimedRewards: big.NewInt(0), - TotalCumulatedRewards: big.NewInt(0), - }) - _ = d.saveFund(fundKey, &Fund{ - Value: big.NewInt(100), - }) - _ = d.saveGlobalFundData(&GlobalFundData{ - TotalActive: big.NewInt(100), - TotalUnStaked: big.NewInt(0), - }) - d.eei.SetStorage([]byte(lastFundKey), fundKey) - stakeLockKey := append([]byte(stakeLockPrefix), vmInput.CallerAddr...) 
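The reDelegateMulti test above feeds one well-formed delegate log and two malformed entries through the summing helper, expecting the malformed ones to be skipped. A standalone sketch of that filtering, mirroring getTotalReDelegatedFromLogs on a trimmed-down log type:

package main

import (
	"bytes"
	"fmt"
	"math/big"
)

// logEntry mirrors the vmcommon.LogEntry fields the helper actually reads
type logEntry struct {
	Identifier []byte
	Topics     [][]byte
}

// sumDelegateLogs skips entries with no topics or a wrong identifier,
// then accumulates topic[0] of every "delegate" entry
func sumDelegateLogs(logs []*logEntry) *big.Int {
	total := big.NewInt(0)
	for _, entry := range logs {
		if len(entry.Topics) < 1 {
			continue
		}
		if !bytes.Equal(entry.Identifier, []byte("delegate")) {
			continue
		}
		total.Add(total, big.NewInt(0).SetBytes(entry.Topics[0]))
	}
	return total
}

func main() {
	logs := []*logEntry{
		{Identifier: []byte("delegate"), Topics: [][]byte{big.NewInt(10).Bytes()}},
		{},                                         // skipped: no topics
		{Topics: [][]byte{big.NewInt(10).Bytes()}}, // skipped: wrong identifier
	}
	fmt.Println(sumDelegateLogs(logs)) // 10
}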
- eei.SetStorageForAddress(d.governanceSCAddr, stakeLockKey, big.NewInt(0).SetUint64(10000).Bytes()) - - output := d.Execute(vmInput) - assert.Equal(t, vmcommon.UserError, output) - assert.Equal(t, eei.returnMessage, "stake is locked for voting") -} - func TestDelegationSystemSC_ExecuteUnDelegateAllFunds(t *testing.T) { t.Parallel() @@ -2093,6 +2058,10 @@ func TestDelegationSystemSC_ExecuteWithdraw(t *testing.T) { output := d.Execute(vmInput) assert.Equal(t, vmcommon.Ok, output) + output = d.Execute(vmInput) + assert.Equal(t, vmcommon.UserError, output) + assert.Equal(t, eei.returnMessage, "nothing to unBond") + gFundData, _ := d.getGlobalFundData() assert.Equal(t, big.NewInt(80), gFundData.TotalUnStaked) @@ -4576,6 +4545,7 @@ func TestDelegation_OptimizeRewardsComputation(t *testing.T) { vmInput.CallerAddr = delegator output = d.Execute(vmInput) + fmt.Println(eei.returnMessage) assert.Equal(t, vmcommon.Ok, output) destAcc, exists := eei.outputAccounts[string(vmInput.CallerAddr)] @@ -4758,7 +4728,9 @@ func createDefaultEeiArgs() VMContextArgs { InputParser: parsers.NewCallArgsParser(), ValidatorAccountsDB: &stateMock.AccountsStub{}, ChanceComputer: &mock.RaterMock{}, - EnableEpochsHandler: &testscommon.EnableEpochsHandlerStub{}, + EnableEpochsHandler: &testscommon.EnableEpochsHandlerStub{ + IsMultiClaimOnDelegationEnabledField: true, + }, } } diff --git a/vm/systemSmartContracts/eei.go b/vm/systemSmartContracts/eei.go index 6f03b8487fa..32da253277f 100644 --- a/vm/systemSmartContracts/eei.go +++ b/vm/systemSmartContracts/eei.go @@ -13,6 +13,8 @@ import ( vmcommon "github.com/multiversx/mx-chain-vm-common-go" ) +const transferValueOnly = "transferValueOnly" + type vmContext struct { blockChainHook vm.BlockchainHook cryptoHook vmcommon.CryptoHook @@ -147,7 +149,6 @@ func (host *vmContext) SetStorageForAddress(address []byte, key []byte, value [] if !exists { host.storageUpdate[strAdr] = make(map[string][]byte) } - length := len(value) host.storageUpdate[strAdr][string(key)] = make([]byte, length) copy(host.storageUpdate[strAdr][string(key)][:length], value[:length]) @@ -168,18 +169,10 @@ func (host *vmContext) GetBalance(addr []byte) *big.Int { } account, err := host.blockChainHook.GetUserAccount(addr) - if err == state.ErrAccNotFound { - return big.NewInt(0) - } if err != nil { - return nil + return big.NewInt(0) } - host.outputAccounts[strAdr] = &vmcommon.OutputAccount{ - Balance: big.NewInt(0).Set(account.GetBalance()), - BalanceDelta: big.NewInt(0), - Address: addr} - return account.GetBalance() } @@ -209,16 +202,7 @@ func (host *vmContext) SendGlobalSettingToAll(_ []byte, input []byte) { } } -// Transfer handles any necessary value transfer required and takes -// the necessary steps to create accounts -func (host *vmContext) Transfer( - destination []byte, - sender []byte, - value *big.Int, - input []byte, - gasLimit uint64, -) error { - +func (host *vmContext) getSenderDestination(sender, destination []byte) (*vmcommon.OutputAccount, *vmcommon.OutputAccount) { senderAcc, exists := host.outputAccounts[string(sender)] if !exists { senderAcc = &vmcommon.OutputAccount{ @@ -239,9 +223,31 @@ func (host *vmContext) Transfer( host.outputAccounts[string(destAcc.Address)] = destAcc } + return senderAcc, destAcc +} + +func (host *vmContext) transferValueOnly( + destination []byte, + sender []byte, + value *big.Int, +) { + senderAcc, destAcc := host.getSenderDestination(sender, destination) + _ = senderAcc.BalanceDelta.Sub(senderAcc.BalanceDelta, value) _ = 
destAcc.BalanceDelta.Add(destAcc.BalanceDelta, value) +} +// Transfer handles any necessary value transfer required and takes +// the necessary steps to create accounts +func (host *vmContext) Transfer( + destination []byte, + sender []byte, + value *big.Int, + input []byte, + gasLimit uint64, +) error { + host.transferValueOnly(destination, sender, value) + senderAcc, destAcc := host.getSenderDestination(sender, destination) outputTransfer := vmcommon.OutputTransfer{ Value: big.NewInt(0).Set(value), GasLimit: gasLimit, @@ -257,6 +263,21 @@ func (host *vmContext) Transfer( return nil } +// GetLogs returns the logs +func (host *vmContext) GetLogs() []*vmcommon.LogEntry { + return host.logs +} + +// GetTotalSentToUser returns the total sent to the specified address +func (host *vmContext) GetTotalSentToUser(dest []byte) *big.Int { + destination, exists := host.outputAccounts[string(dest)] + if !exists { + return big.NewInt(0) + } + + return destination.BalanceDelta +} + func (host *vmContext) copyToNewContext() *vmContext { newContext := vmContext{ storageUpdate: host.storageUpdate, @@ -295,6 +316,71 @@ func (host *vmContext) mergeContext(currContext *vmContext) { host.scAddress = currContext.scAddress } +func (host *vmContext) properMergeContexts(parentContext *vmContext, returnCode vmcommon.ReturnCode) { + if !host.enableEpochsHandler.IsMultiClaimOnDelegationEnabled() { + host.mergeContext(parentContext) + return + } + + host.scAddress = parentContext.scAddress + host.AddReturnMessage(parentContext.returnMessage) + if returnCode != vmcommon.Ok { + // no need to merge - revert was done - transaction will fail + return + } + + host.output = append(host.output, parentContext.output...) + for _, rightAccount := range parentContext.outputAccounts { + leftAccount, exist := host.outputAccounts[string(rightAccount.Address)] + if !exist { + leftAccount = &vmcommon.OutputAccount{ + Balance: big.NewInt(0), + BalanceDelta: big.NewInt(0), + Address: rightAccount.Address, + } + host.outputAccounts[string(rightAccount.Address)] = leftAccount + } + addOutputAccounts(leftAccount, rightAccount) + } +} + +func addOutputAccounts( + destination *vmcommon.OutputAccount, + rightAccount *vmcommon.OutputAccount, +) { + if len(rightAccount.Address) != 0 { + destination.Address = rightAccount.Address + } + if rightAccount.Balance != nil { + destination.Balance = rightAccount.Balance + } + if destination.BalanceDelta == nil { + destination.BalanceDelta = big.NewInt(0) + } + if rightAccount.BalanceDelta != nil { + destination.BalanceDelta.Add(destination.BalanceDelta, rightAccount.BalanceDelta) + } + if len(rightAccount.Code) > 0 { + destination.Code = rightAccount.Code + } + if len(rightAccount.CodeMetadata) > 0 { + destination.CodeMetadata = rightAccount.CodeMetadata + } + if rightAccount.Nonce > destination.Nonce { + destination.Nonce = rightAccount.Nonce + } + + destination.GasUsed += rightAccount.GasUsed + + if rightAccount.CodeDeployerAddress != nil { + destination.CodeDeployerAddress = rightAccount.CodeDeployerAddress + } + + destination.BytesAddedToStorage += rightAccount.BytesAddedToStorage + destination.BytesDeletedFromStorage += rightAccount.BytesDeletedFromStorage + destination.OutputTransfers = append(destination.OutputTransfers, rightAccount.OutputTransfers...) 
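properMergeContexts relies on addOutputAccounts to fold child-context accounts into the parent: Balance is overwritten when set, BalanceDelta accumulates, and output transfers append. A compact illustration of the delta accumulation on a simplified account type:

package main

import (
	"fmt"
	"math/big"
)

// account keeps only the two fields needed to show the merge rule
type account struct {
	Balance      *big.Int
	BalanceDelta *big.Int
}

// merge mirrors addOutputAccounts for these fields: Balance is overwritten
// when present, BalanceDelta is accumulated
func merge(dst, src *account) {
	if src.Balance != nil {
		dst.Balance = src.Balance
	}
	if dst.BalanceDelta == nil {
		dst.BalanceDelta = big.NewInt(0)
	}
	if src.BalanceDelta != nil {
		dst.BalanceDelta.Add(dst.BalanceDelta, src.BalanceDelta)
	}
}

func main() {
	dst := &account{Balance: big.NewInt(100), BalanceDelta: big.NewInt(5)}
	src := &account{BalanceDelta: big.NewInt(10)}
	merge(dst, src)
	fmt.Println(dst.Balance, dst.BalanceDelta) // 100 15
}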
+} + func (host *vmContext) createContractCallInput( destination []byte, sender []byte, @@ -328,6 +414,25 @@ func createDirectCallInput( return input } +func (host *vmContext) transferBeforeInternalExec(callInput *vmcommon.ContractCallInput, sender []byte) error { + if !host.enableEpochsHandler.IsMultiClaimOnDelegationEnabled() { + return host.Transfer(callInput.RecipientAddr, sender, callInput.CallValue, nil, 0) + } + host.transferValueOnly(callInput.RecipientAddr, sender, callInput.CallValue) + + if callInput.CallValue.Cmp(zero) > 0 { + logEntry := &vmcommon.LogEntry{ + Identifier: []byte(transferValueOnly), + Address: callInput.RecipientAddr, + Topics: [][]byte{sender, callInput.RecipientAddr, callInput.CallValue.Bytes()}, + Data: []byte{}, + } + host.AddLogEntry(logEntry) + } + + return nil +} + // DeploySystemSC will deploy a smart contract according to the input // will call the init function and merge the vmOutputs // will add to the system smart contracts container the new address @@ -345,7 +450,8 @@ func (host *vmContext) DeploySystemSC( } callInput := createDirectCallInput(newAddress, ownerAddress, value, initFunction, input) - err := host.Transfer(callInput.RecipientAddr, host.scAddress, callInput.CallValue, nil, 0) + + err := host.transferBeforeInternalExec(callInput, host.scAddress) if err != nil { return vmcommon.ExecutionFailed, err } @@ -402,16 +508,16 @@ func (host *vmContext) ExecuteOnDestContext(destination []byte, sender []byte, v return nil, err } - err = host.Transfer(callInput.RecipientAddr, callInput.CallerAddr, callInput.CallValue, nil, 0) + err = host.transferBeforeInternalExec(callInput, sender) if err != nil { return nil, err } - vmOutput := &vmcommon.VMOutput{} + vmOutput := &vmcommon.VMOutput{ReturnCode: vmcommon.UserError} currContext := host.copyToNewContext() defer func() { host.output = make([][]byte, 0) - host.mergeContext(currContext) + host.properMergeContexts(currContext, vmOutput.ReturnCode) }() host.softCleanCache() @@ -436,6 +542,7 @@ func (host *vmContext) ExecuteOnDestContext(destination []byte, sender []byte, v } else { // all changes must be deleted host.outputAccounts = make(map[string]*vmcommon.OutputAccount) + host.storageUpdate = currContext.storageUpdate } vmOutput.ReturnCode = returnCode vmOutput.ReturnMessage = host.returnMessage diff --git a/vm/systemSmartContracts/esdt.go b/vm/systemSmartContracts/esdt.go index 7b48d2c6912..bdd51e5ef93 100644 --- a/vm/systemSmartContracts/esdt.go +++ b/vm/systemSmartContracts/esdt.go @@ -427,7 +427,7 @@ func (e *esdt) registerMetaESDT(args *vmcommon.ContractCallInput) vmcommon.Retur logEntry := &vmcommon.LogEntry{ Identifier: []byte(args.Function), Address: args.CallerAddr, - Topics: [][]byte{tokenIdentifier, args.Arguments[0], args.Arguments[1], []byte(metaESDT)}, + Topics: [][]byte{tokenIdentifier, args.Arguments[0], args.Arguments[1], []byte(metaESDT), big.NewInt(int64(numOfDecimals)).Bytes()}, } e.eei.AddLogEntry(logEntry) @@ -812,6 +812,9 @@ func (e *esdt) burn(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { } e.eei.AddReturnMessage("token is not burnable") + if e.enableEpochsHandler.IsMultiClaimOnDelegationEnabled() { + return vmcommon.UserError + } return vmcommon.Ok } diff --git a/vm/systemSmartContracts/governance.go b/vm/systemSmartContracts/governance.go index 6f6886d6c02..2e97f2ccb97 100644 --- a/vm/systemSmartContracts/governance.go +++ b/vm/systemSmartContracts/governance.go @@ -4,7 +4,6 @@ package systemSmartContracts import ( "bytes" "fmt" - "math" "math/big" "sync" @@ -19,45 
+18,45 @@ import ( ) const governanceConfigKey = "governanceConfig" -const hardForkPrefix = "hardFork_" -const proposalPrefix = "proposal_" -const fundsLockPrefix = "foundsLock_" -const whiteListPrefix = "whiteList_" -const stakeLockPrefix = "stakeLock_" +const noncePrefix = "n_" +const proposalPrefix = "p_" const yesString = "yes" const noString = "no" const vetoString = "veto" -const hardForkEpochGracePeriod = 2 +const abstainString = "abstain" const commitHashLength = 40 +const maxPercentage = float64(10000.0) // ArgsNewGovernanceContract defines the arguments needed for the on-chain governance contract type ArgsNewGovernanceContract struct { - Eei vm.SystemEI - GasCost vm.GasCost - GovernanceConfig config.GovernanceSystemSCConfig - Marshalizer marshal.Marshalizer - Hasher hashing.Hasher - GovernanceSCAddress []byte - DelegationMgrSCAddress []byte - ValidatorSCAddress []byte - InitialWhiteListedAddresses [][]byte - EnableEpochsHandler common.EnableEpochsHandler + Eei vm.SystemEI + GasCost vm.GasCost + GovernanceConfig config.GovernanceSystemSCConfig + Marshalizer marshal.Marshalizer + Hasher hashing.Hasher + GovernanceSCAddress []byte + DelegationMgrSCAddress []byte + ValidatorSCAddress []byte + ConfigChangeAddress []byte + UnBondPeriodInEpochs uint32 + EnableEpochsHandler common.EnableEpochsHandler } type governanceContract struct { - eei vm.SystemEI - gasCost vm.GasCost - baseProposalCost *big.Int - ownerAddress []byte - governanceSCAddress []byte - delegationMgrSCAddress []byte - validatorSCAddress []byte - marshalizer marshal.Marshalizer - hasher hashing.Hasher - governanceConfig config.GovernanceSystemSCConfig - initialWhiteListedAddresses [][]byte - enableEpochsHandler common.EnableEpochsHandler - mutExecution sync.RWMutex + eei vm.SystemEI + gasCost vm.GasCost + baseProposalCost *big.Int + ownerAddress []byte + governanceSCAddress []byte + delegationMgrSCAddress []byte + validatorSCAddress []byte + changeConfigAddress []byte + marshalizer marshal.Marshalizer + hasher hashing.Hasher + governanceConfig config.GovernanceSystemSCConfig + unBondPeriodInEpochs uint32 + enableEpochsHandler common.EnableEpochsHandler + mutExecution sync.RWMutex } // NewGovernanceContract creates a new governance smart contract @@ -75,8 +74,7 @@ func NewGovernanceContract(args ArgsNewGovernanceContract) (*governanceContract, return nil, vm.ErrNilEnableEpochsHandler } - activeConfig := args.GovernanceConfig.Active - baseProposalCost, okConvert := big.NewInt(0).SetString(activeConfig.ProposalCost, conversionBase) + baseProposalCost, okConvert := big.NewInt(0).SetString(args.GovernanceConfig.V1.ProposalCost, conversionBase) if !okConvert || baseProposalCost.Cmp(zero) < 0 { return nil, vm.ErrInvalidBaseIssuingCost } @@ -90,6 +88,9 @@ func NewGovernanceContract(args ArgsNewGovernanceContract) (*governanceContract, if len(args.GovernanceSCAddress) < 1 { return nil, fmt.Errorf("%w for governance sc address", vm.ErrInvalidAddress) } + if len(args.ConfigChangeAddress) < 1 { + return nil, fmt.Errorf("%w for change config address", vm.ErrInvalidAddress) + } g := &governanceContract{ eei: args.Eei, @@ -103,14 +104,10 @@ func NewGovernanceContract(args ArgsNewGovernanceContract) (*governanceContract, hasher: args.Hasher, governanceConfig: args.GovernanceConfig, enableEpochsHandler: args.EnableEpochsHandler, + unBondPeriodInEpochs: args.UnBondPeriodInEpochs, + changeConfigAddress: args.ConfigChangeAddress, } - err := g.validateInitialWhiteListedAddresses(args.InitialWhiteListedAddresses) - if err != nil { - return nil, 
err - } - g.initialWhiteListedAddresses = args.InitialWhiteListedAddresses - return g, nil } @@ -145,22 +142,20 @@ func (g *governanceContract) Execute(args *vmcommon.ContractCallInput) vmcommon. return g.vote(args) case "delegateVote": return g.delegateVote(args) - case "voteWithFunds": - return g.voteWithFunds(args) - case "claimFunds": - return g.claimFunds(args) - case "whiteList": - return g.whiteListProposal(args) - case "hardFork": - return g.hardForkProposal(args) case "changeConfig": return g.changeConfig(args) case "closeProposal": return g.closeProposal(args) - case "getValidatorVotingPower": - return g.getValidatorVotingPower(args) - case "getBalanceVotingPower": - return g.getBalanceVotingPower(args) + case "viewVotingPower": + return g.viewVotingPower(args) + case "viewConfig": + return g.viewConfig(args) + case "viewUserVoteHistory": + return g.viewUserVoteHistory(args) + case "viewDelegatedVoteInfo": + return g.viewDelegatedVoteInfo(args) + case "viewProposal": + return g.viewProposal(args) } g.eei.AddReturnMessage("invalid method to call") @@ -187,31 +182,84 @@ func (g *governanceContract) init(args *vmcommon.ContractCallInput) vmcommon.Ret func (g *governanceContract) initV2(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { if !bytes.Equal(args.CallerAddr, g.governanceSCAddress) { - log.Error("invalid caller to switch to V2 config") + g.eei.AddReturnMessage("invalid caller to switch to V2 config") return vmcommon.UserError } cfg, err := g.convertV2Config(g.governanceConfig) if err != nil { - log.Error("could not create governance V2 config") + g.eei.AddReturnMessage("could not create governance V2 config") return vmcommon.UserError } - marshaledData, err := g.marshalizer.Marshal(cfg) + err = g.saveConfig(cfg) if err != nil { - log.Error("marshal error on governance init function") - return vmcommon.ExecutionFailed + g.eei.AddReturnMessage(err.Error()) + return vmcommon.UserError } - g.eei.SetStorage([]byte(governanceConfigKey), marshaledData) g.eei.SetStorage([]byte(ownerKey), args.CallerAddr) g.ownerAddress = make([]byte, 0, len(args.CallerAddr)) g.ownerAddress = append(g.ownerAddress, args.CallerAddr...) 
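initV2 now routes the marshalled config through saveConfig instead of writing storage inline, giving init and changeConfig a single persistence path. A minimal sketch of that save/load round trip, with JSON and an in-memory map standing in for the contract's marshalizer and eei storage:

package main

import (
	"encoding/json"
	"fmt"
	"math/big"
)

// governanceConfigV2 keeps only a few illustrative fields
type governanceConfigV2 struct {
	MinQuorum   float64
	ProposalFee *big.Int
}

// storage stands in for the eei key-value store
var storage = map[string][]byte{}

const governanceConfigKey = "governanceConfig"

func saveConfig(cfg *governanceConfigV2) error {
	data, err := json.Marshal(cfg) // the contract uses its own marshalizer
	if err != nil {
		return err
	}
	storage[governanceConfigKey] = data
	return nil
}

func getConfig() (*governanceConfigV2, error) {
	cfg := &governanceConfigV2{}
	err := json.Unmarshal(storage[governanceConfigKey], cfg)
	return cfg, err
}

func main() {
	_ = saveConfig(&governanceConfigV2{MinQuorum: 0.5, ProposalFee: big.NewInt(500)})
	cfg, _ := getConfig()
	fmt.Println(cfg.MinQuorum, cfg.ProposalFee) // 0.5 500
}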
- for _, address := range g.initialWhiteListedAddresses { - returnCode := g.whiteListAtGovernanceGenesis(address) - if returnCode != vmcommon.Ok { - return returnCode - } + return vmcommon.Ok +} + +// changeConfig allows the config change address to change the configuration for requesting proposals +// args.Arguments[0] - proposalFee - as string +// args.Arguments[1] - minQuorum - 0-10000 - represents percentage +// args.Arguments[2] - minVeto - 0-10000 - represents percentage +// args.Arguments[3] - minPass - 0-10000 - represents percentage +func (g *governanceContract) changeConfig(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { + if !bytes.Equal(g.changeConfigAddress, args.CallerAddr) { + g.eei.AddReturnMessage("changeConfig can be called only by the config change address") + return vmcommon.UserError + } + if args.CallValue.Cmp(zero) != 0 { + g.eei.AddReturnMessage("changeConfig can be called only without callValue") + return vmcommon.UserError + } + if len(args.Arguments) != 4 { + g.eei.AddReturnMessage("changeConfig needs 4 arguments") + return vmcommon.UserError + } + + proposalFee, okConvert := big.NewInt(0).SetString(string(args.Arguments[0]), conversionBase) + if !okConvert || proposalFee.Cmp(zero) <= 0 { + g.eei.AddReturnMessage("changeConfig first argument is incorrectly formatted") + return vmcommon.UserError + } + minQuorum, err := convertDecimalToPercentage(args.Arguments[1]) + if err != nil { + g.eei.AddReturnMessage(err.Error() + " minQuorum") + return vmcommon.UserError + } + minVeto, err := convertDecimalToPercentage(args.Arguments[2]) + if err != nil { + g.eei.AddReturnMessage(err.Error() + " minVeto") + return vmcommon.UserError + } + minPass, err := convertDecimalToPercentage(args.Arguments[3]) + if err != nil { + g.eei.AddReturnMessage(err.Error() + " minPass") + return vmcommon.UserError + } + + scConfig, err := g.getConfig() + if err != nil { + g.eei.AddReturnMessage("changeConfig error " + err.Error()) + return vmcommon.UserError + } + + scConfig.MinQuorum = minQuorum + scConfig.MinVetoThreshold = minVeto + scConfig.MinPassThreshold = minPass + scConfig.ProposalFee = proposalFee + + g.baseProposalCost.Set(proposalFee) + err = g.saveConfig(scConfig) + if err != nil { + g.eei.AddReturnMessage(err.Error()) + return vmcommon.UserError } return vmcommon.Ok @@ -219,10 +267,6 @@ func (g *governanceContract) initV2(args *vmcommon.ContractCallInput) vmcommon.R // proposal creates a new proposal from passed arguments func (g *governanceContract) proposal(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { - if args.CallValue.Cmp(g.baseProposalCost) != 0 { - g.eei.AddReturnMessage("invalid proposal cost, expected " + g.baseProposalCost.String()) - return vmcommon.OutOfFunds - } err := g.eei.UseGas(g.gasCost.MetaChainSystemSCsCost.Proposal) if err != nil { g.eei.AddReturnMessage("not enough gas") @@ -232,10 +276,24 @@ func (g *governanceContract) proposal(args *vmcommon.ContractCallInput) vmcommon g.eei.AddReturnMessage("invalid number of arguments, expected 3") return vmcommon.FunctionWrongSignature } - if !g.isWhiteListed(args.CallerAddr) { - g.eei.AddReturnMessage("called address is not whiteListed") + generalConfig, err := g.getConfig() + if err != nil { + g.eei.AddReturnMessage(err.Error()) + return vmcommon.UserError + } + if args.CallValue.Cmp(generalConfig.ProposalFee) != 0 { + g.eei.AddReturnMessage("invalid value provided, expected " + generalConfig.ProposalFee.String()) + return vmcommon.OutOfFunds + } + + generalConfig.LastProposalNonce++ + nextNonce := generalConfig.LastProposalNonce + 
err = g.saveConfig(generalConfig) + if err != nil { + g.eei.AddReturnMessage(err.Error()) return vmcommon.UserError } + commitHash := args.Arguments[0] if len(commitHash) != commitHashLength { g.eei.AddReturnMessage(fmt.Sprintf("invalid github commit length, wanted exactly %d", commitHashLength)) @@ -246,36 +304,49 @@ func (g *governanceContract) proposal(args *vmcommon.ContractCallInput) vmcommon return vmcommon.UserError } - startVoteNonce, endVoteNonce, err := g.startEndNonceFromArguments(args.Arguments[1], args.Arguments[2]) + startVoteEpoch, endVoteEpoch, err := g.startEndEpochFromArguments(args.Arguments[1], args.Arguments[2]) if err != nil { - g.eei.AddReturnMessage("invalid start/end vote nonce " + err.Error()) + g.eei.AddReturnMessage(err.Error()) return vmcommon.UserError } generalProposal := &GeneralProposal{ IssuerAddress: args.CallerAddr, CommitHash: commitHash, - StartVoteNonce: startVoteNonce, - EndVoteNonce: endVoteNonce, + StartVoteEpoch: startVoteEpoch, + EndVoteEpoch: endVoteEpoch, Yes: big.NewInt(0), No: big.NewInt(0), Veto: big.NewInt(0), + Abstain: big.NewInt(0), + QuorumStake: big.NewInt(0), Passed: false, - Votes: make([][]byte, 0), + ProposalCost: generalConfig.ProposalFee, + Nonce: nextNonce, } err = g.saveGeneralProposal(commitHash, generalProposal) if err != nil { - log.Warn("saveGeneralProposal", "err", err) g.eei.AddReturnMessage("saveGeneralProposal " + err.Error()) return vmcommon.UserError } + nonceAsBytes := big.NewInt(0).SetUint64(nextNonce).Bytes() + nonceKey := append([]byte(noncePrefix), nonceAsBytes...) + g.eei.SetStorage(nonceKey, commitHash) + + logEntry := &vmcommon.LogEntry{ + Identifier: []byte(args.Function), + Address: args.CallerAddr, + Topics: [][]byte{nonceAsBytes, commitHash, args.Arguments[1], args.Arguments[2]}, + } + g.eei.AddLogEntry(logEntry) + return vmcommon.Ok } // vote casts a vote for a validator/delegation. 
This function receives 2 parameters and will vote with its full delegation + validator amount -// args.Arguments[0] - proposal reference (GitHub commit) -// args.Arguments[1] - vote option (yes, no, veto) +// args.Arguments[0] - reference - nonce as string +// args.Arguments[1] - vote option (yes, no, veto, abstain) func (g *governanceContract) vote(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { if args.CallValue.Cmp(zero) != 0 { g.eei.AddReturnMessage("function is not payable") @@ -290,80 +361,43 @@ func (g *governanceContract) vote(args *vmcommon.ContractCallInput) vmcommon.Ret g.eei.AddReturnMessage("invalid number of arguments, expected 2") return vmcommon.FunctionWrongSignature } - - voterAddress := args.CallerAddr - proposalToVote := args.Arguments[0] - proposal, err := g.getValidProposal(proposalToVote) - if err != nil { - g.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } - voteOption, err := g.castVoteType(string(args.Arguments[1])) - if err != nil { - g.eei.AddReturnMessage(err.Error()) + if core.IsSmartContractAddress(args.CallerAddr) { + g.eei.AddReturnMessage("only user can call this") return vmcommon.UserError } - currentVoteSet, err := g.getOrCreateVoteSet(append(proposalToVote, voterAddress...)) + voterAddress := args.CallerAddr + proposalToVote := args.Arguments[0] + totalStake, totalVotingPower, err := g.computeTotalStakeAndVotingPower(voterAddress) if err != nil { g.eei.AddReturnMessage(err.Error()) - return vmcommon.ExecutionFailed - } - if len(currentVoteSet.VoteItems) > 0 { - g.eei.AddReturnMessage("vote only once") return vmcommon.UserError } - totalVotingPower, err := g.computeVotingPowerFromTotalStake(voterAddress) + err = g.addUserVote( + voterAddress, + proposalToVote, + string(args.Arguments[1]), + totalVotingPower, + totalStake, + true) if err != nil { g.eei.AddReturnMessage(err.Error()) return vmcommon.UserError } - // clean all the read storage - g.eei.CleanStorageUpdates() - currentVote := &VoteDetails{ - Value: voteOption, - Power: totalVotingPower, - Balance: big.NewInt(0), - } - err = g.addNewVote(voterAddress, currentVote, currentVoteSet, proposal) - if err != nil { - g.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError + logEntry := &vmcommon.LogEntry{ + Identifier: []byte(args.Function), + Address: args.CallerAddr, + Topics: [][]byte{proposalToVote, args.Arguments[1], totalStake.Bytes(), totalVotingPower.Bytes()}, } - - g.lockStake(voterAddress, proposal.EndVoteNonce) + g.eei.AddLogEntry(logEntry) return vmcommon.Ok } -func (g *governanceContract) lockStake(address []byte, endNonce uint64) { - stakeLockKey := append([]byte(stakeLockPrefix), address...) - lastData := g.eei.GetStorage(stakeLockKey) - lastEndNonce := uint64(0) - if len(lastData) > 0 { - lastEndNonce = big.NewInt(0).SetBytes(lastData).Uint64() - } - - if lastEndNonce < endNonce { - g.eei.SetStorage(stakeLockKey, big.NewInt(0).SetUint64(endNonce).Bytes()) - } -} - -func isStakeLocked(eei vm.SystemEI, governanceAddress []byte, address []byte) bool { - stakeLockKey := append([]byte(stakeLockPrefix), address...) - lastData := eei.GetStorageFromAddress(governanceAddress, stakeLockKey) - if len(lastData) == 0 { - return false - } - - lastEndNonce := big.NewInt(0).SetBytes(lastData).Uint64() - return eei.BlockChainHook().CurrentNonce() < lastEndNonce -} - // delegateVote casts a vote from a validator run by WASM SC and delegates it to someone else. 
This function receives 4 parameters: -// args.Arguments[0] - proposal reference (GitHub commit) +// args.Arguments[0] - proposal reference - nonce of proposal // args.Arguments[1] - vote option (yes, no, veto) // args.Arguments[2] - delegatedTo // args.Arguments[3] - balance to vote @@ -385,62 +419,143 @@ func (g *governanceContract) delegateVote(args *vmcommon.ContractCallInput) vmco g.eei.AddReturnMessage("only SC can call this") return vmcommon.UserError } - if len(args.Arguments[3]) != len(args.CallerAddr) { + voter := args.Arguments[2] + if len(voter) != len(args.CallerAddr) { g.eei.AddReturnMessage("invalid delegator address") return vmcommon.UserError } - voterAddress := args.CallerAddr proposalToVote := args.Arguments[0] - proposal, err := g.getValidProposal(proposalToVote) + userStake := big.NewInt(0).SetBytes(args.Arguments[3]) + + scDelegatedVoteInfo, votePower, err := g.computeDelegatedVotePower(args.CallerAddr, proposalToVote, userStake) if err != nil { g.eei.AddReturnMessage(err.Error()) return vmcommon.UserError } - voteOption, err := g.castVoteType(string(args.Arguments[1])) + + err = g.updateDelegatedContractInfo(args.CallerAddr, proposalToVote, scDelegatedVoteInfo, userStake, votePower) if err != nil { g.eei.AddReturnMessage(err.Error()) return vmcommon.UserError } - votePower, err := g.computeVotingPower(big.NewInt(0).SetBytes(args.Arguments[2])) + err = g.addUserVote( + voter, + proposalToVote, + string(args.Arguments[1]), + votePower, + userStake, + false) if err != nil { g.eei.AddReturnMessage(err.Error()) return vmcommon.UserError } - delegatedTo := args.Arguments[3] - currentVote := &VoteDetails{ - Value: voteOption, - Power: votePower, - DelegatedTo: delegatedTo, - Balance: big.NewInt(0), + logEntry := &vmcommon.LogEntry{ + Identifier: []byte(args.Function), + Address: args.CallerAddr, + Topics: [][]byte{proposalToVote, args.Arguments[1], voter, userStake.Bytes(), votePower.Bytes()}, } + g.eei.AddLogEntry(logEntry) + + return vmcommon.Ok +} - totalVotingPower, err := g.computeValidatorVotingPower(voterAddress) +func (g *governanceContract) computeDelegatedVotePower( + scAddress []byte, + reference []byte, + balance *big.Int, +) (*DelegatedSCVoteInfo, *big.Int, error) { + scVoteInfo, err := g.getDelegatedContractInfo(scAddress, reference) if err != nil { - g.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError + return nil, nil, err + } + + totalPower := big.NewInt(0).Set(scVoteInfo.TotalPower) + votePower := big.NewInt(0).Mul(totalPower, balance) + votePower.Div(votePower, scVoteInfo.TotalStake) + return scVoteInfo, votePower, nil +} + +func (g *governanceContract) updateDelegatedContractInfo( + scAddress []byte, + reference []byte, + scVoteInfo *DelegatedSCVoteInfo, + balance *big.Int, + votePower *big.Int, +) error { + scVoteInfo.UsedPower.Add(scVoteInfo.UsedPower, votePower) + if scVoteInfo.TotalPower.Cmp(scVoteInfo.UsedPower) < 0 { + return vm.ErrNotEnoughVotingPower + } + + scVoteInfo.UsedStake.Add(scVoteInfo.UsedStake, balance) + if scVoteInfo.TotalStake.Cmp(scVoteInfo.UsedStake) < 0 { + return vm.ErrNotEnoughVotingPower } - currentVoteSet, err := g.getOrCreateVoteSet(append(proposalToVote, voterAddress...)) + return g.saveDelegatedContractInfo(scAddress, scVoteInfo, reference) +} + +func (g *governanceContract) addUserVote( + address []byte, + nonceAsBytes []byte, + vote string, + totalVotingPower *big.Int, + totalStake *big.Int, + direct bool, +) error { + nonce := big.NewInt(0).SetBytes(nonceAsBytes) + err := g.updateUserVoteList(address, 
nonce.Uint64(), direct) if err != nil { - g.eei.AddReturnMessage(err.Error()) - return vmcommon.ExecutionFailed + return err } - if totalVotingPower.Cmp(big.NewInt(0).Add(votePower, currentVoteSet.UsedPower)) < 0 { - g.eei.AddReturnMessage("not enough voting power to cast this vote") - return vmcommon.UserError + + proposal, err := g.getValidProposal(nonce) + if err != nil { + return err } - err = g.addNewVote(voterAddress, currentVote, currentVoteSet, proposal) + err = g.addNewVote(vote, totalVotingPower, proposal) if err != nil { - g.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError + return err } - g.lockStake(voterAddress, proposal.EndVoteNonce) - return vmcommon.Ok + proposal.QuorumStake.Add(proposal.QuorumStake, totalStake) + return g.saveGeneralProposal(proposal.CommitHash, proposal) +} + +func (g *governanceContract) updateUserVoteList(address []byte, nonce uint64, direct bool) error { + userVoteList, err := g.getUserVotes(address) + if err != nil { + return err + } + + if direct { + userVoteList.Direct, err = addNewNonce(userVoteList.Direct, nonce) + if err != nil { + return err + } + } else { + userVoteList.Delegated, err = addNewNonce(userVoteList.Delegated, nonce) + if err != nil { + return err + } + } + + return g.saveUserVotes(address, userVoteList) +} + +func addNewNonce(nonceList []uint64, newNonce uint64) ([]uint64, error) { + for _, nonce := range nonceList { + if newNonce == nonce { + return nil, vm.ErrDoubleVote + } + } + + nonceList = append(nonceList, newNonce) + return nonceList, nil } func (g *governanceContract) getMinValueToVote() (*big.Int, error) { @@ -452,971 +567,549 @@ func (g *governanceContract) getMinValueToVote() (*big.Int, error) { return delegationManagement.MinDelegationAmount, nil } -func (g *governanceContract) getVoteSetKeyForVoteWithFunds(proposalToVote, address []byte) []byte { - key := append(proposalToVote, address...) - key = append([]byte(fundsLockPrefix), key...) - return key -} - -// voteWithFunds casts a vote taking the transaction value as input for the vote power. 
It receives 2 arguments: -// args.Arguments[0] - proposal reference (GitHub commit) -// args.Arguments[1] - vote option (yes, no, veto) -func (g *governanceContract) voteWithFunds(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { - err := g.eei.UseGas(g.gasCost.MetaChainSystemSCsCost.Vote) +// closeProposal generates and saves end results for a proposal +func (g *governanceContract) closeProposal(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { + if args.CallValue.Cmp(zero) != 0 { + g.eei.AddReturnMessage("closeProposal callValue expected to be 0") + return vmcommon.UserError + } + if len(args.Arguments) != 1 { + g.eei.AddReturnMessage("invalid number of arguments expected 1") + return vmcommon.UserError + } + err := g.eei.UseGas(g.gasCost.MetaChainSystemSCsCost.CloseProposal) if err != nil { g.eei.AddReturnMessage("not enough gas") return vmcommon.OutOfGas } - if len(args.Arguments) != 2 { - g.eei.AddReturnMessage("invalid number of arguments, expected 2") - return vmcommon.FunctionWrongSignature - } - minValueToVote, err := g.getMinValueToVote() + + nonce := big.NewInt(0).SetBytes(args.Arguments[0]) + generalProposal, err := g.getProposalFromNonce(nonce) if err != nil { - g.eei.AddReturnMessage(err.Error()) + g.eei.AddReturnMessage("getGeneralProposal error " + err.Error()) return vmcommon.UserError } - if args.CallValue.Cmp(minValueToVote) < 0 { - g.eei.AddReturnMessage("not enough funds to vote") + if generalProposal.Closed { + g.eei.AddReturnMessage("proposal is already closed, do nothing") return vmcommon.UserError } - - voterAddress := args.CallerAddr - proposalToVote := args.Arguments[0] - proposal, err := g.getValidProposal(proposalToVote) - if err != nil { - g.eei.AddReturnMessage(err.Error()) + if !bytes.Equal(generalProposal.IssuerAddress, args.CallerAddr) { + g.eei.AddReturnMessage("only the issuer can close the proposal") return vmcommon.UserError } - voteOption, err := g.castVoteType(string(args.Arguments[1])) - if err != nil { - g.eei.AddReturnMessage(err.Error()) + + currentEpoch := g.eei.BlockChainHook().CurrentEpoch() + if uint64(currentEpoch) < generalProposal.EndVoteEpoch { + g.eei.AddReturnMessage(fmt.Sprintf("proposal can be closed only after epoch %d", generalProposal.EndVoteEpoch)) return vmcommon.UserError } - voteKey := g.getVoteSetKeyForVoteWithFunds(proposalToVote, voterAddress) - currentVoteSet, err := g.getOrCreateVoteSet(voteKey) + generalProposal.Closed = true + err = g.computeEndResults(generalProposal) if err != nil { - g.eei.AddReturnMessage(err.Error()) - return vmcommon.ExecutionFailed + g.eei.AddReturnMessage("computeEndResults error " + err.Error()) + return vmcommon.UserError } - lenVoteSet := len(currentVoteSet.VoteItems) - if lenVoteSet > 0 { - lastVote := currentVoteSet.VoteItems[lenVoteSet-1] - if lastVote.Value != voteOption { - g.eei.AddReturnMessage("conflicting votes for same proposal") - return vmcommon.UserError - } + err = g.saveGeneralProposal(generalProposal.CommitHash, generalProposal) + if err != nil { + g.eei.AddReturnMessage("saveGeneralProposal error " + err.Error()) + return vmcommon.UserError } - votePower, err := g.computeAccountLeveledPower(args.CallValue, currentVoteSet) + err = g.eei.Transfer(args.CallerAddr, args.RecipientAddr, generalProposal.ProposalCost, nil, 0) if err != nil { g.eei.AddReturnMessage(err.Error()) return vmcommon.UserError } - currentVote := &VoteDetails{ - Value: voteOption, - Power: votePower, - Balance: args.CallValue, + logEntry := &vmcommon.LogEntry{ + Identifier: []byte(args.Function), + 
Address: args.CallerAddr, + Topics: [][]byte{generalProposal.CommitHash, boolToSlice(generalProposal.Passed)}, } + g.eei.AddLogEntry(logEntry) + + return vmcommon.Ok +} - newVoteSet, updatedProposal, err := g.applyVote(currentVote, currentVoteSet, proposal) +// viewVotingPower returns the total voting power +func (g *governanceContract) viewVotingPower(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { + err := g.checkViewFuncArguments(args, 1) if err != nil { g.eei.AddReturnMessage(err.Error()) return vmcommon.UserError } - - err = g.saveVoteSet(voterAddress, newVoteSet, updatedProposal) - if err != nil { - g.eei.AddReturnMessage(err.Error()) + validatorAddress := args.Arguments[0] + if len(validatorAddress) != len(args.CallerAddr) { + g.eei.AddReturnMessage("invalid address") return vmcommon.UserError } - err = g.saveGeneralProposal(proposal.CommitHash, proposal) + _, votingPower, err := g.computeTotalStakeAndVotingPower(validatorAddress) if err != nil { g.eei.AddReturnMessage(err.Error()) return vmcommon.UserError } + g.eei.Finish(votingPower.Bytes()) + return vmcommon.Ok } -// claimFunds returns the used funds for a particular proposal if they are unlocked. Accepts a single parameter: -// args.Arguments[0] - proposal reference -func (g *governanceContract) claimFunds(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { - if args.CallValue.Cmp(big.NewInt(0)) != 0 { - g.eei.AddReturnMessage("invalid callValue, should be 0") +func (g *governanceContract) viewConfig(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { + err := g.checkViewFuncArguments(args, 0) + if err != nil { + g.eei.AddReturnMessage(err.Error()) return vmcommon.UserError } - err := g.eei.UseGas(g.gasCost.MetaChainSystemSCsCost.Claim) + + gConfig, err := g.getConfig() if err != nil { - g.eei.AddReturnMessage("not enough gas") - return vmcommon.OutOfGas + g.eei.AddReturnMessage(err.Error()) + return vmcommon.UserError } - if len(args.Arguments) != 1 { - g.eei.AddReturnMessage("invalid number of arguments, expected 1") - return vmcommon.FunctionWrongSignature - } + g.eei.Finish([]byte(gConfig.ProposalFee.String())) + g.eei.Finish([]byte(big.NewFloat(float64(gConfig.MinQuorum)).String())) + g.eei.Finish([]byte(big.NewFloat(float64(gConfig.MinPassThreshold)).String())) + g.eei.Finish([]byte(big.NewFloat(float64(gConfig.MinVetoThreshold)).String())) + g.eei.Finish([]byte(big.NewInt(int64(gConfig.LastProposalNonce)).String())) - endNonce := g.getEndNonceForProposal(args.Arguments[0]) - currentNonce := g.eei.BlockChainHook().CurrentNonce() + return vmcommon.Ok +} - if endNonce > currentNonce { - g.eei.AddReturnMessage("your funds are still locked") +func (g *governanceContract) viewUserVoteHistory(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { + err := g.checkViewFuncArguments(args, 1) + if err != nil { + g.eei.AddReturnMessage(err.Error()) return vmcommon.UserError } - voteKey := g.getVoteSetKeyForVoteWithFunds(args.Arguments[0], args.CallerAddr) - currentVoteSet, err := g.getOrCreateVoteSet(voteKey) + userVotes, err := g.getUserVotes(args.Arguments[0]) if err != nil { g.eei.AddReturnMessage(err.Error()) - return vmcommon.ExecutionFailed - } - if currentVoteSet.UsedBalance.Cmp(zero) <= 0 { - g.eei.AddReturnMessage("no funds to claim for this proposal") return vmcommon.UserError } - g.eei.SetStorage(voteKey, nil) - - err = g.eei.Transfer(args.CallerAddr, g.governanceSCAddress, currentVoteSet.UsedBalance, nil, 0) - if err != nil { - g.eei.AddReturnMessage("transfer error on claimFunds function") - return 
vmcommon.ExecutionFailed - } + g.eei.Finish([]byte(userVotes.String())) return vmcommon.Ok } -// whiteListProposal will create a new proposal to white list an address -func (g *governanceContract) whiteListProposal(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { - if args.CallValue.Cmp(g.baseProposalCost) != 0 { - g.eei.AddReturnMessage("invalid callValue, needs exactly " + g.baseProposalCost.String()) - return vmcommon.OutOfFunds - } - err := g.eei.UseGas(g.gasCost.MetaChainSystemSCsCost.Proposal) +func (g *governanceContract) viewProposal(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { + err := g.checkViewFuncArguments(args, 1) if err != nil { - g.eei.AddReturnMessage("not enough gas") - return vmcommon.OutOfGas - } - if len(args.Arguments) != 3 { - g.eei.AddReturnMessage("invalid number of arguments") - return vmcommon.FunctionWrongSignature - } - if g.proposalExists(args.Arguments[0]) { - g.eei.AddReturnMessage("cannot re-propose existing proposal") - return vmcommon.UserError - } - if g.isWhiteListed(args.CallerAddr) { - g.eei.AddReturnMessage("address is already whitelisted") - return vmcommon.UserError - } - if len(args.Arguments[0]) != commitHashLength { - g.eei.AddReturnMessage(fmt.Sprintf("invalid github commit length, wanted exactly %d", commitHashLength)) + g.eei.AddReturnMessage(err.Error()) return vmcommon.UserError } - startVoteNonce, endVoteNonce, err := g.startEndNonceFromArguments(args.Arguments[1], args.Arguments[2]) + proposal, err := g.getProposalFromNonce(big.NewInt(0).SetBytes(args.Arguments[0])) if err != nil { - g.eei.AddReturnMessage("invalid start/end vote nonce " + err.Error()) + g.eei.AddReturnMessage(err.Error()) return vmcommon.UserError } - key := append([]byte(proposalPrefix), args.CallerAddr...) - whiteListAcc := &WhiteListProposal{ - WhiteListAddress: args.CallerAddr, - ProposalStatus: key, - } + g.eei.Finish(proposal.ProposalCost.Bytes()) + g.eei.Finish(proposal.CommitHash) + g.eei.Finish(big.NewInt(0).SetUint64(proposal.Nonce).Bytes()) + g.eei.Finish(proposal.IssuerAddress) + g.eei.Finish(big.NewInt(0).SetUint64(proposal.StartVoteEpoch).Bytes()) + g.eei.Finish(big.NewInt(0).SetUint64(proposal.EndVoteEpoch).Bytes()) + g.eei.Finish(proposal.QuorumStake.Bytes()) + g.eei.Finish(proposal.Yes.Bytes()) + g.eei.Finish(proposal.No.Bytes()) + g.eei.Finish(proposal.Veto.Bytes()) + g.eei.Finish(proposal.Abstain.Bytes()) + g.eei.Finish(boolToSlice(proposal.Closed)) + g.eei.Finish(boolToSlice(proposal.Passed)) - key = append([]byte(whiteListPrefix), args.CallerAddr...) 
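
// Illustration only, not part of the patch: viewConfig above Finish-es every
// config field as a decimal string, so an off-chain caller can parse the five
// returned items with the stdlib. The sample values are hypothetical.
package main

import (
	"fmt"
	"math/big"
	"strconv"
)

func main() {
	// hypothetical reply of a viewConfig query, in Finish order:
	// proposalFee, minQuorum, minPassThreshold, minVetoThreshold, lastProposalNonce
	returned := []string{"1000000000000000000000", "0.2", "0.5", "0.33", "7"}

	proposalFee, ok := big.NewInt(0).SetString(returned[0], 10)
	if !ok {
		panic("malformed proposal fee")
	}
	minQuorum, _ := strconv.ParseFloat(returned[1], 32) // fraction of the total staked value
	minPass, _ := strconv.ParseFloat(returned[2], 32)   // fraction of the votes cast
	minVeto, _ := strconv.ParseFloat(returned[3], 32)   // fraction of the votes cast
	lastNonce, _ := strconv.ParseUint(returned[4], 10, 64)

	fmt.Println(proposalFee, minQuorum, minPass, minVeto, lastNonce)
}
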
- generalProposal := &GeneralProposal{ - IssuerAddress: args.CallerAddr, - CommitHash: args.Arguments[0], - StartVoteNonce: startVoteNonce, - EndVoteNonce: endVoteNonce, - Yes: big.NewInt(0), - No: big.NewInt(0), - Veto: big.NewInt(0), - Passed: false, - Votes: make([][]byte, 0), - } + return vmcommon.Ok +} - marshaledData, err := g.marshalizer.Marshal(whiteListAcc) +func (g *governanceContract) viewDelegatedVoteInfo(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { + err := g.checkViewFuncArguments(args, 2) if err != nil { - g.eei.AddReturnMessage("marshal error " + err.Error()) + g.eei.AddReturnMessage(err.Error()) return vmcommon.UserError } - g.eei.SetStorage(key, marshaledData) - err = g.saveGeneralProposal(args.CallerAddr, generalProposal) + scDelegatedInfo, err := g.getDelegatedContractInfo(args.Arguments[0], args.Arguments[1]) if err != nil { - g.eei.AddReturnMessage("save proposal error " + err.Error()) + g.eei.AddReturnMessage(err.Error()) return vmcommon.UserError } + g.eei.Finish(scDelegatedInfo.UsedStake.Bytes()) + g.eei.Finish(scDelegatedInfo.UsedPower.Bytes()) + g.eei.Finish(scDelegatedInfo.TotalStake.Bytes()) + g.eei.Finish(scDelegatedInfo.TotalPower.Bytes()) + return vmcommon.Ok } -// hardForkProposal creates a new proposal for a hard-fork -func (g *governanceContract) hardForkProposal(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { - if args.CallValue.Cmp(g.baseProposalCost) != 0 { - g.eei.AddReturnMessage("invalid proposal cost, expected " + g.baseProposalCost.String()) - return vmcommon.OutOfFunds - } - err := g.eei.UseGas(g.gasCost.MetaChainSystemSCsCost.Proposal) - if err != nil { - g.eei.AddReturnMessage("not enough gas") - return vmcommon.OutOfGas - } - if len(args.Arguments) != 5 { - g.eei.AddReturnMessage("invalid number of arguments, expected 5") - return vmcommon.FunctionWrongSignature +func (g *governanceContract) checkViewFuncArguments( + args *vmcommon.ContractCallInput, + numArgs int, +) error { + if !bytes.Equal(args.CallerAddr, args.RecipientAddr) { + return vm.ErrInvalidCaller } - if !g.isWhiteListed(args.CallerAddr) { - g.eei.AddReturnMessage("called address is not whiteListed") - return vmcommon.UserError + if args.CallValue.Cmp(zero) != 0 { + return vm.ErrCallValueMustBeZero } - commitHash := args.Arguments[2] - if len(commitHash) != commitHashLength { - g.eei.AddReturnMessage(fmt.Sprintf("invalid github commit length, wanted exactly %d", commitHashLength)) - return vmcommon.UserError - } - if g.proposalExists(commitHash) { - g.eei.AddReturnMessage("proposal already exists") - return vmcommon.UserError - } - - key := append([]byte(hardForkPrefix), commitHash...) 
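
// Illustration only, not part of the patch: the delegated-vote arithmetic in
// computeDelegatedVotePower/updateDelegatedContractInfo (surfaced by
// viewDelegatedVoteInfo above) reduces to integer proportionality plus an
// overspend guard. The amounts below are hypothetical.
package main

import (
	"fmt"
	"math/big"
)

func main() {
	totalStake := big.NewInt(10000) // stake the delegating SC controls
	totalPower := big.NewInt(100)   // voting power of that stake
	usedPower := big.NewInt(90)     // power already spent on this proposal

	balance := big.NewInt(2000) // delegator balance backing the new vote

	// votePower = totalPower * balance / totalStake (integer division truncates)
	votePower := big.NewInt(0).Mul(totalPower, balance)
	votePower.Div(votePower, totalStake) // 100 * 2000 / 10000 = 20

	// guard: the used power must never exceed the total power
	usedPower.Add(usedPower, votePower)
	if totalPower.Cmp(usedPower) < 0 {
		fmt.Println("rejected: vote of power", votePower, "would overspend") // 90 + 20 > 100
		return
	}
	fmt.Println("accepted with power", votePower)
}
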
- marshaledData := g.eei.GetStorage(key) - if len(marshaledData) != 0 { - g.eei.AddReturnMessage("hardFork proposal already exists") - return vmcommon.UserError + if len(args.Arguments) != numArgs { + return vm.ErrInvalidNumOfArguments } - startVoteNonce, endVoteNonce, err := g.startEndNonceFromArguments(args.Arguments[3], args.Arguments[4]) - if err != nil { - g.eei.AddReturnMessage("invalid start/end vote nonce" + err.Error()) - return vmcommon.UserError - } - - bigIntEpochToHardFork, okConvert := big.NewInt(0).SetString(string(args.Arguments[0]), conversionBase) - if !okConvert || !bigIntEpochToHardFork.IsUint64() { - g.eei.AddReturnMessage("invalid argument for epoch") - return vmcommon.UserError - } - - epochToHardFork := uint32(bigIntEpochToHardFork.Uint64()) - currentEpoch := g.eei.BlockChainHook().CurrentEpoch() - if epochToHardFork < currentEpoch && currentEpoch-epochToHardFork < hardForkEpochGracePeriod { - g.eei.AddReturnMessage("invalid epoch to hardFork") - return vmcommon.UserError - } - - key = append([]byte(proposalPrefix), commitHash...) - hardForkProposal := &HardForkProposal{ - EpochToHardFork: epochToHardFork, - NewSoftwareVersion: args.Arguments[1], - ProposalStatus: key, - } - - key = append([]byte(hardForkPrefix), commitHash...) - generalProposal := &GeneralProposal{ - IssuerAddress: args.CallerAddr, - CommitHash: commitHash, - StartVoteNonce: startVoteNonce, - EndVoteNonce: endVoteNonce, - Yes: big.NewInt(0), - No: big.NewInt(0), - Veto: big.NewInt(0), - Passed: false, - Votes: make([][]byte, 0), - } - marshaledData, err = g.marshalizer.Marshal(hardForkProposal) - if err != nil { - log.Warn("hardFork proposal marshal", "err", err) - g.eei.AddReturnMessage("marshal proposal" + err.Error()) - return vmcommon.UserError - } - g.eei.SetStorage(key, marshaledData) - - err = g.saveGeneralProposal(commitHash, generalProposal) - if err != nil { - log.Warn("save general proposal", "error", err) - g.eei.AddReturnMessage("saveGeneralProposal" + err.Error()) - return vmcommon.UserError - } - - return vmcommon.Ok + return nil } -// changeConfig allows the owner to change the configuration for requesting proposals -func (g *governanceContract) changeConfig(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { - if !bytes.Equal(g.ownerAddress, args.CallerAddr) { - g.eei.AddReturnMessage("changeConfig can be called only by owner") - return vmcommon.UserError - } - if args.CallValue.Cmp(zero) != 0 { - g.eei.AddReturnMessage("changeConfig can be called only without callValue") - return vmcommon.UserError - } - if len(args.Arguments) != 4 { - g.eei.AddReturnMessage("changeConfig needs 4 arguments") - return vmcommon.UserError +// addNewVote applies a new vote on a proposal then saves the new information into the storage +func (g *governanceContract) addNewVote(vote string, power *big.Int, proposal *GeneralProposal) error { + switch vote { + case yesString: + proposal.Yes.Add(proposal.Yes, power) + case noString: + proposal.No.Add(proposal.No, power) + case vetoString: + proposal.Veto.Add(proposal.Veto, power) + case abstainString: + proposal.Abstain.Add(proposal.Abstain, power) + default: + return fmt.Errorf("%s: %s", vm.ErrInvalidArgument, "invalid vote type") } - proposalFee, okConvert := big.NewInt(0).SetString(string(args.Arguments[0]), conversionBase) - if !okConvert || proposalFee.Cmp(zero) < 0 { - g.eei.AddReturnMessage("changeConfig first argument is incorrectly formatted") - return vmcommon.UserError - } - minQuorum, okConvert := 
big.NewInt(0).SetString(string(args.Arguments[1]), conversionBase) - if !okConvert || minQuorum.Cmp(zero) < 0 { - g.eei.AddReturnMessage("changeConfig second argument is incorrectly formatted") - return vmcommon.UserError - } - minVeto, okConvert := big.NewInt(0).SetString(string(args.Arguments[2]), conversionBase) - if !okConvert || minVeto.Cmp(zero) < 0 { - g.eei.AddReturnMessage("changeConfig third argument is incorrectly formatted") - return vmcommon.UserError - } - minPass, okConvert := big.NewInt(0).SetString(string(args.Arguments[3]), conversionBase) - if !okConvert || minPass.Cmp(zero) < 0 { - g.eei.AddReturnMessage("changeConfig fourth argument is incorrectly formatted") - return vmcommon.UserError - } + return nil +} - scConfig, err := g.getConfig() +// computeVotingPower returns the voting power for a value. The value can be either a balance or +// the staked value for a validator +func (g *governanceContract) computeVotingPower(value *big.Int) (*big.Int, error) { + minValue, err := g.getMinValueToVote() if err != nil { - g.eei.AddReturnMessage("changeConfig error " + err.Error()) - return vmcommon.UserError + return nil, err } - scConfig.MinQuorum = minQuorum - scConfig.MinVetoThreshold = minVeto - scConfig.MinPassThreshold = minPass - scConfig.ProposalFee = proposalFee - - marshaledData, err := g.marshalizer.Marshal(scConfig) - if err != nil { - g.eei.AddReturnMessage("changeConfig error " + err.Error()) - return vmcommon.UserError + if value.Cmp(minValue) < 0 { + return nil, vm.ErrNotEnoughStakeToVote } - g.eei.SetStorage([]byte(governanceConfigKey), marshaledData) - return vmcommon.Ok + return big.NewInt(0).Sqrt(value), nil } -// closeProposal generates and saves end results for a proposal -func (g *governanceContract) closeProposal(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { - if args.CallValue.Cmp(zero) != 0 { - g.eei.AddReturnMessage("closeProposal callValue expected to be 0") - return vmcommon.UserError - } - if !g.isWhiteListed(args.CallerAddr) { - g.eei.AddReturnMessage("caller is not whitelisted") - return vmcommon.UserError - } - if len(args.Arguments) != 1 { - g.eei.AddReturnMessage("invalid number of arguments expected 1") - return vmcommon.UserError - } - err := g.eei.UseGas(g.gasCost.MetaChainSystemSCsCost.CloseProposal) - if err != nil { - g.eei.AddReturnMessage("not enough gas") - return vmcommon.OutOfGas - } - - proposal := args.Arguments[0] - generalProposal, err := g.getGeneralProposal(proposal) +// function iterates over all delegation contracts and verifies balances of the given account and makes a sum of it +func (g *governanceContract) computeTotalStakeAndVotingPower(address []byte) (*big.Int, *big.Int, error) { + totalStake, err := g.getTotalStake(address) if err != nil { - g.eei.AddReturnMessage("getGeneralProposal error " + err.Error()) - return vmcommon.UserError - } - if generalProposal.Closed { - g.eei.AddReturnMessage("proposal is already closed, do nothing") - return vmcommon.Ok + return nil, nil, err } - currentNonce := g.eei.BlockChainHook().CurrentNonce() - if currentNonce < generalProposal.EndVoteNonce { - g.eei.AddReturnMessage(fmt.Sprintf("proposal can be closed only after nonce %d", generalProposal.EndVoteNonce)) - return vmcommon.UserError - } - - generalProposal.Closed = true - err = g.computeEndResults(generalProposal) + dContractList, err := getDelegationContractList(g.eei, g.marshalizer, g.delegationMgrSCAddress) if err != nil { - g.eei.AddReturnMessage("computeEndResults error" + err.Error()) - return vmcommon.UserError + 
return nil, nil, err } - g.deleteAllVotes(generalProposal) - - err = g.saveGeneralProposal(proposal, generalProposal) + err = g.eei.UseGas(g.gasCost.MetaChainSystemSCsCost.GetActiveFund * uint64(len(dContractList.Addresses))) if err != nil { - g.eei.AddReturnMessage("saveGeneralProposal error" + err.Error()) - return vmcommon.UserError + return nil, nil, err } - return vmcommon.Ok -} + var activeDelegated *big.Int + for _, contract := range dContractList.Addresses { + activeDelegated, err = g.getActiveFundForDelegator(contract, address) + if err != nil { + return nil, nil, err + } -//TODO: the problem is that voteKey has to be short - as these kind of lists can't be longer than 1MB -func (g *governanceContract) deleteAllVotes(proposal *GeneralProposal) { - for _, address := range proposal.Votes { - voteKey := getVoteItemKey(proposal.CommitHash, address) - g.eei.SetStorage(voteKey, nil) + totalStake.Add(totalStake, activeDelegated) } - proposal.Votes = make([][]byte, 0) -} -// getConfig returns the curent system smart contract configuration -func (g *governanceContract) getConfig() (*GovernanceConfigV2, error) { - marshaledData := g.eei.GetStorage([]byte(governanceConfigKey)) - scConfig := &GovernanceConfigV2{} - err := g.marshalizer.Unmarshal(scConfig, marshaledData) + votingPower, err := g.computeVotingPower(totalStake) if err != nil { - return nil, err + return nil, nil, err } - return scConfig, nil + return totalStake, votingPower, nil } -// getValidatorVotingPower returns the total voting power for a validator. Un-staked nodes are not -// taken into consideration -func (g *governanceContract) getValidatorVotingPower(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { - if args.CallValue.Cmp(zero) != 0 { - g.eei.AddReturnMessage(vm.TransactionValueMustBeZero) - return vmcommon.UserError - } - err := g.eei.UseGas(g.gasCost.MetaChainSystemSCsCost.Vote) - if err != nil { - g.eei.AddReturnMessage("not enough gas") - return vmcommon.OutOfGas - } - if len(args.Arguments) != 1 { - g.eei.AddReturnMessage("function accepts only one argument, the validator address") - return vmcommon.FunctionWrongSignature - } - validatorAddress := args.Arguments[0] - if len(validatorAddress) != len(args.CallerAddr) { - g.eei.AddReturnMessage("invalid argument - validator address") - return vmcommon.UserError - } - - votingPower, err := g.computeValidatorVotingPower(validatorAddress) - if err != nil { - g.eei.AddReturnMessage(err.Error()) - return vmcommon.ExecutionFailed - } - - g.eei.Finish(votingPower.Bytes()) - - return vmcommon.Ok +func (g *governanceContract) getTotalStakeInSystem() *big.Int { + return g.eei.GetBalance(g.validatorSCAddress) } -// getBalanceVotingPower returns the voting power associated with the value sent in the transaction by the user -func (g *governanceContract) getBalanceVotingPower(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { - if args.CallValue.Cmp(zero) != 0 { - g.eei.AddReturnMessage(vm.TransactionValueMustBeZero) - return vmcommon.UserError - } - if len(args.Arguments) != 1 { - g.eei.AddReturnMessage("function accepts only one argument, the balance for computing the power") - return vmcommon.FunctionWrongSignature - } - err := g.eei.UseGas(g.gasCost.MetaChainSystemSCsCost.Vote) +// computeEndResults computes if a proposal has passed or not based on votes accumulated +func (g *governanceContract) computeEndResults(proposal *GeneralProposal) error { + baseConfig, err := g.getConfig() if err != nil { - g.eei.AddReturnMessage("not enough gas") - return vmcommon.OutOfGas 
+ return err } - balance := big.NewInt(0).SetBytes(args.Arguments[0]) - votingPower, err := g.computeVotingPower(balance) - if err != nil { - g.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } + totalVotes := big.NewInt(0).Add(proposal.Yes, proposal.No) + totalVotes.Add(totalVotes, proposal.Veto) + totalVotes.Add(totalVotes, proposal.Abstain) - g.eei.Finish(votingPower.Bytes()) - return vmcommon.Ok -} + totalStake := g.getTotalStakeInSystem() + minQuorumOutOfStake := core.GetIntTrimmedPercentageOfValue(totalStake, float64(baseConfig.MinQuorum)) -// saveGeneralProposal saves a proposal into the storage -func (g *governanceContract) saveGeneralProposal(reference []byte, generalProposal *GeneralProposal) error { - marshaledData, err := g.marshalizer.Marshal(generalProposal) - if err != nil { - return err + if totalVotes.Cmp(minQuorumOutOfStake) == -1 { + g.eei.Finish([]byte("Proposal did not reach minQuorum")) + proposal.Passed = false + return nil } - key := append([]byte(proposalPrefix), reference...) - g.eei.SetStorage(key, marshaledData) - return nil -} - -func (g *governanceContract) getEndNonceForProposal(reference []byte) uint64 { - proposal, err := g.getGeneralProposal(reference) - if err == vm.ErrProposalNotFound { - return 0 + minVetoOfTotalVotes := core.GetIntTrimmedPercentageOfValue(totalVotes, float64(baseConfig.MinVetoThreshold)) + if proposal.Veto.Cmp(minVetoOfTotalVotes) >= 0 { + proposal.Passed = false + g.eei.Finish([]byte("Proposal vetoed")) + return nil } - if err != nil { - return math.MaxUint64 + minPassOfTotalVotes := core.GetIntTrimmedPercentageOfValue(totalVotes, float64(baseConfig.MinPassThreshold)) + if proposal.Yes.Cmp(minPassOfTotalVotes) >= 0 && proposal.Yes.Cmp(proposal.No) > 0 { + g.eei.Finish([]byte("Proposal passed")) + proposal.Passed = true + return nil } - return proposal.EndVoteNonce + g.eei.Finish([]byte("Proposal rejected")) + proposal.Passed = false + return nil } -// getGeneralProposal returns a proposal from storage -func (g *governanceContract) getGeneralProposal(reference []byte) (*GeneralProposal, error) { - key := append([]byte(proposalPrefix), reference...) - marshaledData := g.eei.GetStorage(key) - +func (g *governanceContract) getActiveFundForDelegator(delegationAddress []byte, address []byte) (*big.Int, error) { + marshaledData := g.eei.GetStorageFromAddress(delegationAddress, address) if len(marshaledData) == 0 { - return nil, vm.ErrProposalNotFound + return big.NewInt(0), nil } - generalProposal := &GeneralProposal{} - err := g.marshalizer.Unmarshal(generalProposal, marshaledData) + dData := &DelegatorData{} + err := g.marshalizer.Unmarshal(dData, marshaledData) if err != nil { return nil, err } - return generalProposal, nil -} - -// proposalExists returns true if a proposal already exists -func (g *governanceContract) proposalExists(reference []byte) bool { - key := append([]byte(proposalPrefix), reference...) 
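
// Illustration only, not part of the patch: a sketch of the decision order in
// computeEndResults above — quorum against the total staked value, then the
// veto threshold, then the pass threshold. percentageOf is a stand-in for
// core.GetIntTrimmedPercentageOfValue; the numbers are hypothetical.
package main

import (
	"fmt"
	"math/big"
)

// percentageOf takes `percent` (a fraction such as 0.2) of an integer value.
func percentageOf(value *big.Int, percent float32) *big.Int {
	p := big.NewInt(int64(percent * 10000)) // four decimals of precision
	out := big.NewInt(0).Mul(value, p)
	return out.Div(out, big.NewInt(10000))
}

func main() {
	totalStake := big.NewInt(1000000)
	yes, no := big.NewInt(150000), big.NewInt(40000)
	veto, abstain := big.NewInt(5000), big.NewInt(15000)

	totalVotes := big.NewInt(0).Add(yes, no)
	totalVotes.Add(totalVotes, veto)
	totalVotes.Add(totalVotes, abstain) // abstain raises the totals but never yes/no

	switch {
	case totalVotes.Cmp(percentageOf(totalStake, 0.20)) < 0:
		fmt.Println("did not reach minQuorum")
	case veto.Cmp(percentageOf(totalVotes, 0.33)) >= 0:
		fmt.Println("vetoed")
	case yes.Cmp(percentageOf(totalVotes, 0.50)) >= 0 && yes.Cmp(no) > 0:
		fmt.Println("passed") // 210000 >= 200000 quorum, veto below 33%, yes above 50%
	default:
		fmt.Println("rejected")
	}
}
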
- marshaledData := g.eei.GetStorage(key) - return len(marshaledData) > 0 -} + if len(dData.ActiveFund) == 0 { + return big.NewInt(0), nil + } -// getValidProposal returns a proposal from storage if it exists, or it is still valid/in-progress -func (g *governanceContract) getValidProposal(reference []byte) (*GeneralProposal, error) { - proposal, err := g.getGeneralProposal(reference) + marshaledData = g.eei.GetStorageFromAddress(delegationAddress, dData.ActiveFund) + activeFund := &Fund{} + err = g.marshalizer.Unmarshal(activeFund, marshaledData) if err != nil { return nil, err } - currentNonce := g.eei.BlockChainHook().CurrentNonce() - if currentNonce < proposal.StartVoteNonce { - return nil, vm.ErrVotingNotStartedForProposal - } - - if currentNonce > proposal.EndVoteNonce { - return nil, vm.ErrVotedForAnExpiredProposal + if activeFund.Value == nil { + activeFund.Value = big.NewInt(0) } - return proposal, nil + return activeFund.Value, nil } -// isWhiteListed checks if an address is whitelisted -func (g *governanceContract) isWhiteListed(address []byte) bool { - key := append([]byte(whiteListPrefix), address...) - marshaledData := g.eei.GetStorage(key) +func (g *governanceContract) getTotalStake(validatorAddress []byte) (*big.Int, error) { + marshaledData := g.eei.GetStorageFromAddress(g.validatorSCAddress, validatorAddress) if len(marshaledData) == 0 { - return false + return big.NewInt(0), nil } - key = append([]byte(proposalPrefix), address...) - marshaledData = g.eei.GetStorage(key) - generalProposal := &GeneralProposal{} - err := g.marshalizer.Unmarshal(generalProposal, marshaledData) + validatorData := &ValidatorDataV2{} + err := g.marshalizer.Unmarshal(validatorData, marshaledData) if err != nil { - return false - } - - return generalProposal.Passed -} - -func (g *governanceContract) whiteListAtGovernanceGenesis(address []byte) vmcommon.ReturnCode { - if g.proposalExists(address) { - log.Warn("proposal with this key already exists") - return vmcommon.UserError - } - - key := append([]byte(proposalPrefix), address...) - whiteListAcc := &WhiteListProposal{ - WhiteListAddress: address, - ProposalStatus: key, + return nil, err } - minQuorum, success := big.NewInt(0).SetString(g.governanceConfig.Active.MinQuorum, conversionBase) - if !success { - log.Warn("could not convert min quorum to bigInt") - return vmcommon.UserError + if validatorData.TotalStakeValue == nil { + validatorData.TotalStakeValue = big.NewInt(0) } - key = append([]byte(whiteListPrefix), address...) 
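
// Illustration only, not part of the patch: computeVotingPower above implements
// quadratic voting — power is the integer square root of stake, so four times
// the stake buys only twice the voice. Runnable with hypothetical amounts.
package main

import (
	"fmt"
	"math/big"
)

func main() {
	minValue := big.NewInt(10) // stand-in for the delegation manager's MinDelegationAmount

	for _, stake := range []*big.Int{big.NewInt(9), big.NewInt(2500), big.NewInt(10000)} {
		if stake.Cmp(minValue) < 0 {
			fmt.Println(stake, "-> not enough stake to vote")
			continue
		}
		power := big.NewInt(0).Sqrt(stake) // floor(sqrt(stake)), as computeVotingPower does
		fmt.Println(stake, "->", power)    // 2500 -> 50, 10000 -> 100
	}
}
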
- generalProposal := &GeneralProposal{ - IssuerAddress: address, - CommitHash: []byte("genesis"), - StartVoteNonce: 0, - EndVoteNonce: 0, - Yes: minQuorum, - No: big.NewInt(0), - Veto: big.NewInt(0), - Passed: true, - Votes: make([][]byte, 0), - } - marshaledData, err := g.marshalizer.Marshal(whiteListAcc) - if err != nil { - log.Warn("marshal error in whiteListAtGenesis", "err", err) - return vmcommon.UserError - } - g.eei.SetStorage(key, marshaledData) + return validatorData.TotalStakeValue, nil +} - err = g.saveGeneralProposal(address, generalProposal) +func (g *governanceContract) saveUserVotes(address []byte, votedList *OngoingVotedList) error { + marshaledData, err := g.marshalizer.Marshal(votedList) if err != nil { - log.Warn("save general proposal ", "err", err) - return vmcommon.UserError + return err } + g.eei.SetStorage(address, marshaledData) - return vmcommon.Ok + return nil } -// applyVote takes in a vote and a full VoteSet object and correctly applies the new vote, then returning -// the new full VoteSet object. In the same way applies the vote to the general proposal -func (g *governanceContract) applyVote(vote *VoteDetails, voteData *VoteSet, proposal *GeneralProposal) (*VoteSet, *GeneralProposal, error) { - switch vote.Value { - case Yes: - voteData.TotalYes.Add(voteData.TotalYes, vote.Power) - proposal.Yes.Add(proposal.Yes, vote.Power) - case No: - voteData.TotalNo.Add(voteData.TotalNo, vote.Power) - proposal.No.Add(proposal.No, vote.Power) - case Veto: - voteData.TotalVeto.Add(voteData.TotalVeto, vote.Power) - proposal.Veto.Add(proposal.Veto, vote.Power) - default: - return nil, nil, fmt.Errorf("%s: %s", vm.ErrInvalidArgument, "invalid vote type") +func (g *governanceContract) getUserVotes(address []byte) (*OngoingVotedList, error) { + onGoingList := &OngoingVotedList{ + Direct: make([]uint64, 0), + Delegated: make([]uint64, 0), } - - voteData.UsedPower.Add(voteData.UsedPower, vote.Power) - voteData.UsedBalance.Add(voteData.UsedBalance, vote.Balance) - voteData.VoteItems = append(voteData.VoteItems, vote) - - return voteData, proposal, nil -} - -// addNewVote applies a new vote on a proposal then saves the new information into the storage -func (g *governanceContract) addNewVote(voterAddress []byte, currentVote *VoteDetails, currentVoteSet *VoteSet, proposal *GeneralProposal) error { - newVoteSet, updatedProposal, err := g.applyVote(currentVote, currentVoteSet, proposal) - if err != nil { - return err + marshaledData := g.eei.GetStorage(address) + if len(marshaledData) == 0 { + return onGoingList, nil } - err = g.saveVoteSet(voterAddress, newVoteSet, updatedProposal) + err := g.marshalizer.Unmarshal(onGoingList, marshaledData) if err != nil { - return err - } - - if !g.proposalContainsVoter(proposal, voterAddress) { - proposal.Votes = append(proposal.Votes, voterAddress) + return nil, err } - return g.saveGeneralProposal(proposal.CommitHash, proposal) -} - -func getVoteItemKey(reference []byte, address []byte) []byte { - proposalKey := append([]byte(proposalPrefix), reference...) - voteItemKey := append(proposalKey, address...) 
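
// Illustration only, not part of the patch: a sketch of the double-vote guard
// built from getUserVotes/saveUserVotes above together with addNewNonce — each
// address gets at most one direct and one delegated vote per proposal nonce.
package main

import (
	"errors"
	"fmt"
)

var errDoubleVote = errors.New("double vote is not allowed") // stand-in for vm.ErrDoubleVote

func addNewNonce(nonceList []uint64, newNonce uint64) ([]uint64, error) {
	for _, nonce := range nonceList {
		if newNonce == nonce {
			return nil, errDoubleVote
		}
	}
	return append(nonceList, newNonce), nil
}

func main() {
	direct := []uint64{1, 2} // nonces this address already voted on directly

	direct, err := addNewNonce(direct, 3) // first vote on proposal 3: accepted
	fmt.Println(direct, err)              // [1 2 3] <nil>

	_, err = addNewNonce(direct, 3) // second vote on proposal 3: rejected
	fmt.Println(err)                // double vote is not allowed
}
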
- return voteItemKey + return onGoingList, nil } -// saveVoteSet first saves the main vote data of the voter, then updates the proposal with the new voter information -func (g *governanceContract) saveVoteSet(voter []byte, voteData *VoteSet, proposal *GeneralProposal) error { - voteItemKey := getVoteItemKey(proposal.CommitHash, voter) - - marshaledVoteItem, err := g.marshalizer.Marshal(voteData) - if err != nil { - return err +func (g *governanceContract) getDelegatedContractInfo(scAddress []byte, reference []byte) (*DelegatedSCVoteInfo, error) { + scVoteInfo := &DelegatedSCVoteInfo{ + TotalPower: big.NewInt(0), + UsedPower: big.NewInt(0), + TotalStake: big.NewInt(0), + UsedStake: big.NewInt(0), } - g.eei.SetStorage(voteItemKey, marshaledVoteItem) - return nil -} -// proposalContainsVoter iterates through all the votes on a proposal and returns if it already contains a -// vote from a certain address -func (g *governanceContract) proposalContainsVoter(proposal *GeneralProposal, voteKey []byte) bool { - for _, vote := range proposal.Votes { - if bytes.Equal(vote, voteKey) { - return true + marshalledData := g.eei.GetStorage(append(scAddress, reference...)) + if len(marshalledData) > 0 { + err := g.marshalizer.Unmarshal(scVoteInfo, marshalledData) + if err != nil { + return nil, err } - } - return false -} - -// computeVotingPower returns the voting power for a value. The value can be either a balance or -// the staked value for a validator -func (g *governanceContract) computeVotingPower(value *big.Int) (*big.Int, error) { - if value.Cmp(zero) < 0 { - return nil, fmt.Errorf("cannot compute voting power on a negative value") + return scVoteInfo, nil } - return big.NewInt(0).Sqrt(value), nil -} - -// computeAccountLeveledPower takes a value and some voter data and returns the voting power of that value in -// the following way: the power of all votes combined has to be sqrt(sum(allVoteWithFunds)). 
So, the new -// vote will have a smaller power depending on how much existed previously -func (g *governanceContract) computeAccountLeveledPower(value *big.Int, voteData *VoteSet) (*big.Int, error) { - previousAccountPower, err := g.computeVotingPower(voteData.UsedBalance) + totalStake, totalVotingPower, err := g.computeTotalStakeAndVotingPower(scAddress) if err != nil { return nil, err } - fullAccountBalance := big.NewInt(0).Add(voteData.UsedBalance, value) - newAccountPower, err := g.computeVotingPower(fullAccountBalance) - if err != nil { - return nil, err - } + scVoteInfo.TotalPower.Set(totalVotingPower) + scVoteInfo.TotalStake.Set(totalStake) - return big.NewInt(0).Sub(newAccountPower, previousAccountPower), nil + return scVoteInfo, nil } -// isValidVoteString checks if a certain string represents a valid vote string -func (g *governanceContract) isValidVoteString(vote string) bool { - switch vote { - case yesString, noString, vetoString: - return true - default: - return false +func (g *governanceContract) saveDelegatedContractInfo( + scAddress []byte, + scVoteInfo *DelegatedSCVoteInfo, + reference []byte, +) error { + marshalledData, err := g.marshalizer.Marshal(scVoteInfo) + if err != nil { + return err } -} -// castVoteType casts a valid string vote passed as an argument to the actual mapped value -func (g *governanceContract) castVoteType(vote string) (VoteValueType, error) { - switch vote { - case yesString: - return Yes, nil - case noString: - return No, nil - case vetoString: - return Veto, nil - default: - return 0, fmt.Errorf("%s: %s%s", vm.ErrInvalidArgument, "invalid vote type option: ", vote) - } + g.eei.SetStorage(append(scAddress, reference...), marshalledData) + return nil } -// getOrCreateVoteSet returns the vote data from storage for a given proposer/validator pair. 
-// If no vote data exists, it returns a new instance of VoteSet -func (g *governanceContract) getOrCreateVoteSet(key []byte) (*VoteSet, error) { - marshaledData := g.eei.GetStorage(key) +// getConfig returns the current system smart contract configuration +func (g *governanceContract) getConfig() (*GovernanceConfigV2, error) { + scConfig := &GovernanceConfigV2{} + + marshaledData := g.eei.GetStorage([]byte(governanceConfigKey)) if len(marshaledData) == 0 { - return g.getEmptyVoteSet(), nil + return nil, vm.ErrElementNotFound } - voteData := &VoteSet{} - err := g.marshalizer.Unmarshal(voteData, marshaledData) + err := g.marshalizer.Unmarshal(scConfig, marshaledData) if err != nil { return nil, err } - return voteData, nil -} - -// getEmptyVoteSet returns a new VoteSet instance with its members initialised with their 0 value -func (g *governanceContract) getEmptyVoteSet() *VoteSet { - return &VoteSet{ - UsedPower: big.NewInt(0), - UsedBalance: big.NewInt(0), - TotalYes: big.NewInt(0), - TotalNo: big.NewInt(0), - TotalVeto: big.NewInt(0), - VoteItems: make([]*VoteDetails, 0), - } + return scConfig, nil } -// computeValidatorVotingPower returns the total voting power of a validator -func (g *governanceContract) computeValidatorVotingPower(validatorAddress []byte) (*big.Int, error) { - totalStake, err := g.getTotalStake(validatorAddress) - if err != nil { - return nil, fmt.Errorf("could not return total stake for the provided address, thus cannot compute voting power") - } - - votingPower, err := g.computeVotingPower(totalStake) +func (g *governanceContract) saveConfig(cfg *GovernanceConfigV2) error { + marshaledData, err := g.marshalizer.Marshal(cfg) if err != nil { - return nil, fmt.Errorf("could not return total stake for the provided address, thus cannot compute voting power") + return err } - return votingPower, nil + g.eei.SetStorage([]byte(governanceConfigKey), marshaledData) + return nil } -// function iterates over all delegation contracts and verifies balances of the given account and makes a sum of it -//TODO: benchmark this, the other solution is to receive in the arguments which delegation contracts should be checked -// and consume gas for each delegation contract to be checked -func (g *governanceContract) computeVotingPowerFromTotalStake(address []byte) (*big.Int, error) { - totalStake, err := g.getTotalStake(address) - if err != nil && err != vm.ErrEmptyStorage { - return nil, fmt.Errorf("could not return total stake for the provided address, thus cannot compute voting power") - } - if totalStake == nil { - totalStake = big.NewInt(0) - } - - dContractList, err := getDelegationContractList(g.eei, g.marshalizer, g.delegationMgrSCAddress) - if err != nil { - return nil, err - } - - var activeDelegated *big.Int - for _, contract := range dContractList.Addresses { - activeDelegated, err = g.getActiveFundForDelegator(contract, address) - if err != nil { - return nil, err - } - - totalStake.Add(totalStake, activeDelegated) - } - - votingPower, err := g.computeVotingPower(totalStake) +// saveGeneralProposal saves a proposal into the storage +func (g *governanceContract) saveGeneralProposal(reference []byte, generalProposal *GeneralProposal) error { + marshaledData, err := g.marshalizer.Marshal(generalProposal) if err != nil { - return nil, err + return err } + key := append([]byte(proposalPrefix), reference...) 
+ g.eei.SetStorage(key, marshaledData) - return votingPower, nil + return nil } -func (g *governanceContract) getActiveFundForDelegator(delegationAddress []byte, address []byte) (*big.Int, error) { - dData := &DelegatorData{ - UnClaimedRewards: big.NewInt(0), - TotalCumulatedRewards: big.NewInt(0), - } - marshaledData := g.eei.GetStorageFromAddress(delegationAddress, address) - if len(marshaledData) == 0 { - return big.NewInt(0), nil - } - - err := g.marshalizer.Unmarshal(dData, marshaledData) +// getValidProposal returns a proposal from storage if it exists, or it is still valid/in-progress +func (g *governanceContract) getValidProposal(nonce *big.Int) (*GeneralProposal, error) { + proposal, err := g.getProposalFromNonce(nonce) if err != nil { return nil, err } - if len(dData.ActiveFund) == 0 { - return big.NewInt(0), nil + currentEpoch := uint64(g.eei.BlockChainHook().CurrentEpoch()) + if currentEpoch < proposal.StartVoteEpoch { + return nil, vm.ErrVotingNotStartedForProposal } - marshaledData = g.eei.GetStorageFromAddress(delegationAddress, dData.ActiveFund) - activeFund := &Fund{Value: big.NewInt(0)} - err = g.marshalizer.Unmarshal(activeFund, marshaledData) - if err != nil { - return nil, err + if currentEpoch > proposal.EndVoteEpoch { + return nil, vm.ErrVotedForAnExpiredProposal } - return activeFund.Value, nil + return proposal, nil } -func (g *governanceContract) getTotalStake(validatorAddress []byte) (*big.Int, error) { - marshaledData := g.eei.GetStorageFromAddress(g.validatorSCAddress, validatorAddress) - if len(marshaledData) == 0 { - return nil, vm.ErrEmptyStorage - } - - validatorData := &ValidatorDataV2{} - err := g.marshalizer.Unmarshal(validatorData, marshaledData) - if err != nil { - return nil, err - } - - return validatorData.TotalStakeValue, nil +func (g *governanceContract) getProposalFromNonce(nonce *big.Int) (*GeneralProposal, error) { + nonceKey := append([]byte(noncePrefix), nonce.Bytes()...) + commitHash := g.eei.GetStorage(nonceKey) + return g.getGeneralProposal(commitHash) } -// validateInitialWhiteListedAddresses makes basic checks that the provided initial whitelisted -// addresses have the correct format -func (g *governanceContract) validateInitialWhiteListedAddresses(addresses [][]byte) error { - if len(addresses) == 0 { - log.Debug("0 initial whiteListed addresses provided to the governance contract") - return vm.ErrInvalidNumOfInitialWhiteListedAddress - } +// getGeneralProposal returns a proposal from storage +func (g *governanceContract) getGeneralProposal(reference []byte) (*GeneralProposal, error) { + key := append([]byte(proposalPrefix), reference...) 
+ marshaledData := g.eei.GetStorage(key)

 if len(marshaledData) == 0 { return nil, vm.ErrProposalNotFound }

 generalProposal := &GeneralProposal{} err := g.marshalizer.Unmarshal(generalProposal, marshaledData) if err != nil { return nil, err }

 return generalProposal, nil }

// proposalExists returns true if a proposal already exists func (g *governanceContract) proposalExists(reference []byte) bool { key := append([]byte(proposalPrefix), reference...) marshaledData := g.eei.GetStorage(key) return len(marshaledData) > 0 }

// startEndEpochFromArguments converts the start/end epoch byte arguments to uint64 and validates them against the current epoch and the unbond period func (g *governanceContract) startEndEpochFromArguments(argStart []byte, argEnd []byte) (uint64, uint64, error) { startVoteEpoch := big.NewInt(0).SetBytes(argStart) endVoteEpoch := big.NewInt(0).SetBytes(argEnd)

 currentEpoch := uint64(g.eei.BlockChainHook().CurrentEpoch()) if currentEpoch > startVoteEpoch.Uint64() || startVoteEpoch.Uint64() > endVoteEpoch.Uint64() { return 0, 0, vm.ErrInvalidStartEndVoteEpoch } if endVoteEpoch.Uint64()-startVoteEpoch.Uint64() >= uint64(g.unBondPeriodInEpochs) { return 0, 0, vm.ErrInvalidStartEndVoteEpoch }

 return startVoteEpoch.Uint64(), endVoteEpoch.Uint64(), nil }

 // convertV2Config converts the passed config file to the correct V2 typed GovernanceConfig func (g *governanceContract) convertV2Config(config config.GovernanceSystemSCConfig) (*GovernanceConfigV2, error) { - minQuorum, success := big.NewInt(0).SetString(config.Active.MinQuorum, conversionBase) - if !success { + if config.Active.MinQuorum <= 0.01 { return 
// convertV2Config converts the passed config file to the correct V2 typed GovernanceConfig func (g *governanceContract) convertV2Config(config config.GovernanceSystemSCConfig) (*GovernanceConfigV2, error) { - minQuorum, success := big.NewInt(0).SetString(config.Active.MinQuorum, conversionBase) - if !success { + if config.Active.MinQuorum <= 0.01 { return nil, vm.ErrIncorrectConfig } - minPass, success := big.NewInt(0).SetString(config.Active.MinPassThreshold, conversionBase) - if !success { + if config.Active.MinPassThreshold <= 0.01 { return nil, vm.ErrIncorrectConfig } - minVeto, success := big.NewInt(0).SetString(config.Active.MinVetoThreshold, conversionBase) - if !success { + if config.Active.MinVetoThreshold <= 0.01 { return nil, vm.ErrIncorrectConfig } proposalFee, success := big.NewInt(0).SetString(config.Active.ProposalCost, conversionBase) @@ -1425,13 +1118,26 @@ func (g *governanceContract) convertV2Config(config config.GovernanceSystemSCCon } return &GovernanceConfigV2{ - MinQuorum: minQuorum, - MinPassThreshold: minPass, - MinVetoThreshold: minVeto, + MinQuorum: float32(config.Active.MinQuorum), + MinPassThreshold: float32(config.Active.MinPassThreshold), + MinVetoThreshold: float32(config.Active.MinVetoThreshold), ProposalFee: proposalFee, }, nil } +// convertDecimalToPercentage converts an integer argument into a float32 percentage (value/maxPercentage), rejecting results outside the [0.001, 1.0] range +func convertDecimalToPercentage(arg []byte) (float32, error) { + value, okConvert := big.NewInt(0).SetString(string(arg), conversionBase) + if !okConvert { + return 0.0, vm.ErrIncorrectConfig + } + + valAsFloat := float64(value.Uint64()) / maxPercentage + if valAsFloat < 0.001 || valAsFloat > 1.0 { + return 0.0, vm.ErrIncorrectConfig + } + return float32(valAsFloat), nil +} + // CanUseContract returns true if contract is enabled func (g *governanceContract) CanUseContract() bool { return true diff --git a/vm/systemSmartContracts/governance.pb.go b/vm/systemSmartContracts/governance.pb.go index 3860f68fa2e..2f7e55df5c3 100644 --- a/vm/systemSmartContracts/governance.pb.go +++ b/vm/systemSmartContracts/governance.pb.go @@ -5,6 +5,7 @@ package systemSmartContracts import ( bytes "bytes" + encoding_binary "encoding/binary" fmt "fmt" _ "github.com/gogo/protobuf/gogoproto" proto "github.com/gogo/protobuf/proto" @@ -32,21 +33,24 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package type VoteValueType int32 const ( - Yes VoteValueType = 0 - No VoteValueType = 1 - Veto VoteValueType = 2 + Yes VoteValueType = 0 + No VoteValueType = 1 + Veto VoteValueType = 2 + Abstain VoteValueType = 3 ) var VoteValueType_name = map[int32]string{ 0: "Yes", 1: "No", 2: "Veto", + 3: "Abstain", } var VoteValueType_value = map[string]int32{ - "Yes": 0, - "No": 1, - "Veto": 2, + "Yes": 0, + "No": 1, + "Veto": 2, + "Abstain": 3, } func (VoteValueType) EnumDescriptor() ([]byte, []int) { @@ -54,17 +58,19 @@ func (VoteValueType) EnumDescriptor() ([]byte, []int) { } type GeneralProposal struct { - IssuerAddress []byte `protobuf:"bytes,1,opt,name=IssuerAddress,proto3" json:"IssuerAddress"` + Nonce uint64 `protobuf:"varint,1,opt,name=Nonce,proto3" json:"Nonce"` CommitHash []byte `protobuf:"bytes,2,opt,name=CommitHash,proto3" json:"CommitHash"` - StartVoteNonce uint64 `protobuf:"varint,3,opt,name=StartVoteNonce,proto3" json:"StartVoteNonce"` - EndVoteNonce uint64 `protobuf:"varint,4,opt,name=EndVoteNonce,proto3" json:"EndVoteNonce"` + StartVoteEpoch uint64 `protobuf:"varint,3,opt,name=StartVoteEpoch,proto3" json:"StartVoteEpoch"` + EndVoteEpoch uint64 `protobuf:"varint,4,opt,name=EndVoteEpoch,proto3" json:"EndVoteEpoch"` Yes *math_big.Int `protobuf:"bytes,5,opt,name=Yes,proto3,casttypewith=math/big.Int;github.com/multiversx/mx-chain-core-go/data.BigIntCaster" json:"Yes"` No *math_big.Int `protobuf:"bytes,6,opt,name=No,proto3,casttypewith=math/big.Int;github.com/multiversx/mx-chain-core-go/data.BigIntCaster" json:"No"` Veto *math_big.Int
`protobuf:"bytes,7,opt,name=Veto,proto3,casttypewith=math/big.Int;github.com/multiversx/mx-chain-core-go/data.BigIntCaster" json:"Veto"` - Passed bool `protobuf:"varint,8,opt,name=Passed,proto3" json:"Passed"` - Votes [][]byte `protobuf:"bytes,9,rep,name=Votes,proto3" json:"Votes"` - TopReference []byte `protobuf:"bytes,10,opt,name=TopReference,proto3" json:"TopReference"` + Abstain *math_big.Int `protobuf:"bytes,8,opt,name=Abstain,proto3,casttypewith=math/big.Int;github.com/multiversx/mx-chain-core-go/data.BigIntCaster" json:"Abstain"` + QuorumStake *math_big.Int `protobuf:"bytes,9,opt,name=QuorumStake,proto3,casttypewith=math/big.Int;github.com/multiversx/mx-chain-core-go/data.BigIntCaster" json:"QuorumStake"` + Passed bool `protobuf:"varint,10,opt,name=Passed,proto3" json:"Passed"` Closed bool `protobuf:"varint,11,opt,name=Closed,proto3" json:"Closed"` + IssuerAddress []byte `protobuf:"bytes,12,opt,name=IssuerAddress,proto3" json:"IssuerAddress"` + ProposalCost *math_big.Int `protobuf:"bytes,13,opt,name=ProposalCost,proto3,casttypewith=math/big.Int;github.com/multiversx/mx-chain-core-go/data.BigIntCaster" json:"ProposalCost"` } func (m *GeneralProposal) Reset() { *m = GeneralProposal{} } @@ -95,11 +101,11 @@ func (m *GeneralProposal) XXX_DiscardUnknown() { var xxx_messageInfo_GeneralProposal proto.InternalMessageInfo -func (m *GeneralProposal) GetIssuerAddress() []byte { +func (m *GeneralProposal) GetNonce() uint64 { if m != nil { - return m.IssuerAddress + return m.Nonce } - return nil + return 0 } func (m *GeneralProposal) GetCommitHash() []byte { @@ -109,16 +115,16 @@ func (m *GeneralProposal) GetCommitHash() []byte { return nil } -func (m *GeneralProposal) GetStartVoteNonce() uint64 { +func (m *GeneralProposal) GetStartVoteEpoch() uint64 { if m != nil { - return m.StartVoteNonce + return m.StartVoteEpoch } return 0 } -func (m *GeneralProposal) GetEndVoteNonce() uint64 { +func (m *GeneralProposal) GetEndVoteEpoch() uint64 { if m != nil { - return m.EndVoteNonce + return m.EndVoteEpoch } return 0 } @@ -144,25 +150,25 @@ func (m *GeneralProposal) GetVeto() *math_big.Int { return nil } -func (m *GeneralProposal) GetPassed() bool { +func (m *GeneralProposal) GetAbstain() *math_big.Int { if m != nil { - return m.Passed + return m.Abstain } - return false + return nil } -func (m *GeneralProposal) GetVotes() [][]byte { +func (m *GeneralProposal) GetQuorumStake() *math_big.Int { if m != nil { - return m.Votes + return m.QuorumStake } return nil } -func (m *GeneralProposal) GetTopReference() []byte { +func (m *GeneralProposal) GetPassed() bool { if m != nil { - return m.TopReference + return m.Passed } - return nil + return false } func (m *GeneralProposal) GetClosed() bool { @@ -172,104 +178,16 @@ func (m *GeneralProposal) GetClosed() bool { return false } -type WhiteListProposal struct { - WhiteListAddress []byte `protobuf:"bytes,1,opt,name=WhiteListAddress,proto3" json:"WhiteListAddress"` - ProposalStatus []byte `protobuf:"bytes,2,opt,name=ProposalStatus,proto3" json:"ProposalStatus"` -} - -func (m *WhiteListProposal) Reset() { *m = WhiteListProposal{} } -func (*WhiteListProposal) ProtoMessage() {} -func (*WhiteListProposal) Descriptor() ([]byte, []int) { - return fileDescriptor_e18a03da5266c714, []int{1} -} -func (m *WhiteListProposal) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *WhiteListProposal) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return 
b[:n], nil -} -func (m *WhiteListProposal) XXX_Merge(src proto.Message) { - xxx_messageInfo_WhiteListProposal.Merge(m, src) -} -func (m *WhiteListProposal) XXX_Size() int { - return m.Size() -} -func (m *WhiteListProposal) XXX_DiscardUnknown() { - xxx_messageInfo_WhiteListProposal.DiscardUnknown(m) -} - -var xxx_messageInfo_WhiteListProposal proto.InternalMessageInfo - -func (m *WhiteListProposal) GetWhiteListAddress() []byte { - if m != nil { - return m.WhiteListAddress - } - return nil -} - -func (m *WhiteListProposal) GetProposalStatus() []byte { - if m != nil { - return m.ProposalStatus - } - return nil -} - -type HardForkProposal struct { - EpochToHardFork uint32 `protobuf:"varint,1,opt,name=EpochToHardFork,proto3" json:"EpochToHardFork"` - NewSoftwareVersion []byte `protobuf:"bytes,2,opt,name=NewSoftwareVersion,proto3" json:"NewSoftwareVersion"` - ProposalStatus []byte `protobuf:"bytes,3,opt,name=ProposalStatus,proto3" json:"ProposalStatus"` -} - -func (m *HardForkProposal) Reset() { *m = HardForkProposal{} } -func (*HardForkProposal) ProtoMessage() {} -func (*HardForkProposal) Descriptor() ([]byte, []int) { - return fileDescriptor_e18a03da5266c714, []int{2} -} -func (m *HardForkProposal) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *HardForkProposal) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *HardForkProposal) XXX_Merge(src proto.Message) { - xxx_messageInfo_HardForkProposal.Merge(m, src) -} -func (m *HardForkProposal) XXX_Size() int { - return m.Size() -} -func (m *HardForkProposal) XXX_DiscardUnknown() { - xxx_messageInfo_HardForkProposal.DiscardUnknown(m) -} - -var xxx_messageInfo_HardForkProposal proto.InternalMessageInfo - -func (m *HardForkProposal) GetEpochToHardFork() uint32 { - if m != nil { - return m.EpochToHardFork - } - return 0 -} - -func (m *HardForkProposal) GetNewSoftwareVersion() []byte { +func (m *GeneralProposal) GetIssuerAddress() []byte { if m != nil { - return m.NewSoftwareVersion + return m.IssuerAddress } return nil } -func (m *HardForkProposal) GetProposalStatus() []byte { +func (m *GeneralProposal) GetProposalCost() *math_big.Int { if m != nil { - return m.ProposalStatus + return m.ProposalCost } return nil } @@ -285,7 +203,7 @@ type GovernanceConfig struct { func (m *GovernanceConfig) Reset() { *m = GovernanceConfig{} } func (*GovernanceConfig) ProtoMessage() {} func (*GovernanceConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_e18a03da5266c714, []int{3} + return fileDescriptor_e18a03da5266c714, []int{1} } func (m *GovernanceConfig) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -346,16 +264,17 @@ func (m *GovernanceConfig) GetProposalFee() *math_big.Int { } type GovernanceConfigV2 struct { - MinQuorum *math_big.Int `protobuf:"bytes,1,opt,name=MinQuorum,proto3,casttypewith=math/big.Int;github.com/multiversx/mx-chain-core-go/data.BigIntCaster" json:"MinQuorum"` - MinPassThreshold *math_big.Int `protobuf:"bytes,2,opt,name=MinPassThreshold,proto3,casttypewith=math/big.Int;github.com/multiversx/mx-chain-core-go/data.BigIntCaster" json:"MinPassThreshold"` - MinVetoThreshold *math_big.Int `protobuf:"bytes,3,opt,name=MinVetoThreshold,proto3,casttypewith=math/big.Int;github.com/multiversx/mx-chain-core-go/data.BigIntCaster" json:"MinVetoThreshold"` - ProposalFee *math_big.Int 
`protobuf:"bytes,4,opt,name=ProposalFee,proto3,casttypewith=math/big.Int;github.com/multiversx/mx-chain-core-go/data.BigIntCaster" json:"ProposalFee"` + MinQuorum float32 `protobuf:"fixed32,1,opt,name=MinQuorum,proto3" json:"MinQuorum"` + MinPassThreshold float32 `protobuf:"fixed32,2,opt,name=MinPassThreshold,proto3" json:"MinPassThreshold"` + MinVetoThreshold float32 `protobuf:"fixed32,3,opt,name=MinVetoThreshold,proto3" json:"MinVetoThreshold"` + ProposalFee *math_big.Int `protobuf:"bytes,4,opt,name=ProposalFee,proto3,casttypewith=math/big.Int;github.com/multiversx/mx-chain-core-go/data.BigIntCaster" json:"ProposalFee"` + LastProposalNonce uint64 `protobuf:"varint,5,opt,name=LastProposalNonce,proto3" json:"LastProposalNonce"` } func (m *GovernanceConfigV2) Reset() { *m = GovernanceConfigV2{} } func (*GovernanceConfigV2) ProtoMessage() {} func (*GovernanceConfigV2) Descriptor() ([]byte, []int) { - return fileDescriptor_e18a03da5266c714, []int{4} + return fileDescriptor_e18a03da5266c714, []int{2} } func (m *GovernanceConfigV2) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -380,25 +299,25 @@ func (m *GovernanceConfigV2) XXX_DiscardUnknown() { var xxx_messageInfo_GovernanceConfigV2 proto.InternalMessageInfo -func (m *GovernanceConfigV2) GetMinQuorum() *math_big.Int { +func (m *GovernanceConfigV2) GetMinQuorum() float32 { if m != nil { return m.MinQuorum } - return nil + return 0 } -func (m *GovernanceConfigV2) GetMinPassThreshold() *math_big.Int { +func (m *GovernanceConfigV2) GetMinPassThreshold() float32 { if m != nil { return m.MinPassThreshold } - return nil + return 0 } -func (m *GovernanceConfigV2) GetMinVetoThreshold() *math_big.Int { +func (m *GovernanceConfigV2) GetMinVetoThreshold() float32 { if m != nil { return m.MinVetoThreshold } - return nil + return 0 } func (m *GovernanceConfigV2) GetProposalFee() *math_big.Int { @@ -408,22 +327,27 @@ func (m *GovernanceConfigV2) GetProposalFee() *math_big.Int { return nil } -type VoteDetails struct { - Value VoteValueType `protobuf:"varint,1,opt,name=Value,proto3,enum=proto.VoteValueType" json:"Value"` - Power *math_big.Int `protobuf:"bytes,2,opt,name=Power,proto3,casttypewith=math/big.Int;github.com/multiversx/mx-chain-core-go/data.BigIntCaster" json:"Power"` - Balance *math_big.Int `protobuf:"bytes,3,opt,name=Balance,proto3,casttypewith=math/big.Int;github.com/multiversx/mx-chain-core-go/data.BigIntCaster" json:"Balance"` - DelegatedTo []byte `protobuf:"bytes,4,opt,name=DelegatedTo,proto3" json:"DelegatedTo"` +func (m *GovernanceConfigV2) GetLastProposalNonce() uint64 { + if m != nil { + return m.LastProposalNonce + } + return 0 +} + +type OngoingVotedList struct { + Direct []uint64 `protobuf:"varint,1,rep,packed,name=Direct,proto3" json:"Direct"` + Delegated []uint64 `protobuf:"varint,2,rep,packed,name=Delegated,proto3" json:"Delegated"` } -func (m *VoteDetails) Reset() { *m = VoteDetails{} } -func (*VoteDetails) ProtoMessage() {} -func (*VoteDetails) Descriptor() ([]byte, []int) { - return fileDescriptor_e18a03da5266c714, []int{5} +func (m *OngoingVotedList) Reset() { *m = OngoingVotedList{} } +func (*OngoingVotedList) ProtoMessage() {} +func (*OngoingVotedList) Descriptor() ([]byte, []int) { + return fileDescriptor_e18a03da5266c714, []int{3} } -func (m *VoteDetails) XXX_Unmarshal(b []byte) error { +func (m *OngoingVotedList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *VoteDetails) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *OngoingVotedList) XXX_Marshal(b []byte, deterministic 
bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { @@ -431,64 +355,48 @@ func (m *VoteDetails) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) } return b[:n], nil } -func (m *VoteDetails) XXX_Merge(src proto.Message) { - xxx_messageInfo_VoteDetails.Merge(m, src) +func (m *OngoingVotedList) XXX_Merge(src proto.Message) { + xxx_messageInfo_OngoingVotedList.Merge(m, src) } -func (m *VoteDetails) XXX_Size() int { +func (m *OngoingVotedList) XXX_Size() int { return m.Size() } -func (m *VoteDetails) XXX_DiscardUnknown() { - xxx_messageInfo_VoteDetails.DiscardUnknown(m) -} - -var xxx_messageInfo_VoteDetails proto.InternalMessageInfo - -func (m *VoteDetails) GetValue() VoteValueType { - if m != nil { - return m.Value - } - return Yes +func (m *OngoingVotedList) XXX_DiscardUnknown() { + xxx_messageInfo_OngoingVotedList.DiscardUnknown(m) } -func (m *VoteDetails) GetPower() *math_big.Int { - if m != nil { - return m.Power - } - return nil -} +var xxx_messageInfo_OngoingVotedList proto.InternalMessageInfo -func (m *VoteDetails) GetBalance() *math_big.Int { +func (m *OngoingVotedList) GetDirect() []uint64 { if m != nil { - return m.Balance + return m.Direct } return nil } -func (m *VoteDetails) GetDelegatedTo() []byte { +func (m *OngoingVotedList) GetDelegated() []uint64 { if m != nil { - return m.DelegatedTo + return m.Delegated } return nil } -type VoteSet struct { - UsedPower *math_big.Int `protobuf:"bytes,1,opt,name=UsedPower,proto3,casttypewith=math/big.Int;github.com/multiversx/mx-chain-core-go/data.BigIntCaster" json:"UsedPower"` - UsedBalance *math_big.Int `protobuf:"bytes,2,opt,name=UsedBalance,proto3,casttypewith=math/big.Int;github.com/multiversx/mx-chain-core-go/data.BigIntCaster" json:"UsedBalance"` - TotalYes *math_big.Int `protobuf:"bytes,3,opt,name=TotalYes,proto3,casttypewith=math/big.Int;github.com/multiversx/mx-chain-core-go/data.BigIntCaster" json:"TotalYes"` - TotalNo *math_big.Int `protobuf:"bytes,4,opt,name=TotalNo,proto3,casttypewith=math/big.Int;github.com/multiversx/mx-chain-core-go/data.BigIntCaster" json:"TotalNo"` - TotalVeto *math_big.Int `protobuf:"bytes,5,opt,name=TotalVeto,proto3,casttypewith=math/big.Int;github.com/multiversx/mx-chain-core-go/data.BigIntCaster" json:"TotalVeto"` - VoteItems []*VoteDetails `protobuf:"bytes,6,rep,name=VoteItems,proto3" json:"VoteItems"` +type DelegatedSCVoteInfo struct { + TotalPower *math_big.Int `protobuf:"bytes,1,opt,name=TotalPower,proto3,casttypewith=math/big.Int;github.com/multiversx/mx-chain-core-go/data.BigIntCaster" json:"TotalPower"` + UsedPower *math_big.Int `protobuf:"bytes,2,opt,name=UsedPower,proto3,casttypewith=math/big.Int;github.com/multiversx/mx-chain-core-go/data.BigIntCaster" json:"UsedPower"` + TotalStake *math_big.Int `protobuf:"bytes,3,opt,name=TotalStake,proto3,casttypewith=math/big.Int;github.com/multiversx/mx-chain-core-go/data.BigIntCaster" json:"TotalStake"` + UsedStake *math_big.Int `protobuf:"bytes,4,opt,name=UsedStake,proto3,casttypewith=math/big.Int;github.com/multiversx/mx-chain-core-go/data.BigIntCaster" json:"UsedStake"` } -func (m *VoteSet) Reset() { *m = VoteSet{} } -func (*VoteSet) ProtoMessage() {} -func (*VoteSet) Descriptor() ([]byte, []int) { - return fileDescriptor_e18a03da5266c714, []int{6} +func (m *DelegatedSCVoteInfo) Reset() { *m = DelegatedSCVoteInfo{} } +func (*DelegatedSCVoteInfo) ProtoMessage() {} +func (*DelegatedSCVoteInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_e18a03da5266c714, []int{4} } -func (m *VoteSet) 
XXX_Unmarshal(b []byte) error { +func (m *DelegatedSCVoteInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *VoteSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *DelegatedSCVoteInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { @@ -496,56 +404,42 @@ func (m *VoteSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { } return b[:n], nil } -func (m *VoteSet) XXX_Merge(src proto.Message) { - xxx_messageInfo_VoteSet.Merge(m, src) +func (m *DelegatedSCVoteInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_DelegatedSCVoteInfo.Merge(m, src) } -func (m *VoteSet) XXX_Size() int { +func (m *DelegatedSCVoteInfo) XXX_Size() int { return m.Size() } -func (m *VoteSet) XXX_DiscardUnknown() { - xxx_messageInfo_VoteSet.DiscardUnknown(m) -} - -var xxx_messageInfo_VoteSet proto.InternalMessageInfo - -func (m *VoteSet) GetUsedPower() *math_big.Int { - if m != nil { - return m.UsedPower - } - return nil +func (m *DelegatedSCVoteInfo) XXX_DiscardUnknown() { + xxx_messageInfo_DelegatedSCVoteInfo.DiscardUnknown(m) } -func (m *VoteSet) GetUsedBalance() *math_big.Int { - if m != nil { - return m.UsedBalance - } - return nil -} +var xxx_messageInfo_DelegatedSCVoteInfo proto.InternalMessageInfo -func (m *VoteSet) GetTotalYes() *math_big.Int { +func (m *DelegatedSCVoteInfo) GetTotalPower() *math_big.Int { if m != nil { - return m.TotalYes + return m.TotalPower } return nil } -func (m *VoteSet) GetTotalNo() *math_big.Int { +func (m *DelegatedSCVoteInfo) GetUsedPower() *math_big.Int { if m != nil { - return m.TotalNo + return m.UsedPower } return nil } -func (m *VoteSet) GetTotalVeto() *math_big.Int { +func (m *DelegatedSCVoteInfo) GetTotalStake() *math_big.Int { if m != nil { - return m.TotalVeto + return m.TotalStake } return nil } -func (m *VoteSet) GetVoteItems() []*VoteDetails { +func (m *DelegatedSCVoteInfo) GetUsedStake() *math_big.Int { if m != nil { - return m.VoteItems + return m.UsedStake } return nil } @@ -553,83 +447,72 @@ func (m *VoteSet) GetVoteItems() []*VoteDetails { func init() { proto.RegisterEnum("proto.VoteValueType", VoteValueType_name, VoteValueType_value) proto.RegisterType((*GeneralProposal)(nil), "proto.GeneralProposal") - proto.RegisterType((*WhiteListProposal)(nil), "proto.WhiteListProposal") - proto.RegisterType((*HardForkProposal)(nil), "proto.HardForkProposal") proto.RegisterType((*GovernanceConfig)(nil), "proto.GovernanceConfig") proto.RegisterType((*GovernanceConfigV2)(nil), "proto.GovernanceConfigV2") - proto.RegisterType((*VoteDetails)(nil), "proto.VoteDetails") - proto.RegisterType((*VoteSet)(nil), "proto.VoteSet") + proto.RegisterType((*OngoingVotedList)(nil), "proto.OngoingVotedList") + proto.RegisterType((*DelegatedSCVoteInfo)(nil), "proto.DelegatedSCVoteInfo") } func init() { proto.RegisterFile("governance.proto", fileDescriptor_e18a03da5266c714) } var fileDescriptor_e18a03da5266c714 = []byte{ - // 1028 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x56, 0x4f, 0x6f, 0xe3, 0x44, - 0x14, 0x8f, 0x93, 0xb8, 0x7f, 0xa6, 0xe9, 0x36, 0x3b, 0x54, 0x28, 0xe2, 0x60, 0x47, 0x39, 0x45, - 0x8b, 0x9a, 0x88, 0x02, 0x42, 0x02, 0x21, 0xc0, 0xd9, 0x76, 0xb7, 0x12, 0x1b, 0xba, 0x93, 0x6c, - 0xf8, 0x23, 0x21, 0x34, 0x89, 0xa7, 0x89, 0x85, 0xed, 0x09, 0x33, 0x93, 0xed, 0x2e, 0x27, 0xae, - 0xdc, 0xe0, 0xc0, 0x95, 0x33, 0xe2, 0x0b, 0xf0, 0x15, 0x38, 0x96, 0x03, 0x52, 0x4f, 0x86, 0xa6, - 0x17, 0xf0, 
0x69, 0x3f, 0x02, 0x9a, 0xb1, 0xe3, 0xd8, 0x49, 0x84, 0x38, 0x58, 0x9c, 0x66, 0xde, - 0xef, 0xd9, 0xef, 0x3d, 0xbf, 0xdf, 0xef, 0x8d, 0x07, 0x54, 0xc7, 0xf4, 0x29, 0x61, 0x3e, 0xf6, - 0x47, 0xa4, 0x35, 0x65, 0x54, 0x50, 0xa8, 0xab, 0xe5, 0x95, 0xa3, 0xb1, 0x23, 0x26, 0xb3, 0x61, - 0x6b, 0x44, 0xbd, 0xf6, 0x98, 0x8e, 0x69, 0x5b, 0xc1, 0xc3, 0xd9, 0x85, 0xb2, 0x94, 0xa1, 0x76, - 0xd1, 0x5b, 0x8d, 0x5f, 0x74, 0x70, 0xf0, 0x80, 0xf8, 0x84, 0x61, 0xf7, 0x9c, 0xd1, 0x29, 0xe5, - 0xd8, 0x85, 0x6f, 0x81, 0xfd, 0x33, 0xce, 0x67, 0x84, 0x7d, 0x60, 0xdb, 0x8c, 0x70, 0x5e, 0xd3, - 0xea, 0x5a, 0xb3, 0x62, 0xdd, 0x0d, 0x03, 0x33, 0xeb, 0x40, 0x59, 0x13, 0xb6, 0x00, 0xe8, 0x50, - 0xcf, 0x73, 0xc4, 0x43, 0xcc, 0x27, 0xb5, 0xa2, 0x7a, 0xeb, 0x4e, 0x18, 0x98, 0x29, 0x14, 0xa5, - 0xf6, 0xf0, 0x6d, 0x70, 0xa7, 0x27, 0x30, 0x13, 0x03, 0x2a, 0x48, 0x97, 0xfa, 0x23, 0x52, 0x2b, - 0xd5, 0xb5, 0x66, 0xd9, 0x82, 0x61, 0x60, 0xae, 0x78, 0xd0, 0x8a, 0x0d, 0xdf, 0x00, 0x95, 0x13, - 0xdf, 0x5e, 0xbe, 0x59, 0x56, 0x6f, 0x56, 0xc3, 0xc0, 0xcc, 0xe0, 0x28, 0x63, 0xc1, 0x21, 0x28, - 0x7d, 0x4a, 0x78, 0x4d, 0x57, 0xa5, 0x9d, 0x87, 0x81, 0x29, 0xcd, 0x9f, 0xff, 0x30, 0x4f, 0x3c, - 0x2c, 0x26, 0xed, 0xa1, 0x33, 0x6e, 0x9d, 0xf9, 0xe2, 0x9d, 0x54, 0x0b, 0xbd, 0x99, 0x2b, 0x9c, - 0xa7, 0x84, 0xf1, 0x67, 0x6d, 0xef, 0xd9, 0xd1, 0x68, 0x82, 0x1d, 0xff, 0x68, 0x44, 0x19, 0x39, - 0x1a, 0xd3, 0xb6, 0x8d, 0x05, 0x6e, 0x59, 0xce, 0xf8, 0xcc, 0x17, 0x1d, 0xcc, 0x05, 0x61, 0x48, - 0x46, 0x83, 0x5f, 0x80, 0x62, 0x97, 0xd6, 0xb6, 0x54, 0x8a, 0x8f, 0xc2, 0xc0, 0x2c, 0x76, 0x69, - 0x7e, 0x19, 0x8a, 0x5d, 0x0a, 0x09, 0x28, 0x0f, 0x88, 0xa0, 0xb5, 0x6d, 0x95, 0xe2, 0x71, 0x18, - 0x98, 0xca, 0xce, 0x2f, 0x89, 0x0a, 0x07, 0x1b, 0x60, 0xeb, 0x1c, 0x73, 0x4e, 0xec, 0xda, 0x4e, - 0x5d, 0x6b, 0xee, 0x58, 0x20, 0x0c, 0xcc, 0x18, 0x41, 0xf1, 0x0a, 0x4d, 0xa0, 0xcb, 0xe6, 0xf2, - 0xda, 0x6e, 0xbd, 0xd4, 0xac, 0x58, 0xbb, 0x61, 0x60, 0x46, 0x00, 0x8a, 0x16, 0x49, 0x53, 0x9f, - 0x4e, 0x11, 0xb9, 0x20, 0x8c, 0x48, 0x9a, 0x80, 0xaa, 0x59, 0xd1, 0x94, 0xc6, 0x51, 0xc6, 0x92, - 0xa9, 0x3b, 0x2e, 0x95, 0xa9, 0xf7, 0x96, 0xa9, 0x23, 0x04, 0xc5, 0x6b, 0xe3, 0x7b, 0x0d, 0xdc, - 0xfd, 0x78, 0xe2, 0x08, 0xf2, 0xa1, 0xc3, 0x45, 0xa2, 0xdd, 0xf7, 0x41, 0x35, 0x01, 0xb3, 0xf2, - 0x3d, 0x0c, 0x03, 0x73, 0xcd, 0x87, 0xd6, 0x10, 0x29, 0xca, 0x45, 0xb4, 0x9e, 0xc0, 0x62, 0xc6, - 0x63, 0x21, 0x2b, 0x51, 0x66, 0x3d, 0x68, 0xc5, 0x6e, 0xfc, 0xae, 0x81, 0xea, 0x43, 0xcc, 0xec, - 0x53, 0xca, 0xbe, 0x4c, 0x4a, 0x7a, 0x17, 0x1c, 0x9c, 0x4c, 0xe9, 0x68, 0xd2, 0xa7, 0x0b, 0x97, - 0xaa, 0x68, 0xdf, 0x7a, 0x29, 0x0c, 0xcc, 0x55, 0x17, 0x5a, 0x05, 0xe0, 0x29, 0x80, 0x5d, 0x72, - 0xd9, 0xa3, 0x17, 0xe2, 0x12, 0x33, 0x32, 0x20, 0x8c, 0x3b, 0xd4, 0x8f, 0x6b, 0x7a, 0x39, 0x0c, - 0xcc, 0x0d, 0x5e, 0xb4, 0x01, 0xdb, 0xf0, 0x5d, 0xa5, 0xff, 0xfc, 0x5d, 0x7f, 0x17, 0x41, 0xf5, - 0x41, 0x72, 0xe0, 0x74, 0xa8, 0x7f, 0xe1, 0x8c, 0x61, 0x13, 0xec, 0x74, 0x67, 0x5e, 0x97, 0xda, - 0x24, 0x6a, 0x71, 0xc9, 0xaa, 0x84, 0x81, 0x99, 0x60, 0x28, 0xd9, 0xc1, 0x57, 0xc1, 0xee, 0x23, - 0xc7, 0x7f, 0x3c, 0xa3, 0x6c, 0xe6, 0xa9, 0xca, 0x75, 0x6b, 0x3f, 0x0c, 0xcc, 0x25, 0x88, 0x96, - 0x5b, 0xc9, 0xe0, 0x23, 0xc7, 0x97, 0xfa, 0xea, 0x4f, 0x18, 0xe1, 0x13, 0xea, 0xda, 0xaa, 0x52, - 0x3d, 0x62, 0x70, 0xd5, 0x87, 0xd6, 0x90, 0x38, 0x82, 0xd4, 0xf0, 0x32, 0x42, 0x39, 0x13, 0x21, - 0xe3, 0x43, 0x6b, 0x08, 0xfc, 0x1a, 0xec, 0x2d, 0x3a, 0x70, 0x4a, 0x48, 0x7c, 0x5c, 0x7c, 0x12, - 0x06, 0x66, 0x1a, 0xce, 0x6f, 0xde, 0xd2, 0x51, 0x1b, 0x3f, 0x96, 0x01, 0x5c, 0xed, 0xf5, 0xe0, - 0x18, 0x8a, 0x74, 0x0f, 0x23, 0x45, 
0x0f, 0x32, 0x3d, 0xcc, 0xaf, 0x9c, 0x14, 0x19, 0xdf, 0x6a, - 0x1b, 0xd8, 0x88, 0xb4, 0xf7, 0xf9, 0x26, 0x36, 0xf2, 0x2b, 0x62, 0x9d, 0xd6, 0xb8, 0x96, 0x2c, - 0xaf, 0xa5, 0x4c, 0x2d, 0x19, 0x5f, 0xbe, 0xb5, 0xfc, 0xab, 0x40, 0xca, 0xff, 0xa7, 0x40, 0x7e, - 0x2b, 0x82, 0x3d, 0x79, 0xb8, 0xde, 0x27, 0x02, 0x3b, 0x2e, 0x87, 0x6f, 0x02, 0x7d, 0x80, 0xdd, - 0x19, 0x51, 0xaa, 0xb8, 0x73, 0x7c, 0x18, 0xfd, 0xd9, 0x5b, 0xf2, 0x11, 0x85, 0xf7, 0x9f, 0x4f, - 0x49, 0x7c, 0x32, 0x4b, 0x13, 0x45, 0x0b, 0x9c, 0x00, 0xfd, 0x9c, 0x5e, 0x12, 0x16, 0xd3, 0x89, - 0xe4, 0x03, 0x0a, 0xc8, 0xaf, 0xec, 0x28, 0x1e, 0xf4, 0xc1, 0xb6, 0x85, 0x5d, 0xbc, 0xf8, 0xbf, - 0x57, 0xac, 0x7e, 0x18, 0x98, 0x0b, 0x28, 0xbf, 0x6c, 0x8b, 0x88, 0xf0, 0x35, 0xb0, 0x77, 0x9f, - 0xb8, 0x64, 0x8c, 0x05, 0xb1, 0xfb, 0x34, 0x26, 0xe7, 0x40, 0x92, 0x93, 0x82, 0x51, 0xda, 0x68, - 0xfc, 0xa0, 0x83, 0x6d, 0xd9, 0xb0, 0x1e, 0x11, 0x72, 0xd2, 0x9e, 0x70, 0x62, 0x47, 0xcd, 0x49, - 0x4d, 0x5a, 0x02, 0xe6, 0x38, 0x69, 0x49, 0x4c, 0xa9, 0x28, 0x69, 0x2c, 0x1a, 0x55, 0x5c, 0x2a, - 0x2a, 0x05, 0xe7, 0xa8, 0xa8, 0x54, 0x54, 0xf8, 0x15, 0xd8, 0xe9, 0x53, 0x81, 0x5d, 0x79, 0x35, - 0x8a, 0x18, 0x7a, 0x22, 0x4f, 0xf2, 0x05, 0x96, 0x5f, 0xd6, 0x24, 0xa4, 0xd4, 0x84, 0xda, 0x77, - 0x17, 0xfc, 0x28, 0x4d, 0xc4, 0x50, 0x8e, 0x9a, 0x88, 0x23, 0x4a, 0x52, 0xd5, 0x56, 0x5d, 0x9c, - 0xf4, 0x25, 0xa9, 0x09, 0x98, 0x23, 0xa9, 0x49, 0x4c, 0xf8, 0x1e, 0xd8, 0x95, 0xaa, 0x3a, 0x13, - 0xc4, 0xe3, 0xb5, 0xad, 0x7a, 0xa9, 0xb9, 0x77, 0x0c, 0x53, 0xe3, 0x19, 0x4f, 0x70, 0xf4, 0x33, - 0x4c, 0x1e, 0x44, 0xcb, 0xed, 0xbd, 0x7b, 0x60, 0x3f, 0x33, 0xc7, 0x70, 0x5b, 0x5d, 0x60, 0xab, - 0x05, 0xb8, 0x25, 0x6f, 0x99, 0x55, 0x0d, 0xee, 0x44, 0x97, 0xc1, 0x6a, 0xd1, 0xea, 0x5e, 0xdd, - 0x18, 0x85, 0xeb, 0x1b, 0xa3, 0xf0, 0xe2, 0xc6, 0xd0, 0xbe, 0x99, 0x1b, 0xda, 0x4f, 0x73, 0x43, - 0xfb, 0x75, 0x6e, 0x68, 0x57, 0x73, 0x43, 0xbb, 0x9e, 0x1b, 0xda, 0x9f, 0x73, 0x43, 0xfb, 0x6b, - 0x6e, 0x14, 0x5e, 0xcc, 0x0d, 0xed, 0xbb, 0x5b, 0xa3, 0x70, 0x75, 0x6b, 0x14, 0xae, 0x6f, 0x8d, - 0xc2, 0x67, 0x87, 0xfc, 0x39, 0x17, 0xc4, 0xeb, 0x79, 0x98, 0x89, 0x0e, 0xf5, 0x05, 0xc3, 0x23, - 0xc1, 0x87, 0x5b, 0xaa, 0xd0, 0xd7, 0xff, 0x09, 0x00, 0x00, 0xff, 0xff, 0xad, 0x5b, 0xe6, 0xfd, - 0x6b, 0x0c, 0x00, 0x00, + // 883 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x56, 0x4d, 0x8f, 0xe3, 0x34, + 0x18, 0x6e, 0x9a, 0x76, 0x3e, 0x3c, 0x9d, 0x25, 0x6b, 0x16, 0x29, 0xe2, 0x10, 0x8f, 0x7a, 0xaa, + 0x40, 0xd3, 0x4a, 0x80, 0xb4, 0x12, 0x5c, 0xd8, 0x74, 0x87, 0x65, 0xa4, 0xdd, 0xee, 0x6c, 0x66, + 0x28, 0x2c, 0x17, 0xe4, 0x26, 0x9e, 0x34, 0xa2, 0xb1, 0x2b, 0xdb, 0xdd, 0x0f, 0x24, 0x24, 0x4e, + 0x9c, 0xf9, 0x19, 0x88, 0x5f, 0xc2, 0x71, 0x8e, 0x73, 0x0a, 0x4c, 0xe7, 0x02, 0x11, 0x87, 0x95, + 0xf8, 0x03, 0xc8, 0x4e, 0x9b, 0x8f, 0xe9, 0x69, 0x44, 0xb4, 0x27, 0xdb, 0x8f, 0xed, 0xe7, 0xc9, + 0xfb, 0xd8, 0xaf, 0xdf, 0x00, 0x2b, 0x64, 0x2f, 0x08, 0xa7, 0x98, 0xfa, 0xa4, 0x3f, 0xe7, 0x4c, + 0x32, 0xd8, 0xd6, 0xcd, 0xfb, 0x87, 0x61, 0x24, 0xa7, 0x8b, 0x49, 0xdf, 0x67, 0xf1, 0x20, 0x64, + 0x21, 0x1b, 0x68, 0x78, 0xb2, 0x38, 0xd7, 0x23, 0x3d, 0xd0, 0xbd, 0x6c, 0x57, 0xf7, 0x9f, 0x6d, + 0xf0, 0xce, 0x23, 0x42, 0x09, 0xc7, 0xb3, 0x13, 0xce, 0xe6, 0x4c, 0xe0, 0x19, 0x44, 0xa0, 0x3d, + 0x62, 0xd4, 0x27, 0xb6, 0x71, 0x60, 0xf4, 0x5a, 0xee, 0x6e, 0x9a, 0xa0, 0x0c, 0xf0, 0xb2, 0x06, + 0xf6, 0x01, 0x18, 0xb2, 0x38, 0x8e, 0xe4, 0x97, 0x58, 0x4c, 0xed, 0xe6, 0x81, 0xd1, 0xeb, 0xb8, + 0x77, 0xd2, 0x04, 0x95, 0x50, 0xaf, 0xd4, 0x87, 0x9f, 0x82, 0x3b, 0xa7, 0x12, 0x73, 
0x39, 0x66, + 0x92, 0x1c, 0xcd, 0x99, 0x3f, 0xb5, 0x4d, 0xcd, 0x0c, 0xd3, 0x04, 0x15, 0x33, 0x99, 0xc4, 0x8d, + 0x95, 0xf0, 0x13, 0xd0, 0x39, 0xa2, 0x41, 0xb1, 0xb3, 0xa5, 0x77, 0x5a, 0x69, 0x82, 0xd6, 0x78, + 0xb6, 0xaf, 0xb2, 0x0a, 0x4e, 0x80, 0xf9, 0x9c, 0x08, 0xbb, 0xad, 0x3f, 0xed, 0x24, 0x4d, 0x90, + 0x1a, 0xfe, 0xf6, 0x07, 0x3a, 0x8a, 0xb1, 0x9c, 0x0e, 0x26, 0x51, 0xd8, 0x3f, 0xa6, 0xf2, 0xb3, + 0x92, 0x55, 0xf1, 0x62, 0x26, 0xa3, 0x17, 0x84, 0x8b, 0x57, 0x83, 0xf8, 0xd5, 0xa1, 0x3f, 0xc5, + 0x11, 0x3d, 0xf4, 0x19, 0x27, 0x87, 0x21, 0x1b, 0x04, 0x58, 0xe2, 0xbe, 0x1b, 0x85, 0xc7, 0x54, + 0x0e, 0xb1, 0x90, 0x84, 0x7b, 0x8a, 0x0d, 0x7e, 0x07, 0x9a, 0x23, 0x66, 0x6f, 0x69, 0x89, 0xa7, + 0x69, 0x82, 0x9a, 0x23, 0x56, 0x9f, 0x42, 0x73, 0xc4, 0x20, 0x01, 0xad, 0x31, 0x91, 0xcc, 0xde, + 0xd6, 0x12, 0xcf, 0xd2, 0x04, 0xe9, 0x71, 0x7d, 0x22, 0x9a, 0x0e, 0x52, 0xb0, 0xfd, 0x60, 0x22, + 0x24, 0x8e, 0xa8, 0xbd, 0xa3, 0x95, 0xce, 0xd2, 0x04, 0xad, 0xa1, 0xfa, 0xc4, 0xd6, 0x8c, 0xf0, + 0x07, 0xb0, 0xf7, 0x6c, 0xc1, 0xf8, 0x22, 0x3e, 0x95, 0xf8, 0x7b, 0x62, 0xef, 0x6a, 0xcd, 0x6f, + 0xd2, 0x04, 0x95, 0xe1, 0xfa, 0x74, 0xcb, 0xac, 0xb0, 0x0b, 0xb6, 0x4e, 0xb0, 0x10, 0x24, 0xb0, + 0xc1, 0x81, 0xd1, 0xdb, 0x71, 0x41, 0x9a, 0xa0, 0x15, 0xe2, 0xad, 0x5a, 0xb5, 0x66, 0x38, 0x63, + 0x6a, 0xcd, 0x5e, 0xb1, 0x26, 0x43, 0xbc, 0x55, 0x0b, 0xef, 0x83, 0xfd, 0x63, 0x21, 0x16, 0x84, + 0x3f, 0x08, 0x02, 0x4e, 0x84, 0xb0, 0x3b, 0x3a, 0x8a, 0xbb, 0x69, 0x82, 0xaa, 0x13, 0x5e, 0x75, + 0x08, 0x7f, 0x04, 0x9d, 0x75, 0x9e, 0x0d, 0x99, 0x90, 0xf6, 0xbe, 0xde, 0xf7, 0x5c, 0x5d, 0xe7, + 0x32, 0x5e, 0x5f, 0xf8, 0x15, 0xda, 0xee, 0xdf, 0x4d, 0x60, 0x3d, 0xca, 0x5f, 0x8e, 0x21, 0xa3, + 0xe7, 0x51, 0x08, 0x7b, 0x60, 0x67, 0xb4, 0x88, 0x47, 0x2c, 0x20, 0x42, 0xa7, 0xbc, 0xe9, 0x76, + 0xd2, 0x04, 0xe5, 0x98, 0x97, 0xf7, 0xe0, 0x87, 0x60, 0xf7, 0x49, 0x44, 0x33, 0x43, 0x75, 0xde, + 0xb7, 0xdd, 0xfd, 0x34, 0x41, 0x05, 0xe8, 0x15, 0x5d, 0xf8, 0x39, 0xb0, 0x9e, 0x44, 0x54, 0x99, + 0x7a, 0x36, 0xe5, 0x44, 0x4c, 0xd9, 0x2c, 0xd0, 0x79, 0xdf, 0x76, 0xef, 0xa5, 0x09, 0xda, 0x98, + 0xf3, 0x36, 0x90, 0x15, 0x83, 0xba, 0xa4, 0x05, 0x43, 0xab, 0xc2, 0x50, 0x99, 0xf3, 0x36, 0x10, + 0x75, 0xd7, 0xd6, 0xf1, 0x7f, 0x41, 0xc8, 0xea, 0x3d, 0xd0, 0x77, 0xad, 0x04, 0xd7, 0x78, 0xd7, + 0x4a, 0xac, 0xdd, 0x9f, 0x4d, 0x00, 0x6f, 0x7a, 0x3d, 0xfe, 0xa8, 0xea, 0xa1, 0xb2, 0xbb, 0x79, + 0x4b, 0x0f, 0x9b, 0x7a, 0xcf, 0xff, 0xf1, 0xd0, 0xac, 0x30, 0xdc, 0xd2, 0xc3, 0xd6, 0x5b, 0xf4, + 0x10, 0x0e, 0xc1, 0xdd, 0xc7, 0x58, 0xc8, 0x35, 0x94, 0x95, 0xa5, 0xb6, 0x2e, 0x01, 0xef, 0xa5, + 0x09, 0xda, 0x9c, 0xf4, 0x36, 0xa1, 0xae, 0x0f, 0xac, 0xa7, 0x34, 0x64, 0x11, 0x0d, 0x55, 0x81, + 0x08, 0x1e, 0x47, 0x42, 0xaa, 0x24, 0x7f, 0x18, 0x71, 0xe2, 0x4b, 0xdb, 0x38, 0x30, 0x7b, 0xad, + 0x2c, 0xc9, 0x33, 0xc4, 0x5b, 0xb5, 0xea, 0xa4, 0x1e, 0x92, 0x19, 0x09, 0xb1, 0x24, 0xca, 0x75, + 0xb5, 0x4c, 0x9f, 0x54, 0x0e, 0x7a, 0x45, 0xb7, 0xfb, 0xaf, 0x09, 0xde, 0xcd, 0x47, 0xa7, 0x43, + 0xa5, 0x74, 0x4c, 0xcf, 0x19, 0x7c, 0x09, 0xc0, 0x19, 0x93, 0x78, 0x76, 0xc2, 0x5e, 0x12, 0xae, + 0xcf, 0xbb, 0xe3, 0x7e, 0xad, 0x6a, 0x65, 0x81, 0xd6, 0xe7, 0x5d, 0x89, 0x14, 0x4a, 0xb0, 0xfb, + 0x95, 0x20, 0x41, 0xa6, 0x9b, 0xd5, 0xe8, 0xb1, 0xfa, 0xfa, 0x1c, 0xac, 0x4f, 0xb6, 0xe0, 0xcc, + 0xc3, 0xcd, 0xde, 0x76, 0xf3, 0x46, 0xb8, 0x35, 0x3f, 0xed, 0x25, 0xd2, 0x75, 0xb8, 0x99, 0x6e, + 0xab, 0x1a, 0x6e, 0xcd, 0xb2, 0x05, 0xe7, 0x07, 0xf7, 0xc1, 0xbe, 0x3a, 0xe9, 0x31, 0x9e, 0x2d, + 0xc8, 0xd9, 0xeb, 0x39, 0x81, 0xdb, 0xfa, 0xc7, 0xc3, 0x6a, 0xc0, 0x2d, 0xf5, 0x77, 0x60, 0x19, + 0x70, 0x27, 
0x2b, 0xe2, 0x56, 0x13, 0xee, 0xe5, 0x75, 0xd6, 0x32, 0xdd, 0xd1, 0xc5, 0x95, 0xd3, + 0xb8, 0xbc, 0x72, 0x1a, 0x6f, 0xae, 0x1c, 0xe3, 0xa7, 0xa5, 0x63, 0xfc, 0xba, 0x74, 0x8c, 0xdf, + 0x97, 0x8e, 0x71, 0xb1, 0x74, 0x8c, 0xcb, 0xa5, 0x63, 0xfc, 0xb9, 0x74, 0x8c, 0xbf, 0x96, 0x4e, + 0xe3, 0xcd, 0xd2, 0x31, 0x7e, 0xb9, 0x76, 0x1a, 0x17, 0xd7, 0x4e, 0xe3, 0xf2, 0xda, 0x69, 0x7c, + 0x7b, 0x4f, 0xbc, 0x16, 0x92, 0xc4, 0xa7, 0x31, 0xe6, 0x72, 0xc8, 0xa8, 0xe4, 0xd8, 0x97, 0x62, + 0xb2, 0xa5, 0x7f, 0xe7, 0x3e, 0xfe, 0x2f, 0x00, 0x00, 0xff, 0xff, 0x52, 0x6e, 0xfa, 0xb5, 0x18, + 0x0a, 0x00, 0x00, } func (x VoteValueType) String() string { @@ -658,16 +541,16 @@ func (this *GeneralProposal) Equal(that interface{}) bool { } else if this == nil { return false } - if !bytes.Equal(this.IssuerAddress, that1.IssuerAddress) { + if this.Nonce != that1.Nonce { return false } if !bytes.Equal(this.CommitHash, that1.CommitHash) { return false } - if this.StartVoteNonce != that1.StartVoteNonce { + if this.StartVoteEpoch != that1.StartVoteEpoch { return false } - if this.EndVoteNonce != that1.EndVoteNonce { + if this.EndVoteEpoch != that1.EndVoteEpoch { return false } { @@ -688,80 +571,33 @@ func (this *GeneralProposal) Equal(that interface{}) bool { return false } } - if this.Passed != that1.Passed { - return false - } - if len(this.Votes) != len(that1.Votes) { - return false - } - for i := range this.Votes { - if !bytes.Equal(this.Votes[i], that1.Votes[i]) { + { + __caster := &github_com_multiversx_mx_chain_core_go_data.BigIntCaster{} + if !__caster.Equal(this.Abstain, that1.Abstain) { return false } } - if !bytes.Equal(this.TopReference, that1.TopReference) { - return false - } - if this.Closed != that1.Closed { - return false - } - return true -} -func (this *WhiteListProposal) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*WhiteListProposal) - if !ok { - that2, ok := that.(WhiteListProposal) - if ok { - that1 = &that2 - } else { + { + __caster := &github_com_multiversx_mx_chain_core_go_data.BigIntCaster{} + if !__caster.Equal(this.QuorumStake, that1.QuorumStake) { return false } } - if that1 == nil { - return this == nil - } else if this == nil { + if this.Passed != that1.Passed { return false } - if !bytes.Equal(this.WhiteListAddress, that1.WhiteListAddress) { + if this.Closed != that1.Closed { return false } - if !bytes.Equal(this.ProposalStatus, that1.ProposalStatus) { + if !bytes.Equal(this.IssuerAddress, that1.IssuerAddress) { return false } - return true -} -func (this *HardForkProposal) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*HardForkProposal) - if !ok { - that2, ok := that.(HardForkProposal) - if ok { - that1 = &that2 - } else { + { + __caster := &github_com_multiversx_mx_chain_core_go_data.BigIntCaster{} + if !__caster.Equal(this.ProposalCost, that1.ProposalCost) { return false } } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.EpochToHardFork != that1.EpochToHardFork { - return false - } - if !bytes.Equal(this.NewSoftwareVersion, that1.NewSoftwareVersion) { - return false - } - if !bytes.Equal(this.ProposalStatus, that1.ProposalStatus) { - return false - } return true } func (this *GovernanceConfig) Equal(that interface{}) bool { @@ -822,23 +658,14 @@ func (this *GovernanceConfigV2) Equal(that interface{}) bool { } else if this == nil { return false } - { - __caster := &github_com_multiversx_mx_chain_core_go_data.BigIntCaster{} - if 
!__caster.Equal(this.MinQuorum, that1.MinQuorum) { - return false - } + if this.MinQuorum != that1.MinQuorum { + return false } - { - __caster := &github_com_multiversx_mx_chain_core_go_data.BigIntCaster{} - if !__caster.Equal(this.MinPassThreshold, that1.MinPassThreshold) { - return false - } + if this.MinPassThreshold != that1.MinPassThreshold { + return false } - { - __caster := &github_com_multiversx_mx_chain_core_go_data.BigIntCaster{} - if !__caster.Equal(this.MinVetoThreshold, that1.MinVetoThreshold) { - return false - } + if this.MinVetoThreshold != that1.MinVetoThreshold { + return false } { __caster := &github_com_multiversx_mx_chain_core_go_data.BigIntCaster{} @@ -846,16 +673,19 @@ func (this *GovernanceConfigV2) Equal(that interface{}) bool { return false } } + if this.LastProposalNonce != that1.LastProposalNonce { + return false + } return true } -func (this *VoteDetails) Equal(that interface{}) bool { +func (this *OngoingVotedList) Equal(that interface{}) bool { if that == nil { return this == nil } - that1, ok := that.(*VoteDetails) + that1, ok := that.(*OngoingVotedList) if !ok { - that2, ok := that.(VoteDetails) + that2, ok := that.(OngoingVotedList) if ok { that1 = &that2 } else { @@ -867,34 +697,32 @@ func (this *VoteDetails) Equal(that interface{}) bool { } else if this == nil { return false } - if this.Value != that1.Value { + if len(this.Direct) != len(that1.Direct) { return false } - { - __caster := &github_com_multiversx_mx_chain_core_go_data.BigIntCaster{} - if !__caster.Equal(this.Power, that1.Power) { + for i := range this.Direct { + if this.Direct[i] != that1.Direct[i] { return false } } - { - __caster := &github_com_multiversx_mx_chain_core_go_data.BigIntCaster{} - if !__caster.Equal(this.Balance, that1.Balance) { + if len(this.Delegated) != len(that1.Delegated) { + return false + } + for i := range this.Delegated { + if this.Delegated[i] != that1.Delegated[i] { return false } } - if !bytes.Equal(this.DelegatedTo, that1.DelegatedTo) { - return false - } return true } -func (this *VoteSet) Equal(that interface{}) bool { +func (this *DelegatedSCVoteInfo) Equal(that interface{}) bool { if that == nil { return this == nil } - that1, ok := that.(*VoteSet) + that1, ok := that.(*DelegatedSCVoteInfo) if !ok { - that2, ok := that.(VoteSet) + that2, ok := that.(DelegatedSCVoteInfo) if ok { that1 = &that2 } else { @@ -908,39 +736,25 @@ func (this *VoteSet) Equal(that interface{}) bool { } { __caster := &github_com_multiversx_mx_chain_core_go_data.BigIntCaster{} - if !__caster.Equal(this.UsedPower, that1.UsedPower) { - return false - } - } - { - __caster := &github_com_multiversx_mx_chain_core_go_data.BigIntCaster{} - if !__caster.Equal(this.UsedBalance, that1.UsedBalance) { + if !__caster.Equal(this.TotalPower, that1.TotalPower) { return false } } { __caster := &github_com_multiversx_mx_chain_core_go_data.BigIntCaster{} - if !__caster.Equal(this.TotalYes, that1.TotalYes) { + if !__caster.Equal(this.UsedPower, that1.UsedPower) { return false } } { __caster := &github_com_multiversx_mx_chain_core_go_data.BigIntCaster{} - if !__caster.Equal(this.TotalNo, that1.TotalNo) { + if !__caster.Equal(this.TotalStake, that1.TotalStake) { return false } } { __caster := &github_com_multiversx_mx_chain_core_go_data.BigIntCaster{} - if !__caster.Equal(this.TotalVeto, that1.TotalVeto) { - return false - } - } - if len(this.VoteItems) != len(that1.VoteItems) { - return false - } - for i := range this.VoteItems { - if !this.VoteItems[i].Equal(that1.VoteItems[i]) { + if 
!__caster.Equal(this.UsedStake, that1.UsedStake) { return false } } @@ -950,42 +764,21 @@ func (this *GeneralProposal) GoString() string { if this == nil { return "nil" } - s := make([]string, 0, 15) + s := make([]string, 0, 17) s = append(s, "&systemSmartContracts.GeneralProposal{") - s = append(s, "IssuerAddress: "+fmt.Sprintf("%#v", this.IssuerAddress)+",\n") + s = append(s, "Nonce: "+fmt.Sprintf("%#v", this.Nonce)+",\n") s = append(s, "CommitHash: "+fmt.Sprintf("%#v", this.CommitHash)+",\n") - s = append(s, "StartVoteNonce: "+fmt.Sprintf("%#v", this.StartVoteNonce)+",\n") - s = append(s, "EndVoteNonce: "+fmt.Sprintf("%#v", this.EndVoteNonce)+",\n") + s = append(s, "StartVoteEpoch: "+fmt.Sprintf("%#v", this.StartVoteEpoch)+",\n") + s = append(s, "EndVoteEpoch: "+fmt.Sprintf("%#v", this.EndVoteEpoch)+",\n") s = append(s, "Yes: "+fmt.Sprintf("%#v", this.Yes)+",\n") s = append(s, "No: "+fmt.Sprintf("%#v", this.No)+",\n") s = append(s, "Veto: "+fmt.Sprintf("%#v", this.Veto)+",\n") + s = append(s, "Abstain: "+fmt.Sprintf("%#v", this.Abstain)+",\n") + s = append(s, "QuorumStake: "+fmt.Sprintf("%#v", this.QuorumStake)+",\n") s = append(s, "Passed: "+fmt.Sprintf("%#v", this.Passed)+",\n") - s = append(s, "Votes: "+fmt.Sprintf("%#v", this.Votes)+",\n") - s = append(s, "TopReference: "+fmt.Sprintf("%#v", this.TopReference)+",\n") s = append(s, "Closed: "+fmt.Sprintf("%#v", this.Closed)+",\n") - s = append(s, "}") - return strings.Join(s, "") -} -func (this *WhiteListProposal) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 6) - s = append(s, "&systemSmartContracts.WhiteListProposal{") - s = append(s, "WhiteListAddress: "+fmt.Sprintf("%#v", this.WhiteListAddress)+",\n") - s = append(s, "ProposalStatus: "+fmt.Sprintf("%#v", this.ProposalStatus)+",\n") - s = append(s, "}") - return strings.Join(s, "") -} -func (this *HardForkProposal) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 7) - s = append(s, "&systemSmartContracts.HardForkProposal{") - s = append(s, "EpochToHardFork: "+fmt.Sprintf("%#v", this.EpochToHardFork)+",\n") - s = append(s, "NewSoftwareVersion: "+fmt.Sprintf("%#v", this.NewSoftwareVersion)+",\n") - s = append(s, "ProposalStatus: "+fmt.Sprintf("%#v", this.ProposalStatus)+",\n") + s = append(s, "IssuerAddress: "+fmt.Sprintf("%#v", this.IssuerAddress)+",\n") + s = append(s, "ProposalCost: "+fmt.Sprintf("%#v", this.ProposalCost)+",\n") s = append(s, "}") return strings.Join(s, "") } @@ -1007,42 +800,37 @@ func (this *GovernanceConfigV2) GoString() string { if this == nil { return "nil" } - s := make([]string, 0, 8) + s := make([]string, 0, 9) s = append(s, "&systemSmartContracts.GovernanceConfigV2{") s = append(s, "MinQuorum: "+fmt.Sprintf("%#v", this.MinQuorum)+",\n") s = append(s, "MinPassThreshold: "+fmt.Sprintf("%#v", this.MinPassThreshold)+",\n") s = append(s, "MinVetoThreshold: "+fmt.Sprintf("%#v", this.MinVetoThreshold)+",\n") s = append(s, "ProposalFee: "+fmt.Sprintf("%#v", this.ProposalFee)+",\n") + s = append(s, "LastProposalNonce: "+fmt.Sprintf("%#v", this.LastProposalNonce)+",\n") s = append(s, "}") return strings.Join(s, "") } -func (this *VoteDetails) GoString() string { +func (this *OngoingVotedList) GoString() string { if this == nil { return "nil" } - s := make([]string, 0, 8) - s = append(s, "&systemSmartContracts.VoteDetails{") - s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") - s = append(s, "Power: "+fmt.Sprintf("%#v", this.Power)+",\n") - s = append(s, "Balance: "+fmt.Sprintf("%#v", 
this.Balance)+",\n") - s = append(s, "DelegatedTo: "+fmt.Sprintf("%#v", this.DelegatedTo)+",\n") + s := make([]string, 0, 6) + s = append(s, "&systemSmartContracts.OngoingVotedList{") + s = append(s, "Direct: "+fmt.Sprintf("%#v", this.Direct)+",\n") + s = append(s, "Delegated: "+fmt.Sprintf("%#v", this.Delegated)+",\n") s = append(s, "}") return strings.Join(s, "") } -func (this *VoteSet) GoString() string { +func (this *DelegatedSCVoteInfo) GoString() string { if this == nil { return "nil" } - s := make([]string, 0, 10) - s = append(s, "&systemSmartContracts.VoteSet{") + s := make([]string, 0, 8) + s = append(s, "&systemSmartContracts.DelegatedSCVoteInfo{") + s = append(s, "TotalPower: "+fmt.Sprintf("%#v", this.TotalPower)+",\n") s = append(s, "UsedPower: "+fmt.Sprintf("%#v", this.UsedPower)+",\n") - s = append(s, "UsedBalance: "+fmt.Sprintf("%#v", this.UsedBalance)+",\n") - s = append(s, "TotalYes: "+fmt.Sprintf("%#v", this.TotalYes)+",\n") - s = append(s, "TotalNo: "+fmt.Sprintf("%#v", this.TotalNo)+",\n") - s = append(s, "TotalVeto: "+fmt.Sprintf("%#v", this.TotalVeto)+",\n") - if this.VoteItems != nil { - s = append(s, "VoteItems: "+fmt.Sprintf("%#v", this.VoteItems)+",\n") - } + s = append(s, "TotalStake: "+fmt.Sprintf("%#v", this.TotalStake)+",\n") + s = append(s, "UsedStake: "+fmt.Sprintf("%#v", this.UsedStake)+",\n") s = append(s, "}") return strings.Join(s, "") } @@ -1074,6 +862,24 @@ func (m *GeneralProposal) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + { + __caster := &github_com_multiversx_mx_chain_core_go_data.BigIntCaster{} + size := __caster.Size(m.ProposalCost) + i -= size + if _, err := __caster.MarshalTo(m.ProposalCost, dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintGovernance(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x6a + if len(m.IssuerAddress) > 0 { + i -= len(m.IssuerAddress) + copy(dAtA[i:], m.IssuerAddress) + i = encodeVarintGovernance(dAtA, i, uint64(len(m.IssuerAddress))) + i-- + dAtA[i] = 0x62 + } if m.Closed { i-- if m.Closed { @@ -1084,22 +890,6 @@ func (m *GeneralProposal) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x58 } - if len(m.TopReference) > 0 { - i -= len(m.TopReference) - copy(dAtA[i:], m.TopReference) - i = encodeVarintGovernance(dAtA, i, uint64(len(m.TopReference))) - i-- - dAtA[i] = 0x52 - } - if len(m.Votes) > 0 { - for iNdEx := len(m.Votes) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Votes[iNdEx]) - copy(dAtA[i:], m.Votes[iNdEx]) - i = encodeVarintGovernance(dAtA, i, uint64(len(m.Votes[iNdEx]))) - i-- - dAtA[i] = 0x4a - } - } if m.Passed { i-- if m.Passed { @@ -1108,8 +898,30 @@ func (m *GeneralProposal) MarshalToSizedBuffer(dAtA []byte) (int, error) { dAtA[i] = 0 } i-- - dAtA[i] = 0x40 + dAtA[i] = 0x50 + } + { + __caster := &github_com_multiversx_mx_chain_core_go_data.BigIntCaster{} + size := __caster.Size(m.QuorumStake) + i -= size + if _, err := __caster.MarshalTo(m.QuorumStake, dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintGovernance(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + { + __caster := &github_com_multiversx_mx_chain_core_go_data.BigIntCaster{} + size := __caster.Size(m.Abstain) + i -= size + if _, err := __caster.MarshalTo(m.Abstain, dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintGovernance(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x42 { __caster := &github_com_multiversx_mx_chain_core_go_data.BigIntCaster{} size := __caster.Size(m.Veto) @@ -1143,13 +955,13 @@ func (m *GeneralProposal) MarshalToSizedBuffer(dAtA []byte) (int, 
error) { } i-- dAtA[i] = 0x2a - if m.EndVoteNonce != 0 { - i = encodeVarintGovernance(dAtA, i, uint64(m.EndVoteNonce)) + if m.EndVoteEpoch != 0 { + i = encodeVarintGovernance(dAtA, i, uint64(m.EndVoteEpoch)) i-- dAtA[i] = 0x20 } - if m.StartVoteNonce != 0 { - i = encodeVarintGovernance(dAtA, i, uint64(m.StartVoteNonce)) + if m.StartVoteEpoch != 0 { + i = encodeVarintGovernance(dAtA, i, uint64(m.StartVoteEpoch)) i-- dAtA[i] = 0x18 } @@ -1160,89 +972,8 @@ func (m *GeneralProposal) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x12 } - if len(m.IssuerAddress) > 0 { - i -= len(m.IssuerAddress) - copy(dAtA[i:], m.IssuerAddress) - i = encodeVarintGovernance(dAtA, i, uint64(len(m.IssuerAddress))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *WhiteListProposal) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *WhiteListProposal) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *WhiteListProposal) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.ProposalStatus) > 0 { - i -= len(m.ProposalStatus) - copy(dAtA[i:], m.ProposalStatus) - i = encodeVarintGovernance(dAtA, i, uint64(len(m.ProposalStatus))) - i-- - dAtA[i] = 0x12 - } - if len(m.WhiteListAddress) > 0 { - i -= len(m.WhiteListAddress) - copy(dAtA[i:], m.WhiteListAddress) - i = encodeVarintGovernance(dAtA, i, uint64(len(m.WhiteListAddress))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *HardForkProposal) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *HardForkProposal) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *HardForkProposal) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.ProposalStatus) > 0 { - i -= len(m.ProposalStatus) - copy(dAtA[i:], m.ProposalStatus) - i = encodeVarintGovernance(dAtA, i, uint64(len(m.ProposalStatus))) - i-- - dAtA[i] = 0x1a - } - if len(m.NewSoftwareVersion) > 0 { - i -= len(m.NewSoftwareVersion) - copy(dAtA[i:], m.NewSoftwareVersion) - i = encodeVarintGovernance(dAtA, i, uint64(len(m.NewSoftwareVersion))) - i-- - dAtA[i] = 0x12 - } - if m.EpochToHardFork != 0 { - i = encodeVarintGovernance(dAtA, i, uint64(m.EpochToHardFork)) + if m.Nonce != 0 { + i = encodeVarintGovernance(dAtA, i, uint64(m.Nonce)) i-- dAtA[i] = 0x8 } @@ -1323,6 +1054,11 @@ func (m *GovernanceConfigV2) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.LastProposalNonce != 0 { + i = encodeVarintGovernance(dAtA, i, uint64(m.LastProposalNonce)) + i-- + dAtA[i] = 0x28 + } { __caster := &github_com_multiversx_mx_chain_core_go_data.BigIntCaster{} size := __caster.Size(m.ProposalFee) @@ -1334,43 +1070,28 @@ func (m *GovernanceConfigV2) MarshalToSizedBuffer(dAtA []byte) (int, error) { } i-- dAtA[i] = 0x22 - { - __caster := &github_com_multiversx_mx_chain_core_go_data.BigIntCaster{} - size := __caster.Size(m.MinVetoThreshold) - i -= size - if _, err := __caster.MarshalTo(m.MinVetoThreshold, dAtA[i:]); err != nil { - return 0, err - } - i = encodeVarintGovernance(dAtA, i, uint64(size)) + if 
m.MinVetoThreshold != 0 { + i -= 4 + encoding_binary.LittleEndian.PutUint32(dAtA[i:], uint32(math.Float32bits(float32(m.MinVetoThreshold)))) + i-- + dAtA[i] = 0x1d } - i-- - dAtA[i] = 0x1a - { - __caster := &github_com_multiversx_mx_chain_core_go_data.BigIntCaster{} - size := __caster.Size(m.MinPassThreshold) - i -= size - if _, err := __caster.MarshalTo(m.MinPassThreshold, dAtA[i:]); err != nil { - return 0, err - } - i = encodeVarintGovernance(dAtA, i, uint64(size)) + if m.MinPassThreshold != 0 { + i -= 4 + encoding_binary.LittleEndian.PutUint32(dAtA[i:], uint32(math.Float32bits(float32(m.MinPassThreshold)))) + i-- + dAtA[i] = 0x15 } - i-- - dAtA[i] = 0x12 - { - __caster := &github_com_multiversx_mx_chain_core_go_data.BigIntCaster{} - size := __caster.Size(m.MinQuorum) - i -= size - if _, err := __caster.MarshalTo(m.MinQuorum, dAtA[i:]); err != nil { - return 0, err - } - i = encodeVarintGovernance(dAtA, i, uint64(size)) + if m.MinQuorum != 0 { + i -= 4 + encoding_binary.LittleEndian.PutUint32(dAtA[i:], uint32(math.Float32bits(float32(m.MinQuorum)))) + i-- + dAtA[i] = 0xd } - i-- - dAtA[i] = 0xa return len(dAtA) - i, nil } -func (m *VoteDetails) Marshal() (dAtA []byte, err error) { +func (m *OngoingVotedList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -1380,54 +1101,56 @@ func (m *VoteDetails) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *VoteDetails) MarshalTo(dAtA []byte) (int, error) { +func (m *OngoingVotedList) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *VoteDetails) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *OngoingVotedList) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if len(m.DelegatedTo) > 0 { - i -= len(m.DelegatedTo) - copy(dAtA[i:], m.DelegatedTo) - i = encodeVarintGovernance(dAtA, i, uint64(len(m.DelegatedTo))) + if len(m.Delegated) > 0 { + dAtA2 := make([]byte, len(m.Delegated)*10) + var j1 int + for _, num := range m.Delegated { + for num >= 1<<7 { + dAtA2[j1] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j1++ + } + dAtA2[j1] = uint8(num) + j1++ + } + i -= j1 + copy(dAtA[i:], dAtA2[:j1]) + i = encodeVarintGovernance(dAtA, i, uint64(j1)) i-- - dAtA[i] = 0x22 - } - { - __caster := &github_com_multiversx_mx_chain_core_go_data.BigIntCaster{} - size := __caster.Size(m.Balance) - i -= size - if _, err := __caster.MarshalTo(m.Balance, dAtA[i:]); err != nil { - return 0, err - } - i = encodeVarintGovernance(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - { - __caster := &github_com_multiversx_mx_chain_core_go_data.BigIntCaster{} - size := __caster.Size(m.Power) - i -= size - if _, err := __caster.MarshalTo(m.Power, dAtA[i:]); err != nil { - return 0, err - } - i = encodeVarintGovernance(dAtA, i, uint64(size)) + dAtA[i] = 0x12 } - i-- - dAtA[i] = 0x12 - if m.Value != 0 { - i = encodeVarintGovernance(dAtA, i, uint64(m.Value)) + if len(m.Direct) > 0 { + dAtA4 := make([]byte, len(m.Direct)*10) + var j3 int + for _, num := range m.Direct { + for num >= 1<<7 { + dAtA4[j3] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j3++ + } + dAtA4[j3] = uint8(num) + j3++ + } + i -= j3 + copy(dAtA[i:], dAtA4[:j3]) + i = encodeVarintGovernance(dAtA, i, uint64(j3)) i-- - dAtA[i] = 0x8 + dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *VoteSet) Marshal() (dAtA []byte, err error) { +func (m *DelegatedSCVoteInfo) Marshal() (dAtA []byte, err error) { size := 
m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -1437,46 +1160,21 @@ func (m *VoteSet) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *VoteSet) MarshalTo(dAtA []byte) (int, error) { +func (m *DelegatedSCVoteInfo) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *VoteSet) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *DelegatedSCVoteInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if len(m.VoteItems) > 0 { - for iNdEx := len(m.VoteItems) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.VoteItems[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGovernance(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x32 - } - } - { - __caster := &github_com_multiversx_mx_chain_core_go_data.BigIntCaster{} - size := __caster.Size(m.TotalVeto) - i -= size - if _, err := __caster.MarshalTo(m.TotalVeto, dAtA[i:]); err != nil { - return 0, err - } - i = encodeVarintGovernance(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2a { __caster := &github_com_multiversx_mx_chain_core_go_data.BigIntCaster{} - size := __caster.Size(m.TotalNo) + size := __caster.Size(m.UsedStake) i -= size - if _, err := __caster.MarshalTo(m.TotalNo, dAtA[i:]); err != nil { + if _, err := __caster.MarshalTo(m.UsedStake, dAtA[i:]); err != nil { return 0, err } i = encodeVarintGovernance(dAtA, i, uint64(size)) @@ -1485,9 +1183,9 @@ func (m *VoteSet) MarshalToSizedBuffer(dAtA []byte) (int, error) { dAtA[i] = 0x22 { __caster := &github_com_multiversx_mx_chain_core_go_data.BigIntCaster{} - size := __caster.Size(m.TotalYes) + size := __caster.Size(m.TotalStake) i -= size - if _, err := __caster.MarshalTo(m.TotalYes, dAtA[i:]); err != nil { + if _, err := __caster.MarshalTo(m.TotalStake, dAtA[i:]); err != nil { return 0, err } i = encodeVarintGovernance(dAtA, i, uint64(size)) @@ -1496,9 +1194,9 @@ func (m *VoteSet) MarshalToSizedBuffer(dAtA []byte) (int, error) { dAtA[i] = 0x1a { __caster := &github_com_multiversx_mx_chain_core_go_data.BigIntCaster{} - size := __caster.Size(m.UsedBalance) + size := __caster.Size(m.UsedPower) i -= size - if _, err := __caster.MarshalTo(m.UsedBalance, dAtA[i:]); err != nil { + if _, err := __caster.MarshalTo(m.UsedPower, dAtA[i:]); err != nil { return 0, err } i = encodeVarintGovernance(dAtA, i, uint64(size)) @@ -1507,9 +1205,9 @@ func (m *VoteSet) MarshalToSizedBuffer(dAtA []byte) (int, error) { dAtA[i] = 0x12 { __caster := &github_com_multiversx_mx_chain_core_go_data.BigIntCaster{} - size := __caster.Size(m.UsedPower) + size := __caster.Size(m.TotalPower) i -= size - if _, err := __caster.MarshalTo(m.UsedPower, dAtA[i:]); err != nil { + if _, err := __caster.MarshalTo(m.TotalPower, dAtA[i:]); err != nil { return 0, err } i = encodeVarintGovernance(dAtA, i, uint64(size)) @@ -1536,19 +1234,18 @@ func (m *GeneralProposal) Size() (n int) { } var l int _ = l - l = len(m.IssuerAddress) - if l > 0 { - n += 1 + l + sovGovernance(uint64(l)) + if m.Nonce != 0 { + n += 1 + sovGovernance(uint64(m.Nonce)) } l = len(m.CommitHash) if l > 0 { n += 1 + l + sovGovernance(uint64(l)) } - if m.StartVoteNonce != 0 { - n += 1 + sovGovernance(uint64(m.StartVoteNonce)) + if m.StartVoteEpoch != 0 { + n += 1 + sovGovernance(uint64(m.StartVoteEpoch)) } - if m.EndVoteNonce != 0 { - n += 1 + sovGovernance(uint64(m.EndVoteNonce)) + if m.EndVoteEpoch != 0 { + n += 1 + sovGovernance(uint64(m.EndVoteEpoch)) } { __caster 
:= &github_com_multiversx_mx_chain_core_go_data.BigIntCaster{} @@ -1565,57 +1262,29 @@ func (m *GeneralProposal) Size() (n int) { l = __caster.Size(m.Veto) n += 1 + l + sovGovernance(uint64(l)) } - if m.Passed { - n += 2 - } - if len(m.Votes) > 0 { - for _, b := range m.Votes { - l = len(b) - n += 1 + l + sovGovernance(uint64(l)) - } - } - l = len(m.TopReference) - if l > 0 { - n += 1 + l + sovGovernance(uint64(l)) - } - if m.Closed { - n += 2 - } - return n -} - -func (m *WhiteListProposal) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.WhiteListAddress) - if l > 0 { + { + __caster := &github_com_multiversx_mx_chain_core_go_data.BigIntCaster{} + l = __caster.Size(m.Abstain) n += 1 + l + sovGovernance(uint64(l)) } - l = len(m.ProposalStatus) - if l > 0 { + { + __caster := &github_com_multiversx_mx_chain_core_go_data.BigIntCaster{} + l = __caster.Size(m.QuorumStake) n += 1 + l + sovGovernance(uint64(l)) } - return n -} - -func (m *HardForkProposal) Size() (n int) { - if m == nil { - return 0 + if m.Passed { + n += 2 } - var l int - _ = l - if m.EpochToHardFork != 0 { - n += 1 + sovGovernance(uint64(m.EpochToHardFork)) + if m.Closed { + n += 2 } - l = len(m.NewSoftwareVersion) + l = len(m.IssuerAddress) if l > 0 { n += 1 + l + sovGovernance(uint64(l)) } - l = len(m.ProposalStatus) - if l > 0 { + { + __caster := &github_com_multiversx_mx_chain_core_go_data.BigIntCaster{} + l = __caster.Size(m.ProposalCost) n += 1 + l + sovGovernance(uint64(l)) } return n @@ -1653,56 +1322,50 @@ func (m *GovernanceConfigV2) Size() (n int) { } var l int _ = l - { - __caster := &github_com_multiversx_mx_chain_core_go_data.BigIntCaster{} - l = __caster.Size(m.MinQuorum) - n += 1 + l + sovGovernance(uint64(l)) + if m.MinQuorum != 0 { + n += 5 } - { - __caster := &github_com_multiversx_mx_chain_core_go_data.BigIntCaster{} - l = __caster.Size(m.MinPassThreshold) - n += 1 + l + sovGovernance(uint64(l)) + if m.MinPassThreshold != 0 { + n += 5 } - { - __caster := &github_com_multiversx_mx_chain_core_go_data.BigIntCaster{} - l = __caster.Size(m.MinVetoThreshold) - n += 1 + l + sovGovernance(uint64(l)) + if m.MinVetoThreshold != 0 { + n += 5 } { __caster := &github_com_multiversx_mx_chain_core_go_data.BigIntCaster{} l = __caster.Size(m.ProposalFee) n += 1 + l + sovGovernance(uint64(l)) } + if m.LastProposalNonce != 0 { + n += 1 + sovGovernance(uint64(m.LastProposalNonce)) + } return n } -func (m *VoteDetails) Size() (n int) { +func (m *OngoingVotedList) Size() (n int) { if m == nil { return 0 } var l int _ = l - if m.Value != 0 { - n += 1 + sovGovernance(uint64(m.Value)) - } - { - __caster := &github_com_multiversx_mx_chain_core_go_data.BigIntCaster{} - l = __caster.Size(m.Power) - n += 1 + l + sovGovernance(uint64(l)) - } - { - __caster := &github_com_multiversx_mx_chain_core_go_data.BigIntCaster{} - l = __caster.Size(m.Balance) - n += 1 + l + sovGovernance(uint64(l)) + if len(m.Direct) > 0 { + l = 0 + for _, e := range m.Direct { + l += sovGovernance(uint64(e)) + } + n += 1 + sovGovernance(uint64(l)) + l } - l = len(m.DelegatedTo) - if l > 0 { - n += 1 + l + sovGovernance(uint64(l)) + if len(m.Delegated) > 0 { + l = 0 + for _, e := range m.Delegated { + l += sovGovernance(uint64(e)) + } + n += 1 + sovGovernance(uint64(l)) + l } return n } -func (m *VoteSet) Size() (n int) { +func (m *DelegatedSCVoteInfo) Size() (n int) { if m == nil { return 0 } @@ -1710,35 +1373,24 @@ func (m *VoteSet) Size() (n int) { _ = l { __caster := &github_com_multiversx_mx_chain_core_go_data.BigIntCaster{} - l 
= __caster.Size(m.UsedPower) - n += 1 + l + sovGovernance(uint64(l)) - } - { - __caster := &github_com_multiversx_mx_chain_core_go_data.BigIntCaster{} - l = __caster.Size(m.UsedBalance) + l = __caster.Size(m.TotalPower) n += 1 + l + sovGovernance(uint64(l)) } { __caster := &github_com_multiversx_mx_chain_core_go_data.BigIntCaster{} - l = __caster.Size(m.TotalYes) + l = __caster.Size(m.UsedPower) n += 1 + l + sovGovernance(uint64(l)) } { __caster := &github_com_multiversx_mx_chain_core_go_data.BigIntCaster{} - l = __caster.Size(m.TotalNo) + l = __caster.Size(m.TotalStake) n += 1 + l + sovGovernance(uint64(l)) } { __caster := &github_com_multiversx_mx_chain_core_go_data.BigIntCaster{} - l = __caster.Size(m.TotalVeto) + l = __caster.Size(m.UsedStake) n += 1 + l + sovGovernance(uint64(l)) } - if len(m.VoteItems) > 0 { - for _, e := range m.VoteItems { - l = e.Size() - n += 1 + l + sovGovernance(uint64(l)) - } - } return n } @@ -1753,40 +1405,19 @@ func (this *GeneralProposal) String() string { return "nil" } s := strings.Join([]string{`&GeneralProposal{`, - `IssuerAddress:` + fmt.Sprintf("%v", this.IssuerAddress) + `,`, + `Nonce:` + fmt.Sprintf("%v", this.Nonce) + `,`, `CommitHash:` + fmt.Sprintf("%v", this.CommitHash) + `,`, - `StartVoteNonce:` + fmt.Sprintf("%v", this.StartVoteNonce) + `,`, - `EndVoteNonce:` + fmt.Sprintf("%v", this.EndVoteNonce) + `,`, + `StartVoteEpoch:` + fmt.Sprintf("%v", this.StartVoteEpoch) + `,`, + `EndVoteEpoch:` + fmt.Sprintf("%v", this.EndVoteEpoch) + `,`, `Yes:` + fmt.Sprintf("%v", this.Yes) + `,`, `No:` + fmt.Sprintf("%v", this.No) + `,`, `Veto:` + fmt.Sprintf("%v", this.Veto) + `,`, + `Abstain:` + fmt.Sprintf("%v", this.Abstain) + `,`, + `QuorumStake:` + fmt.Sprintf("%v", this.QuorumStake) + `,`, `Passed:` + fmt.Sprintf("%v", this.Passed) + `,`, - `Votes:` + fmt.Sprintf("%v", this.Votes) + `,`, - `TopReference:` + fmt.Sprintf("%v", this.TopReference) + `,`, `Closed:` + fmt.Sprintf("%v", this.Closed) + `,`, - `}`, - }, "") - return s -} -func (this *WhiteListProposal) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&WhiteListProposal{`, - `WhiteListAddress:` + fmt.Sprintf("%v", this.WhiteListAddress) + `,`, - `ProposalStatus:` + fmt.Sprintf("%v", this.ProposalStatus) + `,`, - `}`, - }, "") - return s -} -func (this *HardForkProposal) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&HardForkProposal{`, - `EpochToHardFork:` + fmt.Sprintf("%v", this.EpochToHardFork) + `,`, - `NewSoftwareVersion:` + fmt.Sprintf("%v", this.NewSoftwareVersion) + `,`, - `ProposalStatus:` + fmt.Sprintf("%v", this.ProposalStatus) + `,`, + `IssuerAddress:` + fmt.Sprintf("%v", this.IssuerAddress) + `,`, + `ProposalCost:` + fmt.Sprintf("%v", this.ProposalCost) + `,`, `}`, }, "") return s @@ -1814,39 +1445,31 @@ func (this *GovernanceConfigV2) String() string { `MinPassThreshold:` + fmt.Sprintf("%v", this.MinPassThreshold) + `,`, `MinVetoThreshold:` + fmt.Sprintf("%v", this.MinVetoThreshold) + `,`, `ProposalFee:` + fmt.Sprintf("%v", this.ProposalFee) + `,`, + `LastProposalNonce:` + fmt.Sprintf("%v", this.LastProposalNonce) + `,`, `}`, }, "") return s } -func (this *VoteDetails) String() string { +func (this *OngoingVotedList) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&VoteDetails{`, - `Value:` + fmt.Sprintf("%v", this.Value) + `,`, - `Power:` + fmt.Sprintf("%v", this.Power) + `,`, - `Balance:` + fmt.Sprintf("%v", this.Balance) + `,`, - `DelegatedTo:` + fmt.Sprintf("%v", 
this.DelegatedTo) + `,`, + s := strings.Join([]string{`&OngoingVotedList{`, + `Direct:` + fmt.Sprintf("%v", this.Direct) + `,`, + `Delegated:` + fmt.Sprintf("%v", this.Delegated) + `,`, `}`, }, "") return s } -func (this *VoteSet) String() string { +func (this *DelegatedSCVoteInfo) String() string { if this == nil { return "nil" } - repeatedStringForVoteItems := "[]*VoteDetails{" - for _, f := range this.VoteItems { - repeatedStringForVoteItems += strings.Replace(f.String(), "VoteDetails", "VoteDetails", 1) + "," - } - repeatedStringForVoteItems += "}" - s := strings.Join([]string{`&VoteSet{`, + s := strings.Join([]string{`&DelegatedSCVoteInfo{`, + `TotalPower:` + fmt.Sprintf("%v", this.TotalPower) + `,`, `UsedPower:` + fmt.Sprintf("%v", this.UsedPower) + `,`, - `UsedBalance:` + fmt.Sprintf("%v", this.UsedBalance) + `,`, - `TotalYes:` + fmt.Sprintf("%v", this.TotalYes) + `,`, - `TotalNo:` + fmt.Sprintf("%v", this.TotalNo) + `,`, - `TotalVeto:` + fmt.Sprintf("%v", this.TotalVeto) + `,`, - `VoteItems:` + repeatedStringForVoteItems + `,`, + `TotalStake:` + fmt.Sprintf("%v", this.TotalStake) + `,`, + `UsedStake:` + fmt.Sprintf("%v", this.UsedStake) + `,`, `}`, }, "") return s @@ -1889,10 +1512,10 @@ func (m *GeneralProposal) Unmarshal(dAtA []byte) error { } switch fieldNum { case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IssuerAddress", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Nonce", wireType) } - var byteLen int + m.Nonce = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGovernance @@ -1902,26 +1525,11 @@ func (m *GeneralProposal) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + m.Nonce |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if byteLen < 0 { - return ErrInvalidLengthGovernance - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthGovernance - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.IssuerAddress = append(m.IssuerAddress[:0], dAtA[iNdEx:postIndex]...) 
- if m.IssuerAddress == nil { - m.IssuerAddress = []byte{} - } - iNdEx = postIndex case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field CommitHash", wireType) @@ -1958,9 +1566,9 @@ func (m *GeneralProposal) Unmarshal(dAtA []byte) error { iNdEx = postIndex case 3: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field StartVoteNonce", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field StartVoteEpoch", wireType) } - m.StartVoteNonce = 0 + m.StartVoteEpoch = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGovernance @@ -1970,16 +1578,16 @@ func (m *GeneralProposal) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.StartVoteNonce |= uint64(b&0x7F) << shift + m.StartVoteEpoch |= uint64(b&0x7F) << shift if b < 0x80 { break } } case 4: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field EndVoteNonce", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field EndVoteEpoch", wireType) } - m.EndVoteNonce = 0 + m.EndVoteEpoch = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGovernance @@ -1989,7 +1597,7 @@ func (m *GeneralProposal) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.EndVoteNonce |= uint64(b&0x7F) << shift + m.EndVoteEpoch |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2109,10 +1717,10 @@ func (m *GeneralProposal) Unmarshal(dAtA []byte) error { } iNdEx = postIndex case 8: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Passed", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Abstain", wireType) } - var v int + var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGovernance @@ -2122,33 +1730,13 @@ func (m *GeneralProposal) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + byteLen |= int(b&0x7F) << shift if b < 0x80 { break } } - m.Passed = bool(v != 0) - case 9: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Votes", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGovernance - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthGovernance + if byteLen < 0 { + return ErrInvalidLengthGovernance } postIndex := iNdEx + byteLen if postIndex < 0 { @@ -2157,12 +1745,18 @@ func (m *GeneralProposal) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Votes = append(m.Votes, make([]byte, postIndex-iNdEx)) - copy(m.Votes[len(m.Votes)-1], dAtA[iNdEx:postIndex]) + { + __caster := &github_com_multiversx_mx_chain_core_go_data.BigIntCaster{} + if tmp, err := __caster.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } else { + m.Abstain = tmp + } + } iNdEx = postIndex - case 10: + case 9: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TopReference", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field QuorumStake", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { @@ -2189,14 +1783,18 @@ func (m *GeneralProposal) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.TopReference = append(m.TopReference[:0], dAtA[iNdEx:postIndex]...) 
- if m.TopReference == nil { - m.TopReference = []byte{} + { + __caster := &github_com_multiversx_mx_chain_core_go_data.BigIntCaster{} + if tmp, err := __caster.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } else { + m.QuorumStake = tmp + } } iNdEx = postIndex - case 11: + case 10: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Closed", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Passed", wireType) } var v int for shift := uint(0); ; shift += 7 { @@ -2213,186 +1811,12 @@ func (m *GeneralProposal) Unmarshal(dAtA []byte) error { break } } - m.Closed = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipGovernance(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGovernance - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGovernance - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *WhiteListProposal) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGovernance - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: WhiteListProposal: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: WhiteListProposal: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field WhiteListAddress", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGovernance - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthGovernance - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthGovernance - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.WhiteListAddress = append(m.WhiteListAddress[:0], dAtA[iNdEx:postIndex]...) - if m.WhiteListAddress == nil { - m.WhiteListAddress = []byte{} - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ProposalStatus", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGovernance - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthGovernance - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthGovernance - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ProposalStatus = append(m.ProposalStatus[:0], dAtA[iNdEx:postIndex]...) 
- if m.ProposalStatus == nil { - m.ProposalStatus = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGovernance(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGovernance - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGovernance - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *HardForkProposal) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGovernance - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: HardForkProposal: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: HardForkProposal: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + m.Passed = bool(v != 0) + case 11: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field EpochToHardFork", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Closed", wireType) } - m.EpochToHardFork = 0 + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGovernance @@ -2402,14 +1826,15 @@ func (m *HardForkProposal) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.EpochToHardFork |= uint32(b&0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - case 2: + m.Closed = bool(v != 0) + case 12: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NewSoftwareVersion", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field IssuerAddress", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { @@ -2436,14 +1861,14 @@ func (m *HardForkProposal) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.NewSoftwareVersion = append(m.NewSoftwareVersion[:0], dAtA[iNdEx:postIndex]...) - if m.NewSoftwareVersion == nil { - m.NewSoftwareVersion = []byte{} + m.IssuerAddress = append(m.IssuerAddress[:0], dAtA[iNdEx:postIndex]...) + if m.IssuerAddress == nil { + m.IssuerAddress = []byte{} } iNdEx = postIndex - case 3: + case 13: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ProposalStatus", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ProposalCost", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { @@ -2470,9 +1895,13 @@ func (m *HardForkProposal) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ProposalStatus = append(m.ProposalStatus[:0], dAtA[iNdEx:postIndex]...) 
- if m.ProposalStatus == nil { - m.ProposalStatus = []byte{} + { + __caster := &github_com_multiversx_mx_chain_core_go_data.BigIntCaster{} + if tmp, err := __caster.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } else { + m.ProposalCost = tmp + } } iNdEx = postIndex default: @@ -2696,84 +2125,41 @@ func (m *GovernanceConfigV2) Unmarshal(dAtA []byte) error { } switch fieldNum { case 1: - if wireType != 2 { + if wireType != 5 { return fmt.Errorf("proto: wrong wireType = %d for field MinQuorum", wireType) } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGovernance - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthGovernance - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthGovernance - } - if postIndex > l { + var v uint32 + if (iNdEx + 4) > l { return io.ErrUnexpectedEOF } - { - __caster := &github_com_multiversx_mx_chain_core_go_data.BigIntCaster{} - if tmp, err := __caster.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } else { - m.MinQuorum = tmp - } - } - iNdEx = postIndex + v = uint32(encoding_binary.LittleEndian.Uint32(dAtA[iNdEx:])) + iNdEx += 4 + m.MinQuorum = float32(math.Float32frombits(v)) case 2: - if wireType != 2 { + if wireType != 5 { return fmt.Errorf("proto: wrong wireType = %d for field MinPassThreshold", wireType) } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGovernance - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthGovernance + var v uint32 + if (iNdEx + 4) > l { + return io.ErrUnexpectedEOF } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthGovernance + v = uint32(encoding_binary.LittleEndian.Uint32(dAtA[iNdEx:])) + iNdEx += 4 + m.MinPassThreshold = float32(math.Float32frombits(v)) + case 3: + if wireType != 5 { + return fmt.Errorf("proto: wrong wireType = %d for field MinVetoThreshold", wireType) } - if postIndex > l { + var v uint32 + if (iNdEx + 4) > l { return io.ErrUnexpectedEOF } - { - __caster := &github_com_multiversx_mx_chain_core_go_data.BigIntCaster{} - if tmp, err := __caster.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } else { - m.MinPassThreshold = tmp - } - } - iNdEx = postIndex - case 3: + v = uint32(encoding_binary.LittleEndian.Uint32(dAtA[iNdEx:])) + iNdEx += 4 + m.MinVetoThreshold = float32(math.Float32frombits(v)) + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MinVetoThreshold", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ProposalFee", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { @@ -2805,15 +2191,15 @@ func (m *GovernanceConfigV2) Unmarshal(dAtA []byte) error { if tmp, err := __caster.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } else { - m.MinVetoThreshold = tmp + m.ProposalFee = tmp } } iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ProposalFee", wireType) + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LastProposalNonce", wireType) } - var byteLen int + m.LastProposalNonce = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return 
ErrIntOverflowGovernance @@ -2823,30 +2209,11 @@ func (m *GovernanceConfigV2) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + m.LastProposalNonce |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if byteLen < 0 { - return ErrInvalidLengthGovernance - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthGovernance - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - { - __caster := &github_com_multiversx_mx_chain_core_go_data.BigIntCaster{} - if tmp, err := __caster.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } else { - m.ProposalFee = tmp - } - } - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGovernance(dAtA[iNdEx:]) @@ -2871,7 +2238,7 @@ func (m *GovernanceConfigV2) Unmarshal(dAtA []byte) error { } return nil } -func (m *VoteDetails) Unmarshal(dAtA []byte) error { +func (m *OngoingVotedList) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -2894,141 +2261,164 @@ func (m *VoteDetails) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: VoteDetails: wiretype end group for non-group") + return fmt.Errorf("proto: OngoingVotedList: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: VoteDetails: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: OngoingVotedList: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) - } - m.Value = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGovernance - } - if iNdEx >= l { + if wireType == 0 { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGovernance + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Direct = append(m.Direct, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGovernance + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthGovernance + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthGovernance + } + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - m.Value |= VoteValueType(b&0x7F) << shift - if b < 0x80 { - break - } + var elementCount int + var count int + for _, integer := range dAtA[iNdEx:postIndex] { + if integer < 128 { + count++ + } + } + elementCount = count + if elementCount != 0 && len(m.Direct) == 0 { + m.Direct = make([]uint64, 0, elementCount) + } + for iNdEx < postIndex { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGovernance + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Direct = append(m.Direct, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field Direct", wireType) } case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Power", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGovernance - } - 
if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthGovernance - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthGovernance - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - { - __caster := &github_com_multiversx_mx_chain_core_go_data.BigIntCaster{} - if tmp, err := __caster.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } else { - m.Power = tmp - } - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Balance", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGovernance - } - if iNdEx >= l { + if wireType == 0 { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGovernance + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Delegated = append(m.Delegated, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGovernance + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthGovernance + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthGovernance + } + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthGovernance - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthGovernance + var elementCount int + var count int + for _, integer := range dAtA[iNdEx:postIndex] { + if integer < 128 { + count++ + } + } + elementCount = count + if elementCount != 0 && len(m.Delegated) == 0 { + m.Delegated = make([]uint64, 0, elementCount) + } + for iNdEx < postIndex { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGovernance + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Delegated = append(m.Delegated, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field Delegated", wireType) } - if postIndex > l { - return io.ErrUnexpectedEOF - } - { - __caster := &github_com_multiversx_mx_chain_core_go_data.BigIntCaster{} - if tmp, err := __caster.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } else { - m.Balance = tmp - } - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DelegatedTo", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGovernance - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthGovernance - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthGovernance - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.DelegatedTo = append(m.DelegatedTo[:0], dAtA[iNdEx:postIndex]...) 
- if m.DelegatedTo == nil { - m.DelegatedTo = []byte{} - } - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGovernance(dAtA[iNdEx:]) @@ -3053,7 +2443,7 @@ func (m *VoteDetails) Unmarshal(dAtA []byte) error { } return nil } -func (m *VoteSet) Unmarshal(dAtA []byte) error { +func (m *DelegatedSCVoteInfo) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -3076,15 +2466,15 @@ func (m *VoteSet) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: VoteSet: wiretype end group for non-group") + return fmt.Errorf("proto: DelegatedSCVoteInfo: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: VoteSet: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DelegatedSCVoteInfo: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field UsedPower", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TotalPower", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { @@ -3116,13 +2506,13 @@ func (m *VoteSet) Unmarshal(dAtA []byte) error { if tmp, err := __caster.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } else { - m.UsedPower = tmp + m.TotalPower = tmp } } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field UsedBalance", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field UsedPower", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { @@ -3154,13 +2544,13 @@ func (m *VoteSet) Unmarshal(dAtA []byte) error { if tmp, err := __caster.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } else { - m.UsedBalance = tmp + m.UsedPower = tmp } } iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TotalYes", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TotalStake", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { @@ -3192,51 +2582,13 @@ func (m *VoteSet) Unmarshal(dAtA []byte) error { if tmp, err := __caster.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } else { - m.TotalYes = tmp + m.TotalStake = tmp } } iNdEx = postIndex case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TotalNo", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGovernance - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthGovernance - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthGovernance - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - { - __caster := &github_com_multiversx_mx_chain_core_go_data.BigIntCaster{} - if tmp, err := __caster.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } else { - m.TotalNo = tmp - } - } - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TotalVeto", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field UsedStake", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { @@ -3268,42 +2620,8 @@ func (m *VoteSet) Unmarshal(dAtA []byte) error { if tmp, err := __caster.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } else { - m.TotalVeto = tmp 
- } - } - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field VoteItems", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGovernance - } - if iNdEx >= l { - return io.ErrUnexpectedEOF + m.UsedStake = tmp } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGovernance - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGovernance - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.VoteItems = append(m.VoteItems, &VoteDetails{}) - if err := m.VoteItems[len(m.VoteItems)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err } iNdEx = postIndex default: diff --git a/vm/systemSmartContracts/governance.proto b/vm/systemSmartContracts/governance.proto index 60db9ce8892..a60fee5b126 100644 --- a/vm/systemSmartContracts/governance.proto +++ b/vm/systemSmartContracts/governance.proto @@ -8,34 +8,26 @@ option (gogoproto.stable_marshaler_all) = true; import "github.com/gogo/protobuf/gogoproto/gogo.proto"; enum VoteValueType { - Yes = 0; - No = 1; - Veto = 2; + Yes = 0; + No = 1; + Veto = 2; + Abstain = 3; } message GeneralProposal { - bytes IssuerAddress = 1 [(gogoproto.jsontag) = "IssuerAddress"]; + uint64 Nonce = 1 [(gogoproto.jsontag) = "Nonce"]; bytes CommitHash = 2 [(gogoproto.jsontag) = "CommitHash"]; - uint64 StartVoteNonce = 3 [(gogoproto.jsontag) = "StartVoteNonce"]; - uint64 EndVoteNonce = 4 [(gogoproto.jsontag) = "EndVoteNonce"]; + uint64 StartVoteEpoch = 3 [(gogoproto.jsontag) = "StartVoteEpoch"]; + uint64 EndVoteEpoch = 4 [(gogoproto.jsontag) = "EndVoteEpoch"]; bytes Yes = 5 [(gogoproto.jsontag) = "Yes", (gogoproto.casttypewith) = "math/big.Int;github.com/multiversx/mx-chain-core-go/data.BigIntCaster"]; bytes No = 6 [(gogoproto.jsontag) = "No", (gogoproto.casttypewith) = "math/big.Int;github.com/multiversx/mx-chain-core-go/data.BigIntCaster"]; bytes Veto = 7 [(gogoproto.jsontag) = "Veto", (gogoproto.casttypewith) = "math/big.Int;github.com/multiversx/mx-chain-core-go/data.BigIntCaster"]; - bool Passed = 8 [(gogoproto.jsontag) = "Passed"]; - repeated bytes Votes = 9 [(gogoproto.jsontag) = "Votes"]; - bytes TopReference = 10 [(gogoproto.jsontag) = "TopReference"]; + bytes Abstain = 8 [(gogoproto.jsontag) = "Abstain", (gogoproto.casttypewith) = "math/big.Int;github.com/multiversx/mx-chain-core-go/data.BigIntCaster"]; + bytes QuorumStake = 9 [(gogoproto.jsontag) = "QuorumStake", (gogoproto.casttypewith) = "math/big.Int;github.com/multiversx/mx-chain-core-go/data.BigIntCaster"]; + bool Passed = 10 [(gogoproto.jsontag) = "Passed"]; bool Closed = 11 [(gogoproto.jsontag) = "Closed"]; -} - -message WhiteListProposal { - bytes WhiteListAddress = 1 [(gogoproto.jsontag) = "WhiteListAddress"]; - bytes ProposalStatus = 2 [(gogoproto.jsontag) = "ProposalStatus"]; -} - -message HardForkProposal { - uint32 EpochToHardFork = 1 [(gogoproto.jsontag) = "EpochToHardFork"]; - bytes NewSoftwareVersion = 2 [(gogoproto.jsontag) = "NewSoftwareVersion"]; - bytes ProposalStatus = 3 [(gogoproto.jsontag) = "ProposalStatus"]; + bytes IssuerAddress = 12 [(gogoproto.jsontag) = "IssuerAddress"]; + bytes ProposalCost = 13 [(gogoproto.jsontag) = "ProposalCost", (gogoproto.casttypewith) = "math/big.Int;github.com/multiversx/mx-chain-core-go/data.BigIntCaster"]; } message GovernanceConfig { @@ -47,24 +39,21 @@ message GovernanceConfig { } message GovernanceConfigV2 { - bytes 
MinQuorum = 1 [(gogoproto.jsontag) = "MinQuorum", (gogoproto.casttypewith) = "math/big.Int;github.com/multiversx/mx-chain-core-go/data.BigIntCaster"]; - bytes MinPassThreshold = 2 [(gogoproto.jsontag) = "MinPassThreshold", (gogoproto.casttypewith) = "math/big.Int;github.com/multiversx/mx-chain-core-go/data.BigIntCaster"]; - bytes MinVetoThreshold = 3 [(gogoproto.jsontag) = "MinVetoThreshold", (gogoproto.casttypewith) = "math/big.Int;github.com/multiversx/mx-chain-core-go/data.BigIntCaster"]; - bytes ProposalFee = 4 [(gogoproto.jsontag) = "ProposalFee", (gogoproto.casttypewith) = "math/big.Int;github.com/multiversx/mx-chain-core-go/data.BigIntCaster"]; + float MinQuorum = 1 [(gogoproto.jsontag) = "MinQuorum"]; + float MinPassThreshold = 2 [(gogoproto.jsontag) = "MinPassThreshold"]; + float MinVetoThreshold = 3 [(gogoproto.jsontag) = "MinVetoThreshold"]; + bytes ProposalFee = 4 [(gogoproto.jsontag) = "ProposalFee", (gogoproto.casttypewith) = "math/big.Int;github.com/multiversx/mx-chain-core-go/data.BigIntCaster"]; + uint64 LastProposalNonce = 5 [(gogoproto.jsontag) = "LastProposalNonce"]; } -message VoteDetails { - VoteValueType Value = 1 [(gogoproto.jsontag) = "Value"]; - bytes Power = 2 [(gogoproto.jsontag) = "Power", (gogoproto.casttypewith) = "math/big.Int;github.com/multiversx/mx-chain-core-go/data.BigIntCaster"]; - bytes Balance = 3 [(gogoproto.jsontag) = "Balance", (gogoproto.casttypewith) = "math/big.Int;github.com/multiversx/mx-chain-core-go/data.BigIntCaster"]; - bytes DelegatedTo = 4 [(gogoproto.jsontag) = "DelegatedTo"]; +message OngoingVotedList { + repeated uint64 Direct = 1 [(gogoproto.jsontag) = "Direct"]; + repeated uint64 Delegated = 2 [(gogoproto.jsontag) = "Delegated"]; } -message VoteSet { - bytes UsedPower = 1 [(gogoproto.jsontag) = "UsedPower", (gogoproto.casttypewith) = "math/big.Int;github.com/multiversx/mx-chain-core-go/data.BigIntCaster"]; - bytes UsedBalance = 2 [(gogoproto.jsontag) = "UsedBalance", (gogoproto.casttypewith) = "math/big.Int;github.com/multiversx/mx-chain-core-go/data.BigIntCaster"]; - bytes TotalYes = 3 [(gogoproto.jsontag) = "TotalYes", (gogoproto.casttypewith) = "math/big.Int;github.com/multiversx/mx-chain-core-go/data.BigIntCaster"]; - bytes TotalNo = 4 [(gogoproto.jsontag) = "TotalNo", (gogoproto.casttypewith) = "math/big.Int;github.com/multiversx/mx-chain-core-go/data.BigIntCaster"]; - bytes TotalVeto = 5 [(gogoproto.jsontag) = "TotalVeto", (gogoproto.casttypewith) = "math/big.Int;github.com/multiversx/mx-chain-core-go/data.BigIntCaster"]; - repeated VoteDetails VoteItems = 6 [(gogoproto.jsontag) = "VoteItems"]; +message DelegatedSCVoteInfo { + bytes TotalPower = 1 [(gogoproto.jsontag) = "TotalPower", (gogoproto.casttypewith) = "math/big.Int;github.com/multiversx/mx-chain-core-go/data.BigIntCaster"]; + bytes UsedPower = 2 [(gogoproto.jsontag) = "UsedPower", (gogoproto.casttypewith) = "math/big.Int;github.com/multiversx/mx-chain-core-go/data.BigIntCaster"]; + bytes TotalStake = 3 [(gogoproto.jsontag) = "TotalStake", (gogoproto.casttypewith) = "math/big.Int;github.com/multiversx/mx-chain-core-go/data.BigIntCaster"]; + bytes UsedStake = 4 [(gogoproto.jsontag) = "UsedStake", (gogoproto.casttypewith) = "math/big.Int;github.com/multiversx/mx-chain-core-go/data.BigIntCaster"]; } diff --git a/vm/systemSmartContracts/governance_test.go b/vm/systemSmartContracts/governance_test.go index 79c2f0d3d2c..3faf8489503 100644 --- a/vm/systemSmartContracts/governance_test.go +++ b/vm/systemSmartContracts/governance_test.go @@ -3,7 +3,12 @@ package 
systemSmartContracts import ( "bytes" "errors" + "fmt" + "github.com/multiversx/mx-chain-go/process/smartContract/hooks" + stateMock "github.com/multiversx/mx-chain-go/testscommon/state" + "github.com/stretchr/testify/assert" "math/big" + "strings" "testing" "github.com/multiversx/mx-chain-core-go/core" @@ -17,8 +22,16 @@ import ( ) func createMockGovernanceArgs() ArgsNewGovernanceContract { + eei := createEEIWithBlockchainHook(&mock.BlockChainHookStub{CurrentEpochCalled: func() uint32 { + return 2 + }}) + + return createArgsWithEEI(eei) +} + +func createArgsWithEEI(eei vm.SystemEI) ArgsNewGovernanceContract { return ArgsNewGovernanceContract{ - Eei: &mock.SystemEIStub{}, + Eei: eei, GasCost: vm.GasCost{}, GovernanceConfig: config.GovernanceSystemSCConfig{ V1: config.GovernanceSystemSCConfigV1{ @@ -26,24 +39,82 @@ func createMockGovernanceArgs() ArgsNewGovernanceContract { MinPassThreshold: 1, MinQuorum: 2, MinVetoThreshold: 2, - ProposalCost: "100", + ProposalCost: "500", }, Active: config.GovernanceSystemSCConfigActive{ ProposalCost: "500", - MinQuorum: "50", - MinPassThreshold: "50", - MinVetoThreshold: "50", + MinQuorum: 0.5, + MinPassThreshold: 0.5, + MinVetoThreshold: 0.5, }, + ChangeConfigAddress: "erd1vxy22x0fj4zv6hktmydg8vpfh6euv02cz4yg0aaws6rrad5a5awqgqky80", }, Marshalizer: &mock.MarshalizerMock{}, Hasher: &hashingMocks.HasherMock{}, GovernanceSCAddress: vm.GovernanceSCAddress, DelegationMgrSCAddress: vm.DelegationManagerSCAddress, ValidatorSCAddress: vm.ValidatorSCAddress, + ConfigChangeAddress: bytes.Repeat([]byte{1}, 32), + UnBondPeriodInEpochs: 10, EnableEpochsHandler: &testscommon.EnableEpochsHandlerStub{ IsGovernanceFlagEnabledField: true, }, - InitialWhiteListedAddresses: [][]byte{vm.GovernanceSCAddress}, + } +} + +func createEEIWithBlockchainHook(blockchainHook vm.BlockchainHook) vm.ContextHandler { + eei, _ := NewVMContext(VMContextArgs{ + BlockChainHook: blockchainHook, + CryptoHook: hooks.NewVMCryptoHook(), + InputParser: &mock.ArgumentParserMock{}, + ValidatorAccountsDB: &stateMock.AccountsStub{}, + ChanceComputer: &mock.RaterMock{}, + EnableEpochsHandler: &testscommon.EnableEpochsHandlerStub{}, + }) + systemSCContainerStub := &mock.SystemSCContainerStub{GetCalled: func(key []byte) (vm.SystemSmartContract, error) { + return &mock.SystemSCStub{ExecuteCalled: func(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { + return vmcommon.Ok + }}, nil + }} + _ = eei.SetSystemSCContainer(systemSCContainerStub) + + return eei +} + +func createGovernanceBlockChainHookStubContextHandler() (*governanceContract, *mock.BlockChainHookStub, vm.ContextHandler) { + blockChainHook := &mock.BlockChainHookStub{CurrentEpochCalled: func() uint32 { + return 2 + }} + eei := createEEIWithBlockchainHook(blockChainHook) + gsc, _ := NewGovernanceContract(createArgsWithEEI(eei)) + gsc.initV2(&vmcommon.ContractCallInput{VMInput: vmcommon.VMInput{CallerAddr: gsc.governanceSCAddress}}) + + addressList := [][]byte{vm.FirstDelegationSCAddress, vm.StakingSCAddress} + marshaledData, _ := gsc.marshalizer.Marshal(&DelegationContractList{addressList}) + + gsc.eei.SetStorageForAddress(gsc.delegationMgrSCAddress, []byte(delegationContractsList), marshaledData) + _ = saveDelegationManagementData(eei, gsc.marshalizer, gsc.delegationMgrSCAddress, &DelegationManagement{MinDelegationAmount: big.NewInt(10)}) + + userAddress := bytes.Repeat([]byte{2}, 32) + addStakeAndDelegationForAddress(gsc, userAddress) + + return gsc, blockChainHook, eei +} + +func addStakeAndDelegationForAddress(gsc *governanceContract, 
userAddress []byte) { + marshaledData, _ := gsc.marshalizer.Marshal(&ValidatorDataV2{TotalStakeValue: big.NewInt(100)}) + gsc.eei.SetStorageForAddress(gsc.validatorSCAddress, userAddress, marshaledData) + + addressList, _ := getDelegationContractList(gsc.eei, gsc.marshalizer, gsc.delegationMgrSCAddress) + + for index, delegationAddress := range addressList.Addresses { + fundKey := append([]byte(fundKeyPrefix), big.NewInt(int64(index)).Bytes()...) + + marshaledData, _ = gsc.marshalizer.Marshal(&DelegatorData{ActiveFund: fundKey}) + gsc.eei.SetStorageForAddress(delegationAddress, userAddress, marshaledData) + + marshaledData, _ = gsc.marshalizer.Marshal(&Fund{Value: big.NewInt(10)}) + gsc.eei.SetStorageForAddress(delegationAddress, fundKey, marshaledData) } } @@ -107,7 +178,7 @@ func TestNewGovernanceContract_ZeroBaseProposerCostShouldErr(t *testing.T) { t.Parallel() args := createMockGovernanceArgs() - args.GovernanceConfig.Active.ProposalCost = "" + args.GovernanceConfig.V1.ProposalCost = "" gsc, err := NewGovernanceContract(args) require.Nil(t, gsc) @@ -147,21 +218,17 @@ func TestNewGovernanceContract_InvalidGovernanceAddress(t *testing.T) { require.True(t, errors.Is(err, vm.ErrInvalidAddress)) } -func TestNewGovernanceContract_InvalidWhiteList(t *testing.T) { - t.Parallel() - +func TestGovernanceContract_SetNewGasCost(t *testing.T) { args := createMockGovernanceArgs() - args.InitialWhiteListedAddresses = nil - gsc, err := NewGovernanceContract(args) - require.Nil(t, gsc) - require.True(t, errors.Is(err, vm.ErrInvalidNumOfInitialWhiteListedAddress)) + gsc, _ := NewGovernanceContract(args) + require.False(t, gsc.IsInterfaceNil()) + require.True(t, gsc.CanUseContract()) - args.InitialWhiteListedAddresses = [][]byte{[]byte("someAddress")} - gsc, err = NewGovernanceContract(args) - require.Nil(t, gsc) - require.NotNil(t, err) - require.True(t, bytes.Contains([]byte(err.Error()), []byte("invalid address length for"))) + gasCost := vm.GasCost{MetaChainSystemSCsCost: vm.MetaChainSystemSCsCost{Vote: 1000000}} + gsc.SetNewGasCost(gasCost) + + assert.Equal(t, gsc.gasCost.MetaChainSystemSCsCost.Vote, gasCost.MetaChainSystemSCsCost.Vote) } func TestGovernanceContract_ExecuteNilVMInputShouldErr(t *testing.T) { @@ -207,7 +274,7 @@ func TestGovernanceContract_ExecuteInitV2InvalidConfig(t *testing.T) { t.Parallel() args := createMockGovernanceArgs() - args.GovernanceConfig.Active.MinQuorum = "" + args.GovernanceConfig.Active.MinQuorum = 0.0 gsc, _ := NewGovernanceContract(args) callInput := createVMInput(big.NewInt(0), "initV2", vm.GovernanceSCAddress, vm.GovernanceSCAddress, nil) retCode := gsc.Execute(callInput) @@ -224,7 +291,7 @@ func TestGovernanceContract_ExecuteInitV2MarshalError(t *testing.T) { gsc, _ := NewGovernanceContract(args) callInput := createVMInput(big.NewInt(0), "initV2", vm.GovernanceSCAddress, vm.GovernanceSCAddress, nil) retCode := gsc.Execute(callInput) - require.Equal(t, vmcommon.ExecutionFailed, retCode) + require.Equal(t, vmcommon.UserError, retCode) } func TestGovernanceContract_ExecuteInitV2(t *testing.T) { @@ -247,78 +314,264 @@ func TestGovernanceContract_ExecuteInitV2(t *testing.T) { require.Equal(t, gsc.ownerAddress, vm.GovernanceSCAddress) } -func TestGovernanceContract_ProposalWrongCallValue(t *testing.T) { +func TestGovernanceContract_ChangeConfig(t *testing.T) { t.Parallel() args := createMockGovernanceArgs() - args.GovernanceConfig.Active.ProposalCost = "10" + args.Eei = &mock.SystemEIStub{ + BlockChainHookCalled: func() vm.BlockchainHook { + return 
&mock.BlockChainHookStub{ + CurrentNonceCalled: func() uint64 { + return 1 + }, + } + }, + GetStorageCalled: func(key []byte) []byte { + if bytes.Equal(key, []byte(governanceConfigKey)) { + configBytes, _ := args.Marshalizer.Marshal(&GovernanceConfigV2{}) + return configBytes + } + + return nil + }, + } gsc, _ := NewGovernanceContract(args) - callInput := createVMInput(big.NewInt(9), "proposal", vm.GovernanceSCAddress, []byte("addr1"), nil) + + callInputArgs := [][]byte{ + []byte("1"), + []byte("10"), + []byte("10"), + []byte("15"), + } + initInput := createVMInput(zero, "initV2", vm.GovernanceSCAddress, vm.GovernanceSCAddress, nil) + _ = gsc.Execute(initInput) + callInput := createVMInput(zero, "changeConfig", args.ConfigChangeAddress, vm.GovernanceSCAddress, callInputArgs) retCode := gsc.Execute(callInput) - require.Equal(t, vmcommon.OutOfFunds, retCode) + + require.Equal(t, vmcommon.Ok, retCode) } -func TestGovernanceContract_ProposalNotEnoughGas(t *testing.T) { +func TestGovernanceContract_ChangeConfigWrongCaller(t *testing.T) { t.Parallel() + retMessage := "" + errSubstr := "changeConfig can be called only by owner" args := createMockGovernanceArgs() args.Eei = &mock.SystemEIStub{ - UseGasCalled: func(gas uint64) error { - return errors.New("not enough gas") + AddReturnMessageCalled: func(msg string) { + retMessage = msg }, } gsc, _ := NewGovernanceContract(args) - callInput := createVMInput(big.NewInt(500), "proposal", vm.GovernanceSCAddress, []byte("addr1"), nil) + initInput := createVMInput(zero, "initV2", vm.GovernanceSCAddress, vm.GovernanceSCAddress, nil) + _ = gsc.Execute(initInput) + callInput := createVMInput(zero, "changeConfig", []byte("wrong caller"), vm.GovernanceSCAddress, nil) retCode := gsc.Execute(callInput) - require.Equal(t, vmcommon.OutOfGas, retCode) + + require.Equal(t, vmcommon.UserError, retCode) + require.Contains(t, retMessage, errSubstr) } -func TestGovernanceContract_ProposalInvalidArgumentsLenght(t *testing.T) { +func TestGovernanceContract_ChangeConfigWrongCallValue(t *testing.T) { t.Parallel() + retMessage := "" + errSubstr := "changeConfig can be called only without callValue" args := createMockGovernanceArgs() + args.Eei = &mock.SystemEIStub{ + AddReturnMessageCalled: func(msg string) { + retMessage = msg + }, + } + gsc, _ := NewGovernanceContract(args) - callInput := createVMInput(big.NewInt(500), "proposal", vm.GovernanceSCAddress, []byte("addr1"), nil) + + initInput := createVMInput(zero, "initV2", vm.GovernanceSCAddress, vm.GovernanceSCAddress, nil) + _ = gsc.Execute(initInput) + callInput := createVMInput(big.NewInt(10), "changeConfig", args.ConfigChangeAddress, vm.GovernanceSCAddress, nil) retCode := gsc.Execute(callInput) - require.Equal(t, vmcommon.FunctionWrongSignature, retCode) + + require.Equal(t, vmcommon.UserError, retCode) + require.Contains(t, retMessage, errSubstr) +} + +func TestGovernanceContract_ChangeConfigWrongArgumentsLength(t *testing.T) { + t.Parallel() + + retMessage := "" + errSubstr := "changeConfig needs 4 arguments" + args := createMockGovernanceArgs() + args.Eei = &mock.SystemEIStub{ + AddReturnMessageCalled: func(msg string) { + retMessage = msg + }, + } + + gsc, _ := NewGovernanceContract(args) + + initInput := createVMInput(zero, "initV2", vm.GovernanceSCAddress, vm.GovernanceSCAddress, nil) + _ = gsc.Execute(initInput) + callInput := createVMInput(zero, "changeConfig", args.ConfigChangeAddress, vm.GovernanceSCAddress, nil) + retCode := gsc.Execute(callInput) + + require.Equal(t, vmcommon.UserError, retCode) + 
require.Contains(t, retMessage, errSubstr) } -func TestGovernanceContract_ProposalCallerNptWhitelisted(t *testing.T) { +func TestGovernanceContract_ChangeConfigInvalidParams(t *testing.T) { t.Parallel() + retMessage := "" + errSubstr := "changeConfig first argument is incorrectly formatted" args := createMockGovernanceArgs() + args.Eei = &mock.SystemEIStub{ + AddReturnMessageCalled: func(msg string) { + retMessage = msg + }, + } + gsc, _ := NewGovernanceContract(args) + + initInput := createVMInput(zero, "initV2", vm.GovernanceSCAddress, vm.GovernanceSCAddress, nil) + _ = gsc.Execute(initInput) + callInputArgs := [][]byte{ - []byte("arg1"), - []byte("arg2"), - []byte("arg3"), + []byte("invalid"), + []byte("10"), + []byte("10"), + []byte("5"), } - callInput := createVMInput(big.NewInt(500), "proposal", vm.GovernanceSCAddress, []byte("addr1"), callInputArgs) + callInput := createVMInput(zero, "changeConfig", args.ConfigChangeAddress, vm.GovernanceSCAddress, callInputArgs) retCode := gsc.Execute(callInput) + + require.Equal(t, vmcommon.UserError, retCode) + require.Contains(t, retMessage, errSubstr) + + errSubstr = "config incorrect minQuorum" + callInputArgs = [][]byte{ + []byte("1"), + []byte("invalid"), + []byte("10"), + []byte("5"), + } + callInput = createVMInput(zero, "changeConfig", args.ConfigChangeAddress, vm.GovernanceSCAddress, callInputArgs) + retCode = gsc.Execute(callInput) + + require.Equal(t, vmcommon.UserError, retCode) + require.Contains(t, retMessage, errSubstr) + + errSubstr = "config incorrect minVeto" + callInputArgs = [][]byte{ + []byte("1"), + []byte("10"), + []byte("invalid"), + []byte("5"), + } + callInput = createVMInput(zero, "changeConfig", args.ConfigChangeAddress, vm.GovernanceSCAddress, callInputArgs) + retCode = gsc.Execute(callInput) + + require.Equal(t, vmcommon.UserError, retCode) + require.Contains(t, retMessage, errSubstr) + + errSubstr = "config incorrect minPass" + callInputArgs = [][]byte{ + []byte("1"), + []byte("10"), + []byte("10"), + []byte("invalid"), + } + callInput = createVMInput(zero, "changeConfig", args.ConfigChangeAddress, vm.GovernanceSCAddress, callInputArgs) + retCode = gsc.Execute(callInput) + require.Equal(t, vmcommon.UserError, retCode) + require.Contains(t, retMessage, errSubstr) } -func TestGovernanceContract_ProposalInvalidReferenceLength(t *testing.T) { +func TestGovernanceContract_ChangeConfigGetConfigErr(t *testing.T) { t.Parallel() + retMessage := "" + errSubstr := "changeConfig error" args := createMockGovernanceArgs() args.Eei = &mock.SystemEIStub{ - GetStorageCalled: func(key []byte) []byte { - return []byte("storage item") + AddReturnMessageCalled: func(msg string) { + retMessage = msg }, - } - args.Marshalizer = &mock.MarshalizerStub{ - UnmarshalCalled: func(obj interface{}, buff []byte) error { - whitelistProposal, proposalOk := obj.(*GeneralProposal) - if proposalOk { - whitelistProposal.Passed = true + GetStorageCalled: func(key []byte) []byte { + if bytes.Equal(key, []byte(governanceConfigKey)) { + return []byte("invalid config") } + return nil }, } + + gsc, _ := NewGovernanceContract(args) + + initInput := createVMInput(zero, "initV2", vm.GovernanceSCAddress, vm.GovernanceSCAddress, nil) + _ = gsc.Execute(initInput) + + callInputArgs := [][]byte{ + []byte("1"), + []byte("10"), + []byte("10"), + []byte("10"), + } + callInput := createVMInput(zero, "changeConfig", args.ConfigChangeAddress, vm.GovernanceSCAddress, callInputArgs) + retCode := gsc.Execute(callInput) + + require.Equal(t, vmcommon.UserError, retCode) + 
require.Contains(t, retMessage, errSubstr) +} + +func TestGovernanceContract_ProposalNotEnoughGas(t *testing.T) { + t.Parallel() + + args := createMockGovernanceArgs() + args.Eei = &mock.SystemEIStub{ + UseGasCalled: func(gas uint64) error { + return errors.New("not enough gas") + }, + } + + gsc, _ := NewGovernanceContract(args) + callInput := createVMInput(big.NewInt(500), "proposal", vm.GovernanceSCAddress, []byte("addr1"), nil) + retCode := gsc.Execute(callInput) + require.Equal(t, vmcommon.OutOfGas, retCode) +} + +func TestGovernanceContract_ProposalInvalidArgumentsLength(t *testing.T) { + t.Parallel() + + args := createMockGovernanceArgs() + gsc, _ := NewGovernanceContract(args) + callInput := createVMInput(big.NewInt(500), "proposal", vm.GovernanceSCAddress, []byte("addr1"), nil) + retCode := gsc.Execute(callInput) + require.Equal(t, vmcommon.FunctionWrongSignature, retCode) +} + +func TestGovernanceContract_ProposalWrongCallValue(t *testing.T) { + t.Parallel() + + args := createMockGovernanceArgs() + gsc, _ := NewGovernanceContract(args) + + callInput := createVMInput(big.NewInt(9), "proposal", vm.GovernanceSCAddress, []byte("addr1"), [][]byte{{1}, {1}, {1}}) + retCode := gsc.Execute(callInput) + require.Equal(t, vmcommon.UserError, retCode) + + gsc.initV2(&vmcommon.ContractCallInput{VMInput: vmcommon.VMInput{CallerAddr: gsc.governanceSCAddress}}) + retCode = gsc.Execute(callInput) + require.Equal(t, vmcommon.OutOfFunds, retCode) +} + +func TestGovernanceContract_ProposalInvalidReferenceLength(t *testing.T) { + t.Parallel() + + gsc, _, eei := createGovernanceBlockChainHookStubContextHandler() callInputArgs := [][]byte{ []byte("arg1"), []byte("arg2"), @@ -327,6 +580,7 @@ func TestGovernanceContract_ProposalInvalidReferenceLength(t *testing.T) { callInput := createVMInput(big.NewInt(500), "proposal", vm.GovernanceSCAddress, []byte("addr1"), callInputArgs) retCode := gsc.Execute(callInput) require.Equal(t, vmcommon.UserError, retCode) + require.True(t, strings.Contains(eei.GetReturnMessage(), "invalid github commit")) } func TestGovernanceContract_ProposalAlreadyExists(t *testing.T) { @@ -334,17 +588,18 @@ func TestGovernanceContract_ProposalAlreadyExists(t *testing.T) { proposalIdentifier := bytes.Repeat([]byte("a"), commitHashLength) - args := createMockGovernanceArgs() - args.Eei = createMockStorer(vm.GovernanceSCAddress, proposalIdentifier, &GeneralProposal{}) - gsc, _ := NewGovernanceContract(args) + gsc, _, eei := createGovernanceBlockChainHookStubContextHandler() callInputArgs := [][]byte{ proposalIdentifier, []byte("arg2"), []byte("arg3"), } + + gsc.eei.SetStorage([]byte(proposalPrefix+string(proposalIdentifier)), []byte("1")) callInput := createVMInput(big.NewInt(500), "proposal", vm.GovernanceSCAddress, []byte("addr1"), callInputArgs) retCode := gsc.Execute(callInput) require.Equal(t, vmcommon.UserError, retCode) + require.Equal(t, eei.GetReturnMessage(), "proposal already exists") } func TestGovernanceContract_ProposalInvalidVoteNonce(t *testing.T) { @@ -352,17 +607,16 @@ func TestGovernanceContract_ProposalInvalidVoteNonce(t *testing.T) { proposalIdentifier := bytes.Repeat([]byte("a"), commitHashLength) - args := createMockGovernanceArgs() - args.Eei = createMockStorer(vm.GovernanceSCAddress, proposalIdentifier, nil) - gsc, _ := NewGovernanceContract(args) + gsc, _, eei := createGovernanceBlockChainHookStubContextHandler() callInputArgs := [][]byte{ proposalIdentifier, - []byte("arg2"), + []byte("5"), []byte("arg3"), } callInput := createVMInput(big.NewInt(500), "proposal", 
vm.GovernanceSCAddress, []byte("addr1"), callInputArgs) retCode := gsc.Execute(callInput) require.Equal(t, vmcommon.UserError, retCode) + require.Equal(t, eei.GetReturnMessage(), vm.ErrInvalidStartEndVoteEpoch.Error()) } func TestGovernanceContract_ProposalOK(t *testing.T) { @@ -370,210 +624,45 @@ func TestGovernanceContract_ProposalOK(t *testing.T) { proposalIdentifier := bytes.Repeat([]byte("a"), commitHashLength) - args := createMockGovernanceArgs() - args.Eei = createMockStorer(vm.GovernanceSCAddress, proposalIdentifier, nil) - gsc, _ := NewGovernanceContract(args) + gsc, _, _ := createGovernanceBlockChainHookStubContextHandler() + callInputArgs := [][]byte{ proposalIdentifier, - []byte("1"), - []byte("10"), + []byte("50"), + []byte("55"), } callInput := createVMInput(big.NewInt(500), "proposal", vm.GovernanceSCAddress, []byte("addr1"), callInputArgs) retCode := gsc.Execute(callInput) + require.Equal(t, vmcommon.Ok, retCode) } -func TestGovernanceContract_VoteWithFundsNotEnoughGas(t *testing.T) { +func TestGovernanceContract_VoteWithBadArgsOrCallValue(t *testing.T) { t.Parallel() - args := createMockGovernanceArgs() - args.Eei = &mock.SystemEIStub{ - UseGasCalled: func(_ uint64) error { - return errors.New("not enough gas") - }, - } - gsc, _ := NewGovernanceContract(args) - callInput := createVMInput(big.NewInt(500), "voteWithFunds", vm.GovernanceSCAddress, []byte("addr1"), make([][]byte, 0)) - retCode := gsc.Execute(callInput) - require.Equal(t, vmcommon.OutOfGas, retCode) -} - -func TestGovernanceContract_VoteWithFundsInvalidNumOfArguments(t *testing.T) { - t.Parallel() - - proposalIdentifier := bytes.Repeat([]byte("a"), commitHashLength) - - args := createMockGovernanceArgs() - gsc, _ := NewGovernanceContract(args) - voteArgs := [][]byte{ - proposalIdentifier, - []byte("1"), - []byte("10"), - } - callInput := createVMInput(big.NewInt(500), "voteWithFunds", vm.GovernanceSCAddress, []byte("addr1"), voteArgs) + gsc, _, eei := createGovernanceBlockChainHookStubContextHandler() + callInput := createVMInput(big.NewInt(0), "vote", vm.GovernanceSCAddress, []byte("addr1"), [][]byte{[]byte("bad args")}) retCode := gsc.Execute(callInput) require.Equal(t, vmcommon.FunctionWrongSignature, retCode) -} - -func TestGovernanceContract_VoteWithFundsProposalNotFound(t *testing.T) { - t.Parallel() - - proposalIdentifier := bytes.Repeat([]byte("a"), commitHashLength) - - args := createMockGovernanceArgs() - gsc, _ := NewGovernanceContract(args) - voteArgs := [][]byte{ - proposalIdentifier, - []byte("1"), - } - callInput := createVMInput(big.NewInt(500), "voteWithFunds", vm.GovernanceSCAddress, []byte("addr1"), voteArgs) - retCode := gsc.Execute(callInput) - require.Equal(t, vmcommon.UserError, retCode) -} - -func TestGovernanceContract_VoteWithFundsInvalidVoteType(t *testing.T) { - t.Parallel() - - proposalIdentifier := bytes.Repeat([]byte("a"), commitHashLength) - - args := createMockGovernanceArgs() - args.Eei = createMockStorer(vm.GovernanceSCAddress, proposalIdentifier, &GeneralProposal{}) - gsc, _ := NewGovernanceContract(args) - voteArgs := [][]byte{ - proposalIdentifier, - []byte("1"), - } - callInput := createVMInput(big.NewInt(500), "voteWithFunds", vm.GovernanceSCAddress, []byte("addr1"), voteArgs) - retCode := gsc.Execute(callInput) - require.Equal(t, vmcommon.UserError, retCode) -} - -func TestGovernanceContract_VoteWithFundsInvalidCallValue(t *testing.T) { - t.Parallel() - - proposalIdentifier := bytes.Repeat([]byte("a"), commitHashLength) - - args := createMockGovernanceArgs() - args.Eei = 
createMockStorer(vm.GovernanceSCAddress, proposalIdentifier, &GeneralProposal{}) - gsc, _ := NewGovernanceContract(args) - voteArgs := [][]byte{ - proposalIdentifier, - []byte("yes"), - } - callInput := createVMInput(big.NewInt(-500), "voteWithFunds", vm.GovernanceSCAddress, []byte("addr1"), voteArgs) - retCode := gsc.Execute(callInput) - require.Equal(t, vmcommon.UserError, retCode) -} - -func TestGovernanceContract_VoteWithFundsAddVoteError(t *testing.T) { - t.Parallel() - proposalIdentifier := bytes.Repeat([]byte("a"), commitHashLength) - defaultMarshalizer := &mock.MarshalizerMock{} - - args := createMockGovernanceArgs() - args.Eei = createMockStorer(vm.GovernanceSCAddress, proposalIdentifier, &GeneralProposal{ - Yes: big.NewInt(0), - No: big.NewInt(0), - }) - args.Marshalizer = &mock.MarshalizerStub{ - UnmarshalCalled: func(obj interface{}, buff []byte) error { - return defaultMarshalizer.Unmarshal(obj, buff) - }, - MarshalCalled: func(obj interface{}) ([]byte, error) { - _, isVoteSetType := obj.(*VoteSet) - if isVoteSetType { - return nil, errors.New("invalid vote set") - } - return defaultMarshalizer.Marshal(obj) - }, - } - gsc, _ := NewGovernanceContract(args) - voteArgs := [][]byte{ - proposalIdentifier, - []byte("yes"), - } - callInput := createVMInput(big.NewInt(500), "voteWithFunds", vm.GovernanceSCAddress, []byte("addr1"), voteArgs) - retCode := gsc.Execute(callInput) + callInput.CallValue = big.NewInt(10) + retCode = gsc.Execute(callInput) require.Equal(t, vmcommon.UserError, retCode) -} - -func TestGovernanceContract_VoteWithFundsAddSimpleVote(t *testing.T) { - t.Parallel() - - proposalIdentifier := bytes.Repeat([]byte("a"), commitHashLength) - - args := createMockGovernanceArgs() - args.Eei = createMockStorer(vm.GovernanceSCAddress, proposalIdentifier, &GeneralProposal{ - Yes: big.NewInt(0), - No: big.NewInt(0), - }) - gsc, _ := NewGovernanceContract(args) - voteArgs := [][]byte{ - proposalIdentifier, - []byte("yes"), - } - callInput := createVMInput(big.NewInt(500), "voteWithFunds", vm.GovernanceSCAddress, []byte("addr1"), voteArgs) - retCode := gsc.Execute(callInput) - require.Equal(t, vmcommon.Ok, retCode) -} - -func TestGovernanceContract_VoteWithFundsTwiceBadVote(t *testing.T) { - t.Parallel() - - proposalIdentifier := bytes.Repeat([]byte("a"), commitHashLength) - args := createMockGovernanceArgs() - args.Eei = &mock.SystemEIStub{ - GetStorageCalled: func(key []byte) []byte { - isGeneralProposalKey := bytes.Equal(key, append([]byte(proposalPrefix), proposalIdentifier...)) - if isGeneralProposalKey { - marshaledProposal, _ := args.Marshalizer.Marshal(&GeneralProposal{}) - return marshaledProposal - } + require.True(t, strings.Contains(eei.GetReturnMessage(), "function is not payable")) - marshaledVote, _ := args.Marshalizer.Marshal(&VoteSet{VoteItems: []*VoteDetails{{Value: No}}}) - return marshaledVote - }, - AddReturnMessageCalled: func(msg string) { - require.Equal(t, msg, "conflicting votes for same proposal") - }, - GetStorageFromAddressCalled: func(address []byte, key []byte) []byte { - marshalizer := &mock.MarshalizerMock{} - if bytes.Equal(address, vm.DelegationManagerSCAddress) && bytes.Equal(key, []byte(delegationManagementKey)) { - dManagementData := &DelegationManagement{MinDelegationAmount: big.NewInt(10)} - marshaledData, _ := marshalizer.Marshal(dManagementData) - return marshaledData - } - - return nil - }, - } - gsc, _ := NewGovernanceContract(args) - voteArgs := [][]byte{ - proposalIdentifier, - []byte("yes"), - } - callInput := 
createVMInput(big.NewInt(500), "voteWithFunds", vm.GovernanceSCAddress, []byte("addr1"), voteArgs) - retCode := gsc.Execute(callInput) + callInput.CallValue = big.NewInt(0) + callInput.Arguments = [][]byte{{1}, {2}} + retCode = gsc.Execute(callInput) require.Equal(t, vmcommon.UserError, retCode) -} - -func TestGovernanceContract_VoteWithBadArgsOrCallValue(t *testing.T) { - t.Parallel() - - args := createMockGovernanceArgs() - - gsc, _ := NewGovernanceContract(args) - callInput := createVMInput(big.NewInt(0), "vote", vm.GovernanceSCAddress, []byte("addr1"), [][]byte{[]byte("bad args")}) - retCode := gsc.Execute(callInput) - require.Equal(t, vmcommon.FunctionWrongSignature, retCode) + require.True(t, strings.Contains(eei.GetReturnMessage(), "only user can call this")) - callInput.CallValue = big.NewInt(10) + callInput.CallerAddr = bytes.Repeat([]byte{1}, 32) retCode = gsc.Execute(callInput) require.Equal(t, vmcommon.UserError, retCode) + require.True(t, strings.Contains(eei.GetReturnMessage(), "not enough stake/delegate to vote")) } -func TestGovernanceContract_ValidatorVoteNotEnoughGas(t *testing.T) { +func TestGovernanceContract_VoteNotEnoughGas(t *testing.T) { t.Parallel() args := createMockGovernanceArgs() @@ -588,1637 +677,211 @@ func TestGovernanceContract_ValidatorVoteNotEnoughGas(t *testing.T) { require.Equal(t, vmcommon.OutOfGas, retCode) } -func TestGovernanceContract_ValidatorVoteInvalidProposal(t *testing.T) { +func TestGovernanceContract_VoteInvalidProposal(t *testing.T) { t.Parallel() - returnMessage := "" - callerAddress := []byte("address") - proposalIdentifier := bytes.Repeat([]byte("a"), commitHashLength) - - args := createMockGovernanceArgs() - + callerAddress := bytes.Repeat([]byte{2}, 32) + proposalIdentifier := []byte("aaaaaaaaa") generalProposal := &GeneralProposal{ CommitHash: proposalIdentifier, - StartVoteNonce: 10, - EndVoteNonce: 15, + StartVoteEpoch: 10, + EndVoteEpoch: 15, } - args.Eei = &mock.SystemEIStub{ - GetStorageCalled: func(key []byte) []byte { - if bytes.Equal(key, append([]byte(proposalPrefix), proposalIdentifier...)) { - proposalBytes, _ := args.Marshalizer.Marshal(generalProposal) - return proposalBytes - } - return nil - }, - BlockChainHookCalled: func() vm.BlockchainHook { - return &mock.BlockChainHookStub{ - CurrentNonceCalled: func() uint64 { - return 16 - }, - } - }, - AddReturnMessageCalled: func(msg string) { - returnMessage = msg - }, - } voteArgs := [][]byte{ - proposalIdentifier, + []byte("1"), []byte("yes"), } - gsc, _ := NewGovernanceContract(args) + gsc, blockchainHook, eei := createGovernanceBlockChainHookStubContextHandler() + blockchainHook.CurrentEpochCalled = func() uint32 { + return 16 + } + + gsc.eei.SetStorage(append([]byte(noncePrefix), voteArgs[0]...), proposalIdentifier) + _ = gsc.saveGeneralProposal(proposalIdentifier, generalProposal) + callInput := createVMInput(big.NewInt(0), "vote", callerAddress, vm.GovernanceSCAddress, voteArgs) retCode := gsc.Execute(callInput) require.Equal(t, vmcommon.UserError, retCode) - require.Equal(t, vm.ErrVotedForAnExpiredProposal.Error(), returnMessage) + require.Equal(t, eei.GetReturnMessage(), vm.ErrVotedForAnExpiredProposal.Error()) } -func TestGovernanceContract_ValidatorVoteInvalidVote(t *testing.T) { +func TestGovernanceContract_VoteInvalidVote(t *testing.T) { t.Parallel() - returnMessage := "" - errInvalidVoteSubstr := "invalid vote type option" - callerAddress := []byte("address") - proposalIdentifier := bytes.Repeat([]byte("a"), commitHashLength) - - args := 
createMockGovernanceArgs() - + callerAddress := bytes.Repeat([]byte{2}, 32) + proposalIdentifier := []byte("aaaaaaaaa") generalProposal := &GeneralProposal{ CommitHash: proposalIdentifier, - StartVoteNonce: 10, - EndVoteNonce: 15, + StartVoteEpoch: 10, + EndVoteEpoch: 15, } - args.Eei = &mock.SystemEIStub{ - GetStorageCalled: func(key []byte) []byte { - if bytes.Equal(key, append([]byte(proposalPrefix), proposalIdentifier...)) { - proposalBytes, _ := args.Marshalizer.Marshal(generalProposal) - return proposalBytes - } - return nil - }, - BlockChainHookCalled: func() vm.BlockchainHook { - return &mock.BlockChainHookStub{ - CurrentNonceCalled: func() uint64 { - return 14 - }, - } - }, - AddReturnMessageCalled: func(msg string) { - returnMessage = msg - }, - } voteArgs := [][]byte{ - proposalIdentifier, - []byte("wrong vote"), + []byte("1"), + []byte("invalid"), } - gsc, _ := NewGovernanceContract(args) + gsc, blockchainHook, eei := createGovernanceBlockChainHookStubContextHandler() + blockchainHook.CurrentEpochCalled = func() uint32 { + return 14 + } + + gsc.eei.SetStorage(append([]byte(noncePrefix), voteArgs[0]...), proposalIdentifier) + _ = gsc.saveGeneralProposal(proposalIdentifier, generalProposal) + callInput := createVMInput(big.NewInt(0), "vote", callerAddress, vm.GovernanceSCAddress, voteArgs) retCode := gsc.Execute(callInput) require.Equal(t, vmcommon.UserError, retCode) - require.Contains(t, returnMessage, errInvalidVoteSubstr) + require.Equal(t, eei.GetReturnMessage(), "invalid argument: invalid vote type") } -func TestGovernanceContract_ValidatorVoteInvalidDelegated(t *testing.T) { +func TestGovernanceContract_VoteTwice(t *testing.T) { t.Parallel() - returnMessage := "" - errInvalidVoteSubstr := "invalid delegator address" - callerAddress := vm.FirstDelegationSCAddress - proposalIdentifier := bytes.Repeat([]byte("a"), commitHashLength) - - args := createMockGovernanceArgs() + gsc, blockchainHook, eei := createGovernanceBlockChainHookStubContextHandler() + blockchainHook.CurrentEpochCalled = func() uint32 { + return 12 + } + callerAddress := bytes.Repeat([]byte{2}, 32) + proposalIdentifier := []byte("aaaaaaaaa") generalProposal := &GeneralProposal{ + ProposalCost: gsc.baseProposalCost, CommitHash: proposalIdentifier, - StartVoteNonce: 10, - EndVoteNonce: 15, + StartVoteEpoch: 10, + EndVoteEpoch: 15, + Yes: big.NewInt(0), + No: big.NewInt(0), + Veto: big.NewInt(0), + Abstain: big.NewInt(0), + QuorumStake: big.NewInt(0), } - args.Eei = &mock.SystemEIStub{ - GetStorageCalled: func(key []byte) []byte { - if bytes.Equal(key, append([]byte(proposalPrefix), proposalIdentifier...)) { - proposalBytes, _ := args.Marshalizer.Marshal(generalProposal) - return proposalBytes - } - return nil - }, - BlockChainHookCalled: func() vm.BlockchainHook { - return &mock.BlockChainHookStub{ - CurrentNonceCalled: func() uint64 { - return 14 - }, - } - }, - AddReturnMessageCalled: func(msg string) { - returnMessage = msg - }, - } voteArgs := [][]byte{ - proposalIdentifier, + []byte("1"), []byte("yes"), - []byte("delegatedToWrongAddress"), - big.NewInt(1000).Bytes(), } - gsc, _ := NewGovernanceContract(args) - callInput := createVMInput(big.NewInt(0), "delegateVote", callerAddress, vm.GovernanceSCAddress, voteArgs) + gsc.eei.SetStorage(append([]byte(noncePrefix), voteArgs[0]...), proposalIdentifier) + _ = gsc.saveGeneralProposal(proposalIdentifier, generalProposal) + + callInput := createVMInput(big.NewInt(0), "vote", callerAddress, vm.GovernanceSCAddress, voteArgs) retCode := gsc.Execute(callInput) + 
require.Equal(t, vmcommon.Ok, retCode) + + voteArgs[1] = []byte("no") + retCode = gsc.Execute(callInput) require.Equal(t, vmcommon.UserError, retCode) - require.Contains(t, returnMessage, errInvalidVoteSubstr) + require.Equal(t, eei.GetReturnMessage(), "double vote is not allowed") } -func TestGovernanceContract_ValidatorVoteComputePowerError(t *testing.T) { +func TestGovernanceContract_DelegateVoteUserErrors(t *testing.T) { t.Parallel() - - returnMessage := "" - errInvalidVoteSubstr := "could not return total stake for the provided address" - callerAddress := []byte("address") - proposalIdentifier := bytes.Repeat([]byte("a"), commitHashLength) - - args := createMockGovernanceArgs() - - generalProposal := &GeneralProposal{ - CommitHash: proposalIdentifier, - StartVoteNonce: 10, - EndVoteNonce: 15, - } - args.Eei = &mock.SystemEIStub{ - GetStorageCalled: func(key []byte) []byte { - if bytes.Equal(key, append([]byte(proposalPrefix), proposalIdentifier...)) { - proposalBytes, _ := args.Marshalizer.Marshal(generalProposal) - return proposalBytes - } - - return nil - }, - GetStorageFromAddressCalled: func(_ []byte, _ []byte) []byte { - return []byte("invalid proposal bytes") - }, - BlockChainHookCalled: func() vm.BlockchainHook { - return &mock.BlockChainHookStub{ - CurrentNonceCalled: func() uint64 { - return 14 - }, - } - }, - AddReturnMessageCalled: func(msg string) { - returnMessage = msg - }, - } - voteArgs := [][]byte{ - proposalIdentifier, - []byte("yes"), - } - gsc, _ := NewGovernanceContract(args) - callInput := createVMInput(big.NewInt(0), "vote", callerAddress, vm.GovernanceSCAddress, voteArgs) - retCode := gsc.Execute(callInput) - require.Equal(t, vmcommon.UserError, retCode) - require.Contains(t, returnMessage, errInvalidVoteSubstr) -} - -func TestGovernanceContract_ValidatorVoteInvalidVoteSetError(t *testing.T) { - t.Parallel() - - mockBlsKey := []byte("bls key") - mockValidatorBlsKeys := [][]byte{ - mockBlsKey, - mockBlsKey, - mockBlsKey, - mockBlsKey, - } - - callerAddress := []byte("address") - proposalIdentifier := bytes.Repeat([]byte("a"), commitHashLength) - votePower := big.NewInt(100).Bytes() - - args := createMockGovernanceArgs() - - generalProposal := &GeneralProposal{ - CommitHash: proposalIdentifier, - StartVoteNonce: 10, - EndVoteNonce: 15, - } - args.Eei = &mock.SystemEIStub{ - GetStorageCalled: func(key []byte) []byte { - if bytes.Equal(key, append([]byte(proposalPrefix), proposalIdentifier...)) { - proposalBytes, _ := args.Marshalizer.Marshal(generalProposal) - return proposalBytes - } - if bytes.Equal(key, append(proposalIdentifier, callerAddress...)) { - return []byte("invalid vote set") - } - - return nil - }, - GetStorageFromAddressCalled: func(address []byte, key []byte) []byte { - if bytes.Equal(address, args.ValidatorSCAddress) && bytes.Equal(key, callerAddress) { - auctionBytes, _ := args.Marshalizer.Marshal(&ValidatorDataV2{ - BlsPubKeys: mockValidatorBlsKeys, - TotalStakeValue: big.NewInt(0).SetBytes(votePower), - }) - - return auctionBytes - } - - return nil - }, - BlockChainHookCalled: func() vm.BlockchainHook { - return &mock.BlockChainHookStub{ - CurrentNonceCalled: func() uint64 { - return 14 - }, - } - }, - } - voteArgs := [][]byte{ - proposalIdentifier, - []byte("yes"), - } - gsc, _ := NewGovernanceContract(args) - callInput := createVMInput(big.NewInt(0), "vote", callerAddress, vm.GovernanceSCAddress, voteArgs) - retCode := gsc.Execute(callInput) - require.Equal(t, vmcommon.ExecutionFailed, retCode) -} - -func 
TestGovernanceContract_DelegateVoteVoteNotEnoughPower(t *testing.T) { - t.Parallel() - - mockBlsKey := []byte("bls key") - returnMessage := "" - errInvalidVoteSubstr := "not enough voting power to cast this vote" - mockValidatorBlsKeys := [][]byte{ - mockBlsKey, - mockBlsKey, - mockBlsKey, - mockBlsKey, - } - - callerAddress := vm.FirstDelegationSCAddress - proposalIdentifier := bytes.Repeat([]byte("a"), commitHashLength) - votePower := big.NewInt(100).Bytes() - - args := createMockGovernanceArgs() - - generalProposal := &GeneralProposal{ - CommitHash: proposalIdentifier, - StartVoteNonce: 10, - EndVoteNonce: 15, - } - args.Eei = &mock.SystemEIStub{ - GetStorageCalled: func(key []byte) []byte { - if bytes.Equal(key, append([]byte(proposalPrefix), proposalIdentifier...)) { - proposalBytes, _ := args.Marshalizer.Marshal(generalProposal) - return proposalBytes - } - - return nil - }, - AddReturnMessageCalled: func(msg string) { - returnMessage = msg - }, - GetStorageFromAddressCalled: func(address []byte, key []byte) []byte { - if bytes.Equal(address, args.ValidatorSCAddress) && bytes.Equal(key, callerAddress) { - auctionBytes, _ := args.Marshalizer.Marshal(&ValidatorDataV2{ - BlsPubKeys: mockValidatorBlsKeys, - TotalStakeValue: big.NewInt(0).SetBytes(votePower), - }) - - return auctionBytes - } - if bytes.Equal(address, vm.DelegationManagerSCAddress) && bytes.Equal(key, []byte(delegationContractsList)) { - contractList := &DelegationContractList{} - marshaledData, _ := args.Marshalizer.Marshal(contractList) - return marshaledData - } - - return nil - }, - BlockChainHookCalled: func() vm.BlockchainHook { - return &mock.BlockChainHookStub{ - CurrentNonceCalled: func() uint64 { - return 14 - }, - } - }, - } - - voteArgs := [][]byte{ - proposalIdentifier, - []byte("yes"), - big.NewInt(100000).Bytes(), - callerAddress, - } - gsc, _ := NewGovernanceContract(args) - - callInput := createVMInput(big.NewInt(0), "delegateVote", callerAddress, vm.GovernanceSCAddress, voteArgs) - retCode := gsc.Execute(callInput) - require.Equal(t, vmcommon.UserError, retCode) - require.Contains(t, returnMessage, errInvalidVoteSubstr) -} - -func TestGovernanceContract_DelegateVoteSuccess(t *testing.T) { - t.Parallel() - - mockBlsKey := []byte("bls key") - mockValidatorBlsKeys := [][]byte{ - mockBlsKey, - mockBlsKey, - mockBlsKey, - mockBlsKey, - } - - callerAddress := vm.FirstDelegationSCAddress - proposalIdentifier := bytes.Repeat([]byte("a"), commitHashLength) - votePower := big.NewInt(100) - - args := createMockGovernanceArgs() - - generalProposal := &GeneralProposal{ - CommitHash: proposalIdentifier, - StartVoteNonce: 10, - EndVoteNonce: 15, - Yes: big.NewInt(10), - } - args.Eei = &mock.SystemEIStub{ - GetStorageCalled: func(key []byte) []byte { - if bytes.Equal(key, append([]byte(proposalPrefix), proposalIdentifier...)) { - proposalBytes, _ := args.Marshalizer.Marshal(generalProposal) - return proposalBytes - } - - return nil - }, - GetStorageFromAddressCalled: func(address []byte, key []byte) []byte { - if bytes.Equal(address, args.ValidatorSCAddress) && bytes.Equal(key, callerAddress) { - auctionBytes, _ := args.Marshalizer.Marshal(&ValidatorDataV2{ - BlsPubKeys: mockValidatorBlsKeys, - TotalStakeValue: big.NewInt(0).Set(votePower), - }) - - return auctionBytes - } - if bytes.Equal(address, vm.DelegationManagerSCAddress) && bytes.Equal(key, []byte(delegationContractsList)) { - contractList := &DelegationContractList{} - marshaledData, _ := args.Marshalizer.Marshal(contractList) - return marshaledData - } - - 
return nil - }, - BlockChainHookCalled: func() vm.BlockchainHook { - return &mock.BlockChainHookStub{ - CurrentNonceCalled: func() uint64 { - return 14 - }, - } - }, - } - - voteArgs := [][]byte{ - proposalIdentifier, - []byte("yes"), - big.NewInt(10).Bytes(), - callerAddress, - } - gsc, _ := NewGovernanceContract(args) - - callInput := createVMInput(big.NewInt(0), "delegateVote", callerAddress, vm.GovernanceSCAddress, voteArgs) - retCode := gsc.Execute(callInput) - require.Equal(t, vmcommon.Ok, retCode) -} - -func TestGovernanceContract_ValidatorVote(t *testing.T) { - t.Parallel() - - mockBlsKey := []byte("bls key") - mockValidatorBlsKeys := [][]byte{ - mockBlsKey, - mockBlsKey, - mockBlsKey, - mockBlsKey, - } - - callerAddress := []byte("address") - proposalIdentifier := bytes.Repeat([]byte("a"), commitHashLength) - votePower := big.NewInt(10) - proposalKey := append([]byte(proposalPrefix), proposalIdentifier...) - voteItemKey := append(proposalKey, callerAddress...) - - finalVoteSet := &VoteSet{} - finalProposal := &GeneralProposal{} - - args := createMockGovernanceArgs() - - generalProposal := &GeneralProposal{ - CommitHash: proposalIdentifier, - StartVoteNonce: 10, - EndVoteNonce: 15, - Yes: big.NewInt(0), - } - args.Eei = &mock.SystemEIStub{ - GetStorageCalled: func(key []byte) []byte { - if bytes.Equal(key, append([]byte(proposalPrefix), proposalIdentifier...)) { - proposalBytes, _ := args.Marshalizer.Marshal(generalProposal) - return proposalBytes - } - if bytes.Equal(key, append([]byte(stakeLockPrefix), callerAddress...)) { - return big.NewInt(10).Bytes() - } - - return nil - }, - GetStorageFromAddressCalled: func(address []byte, key []byte) []byte { - if bytes.Equal(address, args.ValidatorSCAddress) && bytes.Equal(key, callerAddress) { - auctionBytes, _ := args.Marshalizer.Marshal(&ValidatorDataV2{ - BlsPubKeys: mockValidatorBlsKeys, - TotalStakeValue: big.NewInt(100), - }) - - return auctionBytes - } - if bytes.Equal(address, vm.DelegationManagerSCAddress) && bytes.Equal(key, []byte(delegationContractsList)) { - contractList := &DelegationContractList{Addresses: [][]byte{vm.FirstDelegationSCAddress}} - marshaledData, _ := args.Marshalizer.Marshal(contractList) - return marshaledData - } - - return nil - }, - - SetStorageCalled: func(key []byte, value []byte) { - if bytes.Equal(key, voteItemKey) { - _ = args.Marshalizer.Unmarshal(finalVoteSet, value) - } - if bytes.Equal(key, proposalKey) { - _ = args.Marshalizer.Unmarshal(finalProposal, value) - } - }, - BlockChainHookCalled: func() vm.BlockchainHook { - return &mock.BlockChainHookStub{ - CurrentNonceCalled: func() uint64 { - return 14 - }, - } - }, - } - - voteArgs := [][]byte{ - proposalIdentifier, - []byte("yes"), - } - gsc, _ := NewGovernanceContract(args) - callInput := createVMInput(big.NewInt(0), "vote", callerAddress, vm.GovernanceSCAddress, voteArgs) - retCode := gsc.Execute(callInput) - require.Equal(t, vmcommon.Ok, retCode) - require.Equal(t, votePower, finalProposal.Yes) - require.Equal(t, 1, len(finalProposal.Votes)) - require.Equal(t, votePower, finalVoteSet.TotalYes) - require.Equal(t, votePower, finalVoteSet.UsedPower) - require.Equal(t, big.NewInt(0), finalVoteSet.UsedBalance) -} - -func TestGovernanceContract_ValidatorVoteTwice(t *testing.T) { - t.Parallel() - - callerAddress := []byte("address") - proposalIdentifier := bytes.Repeat([]byte("a"), commitHashLength) - - args := createMockGovernanceArgs() - - generalProposal := &GeneralProposal{ - CommitHash: proposalIdentifier, - StartVoteNonce: 10, - 
EndVoteNonce: 15, - Yes: big.NewInt(0), - } - args.Eei = &mock.SystemEIStub{ - GetStorageCalled: func(key []byte) []byte { - if bytes.Equal(key, append([]byte(proposalPrefix), proposalIdentifier...)) { - proposalBytes, _ := args.Marshalizer.Marshal(generalProposal) - return proposalBytes - } - if bytes.Equal(key, append(proposalIdentifier, callerAddress...)) { - voteSetBytes, _ := args.Marshalizer.Marshal(&VoteSet{VoteItems: []*VoteDetails{{Value: 0}}}) - return voteSetBytes - } - - return nil - }, - BlockChainHookCalled: func() vm.BlockchainHook { - return &mock.BlockChainHookStub{ - CurrentNonceCalled: func() uint64 { - return 14 - }, - } - }, - AddReturnMessageCalled: func(msg string) { - require.Equal(t, msg, "vote only once") - }, - } - - voteArgs := [][]byte{ - proposalIdentifier, - []byte("yes"), - } - gsc, _ := NewGovernanceContract(args) - callInput := createVMInput(big.NewInt(0), "vote", callerAddress, vm.GovernanceSCAddress, voteArgs) - retCode := gsc.Execute(callInput) - require.Equal(t, vmcommon.UserError, retCode) -} - -func TestGovernanceContract_DelegateVoteUserErrors(t *testing.T) { - t.Parallel() - - args := createMockGovernanceArgs() - - mockEI := &mock.SystemEIStub{} - args.Eei = mockEI - - gsc, _ := NewGovernanceContract(args) - callInput := createVMInput(big.NewInt(0), "delegateVote", []byte("address"), vm.GovernanceSCAddress, nil) - - retCode := gsc.Execute(callInput) - require.Equal(t, vmcommon.UserError, retCode) - - callInput.Arguments = [][]byte{{1}, {2}, {3}, {4}} - callInput.CallValue = big.NewInt(10) - retCode = gsc.Execute(callInput) - require.Equal(t, vmcommon.UserError, retCode) - require.Equal(t, mockEI.ReturnMessage, "function is not payable") - - mockEI.UseGasCalled = func(_ uint64) error { - return vm.ErrNotEnoughGas - } - callInput.CallValue = big.NewInt(0) - args.Eei = mockEI - retCode = gsc.Execute(callInput) - require.Equal(t, vmcommon.OutOfGas, retCode) - - mockEI.AddReturnMessageCalled = func(msg string) { - require.Equal(t, msg, "only SC can call this") - } - mockEI.UseGasCalled = func(gas uint64) error { - return nil - } - args.Eei = mockEI - retCode = gsc.Execute(callInput) - require.Equal(t, vmcommon.UserError, retCode) - - mockEI.AddReturnMessageCalled = func(msg string) { - require.Equal(t, msg, "invalid delegator address") - } - callInput.CallerAddr = vm.ESDTSCAddress - retCode = gsc.Execute(callInput) - require.Equal(t, vmcommon.UserError, retCode) - - mockEI.AddReturnMessageCalled = func(msg string) { - require.Equal(t, msg, vm.ErrProposalNotFound.Error()) - } - args.Eei = mockEI - callInput.Arguments[3] = vm.ESDTSCAddress - retCode = gsc.Execute(callInput) - require.Equal(t, vmcommon.UserError, retCode) - - mockEI.GetStorageCalled = func(key []byte) []byte { - proposalBytes, _ := args.Marshalizer.Marshal(&GeneralProposal{}) - return proposalBytes - } - mockEI.AddReturnMessageCalled = func(msg string) { - require.True(t, bytes.Contains([]byte(msg), []byte("invalid vote type option: "))) - } - args.Eei = mockEI - retCode = gsc.Execute(callInput) - require.Equal(t, vmcommon.UserError, retCode) -} - -func TestGovernanceContract_ClaimFundsWrongCallValue(t *testing.T) { - t.Parallel() - - returnMessage := "" - expectedErrorSubstr := "invalid callValue" - args := createMockGovernanceArgs() - args.Eei = &mock.SystemEIStub{ - AddReturnMessageCalled: func(msg string) { - returnMessage = msg - }, - } - - gsc, _ := NewGovernanceContract(args) - callInput := createVMInput(big.NewInt(9), "claimFunds", vm.GovernanceSCAddress, vm.GovernanceSCAddress, 
nil) - retCode := gsc.Execute(callInput) - require.Equal(t, vmcommon.UserError, retCode) - require.Contains(t, returnMessage, expectedErrorSubstr) -} - -func TestGovernanceContract_ClaimFundsWrongNumberOfArguments(t *testing.T) { - t.Parallel() - - returnMessage := "" - expectedErrorSubstr := "invalid number of arguments" - args := createMockGovernanceArgs() - args.Eei = &mock.SystemEIStub{ - AddReturnMessageCalled: func(msg string) { - returnMessage = msg - }, - } - - gsc, _ := NewGovernanceContract(args) - callInput := createVMInput(big.NewInt(0), "claimFunds", vm.GovernanceSCAddress, vm.GovernanceSCAddress, nil) - retCode := gsc.Execute(callInput) - require.Equal(t, vmcommon.FunctionWrongSignature, retCode) - require.Contains(t, returnMessage, expectedErrorSubstr) -} - -func TestGovernanceContract_ClaimFundsStillLocked(t *testing.T) { - t.Parallel() - - returnMessage := "" - expectedErrorSubstr := "your funds are still locked" - callerAddress := []byte("address") - proposalIdentifier := bytes.Repeat([]byte("a"), commitHashLength) - args := createMockGovernanceArgs() - args.Eei = &mock.SystemEIStub{ - AddReturnMessageCalled: func(msg string) { - returnMessage = msg - }, - GetStorageCalled: func(key []byte) []byte { - expectedKeyPrefix := append([]byte(fundsLockPrefix), proposalIdentifier...) - if bytes.Equal(key, append(expectedKeyPrefix, callerAddress...)) { - voteSetBytes, _ := args.Marshalizer.Marshal(&VoteSet{ - UsedBalance: big.NewInt(100), - }) - return voteSetBytes - } - if bytes.Equal(key, append([]byte(proposalPrefix), proposalIdentifier...)) { - proposalByes, _ := args.Marshalizer.Marshal(&GeneralProposal{EndVoteNonce: 100}) - return proposalByes - } - - return nil - }, - BlockChainHookCalled: func() vm.BlockchainHook { - return &mock.BlockChainHookStub{ - CurrentNonceCalled: func() uint64 { - return 11 - }, - } - }, - } - claimArgs := [][]byte{ - proposalIdentifier, - } - - gsc, _ := NewGovernanceContract(args) - callInput := createVMInput(zero, "claimFunds", callerAddress, vm.GovernanceSCAddress, claimArgs) - retCode := gsc.Execute(callInput) - require.Equal(t, vmcommon.UserError, retCode) - require.Contains(t, returnMessage, expectedErrorSubstr) -} - -func TestGovernanceContract_ClaimFundsNothingToClaim(t *testing.T) { - t.Parallel() - - returnMessage := "" - expectedErrorSubstr := "no funds to claim for this proposal" - callerAddress := []byte("address") - proposalIdentifier := bytes.Repeat([]byte("a"), commitHashLength) - args := createMockGovernanceArgs() - args.Eei = &mock.SystemEIStub{ - AddReturnMessageCalled: func(msg string) { - returnMessage = msg - }, - GetStorageCalled: func(key []byte) []byte { - expectedKeyPrefix := append([]byte(fundsLockPrefix), proposalIdentifier...) 
- if bytes.Equal(key, append(expectedKeyPrefix, callerAddress...)) { - voteSetBytes, _ := args.Marshalizer.Marshal(&VoteSet{ - UsedBalance: zero, - }) - return voteSetBytes - } - - return nil - }, - BlockChainHookCalled: func() vm.BlockchainHook { - return &mock.BlockChainHookStub{ - CurrentNonceCalled: func() uint64 { - return 11 - }, - } - }, - } - claimArgs := [][]byte{ - proposalIdentifier, - } - - gsc, _ := NewGovernanceContract(args) - callInput := createVMInput(zero, "claimFunds", callerAddress, vm.GovernanceSCAddress, claimArgs) - retCode := gsc.Execute(callInput) - require.Equal(t, vmcommon.UserError, retCode) - require.Contains(t, returnMessage, expectedErrorSubstr) -} - -func TestGovernanceContract_ClaimFunds(t *testing.T) { - t.Parallel() - - callerAddress := []byte("address") - voteValue := big.NewInt(10) - proposalIdentifier := bytes.Repeat([]byte("a"), commitHashLength) - - finalVoteSet := &VoteSet{} - transferFrom := make([]byte, 0) - transferTo := make([]byte, 0) - transferValue := big.NewInt(0) - - args := createMockGovernanceArgs() - args.Eei = &mock.SystemEIStub{ - GetStorageCalled: func(key []byte) []byte { - expectedKeyPrefix := append([]byte(fundsLockPrefix), proposalIdentifier...) - if bytes.Equal(key, append(expectedKeyPrefix, callerAddress...)) { - voteSetBytes, _ := args.Marshalizer.Marshal(&VoteSet{ - UsedBalance: voteValue, - }) - return voteSetBytes - } - if bytes.Equal(key, append([]byte(proposalPrefix), proposalIdentifier...)) { - proposalByes, _ := args.Marshalizer.Marshal(&GeneralProposal{EndVoteNonce: 100}) - return proposalByes - } - - return nil - }, - BlockChainHookCalled: func() vm.BlockchainHook { - return &mock.BlockChainHookStub{ - CurrentNonceCalled: func() uint64 { - return 101 - }, - } - }, - SetStorageCalled: func(key []byte, value []byte) { - proposalKey := append([]byte(proposalPrefix), proposalIdentifier...) 
- if bytes.Equal(key, append(proposalKey, callerAddress...)) { - _ = args.Marshalizer.Unmarshal(finalVoteSet, value) - } - }, - TransferCalled: func(destination []byte, sender []byte, value *big.Int, _ []byte) error { - transferTo = destination - transferFrom = sender - transferValue.Set(value) - - return nil - }, - } - claimArgs := [][]byte{ - proposalIdentifier, - } - - gsc, _ := NewGovernanceContract(args) - callInput := createVMInput(zero, "claimFunds", callerAddress, vm.GovernanceSCAddress, claimArgs) - retCode := gsc.Execute(callInput) - require.Equal(t, vmcommon.Ok, retCode) - require.Equal(t, args.GovernanceSCAddress, transferFrom) - require.Equal(t, callerAddress, transferTo) - require.Equal(t, voteValue, transferValue) -} - -func TestGovernanceContract_WhiteListProposal(t *testing.T) { - t.Parallel() - - callerAddress := []byte("address") - finalWhitelistProposal := &WhiteListProposal{} - finalProposal := &GeneralProposal{} - proposalIdentifier := bytes.Repeat([]byte("a"), commitHashLength) - args := createMockGovernanceArgs() - args.Eei = &mock.SystemEIStub{ - BlockChainHookCalled: func() vm.BlockchainHook { - return &mock.BlockChainHookStub{ - CurrentNonceCalled: func() uint64 { - return 1 - }, - } - }, - SetStorageCalled: func(key []byte, value []byte) { - if bytes.Equal(key, append([]byte(whiteListPrefix), callerAddress...)) { - _ = args.Marshalizer.Unmarshal(finalWhitelistProposal, value) - } - if bytes.Equal(key, append([]byte(proposalPrefix), callerAddress...)) { - _ = args.Marshalizer.Unmarshal(finalProposal, value) - } - }, - } - - gsc, _ := NewGovernanceContract(args) - - callInputArgs := [][]byte{ - proposalIdentifier, - []byte("1"), - []byte("10"), - } - proposalCost, _ := big.NewInt(0).SetString(args.GovernanceConfig.Active.ProposalCost, conversionBase) - callInput := createVMInput(proposalCost, "whiteList", callerAddress, vm.GovernanceSCAddress, callInputArgs) - retCode := gsc.Execute(callInput) - - require.Equal(t, vmcommon.Ok, retCode) - require.Equal(t, callerAddress, finalWhitelistProposal.WhiteListAddress) - require.Equal(t, append([]byte(proposalPrefix), callerAddress...), finalWhitelistProposal.ProposalStatus) - require.Equal(t, proposalIdentifier, finalProposal.CommitHash) -} - -func TestGovernanceContract_WhiteListProposalInvalidCallValue(t *testing.T) { - t.Parallel() - - retMessage := "" - errSubstr := "invalid callValue" - callerAddress := []byte("address") - proposalIdentifier := bytes.Repeat([]byte("a"), commitHashLength) - args := createMockGovernanceArgs() - args.Eei = &mock.SystemEIStub{ - BlockChainHookCalled: func() vm.BlockchainHook { - return &mock.BlockChainHookStub{ - CurrentNonceCalled: func() uint64 { - return 1 - }, - } - }, - AddReturnMessageCalled: func(msg string) { - retMessage = msg - }, - } - - gsc, _ := NewGovernanceContract(args) - - callInputArgs := [][]byte{ - proposalIdentifier, - []byte("1"), - []byte("10"), - } - callInput := createVMInput(big.NewInt(0), "whiteList", callerAddress, vm.GovernanceSCAddress, callInputArgs) - retCode := gsc.Execute(callInput) - - require.Equal(t, vmcommon.OutOfFunds, retCode) - require.Contains(t, retMessage, errSubstr) -} - -func TestGovernanceContract_WhiteListProposalNotEnoughGas(t *testing.T) { - t.Parallel() - - retMessage := "" - errSubstr := "not enough gas" - callerAddress := []byte("address") - proposalIdentifier := bytes.Repeat([]byte("a"), commitHashLength) - args := createMockGovernanceArgs() - args.Eei = &mock.SystemEIStub{ - BlockChainHookCalled: func() vm.BlockchainHook { - return 
&mock.BlockChainHookStub{ - CurrentNonceCalled: func() uint64 { - return 1 - }, - } - }, - UseGasCalled: func(_ uint64) error { - return errors.New("not enough gas") - }, - AddReturnMessageCalled: func(msg string) { - retMessage = msg - }, - } - - gsc, _ := NewGovernanceContract(args) - - callInputArgs := [][]byte{ - proposalIdentifier, - []byte("1"), - []byte("10"), - } - proposalCost, _ := big.NewInt(0).SetString(args.GovernanceConfig.Active.ProposalCost, conversionBase) - callInput := createVMInput(proposalCost, "whiteList", callerAddress, vm.GovernanceSCAddress, callInputArgs) - retCode := gsc.Execute(callInput) - - require.Equal(t, vmcommon.OutOfGas, retCode) - require.Contains(t, retMessage, errSubstr) -} - -func TestGovernanceContract_WhiteListInvalidArgumentsLength(t *testing.T) { - t.Parallel() - - retMessage := "" - errSubstr := "invalid number of arguments" - callerAddress := []byte("address") - proposalIdentifier := bytes.Repeat([]byte("a"), commitHashLength) - args := createMockGovernanceArgs() - args.Eei = &mock.SystemEIStub{ - BlockChainHookCalled: func() vm.BlockchainHook { - return &mock.BlockChainHookStub{ - CurrentNonceCalled: func() uint64 { - return 1 - }, - } - }, - AddReturnMessageCalled: func(msg string) { - retMessage = msg - }, - } - - gsc, _ := NewGovernanceContract(args) - - callInputArgs := [][]byte{ - proposalIdentifier, - []byte("1"), - []byte("10"), - []byte("10"), - } - proposalCost, _ := big.NewInt(0).SetString(args.GovernanceConfig.Active.ProposalCost, conversionBase) - callInput := createVMInput(proposalCost, "whiteList", callerAddress, vm.GovernanceSCAddress, callInputArgs) - retCode := gsc.Execute(callInput) - - require.Equal(t, vmcommon.FunctionWrongSignature, retCode) - require.Contains(t, retMessage, errSubstr) -} - -func TestGovernanceContract_WhiteListProposalAlreadyExists(t *testing.T) { - t.Parallel() - - retMessage := "" - errSubstr := "cannot re-propose existing proposal" - callerAddress := []byte("address") - proposalIdentifier := bytes.Repeat([]byte("a"), commitHashLength) - args := createMockGovernanceArgs() - args.Eei = &mock.SystemEIStub{ - BlockChainHookCalled: func() vm.BlockchainHook { - return &mock.BlockChainHookStub{ - CurrentNonceCalled: func() uint64 { - return 1 - }, - } - }, - GetStorageCalled: func(key []byte) []byte { - if bytes.Equal(key, append([]byte(proposalPrefix), proposalIdentifier...)) { - return []byte("proposal exists") - } - - return nil - }, - AddReturnMessageCalled: func(msg string) { - retMessage = msg - }, - } - - gsc, _ := NewGovernanceContract(args) - - callInputArgs := [][]byte{ - proposalIdentifier, - []byte("1"), - []byte("10"), - } - proposalCost, _ := big.NewInt(0).SetString(args.GovernanceConfig.Active.ProposalCost, conversionBase) - callInput := createVMInput(proposalCost, "whiteList", callerAddress, vm.GovernanceSCAddress, callInputArgs) - retCode := gsc.Execute(callInput) - - require.Equal(t, vmcommon.UserError, retCode) - require.Contains(t, retMessage, errSubstr) -} - -func TestGovernanceContract_WhiteListAlreadyWhitelisted(t *testing.T) { - t.Parallel() - - retMessage := "" - errSubstr := "address is already whitelisted" - callerAddress := []byte("address") - proposalIdentifier := bytes.Repeat([]byte("a"), commitHashLength) - args := createMockGovernanceArgs() - args.Eei = &mock.SystemEIStub{ - BlockChainHookCalled: func() vm.BlockchainHook { - return &mock.BlockChainHookStub{ - CurrentNonceCalled: func() uint64 { - return 1 - }, - } - }, - GetStorageCalled: func(key []byte) []byte { - if 
bytes.Equal(key, append([]byte(whiteListPrefix), callerAddress...)) { - return []byte("whitelisted") - } - if bytes.Equal(key, append([]byte(proposalPrefix), callerAddress...)) { - proposalBytes, _ := args.Marshalizer.Marshal(&GeneralProposal{ - Passed: true, - }) - return proposalBytes - } - - return nil - }, - AddReturnMessageCalled: func(msg string) { - retMessage = msg - }, - } - - gsc, _ := NewGovernanceContract(args) - - callInputArgs := [][]byte{ - proposalIdentifier, - []byte("1"), - []byte("10"), - } - proposalCost, _ := big.NewInt(0).SetString(args.GovernanceConfig.Active.ProposalCost, conversionBase) - callInput := createVMInput(proposalCost, "whiteList", callerAddress, vm.GovernanceSCAddress, callInputArgs) - retCode := gsc.Execute(callInput) - - require.Equal(t, vmcommon.UserError, retCode) - require.Contains(t, retMessage, errSubstr) -} - -func TestGovernanceContract_WhiteListInvalidProposalLength(t *testing.T) { - t.Parallel() - - retMessage := "" - errSubstr := "invalid github commit length" - callerAddress := []byte("address") - proposalIdentifier := bytes.Repeat([]byte("a"), commitHashLength-1) - args := createMockGovernanceArgs() - args.Eei = &mock.SystemEIStub{ - BlockChainHookCalled: func() vm.BlockchainHook { - return &mock.BlockChainHookStub{ - CurrentNonceCalled: func() uint64 { - return 1 - }, - } - }, - AddReturnMessageCalled: func(msg string) { - retMessage = msg - }, - } - - gsc, _ := NewGovernanceContract(args) - - callInputArgs := [][]byte{ - proposalIdentifier, - []byte("1"), - []byte("10"), - } - proposalCost, _ := big.NewInt(0).SetString(args.GovernanceConfig.Active.ProposalCost, conversionBase) - callInput := createVMInput(proposalCost, "whiteList", callerAddress, vm.GovernanceSCAddress, callInputArgs) - retCode := gsc.Execute(callInput) - - require.Equal(t, vmcommon.UserError, retCode) - require.Contains(t, retMessage, errSubstr) -} - -func TestGovernanceContract_WhiteListInvalidNonces(t *testing.T) { - t.Parallel() - - retMessage := "" - errSubstr := "invalid start/end vote nonce" - callerAddress := []byte("address") - proposalIdentifier := bytes.Repeat([]byte("a"), commitHashLength) - args := createMockGovernanceArgs() - args.Eei = &mock.SystemEIStub{ - BlockChainHookCalled: func() vm.BlockchainHook { - return &mock.BlockChainHookStub{ - CurrentNonceCalled: func() uint64 { - return 1 - }, - } - }, - AddReturnMessageCalled: func(msg string) { - retMessage = msg - }, - } - - gsc, _ := NewGovernanceContract(args) - - callInputArgs := [][]byte{ - proposalIdentifier, - []byte("1"), - []byte("invalid"), - } - proposalCost, _ := big.NewInt(0).SetString(args.GovernanceConfig.Active.ProposalCost, conversionBase) - callInput := createVMInput(proposalCost, "whiteList", callerAddress, vm.GovernanceSCAddress, callInputArgs) - retCode := gsc.Execute(callInput) - - require.Equal(t, vmcommon.UserError, retCode) - require.Contains(t, retMessage, errSubstr) -} - -func TestGovernanceContract_HardForkProposal(t *testing.T) { - t.Parallel() - - callerAddress := []byte("address") - proposalIdentifier := bytes.Repeat([]byte("a"), commitHashLength) - args := createMockGovernanceArgs() - args.Eei = &mock.SystemEIStub{ - BlockChainHookCalled: func() vm.BlockchainHook { - return &mock.BlockChainHookStub{ - CurrentNonceCalled: func() uint64 { - return 1 - }, - } - }, - GetStorageCalled: func(key []byte) []byte { - if bytes.Equal(key, append([]byte(whiteListPrefix), callerAddress...)) { - return []byte("whitelisted caller") - } - if bytes.Equal(key, append([]byte(proposalPrefix), 
callerAddress...)) { - whitelistProposalBytes, _ := args.Marshalizer.Marshal(&GeneralProposal{ - Passed: true, - }) - return whitelistProposalBytes - } - - return nil - }, - } - - gsc, _ := NewGovernanceContract(args) - - callInputArgs := [][]byte{ - []byte("1"), - []byte("10"), - proposalIdentifier, - []byte("5"), - []byte("10"), - } - proposalCost, _ := big.NewInt(0).SetString(args.GovernanceConfig.Active.ProposalCost, conversionBase) - callInput := createVMInput(proposalCost, "hardFork", callerAddress, vm.GovernanceSCAddress, callInputArgs) - retCode := gsc.Execute(callInput) - - require.Equal(t, vmcommon.Ok, retCode) -} - -func TestGovernanceContract_HardForkProposalInvalidCallValue(t *testing.T) { - t.Parallel() - - retMessage := "" - errSubstr := "invalid proposal cost" - callerAddress := []byte("address") - proposalIdentifier := bytes.Repeat([]byte("a"), commitHashLength) - args := createMockGovernanceArgs() - args.Eei = &mock.SystemEIStub{ - AddReturnMessageCalled: func(msg string) { - retMessage = msg - }, - } - - gsc, _ := NewGovernanceContract(args) - - callInputArgs := [][]byte{ - []byte("1"), - []byte("10"), - proposalIdentifier, - []byte("5"), - []byte("10"), - } - callInput := createVMInput(big.NewInt(0), "hardFork", callerAddress, vm.GovernanceSCAddress, callInputArgs) - retCode := gsc.Execute(callInput) - - require.Equal(t, vmcommon.OutOfFunds, retCode) - require.Contains(t, retMessage, errSubstr) -} - -func TestGovernanceContract_HardForkProposalNotEnoughGas(t *testing.T) { - t.Parallel() - - retMessage := "" - errSubstr := "not enough gas" - callerAddress := []byte("address") - proposalIdentifier := bytes.Repeat([]byte("a"), commitHashLength) - args := createMockGovernanceArgs() - args.Eei = &mock.SystemEIStub{ - AddReturnMessageCalled: func(msg string) { - retMessage = msg - }, - UseGasCalled: func(gas uint64) error { - return errors.New("not enough gas") - }, - } - - gsc, _ := NewGovernanceContract(args) - - callInputArgs := [][]byte{ - []byte("1"), - []byte("10"), - proposalIdentifier, - []byte("5"), - []byte("10"), - } - - proposalCost, _ := big.NewInt(0).SetString(args.GovernanceConfig.Active.ProposalCost, conversionBase) - callInput := createVMInput(proposalCost, "hardFork", callerAddress, vm.GovernanceSCAddress, callInputArgs) - retCode := gsc.Execute(callInput) - - require.Equal(t, vmcommon.OutOfGas, retCode) - require.Contains(t, retMessage, errSubstr) -} - -func TestGovernanceContract_HardForkInvalidArgumentsLenght(t *testing.T) { - t.Parallel() - - retMessage := "" - errSubstr := "invalid number of arguments" - callerAddress := []byte("address") - proposalIdentifier := bytes.Repeat([]byte("a"), commitHashLength) - args := createMockGovernanceArgs() - args.Eei = &mock.SystemEIStub{ - AddReturnMessageCalled: func(msg string) { - retMessage = msg - }, - } - - gsc, _ := NewGovernanceContract(args) - - callInputArgs := [][]byte{ - []byte("1"), - []byte("10"), - proposalIdentifier, - []byte("5"), - } - - proposalCost, _ := big.NewInt(0).SetString(args.GovernanceConfig.Active.ProposalCost, conversionBase) - callInput := createVMInput(proposalCost, "hardFork", callerAddress, vm.GovernanceSCAddress, callInputArgs) - retCode := gsc.Execute(callInput) - - require.Equal(t, vmcommon.FunctionWrongSignature, retCode) - require.Contains(t, retMessage, errSubstr) -} - -func TestGovernanceContract_HardForkProposalNotWhitelisted(t *testing.T) { - t.Parallel() - - retMessage := "" - errSubstr := "called address is not whiteListed" - callerAddress := []byte("address") - 
proposalIdentifier := bytes.Repeat([]byte("a"), commitHashLength) - args := createMockGovernanceArgs() - args.Eei = &mock.SystemEIStub{ - AddReturnMessageCalled: func(msg string) { - retMessage = msg - }, - } - - gsc, _ := NewGovernanceContract(args) - - callInputArgs := [][]byte{ - []byte("1"), - []byte("10"), - proposalIdentifier, - []byte("5"), - []byte("10"), - } - - proposalCost, _ := big.NewInt(0).SetString(args.GovernanceConfig.Active.ProposalCost, conversionBase) - callInput := createVMInput(proposalCost, "hardFork", callerAddress, vm.GovernanceSCAddress, callInputArgs) - retCode := gsc.Execute(callInput) - - require.Equal(t, vmcommon.UserError, retCode) - require.Contains(t, retMessage, errSubstr) -} - -func TestGovernanceContract_HardForkProposalInvalidCommitLength(t *testing.T) { - t.Parallel() - - retMessage := "" - errSubstr := "invalid github commit length" - callerAddress := []byte("address") - proposalIdentifier := bytes.Repeat([]byte("a"), commitHashLength-1) - args := createMockGovernanceArgs() - args.Eei = &mock.SystemEIStub{ - AddReturnMessageCalled: func(msg string) { - retMessage = msg - }, - GetStorageCalled: func(key []byte) []byte { - if bytes.Equal(key, append([]byte(whiteListPrefix), callerAddress...)) { - return []byte("whitelisted caller") - } - if bytes.Equal(key, append([]byte(proposalPrefix), callerAddress...)) { - whitelistProposalBytes, _ := args.Marshalizer.Marshal(&GeneralProposal{ - Passed: true, - }) - return whitelistProposalBytes - } - - return nil - }, - } - gsc, _ := NewGovernanceContract(args) - - callInputArgs := [][]byte{ - []byte("1"), - []byte("10"), - proposalIdentifier, - []byte("5"), - []byte("10"), - } - - proposalCost, _ := big.NewInt(0).SetString(args.GovernanceConfig.Active.ProposalCost, conversionBase) - callInput := createVMInput(proposalCost, "hardFork", callerAddress, vm.GovernanceSCAddress, callInputArgs) - retCode := gsc.Execute(callInput) - - require.Equal(t, vmcommon.UserError, retCode) - require.Contains(t, retMessage, errSubstr) -} - -func TestGovernanceContract_HardForkProposalProposalAlreadyExists(t *testing.T) { - t.Parallel() - - retMessage := "" - errSubstr := "proposal already exists" - callerAddress := []byte("address") - proposalIdentifier := bytes.Repeat([]byte("a"), commitHashLength) - args := createMockGovernanceArgs() - args.Eei = &mock.SystemEIStub{ - AddReturnMessageCalled: func(msg string) { - retMessage = msg - }, - GetStorageCalled: func(key []byte) []byte { - if bytes.Equal(key, append([]byte(whiteListPrefix), callerAddress...)) { - return []byte("whitelisted caller") - } - if bytes.Equal(key, append([]byte(proposalPrefix), callerAddress...)) { - whitelistProposalBytes, _ := args.Marshalizer.Marshal(&GeneralProposal{ - Passed: true, - }) - return whitelistProposalBytes - } - if bytes.Equal(key, append([]byte(proposalPrefix), proposalIdentifier...)) { - return []byte("proposal exists") - } - - return nil - }, - } - - gsc, _ := NewGovernanceContract(args) - - callInputArgs := [][]byte{ - []byte("1"), - []byte("10"), - proposalIdentifier, - []byte("5"), - []byte("10"), - } - - proposalCost, _ := big.NewInt(0).SetString(args.GovernanceConfig.Active.ProposalCost, conversionBase) - callInput := createVMInput(proposalCost, "hardFork", callerAddress, vm.GovernanceSCAddress, callInputArgs) - retCode := gsc.Execute(callInput) - - require.Equal(t, vmcommon.UserError, retCode) - require.Contains(t, retMessage, errSubstr) -} - -func TestGovernanceContract_HardForkProposalInvalidNonce(t *testing.T) { - t.Parallel() - - 
retMessage := "" - errSubstr := "invalid start/end vote nonce" - callerAddress := []byte("address") - proposalIdentifier := bytes.Repeat([]byte("a"), commitHashLength) - args := createMockGovernanceArgs() - args.Eei = &mock.SystemEIStub{ - AddReturnMessageCalled: func(msg string) { - retMessage = msg - }, - GetStorageCalled: func(key []byte) []byte { - if bytes.Equal(key, append([]byte(whiteListPrefix), callerAddress...)) { - return []byte("whitelisted caller") - } - if bytes.Equal(key, append([]byte(proposalPrefix), callerAddress...)) { - whitelistProposalBytes, _ := args.Marshalizer.Marshal(&GeneralProposal{ - Passed: true, - }) - return whitelistProposalBytes - } - - return nil - }, - } - - gsc, _ := NewGovernanceContract(args) - - callInputArgs := [][]byte{ - []byte("1"), - []byte("10"), - proposalIdentifier, - []byte("5"), - []byte("invalid"), - } - - proposalCost, _ := big.NewInt(0).SetString(args.GovernanceConfig.Active.ProposalCost, conversionBase) - callInput := createVMInput(proposalCost, "hardFork", callerAddress, vm.GovernanceSCAddress, callInputArgs) - retCode := gsc.Execute(callInput) - - require.Equal(t, vmcommon.UserError, retCode) - require.Contains(t, retMessage, errSubstr) -} - -func TestGovernanceContract_ChangeConfig(t *testing.T) { - t.Parallel() - - args := createMockGovernanceArgs() - args.Eei = &mock.SystemEIStub{ - BlockChainHookCalled: func() vm.BlockchainHook { - return &mock.BlockChainHookStub{ - CurrentNonceCalled: func() uint64 { - return 1 - }, - } - }, - GetStorageCalled: func(key []byte) []byte { - if bytes.Equal(key, []byte(governanceConfigKey)) { - configBytes, _ := args.Marshalizer.Marshal(&GovernanceConfigV2{}) - return configBytes - } - - return nil - }, - } - - gsc, _ := NewGovernanceContract(args) - - callInputArgs := [][]byte{ - []byte("1"), - []byte("10"), - []byte("10"), - []byte("5"), - } - initInput := createVMInput(zero, "initV2", vm.GovernanceSCAddress, vm.GovernanceSCAddress, nil) - _ = gsc.Execute(initInput) - callInput := createVMInput(zero, "changeConfig", vm.GovernanceSCAddress, vm.GovernanceSCAddress, callInputArgs) - retCode := gsc.Execute(callInput) - - require.Equal(t, vmcommon.Ok, retCode) -} - -func TestGovernanceContract_ChangeConfigWrongCaller(t *testing.T) { - t.Parallel() - - retMessage := "" - errSubstr := "changeConfig can be called only by owner" - args := createMockGovernanceArgs() - args.Eei = &mock.SystemEIStub{ - AddReturnMessageCalled: func(msg string) { - retMessage = msg - }, - } - - gsc, _ := NewGovernanceContract(args) - initInput := createVMInput(zero, "initV2", vm.GovernanceSCAddress, vm.GovernanceSCAddress, nil) - _ = gsc.Execute(initInput) - callInput := createVMInput(zero, "changeConfig", []byte("wrong caller"), vm.GovernanceSCAddress, nil) - retCode := gsc.Execute(callInput) - - require.Equal(t, vmcommon.UserError, retCode) - require.Contains(t, retMessage, errSubstr) -} - -func TestGovernanceContract_ChangeConfigWrongCallValue(t *testing.T) { - t.Parallel() - - retMessage := "" - errSubstr := "changeConfig can be called only without callValue" - args := createMockGovernanceArgs() - args.Eei = &mock.SystemEIStub{ - AddReturnMessageCalled: func(msg string) { - retMessage = msg - }, - } - - gsc, _ := NewGovernanceContract(args) - - initInput := createVMInput(zero, "initV2", vm.GovernanceSCAddress, vm.GovernanceSCAddress, nil) - _ = gsc.Execute(initInput) - callInput := createVMInput(big.NewInt(10), "changeConfig", vm.GovernanceSCAddress, vm.GovernanceSCAddress, nil) - retCode := gsc.Execute(callInput) - - 
 	require.Equal(t, vmcommon.UserError, retCode)
-	require.Contains(t, retMessage, errSubstr)
-}
-
-func TestGovernanceContract_ChangeConfigWrongArgumentsLength(t *testing.T) {
-	t.Parallel()
-
-	retMessage := ""
-	errSubstr := "changeConfig needs 4 arguments"
-	args := createMockGovernanceArgs()
-	args.Eei = &mock.SystemEIStub{
-		AddReturnMessageCalled: func(msg string) {
-			retMessage = msg
-		},
-	}
-
-	gsc, _ := NewGovernanceContract(args)
-
-	initInput := createVMInput(zero, "initV2", vm.GovernanceSCAddress, vm.GovernanceSCAddress, nil)
-	_ = gsc.Execute(initInput)
-	callInput := createVMInput(zero, "changeConfig", vm.GovernanceSCAddress, vm.GovernanceSCAddress, nil)
-	retCode := gsc.Execute(callInput)
-
-	require.Equal(t, vmcommon.UserError, retCode)
-	require.Contains(t, retMessage, errSubstr)
-}
-
-func TestGovernanceContract_ChangeConfigInvalidParams(t *testing.T) {
-	t.Parallel()
-
-	retMessage := ""
-	errSubstr := "changeConfig first argument is incorrectly formatted"
-	args := createMockGovernanceArgs()
-	args.Eei = &mock.SystemEIStub{
-		AddReturnMessageCalled: func(msg string) {
-			retMessage = msg
-		},
-	}
-
-	gsc, _ := NewGovernanceContract(args)
-
-	initInput := createVMInput(zero, "initV2", vm.GovernanceSCAddress, vm.GovernanceSCAddress, nil)
-	_ = gsc.Execute(initInput)
-
-	callInputArgs := [][]byte{
-		[]byte("invalid"),
-		[]byte("10"),
-		[]byte("10"),
-		[]byte("5"),
-	}
-	callInput := createVMInput(zero, "changeConfig", vm.GovernanceSCAddress, vm.GovernanceSCAddress, callInputArgs)
-	retCode := gsc.Execute(callInput)
-
-	require.Equal(t, vmcommon.UserError, retCode)
-	require.Contains(t, retMessage, errSubstr)
-
-	errSubstr = "changeConfig second argument is incorrectly formatted"
-	callInputArgs = [][]byte{
-		[]byte("1"),
-		[]byte("invalid"),
-		[]byte("10"),
-		[]byte("5"),
+
+	gsc, blockchainHook, eei := createGovernanceBlockChainHookStubContextHandler()
+	blockchainHook.CurrentNonceCalled = func() uint64 {
+		return 12
 	}
-	callInput = createVMInput(zero, "changeConfig", vm.GovernanceSCAddress, vm.GovernanceSCAddress, callInputArgs)
-	retCode = gsc.Execute(callInput)
-	require.Equal(t, vmcommon.UserError, retCode)
-	require.Contains(t, retMessage, errSubstr)
+	callerAddress := bytes.Repeat([]byte{2}, 32)
+	proposalIdentifier := []byte("aaaaaaaaa")
+	generalProposal := &GeneralProposal{
+		CommitHash:     proposalIdentifier,
+		StartVoteEpoch: 10,
+		EndVoteEpoch:   15,
+		Yes:            big.NewInt(0),
+		No:             big.NewInt(0),
+		Veto:           big.NewInt(0),
+		Abstain:        big.NewInt(0),
+	}
 
-	errSubstr = "changeConfig third argument is incorrectly formatted"
-	callInputArgs = [][]byte{
+	voteArgs := [][]byte{
 		[]byte("1"),
-		[]byte("10"),
-		[]byte("invalid"),
-		[]byte("5"),
+		[]byte("yes"),
 	}
-	callInput = createVMInput(zero, "changeConfig", vm.GovernanceSCAddress, vm.GovernanceSCAddress, callInputArgs)
-	retCode = gsc.Execute(callInput)
+	gsc.eei.SetStorage(voteArgs[0], proposalIdentifier)
+	_ = gsc.saveGeneralProposal(proposalIdentifier, generalProposal)
+	callInput := createVMInput(big.NewInt(0), "delegateVote", callerAddress, vm.GovernanceSCAddress, voteArgs)
+	retCode := gsc.Execute(callInput)
 	require.Equal(t, vmcommon.UserError, retCode)
-	require.Contains(t, retMessage, errSubstr)
+	require.Equal(t, eei.GetReturnMessage(), "invalid number of arguments")
 
-	errSubstr = "changeConfig fourth argument is incorrectly formatted"
-	callInputArgs = [][]byte{
-		[]byte("1"),
-		[]byte("10"),
-		[]byte("10"),
-		[]byte("invalid"),
-	}
-	callInput = createVMInput(zero, "changeConfig", vm.GovernanceSCAddress, vm.GovernanceSCAddress, callInputArgs)
+	callInput.Arguments = append(callInput.Arguments, []byte{1}, []byte{2})
+	callInput.CallValue = big.NewInt(10)
 	retCode = gsc.Execute(callInput)
-	require.Equal(t, vmcommon.UserError, retCode)
-	require.Contains(t, retMessage, errSubstr)
+	require.True(t, strings.Contains(eei.GetReturnMessage(), "function is not payable"))
+
+	callInput.CallValue = big.NewInt(0)
+	callInput.GasProvided = 0
+	gsc.gasCost.MetaChainSystemSCsCost.DelegateVote = 10
+	retCode = gsc.Execute(callInput)
+	require.Equal(t, vmcommon.OutOfGas, retCode)
+	require.True(t, strings.Contains(eei.GetReturnMessage(), "not enough gas"))
 }
 
-func TestGovernanceContract_ChangeConfigGetConfigErr(t *testing.T) {
+func TestGovernanceContract_DelegateVoteMoreErrors(t *testing.T) {
 	t.Parallel()
 
-	retMessage := ""
-	errSubstr := "changeConfig error"
-	args := createMockGovernanceArgs()
-	args.Eei = &mock.SystemEIStub{
-		AddReturnMessageCalled: func(msg string) {
-			retMessage = msg
-		},
-		GetStorageCalled: func(key []byte) []byte {
-			if bytes.Equal(key, []byte(governanceConfigKey)) {
-				return []byte("invalid config")
-			}
-
-			return nil
-		},
+	gsc, blockchainHook, eei := createGovernanceBlockChainHookStubContextHandler()
+	blockchainHook.CurrentEpochCalled = func() uint32 {
+		return 12
 	}
 
-	gsc, _ := NewGovernanceContract(args)
-
-	initInput := createVMInput(zero, "initV2", vm.GovernanceSCAddress, vm.GovernanceSCAddress, nil)
-	_ = gsc.Execute(initInput)
+	callerAddress := bytes.Repeat([]byte{2}, 32)
+	proposalIdentifier := []byte("aaaaaaaaa")
+	generalProposal := &GeneralProposal{
+		ProposalCost:   gsc.baseProposalCost,
+		CommitHash:     proposalIdentifier,
+		StartVoteEpoch: 10,
+		EndVoteEpoch:   15,
+		Yes:            big.NewInt(0),
+		No:             big.NewInt(0),
+		Veto:           big.NewInt(0),
+		Abstain:        big.NewInt(0),
+		QuorumStake:    big.NewInt(0),
+	}
 
-	callInputArgs := [][]byte{
+	voteArgs := [][]byte{
 		[]byte("1"),
-		[]byte("10"),
-		[]byte("10"),
-		[]byte("5"),
+		[]byte("yes"),
+		{1},
+		big.NewInt(10000).Bytes(),
 	}
-	callInput := createVMInput(zero, "changeConfig", vm.GovernanceSCAddress, vm.GovernanceSCAddress, callInputArgs)
+
+	gsc.eei.SetStorage(append([]byte(noncePrefix), voteArgs[0]...), proposalIdentifier)
+	_ = gsc.saveGeneralProposal(proposalIdentifier, generalProposal)
+
+	callInput := createVMInput(big.NewInt(0), "delegateVote", callerAddress, vm.GovernanceSCAddress, voteArgs)
+	retCode := gsc.Execute(callInput)
+	require.Equal(t, vmcommon.UserError, retCode)
+	require.True(t, strings.Contains(eei.GetReturnMessage(), "only SC can call this"))
+
+	callInput.CallerAddr = vm.ESDTSCAddress
+	retCode = gsc.Execute(callInput)
 	require.Equal(t, vmcommon.UserError, retCode)
-	require.Contains(t, retMessage, errSubstr)
+	require.True(t, strings.Contains(eei.GetReturnMessage(), "invalid delegator address"))
+
+	callInput.Arguments[2] = callerAddress
+	retCode = gsc.Execute(callInput)
+	require.Equal(t, vmcommon.UserError, retCode)
+	require.True(t, strings.Contains(eei.GetReturnMessage(), "not enough stake/delegate to vote"))
+
+	addStakeAndDelegationForAddress(gsc, callInput.CallerAddr)
+
+	retCode = gsc.Execute(callInput)
+	require.Equal(t, vmcommon.UserError, retCode)
+	require.True(t, strings.Contains(eei.GetReturnMessage(), "not enough voting power to cast this vote"))
+
+	callInput.Arguments[3] = big.NewInt(12).Bytes()
+	retCode = gsc.Execute(callInput)
+	require.Equal(t, vmcommon.Ok, retCode)
+
+	retCode = gsc.Execute(callInput)
+	require.Equal(t, vmcommon.UserError, retCode)
+	require.True(t, strings.Contains(eei.GetReturnMessage(), "double vote is not allowed"))
 }
 
 func TestGovernanceContract_CloseProposal(t *testing.T) {
@@ -2236,30 +899,26 @@ func TestGovernanceContract_CloseProposal(t *testing.T) {
 			}
 		},
 		GetStorageCalled: func(key []byte) []byte {
-			if bytes.Equal(key, append([]byte(whiteListPrefix), callerAddress...)) {
-				return []byte("whitelisted caller")
-			}
-			if bytes.Equal(key, append([]byte(proposalPrefix), callerAddress...)) {
-				whitelistProposalBytes, _ := args.Marshalizer.Marshal(&GeneralProposal{
-					Passed: true,
-				})
-				return whitelistProposalBytes
-			}
 			if bytes.Equal(key, []byte(governanceConfigKey)) {
 				configBytes, _ := args.Marshalizer.Marshal(&GovernanceConfigV2{
-					MinQuorum:        big.NewInt(10),
-					MinVetoThreshold: big.NewInt(10),
-					MinPassThreshold: big.NewInt(10),
+					MinQuorum:        0.1,
+					MinVetoThreshold: 0.1,
+					MinPassThreshold: 0.1,
 				})
 				return configBytes
 			}
+			if bytes.Equal(key, append([]byte(noncePrefix), byte(1))) {
+				return proposalIdentifier
+			}
 			if bytes.Equal(key, append([]byte(proposalPrefix), proposalIdentifier...)) {
-				whitelistProposalBytes, _ := args.Marshalizer.Marshal(&GeneralProposal{
-					Yes:  big.NewInt(10),
-					No:   big.NewInt(10),
-					Veto: big.NewInt(10),
+				proposalBytes, _ := args.Marshalizer.Marshal(&GeneralProposal{
+					Yes:           big.NewInt(10),
+					No:            big.NewInt(10),
+					Veto:          big.NewInt(10),
+					Abstain:       big.NewInt(10),
+					IssuerAddress: callerAddress,
 				})
-				return whitelistProposalBytes
+				return proposalBytes
 			}
 
 			return nil
@@ -2268,9 +927,7 @@ func TestGovernanceContract_CloseProposal(t *testing.T) {
 
 	gsc, _ := NewGovernanceContract(args)
 
-	callInputArgs := [][]byte{
-		proposalIdentifier,
-	}
+	callInputArgs := [][]byte{{1}}
 	callInput := createVMInput(zero, "closeProposal", callerAddress, vm.GovernanceSCAddress, callInputArgs)
 	retCode := gsc.Execute(callInput)
 
@@ -2304,32 +961,6 @@ func TestGovernanceContract_CloseProposalWrongCallValue(t *testing.T) {
 	require.Contains(t, retMessage, errSubstr)
 }
 
-func TestGovernanceContract_CloseProposalNotWhitelisted(t *testing.T) {
-	t.Parallel()
-
-	retMessage := ""
-	errSubstr := "caller is not whitelisted"
-	callerAddress := []byte("address")
-	proposalIdentifier := bytes.Repeat([]byte("a"), commitHashLength)
-	args := createMockGovernanceArgs()
-	args.Eei = &mock.SystemEIStub{
-		AddReturnMessageCalled: func(msg string) {
-			retMessage = msg
-		},
-	}
-
-	gsc, _ := NewGovernanceContract(args)
-	callInputArgs := [][]byte{
-		proposalIdentifier,
-	}
-
-	callInput := createVMInput(zero, "closeProposal", callerAddress, vm.GovernanceSCAddress, callInputArgs)
-	retCode := gsc.Execute(callInput)
-
-	require.Equal(t, vmcommon.UserError, retCode)
-	require.Contains(t, retMessage, errSubstr)
-}
-
 func TestGovernanceContract_CloseProposalWrongArgumentsLength(t *testing.T) {
 	t.Parallel()
@@ -2342,14 +973,11 @@ func TestGovernanceContract_CloseProposalWrongArgumentsLength(t *testing.T) {
 			retMessage = msg
 		},
 		GetStorageCalled: func(key []byte) []byte {
-			if bytes.Equal(key, append([]byte(whiteListPrefix), callerAddress...)) {
-				return []byte("whitelisted caller")
-			}
 			if bytes.Equal(key, append([]byte(proposalPrefix), callerAddress...)) {
-				whitelistProposalBytes, _ := args.Marshalizer.Marshal(&GeneralProposal{
+				proposalBytes, _ := args.Marshalizer.Marshal(&GeneralProposal{
 					Passed: true,
 				})
-				return whitelistProposalBytes
+				return proposalBytes
 			}
 
 			return nil
@@ -2377,14 +1005,11 @@ func TestGovernanceContract_CloseProposalNotEnoughGas(t *testing.T) {
 			retMessage = msg
 		},
 		GetStorageCalled: func(key []byte) []byte {
-			if bytes.Equal(key, append([]byte(whiteListPrefix), callerAddress...)) {
-				return []byte("whitelisted caller")
-			}
 			if bytes.Equal(key, append([]byte(proposalPrefix), callerAddress...)) {
-				whitelistProposalBytes, _ := args.Marshalizer.Marshal(&GeneralProposal{
+				proposalBytes, _ := args.Marshalizer.Marshal(&GeneralProposal{
 					Passed: true,
 				})
-				return whitelistProposalBytes
+				return proposalBytes
 			}
 
 			return nil
@@ -2418,16 +1043,6 @@ func TestGovernanceContract_CloseProposalGetProposalErr(t *testing.T) {
 			retMessage = msg
 		},
 		GetStorageCalled: func(key []byte) []byte {
-			if bytes.Equal(key, append([]byte(whiteListPrefix), callerAddress...)) {
-				return []byte("whitelisted caller")
-			}
-			if bytes.Equal(key, append([]byte(proposalPrefix), callerAddress...)) {
-				whitelistProposalBytes, _ := args.Marshalizer.Marshal(&GeneralProposal{
-					Passed: true,
-				})
-				return whitelistProposalBytes
-			}
-
 			return nil
 		},
 	}
@@ -2456,23 +1071,17 @@ func TestGovernanceContract_CloseProposalAlreadyClosed(t *testing.T) {
 			retMessage = msg
 		},
 		GetStorageCalled: func(key []byte) []byte {
-			if bytes.Equal(key, append([]byte(whiteListPrefix), callerAddress...)) {
-				return []byte("whitelisted caller")
-			}
-			if bytes.Equal(key, append([]byte(proposalPrefix), callerAddress...)) {
-				whitelistProposalBytes, _ := args.Marshalizer.Marshal(&GeneralProposal{
-					Passed: true,
-				})
-				return whitelistProposalBytes
+			if bytes.Equal(key, append([]byte(noncePrefix), byte(1))) {
+				return proposalIdentifier
 			}
 			if bytes.Equal(key, append([]byte(proposalPrefix), proposalIdentifier...)) {
-				whitelistProposalBytes, _ := args.Marshalizer.Marshal(&GeneralProposal{
+				proposalBytes, _ := args.Marshalizer.Marshal(&GeneralProposal{
 					Yes:    big.NewInt(10),
 					No:     big.NewInt(10),
 					Veto:   big.NewInt(10),
 					Closed: true,
 				})
-				return whitelistProposalBytes
+				return proposalBytes
 			}
 
 			return nil
@@ -2480,13 +1089,11 @@ func TestGovernanceContract_CloseProposalAlreadyClosed(t *testing.T) {
 	}
 
 	gsc, _ := NewGovernanceContract(args)
-	callInputArgs := [][]byte{
-		proposalIdentifier,
-	}
+	callInputArgs := [][]byte{{1}}
 
 	callInput := createVMInput(zero, "closeProposal", callerAddress, vm.GovernanceSCAddress, callInputArgs)
 	retCode := gsc.Execute(callInput)
-	require.Equal(t, vmcommon.Ok, retCode)
+	require.Equal(t, vmcommon.UserError, retCode)
 	require.Contains(t, retMessage, errSubstr)
 }
 
@@ -2494,7 +1101,7 @@ func TestGovernanceContract_CloseProposalVoteNotfinished(t *testing.T) {
 	t.Parallel()
 
 	retMessage := ""
-	errSubstr := "proposal can be closed only after nonce"
+	errSubstr := "proposal can be closed only after epoch"
 	callerAddress := []byte("address")
 	proposalIdentifier := bytes.Repeat([]byte("a"), commitHashLength)
 	args := createMockGovernanceArgs()
@@ -2503,30 +1110,25 @@ func TestGovernanceContract_CloseProposalVoteNotfinished(t *testing.T) {
 			retMessage = msg
 		},
 		GetStorageCalled: func(key []byte) []byte {
-			if bytes.Equal(key, append([]byte(whiteListPrefix), callerAddress...)) {
-				return []byte("whitelisted caller")
-			}
-			if bytes.Equal(key, append([]byte(proposalPrefix), callerAddress...)) {
-				whitelistProposalBytes, _ := args.Marshalizer.Marshal(&GeneralProposal{
-					Passed: true,
-				})
-				return whitelistProposalBytes
+			if bytes.Equal(key, append([]byte(noncePrefix), byte(1))) {
+				return proposalIdentifier
 			}
 			if bytes.Equal(key, append([]byte(proposalPrefix), proposalIdentifier...)) {
-				whitelistProposalBytes, _ := args.Marshalizer.Marshal(&GeneralProposal{
-					Yes:          big.NewInt(10),
-					No:           big.NewInt(10),
-					Veto:         big.NewInt(10),
-					EndVoteNonce: 10,
+				proposalBytes, _ := args.Marshalizer.Marshal(&GeneralProposal{
+					Yes:           big.NewInt(10),
+					No:            big.NewInt(10),
+					Veto:          big.NewInt(10),
+					EndVoteEpoch:  10,
+					IssuerAddress: callerAddress,
 				})
-				return whitelistProposalBytes
+				return proposalBytes
 			}
 
 			return nil
 		},
 		BlockChainHookCalled: func() vm.BlockchainHook {
 			return &mock.BlockChainHookStub{
-				CurrentNonceCalled: func() uint64 {
+				CurrentEpochCalled: func() uint32 {
 					return 1
 				},
 			}
@@ -2534,9 +1136,7 @@ func TestGovernanceContract_CloseProposalVoteNotfinished(t *testing.T) {
 	}
 
 	gsc, _ := NewGovernanceContract(args)
-	callInputArgs := [][]byte{
-		proposalIdentifier,
-	}
+	callInputArgs := [][]byte{{1}}
 
 	callInput := createVMInput(zero, "closeProposal", callerAddress, vm.GovernanceSCAddress, callInputArgs)
 	retCode := gsc.Execute(callInput)
@@ -2544,11 +1144,11 @@ func TestGovernanceContract_CloseProposalVoteNotfinished(t *testing.T) {
 	require.Contains(t, retMessage, errSubstr)
 }
 
-func TestGovernanceContract_CloseProposalComputeResultsErr(t *testing.T) {
+func TestGovernanceContract_CloseProposalCallerNotIssuer(t *testing.T) {
 	t.Parallel()
 
 	retMessage := ""
-	errSubstr := "computeEndResults error"
+	errSubstr := "only the issuer can close the proposal"
 	callerAddress := []byte("address")
 	proposalIdentifier := bytes.Repeat([]byte("a"), commitHashLength)
 	args := createMockGovernanceArgs()
@@ -2557,32 +1157,32 @@ func TestGovernanceContract_CloseProposalComputeResultsErr(t *testing.T) {
 			retMessage = msg
 		},
 		GetStorageCalled: func(key []byte) []byte {
-			if bytes.Equal(key, append([]byte(whiteListPrefix), callerAddress...)) {
-				return []byte("whitelisted caller")
-			}
-			if bytes.Equal(key, append([]byte(proposalPrefix), callerAddress...)) {
-				whitelistProposalBytes, _ := args.Marshalizer.Marshal(&GeneralProposal{
-					Passed: true,
-				})
-				return whitelistProposalBytes
+			if bytes.Equal(key, append([]byte(noncePrefix), byte(1))) {
+				return proposalIdentifier
 			}
 			if bytes.Equal(key, append([]byte(proposalPrefix), proposalIdentifier...)) {
-				whitelistProposalBytes, _ := args.Marshalizer.Marshal(&GeneralProposal{
-					Yes:  big.NewInt(10),
-					No:   big.NewInt(10),
-					Veto: big.NewInt(10),
+				proposalBytes, _ := args.Marshalizer.Marshal(&GeneralProposal{
+					Yes:          big.NewInt(10),
+					No:           big.NewInt(10),
+					Veto:         big.NewInt(10),
+					EndVoteEpoch: 10,
 				})
-				return whitelistProposalBytes
+				return proposalBytes
 			}
 
 			return nil
 		},
+		BlockChainHookCalled: func() vm.BlockchainHook {
+			return &mock.BlockChainHookStub{
+				CurrentEpochCalled: func() uint32 {
+					return 1
+				},
+			}
+		},
 	}
 
 	gsc, _ := NewGovernanceContract(args)
-	callInputArgs := [][]byte{
-		proposalIdentifier,
-	}
+	callInputArgs := [][]byte{{1}}
 
 	callInput := createVMInput(zero, "closeProposal", callerAddress, vm.GovernanceSCAddress, callInputArgs)
 	retCode := gsc.Execute(callInput)
@@ -2590,54 +1190,67 @@ func TestGovernanceContract_CloseProposalComputeResultsErr(t *testing.T) {
 	require.Contains(t, retMessage, errSubstr)
 }
 
-func TestGovernanceContract_GetValidatorVotingPower(t *testing.T) {
+func TestGovernanceContract_CloseProposalComputeResultsErr(t *testing.T) {
 	t.Parallel()
 
-	votingPowerResult := make([]byte, 0)
-	mockBlsKey := []byte("bls key")
-	mockValidatorBlsKeys := [][]byte{
-		mockBlsKey,
-		mockBlsKey,
-		mockBlsKey,
-		mockBlsKey,
-	}
-
+	retMessage := ""
+	errSubstr := "computeEndResults error"
 	callerAddress := []byte("address")
-
+	proposalIdentifier := bytes.Repeat([]byte("a"), commitHashLength)
 	args := createMockGovernanceArgs()
-
 	args.Eei = &mock.SystemEIStub{
-		GetStorageFromAddressCalled: func(address []byte, key []byte) []byte {
-			if bytes.Equal(address, args.ValidatorSCAddress) && bytes.Equal(key, callerAddress) {
-				auctionBytes, _ := args.Marshalizer.Marshal(&ValidatorDataV2{
-					BlsPubKeys:      mockValidatorBlsKeys,
-					TotalStakeValue: big.NewInt(100),
+		AddReturnMessageCalled: func(msg string) {
+			retMessage = msg
+		},
+		GetStorageCalled: func(key []byte) []byte {
+			if bytes.Equal(key, append([]byte(noncePrefix), byte(1))) {
+				return proposalIdentifier
+			}
+			if bytes.Equal(key, append([]byte(proposalPrefix), proposalIdentifier...)) {
+				proposalBytes, _ := args.Marshalizer.Marshal(&GeneralProposal{
+					Yes:           big.NewInt(10),
+					No:            big.NewInt(10),
+					Veto:          big.NewInt(10),
+					IssuerAddress: callerAddress,
 				})
-
-				return auctionBytes
+				return proposalBytes
 			}
 
 			return nil
 		},
-		FinishCalled: func(value []byte) {
-			votingPowerResult = value
-		},
 	}
+
+	gsc, _ := NewGovernanceContract(args)
+	callInputArgs := [][]byte{{1}}
+	callInput := createVMInput(zero, "closeProposal", callerAddress, vm.GovernanceSCAddress, callInputArgs)
+	retCode := gsc.Execute(callInput)
+
+	require.Equal(t, vmcommon.UserError, retCode)
+	require.Contains(t, retMessage, errSubstr)
+}
+
+func TestGovernanceContract_GetVotingPower(t *testing.T) {
+	t.Parallel()
+
+	gsc, _, eei := createGovernanceBlockChainHookStubContextHandler()
+	callerAddress := bytes.Repeat([]byte{2}, 32)
 	callInputArgs := [][]byte{
 		callerAddress,
 	}
-	gsc, _ := NewGovernanceContract(args)
-	callInput := createVMInput(big.NewInt(0), "getValidatorVotingPower", callerAddress, vm.GovernanceSCAddress, callInputArgs)
+
+	callInput := createVMInput(big.NewInt(0), "viewVotingPower", vm.GovernanceSCAddress, vm.GovernanceSCAddress, callInputArgs)
 	retCode := gsc.Execute(callInput)
+	fmt.Println(eei.GetReturnMessage())
 	require.Equal(t, vmcommon.Ok, retCode)
-	require.Equal(t, big.NewInt(10).Bytes(), votingPowerResult)
+
+	vmOutput := eei.CreateVMOutput()
+	require.Equal(t, big.NewInt(10).Bytes(), vmOutput.ReturnData[0])
 }
 
-func TestGovernanceContract_GetValidatorVotingPowerWrongCallValue(t *testing.T) {
+func TestGovernanceContract_GetVVotingPowerWrongCallValue(t *testing.T) {
 	t.Parallel()
 
 	retMessage := ""
-	callerAddress := []byte("address")
 	args := createMockGovernanceArgs()
 	args.Eei = &mock.SystemEIStub{
 		AddReturnMessageCalled: func(msg string) {
@@ -2646,17 +1259,17 @@
 	}
 
 	gsc, _ := NewGovernanceContract(args)
-	callInput := createVMInput(big.NewInt(10), "getValidatorVotingPower", callerAddress, vm.GovernanceSCAddress, nil)
+	callInput := createVMInput(big.NewInt(10), "viewVotingPower", vm.GovernanceSCAddress, vm.GovernanceSCAddress, nil)
 	retCode := gsc.Execute(callInput)
 
 	require.Equal(t, vmcommon.UserError, retCode)
-	require.Contains(t, retMessage, vm.TransactionValueMustBeZero)
+	require.Contains(t, retMessage, vm.ErrCallValueMustBeZero.Error())
 }
 
-func TestGovernanceContract_GetValidatorVotingPowerWrongArgumentsLength(t *testing.T) {
+func TestGovernanceContract_GetVotingPowerWrongArgumentsLength(t *testing.T) {
 	t.Parallel()
 
 	retMessage := ""
-	errSubstr := "function accepts only one argument, the validator address"
+	errSubstr := vm.ErrInvalidNumOfArguments.Error()
 	callerAddress := []byte("address")
 	args := createMockGovernanceArgs()
 	args.Eei = &mock.SystemEIStub{
@@ -2670,17 +1283,17 @@ func TestGovernanceContract_GetValidatorVotingPowerWrongArgumentsLength(t *testi
 		callerAddress,
 		callerAddress,
 	}
-	callInput := createVMInput(zero, "getValidatorVotingPower", callerAddress, vm.GovernanceSCAddress, callInputArgs)
+	callInput := createVMInput(zero, "viewVotingPower", vm.GovernanceSCAddress, vm.GovernanceSCAddress, callInputArgs)
 	retCode := gsc.Execute(callInput)
 
-	require.Equal(t, vmcommon.FunctionWrongSignature, retCode)
+	require.Equal(t, vmcommon.UserError, retCode)
 	require.Contains(t, retMessage, errSubstr)
 }
 
-func TestGovernanceContract_GetValidatorVotingPowerInvalidArgument(t *testing.T) {
+func TestGovernanceContract_GetVotingPowerInvalidCaller(t *testing.T) {
 	t.Parallel()
 
 	retMessage := ""
-	errSubstr := "invalid argument - validator address"
+	errSubstr := vm.ErrInvalidCaller.Error()
 	callerAddress := []byte("address")
 	args := createMockGovernanceArgs()
 	args.Eei = &mock.SystemEIStub{
@@ -2693,13 +1306,13 @@
 	callInputArgs := [][]byte{
 		[]byte("address_wrong"),
 	}
-	callInput := createVMInput(zero, "getValidatorVotingPower", callerAddress, vm.GovernanceSCAddress, callInputArgs)
+	callInput := createVMInput(zero, "viewVotingPower", callerAddress, vm.GovernanceSCAddress, callInputArgs)
 	retCode := gsc.Execute(callInput)
 
 	require.Equal(t, vmcommon.UserError, retCode)
 	require.Contains(t, retMessage, errSubstr)
 }
 
-func TestGovernanceContract_GetValidatorVotingPowerComputeErr(t *testing.T) {
+func TestGovernanceContract_GetVotingPowerComputeErr(t *testing.T) {
 	t.Parallel()
 
 	callerAddress := []byte("address")
@@ -2714,75 +1327,183 @@ func TestGovernanceContract_GetValidatorVotingPowerComputeErr(t *testing.T) {
 	callInputArgs := [][]byte{
 		callerAddress,
 	}
-	callInput := createVMInput(zero, "getValidatorVotingPower", callerAddress, vm.GovernanceSCAddress, callInputArgs)
+	callInput := createVMInput(zero, "viewVotingPower", callerAddress, vm.GovernanceSCAddress, callInputArgs)
 	retCode := gsc.Execute(callInput)
-	require.Equal(t, vmcommon.ExecutionFailed, retCode)
+	require.Equal(t, vmcommon.UserError, retCode)
 }
 
-func TestGovernanceContract_GetBalanceVotingPower(t *testing.T) {
+func TestGovernanceContract_ViewConfig(t *testing.T) {
 	t.Parallel()
 
-	votingPowerResult := make([]byte, 0)
-
 	callerAddress := []byte("address")
 	args := createMockGovernanceArgs()
+	returnMessage := ""
+	mockEEI := &mock.SystemEIStub{
+		GetStorageFromAddressCalled: func(_ []byte, _ []byte) []byte {
+			return []byte("invalid data")
+		},
+		AddReturnMessageCalled: func(msg string) {
+			returnMessage = msg
+		},
+	}
+	args.Eei = mockEEI
 
-	args.Eei = &mock.SystemEIStub{
-		FinishCalled: func(value []byte) {
-			votingPowerResult = value
+	gsc, _ := NewGovernanceContract(args)
+	callInputArgs := [][]byte{
+		callerAddress,
+	}
+	callInput := createVMInput(zero, "viewConfig", callerAddress, vm.GovernanceSCAddress, callInputArgs)
+	retCode := gsc.Execute(callInput)
+	require.Equal(t, vmcommon.UserError, retCode)
+	require.Equal(t, returnMessage, vm.ErrInvalidCaller.Error())
+
+	callInput.CallerAddr = callInput.RecipientAddr
+	callInput.Arguments = [][]byte{}
+	retCode = gsc.Execute(callInput)
+	require.Equal(t, vmcommon.UserError, retCode)
+	require.Equal(t, returnMessage, "element was not found")
+
+	mockEEI.GetStorageCalled = func(key []byte) []byte {
+		proposalBytes, _ := args.Marshalizer.Marshal(&GovernanceConfigV2{
+			ProposalFee:       big.NewInt(10),
+			LastProposalNonce: 10,
+			MinQuorum:         0.4,
+			MinPassThreshold:  0.4,
+			MinVetoThreshold:  0.4,
+		})
+		return proposalBytes
+	}
+
+	retCode = gsc.Execute(callInput)
+	require.Equal(t, vmcommon.Ok, retCode)
+}
+
+func TestGovernanceContract_ViewUserHistory(t *testing.T) {
+	t.Parallel()
+
+	callerAddress := []byte("address")
+	args := createMockGovernanceArgs()
+	returnMessage := ""
+	mockEEI := &mock.SystemEIStub{
+		GetStorageFromAddressCalled: func(_ []byte, _ []byte) []byte {
+			return []byte("invalid data")
+		},
+		AddReturnMessageCalled: func(msg string) {
+			returnMessage = msg
 		},
 	}
+	args.Eei = mockEEI
+
+	gsc, _ := NewGovernanceContract(args)
 	callInputArgs := [][]byte{
-		big.NewInt(400).Bytes(),
+		callerAddress,
+	}
+	callInput := createVMInput(zero, "viewUserVoteHistory", callerAddress, vm.GovernanceSCAddress, callInputArgs)
+	retCode := gsc.Execute(callInput)
+	require.Equal(t, vmcommon.UserError, retCode)
+	require.Equal(t, returnMessage, vm.ErrInvalidCaller.Error())
+
+	callInput.CallerAddr = callInput.RecipientAddr
+	callInput.Arguments = [][]byte{callerAddress}
+	retCode = gsc.Execute(callInput)
+	require.Equal(t, vmcommon.Ok, retCode)
+
+	mockEEI.GetStorageCalled = func(key []byte) []byte {
+		proposalBytes, _ := args.Marshalizer.Marshal(&OngoingVotedList{
+			Delegated: []uint64{1, 2},
+			Direct:    []uint64{1, 2},
+		})
+		return proposalBytes
 	}
-	gsc, _ := NewGovernanceContract(args)
-	callInput := createVMInput(big.NewInt(0), "getBalanceVotingPower", callerAddress, vm.GovernanceSCAddress, callInputArgs)
-	retCode := gsc.Execute(callInput)
+
+	retCode = gsc.Execute(callInput)
 	require.Equal(t, vmcommon.Ok, retCode)
-	require.Equal(t, big.NewInt(20).Bytes(), votingPowerResult)
 }
 
-func TestGovernanceContract_GetBalanceVotingPowerWrongCallValue(t *testing.T) {
+func TestGovernanceContract_ViewProposal(t *testing.T) {
 	t.Parallel()
 
-	retMessage := ""
 	callerAddress := []byte("address")
 	args := createMockGovernanceArgs()
-	args.Eei = &mock.SystemEIStub{
+	returnMessage := ""
+	mockEEI := &mock.SystemEIStub{
 		AddReturnMessageCalled: func(msg string) {
-			retMessage = msg
+			returnMessage = msg
 		},
 	}
+	args.Eei = mockEEI
 
 	gsc, _ := NewGovernanceContract(args)
-	callInput := createVMInput(big.NewInt(10), "getBalanceVotingPower", callerAddress, vm.GovernanceSCAddress, nil)
+
+	callInput := createVMInput(zero, "viewProposal", callerAddress, vm.GovernanceSCAddress, [][]byte{})
 	retCode := gsc.Execute(callInput)
 	require.Equal(t, vmcommon.UserError, retCode)
-	require.Contains(t, retMessage, vm.TransactionValueMustBeZero)
+	require.Equal(t, returnMessage, vm.ErrInvalidCaller.Error())
+
+	callInput.CallerAddr = callInput.RecipientAddr
+	callInput.Arguments = [][]byte{callerAddress}
+	retCode = gsc.Execute(callInput)
+	require.Equal(t, vmcommon.UserError, retCode)
+	require.Equal(t, returnMessage, vm.ErrProposalNotFound.Error())
+
+	mockEEI.GetStorageCalled = func(key []byte) []byte {
+		proposalBytes, _ := args.Marshalizer.Marshal(&GeneralProposal{
+			Yes:          big.NewInt(10),
+			No:           big.NewInt(10),
+			Veto:         big.NewInt(10),
+			Abstain:      big.NewInt(10),
+			ProposalCost: big.NewInt(10),
+			QuorumStake:  big.NewInt(10),
+			Closed:       true,
+		})
+		return proposalBytes
+	}
+
+	retCode = gsc.Execute(callInput)
+	require.Equal(t, vmcommon.Ok, retCode)
 }
 
-func TestGovernanceContract_GetBalanceVotingPowerWrongArgumentsLength(t *testing.T) {
+func TestGovernanceContract_ViewDelegatedVoteInfo(t *testing.T) {
 	t.Parallel()
 
-	retMessage := ""
-	errSubstr := "function accepts only one argument"
 	callerAddress := []byte("address")
 	args := createMockGovernanceArgs()
-	args.Eei = &mock.SystemEIStub{
+	returnMessage := ""
+	mockEEI := &mock.SystemEIStub{
 		AddReturnMessageCalled: func(msg string) {
-			retMessage = msg
+			returnMessage = msg
 		},
 	}
+	args.Eei = mockEEI
 
 	gsc, _ := NewGovernanceContract(args)
-	callInputArgs := [][]byte{
-		big.NewInt(400).Bytes(),
-		big.NewInt(400).Bytes(),
-	}
-	callInput := createVMInput(zero, "getBalanceVotingPower", callerAddress, vm.GovernanceSCAddress, callInputArgs)
+
+	callInput := createVMInput(zero, "viewDelegatedVoteInfo", callerAddress, vm.GovernanceSCAddress, [][]byte{})
 	retCode := gsc.Execute(callInput)
-	require.Equal(t, vmcommon.FunctionWrongSignature, retCode)
-	require.Contains(t, retMessage, errSubstr)
+	require.Equal(t, vmcommon.UserError, retCode)
+	require.Equal(t, returnMessage, vm.ErrInvalidCaller.Error())
+
+	callInput.CallerAddr = callInput.RecipientAddr
+	callInput.Arguments = [][]byte{callerAddress}
+	retCode = gsc.Execute(callInput)
+	require.Equal(t, vmcommon.UserError, retCode)
+	require.Equal(t, returnMessage, vm.ErrInvalidNumOfArguments.Error())
+
+	callInput.Arguments = [][]byte{callerAddress, callerAddress}
+
+	mockEEI.GetStorageCalled = func(key []byte) []byte {
+		delegatedVoteInfo, _ := args.Marshalizer.Marshal(&DelegatedSCVoteInfo{
+			UsedPower:  big.NewInt(10),
+			UsedStake:  big.NewInt(100),
+			TotalPower: big.NewInt(1000),
+			TotalStake: big.NewInt(10000),
+		})
+		return delegatedVoteInfo
+	}
+
+	retCode = gsc.Execute(callInput)
+	fmt.Println(returnMessage)
+	require.Equal(t, vmcommon.Ok, retCode)
 }
 
 // ======== Begin testing of helper functions
 
@@ -2926,396 +1647,144 @@ func TestGovernanceContract_ProposalExists(t *testing.T) {
 	require.True(t, correctKeyCalled)
 }
 
-func TestGovernanceContract_GetValidProposalNotFound(t *testing.T) {
-	t.Parallel()
-
-	proposalIdentifier := bytes.Repeat([]byte("a"), commitHashLength)
-	args := createMockGovernanceArgs()
-	gsc, _ := NewGovernanceContract(args)
-
-	proposal, err := gsc.getValidProposal(proposalIdentifier)
-	require.Nil(t, proposal)
-	require.Equal(t, vm.ErrProposalNotFound, err)
-}
-
-func TestGovernanceContract_GetValidProposalNotStarted(t *testing.T) {
-	t.Parallel()
-
-	proposalIdentifier := bytes.Repeat([]byte("a"), commitHashLength)
-	generalProposal := &GeneralProposal{
-		CommitHash:     proposalIdentifier,
-		StartVoteNonce: 10,
-	}
-
-	args := createMockGovernanceArgs()
-	args.Eei = &mock.SystemEIStub{
-		GetStorageCalled: func(key []byte) []byte {
-			if bytes.Equal(key, append([]byte(proposalPrefix), proposalIdentifier...)) {
-				proposalBytes, _ := args.Marshalizer.Marshal(generalProposal)
-				return proposalBytes
-			}
-			return nil
-		},
-		BlockChainHookCalled: func() vm.BlockchainHook {
-			return &mock.BlockChainHookStub{
-				CurrentNonceCalled: func() uint64 {
-					return 9
-				},
-			}
-		},
-	}
-	gsc, _ := NewGovernanceContract(args)
-
-	proposal, err := gsc.getValidProposal(proposalIdentifier)
-	require.Nil(t, proposal)
-	require.Equal(t, vm.ErrVotingNotStartedForProposal, err)
-}
-
-func TestGovernanceContract_GetValidProposalVotingFinished(t *testing.T) {
-	t.Parallel()
-
-	proposalIdentifier := bytes.Repeat([]byte("a"), commitHashLength)
-	generalProposal := &GeneralProposal{
-		CommitHash:     proposalIdentifier,
-		StartVoteNonce: 10,
-		EndVoteNonce:   15,
-	}
-
-	args := createMockGovernanceArgs()
-	args.Eei = &mock.SystemEIStub{
-		GetStorageCalled: func(key []byte) []byte {
-			if bytes.Equal(key, append([]byte(proposalPrefix), proposalIdentifier...)) {
-				proposalBytes, _ := args.Marshalizer.Marshal(generalProposal)
-				return proposalBytes
-			}
-			return nil
-		},
-		BlockChainHookCalled: func() vm.BlockchainHook {
-			return &mock.BlockChainHookStub{
-				CurrentNonceCalled: func() uint64 {
-					return 16
-				},
-			}
-		},
-	}
-	gsc, _ := NewGovernanceContract(args)
-
-	proposal, err := gsc.getValidProposal(proposalIdentifier)
-	require.Nil(t, proposal)
-	require.Equal(t, vm.ErrVotedForAnExpiredProposal, err)
-}
-
-func TestGovernanceContract_GetValidProposal(t *testing.T) {
-	t.Parallel()
-
-	proposalIdentifier := bytes.Repeat([]byte("a"), commitHashLength)
-	generalProposal := &GeneralProposal{
-		CommitHash:     proposalIdentifier,
-		StartVoteNonce: 10,
-		EndVoteNonce:   15,
-	}
-
-	args := createMockGovernanceArgs()
-	args.Eei = &mock.SystemEIStub{
-		GetStorageCalled: func(key []byte) []byte {
-			if bytes.Equal(key, append([]byte(proposalPrefix), proposalIdentifier...)) {
-				proposalBytes, _ := args.Marshalizer.Marshal(generalProposal)
-				return proposalBytes
-			}
-			return nil
-		},
-		BlockChainHookCalled: func() vm.BlockchainHook {
-			return &mock.BlockChainHookStub{
-				CurrentNonceCalled: func() uint64 {
-					return 11
-				},
-			}
-		},
-	}
-	gsc, _ := NewGovernanceContract(args)
-
-	proposal, err := gsc.getValidProposal(proposalIdentifier)
-	require.Nil(t, err)
-	require.Equal(t, proposalIdentifier, proposal.CommitHash)
-}
-
-func TestGovernanceContract_IsWhitelistedNotFound(t *testing.T) {
-	t.Parallel()
-
-	args := createMockGovernanceArgs()
-	args.Eei = &mock.SystemEIStub{
-		GetStorageCalled: func(key []byte) []byte {
-			return nil
-		},
-	}
-	gsc, _ := NewGovernanceContract(args)
-
-	isWhiteListed := gsc.isWhiteListed([]byte("address"))
-	require.False(t, isWhiteListed)
-}
-
-func TestGovernanceContract_IsWhitelistedUnmarshalErrorReturnsFalse(t *testing.T) {
-	t.Parallel()
-
-	args := createMockGovernanceArgs()
-	args.Eei = &mock.SystemEIStub{
-		GetStorageCalled: func(key []byte) []byte {
-			return []byte("invalid proposal")
-		},
-	}
-	gsc, _ := NewGovernanceContract(args)
-
-	isWhiteListed := gsc.isWhiteListed([]byte("address"))
-	require.False(t, isWhiteListed)
-}
-
-func TestGovernanceContract_IsWhitelistedProposalNotVoted(t *testing.T) {
-	t.Parallel()
-
-	address := []byte("address")
-	generalProposal := &GeneralProposal{
-		Passed: false,
-	}
-
-	args := createMockGovernanceArgs()
-	args.Eei = &mock.SystemEIStub{
-		GetStorageCalled: func(key []byte) []byte {
-			if bytes.Equal(key, append([]byte(whiteListPrefix), address...)) {
-				return []byte{1}
-			}
-
-			if bytes.Equal(key, append([]byte(proposalPrefix), address...)) {
-				proposalBytes, _ := args.Marshalizer.Marshal(generalProposal)
-				return proposalBytes
-			}
-
-			return nil
-		},
-	}
-	gsc, _ := NewGovernanceContract(args)
-
-	isWhiteListed := gsc.isWhiteListed(address)
-	require.False(t, isWhiteListed)
-}
-
-func TestGovernanceContract_IsWhitelistedProposalVoted(t *testing.T) {
-	t.Parallel()
-
-	address := []byte("address")
-	generalProposal := &GeneralProposal{
-		Passed: true,
-	}
-
-	args := createMockGovernanceArgs()
-	args.Eei = &mock.SystemEIStub{
-		GetStorageCalled: func(key []byte) []byte {
-			if bytes.Equal(key, append([]byte(whiteListPrefix), address...)) {
-				return []byte{1}
-			}
-
-			if bytes.Equal(key, append([]byte(proposalPrefix), address...)) {
-				proposalBytes, _ := args.Marshalizer.Marshal(generalProposal)
-				return proposalBytes
-			}
-			return nil
-		},
-	}
-	gsc, _ := NewGovernanceContract(args)
-
-	isWhiteListed := gsc.isWhiteListed([]byte("address"))
-	require.True(t, isWhiteListed)
-}
-
-func TestGovernanceContract_ApplyVoteInvalid(t *testing.T) {
+func TestGovernanceContract_addNewVote(t *testing.T) {
 	t.Parallel()
 
-	voteDetails := &VoteDetails{
-		Value: 100,
-	}
-
-	voteSet := &VoteSet{}
-	proposal := &GeneralProposal{}
-
 	args := createMockGovernanceArgs()
 	gsc, _ := NewGovernanceContract(args)
 
-	_, _, err := gsc.applyVote(voteDetails, voteSet, proposal)
-	require.NotNil(t, err)
-	require.Contains(t, err.Error(), vm.ErrInvalidArgument.Error())
-}
-
-func TestGovernanceContract_ApplyVote(t *testing.T) {
-	t.Parallel()
-
-	voteDetails := &VoteDetails{
-		Value:   Yes,
-		Power:   big.NewInt(10),
-		Balance: big.NewInt(100),
-	}
-
-	voteSet := &VoteSet{
-		UsedPower:   big.NewInt(5),
-		UsedBalance: big.NewInt(25),
-		TotalYes:    big.NewInt(5),
-		VoteItems: []*VoteDetails{
-			{
-				Value:   Yes,
-				Power:   big.NewInt(5),
-				Balance: big.NewInt(25),
-			},
-		},
-	}
 	proposal := &GeneralProposal{
-		Yes:  big.NewInt(10),
-		No:   big.NewInt(10),
-		Veto: big.NewInt(0),
+		Yes:     big.NewInt(0),
+		No:      big.NewInt(0),
+		Abstain: big.NewInt(0),
+		Veto:    big.NewInt(0),
 	}
-
-	args := createMockGovernanceArgs()
-	gsc, _ := NewGovernanceContract(args)
-
-	voteSetResponse, generalProposalResponse, err := gsc.applyVote(voteDetails, voteSet, proposal)
-	require.Nil(t, err)
-	require.Equal(t, big.NewInt(20), generalProposalResponse.Yes)
-	require.Equal(t, big.NewInt(15), voteSetResponse.TotalYes)
-}
-
-func TestGovernanceContract_ComputeAccountLeveledPower(t *testing.T) {
-	t.Parallel()
-
-	args := createMockGovernanceArgs()
-	gsc, _ := NewGovernanceContract(args)
-
-	voteSet := &VoteSet{
-		UsedBalance: big.NewInt(0),
-	}
-
-	for i := 0; i < 10; i++ {
-		balancedPower, _ := gsc.computeAccountLeveledPower(big.NewInt(100), voteSet)
-
-		powerBefore := big.NewInt(0).Sqrt(voteSet.UsedBalance)
-		voteSet.UsedBalance.Add(voteSet.UsedBalance, big.NewInt(100))
-		powerAfter := big.NewInt(0).Sqrt(voteSet.UsedBalance)
-		require.Equal(t, big.NewInt(0).Sub(powerAfter, powerBefore), balancedPower)
-	}
-}
+	_ = gsc.addNewVote(yesString, big.NewInt(9), proposal)
+	require.Equal(t, proposal.Yes, big.NewInt(9))
 
-func TestGovernanceContract_IsValidVoteString(t *testing.T) {
-	t.Parallel()
+	_ = gsc.addNewVote(noString, big.NewInt(99), proposal)
+	require.Equal(t, proposal.No, big.NewInt(99))
 
-	args := createMockGovernanceArgs()
-	gsc, _ := NewGovernanceContract(args)
+	_ = gsc.addNewVote(vetoString, big.NewInt(999), proposal)
+	require.Equal(t, proposal.Veto, big.NewInt(999))
 
-	require.True(t, gsc.isValidVoteString("yes"))
-	require.True(t, gsc.isValidVoteString("no"))
-	require.True(t, gsc.isValidVoteString("veto"))
-	require.False(t, gsc.isValidVoteString("invalid"))
+	_ = gsc.addNewVote(abstainString, big.NewInt(9999), proposal)
+	require.Equal(t, proposal.Abstain, big.NewInt(9999))
 }
 
 func TestComputeEndResults(t *testing.T) {
 	t.Parallel()
 
+	retMessage := ""
 	args := createMockGovernanceArgs()
 	args.Eei = &mock.SystemEIStub{
 		GetStorageCalled: func(key []byte) []byte {
 			if bytes.Equal(key, []byte(governanceConfigKey)) {
 				configBytes, _ := args.Marshalizer.Marshal(&GovernanceConfigV2{
-					MinQuorum:        big.NewInt(100),
-					MinPassThreshold: big.NewInt(51),
-					MinVetoThreshold: big.NewInt(30),
+					MinQuorum:        0.4,
+					MinPassThreshold: 0.5,
+					MinVetoThreshold: 0.3,
+					ProposalFee:      big.NewInt(10),
 				})
 				return configBytes
 			}
 
 			return nil
 		},
+		GetBalanceCalled: func(_ []byte) *big.Int {
+			return big.NewInt(100)
+		},
+		FinishCalled: func(value []byte) {
+			retMessage = string(value)
+		},
 	}
 	gsc, _ := NewGovernanceContract(args)
 
 	didNotPassQuorum := &GeneralProposal{
-		Yes:  big.NewInt(50),
-		No:   big.NewInt(0),
-		Veto: big.NewInt(0),
+		Yes:     big.NewInt(20),
+		No:      big.NewInt(0),
+		Veto:    big.NewInt(0),
+		Abstain: big.NewInt(10),
 	}
 	err := gsc.computeEndResults(didNotPassQuorum)
 	require.Nil(t, err)
+	require.Equal(t, "Proposal did not reach minQuorum", retMessage)
 	require.False(t, didNotPassQuorum.Passed)
 
 	didNotPassVotes := &GeneralProposal{
-		Yes:  big.NewInt(50),
-		No:   big.NewInt(50),
-		Veto: big.NewInt(0),
+		Yes:     big.NewInt(50),
+		No:      big.NewInt(50),
+		Veto:    big.NewInt(0),
+		Abstain: big.NewInt(10),
 	}
 	err = gsc.computeEndResults(didNotPassVotes)
 	require.Nil(t, err)
+	require.Equal(t, "Proposal rejected", retMessage)
 	require.False(t, didNotPassVotes.Passed)
 
 	didNotPassVotes2 := &GeneralProposal{
-		Yes:  big.NewInt(50),
-		No:   big.NewInt(51),
-		Veto: big.NewInt(0),
+		Yes:     big.NewInt(50),
+		No:      big.NewInt(51),
+		Veto:    big.NewInt(0),
+		Abstain: big.NewInt(10),
 	}
 	err = gsc.computeEndResults(didNotPassVotes2)
 	require.Nil(t, err)
+	require.Equal(t, "Proposal rejected", retMessage)
 	require.False(t, didNotPassVotes2.Passed)
 
 	didNotPassVeto := &GeneralProposal{
-		Yes:  big.NewInt(51),
-		No:   big.NewInt(50),
-		Veto: big.NewInt(30),
+		Yes:     big.NewInt(51),
+		No:      big.NewInt(50),
+		Veto:    big.NewInt(70),
+		Abstain: big.NewInt(10),
 	}
 	err = gsc.computeEndResults(didNotPassVeto)
 	require.Nil(t, err)
+	require.Equal(t, "Proposal vetoed", retMessage)
 	require.False(t, didNotPassVeto.Passed)
 
 	pass := &GeneralProposal{
-		Yes:  big.NewInt(51),
-		No:   big.NewInt(50),
-		Veto: big.NewInt(29),
+		Yes:     big.NewInt(70),
+		No:      big.NewInt(50),
+		Veto:    big.NewInt(10),
+		Abstain: big.NewInt(10),
 	}
 	err = gsc.computeEndResults(pass)
 	require.Nil(t, err)
+	require.Equal(t, "Proposal passed", retMessage)
 	require.True(t, pass.Passed)
 }
 
-func createMockStorer(callerAddress []byte, proposalIdentifier []byte, proposal *GeneralProposal) *mock.SystemEIStub {
-	return &mock.SystemEIStub{
-		GetStorageCalled: func(key []byte) []byte {
-			marshalizer := &mock.MarshalizerMock{}
-			isWhiteListKey := bytes.Equal(key, append([]byte(whiteListPrefix), callerAddress...))
-			if isWhiteListKey {
-				whiteList, _ := marshalizer.Marshal(&WhiteListProposal{
-					WhiteListAddress: callerAddress,
-					ProposalStatus:   append([]byte(proposalPrefix), callerAddress...),
-				})
-				return whiteList
-			}
-			isWhiteListProposalKey := bytes.Equal(key, append([]byte(proposalPrefix), callerAddress...))
-			if isWhiteListProposalKey {
-				whiteList, _ := marshalizer.Marshal(&GeneralProposal{
-					Passed: true,
-				})
-				return whiteList
-			}
+func TestGovernanceContract_ProposeVoteClose(t *testing.T) {
+	t.Parallel()
 
-			isGeneralProposalKey := bytes.Equal(key, append([]byte(proposalPrefix), proposalIdentifier...))
-			if isGeneralProposalKey && proposal != nil {
-				marshaledProposal, _ := marshalizer.Marshal(proposal)
+	callerAddress := bytes.Repeat([]byte{2}, 32)
+	proposalIdentifier := bytes.Repeat([]byte("a"), commitHashLength)
 
-				return marshaledProposal
-			}
+	gsc, blockchainHook, _ := createGovernanceBlockChainHookStubContextHandler()
 
-			return nil
-		},
-		GetStorageFromAddressCalled: func(address []byte, key []byte) []byte {
-			marshalizer := &mock.MarshalizerMock{}
-			if bytes.Equal(address, vm.DelegationManagerSCAddress) && bytes.Equal(key, []byte(delegationManagementKey)) {
-				dManagementData := &DelegationManagement{MinDelegationAmount: big.NewInt(10)}
-				marshaledData, _ := marshalizer.Marshal(dManagementData)
-				return marshaledData
-			}
+	callInputArgs := [][]byte{
+		proposalIdentifier,
+		big.NewInt(50).Bytes(),
+		big.NewInt(55).Bytes(),
+	}
+	callInput := createVMInput(big.NewInt(500), "proposal", callerAddress, vm.GovernanceSCAddress, callInputArgs)
+	retCode := gsc.Execute(callInput)
+	require.Equal(t, vmcommon.Ok, retCode)
 
-			return nil
-		},
+	currentEpoch := uint32(52)
+	blockchainHook.CurrentEpochCalled = func() uint32 {
+		return currentEpoch
 	}
+
+	callInput = createVMInput(big.NewInt(0), "vote", callerAddress, vm.GovernanceSCAddress, [][]byte{big.NewInt(1).Bytes(), []byte("yes")})
+	retCode = gsc.Execute(callInput)
+	require.Equal(t, vmcommon.Ok, retCode)
+
+	currentEpoch = 56
+	callInput = createVMInput(big.NewInt(0), "closeProposal", callerAddress, vm.GovernanceSCAddress, [][]byte{big.NewInt(1).Bytes()})
+	retCode = gsc.Execute(callInput)
+	require.Equal(t, vmcommon.Ok, retCode)
 }
diff --git a/vm/systemSmartContracts/validator.go b/vm/systemSmartContracts/validator.go
index 5f73941ae28..7a67c7e1e3b 100644
--- a/vm/systemSmartContracts/validator.go
+++ b/vm/systemSmartContracts/validator.go
@@ -1222,11 +1222,6 @@ func (v *validatorSC) unStake(args *vmcommon.ContractCallInput) vmcommon.ReturnC
 		return vmcommon.UserError
 	}
 
-	if isStakeLocked(v.eei, v.governanceSCAddress, args.CallerAddr) {
-		v.eei.AddReturnMessage("stake is locked for voting")
-		return vmcommon.UserError
-	}
-
 	// continue by unstaking tokens as well
 	validatorConfig := v.getConfig(v.eei.BlockChainHook().CurrentEpoch())
 	returnCode = v.processUnStakeTokensFromNodes(registrationData, validatorConfig, numSuccessFromWaiting, 0)
@@ -1541,10 +1536,6 @@ func (v *validatorSC) unStakeTokens(args *vmcommon.ContractCallInput) vmcommon.R
 		v.eei.AddReturnMessage("should have specified one argument containing the unstake value")
 		return vmcommon.UserError
 	}
-	if isStakeLocked(v.eei, v.governanceSCAddress, args.CallerAddr) {
-		v.eei.AddReturnMessage("stake is locked for voting")
-		return vmcommon.UserError
-	}
 	unStakeValue := big.NewInt(0).SetBytes(args.Arguments[0])
 	unStakedEpoch := v.eei.BlockChainHook().CurrentEpoch()
@@ -1709,6 +1700,9 @@
 	}
 	if totalUnBond.Cmp(zero) == 0 {
 		v.eei.AddReturnMessage("no tokens that can be unbond at this time")
+		if v.enableEpochsHandler.IsMultiClaimOnDelegationEnabled() {
+			return vmcommon.UserError
+		}
 		return vmcommon.Ok
 	}
 
@@ -2134,6 +2128,9 @@ func (v *validatorSC) getBlsKeysStatus(args *vmcommon.ContractCallInput) vmcommo
 
 	if len(registrationData.BlsPubKeys) == 0 {
 		v.eei.AddReturnMessage("no bls keys")
+		if v.enableEpochsHandler.IsMultiClaimOnDelegationEnabled() {
+			return vmcommon.UserError
+		}
 		return vmcommon.Ok
 	}
 
diff --git a/vm/systemSmartContracts/validator_test.go b/vm/systemSmartContracts/validator_test.go
index 328414510c6..c66873e8596 100644
--- a/vm/systemSmartContracts/validator_test.go
+++ b/vm/systemSmartContracts/validator_test.go
@@ -63,6 +63,7 @@ func createMockArgumentsForValidatorSCWithSystemScAddresses(
 			IsUnBondTokensV2FlagEnabledField:        true,
 			IsValidatorToDelegationFlagEnabledField: true,
 			IsDoubleKeyProtectionFlagEnabledField:   true,
+			IsMultiClaimOnDelegationEnabledField:    true,
 		},
 	}
 
@@ -3028,7 +3029,7 @@ func TestValidatorStakingSC_getBlsStatusNoBlsKeys(t *testing.T) {
 	arguments.Arguments = append(arguments.Arguments, []byte("erd key"))
 
 	returnCode := sc.Execute(arguments)
-	assert.Equal(t, vmcommon.Ok, returnCode)
+	assert.Equal(t, vmcommon.UserError, returnCode)
 	assert.True(t, strings.Contains(eei.returnMessage, "no bls keys"))
 }
 
@@ -3416,48 +3417,6 @@ func TestStakingValidatorSC_UnstakeAllTokensWithActiveNodesShouldError(t *testin
 	assert.True(t, strings.Contains(vmOutput.ReturnMessage, "cannot unStake tokens, the validator would remain without min deposit, nodes are still active"))
 }
 
-func TestStakingValidatorSC_UnstakeTokensWithLockedFundsShouldError(t *testing.T) {
-	t.Parallel()
-
-	minStakeValue := big.NewInt(1000)
-	unbondPeriod := uint64(10)
-	startEpoch := uint32(56)
-	epoch := startEpoch
-	blockChainHook := &mock.BlockChainHookStub{
-		CurrentEpochCalled: func() uint32 {
-			epoch++
-			return epoch
-		},
-	}
-	args := createMockArgumentsForValidatorSC()
-	enableEpochsHandler, _ := args.EnableEpochsHandler.(*testscommon.EnableEpochsHandlerStub)
-	enableEpochsHandler.IsStakingV2FlagEnabledField = true
-	eei := createVmContextWithStakingSc(minStakeValue, unbondPeriod, blockChainHook)
-	args.Eei = eei
-	caller := []byte("caller")
-	sc, _ := NewValidatorSmartContract(args)
-	_ = sc.saveRegistrationData(
-		caller,
-		&ValidatorDataV2{
-			RegisterNonce:   0,
-			Epoch:           0,
-			RewardAddress:   caller,
-			TotalStakeValue: big.NewInt(1010),
-			LockedStake:     big.NewInt(1000),
-			MaxStakePerNode: big.NewInt(0),
-			BlsPubKeys:      [][]byte{[]byte("key")},
-			NumRegistered:   1,
-			UnstakedInfo:    nil,
-			TotalUnstaked:   nil,
-		},
-	)
-
-	stakeLockKey := append([]byte(stakeLockPrefix), caller...)
-	eei.SetStorageForAddress(sc.governanceSCAddress, stakeLockKey, big.NewInt(0).SetUint64(10000).Bytes())
-	callFunctionAndCheckResult(t, "unStakeTokens", sc, caller, [][]byte{big.NewInt(1).Bytes()}, zero, vmcommon.UserError)
-	assert.Equal(t, eei.returnMessage, "stake is locked for voting")
-}
-
 func TestStakingValidatorSC_UnstakeTokensShouldWork(t *testing.T) {
 	t.Parallel()