diff --git a/api/groups/blockGroup.go b/api/groups/blockGroup.go index e8182e70327..26d9c05b000 100644 --- a/api/groups/blockGroup.go +++ b/api/groups/blockGroup.go @@ -10,8 +10,8 @@ import ( "github.com/gin-gonic/gin" "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-core-go/data/alteredAccount" "github.com/multiversx/mx-chain-core-go/data/api" - "github.com/multiversx/mx-chain-core-go/data/outport" "github.com/multiversx/mx-chain-go/api/errors" "github.com/multiversx/mx-chain-go/api/shared" "github.com/multiversx/mx-chain-go/api/shared/logging" @@ -33,7 +33,7 @@ type blockFacadeHandler interface { GetBlockByHash(hash string, options api.BlockQueryOptions) (*api.Block, error) GetBlockByNonce(nonce uint64, options api.BlockQueryOptions) (*api.Block, error) GetBlockByRound(round uint64, options api.BlockQueryOptions) (*api.Block, error) - GetAlteredAccountsForBlock(options api.GetAlteredAccountsForBlockOptions) ([]*outport.AlteredAccount, error) + GetAlteredAccountsForBlock(options api.GetAlteredAccountsForBlockOptions) ([]*alteredAccount.AlteredAccount, error) IsInterfaceNil() bool } diff --git a/api/groups/blockGroup_test.go b/api/groups/blockGroup_test.go index 6bc58267f3e..b190c2f0561 100644 --- a/api/groups/blockGroup_test.go +++ b/api/groups/blockGroup_test.go @@ -10,8 +10,8 @@ import ( "strings" "testing" + "github.com/multiversx/mx-chain-core-go/data/alteredAccount" "github.com/multiversx/mx-chain-core-go/data/api" - "github.com/multiversx/mx-chain-core-go/data/outport" apiErrors "github.com/multiversx/mx-chain-go/api/errors" "github.com/multiversx/mx-chain-go/api/groups" "github.com/multiversx/mx-chain-go/api/mock" @@ -39,7 +39,7 @@ func TestNewBlockGroup(t *testing.T) { type alteredAccountsForBlockResponse struct { Data struct { - Accounts []*outport.AlteredAccount `json:"accounts"` + Accounts []*alteredAccount.AlteredAccount `json:"accounts"` } `json:"data"` Error string `json:"error"` Code string `json:"code"` @@ -251,7 +251,7 @@ func TestBlockGroup_getAlteredAccountsByNonce(t *testing.T) { t.Parallel() facade := &mock.FacadeStub{ - GetAlteredAccountsForBlockCalled: func(options api.GetAlteredAccountsForBlockOptions) ([]*outport.AlteredAccount, error) { + GetAlteredAccountsForBlockCalled: func(options api.GetAlteredAccountsForBlockOptions) ([]*alteredAccount.AlteredAccount, error) { return nil, expectedErr }, } @@ -275,7 +275,7 @@ func TestBlockGroup_getAlteredAccountsByNonce(t *testing.T) { Nonce: providedNonce, }, } - expectedResponse := []*outport.AlteredAccount{ + expectedResponse := []*alteredAccount.AlteredAccount{ { Address: "alice", Balance: "100000", @@ -283,7 +283,7 @@ func TestBlockGroup_getAlteredAccountsByNonce(t *testing.T) { } facade := &mock.FacadeStub{ - GetAlteredAccountsForBlockCalled: func(options api.GetAlteredAccountsForBlockOptions) ([]*outport.AlteredAccount, error) { + GetAlteredAccountsForBlockCalled: func(options api.GetAlteredAccountsForBlockOptions) ([]*alteredAccount.AlteredAccount, error) { require.Equal(t, expectedOptions, options) return expectedResponse, nil }, @@ -320,7 +320,7 @@ func TestBlockGroup_getAlteredAccountsByHash(t *testing.T) { providedHash := hex.EncodeToString([]byte("hash")) facade := &mock.FacadeStub{ - GetAlteredAccountsForBlockCalled: func(options api.GetAlteredAccountsForBlockOptions) ([]*outport.AlteredAccount, error) { + GetAlteredAccountsForBlockCalled: func(options api.GetAlteredAccountsForBlockOptions) ([]*alteredAccount.AlteredAccount, error) { return nil, expectedErr }, } @@ -344,7 
+344,7 @@ func TestBlockGroup_getAlteredAccountsByHash(t *testing.T) { Hash: []byte("hash"), }, } - expectedResponse := []*outport.AlteredAccount{ + expectedResponse := []*alteredAccount.AlteredAccount{ { Address: "alice", Balance: "100000", @@ -352,7 +352,7 @@ func TestBlockGroup_getAlteredAccountsByHash(t *testing.T) { } facade := &mock.FacadeStub{ - GetAlteredAccountsForBlockCalled: func(options api.GetAlteredAccountsForBlockOptions) ([]*outport.AlteredAccount, error) { + GetAlteredAccountsForBlockCalled: func(options api.GetAlteredAccountsForBlockOptions) ([]*alteredAccount.AlteredAccount, error) { require.Equal(t, providedHash, hex.EncodeToString(options.Hash)) require.Equal(t, expectedOptions, options) return expectedResponse, nil diff --git a/api/middleware/responseLogger.go b/api/middleware/responseLogger.go index 36ff6261ec7..233ad88b809 100644 --- a/api/middleware/responseLogger.go +++ b/api/middleware/responseLogger.go @@ -66,7 +66,7 @@ func (rlm *responseLoggerMiddleware) logRequestAndResponse(c *gin.Context, durat reqBody := c.Request.Body reqBodyBytes, err := ioutil.ReadAll(reqBody) if err != nil { - log.Error(err.Error()) + log.Debug(err.Error()) return } diff --git a/api/mock/facadeStub.go b/api/mock/facadeStub.go index a31b3dab13d..b88c3e01709 100644 --- a/api/mock/facadeStub.go +++ b/api/mock/facadeStub.go @@ -5,9 +5,9 @@ import ( "math/big" "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/data/alteredAccount" "github.com/multiversx/mx-chain-core-go/data/api" "github.com/multiversx/mx-chain-core-go/data/esdt" - outportcore "github.com/multiversx/mx-chain-core-go/data/outport" "github.com/multiversx/mx-chain-core-go/data/transaction" "github.com/multiversx/mx-chain-core-go/data/vm" "github.com/multiversx/mx-chain-go/common" @@ -56,7 +56,7 @@ type FacadeStub struct { GetNFTTokenIDsRegisteredByAddressCalled func(address string, options api.AccountQueryOptions) ([]string, api.BlockInfo, error) GetBlockByHashCalled func(hash string, options api.BlockQueryOptions) (*api.Block, error) GetBlockByNonceCalled func(nonce uint64, options api.BlockQueryOptions) (*api.Block, error) - GetAlteredAccountsForBlockCalled func(options api.GetAlteredAccountsForBlockOptions) ([]*outportcore.AlteredAccount, error) + GetAlteredAccountsForBlockCalled func(options api.GetAlteredAccountsForBlockOptions) ([]*alteredAccount.AlteredAccount, error) GetBlockByRoundCalled func(round uint64, options api.BlockQueryOptions) (*api.Block, error) GetInternalShardBlockByNonceCalled func(format common.ApiOutputFormat, nonce uint64) (interface{}, error) GetInternalShardBlockByHashCalled func(format common.ApiOutputFormat, hash string) (interface{}, error) @@ -411,7 +411,7 @@ func (f *FacadeStub) GetBlockByRound(round uint64, options api.BlockQueryOptions } // GetAlteredAccountsForBlock - -func (f *FacadeStub) GetAlteredAccountsForBlock(options api.GetAlteredAccountsForBlockOptions) ([]*outportcore.AlteredAccount, error) { +func (f *FacadeStub) GetAlteredAccountsForBlock(options api.GetAlteredAccountsForBlockOptions) ([]*alteredAccount.AlteredAccount, error) { if f.GetAlteredAccountsForBlockCalled != nil { return f.GetAlteredAccountsForBlockCalled(options) } diff --git a/api/shared/interface.go b/api/shared/interface.go index 0063d64321e..f0a9a98359e 100644 --- a/api/shared/interface.go +++ b/api/shared/interface.go @@ -5,9 +5,9 @@ import ( "github.com/gin-gonic/gin" "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/data/alteredAccount" 
"github.com/multiversx/mx-chain-core-go/data/api" "github.com/multiversx/mx-chain-core-go/data/esdt" - outportcore "github.com/multiversx/mx-chain-core-go/data/outport" "github.com/multiversx/mx-chain-core-go/data/transaction" "github.com/multiversx/mx-chain-core-go/data/vm" "github.com/multiversx/mx-chain-go/common" @@ -77,7 +77,7 @@ type FacadeHandler interface { GetBlockByHash(hash string, options api.BlockQueryOptions) (*api.Block, error) GetBlockByNonce(nonce uint64, options api.BlockQueryOptions) (*api.Block, error) GetBlockByRound(round uint64, options api.BlockQueryOptions) (*api.Block, error) - GetAlteredAccountsForBlock(options api.GetAlteredAccountsForBlockOptions) ([]*outportcore.AlteredAccount, error) + GetAlteredAccountsForBlock(options api.GetAlteredAccountsForBlockOptions) ([]*alteredAccount.AlteredAccount, error) GetInternalShardBlockByNonce(format common.ApiOutputFormat, nonce uint64) (interface{}, error) GetInternalShardBlockByHash(format common.ApiOutputFormat, hash string) (interface{}, error) GetInternalShardBlockByRound(format common.ApiOutputFormat, round uint64) (interface{}, error) diff --git a/cmd/keygenerator/converter/pidPubkeyConverter.go b/cmd/keygenerator/converter/pidPubkeyConverter.go index 1cff0dfa0d7..41eeea15fa1 100644 --- a/cmd/keygenerator/converter/pidPubkeyConverter.go +++ b/cmd/keygenerator/converter/pidPubkeyConverter.go @@ -6,11 +6,8 @@ import ( "github.com/multiversx/mx-chain-crypto-go/signing/secp256k1" "github.com/multiversx/mx-chain-go/p2p" "github.com/multiversx/mx-chain-go/p2p/factory" - logger "github.com/multiversx/mx-chain-logger-go" ) -var log = logger.GetOrCreate("cmd/keygenerator/converter") - type pidPubkeyConverter struct { keyGen crypto.KeyGenerator p2PKeyConverter p2p.P2PKeyConverter diff --git a/cmd/node/config/config.toml b/cmd/node/config/config.toml index c3704f25719..68e5c80c6fa 100644 --- a/cmd/node/config/config.toml +++ b/cmd/node/config/config.toml @@ -715,6 +715,9 @@ "erd1najnxxweyw6plhg8efql330nttrj6l5cf87wqsuym85s9ha0hmdqnqgenp", #shard 2 ] MaxNumAddressesInTransferRole = 100 + DNSV2Addresses =[ + "erd1qqqqqqqqqqqqqpgqr46jrxr6r2unaqh75ugd308dwx5vgnhwh47qtvepe3", + ] [Hardfork] EnableTrigger = true diff --git a/cmd/node/config/enableEpochs.toml b/cmd/node/config/enableEpochs.toml index 12741cd03aa..959b5a361b4 100644 --- a/cmd/node/config/enableEpochs.toml +++ b/cmd/node/config/enableEpochs.toml @@ -248,15 +248,21 @@ # MultiClaimOnDelegationEnableEpoch represents the epoch when the multi claim on delegation is enabled MultiClaimOnDelegationEnableEpoch = 3 - # BLSMultiSignerEnableEpoch represents the activation epoch for different types of BLS multi-signers - BLSMultiSignerEnableEpoch = [ - { EnableEpoch = 0, Type = "no-KOSK"}, - { EnableEpoch = 1, Type = "KOSK"} - ] + # ChangeUsernameEnableEpoch represents the epoch when changing username is enabled + ChangeUsernameEnableEpoch = 2 # SetGuardianEnableEpoch represents the epoch when the guard account feature is enabled in the protocol SetGuardianEnableEpoch = 2 + # ConsistentTokensValuesLengthCheckEnableEpoch represents the epoch when the consistent tokens values length check is enabled + ConsistentTokensValuesLengthCheckEnableEpoch = 2 + + # BLSMultiSignerEnableEpoch represents the activation epoch for different types of BLS multi-signers + BLSMultiSignerEnableEpoch = [ + { EnableEpoch = 0, Type = "no-KOSK" }, + { EnableEpoch = 1, Type = "KOSK" } + ] + # MaxNodesChangeEnableEpoch holds configuration for changing the maximum number of nodes and the enabling epoch 
MaxNodesChangeEnableEpoch = [ { EpochEnable = 0, MaxNumNodes = 36, NodesToShufflePerShard = 4 }, diff --git a/cmd/node/config/external.toml b/cmd/node/config/external.toml index 3e55323e94e..8f0bd3e9817 100644 --- a/cmd/node/config/external.toml +++ b/cmd/node/config/external.toml @@ -37,10 +37,23 @@ # RequestTimeoutSec defines the timeout in seconds for the http client RequestTimeoutSec = 60 -[WebSocketConnector] + # MarshallerType is used to define the marshaller type to be used for inner + # marshalled structures in block events data + MarshallerType = "json" + +[HostDriverConfig] # This flag shall only be used for observer nodes Enabled = false - URL = "localhost:22111" + # This flag will start the WebSocket connector as server or client (can be "client" or "server") + Mode = "client" + # URL for the WebSocket client/server connection + # This value represents the IP address and port number that the WebSocket client or server will use to establish a connection. + URL = "127.0.0.1:22111" + # After a message is sent, the node will wait for an ack message if this flag is enabled WithAcknowledge = true # Currently, only "json" is supported. In the future, "gogo protobuf" could also be supported MarshallerType = "json" + # The number of seconds after which the client will retry sending the data + RetryDurationInSec = 5 + # Sets whether, in case of a data payload processing error, the advancement to the next processing event should be blocked. Set this to true if you wish the node to stop processing blocks if the client/server encounters errors while processing requests. + BlockingAckOnError = false diff --git a/cmd/node/config/prefs.toml b/cmd/node/config/prefs.toml index d6339fca6ab..98d5c02557f 100644 --- a/cmd/node/config/prefs.toml +++ b/cmd/node/config/prefs.toml @@ -8,7 +8,7 @@ # In multikey mode, all bls keys not mentioned in NamedIdentity section will use this one as default NodeDisplayName = "" - # Identity represents the keybase's identity when the node does not run in multikey mode + # Identity represents the keybase/GitHub identity when the node does not run in multikey mode # In multikey mode, all bls keys not mentioned in NamedIdentity section will use this one as default Identity = "" @@ -50,10 +50,28 @@ # { File = "external.toml", Path = "ElasticSearchConnector.Enabled", Value = "true" } #] +# BlockProcessingCutoff can be used to stop processing blocks at a certain round, nonce or epoch. +# This can be useful for taking snapshots and also for debugging purposes. +[BlockProcessingCutoff] + # If set to true, the node will stop at the given coordinate + Enabled = false + + # Mode represents the cutoff mode. Possible values: "pause" or "process-error". + # "pause" mode will halt the processing at the block with the given coordinates. Useful for snapshots/analytics + # "process-error" will return an error when processing the block with the given coordinates. Useful for debugging + Mode = "pause" + + # CutoffTrigger represents the kind of coordinate to look for when cutting off the processing. + # Possible values: "round", "nonce", or "epoch" + CutoffTrigger = "round" + + # The minimum value of the cutoff. 
For example, if CutoffTrigger is set to "round", and Value to 20, then the node will stop processing at round 20+ + Value = 0 + # NamedIdentity represents an identity that runs nodes on the multikey # There can be multiple identities set on the same node, each one of them having different bls keys, just by duplicating the NamedIdentity [[NamedIdentity]] - # Identity represents the keybase identity for the current NamedIdentity + # Identity represents the keybase/GitHub identity for the current NamedIdentity Identity = "" # NodeName represents the name that will be given to the names of the current identity NodeName = "" diff --git a/cmd/node/config/systemSmartContractsConfig.toml b/cmd/node/config/systemSmartContractsConfig.toml index 256dc292171..512596dff05 100644 --- a/cmd/node/config/systemSmartContractsConfig.toml +++ b/cmd/node/config/systemSmartContractsConfig.toml @@ -17,7 +17,7 @@ OwnerAddress = "erd1fpkcgel4gcmh8zqqdt043yfcn5tyx8373kg6q2qmkxzu4dqamc0swts65c" [GovernanceSystemSCConfig] - ChangeConfigAddress = "erd1vxy22x0fj4zv6hktmydg8vpfh6euv02cz4yg0aaws6rrad5a5awqgqky80" #should use a multisign contract instead of a wallet address + OwnerAddress = "erd1vxy22x0fj4zv6hktmydg8vpfh6euv02cz4yg0aaws6rrad5a5awqgqky80" #should use a multisign contract instead of a wallet address [GovernanceSystemSCConfig.V1] NumNodes = 500 ProposalCost = "5000000000000000000" #5 eGLD @@ -26,6 +26,7 @@ MinVetoThreshold = 50 [GovernanceSystemSCConfig.Active] ProposalCost = "1000000000000000000000" #1000 eGLD + LostProposalFee = "10000000000000000000" #10 eGLD MinQuorum = 0.5 #fraction of value 0.5 - 50% MinPassThreshold = 0.5 #fraction of value 0.5 - 50% MinVetoThreshold = 0.33 #fraction of value 0.33 - 33% diff --git a/cmd/node/flags.go b/cmd/node/flags.go index 0cb32cb937e..004f8d0c024 100644 --- a/cmd/node/flags.go +++ b/cmd/node/flags.go @@ -384,6 +384,12 @@ var ( Usage: "String flag for specifying the desired `operation mode`(s) of the node, resulting in altering some configuration values accordingly. Possible values are: snapshotless-observer, full-archive, db-lookup-extension, historical-balances or `\"\"` (empty). Multiple values can be separated via ,", Value: "", } + + // repopulateTokensSupplies defines a flag that, if set, will repopulate the tokens supplies database by iterating over the trie + repopulateTokensSupplies = cli.BoolFlag{ + Name: "repopulate-tokens-supplies", + Usage: "Boolean flag for repopulating the tokens supplies database. 
It will delete the current data, iterate over the entire trie and add the newly obtained supplies", + } ) func getFlags() []cli.Flag { @@ -443,6 +449,7 @@ func getFlags() []cli.Flag { dbDirectory, logsDirectory, operationMode, + repopulateTokensSupplies, } } @@ -472,6 +479,7 @@ func getFlagsConfig(ctx *cli.Context, log logger.Logger) *config.ContextFlagsCon flagsConfig.NoKeyProvided = ctx.GlobalBool(noKey.Name) flagsConfig.SnapshotsEnabled = ctx.GlobalBool(snapshotsEnabled.Name) flagsConfig.OperationMode = ctx.GlobalString(operationMode.Name) + flagsConfig.RepopulateTokensSupplies = ctx.GlobalBool(repopulateTokensSupplies.Name) return flagsConfig } @@ -578,6 +586,11 @@ func applyCompatibleConfigs(log logger.Logger, configs *config.Configs) error { return fmt.Errorf("import-db-no-sig-check can only be used with the import-db flag") } + if configs.PreferencesConfig.BlockProcessingCutoff.Enabled { + log.Debug("node is started by using the block processing cut-off - will disable the watchdog") + configs.FlagsConfig.DisableConsensusWatchdog = true + } + operationModes, err := operationmodes.ParseOperationModes(configs.FlagsConfig.OperationMode) if err != nil { return err diff --git a/common/constants.go b/common/constants.go index 66cbd149718..521ef905d8e 100644 --- a/common/constants.go +++ b/common/constants.go @@ -730,10 +730,6 @@ const InvalidMessageBlacklistDuration = time.Second * 3600 // rating to a minimum threshold due to improper messages const PublicKeyBlacklistDuration = time.Second * 7200 -// WrongP2PMessageBlacklistDuration represents the time to keep a peer id in the blacklist if it sends a message that -// do not follow this protocol -const WrongP2PMessageBlacklistDuration = time.Second * 7200 - // InvalidSigningBlacklistDuration defines the time to keep a peer id in blacklist if it signs a message with invalid signature const InvalidSigningBlacklistDuration = time.Second * 7200 @@ -774,9 +770,6 @@ const HardforkResolversIdentifier = "hardfork resolver" // EpochStartInterceptorsIdentifier represents the identifier that is used in the start-in-epoch process const EpochStartInterceptorsIdentifier = "epoch start interceptor" -// GetNodeFromDBErrorString represents the string which is returned when a getting node from DB returns an error -const GetNodeFromDBErrorString = "getNodeFromDB error" - // TimeoutGettingTrieNodes defines the timeout in trie sync operation if no node is received const TimeoutGettingTrieNodes = 2 * time.Minute // to consider syncing a very large trie node of 64MB at ~1MB/s @@ -831,6 +824,28 @@ const ( ApiOutputFormatProto ApiOutputFormat = 1 ) +// BlockProcessingCutoffMode represents the type to be used to identify the mode of the block processing cutoff +type BlockProcessingCutoffMode string + +const ( + // BlockProcessingCutoffModePause represents the mode where the node will pause the processing at the given coordinates + BlockProcessingCutoffModePause = "pause" + // BlockProcessingCutoffModeProcessError represents the mode where the node will reprocess with error the block at the given coordinates + BlockProcessingCutoffModeProcessError = "process-error" +) + +// BlockProcessingCutoffTrigger represents the trigger of the cutoff potentially used in block processing +type BlockProcessingCutoffTrigger string + +const ( + // BlockProcessingCutoffByNonce represents the cutoff by nonce + BlockProcessingCutoffByNonce BlockProcessingCutoffTrigger = "nonce" + // BlockProcessingCutoffByRound represents the cutoff by round + BlockProcessingCutoffByRound 
BlockProcessingCutoffTrigger = "round" + // BlockProcessingCutoffByEpoch represents the cutoff by epoch + BlockProcessingCutoffByEpoch BlockProcessingCutoffTrigger = "epoch" +) + // MaxIndexOfTxInMiniBlock defines the maximum index of a tx inside one mini block const MaxIndexOfTxInMiniBlock = int32(29999) diff --git a/common/dtos.go b/common/dtos.go index 312562fa6cb..e7876a9131b 100644 --- a/common/dtos.go +++ b/common/dtos.go @@ -1,6 +1,8 @@ package common -import "github.com/multiversx/mx-chain-core-go/data/outport" +import ( + "github.com/multiversx/mx-chain-core-go/data/alteredAccount" +) // GetProofResponse is a struct that stores the response of a GetProof API request type GetProofResponse struct { @@ -71,5 +73,5 @@ type EpochStartDataAPI struct { // AlteredAccountsForBlockAPIResponse holds the altered accounts for a certain block type AlteredAccountsForBlockAPIResponse struct { - Accounts []*outport.AlteredAccount `json:"accounts"` + Accounts []*alteredAccount.AlteredAccount `json:"accounts"` } diff --git a/common/enablers/enableEpochsHandler.go b/common/enablers/enableEpochsHandler.go index 59cee759e8a..cac29504579 100644 --- a/common/enablers/enableEpochsHandler.go +++ b/common/enablers/enableEpochsHandler.go @@ -121,6 +121,8 @@ func (handler *enableEpochsHandler) EpochConfirmed(epoch uint32, _ uint64) { handler.setFlagValue(epoch >= handler.enableEpochsConfig.MultiClaimOnDelegationEnableEpoch, handler.multiClaimOnDelegationFlag, "multiClaimOnDelegationFlag", epoch, handler.enableEpochsConfig.MultiClaimOnDelegationEnableEpoch) handler.setFlagValue(epoch >= handler.enableEpochsConfig.SetGuardianEnableEpoch, handler.setGuardianFlag, "setGuardianFlag", epoch, handler.enableEpochsConfig.SetGuardianEnableEpoch) handler.setFlagValue(epoch >= handler.enableEpochsConfig.KeepExecOrderOnCreatedSCRsEnableEpoch, handler.keepExecOrderOnCreatedSCRsFlag, "keepExecOrderOnCreatedSCRsFlag", epoch, handler.enableEpochsConfig.KeepExecOrderOnCreatedSCRsEnableEpoch) + handler.setFlagValue(epoch >= handler.enableEpochsConfig.ChangeUsernameEnableEpoch, handler.changeUsernameFlag, "changeUsername", epoch, handler.enableEpochsConfig.ChangeUsernameEnableEpoch) + handler.setFlagValue(epoch >= handler.enableEpochsConfig.ConsistentTokensValuesLengthCheckEnableEpoch, handler.consistentTokensValuesCheckFlag, "consistentTokensValuesCheckFlag", epoch, handler.enableEpochsConfig.ConsistentTokensValuesLengthCheckEnableEpoch) } func (handler *enableEpochsHandler) setFlagValue(value bool, flag *atomic.Flag, flagName string, epoch uint32, flagEpoch uint32) { diff --git a/common/enablers/enableEpochsHandler_test.go b/common/enablers/enableEpochsHandler_test.go index 28e905aae64..76589bebc95 100644 --- a/common/enablers/enableEpochsHandler_test.go +++ b/common/enablers/enableEpochsHandler_test.go @@ -93,6 +93,7 @@ func createEnableEpochsConfig() config.EnableEpochs { RuntimeCodeSizeFixEnableEpoch: 77, MultiClaimOnDelegationEnableEpoch: 78, KeepExecOrderOnCreatedSCRsEnableEpoch: 79, + ChangeUsernameEnableEpoch: 80, } } @@ -215,13 +216,15 @@ func TestNewEnableEpochsHandler_EpochConfirmed(t *testing.T) { assert.True(t, handler.IsMaxBlockchainHookCountersFlagEnabled()) assert.True(t, handler.IsAlwaysSaveTokenMetaDataEnabled()) assert.True(t, handler.IsRuntimeCodeSizeFixEnabled()) + assert.True(t, handler.IsConsistentTokensValuesLengthCheckEnabled()) assert.False(t, handler.IsKeepExecOrderOnCreatedSCRsEnabled()) assert.False(t, handler.IsMultiClaimOnDelegationEnabled()) + assert.False(t, handler.IsChangeUsernameEnabled()) }) 
t.Run("flags with == condition should be set, along with all >=", func(t *testing.T) { t.Parallel() - epoch := uint32(79) + epoch := uint32(80) cfg := createEnableEpochsConfig() cfg.StakingV2EnableEpoch = epoch cfg.ESDTEnableEpoch = epoch @@ -318,6 +321,7 @@ func TestNewEnableEpochsHandler_EpochConfirmed(t *testing.T) { assert.True(t, handler.IsAlwaysSaveTokenMetaDataEnabled()) assert.True(t, handler.IsRuntimeCodeSizeFixEnabled()) assert.True(t, handler.IsKeepExecOrderOnCreatedSCRsEnabled()) + assert.True(t, handler.IsChangeUsernameEnabled()) }) t.Run("flags with < should be set", func(t *testing.T) { t.Parallel() @@ -414,5 +418,6 @@ func TestNewEnableEpochsHandler_EpochConfirmed(t *testing.T) { assert.False(t, handler.IsAlwaysSaveTokenMetaDataEnabled()) assert.False(t, handler.IsRuntimeCodeSizeFixEnabled()) assert.False(t, handler.IsKeepExecOrderOnCreatedSCRsEnabled()) + assert.False(t, handler.IsChangeUsernameEnabled()) }) } diff --git a/common/enablers/epochFlags.go b/common/enablers/epochFlags.go index e77279928cb..6f0efe9c0d1 100644 --- a/common/enablers/epochFlags.go +++ b/common/enablers/epochFlags.go @@ -93,6 +93,8 @@ type epochFlagsHolder struct { setGuardianFlag *atomic.Flag keepExecOrderOnCreatedSCRsFlag *atomic.Flag multiClaimOnDelegationFlag *atomic.Flag + changeUsernameFlag *atomic.Flag + consistentTokensValuesCheckFlag *atomic.Flag } func newEpochFlagsHolder() *epochFlagsHolder { @@ -184,7 +186,9 @@ func newEpochFlagsHolder() *epochFlagsHolder { alwaysSaveTokenMetaDataFlag: &atomic.Flag{}, setGuardianFlag: &atomic.Flag{}, keepExecOrderOnCreatedSCRsFlag: &atomic.Flag{}, + consistentTokensValuesCheckFlag: &atomic.Flag{}, multiClaimOnDelegationFlag: &atomic.Flag{}, + changeUsernameFlag: &atomic.Flag{}, } } @@ -671,6 +675,11 @@ func (holder *epochFlagsHolder) IsSetGuardianEnabled() bool { return holder.setGuardianFlag.IsSet() } +// IsConsistentTokensValuesLengthCheckEnabled returns true if consistentTokensValuesCheckFlag is enabled +func (holder *epochFlagsHolder) IsConsistentTokensValuesLengthCheckEnabled() bool { + return holder.consistentTokensValuesCheckFlag.IsSet() +} + // IsKeepExecOrderOnCreatedSCRsEnabled returns true if keepExecOrderOnCreatedSCRsFlag is enabled func (holder *epochFlagsHolder) IsKeepExecOrderOnCreatedSCRsEnabled() bool { return holder.keepExecOrderOnCreatedSCRsFlag.IsSet() @@ -680,3 +689,8 @@ func (holder *epochFlagsHolder) IsKeepExecOrderOnCreatedSCRsEnabled() bool { func (holder *epochFlagsHolder) IsMultiClaimOnDelegationEnabled() bool { return holder.multiClaimOnDelegationFlag.IsSet() } + +// IsChangeUsernameEnabled returns true if changeUsernameFlag is enabled +func (holder *epochFlagsHolder) IsChangeUsernameEnabled() bool { + return holder.changeUsernameFlag.IsSet() +} diff --git a/common/errors.go b/common/errors.go index 88be5aad958..47b976de9a8 100644 --- a/common/errors.go +++ b/common/errors.go @@ -7,3 +7,6 @@ var ErrInvalidTimeout = errors.New("invalid timeout value") // ErrNilWasmChangeLocker signals that a nil wasm change locker has been provided var ErrNilWasmChangeLocker = errors.New("nil wasm change locker") + +// ErrNilStateSyncNotifierSubscriber signals that a nil state sync notifier subscriber has been provided +var ErrNilStateSyncNotifierSubscriber = errors.New("nil state sync notifier subscriber") diff --git a/common/interface.go b/common/interface.go index e992d41ec5b..fb9d83a0150 100644 --- a/common/interface.go +++ b/common/interface.go @@ -54,6 +54,12 @@ type TrieStats interface { GetTrieStats(address string, rootHash []byte) 
(*statistics.TrieStatsDTO, error) } +// StorageMarker is used to mark the given storer as synced and active +type StorageMarker interface { + MarkStorerAsSyncedAndActive(storer StorageManager) + IsInterfaceNil() bool +} + // KeyBuilder is used for building trie keys as you traverse the trie type KeyBuilder interface { BuildKey(keyPart []byte) @@ -70,9 +76,8 @@ type DataTrieHandler interface { // StorageManager manages all trie storage operations type StorageManager interface { - Get(key []byte) ([]byte, error) + TrieStorageInteractor GetFromCurrentEpoch(key []byte) ([]byte, error) - Put(key []byte, val []byte) error PutInEpoch(key []byte, val []byte, epoch uint32) error PutInEpochWithoutCache(key []byte, val []byte, epoch uint32) error TakeSnapshot(address string, rootHash []byte, mainTrieRootHash []byte, iteratorChannels *TrieIteratorChannels, missingNodesChan chan []byte, stats SnapshotStatisticsHandler, epoch uint32) @@ -83,7 +88,6 @@ type StorageManager interface { EnterPruningBufferingMode() ExitPruningBufferingMode() AddDirtyCheckpointHashes([]byte, ModifiedHashes) bool - Remove(hash []byte) error SetEpochForPutOperation(uint32) ShouldTakeSnapshot() bool GetBaseTrieStorageManager() StorageManager @@ -92,8 +96,14 @@ type StorageManager interface { IsInterfaceNil() bool } -// DBWriteCacher is used to cache changes made to the trie, and only write to the database when it's needed -type DBWriteCacher interface { +// TrieStorageInteractor defines the methods used for interacting with the trie storage +type TrieStorageInteractor interface { + BaseStorer + GetIdentifier() string +} + +// BaseStorer defines the base methods needed for a storer +type BaseStorer interface { Put(key, val []byte) error Get(key []byte) ([]byte, error) Remove(key []byte) error @@ -103,7 +113,7 @@ type DBWriteCacher interface { // SnapshotDbHandler is used to keep track of how many references a snapshot db has type SnapshotDbHandler interface { - DBWriteCacher + BaseStorer IsInUse() bool DecreaseNumReferences() IncreaseNumReferences() @@ -339,6 +349,8 @@ type EnableEpochsHandler interface { IsSetGuardianEnabled() bool IsKeepExecOrderOnCreatedSCRsEnabled() bool IsMultiClaimOnDelegationEnabled() bool + IsChangeUsernameEnabled() bool + IsConsistentTokensValuesLengthCheckEnabled() bool IsInterfaceNil() bool } @@ -363,3 +375,16 @@ type ManagedPeersHolder interface { IsMultiKeyMode() bool IsInterfaceNil() bool } + +// MissingTrieNodesNotifier defines the operations of an entity that notifies about missing trie nodes +type MissingTrieNodesNotifier interface { + RegisterHandler(handler StateSyncNotifierSubscriber) error + AsyncNotifyMissingTrieNode(hash []byte) + IsInterfaceNil() bool +} + +// StateSyncNotifierSubscriber defines the operations of an entity that subscribes to a missing trie nodes notifier +type StateSyncNotifierSubscriber interface { + MissingDataTrieNodeFound(hash []byte) + IsInterfaceNil() bool +} diff --git a/common/logging/errorLogging.go b/common/logging/errorLogging.go index 17693e3b4f3..94bc88ae74d 100644 --- a/common/logging/errorLogging.go +++ b/common/logging/errorLogging.go @@ -1,8 +1,8 @@ package logging import ( + "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" - chainErrors "github.com/multiversx/mx-chain-go/errors" logger "github.com/multiversx/mx-chain-logger-go" ) @@ -24,7 +24,7 @@ func logErrAsLevelExceptAsDebugIfClosingError(logInstance logger.Logger, logLeve return } - if chainErrors.IsClosingError(err) { 
logLevel = logger.LogDebug } diff --git a/config/config.go b/config/config.go index 63c2d8e38f2..7ffd1e35d37 100644 --- a/config/config.go +++ b/config/config.go @@ -421,6 +421,7 @@ type VirtualMachineGasConfig struct { type BuiltInFunctionsConfig struct { AutomaticCrawlerAddresses []string MaxNumAddressesInTransferRole uint32 + DNSV2Addresses []string } // HardforkConfig holds the configuration for the hardfork trigger diff --git a/config/contextFlagsConfig.go b/config/contextFlagsConfig.go index 360eeabf349..c5ccc61bca1 100644 --- a/config/contextFlagsConfig.go +++ b/config/contextFlagsConfig.go @@ -28,6 +28,7 @@ type ContextFlagsConfig struct { NoKeyProvided bool SnapshotsEnabled bool OperationMode string + RepopulateTokensSupplies bool } // ImportDbConfig will hold the import-db parameters diff --git a/config/epochConfig.go b/config/epochConfig.go index deadeb79d11..838af4b95bf 100644 --- a/config/epochConfig.go +++ b/config/epochConfig.go @@ -95,8 +95,10 @@ type EnableEpochs struct { AlwaysSaveTokenMetaDataEnableEpoch uint32 KeepExecOrderOnCreatedSCRsEnableEpoch uint32 MultiClaimOnDelegationEnableEpoch uint32 + ChangeUsernameEnableEpoch uint32 BLSMultiSignerEnableEpoch []MultiSignerConfig SetGuardianEnableEpoch uint32 + ConsistentTokensValuesLengthCheckEnableEpoch uint32 } // GasScheduleByEpochs represents a gas schedule toml entry that will be applied from the provided epoch diff --git a/config/externalConfig.go b/config/externalConfig.go index 760e1dcd99b..f5609ed3a6c 100644 --- a/config/externalConfig.go +++ b/config/externalConfig.go @@ -4,7 +4,7 @@ package config type ExternalConfig struct { ElasticSearchConnector ElasticSearchConfig EventNotifierConnector EventNotifierConfig - WebSocketConnector WebSocketDriverConfig + HostDriverConfig HostDriverConfig } // ElasticSearchConfig will hold the configuration for the elastic search @@ -27,6 +27,7 @@ type EventNotifierConfig struct { Username string Password string RequestTimeoutSec int + MarshallerType string } // CovalentConfig will hold the configurations for covalent indexer @@ -37,10 +38,13 @@ type CovalentConfig struct { RouteAcknowledgeData string } -// WebSocketDriverConfig will hold the configuration for web socket driver -type WebSocketDriverConfig struct { - Enabled bool - WithAcknowledge bool - URL string - MarshallerType string +// HostDriverConfig will hold the configuration for WebSocket driver +type HostDriverConfig struct { + Enabled bool + WithAcknowledge bool + BlockingAckOnError bool + URL string + MarshallerType string + Mode string + RetryDurationInSec int } diff --git a/config/prefsConfig.go b/config/prefsConfig.go index 4a6df0c9a73..34861d647e8 100644 --- a/config/prefsConfig.go +++ b/config/prefsConfig.go @@ -2,8 +2,9 @@ package config // Preferences will hold the configuration related to node's preferences type Preferences struct { - Preferences PreferencesConfig - NamedIdentity []NamedIdentity + Preferences PreferencesConfig + BlockProcessingCutoff BlockProcessingCutoffConfig + NamedIdentity []NamedIdentity } // PreferencesConfig will hold the fields which are node specific such as the display name @@ -25,6 +26,14 @@ type OverridableConfig struct { Value string } +// BlockProcessingCutoffConfig holds the configuration for the block processing cutoff +type BlockProcessingCutoffConfig struct { + Enabled bool + Mode string + CutoffTrigger string + Value uint64 +} + // NamedIdentity will hold the fields which are node named identities type NamedIdentity struct { Identity string diff --git 
a/config/systemSmartContractsConfig.go b/config/systemSmartContractsConfig.go index 895aea97c8b..d48027574eb 100644 --- a/config/systemSmartContractsConfig.go +++ b/config/systemSmartContractsConfig.go @@ -45,6 +45,7 @@ type GovernanceSystemSCConfigV1 struct { // system smart contract once it activates type GovernanceSystemSCConfigActive struct { ProposalCost string + LostProposalFee string MinQuorum float64 MinPassThreshold float64 MinVetoThreshold float64 @@ -52,9 +53,9 @@ type GovernanceSystemSCConfigActive struct { // GovernanceSystemSCConfig defines the set of constants to initialize the governance system smart contract type GovernanceSystemSCConfig struct { - V1 GovernanceSystemSCConfigV1 - Active GovernanceSystemSCConfigActive - ChangeConfigAddress string + V1 GovernanceSystemSCConfigV1 + Active GovernanceSystemSCConfigActive + OwnerAddress string } // DelegationManagerSystemSCConfig defines a set of constants to initialize the delegation manager system smart contract diff --git a/config/tomlConfig_test.go b/config/tomlConfig_test.go index d7a3b1c7170..61de536871c 100644 --- a/config/tomlConfig_test.go +++ b/config/tomlConfig_test.go @@ -324,6 +324,12 @@ func TestTomlPreferencesParser(t *testing.T) { RedundancyLevel: redundancyLevel, PreferredConnections: []string{prefPubKey0, prefPubKey1}, }, + BlockProcessingCutoff: BlockProcessingCutoffConfig{ + Enabled: true, + Mode: "pause", + CutoffTrigger: "round", + Value: 55, + }, } testString := ` @@ -336,6 +342,12 @@ func TestTomlPreferencesParser(t *testing.T) { "` + prefPubKey0 + `", "` + prefPubKey1 + `" ] + +[BlockProcessingCutoff] + Enabled = true + Mode = "pause" + CutoffTrigger = "round" + Value = 55 ` cfg := Preferences{} @@ -689,14 +701,20 @@ func TestEnableEpochConfig(t *testing.T) { # RuntimeMemStoreLimitEnableEpoch represents the epoch when the condition for Runtime MemStore is enabled RuntimeMemStoreLimitEnableEpoch = 63 - # SetGuardianEnableEpoch represents the epoch when guard account feature is enabled - SetGuardianEnableEpoch = 64 + # SetGuardianEnableEpoch represents the epoch when guard account feature is enabled + SetGuardianEnableEpoch = 64 # KeepExecOrderOnCreatedSCRsEnableEpoch represents the epoch when the execution order of created SCRs is ensured - KeepExecOrderOnCreatedSCRsEnableEpoch = 64 + KeepExecOrderOnCreatedSCRsEnableEpoch = 65 # MultiClaimOnDelegationEnableEpoch represents the epoch when the multi claim on delegation function is enabled - MultiClaimOnDelegationEnableEpoch = 65 + MultiClaimOnDelegationEnableEpoch = 66 + + # ChangeUsernameEnableEpoch represents the epoch when change username is enabled + ChangeUsernameEnableEpoch = 67 + + # ConsistentTokensValuesLengthCheckEnableEpoch represents the epoch when the consistent tokens values length check is enabled + ConsistentTokensValuesLengthCheckEnableEpoch = 68 # MaxNodesChangeEnableEpoch holds configuration for changing the maximum number of nodes and the enabling epoch MaxNodesChangeEnableEpoch = [ @@ -742,60 +760,62 @@ func TestEnableEpochConfig(t *testing.T) { NodesToShufflePerShard: 80, }, }, - BlockGasAndFeesReCheckEnableEpoch: 13, - StakingV2EnableEpoch: 18, - StakeEnableEpoch: 17, - DoubleKeyProtectionEnableEpoch: 19, - ESDTEnableEpoch: 20, - GovernanceEnableEpoch: 21, - DelegationManagerEnableEpoch: 22, - DelegationSmartContractEnableEpoch: 23, - CorrectLastUnjailedEnableEpoch: 24, - BalanceWaitingListsEnableEpoch: 14, - ReturnDataToLastTransferEnableEpoch: 15, - SenderInOutTransferEnableEpoch: 16, - RelayedTransactionsV2EnableEpoch: 25, - 
UnbondTokensV2EnableEpoch: 26, - SaveJailedAlwaysEnableEpoch: 27, - ValidatorToDelegationEnableEpoch: 29, - ReDelegateBelowMinCheckEnableEpoch: 28, - WaitingListFixEnableEpoch: 30, - IncrementSCRNonceInMultiTransferEnableEpoch: 31, - ESDTMultiTransferEnableEpoch: 32, - GlobalMintBurnDisableEpoch: 33, - ESDTTransferRoleEnableEpoch: 34, - BuiltInFunctionOnMetaEnableEpoch: 35, - ComputeRewardCheckpointEnableEpoch: 36, - SCRSizeInvariantCheckEnableEpoch: 37, - BackwardCompSaveKeyValueEnableEpoch: 38, - ESDTNFTCreateOnMultiShardEnableEpoch: 39, - MetaESDTSetEnableEpoch: 40, - AddTokensToDelegationEnableEpoch: 41, - MultiESDTTransferFixOnCallBackOnEnableEpoch: 42, - OptimizeGasUsedInCrossMiniBlocksEnableEpoch: 43, - FixOOGReturnCodeEnableEpoch: 44, - RemoveNonUpdatedStorageEnableEpoch: 45, - OptimizeNFTStoreEnableEpoch: 46, - CreateNFTThroughExecByCallerEnableEpoch: 47, - IsPayableBySCEnableEpoch: 48, - CleanUpInformativeSCRsEnableEpoch: 49, - StorageAPICostOptimizationEnableEpoch: 50, - TransformToMultiShardCreateEnableEpoch: 51, - ESDTRegisterAndSetAllRolesEnableEpoch: 52, - FailExecutionOnEveryAPIErrorEnableEpoch: 53, - ManagedCryptoAPIsEnableEpoch: 54, - ESDTMetadataContinuousCleanupEnableEpoch: 55, - FixAsyncCallBackArgsListEnableEpoch: 56, - FixOldTokenLiquidityEnableEpoch: 57, - SetSenderInEeiOutputTransferEnableEpoch: 58, - MaxBlockchainHookCountersEnableEpoch: 59, - WipeSingleNFTLiquidityDecreaseEnableEpoch: 60, - AlwaysSaveTokenMetaDataEnableEpoch: 61, - RuntimeCodeSizeFixEnableEpoch: 62, - RuntimeMemStoreLimitEnableEpoch: 63, - SetGuardianEnableEpoch: 64, - MultiClaimOnDelegationEnableEpoch: 65, - KeepExecOrderOnCreatedSCRsEnableEpoch: 64, + BlockGasAndFeesReCheckEnableEpoch: 13, + StakingV2EnableEpoch: 18, + StakeEnableEpoch: 17, + DoubleKeyProtectionEnableEpoch: 19, + ESDTEnableEpoch: 20, + GovernanceEnableEpoch: 21, + DelegationManagerEnableEpoch: 22, + DelegationSmartContractEnableEpoch: 23, + CorrectLastUnjailedEnableEpoch: 24, + BalanceWaitingListsEnableEpoch: 14, + ReturnDataToLastTransferEnableEpoch: 15, + SenderInOutTransferEnableEpoch: 16, + RelayedTransactionsV2EnableEpoch: 25, + UnbondTokensV2EnableEpoch: 26, + SaveJailedAlwaysEnableEpoch: 27, + ValidatorToDelegationEnableEpoch: 29, + ReDelegateBelowMinCheckEnableEpoch: 28, + WaitingListFixEnableEpoch: 30, + IncrementSCRNonceInMultiTransferEnableEpoch: 31, + ESDTMultiTransferEnableEpoch: 32, + GlobalMintBurnDisableEpoch: 33, + ESDTTransferRoleEnableEpoch: 34, + BuiltInFunctionOnMetaEnableEpoch: 35, + ComputeRewardCheckpointEnableEpoch: 36, + SCRSizeInvariantCheckEnableEpoch: 37, + BackwardCompSaveKeyValueEnableEpoch: 38, + ESDTNFTCreateOnMultiShardEnableEpoch: 39, + MetaESDTSetEnableEpoch: 40, + AddTokensToDelegationEnableEpoch: 41, + MultiESDTTransferFixOnCallBackOnEnableEpoch: 42, + OptimizeGasUsedInCrossMiniBlocksEnableEpoch: 43, + FixOOGReturnCodeEnableEpoch: 44, + RemoveNonUpdatedStorageEnableEpoch: 45, + OptimizeNFTStoreEnableEpoch: 46, + CreateNFTThroughExecByCallerEnableEpoch: 47, + IsPayableBySCEnableEpoch: 48, + CleanUpInformativeSCRsEnableEpoch: 49, + StorageAPICostOptimizationEnableEpoch: 50, + TransformToMultiShardCreateEnableEpoch: 51, + ESDTRegisterAndSetAllRolesEnableEpoch: 52, + FailExecutionOnEveryAPIErrorEnableEpoch: 53, + ManagedCryptoAPIsEnableEpoch: 54, + ESDTMetadataContinuousCleanupEnableEpoch: 55, + FixAsyncCallBackArgsListEnableEpoch: 56, + FixOldTokenLiquidityEnableEpoch: 57, + SetSenderInEeiOutputTransferEnableEpoch: 58, + MaxBlockchainHookCountersEnableEpoch: 59, + 
WipeSingleNFTLiquidityDecreaseEnableEpoch: 60, + AlwaysSaveTokenMetaDataEnableEpoch: 61, + RuntimeCodeSizeFixEnableEpoch: 62, + RuntimeMemStoreLimitEnableEpoch: 63, + SetGuardianEnableEpoch: 64, + KeepExecOrderOnCreatedSCRsEnableEpoch: 65, + MultiClaimOnDelegationEnableEpoch: 66, + ChangeUsernameEnableEpoch: 67, + ConsistentTokensValuesLengthCheckEnableEpoch: 68, BLSMultiSignerEnableEpoch: []MultiSignerConfig{ { EnableEpoch: 0, diff --git a/consensus/message.pb.go b/consensus/message.pb.go index fea3604fb71..bb28b0a277d 100644 --- a/consensus/message.pb.go +++ b/consensus/message.pb.go @@ -6,14 +6,13 @@ package consensus import ( bytes "bytes" fmt "fmt" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" io "io" math "math" math_bits "math/bits" reflect "reflect" strings "strings" - - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" ) // Reference imports to suppress errors if they are not otherwise used. diff --git a/consensus/mock/bootstrapperStub.go b/consensus/mock/bootstrapperStub.go index 171062a29f1..bd4a1b98bf2 100644 --- a/consensus/mock/bootstrapperStub.go +++ b/consensus/mock/bootstrapperStub.go @@ -11,7 +11,7 @@ type BootstrapperStub struct { CreateAndCommitEmptyBlockCalled func(uint32) (data.BodyHandler, data.HeaderHandler, error) AddSyncStateListenerCalled func(func(bool)) GetNodeStateCalled func() common.NodeState - StartSyncingBlocksCalled func() + StartSyncingBlocksCalled func() error } // CreateAndCommitEmptyBlock - @@ -40,8 +40,12 @@ func (boot *BootstrapperStub) GetNodeState() common.NodeState { } // StartSyncingBlocks - -func (boot *BootstrapperStub) StartSyncingBlocks() { - boot.StartSyncingBlocksCalled() +func (boot *BootstrapperStub) StartSyncingBlocks() error { + if boot.StartSyncingBlocksCalled != nil { + return boot.StartSyncingBlocksCalled() + } + + return nil } // Close - diff --git a/consensus/spos/bls/subroundStartRound.go b/consensus/spos/bls/subroundStartRound.go index a5c1f179609..344d26251e5 100644 --- a/consensus/spos/bls/subroundStartRound.go +++ b/consensus/spos/bls/subroundStartRound.go @@ -275,15 +275,15 @@ func (sr *subroundStartRound) indexRoundIfNeeded(pubKeys []string) { round := sr.RoundHandler().Index() roundInfo := &outportcore.RoundInfo{ - Index: uint64(round), + Round: uint64(round), SignersIndexes: signersIndexes, BlockWasProposed: false, ShardId: shardId, Epoch: epoch, - Timestamp: time.Duration(sr.RoundTimeStamp.Unix()), + Timestamp: uint64(sr.RoundTimeStamp.Unix()), } - sr.outportHandler.SaveRoundsInfo([]*outportcore.RoundInfo{roundInfo}) + sr.outportHandler.SaveRoundsInfo(&outportcore.RoundsInfo{RoundsInfo: []*outportcore.RoundInfo{roundInfo}}) } func (sr *subroundStartRound) generateNextConsensusGroup(roundIndex int64) error { diff --git a/dataRetriever/blockchain/blockchain_test.go b/dataRetriever/blockchain/blockchain_test.go index 3980d2723f7..212d2755adc 100644 --- a/dataRetriever/blockchain/blockchain_test.go +++ b/dataRetriever/blockchain/blockchain_test.go @@ -5,6 +5,7 @@ import ( "testing" "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-core-go/data/block" "github.com/multiversx/mx-chain-core-go/data/mock" "github.com/stretchr/testify/assert" @@ -81,3 +82,14 @@ func TestBlockChain_SettersAndGettersNilValues(t *testing.T) { assert.Nil(t, bc.GetCurrentBlockHeader()) assert.Empty(t, bc.GetCurrentBlockRootHash()) } + +func TestBlockChain_SettersInvalidValues(t *testing.T) { + t.Parallel() 
+ + bc, _ := NewBlockChain(&mock.AppStatusHandlerStub{}) + err := bc.SetGenesisHeader(&block.MetaBlock{}) + assert.Equal(t, err, data.ErrInvalidHeaderType) + + err = bc.SetCurrentBlockHeaderAndRootHash(&block.MetaBlock{}, []byte("root hash")) + assert.Equal(t, err, data.ErrInvalidHeaderType) +} diff --git a/dataRetriever/blockchain/metachain_test.go b/dataRetriever/blockchain/metachain_test.go index eb0f589f899..684aa95477c 100644 --- a/dataRetriever/blockchain/metachain_test.go +++ b/dataRetriever/blockchain/metachain_test.go @@ -82,3 +82,14 @@ func TestMetaChain_SettersAndGettersNilValues(t *testing.T) { assert.Nil(t, mc.GetCurrentBlockHeader()) assert.Empty(t, mc.GetCurrentBlockRootHash()) } + +func TestMetaChain_SettersInvalidValues(t *testing.T) { + t.Parallel() + + bc, _ := NewMetaChain(&mock.AppStatusHandlerStub{}) + err := bc.SetGenesisHeader(&block.Header{}) + assert.Equal(t, err, ErrWrongTypeInSet) + + err = bc.SetCurrentBlockHeaderAndRootHash(&block.Header{}, []byte("root hash")) + assert.Equal(t, err, ErrWrongTypeInSet) +} diff --git a/dataRetriever/chainStorer_test.go b/dataRetriever/chainStorer_test.go index 3b4373641af..73093ccde7e 100644 --- a/dataRetriever/chainStorer_test.go +++ b/dataRetriever/chainStorer_test.go @@ -239,6 +239,9 @@ func TestBlockChain_GetStorer(t *testing.T) { assert.True(t, peerBlockUnit == storer) storer, _ = b.GetStorer(4) assert.True(t, headerUnit == storer) + storer, err := b.GetStorer(5) + assert.True(t, errors.Is(err, dataRetriever.ErrStorerNotFound)) + assert.Nil(t, storer) } func TestBlockChain_GetAllStorers(t *testing.T) { diff --git a/dataRetriever/dataPool/dataPool_test.go b/dataRetriever/dataPool/dataPool_test.go index 2eb98aee5a6..11a94c5e488 100644 --- a/dataRetriever/dataPool/dataPool_test.go +++ b/dataRetriever/dataPool/dataPool_test.go @@ -209,6 +209,7 @@ func TestNewDataPool_OkValsShouldWork(t *testing.T) { assert.True(t, args.SmartContracts == tdp.SmartContracts()) assert.True(t, args.PeerAuthentications == tdp.PeerAuthentications()) assert.True(t, args.Heartbeats == tdp.Heartbeats()) + assert.True(t, args.ValidatorsInfo == tdp.ValidatorsInfo()) } func TestNewDataPool_Close(t *testing.T) { diff --git a/dataRetriever/dataPool/headersCache/headersCache.go b/dataRetriever/dataPool/headersCache/headersCache.go index bba024f30db..4b1ef31d8d9 100644 --- a/dataRetriever/dataPool/headersCache/headersCache.go +++ b/dataRetriever/dataPool/headersCache/headersCache.go @@ -53,7 +53,7 @@ func (cache *headersCache) addHeader(headerHash []byte, header data.HeaderHandle return true } -//tryToDoEviction will check if pool is full and if it is will do eviction +// tryToDoEviction will check if pool is full and if so, it will do the eviction func (cache *headersCache) tryToDoEviction(shardId uint32) { numHeaders := cache.getNumHeaders(shardId) if int(numHeaders) >= cache.maxHeadersPerShard { diff --git a/dataRetriever/dataPool/headersCache/headersPool_test.go b/dataRetriever/dataPool/headersCache/headersPool_test.go index 3865c6b9c47..2b2fb4cf3c6 100644 --- a/dataRetriever/dataPool/headersCache/headersPool_test.go +++ b/dataRetriever/dataPool/headersCache/headersPool_test.go @@ -1,6 +1,7 @@ package headersCache_test import ( + "errors" "fmt" "sort" "sync" @@ -16,6 +17,45 @@ import ( "github.com/stretchr/testify/require" ) +func TestNewHeadersCacher(t *testing.T) { + t.Parallel() + + t.Run("invalid MaxHeadersPerShard should error", testNewHeadersCacher( + config.HeadersPoolConfig{ + MaxHeadersPerShard: 0, + })) + t.Run("invalid 
NumElementsToRemoveOnEviction should error", testNewHeadersCacher( + config.HeadersPoolConfig{ + MaxHeadersPerShard: 1, + NumElementsToRemoveOnEviction: 0, + })) + t.Run("invalid config should error", testNewHeadersCacher( + config.HeadersPoolConfig{ + MaxHeadersPerShard: 1, + NumElementsToRemoveOnEviction: 3, + })) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + headersCacher, err := headersCache.NewHeadersPool(config.HeadersPoolConfig{ + MaxHeadersPerShard: 2, + NumElementsToRemoveOnEviction: 1, + }) + require.NoError(t, err) + require.NotNil(t, headersCacher) + }) +} + +func testNewHeadersCacher(cfg config.HeadersPoolConfig) func(t *testing.T) { + return func(t *testing.T) { + t.Parallel() + + headersCacher, err := headersCache.NewHeadersPool(cfg) + require.True(t, errors.Is(err, headersCache.ErrInvalidHeadersCacheParameter)) + require.Nil(t, headersCacher) + } +} + func TestNewHeadersCacher_AddHeadersInCache(t *testing.T) { t.Parallel() @@ -28,11 +68,16 @@ func TestNewHeadersCacher_AddHeadersInCache(t *testing.T) { nonce := uint64(1) shardId := uint32(0) + headers, _, err := headersCacher.GetHeadersByNonceAndShardId(nonce, shardId) + require.Equal(t, headersCache.ErrHeaderNotFound, err) + require.Nil(t, headers) + headerHash1 := []byte("hash1") headerHash2 := []byte("hash2") testHdr1 := &block.Header{Nonce: nonce, ShardID: shardId} testHdr2 := &block.Header{Nonce: nonce, ShardID: shardId, Round: 100} + headersCacher.AddHeader([]byte("nil header hash"), nil) // coverage headersCacher.AddHeader(headerHash1, testHdr1) headersCacher.AddHeader(headerHash2, testHdr2) @@ -45,7 +90,7 @@ func TestNewHeadersCacher_AddHeadersInCache(t *testing.T) { require.Equal(t, testHdr2, header) expectedHeaders := []data.HeaderHandler{testHdr1, testHdr2} - headers, _, err := headersCacher.GetHeadersByNonceAndShardId(nonce, shardId) + headers, _, err = headersCacher.GetHeadersByNonceAndShardId(nonce, shardId) require.Nil(t, err) require.Equal(t, expectedHeaders, headers) } @@ -70,6 +115,8 @@ func Test_RemoveHeaderByHash(t *testing.T) { headersCacher.AddHeader(headerHash1, testHdr1) headersCacher.AddHeader(headerHash2, testHdr2) + headersCacher.RemoveHeaderByHash([]byte("")) + headersCacher.RemoveHeaderByHash([]byte("missing hash")) headersCacher.RemoveHeaderByHash(headerHash1) header, err := headersCacher.GetHeaderByHash(headerHash1) require.Nil(t, header) @@ -101,6 +148,8 @@ func TestHeadersCacher_AddHeadersInCacheAndRemoveByNonceAndShardId(t *testing.T) headersCacher.AddHeader(headerHash1, testHdr1) headersCacher.AddHeader(headerHash2, testHdr2) + headersCacher.RemoveHeaderByNonceAndShardId(nonce, 100) + headersCacher.RemoveHeaderByNonceAndShardId(100, shardId) headersCacher.RemoveHeaderByNonceAndShardId(nonce, shardId) header, err := headersCacher.GetHeaderByHash(headerHash1) require.Nil(t, header) @@ -577,6 +626,7 @@ func TestHeadersPool_RegisterHandler(t *testing.T) { wasCalled = true wg.Done() } + headersCacher.RegisterHandler(nil) headersCacher.RegisterHandler(handler) header, hash := createASliceOfHeaders(1, 0) headersCacher.AddHeader(hash[0], &header[0]) @@ -603,6 +653,25 @@ func TestHeadersPool_Clear(t *testing.T) { require.Equal(t, 0, headersCacher.GetNumHeaders(0)) } +func TestHeadersPool_IsInterfaceNil(t *testing.T) { + t.Parallel() + + headersCacher, _ := headersCache.NewHeadersPool( + config.HeadersPoolConfig{ + MaxHeadersPerShard: 0, + }, + ) + require.True(t, headersCacher.IsInterfaceNil()) + + headersCacher, _ = headersCache.NewHeadersPool( + config.HeadersPoolConfig{ + 
MaxHeadersPerShard: 1000, + NumElementsToRemoveOnEviction: 10, + }, + ) + require.False(t, headersCacher.IsInterfaceNil()) +} + func createASliceOfHeaders(numHeaders int, shardId uint32) ([]block.Header, [][]byte) { headers := make([]block.Header, 0) headersHashes := make([][]byte, 0) diff --git a/dataRetriever/factory/containers/resolversContainer_test.go b/dataRetriever/factory/containers/resolversContainer_test.go index 969a93d6034..cf3dcfbe8d8 100644 --- a/dataRetriever/factory/containers/resolversContainer_test.go +++ b/dataRetriever/factory/containers/resolversContainer_test.go @@ -251,7 +251,7 @@ func TestResolversContainer_IterateNilHandlerShouldNotPanic(t *testing.T) { defer func() { r := recover() if r != nil { - assert.Fail(t, "should not have paniced") + assert.Fail(t, "should not have panicked") } }() @@ -269,7 +269,7 @@ func TestResolversContainer_IterateNotAValidKeyShouldWorkAndNotPanic(t *testing. defer func() { r := recover() if r != nil { - assert.Fail(t, "should not have paniced") + assert.Fail(t, "should not have panicked") } }() @@ -292,7 +292,7 @@ func TestResolversContainer_IterateNotAValidValueShouldWorkAndNotPanic(t *testin defer func() { r := recover() if r != nil { - assert.Fail(t, "should not have paniced") + assert.Fail(t, "should not have panicked") } }() diff --git a/dataRetriever/factory/epochProviders/currentEpochProvidersFactory_test.go b/dataRetriever/factory/epochProviders/currentEpochProvidersFactory_test.go index 236c47e4c35..7335f591826 100644 --- a/dataRetriever/factory/epochProviders/currentEpochProvidersFactory_test.go +++ b/dataRetriever/factory/epochProviders/currentEpochProvidersFactory_test.go @@ -25,7 +25,7 @@ func TestCreateCurrentEpochProvider_NilCurrentEpochProvider(t *testing.T) { assert.IsType(t, disabled.NewEpochProvider(), cnep) } -func TestCreateCurrentEpochProvider_ArithemticEpochProvider(t *testing.T) { +func TestCreateCurrentEpochProvider_ArithmeticEpochProvider(t *testing.T) { t.Parallel() cnep, err := CreateCurrentEpochProvider( diff --git a/dataRetriever/factory/resolverscontainer/metaResolversContainerFactory.go b/dataRetriever/factory/resolverscontainer/metaResolversContainerFactory.go index 2d6fd0d3bf6..889481e9fde 100644 --- a/dataRetriever/factory/resolverscontainer/metaResolversContainerFactory.go +++ b/dataRetriever/factory/resolverscontainer/metaResolversContainerFactory.go @@ -8,7 +8,6 @@ import ( "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/dataRetriever/factory/containers" "github.com/multiversx/mx-chain-go/dataRetriever/resolvers" - triesFactory "github.com/multiversx/mx-chain-go/trie/factory" "github.com/multiversx/mx-chain-core-go/marshal" "github.com/multiversx/mx-chain-go/process/factory" @@ -142,7 +141,7 @@ func (mrcf *metaResolversContainerFactory) AddShardTrieNodeResolvers(container d identifierTrieNodes := factory.AccountTrieNodesTopic + shardC.CommunicationIdentifier(idx) resolver, err := mrcf.createTrieNodesResolver( identifierTrieNodes, - triesFactory.UserAccountTrie, + dataRetriever.UserAccountsUnit.String(), idx, ) if err != nil { @@ -295,7 +294,7 @@ func (mrcf *metaResolversContainerFactory) generateTrieNodesResolvers() error { identifierTrieNodes := factory.AccountTrieNodesTopic + core.CommunicationIdentifierBetweenShards(core.MetachainShardId, core.MetachainShardId) resolver, err := mrcf.createTrieNodesResolver( identifierTrieNodes, - triesFactory.UserAccountTrie, + dataRetriever.UserAccountsUnit.String(), core.MetachainShardId, ) if err != nil { @@ -308,7 +307,7 
@@ func (mrcf *metaResolversContainerFactory) generateTrieNodesResolvers() error { identifierTrieNodes = factory.ValidatorTrieNodesTopic + core.CommunicationIdentifierBetweenShards(core.MetachainShardId, core.MetachainShardId) resolver, err = mrcf.createTrieNodesResolver( identifierTrieNodes, - triesFactory.PeerAccountTrie, + dataRetriever.PeerAccountsUnit.String(), core.MetachainShardId, ) if err != nil { diff --git a/dataRetriever/factory/resolverscontainer/metaResolversContainerFactory_test.go b/dataRetriever/factory/resolverscontainer/metaResolversContainerFactory_test.go index 3c7c8ee020d..bb396cbcb7b 100644 --- a/dataRetriever/factory/resolverscontainer/metaResolversContainerFactory_test.go +++ b/dataRetriever/factory/resolverscontainer/metaResolversContainerFactory_test.go @@ -5,6 +5,7 @@ import ( "strings" "testing" + "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/dataRetriever/factory/resolverscontainer" @@ -18,7 +19,6 @@ import ( "github.com/multiversx/mx-chain-go/testscommon/p2pmocks" storageStubs "github.com/multiversx/mx-chain-go/testscommon/storage" trieMock "github.com/multiversx/mx-chain-go/testscommon/trie" - triesFactory "github.com/multiversx/mx-chain-go/trie/factory" "github.com/stretchr/testify/assert" ) @@ -82,13 +82,24 @@ func createStoreForMeta() dataRetriever.StorageService { func createTriesHolderForMeta() common.TriesHolder { triesHolder := state.NewDataTriesHolder() - triesHolder.Put([]byte(triesFactory.UserAccountTrie), &trieMock.TrieStub{}) - triesHolder.Put([]byte(triesFactory.PeerAccountTrie), &trieMock.TrieStub{}) + triesHolder.Put([]byte(dataRetriever.UserAccountsUnit.String()), &trieMock.TrieStub{}) + triesHolder.Put([]byte(dataRetriever.PeerAccountsUnit.String()), &trieMock.TrieStub{}) return triesHolder } // ------- NewResolversContainerFactory +func TestNewMetaResolversContainerFactory_NewNumGoRoutinesThrottlerFailsShouldErr(t *testing.T) { + t.Parallel() + + args := getArgumentsMeta() + args.NumConcurrentResolvingJobs = 0 + rcf, err := resolverscontainer.NewMetaResolversContainerFactory(args) + + assert.Nil(t, rcf) + assert.Equal(t, core.ErrNotPositiveValue, err) +} + func TestNewMetaResolversContainerFactory_NilShardCoordinatorShouldErr(t *testing.T) { t.Parallel() @@ -281,6 +292,18 @@ func TestMetaResolversContainerFactory_With4ShardsShouldWork(t *testing.T) { assert.Equal(t, totalResolvers+noOfShards, container.Len()) } +func TestMetaResolversContainerFactory_IsInterfaceNil(t *testing.T) { + t.Parallel() + + args := getArgumentsMeta() + args.ShardCoordinator = nil + rcf, _ := resolverscontainer.NewMetaResolversContainerFactory(args) + assert.True(t, rcf.IsInterfaceNil()) + + rcf, _ = resolverscontainer.NewMetaResolversContainerFactory(getArgumentsMeta()) + assert.False(t, rcf.IsInterfaceNil()) +} + func getArgumentsMeta() resolverscontainer.FactoryArgs { return resolverscontainer.FactoryArgs{ ShardCoordinator: mock.NewOneShardCoordinatorMock(), diff --git a/dataRetriever/factory/resolverscontainer/shardResolversContainerFactory.go b/dataRetriever/factory/resolverscontainer/shardResolversContainerFactory.go index 4d644cf78c2..7a4fb1a282a 100644 --- a/dataRetriever/factory/resolverscontainer/shardResolversContainerFactory.go +++ b/dataRetriever/factory/resolverscontainer/shardResolversContainerFactory.go @@ -9,7 +9,6 @@ import ( "github.com/multiversx/mx-chain-go/dataRetriever/factory/containers" 
"github.com/multiversx/mx-chain-go/dataRetriever/resolvers" "github.com/multiversx/mx-chain-go/process/factory" - triesFactory "github.com/multiversx/mx-chain-go/trie/factory" ) var _ dataRetriever.ResolversContainerFactory = (*shardResolversContainerFactory)(nil) @@ -232,7 +231,7 @@ func (srcf *shardResolversContainerFactory) generateTrieNodesResolvers() error { identifierTrieNodes := factory.AccountTrieNodesTopic + shardC.CommunicationIdentifier(core.MetachainShardId) resolver, err := srcf.createTrieNodesResolver( identifierTrieNodes, - triesFactory.UserAccountTrie, + dataRetriever.UserAccountsUnit.String(), core.MetachainShardId, ) if err != nil { diff --git a/dataRetriever/factory/resolverscontainer/shardResolversContainerFactory_test.go b/dataRetriever/factory/resolverscontainer/shardResolversContainerFactory_test.go index f55fe63774e..51195e6c5a8 100644 --- a/dataRetriever/factory/resolverscontainer/shardResolversContainerFactory_test.go +++ b/dataRetriever/factory/resolverscontainer/shardResolversContainerFactory_test.go @@ -5,6 +5,7 @@ import ( "strings" "testing" + "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/dataRetriever/factory/resolverscontainer" @@ -18,7 +19,6 @@ import ( "github.com/multiversx/mx-chain-go/testscommon/p2pmocks" storageStubs "github.com/multiversx/mx-chain-go/testscommon/storage" trieMock "github.com/multiversx/mx-chain-go/testscommon/trie" - triesFactory "github.com/multiversx/mx-chain-go/trie/factory" "github.com/stretchr/testify/assert" ) @@ -88,13 +88,24 @@ func createStoreForShard() dataRetriever.StorageService { func createTriesHolderForShard() common.TriesHolder { triesHolder := state.NewDataTriesHolder() - triesHolder.Put([]byte(triesFactory.UserAccountTrie), &trieMock.TrieStub{}) - triesHolder.Put([]byte(triesFactory.PeerAccountTrie), &trieMock.TrieStub{}) + triesHolder.Put([]byte(dataRetriever.UserAccountsUnit.String()), &trieMock.TrieStub{}) + triesHolder.Put([]byte(dataRetriever.PeerAccountsUnit.String()), &trieMock.TrieStub{}) return triesHolder } // ------- NewResolversContainerFactory +func TestNewShardResolversContainerFactory_NewNumGoRoutinesThrottlerFailsShouldErr(t *testing.T) { + t.Parallel() + + args := getArgumentsShard() + args.NumConcurrentResolvingJobs = 0 + rcf, err := resolverscontainer.NewShardResolversContainerFactory(args) + + assert.Nil(t, rcf) + assert.Equal(t, core.ErrNotPositiveValue, err) +} + func TestNewShardResolversContainerFactory_NilShardCoordinatorShouldErr(t *testing.T) { t.Parallel() @@ -337,6 +348,18 @@ func TestShardResolversContainerFactory_With4ShardsShouldWork(t *testing.T) { assert.Equal(t, totalResolvers, container.Len()) } +func TestShardResolversContainerFactory_IsInterfaceNil(t *testing.T) { + t.Parallel() + + args := getArgumentsShard() + args.ShardCoordinator = nil + rcf, _ := resolverscontainer.NewShardResolversContainerFactory(args) + assert.True(t, rcf.IsInterfaceNil()) + + rcf, _ = resolverscontainer.NewShardResolversContainerFactory(getArgumentsMeta()) + assert.False(t, rcf.IsInterfaceNil()) +} + func getArgumentsShard() resolverscontainer.FactoryArgs { return resolverscontainer.FactoryArgs{ ShardCoordinator: mock.NewOneShardCoordinatorMock(), diff --git a/dataRetriever/factory/storageRequestersContainer/baseRequestersContainerFactory.go b/dataRetriever/factory/storageRequestersContainer/baseRequestersContainerFactory.go index 0157ca5c634..d7407bdb1ba 100644 --- 
a/dataRetriever/factory/storageRequestersContainer/baseRequestersContainerFactory.go +++ b/dataRetriever/factory/storageRequestersContainer/baseRequestersContainerFactory.go @@ -231,6 +231,7 @@ func (brcf *baseRequestersContainerFactory) createMiniBlocksRequester(responseTo func (brcf *baseRequestersContainerFactory) newImportDBTrieStorage( mainStorer storage.Storer, checkpointsStorer storage.Storer, + storageIdentifier dataRetriever.UnitType, ) (common.StorageManager, dataRetriever.TrieDataGetter, error) { pathManager, err := storageFactory.CreatePathManager( storageFactory.ArgCreatePathManager{ @@ -261,6 +262,7 @@ func (brcf *baseRequestersContainerFactory) newImportDBTrieStorage( MaxTrieLevelInMem: brcf.generalConfig.StateTriesConfig.MaxStateTrieLevelInMemory, SnapshotsEnabled: brcf.snapshotsEnabled, IdleProvider: disabled.NewProcessStatusHandler(), + Identifier: storageIdentifier.String(), } return trieFactoryInstance.Create(args) } diff --git a/dataRetriever/factory/storageRequestersContainer/metaRequestersContainerFactory.go b/dataRetriever/factory/storageRequestersContainer/metaRequestersContainerFactory.go index 498d02cc1b3..092ef541a5c 100644 --- a/dataRetriever/factory/storageRequestersContainer/metaRequestersContainerFactory.go +++ b/dataRetriever/factory/storageRequestersContainer/metaRequestersContainerFactory.go @@ -194,6 +194,7 @@ func (mrcf *metaRequestersContainerFactory) generateTrieNodesRequesters() error storageManager, userAccountsDataTrie, err := mrcf.newImportDBTrieStorage( userAccountsStorer, userAccountsCheckpointStorer, + dataRetriever.UserAccountsUnit, ) if err != nil { return fmt.Errorf("%w while creating user accounts data trie storage getter", err) @@ -230,6 +231,7 @@ func (mrcf *metaRequestersContainerFactory) generateTrieNodesRequesters() error storageManager, peerAccountsDataTrie, err := mrcf.newImportDBTrieStorage( peerAccountsStorer, peerAccountsCheckpointStorer, + dataRetriever.PeerAccountsUnit, ) if err != nil { return fmt.Errorf("%w while creating peer accounts data trie storage getter", err) diff --git a/dataRetriever/factory/storageRequestersContainer/shardRequestersContainerFactory.go b/dataRetriever/factory/storageRequestersContainer/shardRequestersContainerFactory.go index f1298ae1391..dcf0acf6583 100644 --- a/dataRetriever/factory/storageRequestersContainer/shardRequestersContainerFactory.go +++ b/dataRetriever/factory/storageRequestersContainer/shardRequestersContainerFactory.go @@ -169,6 +169,7 @@ func (srcf *shardRequestersContainerFactory) generateTrieNodesRequesters() error storageManager, userAccountsDataTrie, err := srcf.newImportDBTrieStorage( userAccountsStorer, userAccountsCheckpointStorer, + dataRetriever.UserAccountsUnit, ) if err != nil { return fmt.Errorf("%w while creating user accounts data trie storage getter", err) diff --git a/dataRetriever/interface.go b/dataRetriever/interface.go index 88ba134aa6c..77f59710677 100644 --- a/dataRetriever/interface.go +++ b/dataRetriever/interface.go @@ -316,7 +316,7 @@ type WhiteListHandler interface { IsInterfaceNil() bool } -// DebugHandler defines an interface for debugging the reqested-resolved data +// DebugHandler defines an interface for debugging the requested-resolved data type DebugHandler interface { LogRequestedData(topic string, hashes [][]byte, numReqIntra int, numReqCross int) LogFailedToResolveData(topic string, hash []byte, err error) diff --git a/dataRetriever/mock/marshalizerMock.go b/dataRetriever/mock/marshalizerMock.go index 5299a5bb257..550359b01ca 100644 --- 
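For context on the hunks above: newImportDBTrieStorage now takes a storageIdentifier dataRetriever.UnitType and forwards its String() value as the trie factory's new Identifier field, so the user-accounts and peer-accounts import-DB trie storages are named by the storage unit that owns them (dataRetriever.UserAccountsUnit and dataRetriever.PeerAccountsUnit at the call sites) rather than by the removed trie-factory constants. A rough sketch of that flow with toy types; only the Identifier field is modeled, everything else is omitted:

package sketch

import "fmt"

// unitType is a toy stand-in for dataRetriever.UnitType: a typed constant
// whose String() result names a storage unit.
type unitType int

const (
	userAccountsUnit unitType = iota
	peerAccountsUnit
)

func (u unitType) String() string {
	if u == userAccountsUnit {
		return "UserAccountsUnit"
	}
	return "PeerAccountsUnit"
}

// trieCreateArgs models only the field this patch adds to the trie factory args.
type trieCreateArgs struct{ Identifier string }

// newImportDBTrieStorage mirrors `Identifier: storageIdentifier.String()` from
// the hunk above: the caller decides which unit the trie storage belongs to.
func newImportDBTrieStorage(storageIdentifier fmt.Stringer) trieCreateArgs {
	return trieCreateArgs{Identifier: storageIdentifier.String()}
}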
a/dataRetriever/mock/marshalizerMock.go +++ b/dataRetriever/mock/marshalizerMock.go @@ -32,7 +32,7 @@ func (mm *MarshalizerMock) Unmarshal(obj interface{}, buff []byte) error { } if obj == nil { - return errors.New("nil object to serilize to") + return errors.New("nil object to serialize to") } if buff == nil { diff --git a/dataRetriever/provider/miniBlocks_test.go b/dataRetriever/provider/miniBlocks_test.go index c935345363d..3ccbeba3490 100644 --- a/dataRetriever/provider/miniBlocks_test.go +++ b/dataRetriever/provider/miniBlocks_test.go @@ -2,6 +2,7 @@ package provider_test import ( "bytes" + "errors" "fmt" "testing" @@ -240,13 +241,24 @@ func TestMiniBlockProvider_GetMiniBlocksFromStorerShouldBeFoundInStorage(t *test existingHashes := [][]byte{ []byte("hash1"), []byte("hash2"), + []byte("hash3"), } requestedHashes := existingHashes + cnt := 0 arg := createMockMiniblockProviderArgs(nil, existingHashes) + arg.Marshalizer = &testscommon.MarshalizerStub{ + UnmarshalCalled: func(obj interface{}, buff []byte) error { + cnt++ + if cnt == 1 { + return errors.New("unmarshal fails for coverage") + } + return nil + }, + } mbp, _ := provider.NewMiniBlockProvider(arg) miniBlocksAndHashes, missingHashes := mbp.GetMiniBlocksFromStorer(requestedHashes) assert.Equal(t, 2, len(miniBlocksAndHashes)) - assert.Equal(t, 0, len(missingHashes)) + assert.Equal(t, 1, len(missingHashes)) } diff --git a/dataRetriever/requestHandlers/requestHandler_test.go b/dataRetriever/requestHandlers/requestHandler_test.go index 0c9abb97036..48d27f46217 100644 --- a/dataRetriever/requestHandlers/requestHandler_test.go +++ b/dataRetriever/requestHandlers/requestHandler_test.go @@ -37,711 +37,909 @@ func createRequestersFinderStubThatShouldNotBeCalled(tb testing.TB) *dataRetriev } } -func TestNewResolverRequestHandlerNilFinder(t *testing.T) { +func TestNewResolverRequestHandler(t *testing.T) { t.Parallel() - rrh, err := NewResolverRequestHandler( - nil, - &mock.RequestedItemsHandlerStub{}, - &mock.WhiteListHandlerStub{}, - 1, - 0, - time.Second, - ) + t.Run("nil finder should error", func(t *testing.T) { + t.Parallel() - assert.Nil(t, rrh) - assert.Equal(t, dataRetriever.ErrNilRequestersFinder, err) -} + rrh, err := NewResolverRequestHandler( + nil, + &mock.RequestedItemsHandlerStub{}, + &mock.WhiteListHandlerStub{}, + 1, + 0, + time.Second, + ) -func TestNewResolverRequestHandlerNilRequestedItemsHandler(t *testing.T) { - t.Parallel() + assert.Nil(t, rrh) + assert.Equal(t, dataRetriever.ErrNilRequestersFinder, err) + }) + t.Run("nil requested items handler should error", func(t *testing.T) { + t.Parallel() - rrh, err := NewResolverRequestHandler( - &dataRetrieverMocks.RequestersFinderStub{}, - nil, - &mock.WhiteListHandlerStub{}, - 1, - 0, - time.Second, - ) + rrh, err := NewResolverRequestHandler( + &dataRetrieverMocks.RequestersFinderStub{}, + nil, + &mock.WhiteListHandlerStub{}, + 1, + 0, + time.Second, + ) - assert.Nil(t, rrh) - assert.Equal(t, dataRetriever.ErrNilRequestedItemsHandler, err) -} + assert.Nil(t, rrh) + assert.Equal(t, dataRetriever.ErrNilRequestedItemsHandler, err) + }) + t.Run("nil whitelist handler should error", func(t *testing.T) { + t.Parallel() -func TestNewResolverRequestHandlerMaxTxRequestTooSmall(t *testing.T) { - t.Parallel() + rrh, err := NewResolverRequestHandler( + &dataRetrieverMocks.RequestersFinderStub{}, + &mock.RequestedItemsHandlerStub{}, + nil, + 1, + 0, + time.Second, + ) - rrh, err := NewResolverRequestHandler( - &dataRetrieverMocks.RequestersFinderStub{}, - 
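From this point on, most of the requestHandler_test.go churn is one mechanical refactor applied repeatedly: the per-scenario TestResolverRequestHandler_<Scenario> functions are folded into a single test per production method with parallel t.Run subtests, ad-hoc local errors are replaced by the shared errExpected, and stubs gain require.Fail guards on paths that must never be reached. A condensed, hypothetical example of the resulting shape (names are illustrative only):

package sketch

import "testing"

// One Test function per production method; scenarios become parallel subtests.
func TestSomeMethod(t *testing.T) {
	t.Parallel()

	t.Run("dependency returns error should not panic", func(t *testing.T) {
		t.Parallel()
		// arrange stubs that return errExpected, call the method, assert no panic
	})
	t.Run("should work", func(t *testing.T) {
		t.Parallel()
		// arrange happy-path stubs, call the method, assert the request was issued
	})
}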
&mock.RequestedItemsHandlerStub{}, - &mock.WhiteListHandlerStub{}, - 0, - 0, - time.Second, - ) + assert.Nil(t, rrh) + assert.Equal(t, dataRetriever.ErrNilWhiteListHandler, err) + }) + t.Run("invalid max txs to request should error", func(t *testing.T) { + t.Parallel() - assert.Nil(t, rrh) - assert.Equal(t, dataRetriever.ErrInvalidMaxTxRequest, err) -} + rrh, err := NewResolverRequestHandler( + &dataRetrieverMocks.RequestersFinderStub{}, + &mock.RequestedItemsHandlerStub{}, + &mock.WhiteListHandlerStub{}, + 0, + 0, + time.Second, + ) -func TestNewResolverRequestHandler(t *testing.T) { - t.Parallel() + assert.Nil(t, rrh) + assert.Equal(t, dataRetriever.ErrInvalidMaxTxRequest, err) + }) + t.Run("invalid request interval should error", func(t *testing.T) { + t.Parallel() - rrh, err := NewResolverRequestHandler( - &dataRetrieverMocks.RequestersFinderStub{}, - &mock.RequestedItemsHandlerStub{}, - &mock.WhiteListHandlerStub{}, - 1, - 0, - time.Second, - ) + rrh, err := NewResolverRequestHandler( + &dataRetrieverMocks.RequestersFinderStub{}, + &mock.RequestedItemsHandlerStub{}, + &mock.WhiteListHandlerStub{}, + 1, + 0, + time.Millisecond-time.Nanosecond, + ) + + assert.Nil(t, rrh) + assert.True(t, errors.Is(err, dataRetriever.ErrRequestIntervalTooSmall)) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + rrh, err := NewResolverRequestHandler( + &dataRetrieverMocks.RequestersFinderStub{}, + &mock.RequestedItemsHandlerStub{}, + &mock.WhiteListHandlerStub{}, + 1, + 0, + time.Second, + ) - assert.Nil(t, err) - assert.NotNil(t, rrh) + assert.Nil(t, err) + assert.NotNil(t, rrh) + }) } -func TestResolverRequestHandler_RequestTransactionErrorWhenGettingCrossShardRequesterShouldNotPanic(t *testing.T) { +func TestResolverRequestHandler_RequestTransaction(t *testing.T) { t.Parallel() - defer func() { - r := recover() - if r != nil { - assert.Fail(t, "should not panic") - } - }() + t.Run("no hash should not panic", func(t *testing.T) { + t.Parallel() - rrh, _ := NewResolverRequestHandler( - &dataRetrieverMocks.RequestersFinderStub{ - CrossShardRequesterCalled: func(baseTopic string, crossShard uint32) (requester dataRetriever.Requester, e error) { - return nil, errExpected + defer func() { + r := recover() + if r != nil { + assert.Fail(t, "should not panic") + } + }() + + rrh, _ := NewResolverRequestHandler( + &dataRetrieverMocks.RequestersFinderStub{ + CrossShardRequesterCalled: func(baseTopic string, crossShard uint32) (requester dataRetriever.Requester, e error) { + require.Fail(t, "should have not been called") + return nil, nil + }, }, - }, - &mock.RequestedItemsHandlerStub{}, - &mock.WhiteListHandlerStub{}, - 1, - 0, - time.Second, - ) + &mock.RequestedItemsHandlerStub{}, + &mock.WhiteListHandlerStub{}, + 1, + 0, + time.Second, + ) - rrh.RequestTransaction(0, make([][]byte, 0)) -} + rrh.RequestTransaction(0, make([][]byte, 0)) + }) + t.Run("error when getting cross shard requester should not panic", func(t *testing.T) { + t.Parallel() -func TestResolverRequestHandler_RequestTransactionWrongResolverShouldNotPanic(t *testing.T) { - t.Parallel() + defer func() { + r := recover() + if r != nil { + assert.Fail(t, "should not panic") + } + }() - defer func() { - r := recover() - if r != nil { - assert.Fail(t, "should not panic") - } - }() + rrh, _ := NewResolverRequestHandler( + &dataRetrieverMocks.RequestersFinderStub{ + CrossShardRequesterCalled: func(baseTopic string, crossShard uint32) (requester dataRetriever.Requester, e error) { + return nil, errExpected + }, + }, + 
&mock.RequestedItemsHandlerStub{}, + &mock.WhiteListHandlerStub{}, + 1, + 0, + time.Second, + ) - wrongTxRequester := &dataRetrieverMocks.NonceRequesterStub{} + rrh.RequestTransaction(0, [][]byte{[]byte("txHash")}) + }) + t.Run("uncastable requester should not panic", func(t *testing.T) { + t.Parallel() - rrh, _ := NewResolverRequestHandler( - &dataRetrieverMocks.RequestersFinderStub{ - CrossShardRequesterCalled: func(baseTopic string, crossShard uint32) (requester dataRetriever.Requester, e error) { - return wrongTxRequester, nil + defer func() { + r := recover() + if r != nil { + assert.Fail(t, "should not panic") + } + }() + + wrongTxRequester := &dataRetrieverMocks.NonceRequesterStub{} + + rrh, _ := NewResolverRequestHandler( + &dataRetrieverMocks.RequestersFinderStub{ + CrossShardRequesterCalled: func(baseTopic string, crossShard uint32) (requester dataRetriever.Requester, e error) { + return wrongTxRequester, nil + }, }, - }, - &mock.RequestedItemsHandlerStub{}, - &mock.WhiteListHandlerStub{}, - 1, - 0, - time.Second, - ) + &mock.RequestedItemsHandlerStub{}, + &mock.WhiteListHandlerStub{}, + 1, + 0, + time.Second, + ) - rrh.RequestTransaction(0, make([][]byte, 0)) -} + rrh.RequestTransaction(0, [][]byte{[]byte("txHash")}) + }) + t.Run("should request", func(t *testing.T) { + t.Parallel() -func TestResolverRequestHandler_RequestTransactionShouldRequestTransactions(t *testing.T) { - t.Parallel() + chTxRequested := make(chan struct{}) + txRequester := &dataRetrieverMocks.HashSliceRequesterStub{ + RequestDataFromHashArrayCalled: func(hashes [][]byte, epoch uint32) error { + chTxRequested <- struct{}{} + return nil + }, + } - chTxRequested := make(chan struct{}) - txRequester := &dataRetrieverMocks.HashSliceRequesterStub{ - RequestDataFromHashArrayCalled: func(hashes [][]byte, epoch uint32) error { - chTxRequested <- struct{}{} - return nil - }, - } + rrh, _ := NewResolverRequestHandler( + &dataRetrieverMocks.RequestersFinderStub{ + CrossShardRequesterCalled: func(baseTopic string, crossShard uint32) (requester dataRetriever.Requester, e error) { + return txRequester, nil + }, + }, + &mock.RequestedItemsHandlerStub{}, + &mock.WhiteListHandlerStub{}, + 1, + 0, + time.Second, + ) - rrh, _ := NewResolverRequestHandler( - &dataRetrieverMocks.RequestersFinderStub{ - CrossShardRequesterCalled: func(baseTopic string, crossShard uint32) (requester dataRetriever.Requester, e error) { - return txRequester, nil + rrh.RequestTransaction(0, [][]byte{[]byte("txHash")}) + + select { + case <-chTxRequested: + case <-time.After(timeoutSendRequests): + assert.Fail(t, "timeout while waiting to call RequestDataFromHashArray") + } + + time.Sleep(time.Second) + }) + t.Run("should request 4 times if different shards", func(t *testing.T) { + t.Parallel() + + numRequests := uint32(0) + txRequester := &dataRetrieverMocks.HashSliceRequesterStub{ + RequestDataFromHashArrayCalled: func(hashes [][]byte, epoch uint32) error { + atomic.AddUint32(&numRequests, 1) + return nil }, - }, - &mock.RequestedItemsHandlerStub{}, - &mock.WhiteListHandlerStub{}, - 1, - 0, - time.Second, - ) + } - rrh.RequestTransaction(0, [][]byte{[]byte("txHash")}) + timeSpan := time.Second + timeCache := cache.NewTimeCache(timeSpan) + rrh, _ := NewResolverRequestHandler( + &dataRetrieverMocks.RequestersFinderStub{ + CrossShardRequesterCalled: func(baseTopic string, crossShard uint32) (requester dataRetriever.Requester, e error) { + return txRequester, nil + }, + }, + timeCache, + &mock.WhiteListHandlerStub{}, + 1, + 0, + time.Second, + ) - select 
{ - case <-chTxRequested: - case <-time.After(timeoutSendRequests): - assert.Fail(t, "timeout while waiting to call RequestDataFromHashArray") - } + rrh.RequestTransaction(0, [][]byte{[]byte("txHash")}) + rrh.RequestTransaction(1, [][]byte{[]byte("txHash")}) + rrh.RequestTransaction(0, [][]byte{[]byte("txHash")}) + rrh.RequestTransaction(1, [][]byte{[]byte("txHash")}) - time.Sleep(time.Second) -} + time.Sleep(time.Second) // let the go routines finish + assert.Equal(t, uint32(2), atomic.LoadUint32(&numRequests)) + time.Sleep(time.Second) // sweep will take effect -func TestResolverRequestHandler_RequestTransactionShouldRequest4TimesIfDifferentShardsAndEnoughTime(t *testing.T) { - t.Parallel() + rrh.RequestTransaction(0, [][]byte{[]byte("txHash")}) + rrh.RequestTransaction(1, [][]byte{[]byte("txHash")}) + rrh.RequestTransaction(0, [][]byte{[]byte("txHash")}) + rrh.RequestTransaction(1, [][]byte{[]byte("txHash")}) - numRequests := uint32(0) - txRequester := &dataRetrieverMocks.HashSliceRequesterStub{ - RequestDataFromHashArrayCalled: func(hashes [][]byte, epoch uint32) error { - atomic.AddUint32(&numRequests, 1) - return nil - }, - } + time.Sleep(time.Second) // let the go routines finish + assert.Equal(t, uint32(4), atomic.LoadUint32(&numRequests)) + }) + t.Run("errors on request should not panic", func(t *testing.T) { + t.Parallel() - timeSpan := time.Second - timeCache := cache.NewTimeCache(timeSpan) - rrh, _ := NewResolverRequestHandler( - &dataRetrieverMocks.RequestersFinderStub{ - CrossShardRequesterCalled: func(baseTopic string, crossShard uint32) (requester dataRetriever.Requester, e error) { - return txRequester, nil + defer func() { + r := recover() + if r != nil { + assert.Fail(t, "should not panic") + } + }() + + chTxRequested := make(chan struct{}) + txRequester := &dataRetrieverMocks.HashSliceRequesterStub{ + RequestDataFromHashArrayCalled: func(hashes [][]byte, epoch uint32) error { + chTxRequested <- struct{}{} + return errExpected }, - }, - timeCache, - &mock.WhiteListHandlerStub{}, - 1, - 0, - time.Second, - ) + } - rrh.RequestTransaction(0, [][]byte{[]byte("txHash")}) - rrh.RequestTransaction(1, [][]byte{[]byte("txHash")}) - rrh.RequestTransaction(0, [][]byte{[]byte("txHash")}) - rrh.RequestTransaction(1, [][]byte{[]byte("txHash")}) + rrh, _ := NewResolverRequestHandler( + &dataRetrieverMocks.RequestersFinderStub{ + CrossShardRequesterCalled: func(baseTopic string, crossShard uint32) (requester dataRetriever.Requester, e error) { + return txRequester, nil + }, + }, + &mock.RequestedItemsHandlerStub{}, + &mock.WhiteListHandlerStub{}, + 1, + 0, + time.Second, + ) - time.Sleep(time.Second) // let the go routines finish - assert.Equal(t, uint32(2), atomic.LoadUint32(&numRequests)) - time.Sleep(time.Second) // sweep will take effect + rrh.RequestTransaction(0, [][]byte{[]byte("txHash")}) - rrh.RequestTransaction(0, [][]byte{[]byte("txHash")}) - rrh.RequestTransaction(1, [][]byte{[]byte("txHash")}) - rrh.RequestTransaction(0, [][]byte{[]byte("txHash")}) - rrh.RequestTransaction(1, [][]byte{[]byte("txHash")}) + select { + case <-chTxRequested: + case <-time.After(timeoutSendRequests): + assert.Fail(t, "timeout while waiting to call RequestDataFromHashArray") + } - time.Sleep(time.Second) // let the go routines finish - assert.Equal(t, uint32(4), atomic.LoadUint32(&numRequests)) + time.Sleep(time.Second) + }) } -func TestResolverRequestHandler_RequestTransactionErrorsOnRequestShouldNotPanic(t *testing.T) { +func TestResolverRequestHandler_RequestMiniBlock(t *testing.T) { 
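The "should request 4 times if different shards" subtest above swaps the usual RequestedItemsHandlerStub for a real time cache, and that cache is what de-duplicates requests: within one cache span a given shard/hash key is forwarded only once, which is why the first four RequestTransaction calls produce only 2 requests; only after the span elapses and the expired keys are swept can the same keys be requested again, giving 4 in total. A toy, self-contained sketch of that gating; the key layout is hypothetical, as the patch does not show the production format:

package sketch

import (
	"fmt"
	"time"
)

// requestedItems is a toy stand-in for the requested-items time cache used above.
type requestedItems struct {
	span  time.Duration
	items map[string]time.Time
}

func newRequestedItems(span time.Duration) *requestedItems {
	return &requestedItems{span: span, items: map[string]time.Time{}}
}

// sweep drops keys older than the span, mirroring the "sweep will take effect"
// comment in the test above.
func (r *requestedItems) sweep() {
	for key, added := range r.items {
		if time.Since(added) > r.span {
			delete(r.items, key)
		}
	}
}

// shouldRequest reports whether a (shard, hash) pair may be forwarded now and
// records it; duplicates inside the same span are suppressed.
func (r *requestedItems) shouldRequest(shardID uint32, hash []byte) bool {
	key := fmt.Sprintf("%d-%x", shardID, hash) // hypothetical key layout
	if _, alreadyRequested := r.items[key]; alreadyRequested {
		return false
	}
	r.items[key] = time.Now()
	return true
}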
t.Parallel() - defer func() { - r := recover() - if r != nil { - assert.Fail(t, "should not panic") - } - }() + t.Run("hash already requested", func(t *testing.T) { + t.Parallel() - chTxRequested := make(chan struct{}) - txRequester := &dataRetrieverMocks.HashSliceRequesterStub{ - RequestDataFromHashArrayCalled: func(hashes [][]byte, epoch uint32) error { - chTxRequested <- struct{}{} - return errExpected - }, - } + defer func() { + r := recover() + if r != nil { + assert.Fail(t, "should not panic") + } + }() - rrh, _ := NewResolverRequestHandler( - &dataRetrieverMocks.RequestersFinderStub{ - CrossShardRequesterCalled: func(baseTopic string, crossShard uint32) (requester dataRetriever.Requester, e error) { - return txRequester, nil + rrh, _ := NewResolverRequestHandler( + &dataRetrieverMocks.RequestersFinderStub{ + CrossShardRequesterCalled: func(baseTopic string, crossShard uint32) (requester dataRetriever.Requester, e error) { + require.Fail(t, "should not have been called") + return nil, nil + }, }, - }, - &mock.RequestedItemsHandlerStub{}, - &mock.WhiteListHandlerStub{}, - 1, - 0, - time.Second, - ) + &mock.RequestedItemsHandlerStub{ + HasCalled: func(key string) bool { + return true + }, + }, + &mock.WhiteListHandlerStub{}, + 1, + 0, + time.Second, + ) - rrh.RequestTransaction(0, [][]byte{[]byte("txHash")}) + rrh.RequestMiniBlock(0, make([]byte, 0)) + }) + t.Run("CrossShardRequester returns error", func(t *testing.T) { + t.Parallel() - select { - case <-chTxRequested: - case <-time.After(timeoutSendRequests): - assert.Fail(t, "timeout while waiting to call RequestDataFromHashArray") - } + defer func() { + r := recover() + if r != nil { + assert.Fail(t, "should not panic") + } + }() - time.Sleep(time.Second) -} + rrh, _ := NewResolverRequestHandler( + &dataRetrieverMocks.RequestersFinderStub{ + CrossShardRequesterCalled: func(baseTopic string, crossShard uint32) (requester dataRetriever.Requester, e error) { + return nil, errExpected + }, + }, + &mock.RequestedItemsHandlerStub{}, + &mock.WhiteListHandlerStub{}, + 1, + 0, + time.Second, + ) -func TestResolverRequestHandler_RequestMiniBlockErrorWhenGettingCrossShardRequesterShouldNotPanic(t *testing.T) { - t.Parallel() + rrh.RequestMiniBlock(0, make([]byte, 0)) + }) + t.Run("RequestDataFromHash error", func(t *testing.T) { + t.Parallel() - defer func() { - r := recover() - if r != nil { - assert.Fail(t, "should not panic") + defer func() { + r := recover() + if r != nil { + assert.Fail(t, "should not panic") + } + }() + + mbRequester := &dataRetrieverMocks.RequesterStub{ + RequestDataFromHashCalled: func(hash []byte, epoch uint32) error { + return errExpected + }, + } + + rrh, _ := NewResolverRequestHandler( + &dataRetrieverMocks.RequestersFinderStub{ + CrossShardRequesterCalled: func(baseTopic string, crossShard uint32) (requester dataRetriever.Requester, e error) { + return mbRequester, nil + }, + }, + &mock.RequestedItemsHandlerStub{}, + &mock.WhiteListHandlerStub{}, + 1, + 0, + time.Second, + ) + + rrh.RequestMiniBlock(0, []byte("mbHash")) + }) + t.Run("should request", func(t *testing.T) { + t.Parallel() + + wasCalled := false + mbRequester := &dataRetrieverMocks.RequesterStub{ + RequestDataFromHashCalled: func(hash []byte, epoch uint32) error { + wasCalled = true + return nil + }, } - }() - rrh, _ := NewResolverRequestHandler( - &dataRetrieverMocks.RequestersFinderStub{ - CrossShardRequesterCalled: func(baseTopic string, crossShard uint32) (requester dataRetriever.Requester, e error) { - return nil, errExpected + rrh, _ := 
NewResolverRequestHandler( + &dataRetrieverMocks.RequestersFinderStub{ + CrossShardRequesterCalled: func(baseTopic string, crossShard uint32) (requester dataRetriever.Requester, e error) { + return mbRequester, nil + }, }, - }, - &mock.RequestedItemsHandlerStub{}, - &mock.WhiteListHandlerStub{}, - 1, - 0, - time.Second, - ) + &mock.RequestedItemsHandlerStub{}, + &mock.WhiteListHandlerStub{}, + 1, + 0, + time.Second, + ) - rrh.RequestMiniBlock(0, make([]byte, 0)) -} + rrh.RequestMiniBlock(0, []byte("mbHash")) -func TestResolverRequestHandler_RequestMiniBlockErrorsOnRequestShouldNotPanic(t *testing.T) { - t.Parallel() + assert.True(t, wasCalled) + }) + t.Run("should call with the correct epoch", func(t *testing.T) { + t.Parallel() - defer func() { - r := recover() - if r != nil { - assert.Fail(t, "should not panic") + expectedEpoch := uint32(7) + mbRequester := &dataRetrieverMocks.RequesterStub{ + RequestDataFromHashCalled: func(hash []byte, epoch uint32) error { + assert.Equal(t, expectedEpoch, epoch) + return nil + }, } - }() - - mbRequester := &dataRetrieverMocks.RequesterStub{ - RequestDataFromHashCalled: func(hash []byte, epoch uint32) error { - return errExpected - }, - } - rrh, _ := NewResolverRequestHandler( - &dataRetrieverMocks.RequestersFinderStub{ - CrossShardRequesterCalled: func(baseTopic string, crossShard uint32) (requester dataRetriever.Requester, e error) { - return mbRequester, nil + rrh, _ := NewResolverRequestHandler( + &dataRetrieverMocks.RequestersFinderStub{ + CrossShardRequesterCalled: func(baseTopic string, crossShard uint32) (requester dataRetriever.Requester, e error) { + return mbRequester, nil + }, }, - }, - &mock.RequestedItemsHandlerStub{}, - &mock.WhiteListHandlerStub{}, - 1, - 0, - time.Second, - ) + &mock.RequestedItemsHandlerStub{}, + &mock.WhiteListHandlerStub{}, + 1, + 0, + time.Second, + ) + + rrh.SetEpoch(expectedEpoch) - rrh.RequestMiniBlock(0, []byte("mbHash")) + rrh.RequestMiniBlock(0, []byte("mbHash")) + }) } -func TestResolverRequestHandler_RequestMiniBlockShouldCallRequestOnResolver(t *testing.T) { +func TestResolverRequestHandler_RequestShardHeader(t *testing.T) { t.Parallel() - wasCalled := false - mbRequester := &dataRetrieverMocks.RequesterStub{ - RequestDataFromHashCalled: func(hash []byte, epoch uint32) error { - wasCalled = true - return nil - }, - } + t.Run("hash already requested should work", func(t *testing.T) { + t.Parallel() - rrh, _ := NewResolverRequestHandler( - &dataRetrieverMocks.RequestersFinderStub{ - CrossShardRequesterCalled: func(baseTopic string, crossShard uint32) (requester dataRetriever.Requester, e error) { - return mbRequester, nil + rrh, _ := NewResolverRequestHandler( + createRequestersFinderStubThatShouldNotBeCalled(t), + &mock.RequestedItemsHandlerStub{ + HasCalled: func(key string) bool { + return true + }, }, - }, - &mock.RequestedItemsHandlerStub{}, - &mock.WhiteListHandlerStub{}, - 1, - 0, - time.Second, - ) - - rrh.RequestMiniBlock(0, []byte("mbHash")) + &mock.WhiteListHandlerStub{}, + 1, + 0, + time.Second, + ) - assert.True(t, wasCalled) -} + rrh.RequestShardHeader(0, make([]byte, 0)) + }) + t.Run("no hash should work", func(t *testing.T) { + t.Parallel() -func TestResolverRequestHandler_RequestMiniBlockShouldCallWithTheCorrectEpoch(t *testing.T) { - t.Parallel() + rrh, _ := NewResolverRequestHandler( + createRequestersFinderStubThatShouldNotBeCalled(t), + &mock.RequestedItemsHandlerStub{}, + &mock.WhiteListHandlerStub{}, + 1, + 0, + time.Second, + ) - expectedEpoch := uint32(7) - mbRequester := 
&dataRetrieverMocks.RequesterStub{ - RequestDataFromHashCalled: func(hash []byte, epoch uint32) error { - assert.Equal(t, expectedEpoch, epoch) - return nil - }, - } + rrh.RequestShardHeader(1, make([]byte, 0)) + }) + t.Run("RequestDataFromHash returns error should work", func(t *testing.T) { + t.Parallel() - rrh, _ := NewResolverRequestHandler( - &dataRetrieverMocks.RequestersFinderStub{ - CrossShardRequesterCalled: func(baseTopic string, crossShard uint32) (requester dataRetriever.Requester, e error) { - return mbRequester, nil + mbRequester := &dataRetrieverMocks.RequesterStub{ + RequestDataFromHashCalled: func(hash []byte, epoch uint32) error { + return errExpected }, - }, - &mock.RequestedItemsHandlerStub{}, - &mock.WhiteListHandlerStub{}, - 1, - 0, - time.Second, - ) + } - rrh.SetEpoch(expectedEpoch) + rrh, _ := NewResolverRequestHandler( + &dataRetrieverMocks.RequestersFinderStub{ + CrossShardRequesterCalled: func(baseTopic string, crossShard uint32) (requester dataRetriever.Requester, e error) { + return mbRequester, nil + }, + }, + &mock.RequestedItemsHandlerStub{}, + &mock.WhiteListHandlerStub{}, + 1, + 0, + time.Second, + ) - rrh.RequestMiniBlock(0, []byte("mbHash")) -} + rrh.RequestShardHeader(0, []byte("hdrHash")) + }) + t.Run("should call request", func(t *testing.T) { + t.Parallel() -func TestResolverRequestHandler_RequestShardHeaderHashAlreadyRequestedShouldNotRequest(t *testing.T) { - t.Parallel() + wasCalled := false + mbRequester := &dataRetrieverMocks.RequesterStub{ + RequestDataFromHashCalled: func(hash []byte, epoch uint32) error { + wasCalled = true + return nil + }, + } - rrh, _ := NewResolverRequestHandler( - createRequestersFinderStubThatShouldNotBeCalled(t), - &mock.RequestedItemsHandlerStub{ - HasCalled: func(key string) bool { - return true + rrh, _ := NewResolverRequestHandler( + &dataRetrieverMocks.RequestersFinderStub{ + CrossShardRequesterCalled: func(baseTopic string, crossShard uint32) (requester dataRetriever.Requester, e error) { + return mbRequester, nil + }, }, - }, - &mock.WhiteListHandlerStub{}, - 1, - 0, - time.Second, - ) + &mock.RequestedItemsHandlerStub{}, + &mock.WhiteListHandlerStub{}, + 1, + 0, + time.Second, + ) - rrh.RequestShardHeader(0, make([]byte, 0)) + rrh.RequestShardHeader(0, []byte("hdrHash")) + + assert.True(t, wasCalled) + }) } -func TestResolverRequestHandler_RequestShardHeaderHashBadRequest(t *testing.T) { +func TestResolverRequestHandler_RequestMetaHeader(t *testing.T) { t.Parallel() - rrh, _ := NewResolverRequestHandler( - createRequestersFinderStubThatShouldNotBeCalled(t), - &mock.RequestedItemsHandlerStub{}, - &mock.WhiteListHandlerStub{}, - 1, - 0, - time.Second, - ) + t.Run("header already requested should work", func(t *testing.T) { + t.Parallel() - rrh.RequestShardHeader(1, make([]byte, 0)) -} + rrh, _ := NewResolverRequestHandler( + &dataRetrieverMocks.RequestersFinderStub{}, + &mock.RequestedItemsHandlerStub{ + HasCalled: func(key string) bool { + return true + }, + }, + &mock.WhiteListHandlerStub{}, + 1, + 0, + time.Second, + ) -func TestResolverRequestHandler_RequestShardHeaderShouldCallRequestOnResolver(t *testing.T) { - t.Parallel() + rrh.RequestMetaHeader([]byte("hdrHash")) + }) + t.Run("cast fail should work", func(t *testing.T) { + t.Parallel() - wasCalled := false - mbRequester := &dataRetrieverMocks.RequesterStub{ - RequestDataFromHashCalled: func(hash []byte, epoch uint32) error { - wasCalled = true - return nil - }, - } + req := &dataRetrieverMocks.RequesterStub{ + RequestDataFromHashCalled: func(hash 
[]byte, epoch uint32) error { + require.Fail(t, "should have not been called") + return nil + }, + } - rrh, _ := NewResolverRequestHandler( - &dataRetrieverMocks.RequestersFinderStub{ - CrossShardRequesterCalled: func(baseTopic string, crossShard uint32) (requester dataRetriever.Requester, e error) { - return mbRequester, nil + rrh, _ := NewResolverRequestHandler( + &dataRetrieverMocks.RequestersFinderStub{ + MetaChainRequesterCalled: func(baseTopic string) (requester dataRetriever.Requester, e error) { + return req, nil + }, }, - }, - &mock.RequestedItemsHandlerStub{}, - &mock.WhiteListHandlerStub{}, - 1, - 0, - time.Second, - ) + &mock.RequestedItemsHandlerStub{}, + &mock.WhiteListHandlerStub{}, + 1, + 0, + time.Second, + ) - rrh.RequestShardHeader(0, []byte("hdrHash")) + rrh.RequestMetaHeader([]byte("hdrHash")) + }) + t.Run("MetaChainRequester returns error should work", func(t *testing.T) { + t.Parallel() - assert.True(t, wasCalled) -} + rrh, _ := NewResolverRequestHandler( + &dataRetrieverMocks.RequestersFinderStub{ + MetaChainRequesterCalled: func(baseTopic string) (requester dataRetriever.Requester, e error) { + return nil, errExpected + }, + }, + &mock.RequestedItemsHandlerStub{}, + &mock.WhiteListHandlerStub{}, + 1, + 0, + time.Second, + ) -func TestResolverRequestHandler_RequestMetadHeaderHashAlreadyRequestedShouldNotRequest(t *testing.T) { - t.Parallel() + rrh.RequestMetaHeader([]byte("hdrHash")) + }) + t.Run("RequestDataFromHash returns error should work", func(t *testing.T) { + t.Parallel() - rrh, _ := NewResolverRequestHandler( - createRequestersFinderStubThatShouldNotBeCalled(t), - &mock.RequestedItemsHandlerStub{ - HasCalled: func(key string) bool { - return true + req := &dataRetrieverMocks.HeaderRequesterStub{ + RequestDataFromHashCalled: func(hash []byte, epoch uint32) error { + return errExpected }, - }, - &mock.WhiteListHandlerStub{}, - 1, - 0, - time.Second, - ) + } - rrh.RequestMetaHeader(make([]byte, 0)) -} + rrh, _ := NewResolverRequestHandler( + &dataRetrieverMocks.RequestersFinderStub{ + MetaChainRequesterCalled: func(baseTopic string) (requester dataRetriever.Requester, e error) { + return req, nil + }, + }, + &mock.RequestedItemsHandlerStub{}, + &mock.WhiteListHandlerStub{}, + 1, + 0, + time.Second, + ) -func TestResolverRequestHandler_RequestMetadHeaderHashNotHeaderResolverShouldNotRequest(t *testing.T) { - t.Parallel() + rrh.RequestMetaHeader([]byte("hdrHash")) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() - wasCalled := false - mbRequester := &dataRetrieverMocks.RequesterStub{ - RequestDataFromHashCalled: func(hash []byte, epoch uint32) error { - wasCalled = true - return nil - }, - } + wasCalled := false + mbRequester := &dataRetrieverMocks.HeaderRequesterStub{ + RequestDataFromHashCalled: func(hash []byte, epoch uint32) error { + wasCalled = true + return nil + }, + } - rrh, _ := NewResolverRequestHandler( - &dataRetrieverMocks.RequestersFinderStub{ - MetaChainRequesterCalled: func(baseTopic string) (requester dataRetriever.Requester, e error) { - return mbRequester, nil + rrh, _ := NewResolverRequestHandler( + &dataRetrieverMocks.RequestersFinderStub{ + MetaChainRequesterCalled: func(baseTopic string) (requester dataRetriever.Requester, e error) { + return mbRequester, nil + }, }, - }, - &mock.RequestedItemsHandlerStub{}, - &mock.WhiteListHandlerStub{}, - 1, - 0, - time.Second, - ) + &mock.RequestedItemsHandlerStub{}, + &mock.WhiteListHandlerStub{}, + 1, + 0, + time.Second, + ) - rrh.RequestMetaHeader([]byte("hdrHash")) + 
rrh.RequestMetaHeader([]byte("hdrHash")) - assert.False(t, wasCalled) + assert.True(t, wasCalled) + }) } -func TestResolverRequestHandler_RequestMetaHeaderShouldCallRequestOnResolver(t *testing.T) { +func TestResolverRequestHandler_RequestShardHeaderByNonce(t *testing.T) { t.Parallel() - wasCalled := false - mbRequester := &dataRetrieverMocks.HeaderRequesterStub{ - RequestDataFromHashCalled: func(hash []byte, epoch uint32) error { - wasCalled = true - return nil - }, - } + t.Run("nonce already requested should work", func(t *testing.T) { + t.Parallel() - rrh, _ := NewResolverRequestHandler( - &dataRetrieverMocks.RequestersFinderStub{ - MetaChainRequesterCalled: func(baseTopic string) (requester dataRetriever.Requester, e error) { - return mbRequester, nil + called := false + rrh, _ := NewResolverRequestHandler( + createRequestersFinderStubThatShouldNotBeCalled(t), + &mock.RequestedItemsHandlerStub{ + HasCalled: func(key string) bool { + called = true + return true + }, }, - }, - &mock.RequestedItemsHandlerStub{}, - &mock.WhiteListHandlerStub{}, - 1, - 0, - time.Second, - ) - - rrh.RequestMetaHeader([]byte("hdrHash")) - - assert.True(t, wasCalled) -} + &mock.WhiteListHandlerStub{}, + 1, + 0, + time.Second, + ) -func TestResolverRequestHandler_RequestShardHeaderByNonceAlreadyRequestedShouldNotRequest(t *testing.T) { - t.Parallel() + rrh.RequestShardHeaderByNonce(0, 0) + require.True(t, called) + }) + t.Run("invalid nonce should work", func(t *testing.T) { + t.Parallel() - called := false - rrh, _ := NewResolverRequestHandler( - createRequestersFinderStubThatShouldNotBeCalled(t), - &mock.RequestedItemsHandlerStub{ - HasCalled: func(key string) bool { - called = true - return true + called := false + rrh, _ := NewResolverRequestHandler( + &dataRetrieverMocks.RequestersFinderStub{ + CrossShardRequesterCalled: func(baseTopic string, crossShard uint32) (requester dataRetriever.Requester, err error) { + called = true + return nil, errExpected + }, }, - }, - &mock.WhiteListHandlerStub{}, - 1, - 0, - time.Second, - ) + &mock.RequestedItemsHandlerStub{}, + &mock.WhiteListHandlerStub{}, + 1, + core.MetachainShardId, + time.Second, + ) - rrh.RequestShardHeaderByNonce(0, 0) - require.True(t, called) -} + rrh.RequestShardHeaderByNonce(1, 0) + require.True(t, called) + }) + t.Run("finder returns error should work and not panic", func(t *testing.T) { + t.Parallel() -func TestResolverRequestHandler_RequestShardHeaderByNonceBadRequest(t *testing.T) { - t.Parallel() + defer func() { + r := recover() + if r != nil { + assert.Fail(t, "should not panic") + } + }() - localErr := errors.New("err") - called := false - rrh, _ := NewResolverRequestHandler( - &dataRetrieverMocks.RequestersFinderStub{ - CrossShardRequesterCalled: func(baseTopic string, crossShard uint32) (requester dataRetriever.Requester, err error) { - called = true - return nil, localErr + rrh, _ := NewResolverRequestHandler( + &dataRetrieverMocks.RequestersFinderStub{ + CrossShardRequesterCalled: func(baseTopic string, shardID uint32) (requester dataRetriever.Requester, e error) { + return nil, errExpected + }, }, - }, - &mock.RequestedItemsHandlerStub{}, - &mock.WhiteListHandlerStub{}, - 1, - core.MetachainShardId, - time.Second, - ) + &mock.RequestedItemsHandlerStub{}, + &mock.WhiteListHandlerStub{}, + 1, + 0, + time.Second, + ) - rrh.RequestShardHeaderByNonce(1, 0) - require.True(t, called) -} + rrh.RequestShardHeaderByNonce(0, 0) + }) + t.Run("cast fails should work and not panic", func(t *testing.T) { + t.Parallel() -func 
TestResolverRequestHandler_RequestShardHeaderByNonceFinderReturnsErrorShouldNotPanic(t *testing.T) { - t.Parallel() + defer func() { + r := recover() + if r != nil { + assert.Fail(t, "should not panic") + } + }() - defer func() { - r := recover() - if r != nil { - assert.Fail(t, "should not panic") - } - }() + hdrRequester := &dataRetrieverMocks.RequesterStub{} - rrh, _ := NewResolverRequestHandler( - &dataRetrieverMocks.RequestersFinderStub{ - CrossShardRequesterCalled: func(baseTopic string, shardID uint32) (requester dataRetriever.Requester, e error) { - return nil, errExpected + rrh, _ := NewResolverRequestHandler( + &dataRetrieverMocks.RequestersFinderStub{ + CrossShardRequesterCalled: func(baseTopic string, shardID uint32) (requester dataRetriever.Requester, e error) { + return hdrRequester, nil + }, }, - }, - &mock.RequestedItemsHandlerStub{}, - &mock.WhiteListHandlerStub{}, - 1, - 0, - time.Second, - ) + &mock.RequestedItemsHandlerStub{}, + &mock.WhiteListHandlerStub{}, + 1, + 0, + time.Second, + ) - rrh.RequestShardHeaderByNonce(0, 0) -} + rrh.RequestShardHeaderByNonce(0, 0) + }) + t.Run("resolver fails should work and not panic", func(t *testing.T) { + t.Parallel() -func TestResolverRequestHandler_RequestShardHeaderByNonceFinderReturnsAWrongResolverShouldNotPanic(t *testing.T) { - t.Parallel() + defer func() { + r := recover() + if r != nil { + assert.Fail(t, "should not panic") + } + }() - defer func() { - r := recover() - if r != nil { - assert.Fail(t, "should not panic") + hdrRequester := &dataRetrieverMocks.NonceRequesterStub{ + RequestDataFromNonceCalled: func(nonce uint64, epoch uint32) error { + return errExpected + }, } - }() - - hdrRequester := &dataRetrieverMocks.RequesterStub{ - RequestDataFromHashCalled: func(hash []byte, epoch uint32) error { - return errExpected - }, - } - rrh, _ := NewResolverRequestHandler( - &dataRetrieverMocks.RequestersFinderStub{ - CrossShardRequesterCalled: func(baseTopic string, shardID uint32) (requester dataRetriever.Requester, e error) { - return hdrRequester, nil + rrh, _ := NewResolverRequestHandler( + &dataRetrieverMocks.RequestersFinderStub{ + CrossShardRequesterCalled: func(baseTopic string, shardID uint32) (requester dataRetriever.Requester, e error) { + return hdrRequester, nil + }, }, - }, - &mock.RequestedItemsHandlerStub{}, - &mock.WhiteListHandlerStub{}, - 1, - 0, - time.Second, - ) - - rrh.RequestShardHeaderByNonce(0, 0) -} + &mock.RequestedItemsHandlerStub{}, + &mock.WhiteListHandlerStub{}, + 1, + 0, + time.Second, + ) -func TestResolverRequestHandler_RequestShardHeaderByNonceResolverFailsShouldNotPanic(t *testing.T) { - t.Parallel() + rrh.RequestShardHeaderByNonce(0, 0) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() - defer func() { - r := recover() - if r != nil { - assert.Fail(t, "should not panic") + wasCalled := false + hdrRequester := &dataRetrieverMocks.NonceRequesterStub{ + RequestDataFromNonceCalled: func(nonce uint64, epoch uint32) error { + wasCalled = true + return nil + }, } - }() - - hdrRequester := &dataRetrieverMocks.RequesterStub{ - RequestDataFromHashCalled: func(hash []byte, epoch uint32) error { - return errExpected - }, - } - rrh, _ := NewResolverRequestHandler( - &dataRetrieverMocks.RequestersFinderStub{ - CrossShardRequesterCalled: func(baseTopic string, shardID uint32) (requester dataRetriever.Requester, e error) { - return hdrRequester, nil + rrh, _ := NewResolverRequestHandler( + &dataRetrieverMocks.RequestersFinderStub{ + CrossShardRequesterCalled: func(baseTopic string, shardID 
uint32) (requester dataRetriever.Requester, e error) { + return hdrRequester, nil + }, }, - }, - &mock.RequestedItemsHandlerStub{}, - &mock.WhiteListHandlerStub{}, - 1, - 0, - time.Second, - ) + &mock.RequestedItemsHandlerStub{}, + &mock.WhiteListHandlerStub{}, + 1, + 0, + time.Second, + ) + + rrh.RequestShardHeaderByNonce(0, 0) - rrh.RequestShardHeaderByNonce(0, 0) + assert.True(t, wasCalled) + }) } -func TestResolverRequestHandler_RequestShardHeaderByNonceShouldRequest(t *testing.T) { +func TestResolverRequestHandler_RequestMetaHeaderByNonce(t *testing.T) { t.Parallel() - wasCalled := false - hdrRequester := &dataRetrieverMocks.NonceRequesterStub{ - RequestDataFromNonceCalled: func(nonce uint64, epoch uint32) error { - wasCalled = true - return nil - }, - } + t.Run("nonce already requested should work", func(t *testing.T) { + t.Parallel() - rrh, _ := NewResolverRequestHandler( - &dataRetrieverMocks.RequestersFinderStub{ - CrossShardRequesterCalled: func(baseTopic string, shardID uint32) (requester dataRetriever.Requester, e error) { - return hdrRequester, nil + rrh, _ := NewResolverRequestHandler( + createRequestersFinderStubThatShouldNotBeCalled(t), + &mock.RequestedItemsHandlerStub{ + HasCalled: func(key string) bool { + return true + }, }, - }, - &mock.RequestedItemsHandlerStub{}, - &mock.WhiteListHandlerStub{}, - 1, - 0, - time.Second, - ) + &mock.WhiteListHandlerStub{}, + 1, + 0, + time.Second, + ) - rrh.RequestShardHeaderByNonce(0, 0) + rrh.RequestMetaHeaderByNonce(0) + }) + t.Run("MetaChainRequester returns error should work", func(t *testing.T) { + t.Parallel() - assert.True(t, wasCalled) -} + rrh, _ := NewResolverRequestHandler( + &dataRetrieverMocks.RequestersFinderStub{ + MetaChainRequesterCalled: func(baseTopic string) (requester dataRetriever.Requester, e error) { + return nil, errExpected + }, + }, + &mock.RequestedItemsHandlerStub{}, + &mock.WhiteListHandlerStub{ + AddCalled: func(keys [][]byte) { + require.Fail(t, "should not have been called") + }, + }, + 100, + 0, + time.Second, + ) -func TestResolverRequestHandler_RequestMetaHeaderHashAlreadyRequestedShouldNotRequest(t *testing.T) { - t.Parallel() + rrh.RequestMetaHeaderByNonce(0) + }) + t.Run("RequestDataFromNonce returns error should work", func(t *testing.T) { + t.Parallel() - rrh, _ := NewResolverRequestHandler( - createRequestersFinderStubThatShouldNotBeCalled(t), - &mock.RequestedItemsHandlerStub{ - HasCalled: func(key string) bool { - return true + hdrRequester := &dataRetrieverMocks.HeaderRequesterStub{ + RequestDataFromNonceCalled: func(nonce uint64, epoch uint32) error { + return errExpected }, - }, - &mock.WhiteListHandlerStub{}, - 1, - 0, - time.Second, - ) + } - rrh.RequestMetaHeaderByNonce(0) -} + rrh, _ := NewResolverRequestHandler( + &dataRetrieverMocks.RequestersFinderStub{ + MetaChainRequesterCalled: func(baseTopic string) (requester dataRetriever.Requester, e error) { + return hdrRequester, nil + }, + }, + &mock.RequestedItemsHandlerStub{}, + &mock.WhiteListHandlerStub{}, + 100, + 0, + time.Second, + ) -func TestResolverRequestHandler_RequestMetaHeaderByNonceShouldRequest(t *testing.T) { - t.Parallel() + rrh.RequestMetaHeaderByNonce(0) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() - wasCalled := false - hdrRequester := &dataRetrieverMocks.HeaderRequesterStub{ - RequestDataFromNonceCalled: func(nonce uint64, epoch uint32) error { - wasCalled = true - return nil - }, - } + wasCalled := false + hdrRequester := &dataRetrieverMocks.HeaderRequesterStub{ + RequestDataFromNonceCalled: 
func(nonce uint64, epoch uint32) error { + wasCalled = true + return nil + }, + } - rrh, _ := NewResolverRequestHandler( - &dataRetrieverMocks.RequestersFinderStub{ - MetaChainRequesterCalled: func(baseTopic string) (requester dataRetriever.Requester, e error) { - return hdrRequester, nil + rrh, _ := NewResolverRequestHandler( + &dataRetrieverMocks.RequestersFinderStub{ + MetaChainRequesterCalled: func(baseTopic string) (requester dataRetriever.Requester, e error) { + return hdrRequester, nil + }, }, - }, - &mock.RequestedItemsHandlerStub{}, - &mock.WhiteListHandlerStub{}, - 100, - 0, - time.Second, - ) + &mock.RequestedItemsHandlerStub{}, + &mock.WhiteListHandlerStub{}, + 100, + 0, + time.Second, + ) - rrh.RequestMetaHeaderByNonce(0) + rrh.RequestMetaHeaderByNonce(0) - assert.True(t, wasCalled) + assert.True(t, wasCalled) + }) } func TestResolverRequestHandler_RequestScrErrorWhenGettingCrossShardRequesterShouldNotPanic(t *testing.T) { @@ -910,168 +1108,211 @@ func TestResolverRequestHandler_RequestRewardShouldRequestReward(t *testing.T) { time.Sleep(time.Second) } -func TestRequestTrieNodes_ShouldWork(t *testing.T) { +func TestRequestTrieNodes(t *testing.T) { t.Parallel() - chTxRequested := make(chan struct{}) - requesterMock := &dataRetrieverMocks.HashSliceRequesterStub{ - RequestDataFromHashArrayCalled: func(hash [][]byte, epoch uint32) error { - chTxRequested <- struct{}{} - return nil - }, - } + t.Run("should work", func(t *testing.T) { + t.Parallel() - rrh, _ := NewResolverRequestHandler( - &dataRetrieverMocks.RequestersFinderStub{ - MetaCrossShardRequesterCalled: func(baseTopic string, crossShard uint32) (dataRetriever.Requester, error) { - return requesterMock, nil + chTxRequested := make(chan struct{}) + requesterMock := &dataRetrieverMocks.HashSliceRequesterStub{ + RequestDataFromHashArrayCalled: func(hash [][]byte, epoch uint32) error { + chTxRequested <- struct{}{} + return nil }, - }, - &mock.RequestedItemsHandlerStub{}, - &mock.WhiteListHandlerStub{}, - 1, - 0, - time.Second, - ) + } - rrh.RequestTrieNodes(0, [][]byte{[]byte("hash")}, "topic") - select { - case <-chTxRequested: - case <-time.After(timeoutSendRequests): - assert.Fail(t, "timeout while waiting to call RequestDataFromHashArray") - } + rrh, _ := NewResolverRequestHandler( + &dataRetrieverMocks.RequestersFinderStub{ + MetaCrossShardRequesterCalled: func(baseTopic string, crossShard uint32) (dataRetriever.Requester, error) { + return requesterMock, nil + }, + }, + &mock.RequestedItemsHandlerStub{}, + &mock.WhiteListHandlerStub{}, + 1, + 0, + time.Second, + ) - time.Sleep(time.Second) -} + rrh.RequestTrieNodes(0, [][]byte{[]byte("hash")}, "topic") + select { + case <-chTxRequested: + case <-time.After(timeoutSendRequests): + assert.Fail(t, "timeout while waiting to call RequestDataFromHashArray") + } -func TestRequestTrieNodes_NilResolver(t *testing.T) { - t.Parallel() + time.Sleep(time.Second) + }) + t.Run("nil resolver", func(t *testing.T) { + t.Parallel() - localError := errors.New("test error") - called := false - rrh, _ := NewResolverRequestHandler( - &dataRetrieverMocks.RequestersFinderStub{ - MetaCrossShardRequesterCalled: func(baseTopic string, shId uint32) (requester dataRetriever.Requester, err error) { - called = true - return nil, localError + localError := errors.New("test error") + called := false + rrh, _ := NewResolverRequestHandler( + &dataRetrieverMocks.RequestersFinderStub{ + MetaCrossShardRequesterCalled: func(baseTopic string, shId uint32) (requester dataRetriever.Requester, err error) { + 
called = true + return nil, localError + }, }, - }, - &mock.RequestedItemsHandlerStub{}, - &mock.WhiteListHandlerStub{}, - 1, - 0, - time.Second, - ) - - rrh.RequestTrieNodes(core.MetachainShardId, [][]byte{[]byte("hash")}, "topic") - assert.True(t, called) -} + &mock.RequestedItemsHandlerStub{}, + &mock.WhiteListHandlerStub{}, + 1, + 0, + time.Second, + ) -func TestRequestStartOfEpochMetaBlock_MissingResolver(t *testing.T) { - t.Parallel() + rrh.RequestTrieNodes(core.MetachainShardId, [][]byte{[]byte("hash")}, "topic") + assert.True(t, called) + }) + t.Run("no hash", func(t *testing.T) { + t.Parallel() - called := false - localError := errors.New("test error") - rrh, _ := NewResolverRequestHandler( - &dataRetrieverMocks.RequestersFinderStub{ - MetaChainRequesterCalled: func(baseTopic string) (requester dataRetriever.Requester, err error) { - called = true - return nil, localError + rrh, _ := NewResolverRequestHandler( + &dataRetrieverMocks.RequestersFinderStub{ + MetaCrossShardRequesterCalled: func(baseTopic string, shId uint32) (requester dataRetriever.Requester, err error) { + require.Fail(t, "should have not been called") + return nil, nil + }, }, - }, - &mock.RequestedItemsHandlerStub{}, - &mock.WhiteListHandlerStub{}, - 1, - 0, - time.Second, - ) + &mock.RequestedItemsHandlerStub{}, + &mock.WhiteListHandlerStub{}, + 1, + 0, + time.Second, + ) - rrh.RequestStartOfEpochMetaBlock(0) - assert.True(t, called) + rrh.RequestTrieNodes(core.MetachainShardId, [][]byte{}, "topic") + }) } -func TestRequestStartOfEpochMetaBlock_WrongResolver(t *testing.T) { +func TestResolverRequestHandler_RequestStartOfEpochMetaBlock(t *testing.T) { t.Parallel() - called := false - rrh, _ := NewResolverRequestHandler( - &dataRetrieverMocks.RequestersFinderStub{ - MetaChainRequesterCalled: func(baseTopic string) (requester dataRetriever.Requester, err error) { - called = true - return &dataRetrieverMocks.RequesterStub{}, nil + t.Run("epoch already requested", func(t *testing.T) { + t.Parallel() + + rrh, _ := NewResolverRequestHandler( + &dataRetrieverMocks.RequestersFinderStub{ + MetaChainRequesterCalled: func(baseTopic string) (requester dataRetriever.Requester, err error) { + require.Fail(t, "should not have been called") + return nil, nil + }, }, - }, - &mock.RequestedItemsHandlerStub{}, - &mock.WhiteListHandlerStub{}, - 1, - 0, - time.Second, - ) + &mock.RequestedItemsHandlerStub{ + HasCalled: func(key string) bool { + return true + }, + }, + &mock.WhiteListHandlerStub{}, + 1, + 0, + time.Second, + ) - rrh.RequestStartOfEpochMetaBlock(0) - assert.True(t, called) -} + rrh.RequestStartOfEpochMetaBlock(0) + }) + t.Run("missing resolver", func(t *testing.T) { + t.Parallel() -func TestRequestStartOfEpochMetaBlock_RequestDataFromEpochError(t *testing.T) { - t.Parallel() + called := false + rrh, _ := NewResolverRequestHandler( + &dataRetrieverMocks.RequestersFinderStub{ + MetaChainRequesterCalled: func(baseTopic string) (requester dataRetriever.Requester, err error) { + called = true + return nil, errExpected + }, + }, + &mock.RequestedItemsHandlerStub{}, + &mock.WhiteListHandlerStub{}, + 1, + 0, + time.Second, + ) - called := false - localError := errors.New("test error") - requesterMock := &dataRetrieverMocks.EpochRequesterStub{ - RequestDataFromEpochCalled: func(identifier []byte) error { - called = true - return localError - }, - } + rrh.RequestStartOfEpochMetaBlock(0) + assert.True(t, called) + }) + t.Run("wrong resolver", func(t *testing.T) { + t.Parallel() - rrh, _ := NewResolverRequestHandler( - 
&dataRetrieverMocks.RequestersFinderStub{ - MetaChainRequesterCalled: func(baseTopic string) (requester dataRetriever.Requester, err error) { - return requesterMock, nil + called := false + rrh, _ := NewResolverRequestHandler( + &dataRetrieverMocks.RequestersFinderStub{ + MetaChainRequesterCalled: func(baseTopic string) (requester dataRetriever.Requester, err error) { + called = true + return &dataRetrieverMocks.RequesterStub{}, nil + }, }, - }, - &mock.RequestedItemsHandlerStub{}, - &mock.WhiteListHandlerStub{}, - 1, - 0, - time.Second, - ) + &mock.RequestedItemsHandlerStub{}, + &mock.WhiteListHandlerStub{}, + 1, + 0, + time.Second, + ) - rrh.RequestStartOfEpochMetaBlock(0) - assert.True(t, called) -} + rrh.RequestStartOfEpochMetaBlock(0) + assert.True(t, called) + }) + t.Run("RequestDataFromEpoch fails", func(t *testing.T) { + t.Parallel() -func TestRequestStartOfEpochMetaBlock_AddError(t *testing.T) { - t.Parallel() + called := false + requesterMock := &dataRetrieverMocks.EpochRequesterStub{ + RequestDataFromEpochCalled: func(identifier []byte) error { + called = true + return errExpected + }, + } - called := false - localError := errors.New("test error") - requesterMock := &dataRetrieverMocks.EpochRequesterStub{ - RequestDataFromEpochCalled: func(identifier []byte) error { - return nil - }, - } + rrh, _ := NewResolverRequestHandler( + &dataRetrieverMocks.RequestersFinderStub{ + MetaChainRequesterCalled: func(baseTopic string) (requester dataRetriever.Requester, err error) { + return requesterMock, nil + }, + }, + &mock.RequestedItemsHandlerStub{}, + &mock.WhiteListHandlerStub{}, + 1, + 0, + time.Second, + ) - rrh, _ := NewResolverRequestHandler( - &dataRetrieverMocks.RequestersFinderStub{ - MetaChainRequesterCalled: func(baseTopic string) (requester dataRetriever.Requester, err error) { - return requesterMock, nil + rrh.RequestStartOfEpochMetaBlock(0) + assert.True(t, called) + }) + t.Run("add error", func(t *testing.T) { + t.Parallel() + + called := false + requesterMock := &dataRetrieverMocks.EpochRequesterStub{ + RequestDataFromEpochCalled: func(identifier []byte) error { + return nil }, - }, - &mock.RequestedItemsHandlerStub{ - AddCalled: func(key string) error { - called = true - return localError + } + + rrh, _ := NewResolverRequestHandler( + &dataRetrieverMocks.RequestersFinderStub{ + MetaChainRequesterCalled: func(baseTopic string) (requester dataRetriever.Requester, err error) { + return requesterMock, nil + }, }, - }, - &mock.WhiteListHandlerStub{}, - 1, - 0, - time.Second, - ) + &mock.RequestedItemsHandlerStub{ + AddCalled: func(key string) error { + called = true + return errExpected + }, + }, + &mock.WhiteListHandlerStub{}, + 1, + 0, + time.Second, + ) - rrh.RequestStartOfEpochMetaBlock(0) - assert.True(t, called) + rrh.RequestStartOfEpochMetaBlock(0) + assert.True(t, called) + }) } func TestResolverRequestHandler_RequestTrieNodeRequestFails(t *testing.T) { @@ -1182,14 +1423,12 @@ func TestResolverRequestHandler_RequestTrieNodeNotAValidResolver(t *testing.T) { assert.True(t, called) } -//------- RequestPeerAuthentications - func TestResolverRequestHandler_RequestPeerAuthenticationsByHashes(t *testing.T) { t.Parallel() providedHashes := [][]byte{[]byte("h1"), []byte("h2")} providedShardId := uint32(15) - t.Run("CrossShardRequester returns error", func(t *testing.T) { + t.Run("MetaChainRequester returns error", func(t *testing.T) { t.Parallel() wasCalled := false @@ -1219,18 +1458,12 @@ func TestResolverRequestHandler_RequestPeerAuthenticationsByHashes(t *testing.T) 
t.Run("cast fails", func(t *testing.T) { t.Parallel() - wasCalled := false - mbRequester := &dataRetrieverMocks.RequesterStub{ - RequestDataFromHashCalled: func(hash []byte, epoch uint32) error { - wasCalled = true - return nil - }, - } + req := &dataRetrieverMocks.NonceRequesterStub{} rrh, _ := NewResolverRequestHandler( &dataRetrieverMocks.RequestersFinderStub{ MetaChainRequesterCalled: func(baseTopic string) (dataRetriever.Requester, error) { assert.Equal(t, common.PeerAuthenticationTopic, baseTopic) - return mbRequester, errExpected + return req, nil }, }, &mock.RequestedItemsHandlerStub{}, @@ -1241,7 +1474,6 @@ func TestResolverRequestHandler_RequestPeerAuthenticationsByHashes(t *testing.T) ) rrh.RequestPeerAuthenticationsByHashes(providedShardId, providedHashes) - assert.False(t, wasCalled) }) t.Run("RequestDataFromHashArray returns error", func(t *testing.T) { t.Parallel() @@ -1261,7 +1493,12 @@ func TestResolverRequestHandler_RequestPeerAuthenticationsByHashes(t *testing.T) return paRequester, nil }, }, - &mock.RequestedItemsHandlerStub{}, + &mock.RequestedItemsHandlerStub{ + AddCalled: func(key string) error { + require.Fail(t, "should not have been called") + return nil + }, + }, &mock.WhiteListHandlerStub{}, 1, 0, @@ -1311,7 +1548,33 @@ func TestResolverRequestHandler_RequestPeerAuthenticationsByHashes(t *testing.T) func TestResolverRequestHandler_RequestValidatorInfo(t *testing.T) { t.Parallel() + t.Run("hash already requested should work", func(t *testing.T) { + t.Parallel() + + providedHash := []byte("provided hash") + rrh, _ := NewResolverRequestHandler( + &dataRetrieverMocks.RequestersFinderStub{ + MetaChainRequesterCalled: func(baseTopic string) (requester dataRetriever.Requester, e error) { + require.Fail(t, "should not have been called") + return nil, nil + }, + }, + &mock.RequestedItemsHandlerStub{ + HasCalled: func(key string) bool { + return true + }, + }, + &mock.WhiteListHandlerStub{}, + 100, + 0, + time.Second, + ) + + rrh.RequestValidatorInfo(providedHash) + }) t.Run("MetaChainRequester returns error", func(t *testing.T) { + t.Parallel() + providedHash := []byte("provided hash") wasCalled := false res := &dataRetrieverMocks.RequesterStub{ @@ -1324,7 +1587,7 @@ func TestResolverRequestHandler_RequestValidatorInfo(t *testing.T) { rrh, _ := NewResolverRequestHandler( &dataRetrieverMocks.RequestersFinderStub{ MetaChainRequesterCalled: func(baseTopic string) (requester dataRetriever.Requester, e error) { - return res, errors.New("provided err") + return res, errExpected }, }, &mock.RequestedItemsHandlerStub{}, @@ -1337,7 +1600,39 @@ func TestResolverRequestHandler_RequestValidatorInfo(t *testing.T) { rrh.RequestValidatorInfo(providedHash) assert.False(t, wasCalled) }) + t.Run("RequestDataFromHash returns error", func(t *testing.T) { + t.Parallel() + + providedHash := []byte("provided hash") + res := &dataRetrieverMocks.RequesterStub{ + RequestDataFromHashCalled: func(hash []byte, epoch uint32) error { + return errExpected + }, + } + + rrh, _ := NewResolverRequestHandler( + &dataRetrieverMocks.RequestersFinderStub{ + MetaChainRequesterCalled: func(baseTopic string) (requester dataRetriever.Requester, e error) { + return res, nil + }, + }, + &mock.RequestedItemsHandlerStub{ + AddCalled: func(key string) error { + require.Fail(t, "should not have been called") + return nil + }, + }, + &mock.WhiteListHandlerStub{}, + 100, + 0, + time.Second, + ) + + rrh.RequestValidatorInfo(providedHash) + }) t.Run("should work", func(t *testing.T) { + t.Parallel() + providedHash := 
[]byte("provided hash") wasCalled := false res := &dataRetrieverMocks.RequesterStub{ @@ -1370,7 +1665,28 @@ func TestResolverRequestHandler_RequestValidatorInfo(t *testing.T) { func TestResolverRequestHandler_RequestValidatorsInfo(t *testing.T) { t.Parallel() + t.Run("no hash", func(t *testing.T) { + t.Parallel() + + rrh, _ := NewResolverRequestHandler( + &dataRetrieverMocks.RequestersFinderStub{ + MetaChainRequesterCalled: func(baseTopic string) (requester dataRetriever.Requester, e error) { + require.Fail(t, "should not have been called") + return nil, nil + }, + }, + &mock.RequestedItemsHandlerStub{}, + &mock.WhiteListHandlerStub{}, + 100, + 0, + time.Second, + ) + + rrh.RequestValidatorsInfo([][]byte{}) + }) t.Run("MetaChainRequester returns error", func(t *testing.T) { + t.Parallel() + providedHash := []byte("provided hash") wasCalled := false res := &dataRetrieverMocks.RequesterStub{ @@ -1383,7 +1699,7 @@ func TestResolverRequestHandler_RequestValidatorsInfo(t *testing.T) { rrh, _ := NewResolverRequestHandler( &dataRetrieverMocks.RequestersFinderStub{ MetaChainRequesterCalled: func(baseTopic string) (requester dataRetriever.Requester, e error) { - return res, errors.New("provided err") + return res, errExpected }, }, &mock.RequestedItemsHandlerStub{}, @@ -1396,7 +1712,39 @@ func TestResolverRequestHandler_RequestValidatorsInfo(t *testing.T) { rrh.RequestValidatorsInfo([][]byte{providedHash}) assert.False(t, wasCalled) }) + t.Run("RequestDataFromHashArray returns error", func(t *testing.T) { + t.Parallel() + + providedHash := []byte("provided hash") + res := &dataRetrieverMocks.HashSliceRequesterStub{ + RequestDataFromHashArrayCalled: func(hashes [][]byte, epoch uint32) error { + return errExpected + }, + } + + rrh, _ := NewResolverRequestHandler( + &dataRetrieverMocks.RequestersFinderStub{ + MetaChainRequesterCalled: func(baseTopic string) (requester dataRetriever.Requester, e error) { + return res, nil + }, + }, + &mock.RequestedItemsHandlerStub{ + AddCalled: func(key string) error { + require.Fail(t, "should not have been called") + return nil + }, + }, + &mock.WhiteListHandlerStub{}, + 100, + 0, + time.Second, + ) + + rrh.RequestValidatorsInfo([][]byte{providedHash}) + }) t.Run("cast fails", func(t *testing.T) { + t.Parallel() + providedHash := []byte("provided hash") mbRequester := &dataRetrieverMocks.NonceRequesterStub{} // uncastable to HashSliceRequester wasCalled := false @@ -1421,6 +1769,8 @@ func TestResolverRequestHandler_RequestValidatorsInfo(t *testing.T) { assert.False(t, wasCalled) }) t.Run("should work", func(t *testing.T) { + t.Parallel() + providedHashes := [][]byte{[]byte("provided hash 1"), []byte("provided hash 2")} wasCalled := false res := &dataRetrieverMocks.HashSliceRequesterStub{ @@ -1449,3 +1799,201 @@ func TestResolverRequestHandler_RequestValidatorsInfo(t *testing.T) { assert.True(t, wasCalled) }) } + +func TestResolverRequestHandler_RequestMiniblocks(t *testing.T) { + t.Parallel() + + t.Run("no hash should work", func(t *testing.T) { + t.Parallel() + + rrh, _ := NewResolverRequestHandler( + &dataRetrieverMocks.RequestersFinderStub{ + CrossShardRequesterCalled: func(baseTopic string, crossShard uint32) (dataRetriever.Requester, error) { + require.Fail(t, "should have not been called") + return nil, nil + }, + }, + &mock.RequestedItemsHandlerStub{}, + &mock.WhiteListHandlerStub{}, + 100, + 0, + time.Second, + ) + + rrh.RequestMiniBlocks(0, [][]byte{}) + }) + t.Run("CrossShardRequester fails should work", func(t *testing.T) { + t.Parallel() + + rrh, _ := 
NewResolverRequestHandler( + &dataRetrieverMocks.RequestersFinderStub{ + CrossShardRequesterCalled: func(baseTopic string, crossShard uint32) (dataRetriever.Requester, error) { + return nil, errExpected + }, + }, + &mock.RequestedItemsHandlerStub{}, + &mock.WhiteListHandlerStub{}, + 100, + 0, + time.Second, + ) + + rrh.RequestMiniBlocks(0, [][]byte{[]byte("mbHash")}) + }) + t.Run("cast fails should work", func(t *testing.T) { + t.Parallel() + + nonceRequester := &dataRetrieverMocks.NonceRequesterStub{} // uncastable to HashSliceRequester + rrh, _ := NewResolverRequestHandler( + &dataRetrieverMocks.RequestersFinderStub{ + CrossShardRequesterCalled: func(baseTopic string, crossShard uint32) (dataRetriever.Requester, error) { + return nonceRequester, nil + }, + }, + &mock.RequestedItemsHandlerStub{}, + &mock.WhiteListHandlerStub{ + AddCalled: func(keys [][]byte) { + require.Fail(t, "should have not been called") + }, + }, + 100, + 0, + time.Second, + ) + + rrh.RequestMiniBlocks(0, [][]byte{[]byte("mbHash")}) + }) + t.Run("request data fails should work", func(t *testing.T) { + t.Parallel() + + mbRequester := &dataRetrieverMocks.HashSliceRequesterStub{ + RequestDataFromHashArrayCalled: func(hashes [][]byte, epoch uint32) error { + return errExpected + }, + } + rrh, _ := NewResolverRequestHandler( + &dataRetrieverMocks.RequestersFinderStub{ + CrossShardRequesterCalled: func(baseTopic string, crossShard uint32) (dataRetriever.Requester, error) { + return mbRequester, nil + }, + }, + &mock.RequestedItemsHandlerStub{}, + &mock.WhiteListHandlerStub{}, + 100, + 0, + time.Second, + ) + + rrh.RequestMiniBlocks(0, [][]byte{[]byte("mbHash")}) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + rrh, _ := NewResolverRequestHandler( + &dataRetrieverMocks.RequestersFinderStub{ + CrossShardRequesterCalled: func(baseTopic string, crossShard uint32) (dataRetriever.Requester, error) { + return &dataRetrieverMocks.HashSliceRequesterStub{}, nil + }, + }, + &mock.RequestedItemsHandlerStub{}, + &mock.WhiteListHandlerStub{}, + 100, + 0, + time.Second, + ) + + rrh.RequestMiniBlocks(0, [][]byte{[]byte("mbHash")}) + }) +} + +func TestResolverRequestHandler_RequestInterval(t *testing.T) { + t.Parallel() + + rrh, _ := NewResolverRequestHandler( + &dataRetrieverMocks.RequestersFinderStub{}, + &mock.RequestedItemsHandlerStub{}, + &mock.WhiteListHandlerStub{}, + 100, + 0, + time.Second, + ) + require.Equal(t, time.Second, rrh.RequestInterval()) +} + +func TestResolverRequestHandler_NumPeersToQuery(t *testing.T) { + t.Parallel() + + t.Run("get returns error", func(t *testing.T) { + t.Parallel() + + rrh, _ := NewResolverRequestHandler( + &dataRetrieverMocks.RequestersFinderStub{ + GetCalled: func(key string) (dataRetriever.Requester, error) { + return nil, errExpected + }, + }, + &mock.RequestedItemsHandlerStub{}, + &mock.WhiteListHandlerStub{}, + 100, + 0, + time.Second, + ) + + _, _, err := rrh.GetNumPeersToQuery("key") + require.Equal(t, errExpected, err) + + err = rrh.SetNumPeersToQuery("key", 1, 1) + require.Equal(t, errExpected, err) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + req := &dataRetrieverMocks.RequesterStub{ + SetNumPeersToQueryCalled: func(intra int, cross int) { + require.Equal(t, 1, intra) + require.Equal(t, 1, cross) + }, + NumPeersToQueryCalled: func() (int, int) { + return 10, 10 + }, + } + + rrh, _ := NewResolverRequestHandler( + &dataRetrieverMocks.RequestersFinderStub{ + GetCalled: func(key string) (dataRetriever.Requester, error) { + return req, nil + }, + }, + 
&mock.RequestedItemsHandlerStub{}, + &mock.WhiteListHandlerStub{}, + 100, + 0, + time.Second, + ) + + intra, cross, err := rrh.GetNumPeersToQuery("key") + require.NoError(t, err) + require.Equal(t, 10, intra) + require.Equal(t, 10, cross) + + err = rrh.SetNumPeersToQuery("key", 1, 1) + require.NoError(t, err) + }) +} + +func TestResolverRequestHandler_IsInterfaceNil(t *testing.T) { + t.Parallel() + + var rrh *resolverRequestHandler + require.True(t, rrh.IsInterfaceNil()) + + rrh, _ = NewResolverRequestHandler( + &dataRetrieverMocks.RequestersFinderStub{}, + &mock.RequestedItemsHandlerStub{}, + &mock.WhiteListHandlerStub{}, + 100, + 0, + time.Second, + ) + require.False(t, rrh.IsInterfaceNil()) +} diff --git a/dataRetriever/resolvers/epochproviders/disabled/disabledEpochProvider_test.go b/dataRetriever/resolvers/epochproviders/disabled/disabledEpochProvider_test.go new file mode 100644 index 00000000000..b4b5fb95fac --- /dev/null +++ b/dataRetriever/resolvers/epochproviders/disabled/disabledEpochProvider_test.go @@ -0,0 +1,19 @@ +package disabled + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestEpochProvider(t *testing.T) { + t.Parallel() + + var ep *epochProvider + require.True(t, ep.IsInterfaceNil()) + + ep = NewEpochProvider() + require.False(t, ep.IsInterfaceNil()) + require.True(t, ep.EpochIsActiveInNetwork(1)) + ep.EpochConfirmed(0, 0) +} diff --git a/dataRetriever/resolvers/headerResolver_test.go b/dataRetriever/resolvers/headerResolver_test.go index 9b36dc5d0c7..e71fff039bd 100644 --- a/dataRetriever/resolvers/headerResolver_test.go +++ b/dataRetriever/resolvers/headerResolver_test.go @@ -3,6 +3,8 @@ package resolvers_test import ( "bytes" "errors" + "fmt" + "math" "sync" "testing" @@ -194,6 +196,32 @@ func TestHeaderResolver_ProcessReceivedMessage_WrongIdentifierStartBlock(t *test assert.True(t, arg.Throttler.(*mock.ThrottlerStub).EndWasCalled()) } +func TestHeaderResolver_ProcessReceivedMessageEpochTypeUnknownEpochShouldWork(t *testing.T) { + t.Parallel() + + arg := createMockArgHeaderResolver() + arg.HdrStorage = &storageStubs.StorerStub{ + SearchFirstCalled: func(key []byte) (i []byte, e error) { + return []byte("hash"), nil + }, + } + wasSent := false + arg.SenderResolver = &mock.TopicResolverSenderStub{ + SendCalled: func(buff []byte, peer core.PeerID) error { + wasSent = true + return nil + }, + } + hdrRes, _ := resolvers.NewHeaderResolver(arg) + + requestedData := []byte(fmt.Sprintf("epoch_%d", math.MaxUint32)) + err := hdrRes.ProcessReceivedMessage(createRequestMsg(dataRetriever.EpochType, requestedData), "") + assert.NoError(t, err) + assert.True(t, wasSent) + assert.True(t, arg.Throttler.(*mock.ThrottlerStub).StartWasCalled()) + assert.True(t, arg.Throttler.(*mock.ThrottlerStub).EndWasCalled()) +} + func TestHeaderResolver_ProcessReceivedMessage_Ok(t *testing.T) { t.Parallel() @@ -260,6 +288,43 @@ func TestHeaderResolver_ValidateRequestHashTypeFoundInHdrPoolShouldSearchAndSend assert.True(t, arg.Throttler.(*mock.ThrottlerStub).EndWasCalled()) } +func TestHeaderResolver_ValidateRequestHashTypeFoundInHdrPoolShouldSearchAndSendFullHistory(t *testing.T) { + t.Parallel() + + requestedData := []byte("aaaa") + + searchWasCalled := false + sendWasCalled := false + + headers := &mock.HeadersCacherStub{} + + headers.GetHeaderByHashCalled = func(hash []byte) (handler data.HeaderHandler, e error) { + if bytes.Equal(requestedData, hash) { + searchWasCalled = true + return &block.Header{}, nil + } + return nil, errors.New("0") + } + + arg := 
createMockArgHeaderResolver() + arg.IsFullHistoryNode = true + arg.SenderResolver = &mock.TopicResolverSenderStub{ + SendCalled: func(buff []byte, peer core.PeerID) error { + sendWasCalled = true + return nil + }, + } + arg.Headers = headers + hdrRes, _ := resolvers.NewHeaderResolver(arg) + + err := hdrRes.ProcessReceivedMessage(createRequestMsg(dataRetriever.HashType, requestedData), fromConnectedPeerId) + assert.Nil(t, err) + assert.True(t, searchWasCalled) + assert.True(t, sendWasCalled) + assert.True(t, arg.Throttler.(*mock.ThrottlerStub).StartWasCalled()) + assert.True(t, arg.Throttler.(*mock.ThrottlerStub).EndWasCalled()) +} + func TestHeaderResolver_ProcessReceivedMessageRequestHashTypeFoundInHdrPoolMarshalizerFailsShouldErr(t *testing.T) { t.Parallel() @@ -542,6 +607,102 @@ func TestHeaderResolver_ProcessReceivedMessageRequestNonceTypeFoundInHdrNoncePoo assert.True(t, arg.Throttler.(*mock.ThrottlerStub).EndWasCalled()) } +func TestHeaderResolver_ProcessReceivedMessageRequestNonceTypeFoundInHdrNoncePoolButMarshalFailsShouldError(t *testing.T) { + t.Parallel() + + requestedNonce := uint64(67) + targetShardId := uint32(9) + wasResolved := false + + headers := &mock.HeadersCacherStub{} + headers.GetHeaderByHashCalled = func(hash []byte) (handler data.HeaderHandler, e error) { + return nil, errors.New("err") + } + headers.GetHeaderByNonceAndShardIdCalled = func(hdrNonce uint64, shardId uint32) (handlers []data.HeaderHandler, i [][]byte, e error) { + wasResolved = true + return []data.HeaderHandler{&block.Header{}, &block.Header{}}, [][]byte{[]byte("1"), []byte("2")}, nil + } + + arg := createMockArgHeaderResolver() + arg.SenderResolver = &mock.TopicResolverSenderStub{ + SendCalled: func(buff []byte, peer core.PeerID) error { + assert.Fail(t, "should not have been called") + return nil + }, + TargetShardIDCalled: func() uint32 { + return targetShardId + }, + } + arg.Headers = headers + arg.HeadersNoncesStorage = &storageStubs.StorerStub{ + GetFromEpochCalled: func(key []byte, epoch uint32) ([]byte, error) { + return nil, errKeyNotFound + }, + SearchFirstCalled: func(key []byte) (i []byte, e error) { + return nil, errKeyNotFound + }, + } + initialMarshaller := arg.Marshaller + arg.Marshaller = &mock.MarshalizerStub{ + UnmarshalCalled: initialMarshaller.Unmarshal, + MarshalCalled: func(obj interface{}) ([]byte, error) { + return nil, expectedErr + }, + } + hdrRes, _ := resolvers.NewHeaderResolver(arg) + + err := hdrRes.ProcessReceivedMessage( + createRequestMsg(dataRetriever.NonceType, arg.NonceConverter.ToByteSlice(requestedNonce)), + fromConnectedPeerId, + ) + + assert.True(t, errors.Is(err, expectedErr)) + assert.True(t, wasResolved) + assert.True(t, arg.Throttler.(*mock.ThrottlerStub).StartWasCalled()) + assert.True(t, arg.Throttler.(*mock.ThrottlerStub).EndWasCalled()) +} + +func TestHeaderResolver_ProcessReceivedMessageRequestNonceTypeNotFoundInHdrNoncePoolShouldRetFromPoolAndSend(t *testing.T) { + t.Parallel() + + requestedNonce := uint64(67) + wasSend := false + hash := []byte("aaaa") + + headers := &mock.HeadersCacherStub{} + headers.GetHeaderByHashCalled = func(hash []byte) (handler data.HeaderHandler, e error) { + return &block.Header{}, nil + } + headers.GetHeaderByNonceAndShardIdCalled = func(hdrNonce uint64, shardId uint32) (handlers []data.HeaderHandler, i [][]byte, e error) { + assert.Fail(t, "should not have been called") + return nil, nil, nil + } + arg := createMockArgHeaderResolver() + arg.SenderResolver = &mock.TopicResolverSenderStub{ + SendCalled: func(buff []byte, 
peer core.PeerID) error { + wasSend = true + return nil + }, + } + arg.Headers = headers + arg.HeadersNoncesStorage = &storageStubs.StorerStub{ + SearchFirstCalled: func(key []byte) (i []byte, e error) { + return hash, nil + }, + } + hdrRes, _ := resolvers.NewHeaderResolver(arg) + + err := hdrRes.ProcessReceivedMessage( + createRequestMsg(dataRetriever.NonceType, arg.NonceConverter.ToByteSlice(requestedNonce)), + fromConnectedPeerId, + ) + + assert.Nil(t, err) + assert.True(t, wasSend) + assert.True(t, arg.Throttler.(*mock.ThrottlerStub).StartWasCalled()) + assert.True(t, arg.Throttler.(*mock.ThrottlerStub).EndWasCalled()) +} + func TestHeaderResolver_ProcessReceivedMessageRequestNonceTypeFoundInHdrNoncePoolCheckRetErr(t *testing.T) { t.Parallel() diff --git a/dataRetriever/resolvers/messageProcessor_test.go b/dataRetriever/resolvers/messageProcessor_test.go index b4a10bc24b0..05fb0dcd127 100644 --- a/dataRetriever/resolvers/messageProcessor_test.go +++ b/dataRetriever/resolvers/messageProcessor_test.go @@ -18,6 +18,16 @@ const fromConnectedPeer = core.PeerID("from connected peer") //------- canProcessMessage +func TestMessageProcessor_CanProcessNilMessageShouldErr(t *testing.T) { + t.Parallel() + + mp := &messageProcessor{} + + err := mp.canProcessMessage(nil, "") + + assert.True(t, errors.Is(err, dataRetriever.ErrNilMessage)) +} + func TestMessageProcessor_CanProcessErrorsShouldErr(t *testing.T) { t.Parallel() diff --git a/dataRetriever/resolvers/miniblockResolver_test.go b/dataRetriever/resolvers/miniblockResolver_test.go index 94d82e2bf92..1b336c50396 100644 --- a/dataRetriever/resolvers/miniblockResolver_test.go +++ b/dataRetriever/resolvers/miniblockResolver_test.go @@ -95,6 +95,17 @@ func TestNewMiniblockResolver_NilThrottlerShouldErr(t *testing.T) { assert.True(t, check.IfNil(mbRes)) } +func TestNewMiniblockResolver_NilDataPackerShouldErr(t *testing.T) { + t.Parallel() + + arg := createMockArgMiniblockResolver() + arg.DataPacker = nil + mbRes, err := resolvers.NewMiniblockResolver(arg) + + assert.Equal(t, dataRetriever.ErrNilDataPacker, err) + assert.True(t, check.IfNil(mbRes)) +} + func TestNewMiniblockResolver_OkValsShouldWork(t *testing.T) { t.Parallel() @@ -250,6 +261,147 @@ func TestMiniblockResolver_ProcessReceivedMessageFoundInPoolMarshalizerFailShoul assert.True(t, arg.Throttler.(*mock.ThrottlerStub).EndWasCalled()) } +func TestMiniblockResolver_ProcessReceivedMessageUnmarshalFails(t *testing.T) { + t.Parallel() + + goodMarshalizer := &mock.MarshalizerMock{} + cnt := 0 + marshalizer := &mock.MarshalizerStub{ + MarshalCalled: goodMarshalizer.Marshal, + UnmarshalCalled: func(obj interface{}, buff []byte) error { + cnt++ + if cnt > 1 { + return expectedErr + } + return goodMarshalizer.Unmarshal(obj, buff) + }, + } + mbHash := []byte("aaa") + miniBlockList := make([][]byte, 0) + miniBlockList = append(miniBlockList, mbHash) + requestedBuff, merr := goodMarshalizer.Marshal(&batch.Batch{Data: miniBlockList}) + + assert.Nil(t, merr) + + cache := testscommon.NewCacherStub() + cache.PeekCalled = func(key []byte) (value interface{}, ok bool) { + return nil, false + } + + arg := createMockArgMiniblockResolver() + arg.MiniBlockPool = cache + arg.MiniBlockStorage = &storageStubs.StorerStub{ + GetCalled: func(key []byte) (i []byte, e error) { + body := block.MiniBlock{} + buff, _ := goodMarshalizer.Marshal(&body) + return buff, nil + }, + } + arg.Marshaller = marshalizer + arg.DataPacker = &mock.DataPackerStub{ + PackDataInChunksCalled: func(data [][]byte, limit int) ([][]byte, error) { 
+ assert.Fail(t, "should not have been called") + return nil, nil + }, + } + mbRes, _ := resolvers.NewMiniblockResolver(arg) + + err := mbRes.ProcessReceivedMessage( + createRequestMsg(dataRetriever.HashArrayType, requestedBuff), + fromConnectedPeerId, + ) + + assert.True(t, errors.Is(err, expectedErr)) + assert.True(t, arg.Throttler.(*mock.ThrottlerStub).StartWasCalled()) + assert.True(t, arg.Throttler.(*mock.ThrottlerStub).EndWasCalled()) +} + +func TestMiniblockResolver_ProcessReceivedMessagePackDataInChunksFails(t *testing.T) { + t.Parallel() + + goodMarshalizer := &mock.MarshalizerMock{} + mbHash := []byte("aaa") + miniBlockList := make([][]byte, 0) + miniBlockList = append(miniBlockList, mbHash) + requestedBuff, merr := goodMarshalizer.Marshal(&batch.Batch{Data: miniBlockList}) + + assert.Nil(t, merr) + + cache := testscommon.NewCacherStub() + cache.PeekCalled = func(key []byte) (value interface{}, ok bool) { + return nil, false + } + + arg := createMockArgMiniblockResolver() + arg.MiniBlockPool = cache + arg.MiniBlockStorage = &storageStubs.StorerStub{ + GetCalled: func(key []byte) (i []byte, e error) { + body := block.MiniBlock{} + buff, _ := goodMarshalizer.Marshal(&body) + return buff, nil + }, + } + arg.Marshaller = goodMarshalizer + arg.DataPacker = &mock.DataPackerStub{ + PackDataInChunksCalled: func(data [][]byte, limit int) ([][]byte, error) { + return nil, expectedErr + }, + } + mbRes, _ := resolvers.NewMiniblockResolver(arg) + + err := mbRes.ProcessReceivedMessage( + createRequestMsg(dataRetriever.HashArrayType, requestedBuff), + fromConnectedPeerId, + ) + + assert.True(t, errors.Is(err, expectedErr)) + assert.True(t, arg.Throttler.(*mock.ThrottlerStub).StartWasCalled()) + assert.True(t, arg.Throttler.(*mock.ThrottlerStub).EndWasCalled()) +} + +func TestMiniblockResolver_ProcessReceivedMessageSendFails(t *testing.T) { + t.Parallel() + + goodMarshalizer := &mock.MarshalizerMock{} + mbHash := []byte("aaa") + miniBlockList := make([][]byte, 0) + miniBlockList = append(miniBlockList, mbHash) + requestedBuff, merr := goodMarshalizer.Marshal(&batch.Batch{Data: miniBlockList}) + + assert.Nil(t, merr) + + cache := testscommon.NewCacherStub() + cache.PeekCalled = func(key []byte) (value interface{}, ok bool) { + return nil, false + } + + arg := createMockArgMiniblockResolver() + arg.MiniBlockPool = cache + arg.MiniBlockStorage = &storageStubs.StorerStub{ + GetCalled: func(key []byte) (i []byte, e error) { + body := block.MiniBlock{} + buff, _ := goodMarshalizer.Marshal(&body) + return buff, nil + }, + } + arg.Marshaller = goodMarshalizer + arg.SenderResolver = &mock.TopicResolverSenderStub{ + SendCalled: func(buff []byte, peer core.PeerID) error { + return expectedErr + }, + } + mbRes, _ := resolvers.NewMiniblockResolver(arg) + + err := mbRes.ProcessReceivedMessage( + createRequestMsg(dataRetriever.HashArrayType, requestedBuff), + fromConnectedPeerId, + ) + + assert.True(t, errors.Is(err, expectedErr)) + assert.True(t, arg.Throttler.(*mock.ThrottlerStub).StartWasCalled()) + assert.True(t, arg.Throttler.(*mock.ThrottlerStub).EndWasCalled()) +} + func TestMiniblockResolver_ProcessReceivedMessageNotFoundInPoolShouldRetFromStorageAndSend(t *testing.T) { t.Parallel() @@ -297,6 +449,57 @@ func TestMiniblockResolver_ProcessReceivedMessageNotFoundInPoolShouldRetFromStor assert.True(t, arg.Throttler.(*mock.ThrottlerStub).EndWasCalled()) } +func TestMiniblockResolver_ProcessReceivedMessageMarshalFails(t *testing.T) { + t.Parallel() + + mbHash := []byte("aaa") + marshalizer := 
&mock.MarshalizerMock{} + miniBlockList := make([][]byte, 0) + miniBlockList = append(miniBlockList, mbHash) + requestedBuff, _ := marshalizer.Marshal(&batch.Batch{Data: miniBlockList}) + + wasResolved := false + + cache := testscommon.NewCacherStub() + cache.PeekCalled = func(key []byte) (value interface{}, ok bool) { + return nil, false + } + + store := &storageStubs.StorerStub{} + store.SearchFirstCalled = func(key []byte) (i []byte, e error) { + wasResolved = true + mb, _ := marshalizer.Marshal(&block.MiniBlock{}) + return mb, nil + } + + arg := createMockArgMiniblockResolver() + arg.SenderResolver = &mock.TopicResolverSenderStub{ + SendCalled: func(buff []byte, peer core.PeerID) error { + assert.Fail(t, "should have not been called") + return nil + }, + } + arg.MiniBlockPool = cache + arg.MiniBlockStorage = store + arg.Marshaller = &mock.MarshalizerStub{ + UnmarshalCalled: marshalizer.Unmarshal, + MarshalCalled: func(obj interface{}) ([]byte, error) { + return nil, expectedErr + }, + } + mbRes, _ := resolvers.NewMiniblockResolver(arg) + + err := mbRes.ProcessReceivedMessage( + createRequestMsg(dataRetriever.HashType, requestedBuff), + fromConnectedPeerId, + ) + + assert.True(t, errors.Is(err, expectedErr)) + assert.True(t, wasResolved) + assert.True(t, arg.Throttler.(*mock.ThrottlerStub).StartWasCalled()) + assert.True(t, arg.Throttler.(*mock.ThrottlerStub).EndWasCalled()) +} + func TestMiniblockResolver_ProcessReceivedMessageMissingDataShouldNotSend(t *testing.T) { t.Parallel() diff --git a/dataRetriever/resolvers/peerAuthenticationResolver_test.go b/dataRetriever/resolvers/peerAuthenticationResolver_test.go index 22b75093a4a..962d50be2ec 100644 --- a/dataRetriever/resolvers/peerAuthenticationResolver_test.go +++ b/dataRetriever/resolvers/peerAuthenticationResolver_test.go @@ -331,16 +331,20 @@ func TestPeerAuthenticationResolver_ProcessReceivedMessage(t *testing.T) { pk1 := "pk01" pk2 := "pk02" + pk3 := "pk03" providedKeys := make(map[string]interface{}) providedKeys[pk1] = createMockPeerAuthenticationObject() providedKeys[pk2] = createMockPeerAuthenticationObject() + providedKeys[pk3] = createMockPeerAuthenticationObject() pks := make([][]byte, 0) pks = append(pks, []byte(pk1)) pks = append(pks, []byte(pk2)) + pks = append(pks, []byte(pk3)) hashes := make([][]byte, 0) hashes = append(hashes, []byte("pk01")) // exists in cache hashes = append(hashes, []byte("pk1")) // no entries + hashes = append(hashes, []byte("pk03")) // unmarshal fails providedHashes, err := arg.Marshaller.Marshal(batch.Batch{Data: hashes}) assert.Nil(t, err) @@ -366,7 +370,18 @@ func TestPeerAuthenticationResolver_ProcessReceivedMessage(t *testing.T) { }, } arg.DataPacker, _ = partitioning.NewSizeDataPacker(arg.Marshaller) - + initialMarshaller := arg.Marshaller + cnt := 0 + arg.Marshaller = &mock.MarshalizerStub{ + MarshalCalled: initialMarshaller.Marshal, + UnmarshalCalled: func(obj interface{}, buff []byte) error { + cnt++ + if cnt == 4 { // pk03 + return expectedErr + } + return initialMarshaller.Unmarshal(obj, buff) + }, + } res, err := resolvers.NewPeerAuthenticationResolver(arg) assert.Nil(t, err) assert.False(t, res.IsInterfaceNil()) @@ -375,6 +390,31 @@ func TestPeerAuthenticationResolver_ProcessReceivedMessage(t *testing.T) { assert.Nil(t, err) assert.True(t, wasSent) }) + t.Run("resolveMultipleHashesRequest: PackDataInChunks returns error", func(t *testing.T) { + t.Parallel() + + cache := testscommon.NewCacherStub() + cache.PeekCalled = func(key []byte) (value interface{}, ok bool) { + return 
createMockPeerAuthenticationObject(), true + } + + arg := createMockArgPeerAuthenticationResolver() + arg.PeerAuthenticationPool = cache + arg.DataPacker = &mock.DataPackerStub{ + PackDataInChunksCalled: func(data [][]byte, limit int) ([][]byte, error) { + return nil, expectedErr + }, + } + res, err := resolvers.NewPeerAuthenticationResolver(arg) + assert.Nil(t, err) + assert.False(t, res.IsInterfaceNil()) + + hashes := getKeysSlice() + providedHashes, err := arg.Marshaller.Marshal(batch.Batch{Data: hashes}) + assert.Nil(t, err) + err = res.ProcessReceivedMessage(createRequestMsg(dataRetriever.HashArrayType, providedHashes), fromConnectedPeer) + assert.True(t, errors.Is(err, expectedErr)) + }) t.Run("resolveMultipleHashesRequest: Send returns error", func(t *testing.T) { t.Parallel() diff --git a/dataRetriever/resolvers/transactionResolver_test.go b/dataRetriever/resolvers/transactionResolver_test.go index 13d1323c68c..d75d2192789 100644 --- a/dataRetriever/resolvers/transactionResolver_test.go +++ b/dataRetriever/resolvers/transactionResolver_test.go @@ -269,6 +269,51 @@ func TestTxResolver_ProcessReceivedMessageFoundInTxPoolMarshalizerFailShouldRetN assert.True(t, arg.Throttler.(*mock.ThrottlerStub).EndWasCalled()) } +func TestTxResolver_ProcessReceivedMessageBatchMarshalFailShouldRetNilAndErr(t *testing.T) { + t.Parallel() + + marshalizerMock := &mock.MarshalizerMock{} + cnt := 0 + marshalizerStub := &mock.MarshalizerStub{ + MarshalCalled: func(obj interface{}) (i []byte, e error) { + cnt++ + if cnt > 1 { + return nil, expectedErr + } + return marshalizerMock.Marshal(obj) + }, + UnmarshalCalled: func(obj interface{}, buff []byte) error { + return marshalizerMock.Unmarshal(obj, buff) + }, + } + txReturned := &transaction.Transaction{ + Nonce: 10, + } + txPool := testscommon.NewShardedDataStub() + txPool.SearchFirstDataCalled = func(key []byte) (value interface{}, ok bool) { + if bytes.Equal([]byte("aaa"), key) { + return txReturned, true + } + + return nil, false + } + + arg := createMockArgTxResolver() + arg.TxPool = txPool + arg.Marshaller = marshalizerStub + txRes, _ := resolvers.NewTxResolver(arg) + + data, _ := marshalizerMock.Marshal(&dataRetriever.RequestData{Type: dataRetriever.HashType, Value: []byte("aaa")}) + + msg := &p2pmocks.P2PMessageMock{DataField: data} + + err := txRes.ProcessReceivedMessage(msg, connectedPeerId) + + assert.True(t, errors.Is(err, expectedErr)) + assert.True(t, arg.Throttler.(*mock.ThrottlerStub).StartWasCalled()) + assert.True(t, arg.Throttler.(*mock.ThrottlerStub).EndWasCalled()) +} + func TestTxResolver_ProcessReceivedMessageFoundInTxStorageShouldRetValAndSend(t *testing.T) { t.Parallel() @@ -480,6 +525,83 @@ func TestTxResolver_ProcessReceivedMessageRequestedTwoSmallTransactionsFoundOnly assert.True(t, arg.Throttler.(*mock.ThrottlerStub).EndWasCalled()) } +func TestTxResolver_ProcessReceivedMessageHashArrayUnmarshalFails(t *testing.T) { + t.Parallel() + + arg := createMockArgTxResolver() + marshalizer := arg.Marshaller + cnt := 0 + arg.Marshaller = &mock.MarshalizerStub{ + UnmarshalCalled: func(obj interface{}, buff []byte) error { + cnt++ + if cnt > 1 { + return expectedErr + } + return marshalizer.Unmarshal(obj, buff) + }, + } + txRes, _ := resolvers.NewTxResolver(arg) + + data, _ := marshalizer.Marshal(&dataRetriever.RequestData{Type: dataRetriever.HashArrayType, Value: []byte("buff")}) + msg := &p2pmocks.P2PMessageMock{DataField: data} + + err := txRes.ProcessReceivedMessage(msg, connectedPeerId) + + assert.True(t, errors.Is(err, expectedErr)) + 
assert.True(t, arg.Throttler.(*mock.ThrottlerStub).StartWasCalled()) + assert.True(t, arg.Throttler.(*mock.ThrottlerStub).EndWasCalled()) +} + +func TestTxResolver_ProcessReceivedMessageHashArrayPackDataInChunksFails(t *testing.T) { + t.Parallel() + + txHash1 := []byte("txHash1") + txHash2 := []byte("txHash2") + + arg := createMockArgTxResolver() + arg.DataPacker = &mock.DataPackerStub{ + PackDataInChunksCalled: func(data [][]byte, limit int) ([][]byte, error) { + return nil, expectedErr + }, + } + txRes, _ := resolvers.NewTxResolver(arg) + + buff, _ := arg.Marshaller.Marshal(&batch.Batch{Data: [][]byte{txHash1, txHash2}}) + data, _ := arg.Marshaller.Marshal(&dataRetriever.RequestData{Type: dataRetriever.HashArrayType, Value: buff}) + msg := &p2pmocks.P2PMessageMock{DataField: data} + + err := txRes.ProcessReceivedMessage(msg, connectedPeerId) + + assert.True(t, errors.Is(err, expectedErr)) + assert.True(t, arg.Throttler.(*mock.ThrottlerStub).StartWasCalled()) + assert.True(t, arg.Throttler.(*mock.ThrottlerStub).EndWasCalled()) +} + +func TestTxResolver_ProcessReceivedMessageHashArraySendFails(t *testing.T) { + t.Parallel() + + txHash1 := []byte("txHash1") + txHash2 := []byte("txHash2") + + arg := createMockArgTxResolver() + arg.SenderResolver = &mock.TopicResolverSenderStub{ + SendCalled: func(buff []byte, peer core.PeerID) error { + return expectedErr + }, + } + txRes, _ := resolvers.NewTxResolver(arg) + + buff, _ := arg.Marshaller.Marshal(&batch.Batch{Data: [][]byte{txHash1, txHash2}}) + data, _ := arg.Marshaller.Marshal(&dataRetriever.RequestData{Type: dataRetriever.HashArrayType, Value: buff}) + msg := &p2pmocks.P2PMessageMock{DataField: data} + + err := txRes.ProcessReceivedMessage(msg, connectedPeerId) + + assert.True(t, errors.Is(err, expectedErr)) + assert.True(t, arg.Throttler.(*mock.ThrottlerStub).StartWasCalled()) + assert.True(t, arg.Throttler.(*mock.ThrottlerStub).EndWasCalled()) +} + func TestTxResolver_Close(t *testing.T) { t.Parallel() diff --git a/dataRetriever/resolvers/trieNodeResolver_test.go b/dataRetriever/resolvers/trieNodeResolver_test.go index e3281c9139e..dd7325d533b 100644 --- a/dataRetriever/resolvers/trieNodeResolver_test.go +++ b/dataRetriever/resolvers/trieNodeResolver_test.go @@ -249,6 +249,49 @@ func TestTrieNodeResolver_ProcessReceivedMessageTrieErrorsShouldErr(t *testing.T assert.True(t, arg.Throttler.(*mock.ThrottlerStub).EndWasCalled()) } +func TestTrieNodeResolver_ProcessReceivedMessageMultipleHashesUnmarshalFails(t *testing.T) { + t.Parallel() + + arg := createMockArgTrieNodeResolver() + initialMarshaller := arg.Marshaller + cnt := 0 + arg.Marshaller = &mock.MarshalizerStub{ + MarshalCalled: initialMarshaller.Marshal, + UnmarshalCalled: func(obj interface{}, buff []byte) error { + cnt++ + if cnt > 1 { + return expectedErr + } + return initialMarshaller.Unmarshal(obj, buff) + }, + } + arg.TrieDataGetter = &trieMock.TrieStub{ + GetSerializedNodeCalled: func(_ []byte) ([]byte, error) { + assert.Fail(t, "should have not called send") + return nil, nil + }, + } + tnRes, _ := resolvers.NewTrieNodeResolver(arg) + + b := &batch.Batch{ + Data: [][]byte{[]byte("hash1")}, + } + buffBatch, _ := arg.Marshaller.Marshal(b) + + data, _ := arg.Marshaller.Marshal( + &dataRetriever.RequestData{ + Type: dataRetriever.HashArrayType, + Value: buffBatch, + }, + ) + msg := &p2pmocks.P2PMessageMock{DataField: data} + + err := tnRes.ProcessReceivedMessage(msg, fromConnectedPeer) + assert.Equal(t, expectedErr, err) + assert.True(t, 
arg.Throttler.(*mock.ThrottlerStub).StartWasCalled()) + assert.True(t, arg.Throttler.(*mock.ThrottlerStub).EndWasCalled()) +} + func TestTrieNodeResolver_ProcessReceivedMessageMultipleHashesGetSerializedNodeErrorsShouldNotSend(t *testing.T) { t.Parallel() diff --git a/dataRetriever/resolvers/validatorInfoResolver_test.go b/dataRetriever/resolvers/validatorInfoResolver_test.go index 88d115de3cb..0d5916c710e 100644 --- a/dataRetriever/resolvers/validatorInfoResolver_test.go +++ b/dataRetriever/resolvers/validatorInfoResolver_test.go @@ -379,7 +379,7 @@ func TestValidatorInfoResolver_ProcessReceivedMessage(t *testing.T) { require.NotNil(t, err) assert.True(t, strings.Contains(err.Error(), dataRetriever.ErrValidatorInfoNotFound.Error())) }) - t.Run("pack data in chuncks returns error", func(t *testing.T) { + t.Run("pack data in chunks returns error", func(t *testing.T) { t.Parallel() args := createMockArgValidatorInfoResolver() @@ -408,6 +408,40 @@ func TestValidatorInfoResolver_ProcessReceivedMessage(t *testing.T) { err := res.ProcessReceivedMessage(createRequestMsg(dataRetriever.HashArrayType, buff), fromConnectedPeer) assert.Equal(t, expectedErr, err) }) + t.Run("send returns error", func(t *testing.T) { + t.Parallel() + + numOfProvidedData := 3 + providedHashes := make([][]byte, 0) + providedData := make([]state.ValidatorInfo, 0) + for i := 0; i < numOfProvidedData; i++ { + hashStr := fmt.Sprintf("hash%d", i) + providedHashes = append(providedHashes, []byte(hashStr)) + pkStr := fmt.Sprintf("pk%d", i) + providedData = append(providedData, createMockValidatorInfo([]byte(pkStr))) + } + args := createMockArgValidatorInfoResolver() + numOfCalls := 0 + args.ValidatorInfoPool = &testscommon.ShardedDataStub{ + SearchFirstDataCalled: func(key []byte) (value interface{}, ok bool) { + val := providedData[numOfCalls] + numOfCalls++ + return val, true + }, + } + args.SenderResolver = &mock.TopicResolverSenderStub{ + SendCalled: func(buff []byte, peer core.PeerID) error { + return expectedErr + }, + } + args.DataPacker, _ = partitioning.NewSimpleDataPacker(args.Marshaller) + res, _ := resolvers.NewValidatorInfoResolver(args) + require.False(t, check.IfNil(res)) + + buff, _ := args.Marshaller.Marshal(&batch.Batch{Data: providedHashes}) + err := res.ProcessReceivedMessage(createRequestMsg(dataRetriever.HashArrayType, buff), fromConnectedPeer) + assert.Equal(t, expectedErr, err) + }) t.Run("all hashes in one chunk should work", func(t *testing.T) { t.Parallel() diff --git a/dataRetriever/shardedData/shardedData_test.go b/dataRetriever/shardedData/shardedData_test.go index d00e7939535..d9ab827df10 100644 --- a/dataRetriever/shardedData/shardedData_test.go +++ b/dataRetriever/shardedData/shardedData_test.go @@ -78,7 +78,7 @@ func TestShardedData_StorageEvictsData(t *testing.T) { } assert.Less(t, sd.ShardDataStore("1").Len(), int(defaultTestConfig.Capacity), - "Transaction pool entries excedes the maximum configured number") + "Transaction pool entries exceeds the maximum configured number") } func TestShardedData_NoDuplicates(t *testing.T) { @@ -124,6 +124,8 @@ func TestShardedData_RemoveData(t *testing.T) { sd, _ := NewShardedData("", defaultTestConfig) + sd.RemoveData([]byte{}, "missing_cache_id") // coverage + sd.AddData([]byte("tx_hash1"), &transaction.Transaction{Nonce: 1}, 0, "1") assert.Equal(t, 1, sd.ShardDataStore("1").Len(), "AddData failed, length should be 1") @@ -146,11 +148,13 @@ func TestShardedData_RemoveData(t *testing.T) { "FindAndRemoveData failed, length should be 1 in shard 2") } -func 
TestShardedData_Clear(t *testing.T) { +func TestShardedData_ClearShardStore(t *testing.T) { t.Parallel() sd, _ := NewShardedData("", defaultTestConfig) + sd.ClearShardStore("missing_cache_id") // coverage + sd.AddData([]byte("tx_hash1"), &transaction.Transaction{Nonce: 1}, 0, "1") sd.AddData([]byte("tx_hash2"), &transaction.Transaction{Nonce: 2}, 0, "2") sd.AddData([]byte("tx_hash1"), &transaction.Transaction{Nonce: 1}, 0, "2") @@ -224,7 +228,7 @@ func TestShardedData_RegisterAddedDataHandlerShouldWork(t *testing.T) { } } -func TestShardedData_RegisterAddedDataHandlerReallyAddsAhandler(t *testing.T) { +func TestShardedData_RegisterAddedDataHandlerReallyAddsHandler(t *testing.T) { t.Parallel() f := func(key []byte, value interface{}) { @@ -307,4 +311,49 @@ func TestShardedData_SearchFirstDataFoundShouldRetResults(t *testing.T) { assert.True(t, ok) } -// TODO: Add high load test, reach maximum capacity and inspect RAM usage. EN-6735. +func TestShardedData_RemoveSetOfDataFromPool(t *testing.T) { + t.Parallel() + + sd, _ := NewShardedData("", defaultTestConfig) + + sd.RemoveSetOfDataFromPool([][]byte{}, "missing_cache_id") // coverage + + sd.AddData([]byte("aaa"), "a1", 2, "0") + _, ok := sd.SearchFirstData([]byte("aaa")) + assert.True(t, ok) + sd.RemoveSetOfDataFromPool([][]byte{[]byte("aaa")}, "0") + _, ok = sd.SearchFirstData([]byte("aaa")) + assert.False(t, ok) +} + +func TestShardedData_ImmunizeSetOfDataAgainstEviction(t *testing.T) { + t.Parallel() + + sd, _ := NewShardedData("", defaultTestConfig) + sd.ImmunizeSetOfDataAgainstEviction([][]byte{[]byte("aaa")}, "0") +} + +func TestShardedData_GetCounts(t *testing.T) { + t.Parallel() + + sd, _ := NewShardedData("", defaultTestConfig) + + sd.RemoveSetOfDataFromPool([][]byte{}, "missing_cache_id") // coverage + + sd.AddData([]byte("aaa"), "a1", 2, "0") + sd.AddData([]byte("bbb"), "b1", 2, "0") + counts := sd.GetCounts() + assert.Equal(t, int64(2), counts.GetTotal()) +} + +func TestShardedData_Diagnose(t *testing.T) { + t.Parallel() + + sd, _ := NewShardedData("", defaultTestConfig) + + sd.RemoveSetOfDataFromPool([][]byte{}, "missing_cache_id") // coverage + + sd.AddData([]byte("aaa"), "a1", 2, "0") + sd.AddData([]byte("bbb"), "b1", 2, "0") + sd.Diagnose(true) +} diff --git a/dataRetriever/storageRequesters/headerRequester_test.go b/dataRetriever/storageRequesters/headerRequester_test.go index 82724e0e705..73e54a96e4c 100644 --- a/dataRetriever/storageRequesters/headerRequester_test.go +++ b/dataRetriever/storageRequesters/headerRequester_test.go @@ -1,7 +1,6 @@ package storagerequesters import ( - "errors" "math" "testing" "time" @@ -140,7 +139,6 @@ func TestHeaderRequester_SetEpochHandlerShouldWork(t *testing.T) { func TestHeaderRequester_RequestDataFromHashNotFoundNotBufferedChannelShouldErr(t *testing.T) { t.Parallel() - expectedErr := errors.New("expected error") newEpochCalled := false sendCalled := false arg := createMockHeaderRequesterArg() @@ -173,7 +171,6 @@ func TestHeaderRequester_RequestDataFromHashNotFoundNotBufferedChannelShouldErr( func TestHeaderRequester_RequestDataFromHashNotFoundShouldErr(t *testing.T) { t.Parallel() - expectedErr := errors.New("expected error") newEpochCalled := false sendCalled := false arg := createMockHeaderRequesterArg() @@ -248,7 +245,6 @@ func TestHeaderRequester_RequestDataFromHashShouldWork(t *testing.T) { func TestHeaderRequester_RequestDataFromNonceNotFoundShouldErr(t *testing.T) { t.Parallel() - expectedErr := errors.New("expected error") newEpochCalled := false sendCalled := false arg := 
createMockHeaderRequesterArg() @@ -324,32 +320,72 @@ func TestHeaderRequester_RequestDataFromNonceShouldWork(t *testing.T) { assert.True(t, sendCalled) } -func TestHeaderRequester_RequestDataFromEpochShouldWork(t *testing.T) { +func TestHeaderRequester_RequestDataFromEpoch(t *testing.T) { t.Parallel() - sendCalled := false - epochIdentifier := []byte(core.EpochStartIdentifier(math.MaxUint32)) - arg := createMockHeaderRequesterArg() - arg.HdrStorage = &storageStubs.StorerStub{ - SearchFirstCalled: func(key []byte) ([]byte, error) { - assert.Equal(t, epochIdentifier, key) - return make([]byte, 0), nil - }, - } - arg.ManualEpochStartNotifier = &mock.ManualEpochStartNotifierStub{} - arg.Messenger = &p2pmocks.MessengerStub{ - SendToConnectedPeerCalled: func(topic string, buff []byte, peerID core.PeerID) error { - sendCalled = true + t.Run("unknown epoch should error", func(t *testing.T) { + t.Parallel() - return nil - }, - } - hdReq, _ := NewHeaderRequester(arg) + epochIdentifier := []byte("unknown epoch") + arg := createMockHeaderRequesterArg() + arg.HdrStorage = &storageStubs.StorerStub{ + SearchFirstCalled: func(key []byte) ([]byte, error) { + assert.Fail(t, "should not have been called") + return make([]byte, 0), nil + }, + } + hdReq, _ := NewHeaderRequester(arg) + + err := hdReq.RequestDataFromEpoch(epochIdentifier) + assert.Equal(t, core.ErrInvalidIdentifierForEpochStartBlockRequest, err) + }) + t.Run("identifier not found should error", func(t *testing.T) { + t.Parallel() + + epochIdentifier := []byte(core.EpochStartIdentifier(100)) + arg := createMockHeaderRequesterArg() + arg.HdrStorage = &storageStubs.StorerStub{ + SearchFirstCalled: func(key []byte) ([]byte, error) { + return make([]byte, 0), expectedErr + }, + } + arg.Messenger = &p2pmocks.MessengerStub{ + SendToConnectedPeerCalled: func(topic string, buff []byte, peerID core.PeerID) error { + assert.Fail(t, "should not have been called") + return nil + }, + } + hdReq, _ := NewHeaderRequester(arg) + + err := hdReq.RequestDataFromEpoch(epochIdentifier) + assert.Equal(t, expectedErr, err) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + sendCalled := false + epochIdentifier := []byte(core.EpochStartIdentifier(math.MaxUint32)) + arg := createMockHeaderRequesterArg() + arg.HdrStorage = &storageStubs.StorerStub{ + SearchFirstCalled: func(key []byte) ([]byte, error) { + assert.Equal(t, epochIdentifier, key) + return make([]byte, 0), nil + }, + } + arg.Messenger = &p2pmocks.MessengerStub{ + SendToConnectedPeerCalled: func(topic string, buff []byte, peerID core.PeerID) error { + sendCalled = true - err := hdReq.RequestDataFromEpoch(epochIdentifier) + return nil + }, + } + hdReq, _ := NewHeaderRequester(arg) - assert.Nil(t, err) - assert.True(t, sendCalled) + err := hdReq.RequestDataFromEpoch(epochIdentifier) + + assert.Nil(t, err) + assert.True(t, sendCalled) + }) } func TestHeaderRequester_Close(t *testing.T) { diff --git a/dataRetriever/storageRequesters/sliceRequester_test.go b/dataRetriever/storageRequesters/sliceRequester_test.go index 75f3232b388..0693257464b 100644 --- a/dataRetriever/storageRequesters/sliceRequester_test.go +++ b/dataRetriever/storageRequesters/sliceRequester_test.go @@ -17,6 +17,8 @@ import ( "github.com/stretchr/testify/assert" ) +var expectedErr = errors.New("expected err") + func createMockSliceRequesterArg() ArgSliceRequester { return ArgSliceRequester{ Messenger: &mock.MessageHandlerStub{}, @@ -108,7 +110,6 @@ func TestNewSliceRequester_ShouldWork(t *testing.T) { func 
TestSliceRequester_RequestDataFromHashNotFoundShouldErr(t *testing.T) { t.Parallel() - expectedErr := errors.New("expected error") sendWasCalled := false arg := createMockSliceRequesterArg() arg.Storage = &storageStubs.StorerStub{ @@ -140,6 +141,32 @@ func TestSliceRequester_RequestDataFromHashNotFoundShouldErr(t *testing.T) { } } +func TestSliceRequester_RequestDataFromHashMarshalFails(t *testing.T) { + t.Parallel() + + arg := createMockSliceRequesterArg() + arg.Marshalizer = &mock.MarshalizerStub{ + MarshalCalled: func(obj interface{}) ([]byte, error) { + return nil, expectedErr + }, + } + arg.Storage = &storageStubs.StorerStub{ + GetCalled: func(key []byte) ([]byte, error) { + return make([]byte, 0), nil + }, + } + arg.Messenger = &p2pmocks.MessengerStub{ + SendToConnectedPeerCalled: func(topic string, buff []byte, peerID core.PeerID) error { + assert.Fail(t, "should not have been called") + return nil + }, + } + sr, _ := NewSliceRequester(arg) + + err := sr.RequestDataFromHash([]byte("hash"), 0) + assert.Equal(t, expectedErr, err) +} + func TestSliceRequester_RequestDataFromHashShouldWork(t *testing.T) { t.Parallel() @@ -164,6 +191,37 @@ func TestSliceRequester_RequestDataFromHashShouldWork(t *testing.T) { assert.True(t, sendWasCalled) } +func TestSliceRequester_RequestDataFromHashesPackDataInChunksFails(t *testing.T) { + t.Parallel() + + numGetCalled := 0 + arg := createMockSliceRequesterArg() + arg.Storage = &storageStubs.StorerStub{ + GetCalled: func(key []byte) ([]byte, error) { + numGetCalled++ + return make([]byte, 0), nil + }, + } + arg.Messenger = &p2pmocks.MessengerStub{ + SendToConnectedPeerCalled: func(topic string, buff []byte, peerID core.PeerID) error { + assert.Fail(t, "should not have been called") + return nil + }, + } + arg.DataPacker = &mock.DataPackerStub{ + PackDataInChunksCalled: func(data [][]byte, limit int) ([][]byte, error) { + return nil, expectedErr + }, + } + sr, _ := NewSliceRequester(arg) + + hashes := [][]byte{[]byte("hash1"), []byte("hash2")} + err := sr.RequestDataFromHashArray(hashes, 0) + + assert.Equal(t, expectedErr, err) + assert.Equal(t, len(hashes), numGetCalled) +} + func TestSliceRequester_RequestDataFromHashesShouldWork(t *testing.T) { t.Parallel() @@ -197,7 +255,6 @@ func TestSliceRequester_GetErroredShouldReturnErr(t *testing.T) { numSendCalled := 0 numGetCalled := 0 - expectedErr := errors.New("expected err") arg := createMockSliceRequesterArg() arg.Storage = &storageStubs.StorerStub{ GetCalled: func(key []byte) ([]byte, error) { @@ -240,7 +297,6 @@ func TestSliceRequester_SendErroredShouldReturnErr(t *testing.T) { numSendCalled := 0 numGetCalled := 0 - expectedErr := errors.New("expected err") arg := createMockSliceRequesterArg() arg.Storage = &storageStubs.StorerStub{ GetCalled: func(key []byte) ([]byte, error) { diff --git a/dataRetriever/storageRequesters/trieNodeRequester_test.go b/dataRetriever/storageRequesters/trieNodeRequester_test.go index 042c1390826..7fd87cf6dc2 100644 --- a/dataRetriever/storageRequesters/trieNodeRequester_test.go +++ b/dataRetriever/storageRequesters/trieNodeRequester_test.go @@ -11,8 +11,8 @@ import ( "github.com/multiversx/mx-chain-core-go/data/endProcess" "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/dataRetriever/mock" - "github.com/multiversx/mx-chain-go/testscommon" "github.com/multiversx/mx-chain-go/testscommon/p2pmocks" + "github.com/multiversx/mx-chain-go/testscommon/storageManager" trieMock "github.com/multiversx/mx-chain-go/testscommon/trie" 
"github.com/stretchr/testify/assert" ) @@ -23,7 +23,7 @@ func createMockTrieRequesterArguments() ArgTrieRequester { ResponseTopicName: "", Marshalizer: &mock.MarshalizerStub{}, TrieDataGetter: &trieMock.TrieStub{}, - TrieStorageManager: &testscommon.StorageManagerStub{}, + TrieStorageManager: &storageManager.StorageManagerStub{}, ManualEpochStartNotifier: &mock.ManualEpochStartNotifierStub{}, ChanGracefullyClose: make(chan endProcess.ArgEndProcess, 1), DelayBeforeGracefulClose: 0, @@ -127,6 +127,38 @@ func TestTrieNodeRequester_RequestDataFromHashShouldWork(t *testing.T) { assert.Equal(t, uint32(1), atomic.LoadUint32(&numSendToConnectedPeerCalled)) } +func TestTrieNodeRequester_RequestDataFromHashArrayMarshalFails(t *testing.T) { + t.Parallel() + + args := createMockTrieRequesterArguments() + buff := []byte("data") + args.TrieDataGetter = &trieMock.TrieStub{ + GetSerializedNodesCalled: func(bytes []byte, u uint64) ([][]byte, uint64, error) { + return [][]byte{buff}, 1, nil + }, + } + args.Messenger = &p2pmocks.MessengerStub{ + SendToConnectedPeerCalled: func(topic string, buff []byte, peerID core.PeerID) error { + assert.Fail(t, "should not have been called") + return nil + }, + } + args.Marshalizer = &mock.MarshalizerStub{ + MarshalCalled: func(obj interface{}) ([]byte, error) { + return nil, expectedErr + }, + } + tnr, _ := NewTrieNodeRequester(args) + + err := tnr.RequestDataFromHashArray( + [][]byte{ + []byte("hash1"), + []byte("hash2"), + }, 0) + assert.Equal(t, expectedErr, err) + assert.Equal(t, 0, len(args.ChanGracefullyClose)) +} + func TestTrieNodeRequester_RequestDataFromHashArrayShouldWork(t *testing.T) { t.Parallel() @@ -159,3 +191,30 @@ func TestTrieNodeRequester_RequestDataFromHashArrayShouldWork(t *testing.T) { assert.Equal(t, uint32(1), atomic.LoadUint32(&numSendToConnectedPeerCalled)) assert.Equal(t, uint32(2), atomic.LoadUint32(&numGetSerializedNodesCalled)) } + +func TestTrieNodeRequester_Close(t *testing.T) { + t.Parallel() + + t.Run("trieStorageManager.Close error should error", func(t *testing.T) { + t.Parallel() + + args := createMockTrieRequesterArguments() + args.TrieStorageManager = &storageManager.StorageManagerStub{ + CloseCalled: func() error { + return expectedErr + }, + } + tnr, _ := NewTrieNodeRequester(args) + + err := tnr.Close() + assert.Equal(t, expectedErr, err) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + tnr, _ := NewTrieNodeRequester(createMockTrieRequesterArguments()) + + err := tnr.Close() + assert.NoError(t, err) + }) +} diff --git a/dataRetriever/topicSender/diffPeerListCreator_test.go b/dataRetriever/topicSender/diffPeerListCreator_test.go index be87e933ca4..4b63b757608 100644 --- a/dataRetriever/topicSender/diffPeerListCreator_test.go +++ b/dataRetriever/topicSender/diffPeerListCreator_test.go @@ -75,7 +75,7 @@ func TestNewDiffPeerListCreator_ShouldWork(t *testing.T) { assert.Equal(t, excludedTopic, dplc.ExcludedPeersOnTopic()) } -func TestMakeDiffList_EmptyExcludedShoudRetAllPeersList(t *testing.T) { +func TestMakeDiffList_EmptyExcludedShouldRetAllPeersList(t *testing.T) { t.Parallel() allPeers := []core.PeerID{core.PeerID("peer1"), core.PeerID("peer2")} @@ -240,3 +240,21 @@ func TestDiffPeerListCreator_IntraShardPeersList(t *testing.T) { assert.Equal(t, peerList, dplc.IntraShardPeerList()) } + +func TestDiffPeerListCreator_FullHistoryList(t *testing.T) { + t.Parallel() + + peerList := []core.PeerID{"pid1", "pid2"} + dplc, _ := topicsender.NewDiffPeerListCreator( + &mock.MessageHandlerStub{ + 
ConnectedFullHistoryPeersOnTopicCalled: func(topic string) []core.PeerID { + return peerList + }, + }, + mainTopic, + intraTopic, + excludedTopic, + ) + + assert.Equal(t, peerList, dplc.FullHistoryList()) +} diff --git a/dataRetriever/txpool/shardedTxPool_test.go b/dataRetriever/txpool/shardedTxPool_test.go index b08ab8daa76..6fdaa4676ad 100644 --- a/dataRetriever/txpool/shardedTxPool_test.go +++ b/dataRetriever/txpool/shardedTxPool_test.go @@ -76,6 +76,13 @@ func Test_NewShardedTxPool_WhenBadConfig(t *testing.T) { require.NotNil(t, err) require.Errorf(t, err, dataRetriever.ErrCacheConfigInvalidShards.Error()) + args = goodArgs + args.TxGasHandler = nil + pool, err = NewShardedTxPool(args) + require.Nil(t, pool) + require.NotNil(t, err) + require.Errorf(t, err, dataRetriever.ErrNilTxGasHandler.Error()) + args = goodArgs args.TxGasHandler = &txcachemocks.TxGasHandlerMock{ MinimumGasMove: 50000, @@ -167,6 +174,7 @@ func Test_AddData(t *testing.T) { pool := poolAsInterface.(*shardedTxPool) cache := pool.getTxCache("0") + pool.AddData([]byte("hash-invalid-cache"), createTx("alice", 0), 0, "invalid-cache-id") pool.AddData([]byte("hash-x"), createTx("alice", 42), 0, "0") pool.AddData([]byte("hash-y"), createTx("alice", 43), 0, "0") require.Equal(t, 2, cache.Len()) @@ -346,6 +354,23 @@ func Test_Keys(t *testing.T) { require.ElementsMatch(t, txsHashes, pool.Keys()) } +func TestShardedTxPool_Diagnose(t *testing.T) { + t.Parallel() + + poolAsInterface, _ := newTxPoolToTest() + pool := poolAsInterface.(*shardedTxPool) + pool.AddData([]byte("hash"), createTx("alice", 10), 0, "0") + pool.Diagnose(true) +} + +func TestShardedTxPool_ImmunizeSetOfDataAgainstEviction(t *testing.T) { + t.Parallel() + + poolAsInterface, _ := newTxPoolToTest() + pool := poolAsInterface.(*shardedTxPool) + pool.ImmunizeSetOfDataAgainstEviction([][]byte{[]byte("hash")}, "0") +} + func Test_IsInterfaceNil(t *testing.T) { poolAsInterface, _ := newTxPoolToTest() require.False(t, check.IfNil(poolAsInterface)) @@ -421,5 +446,3 @@ func newTxPoolToTest() (dataRetriever.ShardedDataCacherNotifier, error) { } return NewShardedTxPool(args) } - -// TODO: Add high load test, reach maximum capacity and inspect RAM usage. EN-6735. 
diff --git a/dataRetriever/unitType_test.go b/dataRetriever/unitType_test.go new file mode 100644 index 00000000000..83c4381a3b9 --- /dev/null +++ b/dataRetriever/unitType_test.go @@ -0,0 +1,68 @@ +package dataRetriever + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestUnitType_String(t *testing.T) { + t.Parallel() + + ut := TransactionUnit + require.Equal(t, "TransactionUnit", ut.String()) + ut = MiniBlockUnit + require.Equal(t, "MiniBlockUnit", ut.String()) + ut = PeerChangesUnit + require.Equal(t, "PeerChangesUnit", ut.String()) + ut = BlockHeaderUnit + require.Equal(t, "BlockHeaderUnit", ut.String()) + ut = MetaBlockUnit + require.Equal(t, "MetaBlockUnit", ut.String()) + ut = UnsignedTransactionUnit + require.Equal(t, "UnsignedTransactionUnit", ut.String()) + ut = RewardTransactionUnit + require.Equal(t, "RewardTransactionUnit", ut.String()) + ut = MetaHdrNonceHashDataUnit + require.Equal(t, "MetaHdrNonceHashDataUnit", ut.String()) + ut = HeartbeatUnit + require.Equal(t, "HeartbeatUnit", ut.String()) + ut = BootstrapUnit + require.Equal(t, "BootstrapUnit", ut.String()) + ut = StatusMetricsUnit + require.Equal(t, "StatusMetricsUnit", ut.String()) + ut = TxLogsUnit + require.Equal(t, "TxLogsUnit", ut.String()) + ut = MiniblocksMetadataUnit + require.Equal(t, "MiniblocksMetadataUnit", ut.String()) + ut = EpochByHashUnit + require.Equal(t, "EpochByHashUnit", ut.String()) + ut = MiniblockHashByTxHashUnit + require.Equal(t, "MiniblockHashByTxHashUnit", ut.String()) + ut = ReceiptsUnit + require.Equal(t, "ReceiptsUnit", ut.String()) + ut = ResultsHashesByTxHashUnit + require.Equal(t, "ResultsHashesByTxHashUnit", ut.String()) + ut = TrieEpochRootHashUnit + require.Equal(t, "TrieEpochRootHashUnit", ut.String()) + ut = ESDTSuppliesUnit + require.Equal(t, "ESDTSuppliesUnit", ut.String()) + ut = RoundHdrHashDataUnit + require.Equal(t, "RoundHdrHashDataUnit", ut.String()) + ut = UserAccountsUnit + require.Equal(t, "UserAccountsUnit", ut.String()) + ut = UserAccountsCheckpointsUnit + require.Equal(t, "UserAccountsCheckpointsUnit", ut.String()) + ut = PeerAccountsUnit + require.Equal(t, "PeerAccountsUnit", ut.String()) + ut = PeerAccountsCheckpointsUnit + require.Equal(t, "PeerAccountsCheckpointsUnit", ut.String()) + ut = ScheduledSCRsUnit + require.Equal(t, "ScheduledSCRsUnit", ut.String()) + + ut = 200 + require.Equal(t, "ShardHdrNonceHashDataUnit100", ut.String()) + + ut = 99 + require.Equal(t, "unknown type 99", ut.String()) +} diff --git a/dblookupext/esdtSupply/proto/supplyESDT.proto b/dblookupext/esdtSupply/proto/supplyESDT.proto index 1e12b568d07..91417272e7b 100644 --- a/dblookupext/esdtSupply/proto/supplyESDT.proto +++ b/dblookupext/esdtSupply/proto/supplyESDT.proto @@ -12,4 +12,5 @@ message SupplyESDT { bytes Supply = 1 [(gogoproto.jsontag) = "value", (gogoproto.casttypewith) = "math/big.Int;github.com/multiversx/mx-chain-core-go/data.BigIntCaster"]; bytes Burned = 2 [(gogoproto.jsontag) = "burned", (gogoproto.casttypewith) = "math/big.Int;github.com/multiversx/mx-chain-core-go/data.BigIntCaster"]; bytes Minted = 3 [(gogoproto.jsontag) = "minted", (gogoproto.casttypewith) = "math/big.Int;github.com/multiversx/mx-chain-core-go/data.BigIntCaster"]; + bool RecomputedSupply = 4 [(gogoproto.jsontag) = "recomputedSupply"]; } diff --git a/dblookupext/esdtSupply/supplyESDT.pb.go b/dblookupext/esdtSupply/supplyESDT.pb.go index f330cc75b17..342c1ec11a3 100644 --- a/dblookupext/esdtSupply/supplyESDT.pb.go +++ b/dblookupext/esdtSupply/supplyESDT.pb.go @@ -29,9 +29,10 @@ 
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package // SupplyESDT is used to store information a shard esdt token supply type SupplyESDT struct { - Supply *math_big.Int `protobuf:"bytes,1,opt,name=Supply,proto3,casttypewith=math/big.Int;github.com/multiversx/mx-chain-core-go/data.BigIntCaster" json:"value"` - Burned *math_big.Int `protobuf:"bytes,2,opt,name=Burned,proto3,casttypewith=math/big.Int;github.com/multiversx/mx-chain-core-go/data.BigIntCaster" json:"burned"` - Minted *math_big.Int `protobuf:"bytes,3,opt,name=Minted,proto3,casttypewith=math/big.Int;github.com/multiversx/mx-chain-core-go/data.BigIntCaster" json:"minted"` + Supply *math_big.Int `protobuf:"bytes,1,opt,name=Supply,proto3,casttypewith=math/big.Int;github.com/multiversx/mx-chain-core-go/data.BigIntCaster" json:"value"` + Burned *math_big.Int `protobuf:"bytes,2,opt,name=Burned,proto3,casttypewith=math/big.Int;github.com/multiversx/mx-chain-core-go/data.BigIntCaster" json:"burned"` + Minted *math_big.Int `protobuf:"bytes,3,opt,name=Minted,proto3,casttypewith=math/big.Int;github.com/multiversx/mx-chain-core-go/data.BigIntCaster" json:"minted"` + RecomputedSupply bool `protobuf:"varint,4,opt,name=RecomputedSupply,proto3" json:"recomputedSupply"` } func (m *SupplyESDT) Reset() { *m = SupplyESDT{} } @@ -83,6 +84,13 @@ func (m *SupplyESDT) GetMinted() *math_big.Int { return nil } +func (m *SupplyESDT) GetRecomputedSupply() bool { + if m != nil { + return m.RecomputedSupply + } + return false +} + func init() { proto.RegisterType((*SupplyESDT)(nil), "proto.SupplyESDT") } @@ -90,26 +98,28 @@ func init() { func init() { proto.RegisterFile("supplyESDT.proto", fileDescriptor_173c6d56cc05b222) } var fileDescriptor_173c6d56cc05b222 = []byte{ - // 294 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0xd1, 0x31, 0x4e, 0xc3, 0x30, - 0x14, 0x06, 0x60, 0xbb, 0xa8, 0x19, 0x2c, 0x06, 0xd4, 0xa9, 0x62, 0x78, 0x45, 0x4c, 0x2c, 0x49, - 0x06, 0x46, 0xb6, 0xd0, 0x0e, 0x1d, 0x58, 0x5a, 0x26, 0x36, 0x27, 0x31, 0x8e, 0x21, 0x89, 0xa3, - 0xc4, 0xae, 0xca, 0xc6, 0x11, 0x38, 0x06, 0xe2, 0x24, 0x8c, 0x11, 0x53, 0xa6, 0x42, 0x9c, 0x05, - 0x75, 0xea, 0x11, 0x10, 0x0e, 0x02, 0x0e, 0xd0, 0xc9, 0xfe, 0x7f, 0xc9, 0xfe, 0xa4, 0xf7, 0xc8, - 0x51, 0xa5, 0x8b, 0x22, 0x7d, 0x98, 0x2d, 0xa7, 0xd7, 0x5e, 0x51, 0x4a, 0x25, 0x47, 0x43, 0x7b, - 0x1c, 0xbb, 0x5c, 0xa8, 0x44, 0x87, 0x5e, 0x24, 0x33, 0x9f, 0x4b, 0x2e, 0x7d, 0x5b, 0x87, 0xfa, - 0xd6, 0x26, 0x1b, 0xec, 0xad, 0x7f, 0x75, 0xfa, 0x36, 0x20, 0x64, 0xf9, 0xfb, 0xd5, 0xe8, 0x8e, - 0x38, 0x7d, 0x1a, 0xe3, 0x13, 0x7c, 0x76, 0x18, 0x2c, 0xb6, 0x9b, 0xc9, 0x70, 0x45, 0x53, 0xcd, - 0x5e, 0xde, 0x27, 0xb3, 0x8c, 0xaa, 0xc4, 0x0f, 0x05, 0xf7, 0xe6, 0xb9, 0xba, 0xf8, 0xe7, 0x64, - 0x3a, 0x55, 0x62, 0xc5, 0xca, 0x6a, 0xed, 0x67, 0x6b, 0x37, 0x4a, 0xa8, 0xc8, 0xdd, 0x48, 0x96, - 0xcc, 0xe5, 0xd2, 0x8f, 0xa9, 0xa2, 0x5e, 0x20, 0xf8, 0x3c, 0x57, 0x97, 0xb4, 0x52, 0xac, 0x5c, - 0xfc, 0x08, 0xa3, 0x7b, 0xe2, 0x04, 0xba, 0xcc, 0x59, 0x3c, 0x1e, 0x58, 0x6b, 0xb9, 0xdd, 0x4c, - 0x9c, 0xd0, 0x36, 0x7b, 0xc4, 0x7a, 0xe2, 0x1b, 0xbb, 0x12, 0xb9, 0x62, 0xf1, 0xf8, 0xe0, 0x0f, - 0xcb, 0x6c, 0xb3, 0x47, 0xac, 0x27, 0x82, 0x69, 0xdd, 0x02, 0x6a, 0x5a, 0x40, 0xbb, 0x16, 0xf0, - 0xa3, 0x01, 0xfc, 0x6c, 0x00, 0xbf, 0x1a, 0xc0, 0xb5, 0x01, 0xdc, 0x18, 0xc0, 0x1f, 0x06, 0xf0, - 0xa7, 0x01, 0xb4, 0x33, 0x80, 0x9f, 0x3a, 0x40, 0x75, 0x07, 0xa8, 0xe9, 0x00, 0xdd, 0x10, 0x56, - 0xc5, 0xaa, 0x9f, 0x4f, 0xe8, 0xd8, 0x0d, 0x9d, 0x7f, 0x05, 0x00, 0x00, 0xff, 0xff, 
0x86, 0x09, - 0xb3, 0x0a, 0xeb, 0x01, 0x00, 0x00, + // 326 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0xd1, 0x3f, 0x4e, 0xc3, 0x30, + 0x14, 0x06, 0xf0, 0x98, 0xd2, 0x08, 0x59, 0x0c, 0x55, 0xc4, 0x50, 0x31, 0xbc, 0x54, 0x4c, 0x5d, + 0x92, 0x0c, 0x8c, 0x2c, 0x28, 0xb4, 0x43, 0x07, 0x96, 0x94, 0x89, 0x2d, 0x7f, 0x4c, 0x6a, 0xa8, + 0xe3, 0xc8, 0xb1, 0xab, 0xb2, 0x71, 0x04, 0x06, 0x0e, 0x81, 0x38, 0x09, 0x63, 0xc7, 0x4e, 0x85, + 0xba, 0x0b, 0xea, 0xd4, 0x23, 0x20, 0x9c, 0x0a, 0x10, 0xac, 0x9d, 0x92, 0xef, 0xb3, 0xfd, 0x7e, + 0x92, 0x8d, 0x5b, 0x95, 0x2a, 0xcb, 0xf1, 0x7d, 0x7f, 0xd8, 0xbb, 0xf2, 0x4b, 0xc1, 0x25, 0x77, + 0x9a, 0xe6, 0x73, 0xec, 0xe5, 0x54, 0x8e, 0x54, 0xe2, 0xa7, 0x9c, 0x05, 0x39, 0xcf, 0x79, 0x60, + 0xea, 0x44, 0xdd, 0x98, 0x64, 0x82, 0xf9, 0xab, 0x4f, 0x9d, 0x3c, 0x35, 0x30, 0x1e, 0x7e, 0x8f, + 0x72, 0x6e, 0xb1, 0x5d, 0xa7, 0x36, 0xea, 0xa0, 0xee, 0x61, 0x18, 0xad, 0x17, 0x6e, 0x73, 0x12, + 0x8f, 0x15, 0x79, 0x79, 0x73, 0xfb, 0x2c, 0x96, 0xa3, 0x20, 0xa1, 0xb9, 0x3f, 0x28, 0xe4, 0xd9, + 0x2f, 0x87, 0xa9, 0xb1, 0xa4, 0x13, 0x22, 0xaa, 0x69, 0xc0, 0xa6, 0x5e, 0x3a, 0x8a, 0x69, 0xe1, + 0xa5, 0x5c, 0x10, 0x2f, 0xe7, 0x41, 0x16, 0xcb, 0xd8, 0x0f, 0x69, 0x3e, 0x28, 0xe4, 0x45, 0x5c, + 0x49, 0x22, 0xa2, 0xad, 0xe0, 0xdc, 0x61, 0x3b, 0x54, 0xa2, 0x20, 0x59, 0x7b, 0xcf, 0x58, 0xc3, + 0xf5, 0xc2, 0xb5, 0x13, 0xd3, 0xec, 0x10, 0xab, 0x89, 0x2f, 0xec, 0x92, 0x16, 0x92, 0x64, 0xed, + 0xc6, 0x0f, 0xc6, 0x4c, 0xb3, 0x43, 0xac, 0x26, 0x9c, 0x73, 0xdc, 0x8a, 0x48, 0xca, 0x59, 0xa9, + 0x24, 0xc9, 0xb6, 0xf7, 0xb9, 0xdf, 0x41, 0xdd, 0x83, 0xf0, 0x68, 0xbd, 0x70, 0x5b, 0xe2, 0xcf, + 0x5a, 0xf4, 0x6f, 0x77, 0xd8, 0x9b, 0x2d, 0xc1, 0x9a, 0x2f, 0xc1, 0xda, 0x2c, 0x01, 0x3d, 0x68, + 0x40, 0xcf, 0x1a, 0xd0, 0xab, 0x06, 0x34, 0xd3, 0x80, 0xe6, 0x1a, 0xd0, 0xbb, 0x06, 0xf4, 0xa1, + 0xc1, 0xda, 0x68, 0x40, 0x8f, 0x2b, 0xb0, 0x66, 0x2b, 0xb0, 0xe6, 0x2b, 0xb0, 0xae, 0x31, 0xa9, + 0x32, 0x59, 0x4f, 0x49, 0x6c, 0xf3, 0xc6, 0xa7, 0x9f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x29, 0x1e, + 0x87, 0x1e, 0x2d, 0x02, 0x00, 0x00, } func (this *SupplyESDT) Equal(that interface{}) bool { @@ -149,17 +159,21 @@ func (this *SupplyESDT) Equal(that interface{}) bool { return false } } + if this.RecomputedSupply != that1.RecomputedSupply { + return false + } return true } func (this *SupplyESDT) GoString() string { if this == nil { return "nil" } - s := make([]string, 0, 7) + s := make([]string, 0, 8) s = append(s, "&esdtSupply.SupplyESDT{") s = append(s, "Supply: "+fmt.Sprintf("%#v", this.Supply)+",\n") s = append(s, "Burned: "+fmt.Sprintf("%#v", this.Burned)+",\n") s = append(s, "Minted: "+fmt.Sprintf("%#v", this.Minted)+",\n") + s = append(s, "RecomputedSupply: "+fmt.Sprintf("%#v", this.RecomputedSupply)+",\n") s = append(s, "}") return strings.Join(s, "") } @@ -191,6 +205,16 @@ func (m *SupplyESDT) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.RecomputedSupply { + i-- + if m.RecomputedSupply { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + } { __caster := &github_com_multiversx_mx_chain_core_go_data.BigIntCaster{} size := __caster.Size(m.Minted) @@ -259,6 +283,9 @@ func (m *SupplyESDT) Size() (n int) { l = __caster.Size(m.Minted) n += 1 + l + sovSupplyESDT(uint64(l)) } + if m.RecomputedSupply { + n += 2 + } return n } @@ -276,6 +303,7 @@ func (this *SupplyESDT) String() string { `Supply:` + fmt.Sprintf("%v", this.Supply) + `,`, `Burned:` + fmt.Sprintf("%v", this.Burned) + `,`, `Minted:` + fmt.Sprintf("%v", 
this.Minted) + `,`, + `RecomputedSupply:` + fmt.Sprintf("%v", this.RecomputedSupply) + `,`, `}`, }, "") return s @@ -431,6 +459,26 @@ func (m *SupplyESDT) Unmarshal(dAtA []byte) error { } } iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RecomputedSupply", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSupplyESDT + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.RecomputedSupply = bool(v != 0) default: iNdEx = preIndex skippy, err := skipSupplyESDT(dAtA[iNdEx:]) diff --git a/epochStart/bootstrap/metaStorageHandler.go b/epochStart/bootstrap/metaStorageHandler.go index 0a25fa08b45..7a036096e4f 100644 --- a/epochStart/bootstrap/metaStorageHandler.go +++ b/epochStart/bootstrap/metaStorageHandler.go @@ -54,6 +54,7 @@ func NewMetaStorageHandler( CreateTrieEpochRootHashStorer: false, NodeProcessingMode: nodeProcessingMode, SnapshotsEnabled: snapshotsEnabled, + RepopulateTokensSupplies: false, // tokens supplies cannot be repopulated at this time ManagedPeersHolder: managedPeersHolder, }, ) diff --git a/epochStart/bootstrap/process.go b/epochStart/bootstrap/process.go index 5e082537258..743250c5bde 100644 --- a/epochStart/bootstrap/process.go +++ b/epochStart/bootstrap/process.go @@ -1066,7 +1066,7 @@ func (e *epochStartBootstrap) syncUserAccountsState(rootHash []byte) error { } e.mutTrieStorageManagers.RLock() - trieStorageManager := e.trieStorageManagers[factory.UserAccountTrie] + trieStorageManager := e.trieStorageManagers[dataRetriever.UserAccountsUnit.String()] e.mutTrieStorageManagers.RUnlock() argsUserAccountsSyncer := syncer.ArgsNewUserAccountsSyncer{ @@ -1081,7 +1081,6 @@ func (e *epochStartBootstrap) syncUserAccountsState(rootHash []byte) error { MaxHardCapForMissingNodes: e.maxHardCapForMissingNodes, TrieSyncerVersion: e.trieSyncerVersion, CheckNodesOnDisk: e.checkNodesOnDisk, - StorageMarker: storageMarker.NewTrieStorageMarker(), UserAccountsSyncStatisticsHandler: e.trieSyncStatisticsProvider, AppStatusHandler: e.statusHandler, }, @@ -1094,7 +1093,7 @@ func (e *epochStartBootstrap) syncUserAccountsState(rootHash []byte) error { return err } - err = accountsDBSyncer.SyncAccounts(rootHash) + err = accountsDBSyncer.SyncAccounts(rootHash, storageMarker.NewTrieStorageMarker()) if err != nil { return err } @@ -1123,6 +1122,7 @@ func (e *epochStartBootstrap) createStorageService( CreateTrieEpochRootHashStorer: createTrieEpochRootHashStorer, NodeProcessingMode: e.nodeProcessingMode, SnapshotsEnabled: e.flagsConfig.SnapshotsEnabled, + RepopulateTokensSupplies: e.flagsConfig.RepopulateTokensSupplies, ManagedPeersHolder: e.cryptoComponentsHolder.ManagedPeersHolder(), }) if err != nil { @@ -1138,7 +1138,7 @@ func (e *epochStartBootstrap) createStorageService( func (e *epochStartBootstrap) syncValidatorAccountsState(rootHash []byte) error { e.mutTrieStorageManagers.RLock() - peerTrieStorageManager := e.trieStorageManagers[factory.PeerAccountTrie] + peerTrieStorageManager := e.trieStorageManagers[dataRetriever.PeerAccountsUnit.String()] e.mutTrieStorageManagers.RUnlock() argsValidatorAccountsSyncer := syncer.ArgsNewValidatorAccountsSyncer{ @@ -1153,7 +1153,6 @@ func (e *epochStartBootstrap) syncValidatorAccountsState(rootHash []byte) error MaxHardCapForMissingNodes: e.maxHardCapForMissingNodes, TrieSyncerVersion: e.trieSyncerVersion, CheckNodesOnDisk: e.checkNodesOnDisk, - 
StorageMarker: storageMarker.NewTrieStorageMarker(), UserAccountsSyncStatisticsHandler: statistics.NewTrieSyncStatistics(), AppStatusHandler: disabledCommon.NewAppStatusHandler(), }, @@ -1163,7 +1162,7 @@ func (e *epochStartBootstrap) syncValidatorAccountsState(rootHash []byte) error return err } - err = accountsDBSyncer.SyncAccounts(rootHash) + err = accountsDBSyncer.SyncAccounts(rootHash, storageMarker.NewTrieStorageMarker()) if err != nil { return err } diff --git a/epochStart/bootstrap/shardStorageHandler.go b/epochStart/bootstrap/shardStorageHandler.go index 9ffc5384e31..2ffdfe23ccd 100644 --- a/epochStart/bootstrap/shardStorageHandler.go +++ b/epochStart/bootstrap/shardStorageHandler.go @@ -58,6 +58,7 @@ func NewShardStorageHandler( CreateTrieEpochRootHashStorer: false, NodeProcessingMode: nodeProcessingMode, SnapshotsEnabled: snapshotsEnabled, + RepopulateTokensSupplies: false, // tokens supplies cannot be repopulated at this time ManagedPeersHolder: managedPeersHolder, }, ) diff --git a/epochStart/interface.go b/epochStart/interface.go index 33cd7a1e233..fc4364afc43 100644 --- a/epochStart/interface.go +++ b/epochStart/interface.go @@ -115,12 +115,6 @@ type PendingMiniBlocksSyncHandler interface { IsInterfaceNil() bool } -// AccountsDBSyncer defines the methods for the accounts db syncer -type AccountsDBSyncer interface { - SyncAccounts(rootHash []byte) error - IsInterfaceNil() bool -} - // StartOfEpochMetaSyncer defines the methods to synchronize epoch start meta block from the network when nothing is known type StartOfEpochMetaSyncer interface { SyncEpochStartMeta(waitTime time.Duration) (data.MetaHeaderHandler, error) diff --git a/epochStart/metachain/baseRewards_test.go b/epochStart/metachain/baseRewards_test.go index c84e6b1d246..57fd8ad7a9b 100644 --- a/epochStart/metachain/baseRewards_test.go +++ b/epochStart/metachain/baseRewards_test.go @@ -1136,11 +1136,11 @@ func getBaseRewardsArguments() BaseRewardsCreatorArgs { hasher := sha256.NewSha256() marshalizer := &marshal.GogoProtoMarshalizer{} - storageManagerArgs, options := storage.GetStorageManagerArgsAndOptions() + storageManagerArgs := storage.GetStorageManagerArgs() storageManagerArgs.Marshalizer = marshalizer storageManagerArgs.Hasher = hasher - trieFactoryManager, _ := trie.CreateTrieStorageManager(storageManagerArgs, options) + trieFactoryManager, _ := trie.CreateTrieStorageManager(storageManagerArgs, storage.GetStorageManagerOptions()) userAccountsDB := createAccountsDB(hasher, marshalizer, factory.NewAccountCreator(), trieFactoryManager) shardCoordinator := mock.NewMultiShardsCoordinatorMock(2) shardCoordinator.CurrentShard = core.MetachainShardId diff --git a/epochStart/metachain/systemSCs_test.go b/epochStart/metachain/systemSCs_test.go index bfa00d3c444..8b48dc948f7 100644 --- a/epochStart/metachain/systemSCs_test.go +++ b/epochStart/metachain/systemSCs_test.go @@ -910,13 +910,13 @@ func createAccountsDB( func createFullArgumentsForSystemSCProcessing(enableEpochsConfig config.EnableEpochs, trieStorer storage.Storer) (ArgsNewEpochStartSystemSCProcessing, vm.SystemSCContainer) { hasher := sha256.NewSha256() marshalizer := &marshal.GogoProtoMarshalizer{} - storageManagerArgs, options := stateMock.GetStorageManagerArgsAndOptions() + storageManagerArgs := stateMock.GetStorageManagerArgs() storageManagerArgs.Marshalizer = marshalizer storageManagerArgs.Hasher = hasher storageManagerArgs.MainStorer = trieStorer storageManagerArgs.CheckpointsStorer = trieStorer - trieFactoryManager, _ := 
trie.CreateTrieStorageManager(storageManagerArgs, options) + trieFactoryManager, _ := trie.CreateTrieStorageManager(storageManagerArgs, stateMock.GetStorageManagerOptions()) userAccountsDB := createAccountsDB(hasher, marshalizer, factory.NewAccountCreator(), trieFactoryManager) peerAccountsDB := createAccountsDB(hasher, marshalizer, factory.NewPeerAccountCreator(), trieFactoryManager) en := forking.NewGenericEpochNotifier() @@ -953,23 +953,24 @@ func createFullArgumentsForSystemSCProcessing(enableEpochsConfig config.EnableEp nodesSetup := &mock.NodesSetupStub{} argsHook := hooks.ArgBlockChainHook{ - Accounts: userAccountsDB, - PubkeyConv: &testscommon.PubkeyConverterMock{}, - StorageService: &storageStubs.ChainStorerStub{}, - BlockChain: blockChain, - ShardCoordinator: &mock.ShardCoordinatorStub{}, - Marshalizer: marshalizer, - Uint64Converter: &mock.Uint64ByteSliceConverterMock{}, - BuiltInFunctions: vmcommonBuiltInFunctions.NewBuiltInFunctionContainer(), - NFTStorageHandler: &testscommon.SimpleNFTStorageHandlerStub{}, - GlobalSettingsHandler: &testscommon.ESDTGlobalSettingsHandlerStub{}, - DataPool: testDataPool, - CompiledSCPool: testDataPool.SmartContracts(), - EpochNotifier: en, - EnableEpochsHandler: enableEpochsHandler, - NilCompiledSCStore: true, - GasSchedule: gasScheduleNotifier, - Counter: &testscommon.BlockChainHookCounterStub{}, + Accounts: userAccountsDB, + PubkeyConv: &testscommon.PubkeyConverterMock{}, + StorageService: &storageStubs.ChainStorerStub{}, + BlockChain: blockChain, + ShardCoordinator: &mock.ShardCoordinatorStub{}, + Marshalizer: marshalizer, + Uint64Converter: &mock.Uint64ByteSliceConverterMock{}, + BuiltInFunctions: vmcommonBuiltInFunctions.NewBuiltInFunctionContainer(), + NFTStorageHandler: &testscommon.SimpleNFTStorageHandlerStub{}, + GlobalSettingsHandler: &testscommon.ESDTGlobalSettingsHandlerStub{}, + DataPool: testDataPool, + CompiledSCPool: testDataPool.SmartContracts(), + EpochNotifier: en, + EnableEpochsHandler: enableEpochsHandler, + NilCompiledSCStore: true, + GasSchedule: gasScheduleNotifier, + Counter: &testscommon.BlockChainHookCounterStub{}, + MissingTrieNodesNotifier: &testscommon.MissingTrieNodesNotifierStub{}, } blockChainHookImpl, _ := hooks.NewBlockChainHookImpl(argsHook) @@ -996,8 +997,9 @@ func createFullArgumentsForSystemSCProcessing(enableEpochsConfig config.EnableEp MinQuorum: 0.5, MinPassThreshold: 0.5, MinVetoThreshold: 0.5, + LostProposalFee: "1", }, - ChangeConfigAddress: "3132333435363738393031323334353637383930313233343536373839303234", + OwnerAddress: "3132333435363738393031323334353637383930313233343536373839303234", }, StakingSystemSCConfig: config.StakingSystemSCConfig{ GenesisNodePrice: "1000", @@ -1113,7 +1115,7 @@ func createEconomicsData() process.EconomicsDataHandler { EpochNotifier: &epochNotifier.EpochNotifierStub{}, EnableEpochsHandler: &testscommon.EnableEpochsHandlerStub{}, BuiltInFunctionsCostHandler: &mock.BuiltInCostHandlerStub{}, - TxVersionChecker: &testscommon.TxVersionCheckerStub{}, + TxVersionChecker: &testscommon.TxVersionCheckerStub{}, } economicsData, _ := economicsHandler.NewEconomicsData(argsNewEconomicsData) return economicsData diff --git a/epochStart/mock/storageManagerStub.go b/epochStart/mock/storageManagerStub.go index 21e28a13065..da4d434ed8d 100644 --- a/epochStart/mock/storageManagerStub.go +++ b/epochStart/mock/storageManagerStub.go @@ -7,13 +7,13 @@ import ( // StorageManagerStub -- type StorageManagerStub struct { - DatabaseCalled func() common.DBWriteCacher + DatabaseCalled func() 
common.BaseStorer TakeSnapshotCalled func([]byte) SetCheckpointCalled func([]byte) PruneCalled func([]byte) CancelPruneCalled func([]byte) MarkForEvictionCalled func([]byte, common.ModifiedHashes) error - GetDbThatContainsHashCalled func([]byte) common.DBWriteCacher + GetDbThatContainsHashCalled func([]byte) common.BaseStorer GetSnapshotThatContainsHashCalled func(rootHash []byte) common.SnapshotDbHandler IsPruningEnabledCalled func() bool EnterSnapshotModeCalled func() @@ -22,7 +22,7 @@ type StorageManagerStub struct { } // Database -- -func (sms *StorageManagerStub) Database() common.DBWriteCacher { +func (sms *StorageManagerStub) Database() common.BaseStorer { if sms.DatabaseCalled != nil { return sms.DatabaseCalled() } diff --git a/epochStart/shardchain/trigger.go b/epochStart/shardchain/trigger.go index cd0637d724e..76a949b6961 100644 --- a/epochStart/shardchain/trigger.go +++ b/epochStart/shardchain/trigger.go @@ -22,7 +22,6 @@ import ( "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/epochStart" - "github.com/multiversx/mx-chain-go/errors" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/storage" "github.com/multiversx/mx-chain-logger-go" @@ -997,7 +996,7 @@ func (t *trigger) SetProcessed(header data.HeaderHandler, _ data.BodyHandler) { errNotCritical = t.shardHdrStorage.Put([]byte(epochStartIdentifier), shardHdrBuff) if errNotCritical != nil { logLevel := logger.LogWarning - if errors.IsClosingError(errNotCritical) { + if core.IsClosingError(errNotCritical) { logLevel = logger.LogDebug } log.Log(logLevel, "SetProcessed put to shard header storage error", "error", errNotCritical) diff --git a/errors/closingError.go b/errors/closingError.go deleted file mode 100644 index 81d051990b6..00000000000 --- a/errors/closingError.go +++ /dev/null @@ -1,17 +0,0 @@ -package errors - -import ( - "strings" - - "github.com/multiversx/mx-chain-go/storage" -) - -// IsClosingError returns true if the provided error is used whenever the node is in the closing process -func IsClosingError(err error) bool { - if err == nil { - return false - } - - return strings.Contains(err.Error(), storage.ErrDBIsClosed.Error()) || - strings.Contains(err.Error(), ErrContextClosing.Error()) -} diff --git a/errors/closingError_test.go b/errors/closingError_test.go deleted file mode 100644 index 27316a4dc42..00000000000 --- a/errors/closingError_test.go +++ /dev/null @@ -1,45 +0,0 @@ -package errors_test - -import ( - "fmt" - "testing" - - "github.com/multiversx/mx-chain-go/errors" - "github.com/multiversx/mx-chain-go/storage" - "github.com/stretchr/testify/assert" -) - -func TestIsClosingError(t *testing.T) { - t.Parallel() - - t.Run("nil error should return false", func(t *testing.T) { - t.Parallel() - - assert.False(t, errors.IsClosingError(nil)) - }) - t.Run("context closing error should return true", func(t *testing.T) { - t.Parallel() - - assert.True(t, errors.IsClosingError(fmt.Errorf("%w random string", errors.ErrContextClosing))) - }) - t.Run("DB closed error should return true", func(t *testing.T) { - t.Parallel() - - assert.True(t, errors.IsClosingError(fmt.Errorf("%w random string", storage.ErrDBIsClosed))) - }) - t.Run("contains 'DB is closed' should return true", func(t *testing.T) { - t.Parallel() - - assert.True(t, errors.IsClosingError(fmt.Errorf("random string DB is closed random string"))) - }) - t.Run("contains 'DB is closed' should return true", func(t *testing.T) { - t.Parallel() - - 
assert.True(t, errors.IsClosingError(fmt.Errorf("random string context closing random string"))) - }) - t.Run("random error should return false", func(t *testing.T) { - t.Parallel() - - assert.False(t, errors.IsClosingError(fmt.Errorf("random error"))) - }) -} diff --git a/errors/errors.go b/errors/errors.go index 3a043421c25..131f93f2b72 100644 --- a/errors/errors.go +++ b/errors/errors.go @@ -1,6 +1,8 @@ package errors -import "errors" +import ( + "errors" +) // ErrAccountsAdapterCreation signals that the accounts adapter cannot be created based on provided data var ErrAccountsAdapterCreation = errors.New("error creating accounts adapter") @@ -299,15 +301,9 @@ var ErrNilP2pPublicKey = errors.New("nil p2p public key") // ErrNilRater signals that a nil rater was provided var ErrNilRater = errors.New("nil rater") -// ErrNilRatingData signals that nil rating data were provided -var ErrNilRatingData = errors.New("nil rating data") - // ErrNilRatingsInfoHandler signals that nil ratings data information was provided var ErrNilRatingsInfoHandler = errors.New("nil ratings info handler") -// ErrNilRequestedItemHandler signals that a nil requested items handler was provided -var ErrNilRequestedItemHandler = errors.New("nil requested item handler") - // ErrNilRequestHandler signals that a nil request handler was provided var ErrNilRequestHandler = errors.New("nil request handler") @@ -323,9 +319,6 @@ var ErrNilRoundHandler = errors.New("nil roundHandler") // ErrNilShardCoordinator signals that a nil shard coordinator was provided var ErrNilShardCoordinator = errors.New("nil shard coordinator provided") -// ErrNilSmartContractParser signals that a nil smart contract parser was provided -var ErrNilSmartContractParser = errors.New("nil smart contract parser") - // ErrNilSoftwareVersion signals that a nil software version was provided var ErrNilSoftwareVersion = errors.New("nil software version") @@ -401,12 +394,6 @@ var ErrNilVmMarshalizer = errors.New("nil vm marshalizer") // ErrNilWatchdog signals that a nil watchdog was provided var ErrNilWatchdog = errors.New("nil watchdog") -// ErrNilWhiteListHandler signals that a nil whitelist handler was provided -var ErrNilWhiteListHandler = errors.New("nil white list handler") - -// ErrNilWhiteListVerifiedTxs signals that a nil whitelist for verified transactions was prvovided -var ErrNilWhiteListVerifiedTxs = errors.New("nil white list verified txs") - // ErrPollingFunctionRegistration signals an error while registering the polling function registration var ErrPollingFunctionRegistration = errors.New("cannot register handler func for num of connected peers") @@ -473,9 +460,6 @@ var ErrNilScheduledTxsExecutionHandler = errors.New("nil scheduled transactions // ErrNilScheduledProcessor signals that a nil scheduled processor was provided var ErrNilScheduledProcessor = errors.New("nil scheduled processor") -// ErrContextClosing signals that the parent context requested the closing of its children -var ErrContextClosing = errors.New("context closing") - // ErrNilTxsSender signals that a nil transactions sender has been provided var ErrNilTxsSender = errors.New("nil transactions sender has been provided") @@ -562,3 +546,9 @@ var ErrNilLogger = errors.New("nil logger") // ErrNilShuffleOutCloser signals that a nil shuffle out closer has been provided var ErrNilShuffleOutCloser = errors.New("nil shuffle out closer") + +// ErrNilHistoryRepository signals that history processor is nil +var ErrNilHistoryRepository = errors.New("history repository is nil") + +// 
ErrNilMissingTrieNodesNotifier signals that a nil missing trie nodes notifier was provided +var ErrNilMissingTrieNodesNotifier = errors.New("nil missing trie nodes notifier") diff --git a/facade/initial/initialNodeFacade.go b/facade/initial/initialNodeFacade.go index 06c40d1bf74..cd268eacf0a 100644 --- a/facade/initial/initialNodeFacade.go +++ b/facade/initial/initialNodeFacade.go @@ -6,9 +6,9 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-core-go/data/alteredAccount" "github.com/multiversx/mx-chain-core-go/data/api" "github.com/multiversx/mx-chain-core-go/data/esdt" - outportcore "github.com/multiversx/mx-chain-core-go/data/outport" "github.com/multiversx/mx-chain-core-go/data/transaction" "github.com/multiversx/mx-chain-core-go/data/vm" "github.com/multiversx/mx-chain-go/common" @@ -271,7 +271,7 @@ func (inf *initialNodeFacade) GetBlockByRound(_ uint64, _ api.BlockQueryOptions) } // GetAlteredAccountsForBlock returns nil and error -func (inf *initialNodeFacade) GetAlteredAccountsForBlock(_ api.GetAlteredAccountsForBlockOptions) ([]*outportcore.AlteredAccount, error) { +func (inf *initialNodeFacade) GetAlteredAccountsForBlock(_ api.GetAlteredAccountsForBlockOptions) ([]*alteredAccount.AlteredAccount, error) { return nil, errNodeStarting } diff --git a/facade/initial/initialNodeFacade_test.go b/facade/initial/initialNodeFacade_test.go index f157571da48..bba4b57eaa7 100644 --- a/facade/initial/initialNodeFacade_test.go +++ b/facade/initial/initialNodeFacade_test.go @@ -4,7 +4,6 @@ import ( "fmt" "testing" - "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-core-go/data/api" "github.com/multiversx/mx-chain-go/facade" "github.com/multiversx/mx-chain-go/node/external" @@ -20,14 +19,14 @@ func TestInitialNodeFacade(t *testing.T) { inf, err := NewInitialNodeFacade("127.0.0.1:8080", true, nil) assert.Equal(t, facade.ErrNilStatusMetrics, err) - assert.True(t, check.IfNil(inf)) + assert.Nil(t, inf) }) t.Run("should work", func(t *testing.T) { t.Parallel() inf, err := NewInitialNodeFacade("127.0.0.1:8080", true, &testscommon.StatusMetricsStub{}) assert.Nil(t, err) - assert.False(t, check.IfNil(inf)) + assert.NotNil(t, inf) }) } @@ -233,5 +232,94 @@ func TestInitialNodeFacade_AllMethodsShouldNotPanic(t *testing.T) { assert.Equal(t, api.GuardianData{}, guardianData) assert.Equal(t, errNodeStarting, err) - assert.False(t, check.IfNil(inf)) + mainTrieResponse, dataTrieResponse, err := inf.GetProofDataTrie("", "", "") + assert.Nil(t, mainTrieResponse) + assert.Nil(t, dataTrieResponse) + assert.Equal(t, errNodeStarting, err) + + codeHash, blockInfo, err := inf.GetCodeHash("", api.AccountQueryOptions{}) + assert.Nil(t, codeHash) + assert.Equal(t, api.BlockInfo{}, blockInfo) + assert.Equal(t, errNodeStarting, err) + + accountsResponse, blockInfo, err := inf.GetAccounts([]string{}, api.AccountQueryOptions{}) + assert.Nil(t, accountsResponse) + assert.Equal(t, api.BlockInfo{}, blockInfo) + assert.Equal(t, errNodeStarting, err) + + stakeValue, err := inf.GetTotalStakedValue() + assert.Nil(t, stakeValue) + assert.Equal(t, errNodeStarting, err) + + ratings := inf.GetConnectedPeersRatings() + assert.Equal(t, "", ratings) + + epochStartData, err := inf.GetEpochStartDataAPI(0) + assert.Nil(t, epochStartData) + assert.Equal(t, errNodeStarting, err) + + alteredAcc, err := inf.GetAlteredAccountsForBlock(api.GetAlteredAccountsForBlockOptions{}) + assert.Nil(t, alteredAcc) + assert.Equal(t, 
errNodeStarting, err) + + block, err := inf.GetInternalMetaBlockByHash(0, "") + assert.Nil(t, block) + assert.Equal(t, errNodeStarting, err) + + block, err = inf.GetInternalMetaBlockByNonce(0, 0) + assert.Nil(t, block) + assert.Equal(t, errNodeStarting, err) + + block, err = inf.GetInternalMetaBlockByRound(0, 0) + assert.Nil(t, block) + assert.Equal(t, errNodeStarting, err) + + block, err = inf.GetInternalStartOfEpochMetaBlock(0, 0) + assert.Nil(t, block) + assert.Equal(t, errNodeStarting, err) + + validatorsInfo, err := inf.GetInternalStartOfEpochValidatorsInfo(0) + assert.Nil(t, validatorsInfo) + assert.Equal(t, errNodeStarting, err) + + block, err = inf.GetInternalShardBlockByHash(0, "") + assert.Nil(t, block) + assert.Equal(t, errNodeStarting, err) + + block, err = inf.GetInternalShardBlockByNonce(0, 0) + assert.Nil(t, block) + assert.Equal(t, errNodeStarting, err) + + block, err = inf.GetInternalShardBlockByRound(0, 0) + assert.Nil(t, block) + assert.Equal(t, errNodeStarting, err) + + block, err = inf.GetInternalMiniBlockByHash(0, "", 0) + assert.Nil(t, block) + assert.Equal(t, errNodeStarting, err) + + esdtData, blockInfo, err := inf.GetESDTData("", "", 0, api.AccountQueryOptions{}) + assert.Nil(t, esdtData) + assert.Equal(t, api.BlockInfo{}, blockInfo) + assert.Equal(t, errNodeStarting, err) + + genesisBalances, err := inf.GetGenesisBalances() + assert.Nil(t, genesisBalances) + assert.Equal(t, errNodeStarting, err) + + txPoolGaps, err := inf.GetTransactionsPoolNonceGapsForSender("") + assert.Nil(t, txPoolGaps) + assert.Equal(t, errNodeStarting, err) + + assert.NotNil(t, inf) +} + +func TestInitialNodeFacade_IsInterfaceNil(t *testing.T) { + t.Parallel() + + var inf *initialNodeFacade + assert.True(t, inf.IsInterfaceNil()) + + inf, _ = NewInitialNodeFacade("127.0.0.1:7799", true, &testscommon.StatusMetricsStub{}) + assert.False(t, inf.IsInterfaceNil()) } diff --git a/facade/interface.go b/facade/interface.go index 09c93a04368..f965c946ac3 100644 --- a/facade/interface.go +++ b/facade/interface.go @@ -5,9 +5,9 @@ import ( "math/big" "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/data/alteredAccount" "github.com/multiversx/mx-chain-core-go/data/api" "github.com/multiversx/mx-chain-core-go/data/esdt" - outportcore "github.com/multiversx/mx-chain-core-go/data/outport" "github.com/multiversx/mx-chain-core-go/data/transaction" "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/debug" @@ -124,7 +124,7 @@ type ApiResolver interface { GetBlockByHash(hash string, options api.BlockQueryOptions) (*api.Block, error) GetBlockByNonce(nonce uint64, options api.BlockQueryOptions) (*api.Block, error) GetBlockByRound(round uint64, options api.BlockQueryOptions) (*api.Block, error) - GetAlteredAccountsForBlock(options api.GetAlteredAccountsForBlockOptions) ([]*outportcore.AlteredAccount, error) + GetAlteredAccountsForBlock(options api.GetAlteredAccountsForBlockOptions) ([]*alteredAccount.AlteredAccount, error) GetInternalShardBlockByNonce(format common.ApiOutputFormat, nonce uint64) (interface{}, error) GetInternalShardBlockByHash(format common.ApiOutputFormat, hash string) (interface{}, error) GetInternalShardBlockByRound(format common.ApiOutputFormat, round uint64) (interface{}, error) diff --git a/facade/mock/apiResolverStub.go b/facade/mock/apiResolverStub.go index 4b32df61b95..fb615fe2f08 100644 --- a/facade/mock/apiResolverStub.go +++ b/facade/mock/apiResolverStub.go @@ -3,8 +3,8 @@ package mock import ( "context" + 
"github.com/multiversx/mx-chain-core-go/data/alteredAccount" "github.com/multiversx/mx-chain-core-go/data/api" - outportcore "github.com/multiversx/mx-chain-core-go/data/outport" "github.com/multiversx/mx-chain-core-go/data/transaction" "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/node/external" @@ -24,7 +24,7 @@ type ApiResolverStub struct { GetBlockByHashCalled func(hash string, options api.BlockQueryOptions) (*api.Block, error) GetBlockByNonceCalled func(nonce uint64, options api.BlockQueryOptions) (*api.Block, error) GetBlockByRoundCalled func(round uint64, options api.BlockQueryOptions) (*api.Block, error) - GetAlteredAccountsForBlockCalled func(options api.GetAlteredAccountsForBlockOptions) ([]*outportcore.AlteredAccount, error) + GetAlteredAccountsForBlockCalled func(options api.GetAlteredAccountsForBlockOptions) ([]*alteredAccount.AlteredAccount, error) GetTransactionHandler func(hash string, withEvents bool) (*transaction.ApiTransactionResult, error) GetInternalShardBlockByNonceCalled func(format common.ApiOutputFormat, nonce uint64) (interface{}, error) GetInternalShardBlockByHashCalled func(format common.ApiOutputFormat, hash string) (interface{}, error) @@ -81,7 +81,7 @@ func (ars *ApiResolverStub) GetBlockByRound(round uint64, options api.BlockQuery } // GetAlteredAccountsForBlock - -func (ars *ApiResolverStub) GetAlteredAccountsForBlock(options api.GetAlteredAccountsForBlockOptions) ([]*outportcore.AlteredAccount, error) { +func (ars *ApiResolverStub) GetAlteredAccountsForBlock(options api.GetAlteredAccountsForBlockOptions) ([]*alteredAccount.AlteredAccount, error) { if ars.GetAlteredAccountsForBlockCalled != nil { return ars.GetAlteredAccountsForBlockCalled(options) } diff --git a/facade/mock/nodeStub.go b/facade/mock/nodeStub.go index 729e2d568d4..4c69d0e2790 100644 --- a/facade/mock/nodeStub.go +++ b/facade/mock/nodeStub.go @@ -52,6 +52,7 @@ type NodeStub struct { GetProofCalled func(rootHash string, key string) (*common.GetProofResponse, error) GetProofDataTrieCalled func(rootHash string, address string, key string) (*common.GetProofResponse, *common.GetProofResponse, error) VerifyProofCalled func(rootHash string, address string, proof [][]byte) (bool, error) + GetTokenSupplyCalled func(token string) (*api.ESDTSupply, error) } // GetProof - @@ -268,7 +269,10 @@ func (ns *NodeStub) GetAllESDTTokens(address string, options api.AccountQueryOpt } // GetTokenSupply - -func (ns *NodeStub) GetTokenSupply(_ string) (*api.ESDTSupply, error) { +func (ns *NodeStub) GetTokenSupply(token string) (*api.ESDTSupply, error) { + if ns.GetTokenSupplyCalled != nil { + return ns.GetTokenSupplyCalled(token) + } return nil, nil } diff --git a/facade/nodeFacade.go b/facade/nodeFacade.go index eb2523e08a9..649e5f9f3bf 100644 --- a/facade/nodeFacade.go +++ b/facade/nodeFacade.go @@ -12,9 +12,9 @@ import ( "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-core-go/core/throttler" chainData "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-core-go/data/alteredAccount" apiData "github.com/multiversx/mx-chain-core-go/data/api" "github.com/multiversx/mx-chain-core-go/data/esdt" - "github.com/multiversx/mx-chain-core-go/data/outport" "github.com/multiversx/mx-chain-core-go/data/transaction" "github.com/multiversx/mx-chain-core-go/data/vm" "github.com/multiversx/mx-chain-go/common" @@ -68,8 +68,6 @@ type nodeFacade struct { accountsState state.AccountsAdapter peerState state.AccountsAdapter blockchain 
chainData.ChainHandler - ctx context.Context - cancelFunc func() } // NewNodeFacade creates a new Facade with a NodeWrapper @@ -115,7 +113,6 @@ func NewNodeFacade(arg ArgNodeFacade) (*nodeFacade, error) { peerState: arg.PeerState, blockchain: arg.Blockchain, } - nf.ctx, nf.cancelFunc = context.WithCancel(context.Background()) return nf, nil } @@ -502,7 +499,7 @@ func (nf *nodeFacade) GetBlockByRound(round uint64, options apiData.BlockQueryOp } // GetAlteredAccountsForBlock returns the altered accounts for a given block -func (nf *nodeFacade) GetAlteredAccountsForBlock(options apiData.GetAlteredAccountsForBlockOptions) ([]*outport.AlteredAccount, error) { +func (nf *nodeFacade) GetAlteredAccountsForBlock(options apiData.GetAlteredAccountsForBlockOptions) ([]*alteredAccount.AlteredAccount, error) { return nf.apiResolver.GetAlteredAccountsForBlock(options) } @@ -557,8 +554,6 @@ func (nf *nodeFacade) GetInternalMiniBlockByHash(format common.ApiOutputFormat, func (nf *nodeFacade) Close() error { log.LogIfError(nf.apiResolver.Close()) - nf.cancelFunc() - return nil } diff --git a/facade/nodeFacade_test.go b/facade/nodeFacade_test.go index a1a39380fbc..1f68c7c5108 100644 --- a/facade/nodeFacade_test.go +++ b/facade/nodeFacade_test.go @@ -12,8 +12,8 @@ import ( "github.com/multiversx/mx-chain-core-go/core" atomicCore "github.com/multiversx/mx-chain-core-go/core/atomic" - "github.com/multiversx/mx-chain-core-go/core/check" nodeData "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-core-go/data/alteredAccount" "github.com/multiversx/mx-chain-core-go/data/api" "github.com/multiversx/mx-chain-core-go/data/block" "github.com/multiversx/mx-chain-core-go/data/esdt" @@ -26,15 +26,15 @@ import ( "github.com/multiversx/mx-chain-go/heartbeat/data" "github.com/multiversx/mx-chain-go/node/external" "github.com/multiversx/mx-chain-go/process" + txSimData "github.com/multiversx/mx-chain-go/process/txsimulator/data" "github.com/multiversx/mx-chain-go/state" "github.com/multiversx/mx-chain-go/testscommon" stateMock "github.com/multiversx/mx-chain-go/testscommon/state" vmcommon "github.com/multiversx/mx-chain-vm-common-go" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) -// TODO increase code coverage +var expectedErr = errors.New("expected error") func createMockArguments() ArgNodeFacade { return ArgNodeFacade{ @@ -72,88 +72,144 @@ func createMockArguments() ArgNodeFacade { } } -// ------- NewNodeFacade - -func TestNewNodeFacade_WithNilNodeShouldErr(t *testing.T) { +func TestNewNodeFacade(t *testing.T) { t.Parallel() - arg := createMockArguments() - arg.Node = nil - nf, err := NewNodeFacade(arg) + t.Run("nil Node should error", func(t *testing.T) { + t.Parallel() - assert.True(t, check.IfNil(nf)) - assert.Equal(t, ErrNilNode, err) -} + arg := createMockArguments() + arg.Node = nil + nf, err := NewNodeFacade(arg) -func TestNewNodeFacade_WithNilApiResolverShouldErr(t *testing.T) { - t.Parallel() + require.Nil(t, nf) + require.Equal(t, ErrNilNode, err) + }) + t.Run("nil ApiResolver should error", func(t *testing.T) { + t.Parallel() - arg := createMockArguments() - arg.ApiResolver = nil - nf, err := NewNodeFacade(arg) + arg := createMockArguments() + arg.ApiResolver = nil + nf, err := NewNodeFacade(arg) - assert.True(t, check.IfNil(nf)) - assert.Equal(t, ErrNilApiResolver, err) -} + require.Nil(t, nf) + require.Equal(t, ErrNilApiResolver, err) + }) + t.Run("nil TxSimulatorProcessor should error", func(t *testing.T) { + t.Parallel() -func 
TestNewNodeFacade_WithInvalidSimultaneousRequestsShouldErr(t *testing.T) { - t.Parallel() + arg := createMockArguments() + arg.TxSimulatorProcessor = nil + nf, err := NewNodeFacade(arg) - arg := createMockArguments() - arg.WsAntifloodConfig.WebServerAntifloodEnabled = true - arg.WsAntifloodConfig.SimultaneousRequests = 0 - nf, err := NewNodeFacade(arg) + require.Nil(t, nf) + require.Equal(t, ErrNilTransactionSimulatorProcessor, err) + }) + t.Run("invalid ApiRoutesConfig should error", func(t *testing.T) { + t.Parallel() - assert.True(t, check.IfNil(nf)) - assert.True(t, errors.Is(err, ErrInvalidValue)) -} + arg := createMockArguments() + arg.ApiRoutesConfig = config.ApiRoutesConfig{} + nf, err := NewNodeFacade(arg) -func TestNewNodeFacade_WithInvalidSameSourceResetIntervalInSecShouldErr(t *testing.T) { - t.Parallel() + require.Nil(t, nf) + require.True(t, errors.Is(err, ErrNoApiRoutesConfig)) + }) + t.Run("invalid SimultaneousRequests should error", func(t *testing.T) { + t.Parallel() - arg := createMockArguments() - arg.WsAntifloodConfig.WebServerAntifloodEnabled = true - arg.WsAntifloodConfig.SameSourceResetIntervalInSec = 0 - nf, err := NewNodeFacade(arg) + arg := createMockArguments() + arg.WsAntifloodConfig.WebServerAntifloodEnabled = true + arg.WsAntifloodConfig.SimultaneousRequests = 0 + nf, err := NewNodeFacade(arg) - assert.True(t, check.IfNil(nf)) - assert.True(t, errors.Is(err, ErrInvalidValue)) -} + require.Nil(t, nf) + require.True(t, errors.Is(err, ErrInvalidValue)) + }) + t.Run("invalid SameSourceRequests should error", func(t *testing.T) { + t.Parallel() -func TestNewNodeFacade_WithInvalidSameSourceRequestsShouldErr(t *testing.T) { - t.Parallel() + arg := createMockArguments() + arg.WsAntifloodConfig.WebServerAntifloodEnabled = true + arg.WsAntifloodConfig.SameSourceRequests = 0 + nf, err := NewNodeFacade(arg) - arg := createMockArguments() - arg.WsAntifloodConfig.WebServerAntifloodEnabled = true - arg.WsAntifloodConfig.SameSourceRequests = 0 - nf, err := NewNodeFacade(arg) + require.Nil(t, nf) + require.True(t, errors.Is(err, ErrInvalidValue)) + }) + t.Run("invalid SameSourceResetIntervalInSec should error", func(t *testing.T) { + t.Parallel() - assert.True(t, check.IfNil(nf)) - assert.True(t, errors.Is(err, ErrInvalidValue)) -} + arg := createMockArguments() + arg.WsAntifloodConfig.WebServerAntifloodEnabled = true + arg.WsAntifloodConfig.SameSourceResetIntervalInSec = 0 + nf, err := NewNodeFacade(arg) -func TestNewNodeFacade_WithInvalidApiRoutesConfigShouldErr(t *testing.T) { - t.Parallel() + require.Nil(t, nf) + require.True(t, errors.Is(err, ErrInvalidValue)) + }) + t.Run("invalid TrieOperationsDeadlineMilliseconds should error", func(t *testing.T) { + t.Parallel() - arg := createMockArguments() - arg.ApiRoutesConfig = config.ApiRoutesConfig{} - nf, err := NewNodeFacade(arg) + arg := createMockArguments() + arg.WsAntifloodConfig.WebServerAntifloodEnabled = true + arg.WsAntifloodConfig.TrieOperationsDeadlineMilliseconds = 0 + nf, err := NewNodeFacade(arg) - assert.True(t, check.IfNil(nf)) - assert.True(t, errors.Is(err, ErrNoApiRoutesConfig)) -} + require.Nil(t, nf) + require.True(t, errors.Is(err, ErrInvalidValue)) + }) + t.Run("nil AccountsState should error", func(t *testing.T) { + t.Parallel() -func TestNewNodeFacade_WithValidNodeShouldReturnNotNil(t *testing.T) { - t.Parallel() + arg := createMockArguments() + arg.WsAntifloodConfig.WebServerAntifloodEnabled = true // coverage + arg.AccountsState = nil + nf, err := NewNodeFacade(arg) - arg := createMockArguments() - 
nf, err := NewNodeFacade(arg) + require.Nil(t, nf) + require.Equal(t, ErrNilAccountState, err) + }) + t.Run("nil PeerState should error", func(t *testing.T) { + t.Parallel() - assert.False(t, check.IfNil(nf)) - assert.Nil(t, err) -} + arg := createMockArguments() + arg.PeerState = nil + nf, err := NewNodeFacade(arg) + + require.Nil(t, nf) + require.Equal(t, ErrNilPeerState, err) + }) + t.Run("nil Blockchain should error", func(t *testing.T) { + t.Parallel() + + arg := createMockArguments() + arg.Blockchain = nil + nf, err := NewNodeFacade(arg) + + require.Nil(t, nf) + require.Equal(t, ErrNilBlockchain, err) + }) + + t.Run("should work", func(t *testing.T) { + t.Parallel() + + arg := createMockArguments() + arg.WsAntifloodConfig.EndpointsThrottlers = []config.EndpointsThrottlersConfig{ + { + Endpoint: "endpoint_1", + MaxNumGoRoutines: 10, + }, { + Endpoint: "endpoint_2", + MaxNumGoRoutines: 0, // NewNumGoRoutinesThrottler fails for coverage + }, + } + nf, err := NewNodeFacade(arg) -// ------- Methods + require.NotNil(t, nf) + require.NoError(t, err) + }) +} func TestNodeFacade_GetBalanceWithValidAddressShouldReturnBalance(t *testing.T) { t.Parallel() @@ -175,8 +231,8 @@ func TestNodeFacade_GetBalanceWithValidAddressShouldReturnBalance(t *testing.T) amount, _, err := nf.GetBalance(addr, api.AccountQueryOptions{}) - assert.Nil(t, err) - assert.Equal(t, balance, amount) + require.NoError(t, err) + require.Equal(t, balance, amount) } func TestNodeFacade_GetBalanceWithUnknownAddressShouldReturnZeroBalance(t *testing.T) { @@ -201,8 +257,8 @@ func TestNodeFacade_GetBalanceWithUnknownAddressShouldReturnZeroBalance(t *testi nf, _ := NewNodeFacade(arg) amount, _, err := nf.GetBalance(unknownAddr, api.AccountQueryOptions{}) - assert.Nil(t, err) - assert.Equal(t, zeroBalance, amount) + require.NoError(t, err) + require.Equal(t, zeroBalance, amount) } func TestNodeFacade_GetBalanceWithErrorOnNodeShouldReturnZeroBalanceAndError(t *testing.T) { @@ -222,8 +278,8 @@ func TestNodeFacade_GetBalanceWithErrorOnNodeShouldReturnZeroBalanceAndError(t * nf, _ := NewNodeFacade(arg) amount, _, err := nf.GetBalance(addr, api.AccountQueryOptions{}) - assert.NotNil(t, err) - assert.Equal(t, zeroBalance, amount) + require.NotNil(t, err) + require.Equal(t, zeroBalance, amount) } func TestNodeFacade_GetTransactionWithValidInputsShouldNotReturnError(t *testing.T) { @@ -246,8 +302,8 @@ func TestNodeFacade_GetTransactionWithValidInputsShouldNotReturnError(t *testing nf, _ := NewNodeFacade(arg) tx, err := nf.GetTransaction(testHash, false) - assert.Nil(t, err) - assert.Equal(t, testTx, tx) + require.NoError(t, err) + require.Equal(t, testTx, tx) } func TestNodeFacade_GetTransactionWithUnknowHashShouldReturnNilAndNoError(t *testing.T) { @@ -267,8 +323,8 @@ func TestNodeFacade_GetTransactionWithUnknowHashShouldReturnNilAndNoError(t *tes nf, _ := NewNodeFacade(arg) tx, err := nf.GetTransaction("unknownHash", false) - assert.Nil(t, err) - assert.Nil(t, tx) + require.NoError(t, err) + require.Nil(t, tx) } func TestNodeFacade_SetSyncer(t *testing.T) { @@ -279,25 +335,43 @@ func TestNodeFacade_SetSyncer(t *testing.T) { sync := &mock.SyncTimerMock{} nf.SetSyncer(sync) - assert.Equal(t, sync, nf.GetSyncer()) + require.Equal(t, sync, nf.GetSyncer()) } func TestNodeFacade_GetAccount(t *testing.T) { t.Parallel() - getAccountCalled := false - node := &mock.NodeStub{} - node.GetAccountCalled = func(address string, _ api.AccountQueryOptions) (api.AccountResponse, api.BlockInfo, error) { - getAccountCalled = true - return 
api.AccountResponse{}, api.BlockInfo{}, nil - } + t.Run("should error", func(t *testing.T) { + t.Parallel() - arg := createMockArguments() - arg.Node = node - nf, _ := NewNodeFacade(arg) + arg := createMockArguments() + arg.Node = &mock.NodeStub{ + GetAccountCalled: func(_ string, _ api.AccountQueryOptions) (api.AccountResponse, api.BlockInfo, error) { + return api.AccountResponse{}, api.BlockInfo{}, expectedErr + }, + } + nf, _ := NewNodeFacade(arg) - _, _, _ = nf.GetAccount("test", api.AccountQueryOptions{}) - assert.True(t, getAccountCalled) + _, _, err := nf.GetAccount("test", api.AccountQueryOptions{}) + require.Equal(t, expectedErr, err) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + getAccountCalled := false + node := &mock.NodeStub{} + node.GetAccountCalled = func(address string, _ api.AccountQueryOptions) (api.AccountResponse, api.BlockInfo, error) { + getAccountCalled = true + return api.AccountResponse{}, api.BlockInfo{}, nil + } + + arg := createMockArguments() + arg.Node = node + nf, _ := NewNodeFacade(arg) + + _, _, _ = nf.GetAccount("test", api.AccountQueryOptions{}) + require.True(t, getAccountCalled) + }) } func TestNodeFacade_GetAccounts(t *testing.T) { @@ -311,16 +385,15 @@ func TestNodeFacade_GetAccounts(t *testing.T) { nf, _ := NewNodeFacade(arg) resp, blockInfo, err := nf.GetAccounts([]string{"test1", "test2"}, api.AccountQueryOptions{}) - assert.Nil(t, resp) - assert.Empty(t, blockInfo) - assert.Error(t, err) - assert.Equal(t, "too many addresses in the bulk request (provided: 2, maximum: 1)", err.Error()) + require.Nil(t, resp) + require.Empty(t, blockInfo) + require.Error(t, err) + require.Equal(t, "too many addresses in the bulk request (provided: 2, maximum: 1)", err.Error()) }) t.Run("node responds with error, should err", func(t *testing.T) { t.Parallel() - expectedErr := errors.New("expected error") node := &mock.NodeStub{} node.GetAccountCalled = func(address string, _ api.AccountQueryOptions) (api.AccountResponse, api.BlockInfo, error) { return api.AccountResponse{}, api.BlockInfo{}, expectedErr @@ -332,9 +405,9 @@ func TestNodeFacade_GetAccounts(t *testing.T) { nf, _ := NewNodeFacade(arg) resp, blockInfo, err := nf.GetAccounts([]string{"test"}, api.AccountQueryOptions{}) - assert.Nil(t, resp) - assert.Empty(t, blockInfo) - assert.Equal(t, expectedErr, err) + require.Nil(t, resp) + require.Empty(t, blockInfo) + require.Equal(t, expectedErr, err) }) t.Run("should work", func(t *testing.T) { @@ -352,9 +425,9 @@ func TestNodeFacade_GetAccounts(t *testing.T) { nf, _ := NewNodeFacade(arg) resp, blockInfo, err := nf.GetAccounts([]string{"test"}, api.AccountQueryOptions{}) - assert.NoError(t, err) - assert.Empty(t, blockInfo) - assert.Equal(t, &expectedAcount, resp["test"]) + require.NoError(t, err) + require.Empty(t, blockInfo) + require.Equal(t, &expectedAcount, resp["test"]) }) } @@ -372,8 +445,8 @@ func TestNodeFacade_GetUsername(t *testing.T) { nf, _ := NewNodeFacade(arg) username, _, err := nf.GetUsername("test", api.AccountQueryOptions{}) - assert.NoError(t, err) - assert.Equal(t, expectedUsername, username) + require.NoError(t, err) + require.Equal(t, expectedUsername, username) } func TestNodeFacade_GetCodeHash(t *testing.T) { @@ -390,8 +463,8 @@ func TestNodeFacade_GetCodeHash(t *testing.T) { nf, _ := NewNodeFacade(arg) codeHash, _, err := nf.GetCodeHash("test", api.AccountQueryOptions{}) - assert.NoError(t, err) - assert.Equal(t, expectedCodeHash, codeHash) + require.NoError(t, err) + require.Equal(t, expectedCodeHash, codeHash) } 
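// NOTE (editorial aside, not part of the patch): the surrounding hunk migrates
// nodeFacade_test.go from testify's assert helpers to require. The sketch below
// is a minimal, hypothetical illustration of the difference, under the assumption
// that the intent is fail-fast behaviour: require stops the test at the first
// failed check, so later dereferences cannot panic on a nil result, while assert
// records the failure and keeps executing. All names here are illustrative only.
package facade_test

import (
	"errors"
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

var errExpected = errors.New("expected error")

// fetch stands in for any facade call that can return a nil result alongside an error.
func fetch(fail bool) (*int, error) {
	if fail {
		return nil, errExpected
	}
	v := 42
	return &v, nil
}

func TestFetch_AssertVsRequire(t *testing.T) {
	res, err := fetch(false)

	assert.NoError(t, err)  // records a failure but lets the test keep running
	require.NoError(t, err) // aborts the test immediately if err is non-nil
	require.NotNil(t, res)  // only after this passes is the dereference below safe
	require.Equal(t, 42, *res)
}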
func TestNodeFacade_GetHeartbeatsReturnsNilShouldErr(t *testing.T) { @@ -408,8 +481,8 @@ func TestNodeFacade_GetHeartbeatsReturnsNilShouldErr(t *testing.T) { result, err := nf.GetHeartbeats() - assert.Nil(t, result) - assert.Equal(t, ErrHeartbeatsNotActive, err) + require.Nil(t, result) + require.Equal(t, ErrHeartbeatsNotActive, err) } func TestNodeFacade_GetHeartbeats(t *testing.T) { @@ -439,7 +512,7 @@ func TestNodeFacade_GetHeartbeats(t *testing.T) { result, err := nf.GetHeartbeats() - assert.Nil(t, err) + require.NoError(t, err) fmt.Println(result) } @@ -458,7 +531,7 @@ func TestNodeFacade_GetDataValue(t *testing.T) { require.NoError(t, err) _, _ = nf.ExecuteSCQuery(nil) - assert.True(t, wasCalled) + require.True(t, wasCalled) } func TestNodeFacade_EmptyRestInterface(t *testing.T) { @@ -468,7 +541,7 @@ func TestNodeFacade_EmptyRestInterface(t *testing.T) { arg.FacadeConfig.RestApiInterface = "" nf, _ := NewNodeFacade(arg) - assert.Equal(t, DefaultRestInterface, nf.RestApiInterface()) + require.Equal(t, DefaultRestInterface, nf.RestApiInterface()) } func TestNodeFacade_RestInterface(t *testing.T) { @@ -479,7 +552,7 @@ func TestNodeFacade_RestInterface(t *testing.T) { arg.FacadeConfig.RestApiInterface = intf nf, _ := NewNodeFacade(arg) - assert.Equal(t, intf, nf.RestApiInterface()) + require.Equal(t, intf, nf.RestApiInterface()) } func TestNodeFacade_ValidatorStatisticsApi(t *testing.T) { @@ -497,8 +570,8 @@ func TestNodeFacade_ValidatorStatisticsApi(t *testing.T) { nf, _ := NewNodeFacade(arg) res, err := nf.ValidatorStatisticsApi() - assert.Nil(t, err) - assert.Equal(t, mapToRet, res) + require.NoError(t, err) + require.Equal(t, mapToRet, res) } func TestNodeFacade_SendBulkTransactions(t *testing.T) { @@ -521,9 +594,9 @@ func TestNodeFacade_SendBulkTransactions(t *testing.T) { txs = append(txs, &transaction.Transaction{Nonce: 1}) res, err := nf.SendBulkTransactions(txs) - assert.Nil(t, err) - assert.Equal(t, expectedNumOfSuccessfulTxs, res) - assert.True(t, sendBulkTxsWasCalled) + require.NoError(t, err) + require.Equal(t, expectedNumOfSuccessfulTxs, res) + require.True(t, sendBulkTxsWasCalled) } func TestNodeFacade_StatusMetrics(t *testing.T) { @@ -543,7 +616,7 @@ func TestNodeFacade_StatusMetrics(t *testing.T) { _ = nf.StatusMetrics() - assert.True(t, apiResolverMetricsRequested) + require.True(t, apiResolverMetricsRequested) } func TestNodeFacade_PprofEnabled(t *testing.T) { @@ -553,7 +626,7 @@ func TestNodeFacade_PprofEnabled(t *testing.T) { arg.FacadeConfig.PprofEnabled = true nf, _ := NewNodeFacade(arg) - assert.True(t, nf.PprofEnabled()) + require.True(t, nf.PprofEnabled()) } func TestNodeFacade_RestAPIServerDebugMode(t *testing.T) { @@ -563,7 +636,7 @@ func TestNodeFacade_RestAPIServerDebugMode(t *testing.T) { arg.RestAPIServerDebugMode = true nf, _ := NewNodeFacade(arg) - assert.True(t, nf.RestAPIServerDebugMode()) + require.True(t, nf.RestAPIServerDebugMode()) } func TestNodeFacade_CreateTransaction(t *testing.T) { @@ -582,14 +655,13 @@ func TestNodeFacade_CreateTransaction(t *testing.T) { _, _, _ = nf.CreateTransaction(&external.ArgsCreateTransaction{}) - assert.True(t, nodeCreateTxWasCalled) + require.True(t, nodeCreateTxWasCalled) } func TestNodeFacade_Trigger(t *testing.T) { t.Parallel() wasCalled := false - expectedErr := errors.New("expected err") arg := createMockArguments() epoch := uint32(4638) recoveredEpoch := uint32(0) @@ -607,10 +679,10 @@ func TestNodeFacade_Trigger(t *testing.T) { err := nf.Trigger(epoch, true) - assert.True(t, wasCalled) - assert.Equal(t, 
expectedErr, err) - assert.Equal(t, epoch, atomic.LoadUint32(&recoveredEpoch)) - assert.True(t, recoveredWithEarlyEndOfEpoch.IsSet()) + require.True(t, wasCalled) + require.Equal(t, expectedErr, err) + require.Equal(t, epoch, atomic.LoadUint32(&recoveredEpoch)) + require.True(t, recoveredWithEarlyEndOfEpoch.IsSet()) } func TestNodeFacade_IsSelfTrigger(t *testing.T) { @@ -628,8 +700,8 @@ func TestNodeFacade_IsSelfTrigger(t *testing.T) { isSelf := nf.IsSelfTrigger() - assert.True(t, wasCalled) - assert.True(t, isSelf) + require.True(t, wasCalled) + require.True(t, isSelf) } func TestNodeFacade_EncodeDecodeAddressPubkey(t *testing.T) { @@ -639,12 +711,12 @@ func TestNodeFacade_EncodeDecodeAddressPubkey(t *testing.T) { arg := createMockArguments() nf, _ := NewNodeFacade(arg) encoded, err := nf.EncodeAddressPubkey(buff) - assert.Nil(t, err) + require.NoError(t, err) recoveredBytes, err := nf.DecodeAddressPubkey(encoded) - assert.Nil(t, err) - assert.Equal(t, buff, recoveredBytes) + require.NoError(t, err) + require.Equal(t, buff, recoveredBytes) } func TestNodeFacade_GetQueryHandler(t *testing.T) { @@ -663,9 +735,9 @@ func TestNodeFacade_GetQueryHandler(t *testing.T) { qh, err := nf.GetQueryHandler("") - assert.Nil(t, qh) - assert.Nil(t, err) - assert.True(t, wasCalled) + require.Nil(t, qh) + require.NoError(t, err) + require.True(t, wasCalled) } func TestNodeFacade_GetPeerInfo(t *testing.T) { @@ -684,8 +756,20 @@ func TestNodeFacade_GetPeerInfo(t *testing.T) { val, err := nf.GetPeerInfo("") - assert.Nil(t, err) - assert.Equal(t, []core.QueryP2PPeerInfo{pinfo}, val) + require.NoError(t, err) + require.Equal(t, []core.QueryP2PPeerInfo{pinfo}, val) +} + +func TestNodeFacade_GetThrottlerForEndpointAntifloodDisabledShouldReturnDisabled(t *testing.T) { + t.Parallel() + + arg := createMockArguments() + nf, _ := NewNodeFacade(arg) + + thr, ok := nf.GetThrottlerForEndpoint("any-endpoint") + require.NotNil(t, thr) + require.True(t, ok) + require.Equal(t, "*disabled.disabledThrottler", fmt.Sprintf("%T", thr)) } func TestNodeFacade_GetThrottlerForEndpointNoConfigShouldReturnNilAndFalse(t *testing.T) { @@ -698,8 +782,8 @@ func TestNodeFacade_GetThrottlerForEndpointNoConfigShouldReturnNilAndFalse(t *te thr, ok := nf.GetThrottlerForEndpoint("any-endpoint") - assert.Nil(t, thr) - assert.False(t, ok) + require.Nil(t, thr) + require.False(t, ok) } func TestNodeFacade_GetThrottlerForEndpointNotFoundShouldReturnNilAndFalse(t *testing.T) { @@ -717,8 +801,8 @@ func TestNodeFacade_GetThrottlerForEndpointNotFoundShouldReturnNilAndFalse(t *te thr, ok := nf.GetThrottlerForEndpoint("different-endpoint") - assert.Nil(t, thr) - assert.False(t, ok) + require.Nil(t, thr) + require.False(t, ok) } func TestNodeFacade_GetThrottlerForEndpointShouldFindAndReturn(t *testing.T) { @@ -736,8 +820,8 @@ func TestNodeFacade_GetThrottlerForEndpointShouldFindAndReturn(t *testing.T) { thr, ok := nf.GetThrottlerForEndpoint("endpoint") - assert.NotNil(t, thr) - assert.True(t, ok) + require.NotNil(t, thr) + require.True(t, ok) } func TestNodeFacade_GetKeyValuePairs(t *testing.T) { @@ -754,8 +838,8 @@ func TestNodeFacade_GetKeyValuePairs(t *testing.T) { nf, _ := NewNodeFacade(arg) res, _, err := nf.GetKeyValuePairs("addr", api.AccountQueryOptions{}) - assert.NoError(t, err) - assert.Equal(t, expectedPairs, res) + require.NoError(t, err) + require.Equal(t, expectedPairs, res) } func TestNodeFacade_GetGuardianData(t *testing.T) { @@ -764,7 +848,6 @@ func TestNodeFacade_GetGuardianData(t *testing.T) { emptyGuardianData := api.GuardianData{} 
testAddress := "test address" - expectedErr := errors.New("expected error") expectedGuardianData := api.GuardianData{ ActiveGuardian: &api.Guardian{ @@ -789,14 +872,14 @@ func TestNodeFacade_GetGuardianData(t *testing.T) { t.Run("with error", func(t *testing.T) { nf, _ := NewNodeFacade(arg) res, _, err := nf.GetGuardianData("", api.AccountQueryOptions{}) - assert.Equal(t, expectedErr, err) - assert.Equal(t, emptyGuardianData, res) + require.Equal(t, expectedErr, err) + require.Equal(t, emptyGuardianData, res) }) t.Run("ok", func(t *testing.T) { nf, _ := NewNodeFacade(arg) res, _, err := nf.GetGuardianData(testAddress, api.AccountQueryOptions{}) - assert.NoError(t, err) - assert.Equal(t, expectedGuardianData, res) + require.NoError(t, err) + require.Equal(t, expectedGuardianData, res) }) } @@ -817,8 +900,8 @@ func TestNodeFacade_GetAllESDTTokens(t *testing.T) { nf, _ := NewNodeFacade(arg) res, _, err := nf.GetAllESDTTokens("addr", api.AccountQueryOptions{}) - assert.NoError(t, err) - assert.Equal(t, expectedTokens, res) + require.NoError(t, err) + require.Equal(t, expectedTokens, res) } func TestNodeFacade_GetESDTData(t *testing.T) { @@ -837,8 +920,8 @@ func TestNodeFacade_GetESDTData(t *testing.T) { nf, _ := NewNodeFacade(arg) res, _, err := nf.GetESDTData("addr", "tkn", 0, api.AccountQueryOptions{}) - assert.NoError(t, err) - assert.Equal(t, expectedData, res) + require.NoError(t, err) + require.Equal(t, expectedData, res) } func TestNodeFacade_GetValueForKey(t *testing.T) { @@ -855,8 +938,8 @@ func TestNodeFacade_GetValueForKey(t *testing.T) { nf, _ := NewNodeFacade(arg) res, _, err := nf.GetValueForKey("addr", "key", api.AccountQueryOptions{}) - assert.NoError(t, err) - assert.Equal(t, expectedValue, res) + require.NoError(t, err) + require.Equal(t, expectedValue, res) } func TestNodeFacade_GetAllIssuedESDTs(t *testing.T) { @@ -873,8 +956,8 @@ func TestNodeFacade_GetAllIssuedESDTs(t *testing.T) { nf, _ := NewNodeFacade(arg) res, err := nf.GetAllIssuedESDTs("") - assert.NoError(t, err) - assert.Equal(t, expectedValue, res) + require.NoError(t, err) + require.Equal(t, expectedValue, res) } func TestNodeFacade_GetESDTsWithRole(t *testing.T) { @@ -929,7 +1012,7 @@ func TestNodeFacade_GetAllIssuedESDTsWithError(t *testing.T) { nf, _ := NewNodeFacade(arg) _, err := nf.GetAllIssuedESDTs("") - assert.Equal(t, err, localErr) + require.Equal(t, err, localErr) } func TestNodeFacade_ValidateTransactionForSimulation(t *testing.T) { @@ -946,8 +1029,8 @@ func TestNodeFacade_ValidateTransactionForSimulation(t *testing.T) { nf, _ := NewNodeFacade(arg) err := nf.ValidateTransactionForSimulation(&transaction.Transaction{}, false) - assert.Nil(t, err) - assert.True(t, called) + require.NoError(t, err) + require.True(t, called) } func TestNodeFacade_GetTotalStakedValue(t *testing.T) { @@ -964,8 +1047,8 @@ func TestNodeFacade_GetTotalStakedValue(t *testing.T) { nf, _ := NewNodeFacade(arg) _, err := nf.GetTotalStakedValue() - assert.Nil(t, err) - assert.True(t, called) + require.NoError(t, err) + require.True(t, called) } func TestNodeFacade_GetDelegatorsList(t *testing.T) { @@ -982,8 +1065,8 @@ func TestNodeFacade_GetDelegatorsList(t *testing.T) { nf, _ := NewNodeFacade(arg) _, err := nf.GetDelegatorsList() - assert.Nil(t, err) - assert.True(t, called) + require.NoError(t, err) + require.True(t, called) } func TestNodeFacade_GetDirectStakedList(t *testing.T) { @@ -1000,8 +1083,8 @@ func TestNodeFacade_GetDirectStakedList(t *testing.T) { nf, _ := NewNodeFacade(arg) _, err := nf.GetDirectStakedList() - 
assert.Nil(t, err) - assert.True(t, called) + require.NoError(t, err) + require.True(t, called) } func TestNodeFacade_GetProofCurrentRootHashIsEmptyShouldErr(t *testing.T) { @@ -1016,8 +1099,8 @@ func TestNodeFacade_GetProofCurrentRootHashIsEmptyShouldErr(t *testing.T) { nf, _ := NewNodeFacade(arg) response, err := nf.GetProofCurrentRootHash("addr") - assert.Nil(t, response) - assert.Equal(t, ErrEmptyRootHash, err) + require.Nil(t, response) + require.Equal(t, ErrEmptyRootHash, err) } func TestNodeFacade_GetProof(t *testing.T) { @@ -1037,8 +1120,8 @@ func TestNodeFacade_GetProof(t *testing.T) { nf, _ := NewNodeFacade(arg) response, err := nf.GetProof("hash", "addr") - assert.Nil(t, err) - assert.Equal(t, expectedResponse, response) + require.NoError(t, err) + require.Equal(t, expectedResponse, response) } func TestNodeFacade_GetProofCurrentRootHash(t *testing.T) { @@ -1058,8 +1141,8 @@ func TestNodeFacade_GetProofCurrentRootHash(t *testing.T) { nf, _ := NewNodeFacade(arg) response, err := nf.GetProofCurrentRootHash("addr") - assert.Nil(t, err) - assert.Equal(t, expectedResponse, response) + require.NoError(t, err) + require.Equal(t, expectedResponse, response) } func TestNodeFacade_GetProofDataTrie(t *testing.T) { @@ -1084,9 +1167,9 @@ func TestNodeFacade_GetProofDataTrie(t *testing.T) { nf, _ := NewNodeFacade(arg) mainTrieResponse, dataTrieResponse, err := nf.GetProofDataTrie("hash", "addr", "key") - assert.Nil(t, err) - assert.Equal(t, expectedResponseMainTrie, mainTrieResponse) - assert.Equal(t, expectedResponseDataTrie, dataTrieResponse) + require.NoError(t, err) + require.Equal(t, expectedResponseMainTrie, mainTrieResponse) + require.Equal(t, expectedResponseDataTrie, dataTrieResponse) } func TestNodeFacade_VerifyProof(t *testing.T) { @@ -1101,47 +1184,66 @@ func TestNodeFacade_VerifyProof(t *testing.T) { nf, _ := NewNodeFacade(arg) response, err := nf.VerifyProof("hash", "addr", [][]byte{[]byte("proof")}) - assert.Nil(t, err) - assert.True(t, response) + require.NoError(t, err) + require.True(t, response) } func TestNodeFacade_ExecuteSCQuery(t *testing.T) { t.Parallel() - executeScQueryHandlerWasCalled := false - arg := createMockArguments() + t.Run("should error", func(t *testing.T) { + t.Parallel() - expectedAddress := []byte("addr") - expectedBalance := big.NewInt(37) - expectedVmOutput := &vmcommon.VMOutput{ - ReturnData: [][]byte{[]byte("test return data")}, - ReturnCode: vmcommon.AccountCollision, - OutputAccounts: map[string]*vmcommon.OutputAccount{ - "key0": { - Address: expectedAddress, - Balance: expectedBalance, + arg := createMockArguments() + arg.ApiResolver = &mock.ApiResolverStub{ + ExecuteSCQueryHandler: func(_ *process.SCQuery) (*vmcommon.VMOutput, error) { + return nil, expectedErr }, - }, - } - arg.ApiResolver = &mock.ApiResolverStub{ - ExecuteSCQueryHandler: func(_ *process.SCQuery) (*vmcommon.VMOutput, error) { - executeScQueryHandlerWasCalled = true - return expectedVmOutput, nil - }, - } + } - nf, _ := NewNodeFacade(arg) + nf, _ := NewNodeFacade(arg) - apiVmOutput, err := nf.ExecuteSCQuery(&process.SCQuery{}) - require.NoError(t, err) - require.True(t, executeScQueryHandlerWasCalled) - require.Equal(t, expectedVmOutput.ReturnData, apiVmOutput.ReturnData) - require.Equal(t, expectedVmOutput.ReturnCode.String(), apiVmOutput.ReturnCode) - require.Equal(t, 1, len(apiVmOutput.OutputAccounts)) + _, err := nf.ExecuteSCQuery(&process.SCQuery{}) + require.Equal(t, expectedErr, err) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + 
executeScQueryHandlerWasCalled := false + arg := createMockArguments() + + expectedAddress := []byte("addr") + expectedBalance := big.NewInt(37) + expectedVmOutput := &vmcommon.VMOutput{ + ReturnData: [][]byte{[]byte("test return data")}, + ReturnCode: vmcommon.AccountCollision, + OutputAccounts: map[string]*vmcommon.OutputAccount{ + "key0": { + Address: expectedAddress, + Balance: expectedBalance, + }, + }, + } + arg.ApiResolver = &mock.ApiResolverStub{ + ExecuteSCQueryHandler: func(_ *process.SCQuery) (*vmcommon.VMOutput, error) { + executeScQueryHandlerWasCalled = true + return expectedVmOutput, nil + }, + } + + nf, _ := NewNodeFacade(arg) - outputAccount := apiVmOutput.OutputAccounts[hex.EncodeToString([]byte("key0"))] - require.Equal(t, expectedBalance, outputAccount.Balance) - require.Equal(t, hex.EncodeToString(expectedAddress), outputAccount.Address) + apiVmOutput, err := nf.ExecuteSCQuery(&process.SCQuery{}) + require.NoError(t, err) + require.True(t, executeScQueryHandlerWasCalled) + require.Equal(t, expectedVmOutput.ReturnData, apiVmOutput.ReturnData) + require.Equal(t, expectedVmOutput.ReturnCode.String(), apiVmOutput.ReturnCode) + require.Equal(t, 1, len(apiVmOutput.OutputAccounts)) + + outputAccount := apiVmOutput.OutputAccounts[hex.EncodeToString([]byte("key0"))] + require.Equal(t, expectedBalance, outputAccount.Balance) + require.Equal(t, hex.EncodeToString(expectedAddress), outputAccount.Address) + }) } func TestNodeFacade_GetBlockByRoundShouldWork(t *testing.T) { @@ -1162,8 +1264,8 @@ func TestNodeFacade_GetBlockByRoundShouldWork(t *testing.T) { nf, _ := NewNodeFacade(arg) ret, err := nf.GetBlockByRound(0, api.BlockQueryOptions{}) - assert.Nil(t, err) - assert.Equal(t, ret, blk) + require.NoError(t, err) + require.Equal(t, ret, blk) } // ---- MetaBlock @@ -1186,8 +1288,8 @@ func TestNodeFacade_GetInternalMetaBlockByNonceShouldWork(t *testing.T) { nf, _ := NewNodeFacade(arg) ret, err := nf.GetInternalMetaBlockByNonce(common.ApiOutputFormatProto, 0) - assert.Nil(t, err) - assert.Equal(t, ret, blk) + require.NoError(t, err) + require.Equal(t, ret, blk) } func TestNodeFacade_GetInternalMetaBlockByRoundShouldWork(t *testing.T) { @@ -1208,8 +1310,8 @@ func TestNodeFacade_GetInternalMetaBlockByRoundShouldWork(t *testing.T) { nf, _ := NewNodeFacade(arg) ret, err := nf.GetInternalMetaBlockByRound(common.ApiOutputFormatProto, 0) - assert.Nil(t, err) - assert.Equal(t, ret, blk) + require.NoError(t, err) + require.Equal(t, ret, blk) } func TestNodeFacade_GetInternalMetaBlockByHashShouldWork(t *testing.T) { @@ -1230,8 +1332,8 @@ func TestNodeFacade_GetInternalMetaBlockByHashShouldWork(t *testing.T) { nf, _ := NewNodeFacade(arg) ret, err := nf.GetInternalMetaBlockByHash(common.ApiOutputFormatProto, "dummyhash") - assert.Nil(t, err) - assert.Equal(t, ret, blk) + require.NoError(t, err) + require.Equal(t, ret, blk) } // ---- ShardBlock @@ -1254,8 +1356,8 @@ func TestNodeFacade_GetInternalShardBlockByNonceShouldWork(t *testing.T) { nf, _ := NewNodeFacade(arg) ret, err := nf.GetInternalShardBlockByNonce(common.ApiOutputFormatProto, 0) - assert.Nil(t, err) - assert.Equal(t, ret, blk) + require.NoError(t, err) + require.Equal(t, ret, blk) } func TestNodeFacade_GetInternalShardBlockByRoundShouldWork(t *testing.T) { @@ -1276,8 +1378,8 @@ func TestNodeFacade_GetInternalShardBlockByRoundShouldWork(t *testing.T) { nf, _ := NewNodeFacade(arg) ret, err := nf.GetInternalShardBlockByRound(common.ApiOutputFormatProto, 0) - assert.Nil(t, err) - assert.Equal(t, ret, blk) + require.NoError(t, err) + 
require.Equal(t, ret, blk) } func TestNodeFacade_GetInternalShardBlockByHashShouldWork(t *testing.T) { @@ -1298,8 +1400,8 @@ func TestNodeFacade_GetInternalShardBlockByHashShouldWork(t *testing.T) { nf, _ := NewNodeFacade(arg) ret, err := nf.GetInternalShardBlockByHash(common.ApiOutputFormatProto, "dummyhash") - assert.Nil(t, err) - assert.Equal(t, ret, blk) + require.NoError(t, err) + require.Equal(t, ret, blk) } func TestNodeFacade_GetInternalMiniBlockByHashShouldWork(t *testing.T) { @@ -1320,8 +1422,8 @@ func TestNodeFacade_GetInternalMiniBlockByHashShouldWork(t *testing.T) { nf, _ := NewNodeFacade(arg) ret, err := nf.GetInternalMiniBlockByHash(common.ApiOutputFormatProto, "dummyhash", 1) - assert.Nil(t, err) - assert.Equal(t, ret, blk) + require.NoError(t, err) + require.Equal(t, ret, blk) } func TestFacade_convertVmOutputToApiResponse(t *testing.T) { @@ -1402,7 +1504,6 @@ func TestNodeFacade_GetTransactionsPool(t *testing.T) { t.Parallel() arg := createMockArguments() - expectedErr := errors.New("expected error") arg.ApiResolver = &mock.ApiResolverStub{ GetTransactionsPoolCalled: func(fields string) (*common.TransactionsPoolAPIResponse, error) { return nil, expectedErr @@ -1465,14 +1566,57 @@ func TestNodeFacade_GetTransactionsPool(t *testing.T) { }) } -func TestNodeFacade_GetGenesisBalances(t *testing.T) { +func TestNodeFacade_GetGenesisNodesPubKeys(t *testing.T) { t.Parallel() t.Run("should return error", func(t *testing.T) { t.Parallel() arg := createMockArguments() - expectedErr := errors.New("expected error") + arg.ApiResolver = &mock.ApiResolverStub{ + GetGenesisNodesPubKeysCalled: func() (map[uint32][]string, map[uint32][]string) { + return nil, nil + }, + } + + nf, _ := NewNodeFacade(arg) + eligible, waiting, err := nf.GetGenesisNodesPubKeys() + require.Nil(t, eligible) + require.Nil(t, waiting) + require.Equal(t, ErrNilGenesisNodes, err) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + providedEligible := map[uint32][]string{ + 0: {"pk1", "pk2"}, + } + providedWaiting := map[uint32][]string{ + 1: {"pk3", "pk4"}, + } + + arg := createMockArguments() + arg.ApiResolver = &mock.ApiResolverStub{ + GetGenesisNodesPubKeysCalled: func() (map[uint32][]string, map[uint32][]string) { + return providedEligible, providedWaiting + }, + } + + nf, _ := NewNodeFacade(arg) + eligible, waiting, err := nf.GetGenesisNodesPubKeys() + require.NoError(t, err) + require.Equal(t, providedEligible, eligible) + require.Equal(t, providedWaiting, waiting) + }) +} + +func TestNodeFacade_GetGenesisBalances(t *testing.T) { + t.Parallel() + + t.Run("GetGenesisBalances error should return error", func(t *testing.T) { + t.Parallel() + + arg := createMockArguments() arg.ApiResolver = &mock.ApiResolverStub{ GetGenesisBalancesCalled: func() ([]*common.InitialAccountAPI, error) { return nil, expectedErr @@ -1484,6 +1628,21 @@ func TestNodeFacade_GetGenesisBalances(t *testing.T) { require.Nil(t, res) require.Equal(t, expectedErr, err) }) + t.Run("GetGenesisBalances returns empty initial accounts should return error", func(t *testing.T) { + t.Parallel() + + arg := createMockArguments() + arg.ApiResolver = &mock.ApiResolverStub{ + GetGenesisBalancesCalled: func() ([]*common.InitialAccountAPI, error) { + return nil, nil + }, + } + + nf, _ := NewNodeFacade(arg) + res, err := nf.GetGenesisBalances() + require.Nil(t, res) + require.Equal(t, ErrNilGenesisBalances, err) + }) t.Run("should work", func(t *testing.T) { t.Parallel() @@ -1561,7 +1720,6 @@ func TestNodeFacade_GetTransactionsPoolForSender(t 
*testing.T) { t.Parallel() arg := createMockArguments() - expectedErr := errors.New("expected error") arg.ApiResolver = &mock.ApiResolverStub{ GetTransactionsPoolForSenderCalled: func(sender, fields string) (*common.TransactionsPoolForSenderApiResponse, error) { return nil, expectedErr @@ -1620,7 +1778,6 @@ func TestNodeFacade_GetLastPoolNonceForSender(t *testing.T) { t.Parallel() arg := createMockArguments() - expectedErr := errors.New("expected error") arg.ApiResolver = &mock.ApiResolverStub{ GetLastPoolNonceForSenderCalled: func(sender string) (uint64, error) { return 0, expectedErr @@ -1655,11 +1812,32 @@ func TestNodeFacade_GetLastPoolNonceForSender(t *testing.T) { func TestNodeFacade_GetTransactionsPoolNonceGapsForSender(t *testing.T) { t.Parallel() + t.Run("GetAccount error should error", func(t *testing.T) { + t.Parallel() + + arg := createMockArguments() + arg.Node = &mock.NodeStub{ + GetAccountCalled: func(address string, options api.AccountQueryOptions) (api.AccountResponse, api.BlockInfo, error) { + return api.AccountResponse{}, api.BlockInfo{}, expectedErr + }, + } + arg.ApiResolver = &mock.ApiResolverStub{ + GetTransactionsPoolNonceGapsForSenderCalled: func(sender string, senderAccountNonce uint64) (*common.TransactionsPoolNonceGapsForSenderApiResponse, error) { + require.Fail(t, "should have not been called") + return nil, nil + }, + } + + nf, _ := NewNodeFacade(arg) + res, err := nf.GetTransactionsPoolNonceGapsForSender("") + require.Equal(t, &common.TransactionsPoolNonceGapsForSenderApiResponse{}, res) + require.Equal(t, expectedErr, err) + }) + t.Run("should error", func(t *testing.T) { t.Parallel() arg := createMockArguments() - expectedErr := errors.New("expected error") arg.Node = &mock.NodeStub{ GetAccountCalled: func(address string, options api.AccountQueryOptions) (api.AccountResponse, api.BlockInfo, error) { return api.AccountResponse{}, api.BlockInfo{}, nil @@ -1699,7 +1877,7 @@ func TestNodeFacade_GetTransactionsPoolNonceGapsForSender(t *testing.T) { } arg.ApiResolver = &mock.ApiResolverStub{ GetTransactionsPoolNonceGapsForSenderCalled: func(sender string, senderAccountNonce uint64) (*common.TransactionsPoolNonceGapsForSenderApiResponse, error) { - assert.Equal(t, providedNonce, senderAccountNonce) + require.Equal(t, providedNonce, senderAccountNonce) return expectedNonceGaps, nil }, } @@ -1718,7 +1896,6 @@ func TestNodeFacade_InternalValidatorsInfo(t *testing.T) { t.Parallel() arg := createMockArguments() - expectedErr := errors.New("expected error") arg.ApiResolver = &mock.ApiResolverStub{ GetInternalStartOfEpochValidatorsInfoCalled: func(epoch uint32) ([]*state.ShardValidatorInfo, error) { return nil, expectedErr @@ -1747,7 +1924,248 @@ func TestNodeFacade_InternalValidatorsInfo(t *testing.T) { nf, _ := NewNodeFacade(arg) res, err := nf.GetInternalStartOfEpochValidatorsInfo(0) require.NotNil(t, res) - require.Nil(t, err) + require.NoError(t, err) require.True(t, wasCalled) }) } + +func TestNodeFacade_GetESDTsRoles(t *testing.T) { + t.Parallel() + + expectedResponse := map[string][]string{ + "key": {"val1", "val2"}, + } + args := createMockArguments() + args.WsAntifloodConfig.WebServerAntifloodEnabled = true // coverage + + args.Node = &mock.NodeStub{ + GetESDTsRolesCalled: func(address string, options api.AccountQueryOptions, ctx context.Context) (map[string][]string, api.BlockInfo, error) { + return expectedResponse, api.BlockInfo{}, nil + }, + } + + nf, _ := NewNodeFacade(args) + + res, _, err := nf.GetESDTsRoles("address", api.AccountQueryOptions{}) + 
require.NoError(t, err) + require.Equal(t, expectedResponse, res) +} + +func TestNodeFacade_GetTokenSupply(t *testing.T) { + t.Parallel() + + providedResponse := &api.ESDTSupply{ + Supply: "1000", + Burned: "500", + Minted: "1500", + } + args := createMockArguments() + args.Node = &mock.NodeStub{ + GetTokenSupplyCalled: func(token string) (*api.ESDTSupply, error) { + return providedResponse, nil + }, + } + + nf, _ := NewNodeFacade(args) + + response, err := nf.GetTokenSupply("token") + require.NoError(t, err) + require.Equal(t, providedResponse, response) +} + +func TestNodeFacade_ValidateTransaction(t *testing.T) { + t.Parallel() + + args := createMockArguments() + wasCalled := false + args.Node = &mock.NodeStub{ + ValidateTransactionHandler: func(tx *transaction.Transaction) error { + wasCalled = true + return nil + }, + } + + nf, _ := NewNodeFacade(args) + + err := nf.ValidateTransaction(&transaction.Transaction{}) + require.NoError(t, err) + require.True(t, wasCalled) +} + +func TestNodeFacade_SimulateTransactionExecution(t *testing.T) { + t.Parallel() + + providedResponse := &txSimData.SimulationResults{ + Status: "ok", + FailReason: "no reason", + ScResults: nil, + Receipts: nil, + Hash: "hash", + } + args := createMockArguments() + args.TxSimulatorProcessor = &mock.TxExecutionSimulatorStub{ + ProcessTxCalled: func(tx *transaction.Transaction) (*txSimData.SimulationResults, error) { + return providedResponse, nil + }, + } + + nf, _ := NewNodeFacade(args) + + response, err := nf.SimulateTransactionExecution(&transaction.Transaction{}) + require.NoError(t, err) + require.Equal(t, providedResponse, response) +} + +func TestNodeFacade_ComputeTransactionGasLimit(t *testing.T) { + t.Parallel() + + providedResponse := &transaction.CostResponse{ + GasUnits: 10, + } + args := createMockArguments() + args.ApiResolver = &mock.ApiResolverStub{ + ComputeTransactionGasLimitHandler: func(tx *transaction.Transaction) (*transaction.CostResponse, error) { + return providedResponse, nil + }, + } + + nf, _ := NewNodeFacade(args) + + response, err := nf.ComputeTransactionGasLimit(&transaction.Transaction{}) + require.NoError(t, err) + require.Equal(t, providedResponse, response) +} + +func TestNodeFacade_GetEpochStartDataAPI(t *testing.T) { + t.Parallel() + + providedResponse := &common.EpochStartDataAPI{ + Nonce: 1, + Round: 2, + Shard: 3, + Timestamp: 4, + } + args := createMockArguments() + args.Node = &mock.NodeStub{ + GetEpochStartDataAPICalled: func(epoch uint32) (*common.EpochStartDataAPI, error) { + return providedResponse, nil + }, + } + nf, _ := NewNodeFacade(args) + + response, err := nf.GetEpochStartDataAPI(0) + require.NoError(t, err) + require.Equal(t, providedResponse, response) +} + +func TestNodeFacade_GetConnectedPeersRatings(t *testing.T) { + t.Parallel() + + providedResponse := "ratings" + args := createMockArguments() + args.Node = &mock.NodeStub{ + GetConnectedPeersRatingsCalled: func() string { + return providedResponse + }, + } + nf, _ := NewNodeFacade(args) + + response := nf.GetConnectedPeersRatings() + require.Equal(t, providedResponse, response) +} + +func TestNodeFacade_GetBlockByHash(t *testing.T) { + t.Parallel() + + providedResponse := &api.Block{ + Nonce: 123, + Round: 321, + } + args := createMockArguments() + args.ApiResolver = &mock.ApiResolverStub{ + GetBlockByHashCalled: func(hash string, options api.BlockQueryOptions) (*api.Block, error) { + return providedResponse, nil + }, + } + nf, _ := NewNodeFacade(args) + + response, err := nf.GetBlockByHash("hash", 
api.BlockQueryOptions{}) + require.NoError(t, err) + require.Equal(t, providedResponse, response) +} + +func TestNodeFacade_GetBlockByNonce(t *testing.T) { + t.Parallel() + + providedResponse := &api.Block{ + Nonce: 123, + Round: 321, + } + args := createMockArguments() + args.ApiResolver = &mock.ApiResolverStub{ + GetBlockByNonceCalled: func(nonce uint64, options api.BlockQueryOptions) (*api.Block, error) { + return providedResponse, nil + }, + } + nf, _ := NewNodeFacade(args) + + response, err := nf.GetBlockByNonce(0, api.BlockQueryOptions{}) + require.NoError(t, err) + require.Equal(t, providedResponse, response) +} + +func TestNodeFacade_GetAlteredAccountsForBlock(t *testing.T) { + t.Parallel() + + providedResponse := []*alteredAccount.AlteredAccount{ + { + Nonce: 123, + Address: "address", + }, + } + args := createMockArguments() + args.ApiResolver = &mock.ApiResolverStub{ + GetAlteredAccountsForBlockCalled: func(options api.GetAlteredAccountsForBlockOptions) ([]*alteredAccount.AlteredAccount, error) { + return providedResponse, nil + }, + } + nf, _ := NewNodeFacade(args) + + response, err := nf.GetAlteredAccountsForBlock(api.GetAlteredAccountsForBlockOptions{}) + require.NoError(t, err) + require.Equal(t, providedResponse, response) +} + +func TestNodeFacade_GetInternalStartOfEpochMetaBlock(t *testing.T) { + t.Parallel() + + providedResponse := "meta block" + args := createMockArguments() + args.ApiResolver = &mock.ApiResolverStub{ + GetInternalStartOfEpochMetaBlockCalled: func(format common.ApiOutputFormat, epoch uint32) (interface{}, error) { + return providedResponse, nil + }, + } + nf, _ := NewNodeFacade(args) + + response, err := nf.GetInternalStartOfEpochMetaBlock(0, 0) + require.NoError(t, err) + require.Equal(t, providedResponse, response) +} + +func TestNodeFacade_Close(t *testing.T) { + t.Parallel() + + nf, _ := NewNodeFacade(createMockArguments()) + require.NoError(t, nf.Close()) +} + +func TestNodeFacade_IsInterfaceNil(t *testing.T) { + t.Parallel() + + var nf *nodeFacade + require.True(t, nf.IsInterfaceNil()) + + nf, _ = NewNodeFacade(createMockArguments()) + require.False(t, nf.IsInterfaceNil()) +} diff --git a/factory/api/apiResolverFactory.go b/factory/api/apiResolverFactory.go index d9d41116ab8..315fd42c2e7 100644 --- a/factory/api/apiResolverFactory.go +++ b/factory/api/apiResolverFactory.go @@ -33,6 +33,7 @@ import ( "github.com/multiversx/mx-chain-go/process/txstatus" "github.com/multiversx/mx-chain-go/sharding" "github.com/multiversx/mx-chain-go/state" + "github.com/multiversx/mx-chain-go/state/syncer" storageFactory "github.com/multiversx/mx-chain-go/storage/factory" "github.com/multiversx/mx-chain-go/storage/storageunit" "github.com/multiversx/mx-chain-go/vm" @@ -125,6 +126,12 @@ func CreateApiResolver(args *ApiResolverArgs) (facade.ApiResolver, error) { return nil, errDecode } + dnsV2AddressesStrings := args.Configs.GeneralConfig.BuiltInFunctions.DNSV2Addresses + convertedDNSV2Addresses, errDecode := factory.DecodeAddresses(pkConverter, dnsV2AddressesStrings) + if errDecode != nil { + return nil, errDecode + } + builtInFuncFactory, err := createBuiltinFuncs( args.GasScheduleNotifier, args.CoreComponents.InternalMarshalizer(), @@ -135,6 +142,7 @@ func CreateApiResolver(args *ApiResolverArgs) (facade.ApiResolver, error) { args.BootstrapComponents.GuardedAccountHandler(), convertedAddresses, args.Configs.GeneralConfig.BuiltInFunctions.MaxNumAddressesInTransferRole, + convertedDNSV2Addresses, ) if err != nil { return nil, err @@ -337,6 +345,12 @@ func 
createScQueryElement( return nil, errDecode } + dnsV2AddressesStrings := args.generalConfig.BuiltInFunctions.DNSV2Addresses + convertedDNSV2Addresses, errDecode := factory.DecodeAddresses(pkConverter, dnsV2AddressesStrings) + if errDecode != nil { + return nil, errDecode + } + builtInFuncFactory, err := createBuiltinFuncs( args.gasScheduleNotifier, args.coreComponents.InternalMarshalizer(), @@ -347,6 +361,7 @@ func createScQueryElement( args.guardedAccountHandler, convertedAddresses, args.generalConfig.BuiltInFunctions.MaxNumAddressesInTransferRole, + convertedDNSV2Addresses, ) if err != nil { return nil, err @@ -361,25 +376,26 @@ func createScQueryElement( scStorage := args.generalConfig.SmartContractsStorageForSCQuery scStorage.DB.FilePath += fmt.Sprintf("%d", args.index) argsHook := hooks.ArgBlockChainHook{ - Accounts: args.stateComponents.AccountsAdapterAPI(), - PubkeyConv: args.coreComponents.AddressPubKeyConverter(), - StorageService: args.dataComponents.StorageService(), - BlockChain: args.dataComponents.Blockchain(), - ShardCoordinator: args.processComponents.ShardCoordinator(), - Marshalizer: args.coreComponents.InternalMarshalizer(), - Uint64Converter: args.coreComponents.Uint64ByteSliceConverter(), - BuiltInFunctions: builtInFuncFactory.BuiltInFunctionContainer(), - NFTStorageHandler: builtInFuncFactory.NFTStorageHandler(), - GlobalSettingsHandler: builtInFuncFactory.ESDTGlobalSettingsHandler(), - DataPool: args.dataComponents.Datapool(), - ConfigSCStorage: scStorage, - CompiledSCPool: smartContractsCache, - WorkingDir: args.workingDir, - EpochNotifier: args.coreComponents.EpochNotifier(), - EnableEpochsHandler: args.coreComponents.EnableEpochsHandler(), - NilCompiledSCStore: true, - GasSchedule: args.gasScheduleNotifier, - Counter: counters.NewDisabledCounter(), + Accounts: args.stateComponents.AccountsAdapterAPI(), + PubkeyConv: args.coreComponents.AddressPubKeyConverter(), + StorageService: args.dataComponents.StorageService(), + BlockChain: args.dataComponents.Blockchain(), + ShardCoordinator: args.processComponents.ShardCoordinator(), + Marshalizer: args.coreComponents.InternalMarshalizer(), + Uint64Converter: args.coreComponents.Uint64ByteSliceConverter(), + BuiltInFunctions: builtInFuncFactory.BuiltInFunctionContainer(), + NFTStorageHandler: builtInFuncFactory.NFTStorageHandler(), + GlobalSettingsHandler: builtInFuncFactory.ESDTGlobalSettingsHandler(), + DataPool: args.dataComponents.Datapool(), + ConfigSCStorage: scStorage, + CompiledSCPool: smartContractsCache, + WorkingDir: args.workingDir, + EpochNotifier: args.coreComponents.EpochNotifier(), + EnableEpochsHandler: args.coreComponents.EnableEpochsHandler(), + NilCompiledSCStore: true, + GasSchedule: args.gasScheduleNotifier, + Counter: counters.NewDisabledCounter(), + MissingTrieNodesNotifier: syncer.NewMissingTrieNodesNotifier(), } maxGasForVmQueries := args.generalConfig.VirtualMachine.GasConfig.ShardMaxGasPerVmQuery @@ -481,10 +497,17 @@ func createBuiltinFuncs( guardedAccountHandler vmcommon.GuardedAccountHandler, automaticCrawlerAddresses [][]byte, maxNumAddressesInTransferRole uint32, + dnsV2Addresses [][]byte, ) (vmcommon.BuiltInFunctionFactory, error) { + mapDNSV2Addresses := make(map[string]struct{}) + for _, address := range dnsV2Addresses { + mapDNSV2Addresses[string(address)] = struct{}{} + } + argsBuiltIn := builtInFunctions.ArgsCreateBuiltInFunctionContainer{ GasSchedule: gasScheduleNotifier, MapDNSAddresses: make(map[string]struct{}), + MapDNSV2Addresses: mapDNSV2Addresses, Marshalizer: marshalizer, 
Accounts: accnts, ShardCoordinator: shardCoordinator, diff --git a/factory/consensus/consensusComponents.go b/factory/consensus/consensusComponents.go index 1529bf7d30d..0cabc983f05 100644 --- a/factory/consensus/consensusComponents.go +++ b/factory/consensus/consensusComponents.go @@ -1,6 +1,7 @@ package consensus import ( + "fmt" "time" "github.com/multiversx/mx-chain-core-go/core" @@ -16,6 +17,7 @@ import ( "github.com/multiversx/mx-chain-go/consensus/chronology" "github.com/multiversx/mx-chain-go/consensus/spos" "github.com/multiversx/mx-chain-go/consensus/spos/sposFactory" + "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/errors" "github.com/multiversx/mx-chain-go/factory" p2pFactory "github.com/multiversx/mx-chain-go/p2p/factory" @@ -24,9 +26,7 @@ import ( "github.com/multiversx/mx-chain-go/process/sync/storageBootstrap" "github.com/multiversx/mx-chain-go/sharding" "github.com/multiversx/mx-chain-go/state/syncer" - trieFactory "github.com/multiversx/mx-chain-go/trie/factory" "github.com/multiversx/mx-chain-go/trie/statistics" - "github.com/multiversx/mx-chain-go/trie/storageMarker" "github.com/multiversx/mx-chain-go/update" logger "github.com/multiversx/mx-chain-logger-go" "github.com/multiversx/mx-chain-storage-go/timecache" @@ -39,6 +39,7 @@ const defaultSpan = 300 * time.Second // ConsensusComponentsFactoryArgs holds the arguments needed to create a consensus components factory type ConsensusComponentsFactoryArgs struct { Config config.Config + FlagsConfig config.ContextFlagsConfig BootstrapRoundIndex uint64 CoreComponents factory.CoreComponentsHolder NetworkComponents factory.NetworkComponentsHolder @@ -55,6 +56,7 @@ type ConsensusComponentsFactoryArgs struct { type consensusComponentsFactory struct { config config.Config + flagsConfig config.ContextFlagsConfig bootstrapRoundIndex uint64 coreComponents factory.CoreComponentsHolder networkComponents factory.NetworkComponentsHolder @@ -88,6 +90,7 @@ func NewConsensusComponentsFactory(args ConsensusComponentsFactoryArgs) (*consen return &consensusComponentsFactory{ config: args.Config, + flagsConfig: args.FlagsConfig, bootstrapRoundIndex: args.BootstrapRoundIndex, coreComponents: args.CoreComponents, networkComponents: args.NetworkComponents, @@ -133,7 +136,10 @@ func (ccf *consensusComponentsFactory) Create() (*consensusComponents, error) { return nil, err } - cc.bootstrapper.StartSyncingBlocks() + err = cc.bootstrapper.StartSyncingBlocks() + if err != nil { + return nil, err + } epoch := ccf.getEpoch() consensusState, err := ccf.createConsensusState(epoch, cc.consensusGroupSize) @@ -443,6 +449,15 @@ func (ccf *consensusComponentsFactory) createShardBootstrapper() (process.Bootst return nil, err } + stateNodesNotifierSubscriber, ok := accountsDBSyncer.(common.StateSyncNotifierSubscriber) + if !ok { + return nil, fmt.Errorf("wrong type conversion for accountsDBSyncer, type: %T", accountsDBSyncer) + } + err = ccf.stateComponents.MissingTrieNodesNotifier().RegisterHandler(stateNodesNotifierSubscriber) + if err != nil { + return nil, err + } + argsBaseBootstrapper := sync.ArgBaseBootstrapper{ PoolsHolder: ccf.dataComponents.Datapool(), Store: ccf.dataComponents.StorageService(), @@ -471,6 +486,7 @@ func (ccf *consensusComponentsFactory) createShardBootstrapper() (process.Bootst HistoryRepo: ccf.processComponents.HistoryRepository(), ScheduledTxsExecutionHandler: ccf.processComponents.ScheduledTxsExecutionHandler(), ProcessWaitTime: time.Duration(ccf.config.GeneralSettings.SyncProcessTimeInMillis) * 
time.Millisecond, + RepopulateTokensSupplies: ccf.flagsConfig.RepopulateTokensSupplies, } argsShardBootstrapper := sync.ArgShardBootstrapper{ @@ -492,14 +508,13 @@ func (ccf *consensusComponentsFactory) createArgsBaseAccountsSyncer(trieStorageM MaxHardCapForMissingNodes: ccf.config.TrieSync.MaxHardCapForMissingNodes, TrieSyncerVersion: ccf.config.TrieSync.TrieSyncerVersion, CheckNodesOnDisk: ccf.config.TrieSync.CheckNodesOnDisk, - StorageMarker: storageMarker.NewTrieStorageMarker(), UserAccountsSyncStatisticsHandler: statistics.NewTrieSyncStatistics(), AppStatusHandler: disabled.NewAppStatusHandler(), } } func (ccf *consensusComponentsFactory) createValidatorAccountsSyncer() (process.AccountsDBSyncer, error) { - trieStorageManager, ok := ccf.stateComponents.TrieStorageManagers()[trieFactory.PeerAccountTrie] + trieStorageManager, ok := ccf.stateComponents.TrieStorageManagers()[dataRetriever.PeerAccountsUnit.String()] if !ok { return nil, errors.ErrNilTrieStorageManager } @@ -511,7 +526,7 @@ func (ccf *consensusComponentsFactory) createValidatorAccountsSyncer() (process. } func (ccf *consensusComponentsFactory) createUserAccountsSyncer() (process.AccountsDBSyncer, error) { - trieStorageManager, ok := ccf.stateComponents.TrieStorageManagers()[trieFactory.UserAccountTrie] + trieStorageManager, ok := ccf.stateComponents.TrieStorageManagers()[dataRetriever.UserAccountsUnit.String()] if !ok { return nil, errors.ErrNilTrieStorageManager } @@ -600,6 +615,7 @@ func (ccf *consensusComponentsFactory) createMetaChainBootstrapper() (process.Bo HistoryRepo: ccf.processComponents.HistoryRepository(), ScheduledTxsExecutionHandler: ccf.processComponents.ScheduledTxsExecutionHandler(), ProcessWaitTime: time.Duration(ccf.config.GeneralSettings.SyncProcessTimeInMillis) * time.Millisecond, + RepopulateTokensSupplies: ccf.flagsConfig.RepopulateTokensSupplies, } argsMetaBootstrapper := sync.ArgMetaBootstrapper{ diff --git a/factory/consensus/consensusComponents_test.go b/factory/consensus/consensusComponents_test.go index d0512e51fdf..184cb8d3d11 100644 --- a/factory/consensus/consensusComponents_test.go +++ b/factory/consensus/consensusComponents_test.go @@ -33,7 +33,7 @@ import ( "github.com/multiversx/mx-chain-go/testscommon/shardingMocks" stateMocks "github.com/multiversx/mx-chain-go/testscommon/state" "github.com/multiversx/mx-chain-go/testscommon/statusHandler" - trieFactory "github.com/multiversx/mx-chain-go/trie/factory" + "github.com/multiversx/mx-chain-go/testscommon/storageManager" "github.com/multiversx/mx-chain-go/update" "github.com/stretchr/testify/require" ) @@ -136,13 +136,14 @@ func createMockConsensusComponentsFactoryArgs() consensusComp.ConsensusComponent HeaderIntegrVerif: &mock.HeaderIntegrityVerifierStub{}, FallbackHdrValidator: &testscommon.FallBackHeaderValidatorStub{}, }, - StateComponents: &testscommon.StateComponentsMock{ + StateComponents: &factoryMocks.StateComponentsMock{ StorageManagers: map[string]common.StorageManager{ - trieFactory.UserAccountTrie: &testscommon.StorageManagerStub{}, - trieFactory.PeerAccountTrie: &testscommon.StorageManagerStub{}, + retriever.UserAccountsUnit.String(): &storageManager.StorageManagerStub{}, + retriever.PeerAccountsUnit.String(): &storageManager.StorageManagerStub{}, }, - Accounts: &stateMocks.AccountsStub{}, - PeersAcc: &stateMocks.AccountsStub{}, + Accounts: &stateMocks.AccountsStub{}, + PeersAcc: &stateMocks.AccountsStub{}, + MissingNodesNotifier: &testscommon.MissingTrieNodesNotifierStub{}, }, StatusComponents: 
&testsMocks.StatusComponentsStub{ Outport: &outportMocks.OutportStub{}, @@ -553,7 +554,7 @@ func TestConsensusComponentsFactory_Create(t *testing.T) { t.Parallel() args := createMockConsensusComponentsFactoryArgs() - stateCompStub, ok := args.StateComponents.(*testscommon.StateComponentsMock) + stateCompStub, ok := args.StateComponents.(*factoryMocks.StateComponentsMock) require.True(t, ok) stateCompStub.StorageManagers = make(map[string]common.StorageManager) // missing UserAccountTrie ccf, _ := consensusComp.NewConsensusComponentsFactory(args) @@ -604,7 +605,7 @@ func TestConsensusComponentsFactory_Create(t *testing.T) { t.Parallel() args := createMockConsensusComponentsFactoryArgs() - stateCompStub, ok := args.StateComponents.(*testscommon.StateComponentsMock) + stateCompStub, ok := args.StateComponents.(*factoryMocks.StateComponentsMock) require.True(t, ok) stateCompStub.StorageManagers = make(map[string]common.StorageManager) // missing UserAccountTrie processCompStub, ok := args.ProcessComponents.(*testsMocks.ProcessComponentsStub) @@ -625,10 +626,10 @@ func TestConsensusComponentsFactory_Create(t *testing.T) { t.Parallel() args := createMockConsensusComponentsFactoryArgs() - stateCompStub, ok := args.StateComponents.(*testscommon.StateComponentsMock) + stateCompStub, ok := args.StateComponents.(*factoryMocks.StateComponentsMock) require.True(t, ok) stateCompStub.StorageManagers = map[string]common.StorageManager{ - trieFactory.UserAccountTrie: &testscommon.StorageManagerStub{}, + retriever.UserAccountsUnit.String(): &storageManager.StorageManagerStub{}, } // missing PeerAccountTrie processCompStub, ok := args.ProcessComponents.(*testsMocks.ProcessComponentsStub) require.True(t, ok) diff --git a/factory/core/coreComponents.go b/factory/core/coreComponents.go index 33310be51b4..1bf74e428d4 100644 --- a/factory/core/coreComponents.go +++ b/factory/core/coreComponents.go @@ -224,6 +224,7 @@ func (ccf *coreComponentsFactory) Create() (*coreComponents, error) { } alarmScheduler := alarm.NewAlarmScheduler() + // TODO: disable watchdog if block processing cutoff is enabled watchdogTimer, err := watchdog.NewWatchdog(alarmScheduler, ccf.chanStopNodeProcess, log) if err != nil { return nil, err diff --git a/factory/data/dataComponents.go b/factory/data/dataComponents.go index 0585a6c4dad..c90f032df88 100644 --- a/factory/data/dataComponents.go +++ b/factory/data/dataComponents.go @@ -27,10 +27,10 @@ type DataComponentsFactoryArgs struct { Core factory.CoreComponentsHolder StatusCore factory.StatusCoreComponentsHolder Crypto factory.CryptoComponentsHolder + FlagsConfigs config.ContextFlagsConfig CurrentEpoch uint32 CreateTrieEpochRootHashStorer bool NodeProcessingMode common.NodeProcessingMode - SnapshotsEnabled bool } type dataComponentsFactory struct { @@ -40,10 +40,10 @@ type dataComponentsFactory struct { core factory.CoreComponentsHolder statusCore factory.StatusCoreComponentsHolder crypto factory.CryptoComponentsHolder + flagsConfig config.ContextFlagsConfig currentEpoch uint32 createTrieEpochRootHashStorer bool nodeProcessingMode common.NodeProcessingMode - snapshotsEnabled bool } // dataComponents struct holds the data components @@ -79,8 +79,8 @@ func NewDataComponentsFactory(args DataComponentsFactoryArgs) (*dataComponentsFa statusCore: args.StatusCore, currentEpoch: args.CurrentEpoch, createTrieEpochRootHashStorer: args.CreateTrieEpochRootHashStorer, + flagsConfig: args.FlagsConfigs, nodeProcessingMode: args.NodeProcessingMode, - snapshotsEnabled: args.SnapshotsEnabled, crypto: 
args.Crypto, }, nil } @@ -171,7 +171,8 @@ func (dcf *dataComponentsFactory) createDataStoreFromConfig() (dataRetriever.Sto StorageType: storageFactory.ProcessStorageService, CreateTrieEpochRootHashStorer: dcf.createTrieEpochRootHashStorer, NodeProcessingMode: dcf.nodeProcessingMode, - SnapshotsEnabled: dcf.snapshotsEnabled, + SnapshotsEnabled: dcf.flagsConfig.SnapshotsEnabled, + RepopulateTokensSupplies: dcf.flagsConfig.RepopulateTokensSupplies, ManagedPeersHolder: dcf.crypto.ManagedPeersHolder(), }) if err != nil { diff --git a/factory/interface.go b/factory/interface.go index ecec87cabf6..2d82d5ab86a 100644 --- a/factory/interface.go +++ b/factory/interface.go @@ -325,6 +325,8 @@ type StateComponentsHolder interface { AccountsRepository() state.AccountsRepository TriesContainer() common.TriesHolder TrieStorageManagers() map[string]common.StorageManager + MissingTrieNodesNotifier() common.MissingTrieNodesNotifier + Close() error IsInterfaceNil() bool } diff --git a/factory/mock/accountsParserStub.go b/factory/mock/accountsParserStub.go index 27066140982..436a8a418de 100644 --- a/factory/mock/accountsParserStub.go +++ b/factory/mock/accountsParserStub.go @@ -16,7 +16,7 @@ type AccountsParserStub struct { InitialAccountsCalled func() []genesis.InitialAccountHandler GetTotalStakedForDelegationAddressCalled func(delegationAddress string) *big.Int GetInitialAccountsForDelegatedCalled func(addressBytes []byte) []genesis.InitialAccountHandler - GenerateInitialTransactionsCalled func(shardCoordinator sharding.Coordinator, initialIndexingData map[uint32]*genesis.IndexingData) ([]*block.MiniBlock, map[uint32]*outport.Pool, error) + GenerateInitialTransactionsCalled func(shardCoordinator sharding.Coordinator, initialIndexingData map[uint32]*genesis.IndexingData) ([]*block.MiniBlock, map[uint32]*outport.TransactionPool, error) GenesisMintingAddressCalled func() string } @@ -75,12 +75,12 @@ func (aps *AccountsParserStub) InitialAccounts() []genesis.InitialAccountHandler } // GenerateInitialTransactions - -func (aps *AccountsParserStub) GenerateInitialTransactions(shardCoordinator sharding.Coordinator, initialIndexingData map[uint32]*genesis.IndexingData) ([]*block.MiniBlock, map[uint32]*outport.Pool, error) { +func (aps *AccountsParserStub) GenerateInitialTransactions(shardCoordinator sharding.Coordinator, initialIndexingData map[uint32]*genesis.IndexingData) ([]*block.MiniBlock, map[uint32]*outport.TransactionPool, error) { if aps.GenerateInitialTransactionsCalled != nil { return aps.GenerateInitialTransactionsCalled(shardCoordinator, initialIndexingData) } - return make([]*block.MiniBlock, 0), make(map[uint32]*outport.Pool), nil + return make([]*block.MiniBlock, 0), make(map[uint32]*outport.TransactionPool), nil } // IsInterfaceNil - diff --git a/factory/mock/indexerStub.go b/factory/mock/indexerStub.go deleted file mode 100644 index aed169943b6..00000000000 --- a/factory/mock/indexerStub.go +++ /dev/null @@ -1,62 +0,0 @@ -package mock - -import ( - "github.com/multiversx/mx-chain-core-go/data" - "github.com/multiversx/mx-chain-core-go/data/outport" - "github.com/multiversx/mx-chain-go/process" - "github.com/multiversx/mx-chain-go/state" -) - -// IndexerStub is a mock implementation fot the Indexer interface -type IndexerStub struct { - SaveBlockCalled func(args *outport.ArgsSaveBlockData) -} - -// SaveBlock - -func (im *IndexerStub) SaveBlock(args *outport.ArgsSaveBlockData) { - if im.SaveBlockCalled != nil { - im.SaveBlockCalled(args) - } -} - -// Close will do nothing -func (im *IndexerStub) 
Close() error { - return nil -} - -// SetTxLogsProcessor will do nothing -func (im *IndexerStub) SetTxLogsProcessor(_ process.TransactionLogProcessorDatabase) { -} - -// SaveRoundsInfo - -func (im *IndexerStub) SaveRoundsInfo(_ []*outport.RoundInfo) { - panic("implement me") -} - -// SaveValidatorsRating - -func (im *IndexerStub) SaveValidatorsRating(_ string, _ []*outport.ValidatorRatingInfo) { - -} - -// SaveValidatorsPubKeys - -func (im *IndexerStub) SaveValidatorsPubKeys(_ map[uint32][][]byte, _ uint32) { - panic("implement me") -} - -// RevertIndexedBlock - -func (im *IndexerStub) RevertIndexedBlock(_ data.HeaderHandler, _ data.BodyHandler) { -} - -// SaveAccounts - -func (im *IndexerStub) SaveAccounts(_ uint64, _ []state.UserAccountHandler) { -} - -// IsInterfaceNil returns true if there is no value under the interface -func (im *IndexerStub) IsInterfaceNil() bool { - return im == nil -} - -// IsNilIndexer - -func (im *IndexerStub) IsNilIndexer() bool { - return false -} diff --git a/factory/mock/stateComponentsHolderStub.go b/factory/mock/stateComponentsHolderStub.go index 65cf2efdb0d..c851fdc6dac 100644 --- a/factory/mock/stateComponentsHolderStub.go +++ b/factory/mock/stateComponentsHolderStub.go @@ -7,12 +7,13 @@ import ( // StateComponentsHolderStub - type StateComponentsHolderStub struct { - PeerAccountsCalled func() state.AccountsAdapter - AccountsAdapterCalled func() state.AccountsAdapter - AccountsAdapterAPICalled func() state.AccountsAdapter - AccountsRepositoryCalled func() state.AccountsRepository - TriesContainerCalled func() common.TriesHolder - TrieStorageManagersCalled func() map[string]common.StorageManager + PeerAccountsCalled func() state.AccountsAdapter + AccountsAdapterCalled func() state.AccountsAdapter + AccountsAdapterAPICalled func() state.AccountsAdapter + AccountsRepositoryCalled func() state.AccountsRepository + TriesContainerCalled func() common.TriesHolder + TrieStorageManagersCalled func() map[string]common.StorageManager + MissingTrieNodesNotifierCalled func() common.MissingTrieNodesNotifier } // PeerAccounts - @@ -69,6 +70,20 @@ func (s *StateComponentsHolderStub) TrieStorageManagers() map[string]common.Stor return nil } +// MissingTrieNodesNotifier - +func (s *StateComponentsHolderStub) MissingTrieNodesNotifier() common.MissingTrieNodesNotifier { + if s.MissingTrieNodesNotifierCalled != nil { + return s.MissingTrieNodesNotifierCalled() + } + + return nil +} + +// Close - +func (s *StateComponentsHolderStub) Close() error { + return nil +} + // IsInterfaceNil - func (s *StateComponentsHolderStub) IsInterfaceNil() bool { return s == nil diff --git a/factory/processing/blockProcessorCreator.go b/factory/processing/blockProcessorCreator.go index 73d79a87b6f..85785abf463 100644 --- a/factory/processing/blockProcessorCreator.go +++ b/factory/processing/blockProcessorCreator.go @@ -11,16 +11,15 @@ import ( "github.com/multiversx/mx-chain-go/dataRetriever" debugFactory "github.com/multiversx/mx-chain-go/debug/factory" "github.com/multiversx/mx-chain-go/epochStart" - "github.com/multiversx/mx-chain-go/epochStart/bootstrap/disabled" metachainEpochStart "github.com/multiversx/mx-chain-go/epochStart/metachain" mainFactory "github.com/multiversx/mx-chain-go/factory" "github.com/multiversx/mx-chain-go/genesis" - processDisabled "github.com/multiversx/mx-chain-go/genesis/process/disabled" "github.com/multiversx/mx-chain-go/outport" processOutport "github.com/multiversx/mx-chain-go/outport/process" factoryOutportProvider 
"github.com/multiversx/mx-chain-go/outport/process/factory" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/process/block" + "github.com/multiversx/mx-chain-go/process/block/cutoff" "github.com/multiversx/mx-chain-go/process/block/postprocess" "github.com/multiversx/mx-chain-go/process/block/preprocess" "github.com/multiversx/mx-chain-go/process/coordinator" @@ -35,8 +34,8 @@ import ( "github.com/multiversx/mx-chain-go/process/smartContract/hooks/counters" "github.com/multiversx/mx-chain-go/process/throttle" "github.com/multiversx/mx-chain-go/process/transaction" - "github.com/multiversx/mx-chain-go/process/txsimulator" "github.com/multiversx/mx-chain-go/state" + "github.com/multiversx/mx-chain-go/state/syncer" "github.com/multiversx/mx-chain-go/storage/txcache" "github.com/multiversx/mx-chain-go/vm" logger "github.com/multiversx/mx-chain-logger-go" @@ -46,7 +45,6 @@ import ( type blockProcessorAndVmFactories struct { blockProcessor process.BlockProcessor - vmFactoryForTxSimulate process.VirtualMachinesContainerFactory vmFactoryForProcessing process.VirtualMachinesContainerFactory } @@ -59,13 +57,15 @@ func (pcf *processComponentsFactory) newBlockProcessor( headerValidator process.HeaderConstructionValidator, blockTracker process.BlockTracker, pendingMiniBlocksHandler process.PendingMiniBlocksHandler, - txSimulatorProcessorArgs *txsimulator.ArgsTxSimulator, wasmVMChangeLocker common.Locker, scheduledTxsExecutionHandler process.ScheduledTxsExecutionHandler, processedMiniBlocksTracker process.ProcessedMiniBlocksTracker, receiptsRepository mainFactory.ReceiptsRepository, + blockCutoffProcessingHandler cutoff.BlockProcessingCutoffHandler, + missingTrieNodesNotifier common.MissingTrieNodesNotifier, ) (*blockProcessorAndVmFactories, error) { - if pcf.bootstrapComponents.ShardCoordinator().SelfId() < pcf.bootstrapComponents.ShardCoordinator().NumberOfShards() { + shardCoordinator := pcf.bootstrapComponents.ShardCoordinator() + if shardCoordinator.SelfId() < shardCoordinator.NumberOfShards() { return pcf.newShardBlockProcessor( requestHandler, forkDetector, @@ -74,14 +74,15 @@ func (pcf *processComponentsFactory) newBlockProcessor( headerValidator, blockTracker, pcf.smartContractParser, - txSimulatorProcessorArgs, wasmVMChangeLocker, scheduledTxsExecutionHandler, processedMiniBlocksTracker, receiptsRepository, + blockCutoffProcessingHandler, + missingTrieNodesNotifier, ) } - if pcf.bootstrapComponents.ShardCoordinator().SelfId() == core.MetachainShardId { + if shardCoordinator.SelfId() == core.MetachainShardId { return pcf.newMetaBlockProcessor( requestHandler, forkDetector, @@ -91,11 +92,11 @@ func (pcf *processComponentsFactory) newBlockProcessor( headerValidator, blockTracker, pendingMiniBlocksHandler, - txSimulatorProcessorArgs, wasmVMChangeLocker, scheduledTxsExecutionHandler, processedMiniBlocksTracker, receiptsRepository, + blockCutoffProcessingHandler, ) } @@ -112,11 +113,12 @@ func (pcf *processComponentsFactory) newShardBlockProcessor( headerValidator process.HeaderConstructionValidator, blockTracker process.BlockTracker, smartContractParser genesis.InitialSmartContractParser, - txSimulatorProcessorArgs *txsimulator.ArgsTxSimulator, wasmVMChangeLocker common.Locker, scheduledTxsExecutionHandler process.ScheduledTxsExecutionHandler, processedMiniBlocksTracker process.ProcessedMiniBlocksTracker, receiptsRepository mainFactory.ReceiptsRepository, + blockProcessingCutoffHandler cutoff.BlockProcessingCutoffHandler, + missingTrieNodesNotifier 
common.MissingTrieNodesNotifier, ) (*blockProcessorAndVmFactories, error) { argsParser := smartContract.NewArgumentParser() @@ -141,6 +143,7 @@ func (pcf *processComponentsFactory) newShardBlockProcessor( vmFactory, err := pcf.createVMFactoryShard( pcf.state.AccountsAdapter(), + missingTrieNodesNotifier, builtInFuncFactory.BuiltInFunctionContainer(), esdtTransferParser, wasmVMChangeLocker, @@ -288,11 +291,6 @@ func (pcf *processComponentsFactory) newShardBlockProcessor( scheduledTxsExecutionHandler.SetTransactionProcessor(transactionProcessor) - vmFactoryTxSimulator, err := pcf.createShardTxSimulatorProcessor(txSimulatorProcessorArgs, argsNewScProcessor, argsNewTxProcessor, esdtTransferParser, wasmVMChangeLocker, mapDNSAddresses) - if err != nil { - return nil, err - } - blockSizeThrottler, err := throttle.NewBlockSizeThrottle( pcf.config.BlockSizeThrottleConfig.MinSizeInBytes, pcf.config.BlockSizeThrottleConfig.MaxSizeInBytes, @@ -401,7 +399,8 @@ func (pcf *processComponentsFactory) newShardBlockProcessor( StatusComponents: pcf.statusComponents, StatusCoreComponents: pcf.statusCoreComponents, Config: pcf.config, - Version: pcf.version, + PrefsConfig: pcf.prefConfigs, + Version: pcf.flagsConfig.Version, AccountsDB: accountsDb, ForkDetector: forkDetector, NodesCoordinator: pcf.nodesCoordinator, @@ -423,6 +422,7 @@ func (pcf *processComponentsFactory) newShardBlockProcessor( ProcessedMiniBlocksTracker: processedMiniBlocksTracker, ReceiptsRepository: receiptsRepository, OutportDataProvider: outportDataProvider, + BlockProcessingCutoffHandler: blockProcessingCutoffHandler, } arguments := block.ArgShardProcessor{ ArgBaseProcessor: argumentsBaseProcessor, @@ -438,13 +438,10 @@ func (pcf *processComponentsFactory) newShardBlockProcessor( return nil, err } - blockProcessorComponents := &blockProcessorAndVmFactories{ + return &blockProcessorAndVmFactories{ blockProcessor: blockProcessor, - vmFactoryForTxSimulate: vmFactoryTxSimulator, vmFactoryForProcessing: vmFactory, - } - - return blockProcessorComponents, nil + }, nil } func (pcf *processComponentsFactory) newMetaBlockProcessor( @@ -456,11 +453,11 @@ func (pcf *processComponentsFactory) newMetaBlockProcessor( headerValidator process.HeaderConstructionValidator, blockTracker process.BlockTracker, pendingMiniBlocksHandler process.PendingMiniBlocksHandler, - txSimulatorProcessorArgs *txsimulator.ArgsTxSimulator, wasmVMChangeLocker common.Locker, scheduledTxsExecutionHandler process.ScheduledTxsExecutionHandler, processedMiniBlocksTracker process.ProcessedMiniBlocksTracker, receiptsRepository mainFactory.ReceiptsRepository, + blockProcessingCutoffhandler cutoff.BlockProcessingCutoffHandler, ) (*blockProcessorAndVmFactories, error) { builtInFuncFactory, err := pcf.createBuiltInFunctionContainer(pcf.state.AccountsAdapter(), make(map[string]struct{})) if err != nil { @@ -598,11 +595,6 @@ func (pcf *processComponentsFactory) newMetaBlockProcessor( scheduledTxsExecutionHandler.SetTransactionProcessor(transactionProcessor) - vmFactoryTxSimulator, err := pcf.createMetaTxSimulatorProcessor(txSimulatorProcessorArgs, argsNewScProcessor, txTypeHandler) - if err != nil { - return nil, err - } - blockSizeThrottler, err := throttle.NewBlockSizeThrottle(pcf.config.BlockSizeThrottleConfig.MinSizeInBytes, pcf.config.BlockSizeThrottleConfig.MaxSizeInBytes) if err != nil { return nil, err @@ -821,7 +813,8 @@ func (pcf *processComponentsFactory) newMetaBlockProcessor( StatusComponents: pcf.statusComponents, StatusCoreComponents: pcf.statusCoreComponents, Config: 
pcf.config, - Version: pcf.version, + PrefsConfig: pcf.prefConfigs, + Version: pcf.flagsConfig.Version, AccountsDB: accountsDb, ForkDetector: forkDetector, NodesCoordinator: pcf.nodesCoordinator, @@ -843,6 +836,7 @@ func (pcf *processComponentsFactory) newMetaBlockProcessor( ProcessedMiniBlocksTracker: processedMiniBlocksTracker, ReceiptsRepository: receiptsRepository, OutportDataProvider: outportDataProvider, + BlockProcessingCutoffHandler: blockProcessingCutoffhandler, } esdtOwnerAddress, err := pcf.coreData.AddressPubKeyConverter().Decode(pcf.systemSCConfig.ESDTSystemSCConfig.OwnerAddress) @@ -899,7 +893,6 @@ func (pcf *processComponentsFactory) newMetaBlockProcessor( blockProcessorComponents := &blockProcessorAndVmFactories{ blockProcessor: metaProcessor, - vmFactoryForTxSimulate: vmFactoryTxSimulator, vmFactoryForProcessing: vmFactory, } @@ -950,212 +943,9 @@ func (pcf *processComponentsFactory) createOutportDataProvider( }) } -func (pcf *processComponentsFactory) createShardTxSimulatorProcessor( - txSimulatorProcessorArgs *txsimulator.ArgsTxSimulator, - scProcArgs smartContract.ArgsNewSmartContractProcessor, - txProcArgs transaction.ArgsNewTxProcessor, - esdtTransferParser vmcommon.ESDTTransferParser, - wasmVMChangeLocker common.Locker, - mapDNSAddresses map[string]struct{}, -) (process.VirtualMachinesContainerFactory, error) { - readOnlyAccountsDB, err := txsimulator.NewReadOnlyAccountsDB(pcf.state.AccountsAdapterAPI()) - if err != nil { - return nil, err - } - - argsFactory := shard.ArgsNewIntermediateProcessorsContainerFactory{ - ShardCoordinator: pcf.bootstrapComponents.ShardCoordinator(), - Marshalizer: pcf.coreData.InternalMarshalizer(), - Hasher: pcf.coreData.Hasher(), - PubkeyConverter: pcf.coreData.AddressPubKeyConverter(), - Store: disabled.NewChainStorer(), - PoolsHolder: pcf.data.Datapool(), - EconomicsFee: &processDisabled.FeeHandler{}, - EnableEpochsHandler: pcf.coreData.EnableEpochsHandler(), - } - - interimProcFactory, err := shard.NewIntermediateProcessorsContainerFactory(argsFactory) - if err != nil { - return nil, err - } - - builtInFuncFactory, err := pcf.createBuiltInFunctionContainer(readOnlyAccountsDB, mapDNSAddresses) - if err != nil { - return nil, err - } - - smartContractStorageSimulate := pcf.config.SmartContractsStorageSimulate - vmFactory, err := pcf.createVMFactoryShard( - readOnlyAccountsDB, - builtInFuncFactory.BuiltInFunctionContainer(), - esdtTransferParser, - wasmVMChangeLocker, - smartContractStorageSimulate, - builtInFuncFactory.NFTStorageHandler(), - builtInFuncFactory.ESDTGlobalSettingsHandler(), - ) - if err != nil { - return nil, err - } - - vmContainer, err := vmFactory.Create() - if err != nil { - return nil, err - } - - scProcArgs.VmContainer = vmContainer - - interimProcContainer, err := interimProcFactory.Create() - if err != nil { - return nil, err - } - - scForwarder, err := interimProcContainer.Get(dataBlock.SmartContractResultBlock) - if err != nil { - return nil, err - } - scProcArgs.ScrForwarder = scForwarder - scProcArgs.BlockChainHook = vmFactory.BlockChainHookImpl() - - receiptTxInterim, err := interimProcContainer.Get(dataBlock.ReceiptBlock) - if err != nil { - return nil, err - } - txProcArgs.ReceiptForwarder = receiptTxInterim - - badTxInterim, err := interimProcContainer.Get(dataBlock.InvalidBlock) - if err != nil { - return nil, err - } - scProcArgs.BadTxForwarder = badTxInterim - txProcArgs.BadTxForwarder = badTxInterim - - scProcArgs.TxFeeHandler = &processDisabled.FeeHandler{} - txProcArgs.TxFeeHandler = 
&processDisabled.FeeHandler{} - - scProcArgs.AccountsDB = readOnlyAccountsDB - scProcArgs.VMOutputCacher = txSimulatorProcessorArgs.VMOutputCacher - scProcessor, err := smartContract.NewSmartContractProcessor(scProcArgs) - if err != nil { - return nil, err - } - txProcArgs.ScProcessor = scProcessor - - txProcArgs.Accounts = readOnlyAccountsDB - - txSimulatorProcessorArgs.TransactionProcessor, err = transaction.NewTxProcessor(txProcArgs) - if err != nil { - return nil, err - } - - txSimulatorProcessorArgs.IntermediateProcContainer = interimProcContainer - - return vmFactory, nil -} - -func (pcf *processComponentsFactory) createMetaTxSimulatorProcessor( - txSimulatorProcessorArgs *txsimulator.ArgsTxSimulator, - scProcArgs smartContract.ArgsNewSmartContractProcessor, - txTypeHandler process.TxTypeHandler, -) (process.VirtualMachinesContainerFactory, error) { - argsFactory := metachain.ArgsNewIntermediateProcessorsContainerFactory{ - ShardCoordinator: pcf.bootstrapComponents.ShardCoordinator(), - Marshalizer: pcf.coreData.InternalMarshalizer(), - Hasher: pcf.coreData.Hasher(), - PubkeyConverter: pcf.coreData.AddressPubKeyConverter(), - Store: disabled.NewChainStorer(), - PoolsHolder: pcf.data.Datapool(), - EconomicsFee: &processDisabled.FeeHandler{}, - EnableEpochsHandler: pcf.coreData.EnableEpochsHandler(), - } - - interimProcFactory, err := metachain.NewIntermediateProcessorsContainerFactory(argsFactory) - if err != nil { - return nil, err - } - - interimProcContainer, err := interimProcFactory.Create() - if err != nil { - return nil, err - } - - scForwarder, err := interimProcContainer.Get(dataBlock.SmartContractResultBlock) - if err != nil { - return nil, err - } - scProcArgs.ScrForwarder = scForwarder - - badTxInterim, err := interimProcContainer.Get(dataBlock.InvalidBlock) - if err != nil { - return nil, err - } - scProcArgs.BadTxForwarder = badTxInterim - scProcArgs.VMOutputCacher = txSimulatorProcessorArgs.VMOutputCacher - - scProcArgs.TxFeeHandler = &processDisabled.FeeHandler{} - - scProcArgs.VMOutputCacher = txSimulatorProcessorArgs.VMOutputCacher - - readOnlyAccountsDB, err := txsimulator.NewReadOnlyAccountsDB(pcf.state.AccountsAdapterAPI()) - if err != nil { - return nil, err - } - - builtInFuncFactory, err := pcf.createBuiltInFunctionContainer(readOnlyAccountsDB, make(map[string]struct{})) - if err != nil { - return nil, err - } - - vmFactory, err := pcf.createVMFactoryMeta( - readOnlyAccountsDB, - builtInFuncFactory.BuiltInFunctionContainer(), - pcf.config.SmartContractsStorageSimulate, - builtInFuncFactory.NFTStorageHandler(), - builtInFuncFactory.ESDTGlobalSettingsHandler(), - ) - if err != nil { - return nil, err - } - - vmContainer, err := vmFactory.Create() - if err != nil { - return nil, err - } - - scProcArgs.VmContainer = vmContainer - scProcArgs.BlockChainHook = vmFactory.BlockChainHookImpl() - - scProcessor, err := smartContract.NewSmartContractProcessor(scProcArgs) - if err != nil { - return nil, err - } - - argsNewMetaTx := transaction.ArgsNewMetaTxProcessor{ - Hasher: pcf.coreData.Hasher(), - Marshalizer: pcf.coreData.InternalMarshalizer(), - Accounts: readOnlyAccountsDB, - PubkeyConv: pcf.coreData.AddressPubKeyConverter(), - ShardCoordinator: pcf.bootstrapComponents.ShardCoordinator(), - ScProcessor: scProcessor, - TxTypeHandler: txTypeHandler, - EconomicsFee: &processDisabled.FeeHandler{}, - EnableEpochsHandler: pcf.coreData.EnableEpochsHandler(), - TxVersionChecker: pcf.coreData.TxVersionChecker(), - GuardianChecker: 
pcf.bootstrapComponents.GuardedAccountHandler(), - } - - txSimulatorProcessorArgs.TransactionProcessor, err = transaction.NewMetaTxProcessor(argsNewMetaTx) - if err != nil { - return nil, err - } - - txSimulatorProcessorArgs.IntermediateProcContainer = interimProcContainer - - return vmFactory, nil -} - func (pcf *processComponentsFactory) createVMFactoryShard( accounts state.AccountsAdapter, + notifier common.MissingTrieNodesNotifier, builtInFuncs vmcommon.BuiltInFunctionContainer, esdtTransferParser vmcommon.ESDTTransferParser, wasmVMChangeLocker common.Locker, @@ -1179,7 +969,7 @@ func (pcf *processComponentsFactory) createVMFactoryShard( BuiltInFunctions: builtInFuncs, DataPool: pcf.data.Datapool(), CompiledSCPool: pcf.data.Datapool().SmartContracts(), - WorkingDir: pcf.workingDir, + WorkingDir: pcf.flagsConfig.WorkingDir, NFTStorageHandler: nftStorageHandler, GlobalSettingsHandler: globalSettingsHandler, EpochNotifier: pcf.coreData.EpochNotifier(), @@ -1188,6 +978,7 @@ func (pcf *processComponentsFactory) createVMFactoryShard( ConfigSCStorage: configSCStorage, GasSchedule: pcf.gasSchedule, Counter: counter, + MissingTrieNodesNotifier: notifier, } blockChainHookImpl, err := hooks.NewBlockChainHookImpl(argsHook) @@ -1230,7 +1021,7 @@ func (pcf *processComponentsFactory) createVMFactoryMeta( DataPool: pcf.data.Datapool(), CompiledSCPool: pcf.data.Datapool().SmartContracts(), ConfigSCStorage: configSCStorage, - WorkingDir: pcf.workingDir, + WorkingDir: pcf.flagsConfig.WorkingDir, NFTStorageHandler: nftStorageHandler, GlobalSettingsHandler: globalSettingsHandler, EpochNotifier: pcf.coreData.EpochNotifier(), @@ -1238,6 +1029,7 @@ func (pcf *processComponentsFactory) createVMFactoryMeta( NilCompiledSCStore: false, GasSchedule: pcf.gasSchedule, Counter: counters.NewDisabledCounter(), + MissingTrieNodesNotifier: syncer.NewMissingTrieNodesNotifier(), } blockChainHookImpl, err := hooks.NewBlockChainHookImpl(argsHook) @@ -1275,9 +1067,23 @@ func (pcf *processComponentsFactory) createBuiltInFunctionContainer( return nil, err } + convertedDNSV2Addresses, err := mainFactory.DecodeAddresses( + pcf.coreData.AddressPubKeyConverter(), + pcf.config.BuiltInFunctions.DNSV2Addresses, + ) + if err != nil { + return nil, err + } + + mapDNSV2Addresses := make(map[string]struct{}) + for _, address := range convertedDNSV2Addresses { + mapDNSV2Addresses[string(address)] = struct{}{} + } + argsBuiltIn := builtInFunctions.ArgsCreateBuiltInFunctionContainer{ GasSchedule: pcf.gasSchedule, MapDNSAddresses: mapDNSAddresses, + MapDNSV2Addresses: mapDNSV2Addresses, Marshalizer: pcf.coreData.InternalMarshalizer(), Accounts: accounts, ShardCoordinator: pcf.bootstrapComponents.ShardCoordinator(), diff --git a/factory/processing/blockProcessorCreator_test.go b/factory/processing/blockProcessorCreator_test.go index 2842b92221f..0982521c963 100644 --- a/factory/processing/blockProcessorCreator_test.go +++ b/factory/processing/blockProcessorCreator_test.go @@ -8,14 +8,13 @@ import ( "github.com/multiversx/mx-chain-core-go/hashing" "github.com/multiversx/mx-chain-core-go/marshal" "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/dataRetriever" dataComp "github.com/multiversx/mx-chain-go/factory/data" "github.com/multiversx/mx-chain-go/factory/mock" processComp "github.com/multiversx/mx-chain-go/factory/processing" - "github.com/multiversx/mx-chain-go/process/txsimulator" "github.com/multiversx/mx-chain-go/state" factoryState "github.com/multiversx/mx-chain-go/state/factory" 
"github.com/multiversx/mx-chain-go/state/storagePruningManager/disabled" - "github.com/multiversx/mx-chain-go/storage/txcache" "github.com/multiversx/mx-chain-go/testscommon" componentsMock "github.com/multiversx/mx-chain-go/testscommon/components" "github.com/multiversx/mx-chain-go/testscommon/hashingMocks" @@ -24,7 +23,6 @@ import ( storageManager "github.com/multiversx/mx-chain-go/testscommon/storage" trieMock "github.com/multiversx/mx-chain-go/testscommon/trie" "github.com/multiversx/mx-chain-go/trie" - trieFactory "github.com/multiversx/mx-chain-go/trie/factory" vmcommon "github.com/multiversx/mx-chain-vm-common-go" "github.com/stretchr/testify/require" ) @@ -40,7 +38,7 @@ func Test_newBlockProcessorCreatorForShard(t *testing.T) { _, err = pcf.Create() require.NoError(t, err) - bp, vmFactoryForSimulate, err := pcf.NewBlockProcessor( + bp, err := pcf.NewBlockProcessor( &testscommon.RequestHandlerStub{}, &mock.ForkDetectorStub{}, &mock.EpochStartTriggerStub{}, @@ -49,18 +47,16 @@ func Test_newBlockProcessorCreatorForShard(t *testing.T) { &mock.HeaderValidatorStub{}, &mock.BlockTrackerStub{}, &mock.PendingMiniBlocksHandlerStub{}, - &txsimulator.ArgsTxSimulator{ - VMOutputCacher: txcache.NewDisabledCache(), - }, &sync.RWMutex{}, &testscommon.ScheduledTxsExecutionStub{}, &testscommon.ProcessedMiniBlocksTrackerStub{}, &testscommon.ReceiptsRepositoryStub{}, + &testscommon.BlockProcessingCutoffStub{}, + &testscommon.MissingTrieNodesNotifierStub{}, ) require.NoError(t, err) require.NotNil(t, bp) - require.NotNil(t, vmFactoryForSimulate) } func Test_newBlockProcessorCreatorForMeta(t *testing.T) { @@ -88,24 +84,24 @@ func Test_newBlockProcessorCreatorForMeta(t *testing.T) { cryptoComponents := componentsMock.GetCryptoComponents(coreComponents) networkComponents := componentsMock.GetNetworkComponents(cryptoComponents) - storageManagerArgs, options := storageManager.GetStorageManagerArgsAndOptions() + storageManagerArgs := storageManager.GetStorageManagerArgs() storageManagerArgs.Marshalizer = coreComponents.InternalMarshalizer() storageManagerArgs.Hasher = coreComponents.Hasher() - storageManagerUser, _ := trie.CreateTrieStorageManager(storageManagerArgs, options) + storageManagerUser, _ := trie.CreateTrieStorageManager(storageManagerArgs, storageManager.GetStorageManagerOptions()) storageManagerArgs.MainStorer = mock.NewMemDbMock() storageManagerArgs.CheckpointsStorer = mock.NewMemDbMock() - storageManagerPeer, _ := trie.CreateTrieStorageManager(storageManagerArgs, options) + storageManagerPeer, _ := trie.CreateTrieStorageManager(storageManagerArgs, storageManager.GetStorageManagerOptions()) trieStorageManagers := make(map[string]common.StorageManager) - trieStorageManagers[trieFactory.UserAccountTrie] = storageManagerUser - trieStorageManagers[trieFactory.PeerAccountTrie] = storageManagerPeer + trieStorageManagers[dataRetriever.UserAccountsUnit.String()] = storageManagerUser + trieStorageManagers[dataRetriever.PeerAccountsUnit.String()] = storageManagerPeer accounts, err := createAccountAdapter( &mock.MarshalizerMock{}, &hashingMocks.HasherMock{}, factoryState.NewAccountCreator(), - trieStorageManagers[trieFactory.UserAccountTrie], + trieStorageManagers[dataRetriever.UserAccountsUnit.String()], ) require.Nil(t, err) @@ -160,7 +156,7 @@ func Test_newBlockProcessorCreatorForMeta(t *testing.T) { _, err = pcf.Create() require.NoError(t, err) - bp, vmFactoryForSimulate, err := pcf.NewBlockProcessor( + bp, err := pcf.NewBlockProcessor( &testscommon.RequestHandlerStub{}, &mock.ForkDetectorStub{}, 
&mock.EpochStartTriggerStub{}, @@ -169,18 +165,16 @@ func Test_newBlockProcessorCreatorForMeta(t *testing.T) { &mock.HeaderValidatorStub{}, &mock.BlockTrackerStub{}, &mock.PendingMiniBlocksHandlerStub{}, - &txsimulator.ArgsTxSimulator{ - VMOutputCacher: txcache.NewDisabledCache(), - }, &sync.RWMutex{}, &testscommon.ScheduledTxsExecutionStub{}, &testscommon.ProcessedMiniBlocksTrackerStub{}, &testscommon.ReceiptsRepositoryStub{}, + &testscommon.BlockProcessingCutoffStub{}, + &testscommon.MissingTrieNodesNotifierStub{}, ) require.NoError(t, err) require.NotNil(t, bp) - require.NotNil(t, vmFactoryForSimulate) } func createAccountAdapter( diff --git a/factory/processing/export_test.go b/factory/processing/export_test.go index f9cae468a41..023cd83d46a 100644 --- a/factory/processing/export_test.go +++ b/factory/processing/export_test.go @@ -1,14 +1,11 @@ package processing import ( - "github.com/multiversx/mx-chain-core-go/data" - "github.com/multiversx/mx-chain-core-go/data/outport" "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/epochStart" "github.com/multiversx/mx-chain-go/factory" - "github.com/multiversx/mx-chain-go/genesis" "github.com/multiversx/mx-chain-go/process" - "github.com/multiversx/mx-chain-go/process/txsimulator" + "github.com/multiversx/mx-chain-go/process/block/cutoff" ) // NewBlockProcessor calls the unexported method with the same name in order to use it in tests @@ -21,12 +18,13 @@ func (pcf *processComponentsFactory) NewBlockProcessor( headerValidator process.HeaderConstructionValidator, blockTracker process.BlockTracker, pendingMiniBlocksHandler process.PendingMiniBlocksHandler, - txSimulatorProcessorArgs *txsimulator.ArgsTxSimulator, wasmVMChangeLocker common.Locker, scheduledTxsExecutionHandler process.ScheduledTxsExecutionHandler, processedMiniBlocksTracker process.ProcessedMiniBlocksTracker, receiptsRepository factory.ReceiptsRepository, -) (process.BlockProcessor, process.VirtualMachinesContainerFactory, error) { + blockProcessingCutoff cutoff.BlockProcessingCutoffHandler, + missingTrieNodesNotifier common.MissingTrieNodesNotifier, +) (process.BlockProcessor, error) { blockProcessorComponents, err := pcf.newBlockProcessor( requestHandler, forkDetector, @@ -36,20 +34,21 @@ func (pcf *processComponentsFactory) NewBlockProcessor( headerValidator, blockTracker, pendingMiniBlocksHandler, - txSimulatorProcessorArgs, wasmVMChangeLocker, scheduledTxsExecutionHandler, processedMiniBlocksTracker, receiptsRepository, + blockProcessingCutoff, + missingTrieNodesNotifier, ) if err != nil { - return nil, nil, err + return nil, err } - return blockProcessorComponents.blockProcessor, blockProcessorComponents.vmFactoryForTxSimulate, nil + return blockProcessorComponents.blockProcessor, nil } -// IndexGenesisBlocks - -func (pcf *processComponentsFactory) IndexGenesisBlocks(genesisBlocks map[uint32]data.HeaderHandler, indexingData map[uint32]*genesis.IndexingData) error { - return pcf.indexGenesisBlocks(genesisBlocks, indexingData, map[string]*outport.AlteredAccount{}) +// CreateTxSimulatorProcessor - +func (pcf *processComponentsFactory) CreateTxSimulatorProcessor() (factory.TransactionSimulatorProcessor, process.VirtualMachinesContainerFactory, error) { + return pcf.createTxSimulatorProcessor() } diff --git a/factory/processing/processComponents.go b/factory/processing/processComponents.go index 205e528c1f1..916712488f6 100644 --- a/factory/processing/processComponents.go +++ b/factory/processing/processComponents.go @@ -12,8 +12,10 @@ import ( 
"github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-core-go/core/partitioning" "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-core-go/data/alteredAccount" dataBlock "github.com/multiversx/mx-chain-core-go/data/block" "github.com/multiversx/mx-chain-core-go/data/outport" + "github.com/multiversx/mx-chain-core-go/data/receipt" nodeFactory "github.com/multiversx/mx-chain-go/cmd/node/factory" "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/common/errChan" @@ -32,7 +34,7 @@ import ( "github.com/multiversx/mx-chain-go/epochStart/metachain" "github.com/multiversx/mx-chain-go/epochStart/notifier" "github.com/multiversx/mx-chain-go/epochStart/shardchain" - errErd "github.com/multiversx/mx-chain-go/errors" + errorsMx "github.com/multiversx/mx-chain-go/errors" "github.com/multiversx/mx-chain-go/factory" mainFactory "github.com/multiversx/mx-chain-go/factory" "github.com/multiversx/mx-chain-go/factory/disabled" @@ -43,6 +45,7 @@ import ( "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/process/block" "github.com/multiversx/mx-chain-go/process/block/bootstrapStorage" + "github.com/multiversx/mx-chain-go/process/block/cutoff" "github.com/multiversx/mx-chain-go/process/block/pendingMb" "github.com/multiversx/mx-chain-go/process/block/poolsCleaner" "github.com/multiversx/mx-chain-go/process/block/preprocess" @@ -57,7 +60,6 @@ import ( "github.com/multiversx/mx-chain-go/process/track" "github.com/multiversx/mx-chain-go/process/transactionLog" "github.com/multiversx/mx-chain-go/process/txsSender" - "github.com/multiversx/mx-chain-go/process/txsimulator" "github.com/multiversx/mx-chain-go/redundancy" "github.com/multiversx/mx-chain-go/sharding" "github.com/multiversx/mx-chain-go/sharding/networksharding" @@ -129,7 +131,7 @@ type processComponents struct { type ProcessComponentsFactoryArgs struct { Config config.Config EpochConfig config.EpochConfig - PrefConfigs config.PreferencesConfig + PrefConfigs config.Preferences ImportDBConfig config.ImportDbConfig AccountsParser genesis.AccountsParser SmartContractParser genesis.InitialSmartContractParser @@ -140,11 +142,9 @@ type ProcessComponentsFactoryArgs struct { WhiteListerVerifiedTxs process.WhiteListHandler MaxRating uint32 SystemSCConfig *config.SystemSmartContractsConfig - Version string ImportStartHandler update.ImportStartHandler - WorkingDir string HistoryRepo dblookupext.HistoryRepository - SnapshotsEnabled bool + FlagsConfig config.ContextFlagsConfig Data factory.DataComponentsHolder CoreData factory.CoreComponentsHolder @@ -159,7 +159,7 @@ type ProcessComponentsFactoryArgs struct { type processComponentsFactory struct { config config.Config epochConfig config.EpochConfig - prefConfigs config.PreferencesConfig + prefConfigs config.Preferences importDBConfig config.ImportDbConfig accountsParser genesis.AccountsParser smartContractParser genesis.InitialSmartContractParser @@ -171,13 +171,11 @@ type processComponentsFactory struct { maxRating uint32 systemSCConfig *config.SystemSmartContractsConfig txLogsProcessor process.TransactionLogProcessor - version string importStartHandler update.ImportStartHandler - workingDir string historyRepo dblookupext.HistoryRepository epochNotifier process.EpochNotifier importHandler update.ImportHandler - snapshotsEnabled bool + flagsConfig config.ContextFlagsConfig esdtNftStorage vmcommon.ESDTNFTStorageHandler data factory.DataComponentsHolder @@ -218,13 +216,11 @@ func 
NewProcessComponentsFactory(args ProcessComponentsFactoryArgs) (*processCom whiteListerVerifiedTxs: args.WhiteListerVerifiedTxs, maxRating: args.MaxRating, systemSCConfig: args.SystemSCConfig, - version: args.Version, importStartHandler: args.ImportStartHandler, - workingDir: args.WorkingDir, historyRepo: args.HistoryRepo, epochNotifier: args.CoreData.EpochNotifier(), statusCoreComponents: args.StatusCoreComponents, - snapshotsEnabled: args.SnapshotsEnabled, + flagsConfig: args.FlagsConfig, }, nil } @@ -236,7 +232,7 @@ func (pcf *processComponentsFactory) Create() (*processComponents, error) { pcf.config, pcf.coreData.GenesisNodesSetup().GetRoundDuration(), pcf.coreData.GenesisTime().Unix(), - pcf.prefConfigs.FullArchive, + pcf.prefConfigs.Preferences.FullArchive, ) if err != nil { return nil, err @@ -544,20 +540,6 @@ func (pcf *processComponentsFactory) Create() (*processComponents, error) { return nil, err } - vmOutputCacherConfig := storageFactory.GetCacherFromConfig(pcf.config.VMOutputCacher) - vmOutputCacher, err := storageunit.NewCache(vmOutputCacherConfig) - if err != nil { - return nil, err - } - - txSimulatorProcessorArgs := &txsimulator.ArgsTxSimulator{ - AddressPubKeyConverter: pcf.coreData.AddressPubKeyConverter(), - ShardCoordinator: pcf.bootstrapComponents.ShardCoordinator(), - VMOutputCacher: vmOutputCacher, - Hasher: pcf.coreData.Hasher(), - Marshalizer: pcf.coreData.InternalMarshalizer(), - } - scheduledSCRSStorer, err := pcf.data.StorageService().GetStorer(dataRetriever.ScheduledSCRsUnit) if err != nil { return nil, err @@ -598,6 +580,11 @@ func (pcf *processComponentsFactory) Create() (*processComponents, error) { return nil, err } + blockCutoffProcessingHandler, err := cutoff.CreateBlockProcessingCutoffHandler(pcf.prefConfigs.BlockProcessingCutoff) + if err != nil { + return nil, err + } + blockProcessorComponents, err := pcf.newBlockProcessor( requestHandler, forkDetector, @@ -607,11 +594,12 @@ func (pcf *processComponentsFactory) Create() (*processComponents, error) { headerValidator, blockTracker, pendingMiniBlocksHandler, - txSimulatorProcessorArgs, pcf.coreData.WasmVMChangeLocker(), scheduledTxsExecutionHandler, processedMiniBlocksTracker, receiptsRepository, + blockCutoffProcessingHandler, + pcf.state.MissingTrieNodesNotifier(), ) if err != nil { return nil, err @@ -638,11 +626,6 @@ func (pcf *processComponentsFactory) Create() (*processComponents, error) { return nil, err } - txSimulator, err := txsimulator.NewTransactionSimulator(*txSimulatorProcessorArgs) - if err != nil { - return nil, err - } - observerBLSPrivateKey, observerBLSPublicKey := pcf.crypto.BlockSignKeyGen().GeneratePair() observerBLSPublicKeyBuff, err := observerBLSPublicKey.ToByteArray() if err != nil { @@ -653,7 +636,7 @@ func (pcf *processComponentsFactory) Create() (*processComponents, error) { } nodeRedundancyArg := redundancy.ArgNodeRedundancy{ - RedundancyLevel: pcf.prefConfigs.RedundancyLevel, + RedundancyLevel: pcf.prefConfigs.Preferences.RedundancyLevel, Messenger: pcf.network.NetworkMessenger(), ObserverPrivateKey: observerBLSPrivateKey, } @@ -678,6 +661,11 @@ func (pcf *processComponentsFactory) Create() (*processComponents, error) { return nil, err } + txSimulatorProcessor, vmFactoryForTxSimulate, err := pcf.createTxSimulatorProcessor() + if err != nil { + return nil, fmt.Errorf("%w when assembling components for the transactions simulator processor", err) + } + return &processComponents{ nodesCoordinator: pcf.nodesCoordinator, shardCoordinator: 
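// ---------------------------------------------------------------------------
// Review note (illustrative sketch, not part of the patch): Create() above now builds
// the block-processing cutoff handler from the preferences section and passes it to
// newBlockProcessor, and the transaction simulator is assembled later through
// createTxSimulatorProcessor instead of the removed txsimulator.ArgsTxSimulator flow.
// The helper below just isolates the cutoff construction step shown in the hunk.
// ---------------------------------------------------------------------------
package processingnotes

import (
	"github.com/multiversx/mx-chain-go/config"
	"github.com/multiversx/mx-chain-go/process/block/cutoff"
)

func buildBlockProcessingCutoff(prefs config.Preferences) (cutoff.BlockProcessingCutoffHandler, error) {
	// the concrete behaviour (including the not-configured case) lives in the cutoff package
	return cutoff.CreateBlockProcessingCutoffHandler(prefs.BlockProcessingCutoff)
}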
pcf.bootstrapComponents.ShardCoordinator(), @@ -701,7 +689,7 @@ func (pcf *processComponentsFactory) Create() (*processComponents, error) { headerConstructionValidator: headerValidator, headerIntegrityVerifier: pcf.bootstrapComponents.HeaderIntegrityVerifier(), peerShardMapper: peerShardMapper, - txSimulatorProcessor: txSimulator, + txSimulatorProcessor: txSimulatorProcessor, miniBlocksPoolCleaner: mbsPoolsCleaner, txsPoolCleaner: txsPoolsCleaner, fallbackHeaderValidator: fallbackHeaderValidator, @@ -713,7 +701,7 @@ func (pcf *processComponentsFactory) Create() (*processComponents, error) { importHandler: pcf.importHandler, nodeRedundancyHandler: nodeRedundancyHandler, currentEpochProvider: currentEpochProvider, - vmFactoryForTxSimulator: blockProcessorComponents.vmFactoryForTxSimulate, + vmFactoryForTxSimulator: vmFactoryForTxSimulate, vmFactoryForProcessing: blockProcessorComponents.vmFactoryForProcessing, scheduledTxsExecutionHandler: scheduledTxsExecutionHandler, txsSender: txsSenderWithAccumulator, @@ -739,6 +727,12 @@ func (pcf *processComponentsFactory) newValidatorStatisticsProcessor() (process. if hardforkConfig.AfterHardFork { ratingEnabledEpoch = hardforkConfig.StartEpoch + hardforkConfig.ValidatorGracePeriodInEpochs } + + genesisHeader := pcf.data.Blockchain().GetGenesisHeader() + if check.IfNil(genesisHeader) { + return nil, errorsMx.ErrGenesisBlockNotInitialized + } + arguments := peer.ArgValidatorStatisticsProcessor{ PeerAdapter: pcf.state.PeerAccounts(), PubkeyConv: pcf.coreData.ValidatorPubKeyConverter(), @@ -753,20 +747,16 @@ func (pcf *processComponentsFactory) newValidatorStatisticsProcessor() (process. RewardsHandler: pcf.coreData.EconomicsData(), NodesSetup: pcf.coreData.GenesisNodesSetup(), RatingEnableEpoch: ratingEnabledEpoch, - GenesisNonce: pcf.data.Blockchain().GetGenesisHeader().GetNonce(), + GenesisNonce: genesisHeader.GetNonce(), EnableEpochsHandler: pcf.coreData.EnableEpochsHandler(), } - validatorStatisticsProcessor, err := peer.NewValidatorStatisticsProcessor(arguments) - if err != nil { - return nil, err - } - - return validatorStatisticsProcessor, nil + return peer.NewValidatorStatisticsProcessor(arguments) } func (pcf *processComponentsFactory) newEpochStartTrigger(requestHandler epochStart.RequestHandler) (epochStart.TriggerHandler, error) { - if pcf.bootstrapComponents.ShardCoordinator().SelfId() < pcf.bootstrapComponents.ShardCoordinator().NumberOfShards() { + shardCoordinator := pcf.bootstrapComponents.ShardCoordinator() + if shardCoordinator.SelfId() < shardCoordinator.NumberOfShards() { argsHeaderValidator := block.ArgsHeaderValidator{ Hasher: pcf.coreData.Hasher(), Marshalizer: pcf.coreData.InternalMarshalizer(), @@ -804,20 +794,20 @@ func (pcf *processComponentsFactory) newEpochStartTrigger(requestHandler epochSt AppStatusHandler: pcf.statusCoreComponents.AppStatusHandler(), EnableEpochsHandler: pcf.coreData.EnableEpochsHandler(), } - epochStartTrigger, err := shardchain.NewEpochStartTrigger(argEpochStart) - if err != nil { - return nil, errors.New("error creating new start of epoch trigger" + err.Error()) - } - - return epochStartTrigger, nil + return shardchain.NewEpochStartTrigger(argEpochStart) } - if pcf.bootstrapComponents.ShardCoordinator().SelfId() == core.MetachainShardId { + if shardCoordinator.SelfId() == core.MetachainShardId { + genesisHeader := pcf.data.Blockchain().GetGenesisHeader() + if check.IfNil(genesisHeader) { + return nil, errorsMx.ErrGenesisBlockNotInitialized + } + argEpochStart := 
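// ---------------------------------------------------------------------------
// Review note (illustrative sketch, not part of the patch): newValidatorStatisticsProcessor
// and the meta epoch-start trigger above now fetch the genesis header once and fail fast
// with errorsMx.ErrGenesisBlockNotInitialized instead of dereferencing a possibly-nil header.
// ---------------------------------------------------------------------------
package processingnotes

import (
	"github.com/multiversx/mx-chain-core-go/core/check"
	"github.com/multiversx/mx-chain-core-go/data"
	errorsMx "github.com/multiversx/mx-chain-go/errors"
)

func genesisNonceAndRound(genesisHeader data.HeaderHandler) (uint64, uint64, error) {
	if check.IfNil(genesisHeader) {
		return 0, 0, errorsMx.ErrGenesisBlockNotInitialized
	}

	return genesisHeader.GetNonce(), genesisHeader.GetRound(), nil
}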
&metachain.ArgsNewMetaEpochStartTrigger{ GenesisTime: time.Unix(pcf.coreData.GenesisNodesSetup().GetStartTime(), 0), Settings: &pcf.config.EpochStartConfig, Epoch: pcf.bootstrapComponents.EpochBootstrapParams().Epoch(), - EpochStartRound: pcf.data.Blockchain().GetGenesisHeader().GetRound(), + EpochStartRound: genesisHeader.GetRound(), EpochStartNotifier: pcf.coreData.EpochStartNotifierWithConfirm(), Storage: pcf.data.StorageService(), Marshalizer: pcf.coreData.InternalMarshalizer(), @@ -825,12 +815,8 @@ func (pcf *processComponentsFactory) newEpochStartTrigger(requestHandler epochSt AppStatusHandler: pcf.statusCoreComponents.AppStatusHandler(), DataPool: pcf.data.Datapool(), } - epochStartTrigger, err := metachain.NewEpochStartTrigger(argEpochStart) - if err != nil { - return nil, errors.New("error creating new start of epoch trigger" + err.Error()) - } - return epochStartTrigger, nil + return metachain.NewEpochStartTrigger(argEpochStart) } return nil, errors.New("error creating new start of epoch trigger because of invalid shard id") @@ -863,7 +849,6 @@ func (pcf *processComponentsFactory) generateGenesisHeadersAndApplyInitialBalanc TrieStorageManagers: pcf.state.TrieStorageManagers(), SystemSCConfig: *pcf.systemSCConfig, ImportStartHandler: pcf.importStartHandler, - WorkingDir: pcf.workingDir, BlockSignKeyGen: pcf.crypto.BlockSignKeyGen(), GenesisString: pcf.config.GeneralSettings.GenesisString, GenesisNodePrice: genesisNodePrice, @@ -885,14 +870,14 @@ func (pcf *processComponentsFactory) generateGenesisHeadersAndApplyInitialBalanc return genesisBlocks, indexingData, nil } -func (pcf *processComponentsFactory) indexAndReturnGenesisAccounts() (map[string]*outport.AlteredAccount, error) { +func (pcf *processComponentsFactory) indexAndReturnGenesisAccounts() (map[string]*alteredAccount.AlteredAccount, error) { if !pcf.statusComponents.OutportHandler().HasDrivers() { - return map[string]*outport.AlteredAccount{}, nil + return map[string]*alteredAccount.AlteredAccount{}, nil } rootHash, err := pcf.state.AccountsAdapter().RootHash() if err != nil { - return nil, err + return map[string]*alteredAccount.AlteredAccount{}, err } leavesChannels := &common.TrieIteratorChannels{ @@ -901,10 +886,10 @@ func (pcf *processComponentsFactory) indexAndReturnGenesisAccounts() (map[string } err = pcf.state.AccountsAdapter().GetAllLeaves(leavesChannels, context.Background(), rootHash) if err != nil { - return nil, err + return map[string]*alteredAccount.AlteredAccount{}, err } - genesisAccounts := make(map[string]*outport.AlteredAccount, 0) + genesisAccounts := make(map[string]*alteredAccount.AlteredAccount, 0) for leaf := range leavesChannels.LeavesChan { userAccount, errUnmarshal := pcf.unmarshalUserAccount(leaf.Key(), leaf.Value()) if errUnmarshal != nil { @@ -912,13 +897,13 @@ func (pcf *processComponentsFactory) indexAndReturnGenesisAccounts() (map[string continue } - encodedAddress, err := pcf.coreData.AddressPubKeyConverter().Encode(userAccount.AddressBytes()) - if err != nil { - return nil, err + encodedAddress, errEncode := pcf.coreData.AddressPubKeyConverter().Encode(userAccount.AddressBytes()) + if errEncode != nil { + return map[string]*alteredAccount.AlteredAccount{}, errEncode } - genesisAccounts[encodedAddress] = &outport.AlteredAccount{ - AdditionalData: &outport.AdditionalAccountData{ + genesisAccounts[encodedAddress] = &alteredAccount.AlteredAccount{ + AdditionalData: &alteredAccount.AdditionalAccountData{ BalanceChanged: true, }, Address: encodedAddress, @@ -930,11 +915,15 @@ func (pcf 
*processComponentsFactory) indexAndReturnGenesisAccounts() (map[string err = leavesChannels.ErrChan.ReadFromChanNonBlocking() if err != nil { - return nil, err + return map[string]*alteredAccount.AlteredAccount{}, err } shardID := pcf.bootstrapComponents.ShardCoordinator().SelfId() - pcf.statusComponents.OutportHandler().SaveAccounts(uint64(pcf.coreData.GenesisNodesSetup().GetStartTime()), genesisAccounts, shardID) + pcf.statusComponents.OutportHandler().SaveAccounts(&outport.Accounts{ + ShardID: shardID, + BlockTimestamp: uint64(pcf.coreData.GenesisNodesSetup().GetStartTime()), + AlteredAccounts: genesisAccounts, + }) return genesisAccounts, nil } @@ -957,12 +946,7 @@ func (pcf *processComponentsFactory) setGenesisHeader(genesisBlocks map[uint32]d return errors.New("genesis block does not exist") } - err := pcf.data.Blockchain().SetGenesisHeader(genesisBlock) - if err != nil { - return err - } - - return nil + return pcf.data.Blockchain().SetGenesisHeader(genesisBlock) } func (pcf *processComponentsFactory) prepareGenesisBlock( @@ -986,12 +970,7 @@ func (pcf *processComponentsFactory) prepareGenesisBlock( pcf.data.Blockchain().SetGenesisHeaderHash(genesisBlockHash) nonceToByteSlice := pcf.coreData.Uint64ByteSliceConverter().ToByteSlice(genesisBlock.GetNonce()) - err = pcf.saveGenesisHeaderToStorage(genesisBlock, genesisBlockHash, nonceToByteSlice) - if err != nil { - return err - } - - return nil + return pcf.saveGenesisHeaderToStorage(genesisBlock, genesisBlockHash, nonceToByteSlice) } func (pcf *processComponentsFactory) saveGenesisHeaderToStorage( @@ -1103,10 +1082,10 @@ func (pcf *processComponentsFactory) createGenesisMiniBlockHandlers(miniBlocks [ func (pcf *processComponentsFactory) indexGenesisBlocks( genesisBlocks map[uint32]data.HeaderHandler, initialIndexingData map[uint32]*genesis.IndexingData, - alteredAccounts map[string]*outport.AlteredAccount, + alteredAccounts map[string]*alteredAccount.AlteredAccount, ) error { - currentShardId := pcf.bootstrapComponents.ShardCoordinator().SelfId() - originalGenesisBlockHeader := genesisBlocks[currentShardId] + currentShardID := pcf.bootstrapComponents.ShardCoordinator().SelfId() + originalGenesisBlockHeader := genesisBlocks[currentShardID] genesisBlockHeader := originalGenesisBlockHeader.ShallowClone() genesisBlockHash, err := core.CalculateHash(pcf.coreData.InternalMarshalizer(), pcf.coreData.Hasher(), genesisBlockHeader) @@ -1120,7 +1099,7 @@ func (pcf *processComponentsFactory) indexGenesisBlocks( } intraShardMiniBlocks := getGenesisIntraShardMiniblocks(miniBlocks) - genesisBody := getGenesisBlockForShard(miniBlocks, currentShardId) + genesisBody := getGenesisBlockForShard(miniBlocks, currentShardID) if pcf.statusComponents.OutportHandler().HasDrivers() { log.Info("indexGenesisBlocks(): indexer.SaveBlock", "hash", genesisBlockHash) @@ -1128,39 +1107,47 @@ func (pcf *processComponentsFactory) indexGenesisBlocks( // manually add the genesis minting address as it is not exist in the trie genesisAddress := pcf.accountsParser.GenesisMintingAddress() - alteredAccounts[genesisAddress] = &outport.AlteredAccount{ + alteredAccounts[genesisAddress] = &alteredAccount.AlteredAccount{ Address: genesisAddress, Balance: "0", } - _ = genesisBlockHeader.SetTxCount(uint32(len(txsPoolPerShard[currentShardId].Txs))) - - arg := &outport.ArgsSaveBlockData{ - HeaderHash: genesisBlockHash, - Body: genesisBody, - Header: genesisBlockHeader, - HeaderGasConsumption: outport.HeaderGasConsumption{ - GasProvided: 0, - GasRefunded: 0, - GasPenalized: 0, - 
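// ---------------------------------------------------------------------------
// Review note (illustrative sketch, not part of the patch): genesis accounts are now
// collected as alteredAccount.AlteredAccount values and pushed to the outport handler
// wrapped in an outport.Accounts message (field names taken from the hunk above).
// ---------------------------------------------------------------------------
package processingnotes

import (
	"github.com/multiversx/mx-chain-core-go/data/alteredAccount"
	"github.com/multiversx/mx-chain-core-go/data/outport"
)

func newGenesisAccountEntry(encodedAddress string, balance string) *alteredAccount.AlteredAccount {
	return &alteredAccount.AlteredAccount{
		Address: encodedAddress,
		Balance: balance,
		AdditionalData: &alteredAccount.AdditionalAccountData{
			BalanceChanged: true,
		},
	}
}

func buildGenesisAccountsMessage(
	shardID uint32,
	genesisTimestamp uint64,
	accounts map[string]*alteredAccount.AlteredAccount,
) *outport.Accounts {
	return &outport.Accounts{
		ShardID:         shardID,
		BlockTimestamp:  genesisTimestamp,
		AlteredAccounts: accounts,
	}
}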
MaxGasPerBlock: pcf.coreData.EconomicsData().MaxGasLimitPerBlock(currentShardId), + _ = genesisBlockHeader.SetTxCount(uint32(len(txsPoolPerShard[currentShardID].Transactions))) + + arg := &outport.OutportBlockWithHeaderAndBody{ + OutportBlock: &outport.OutportBlock{ + BlockData: nil, // this will be filled by outport handler + HeaderGasConsumption: &outport.HeaderGasConsumption{ + GasProvided: 0, + GasRefunded: 0, + GasPenalized: 0, + MaxGasPerBlock: pcf.coreData.EconomicsData().MaxGasLimitPerBlock(currentShardID), + }, + TransactionPool: txsPoolPerShard[currentShardID], + AlteredAccounts: alteredAccounts, }, - TransactionsPool: txsPoolPerShard[currentShardId], - AlteredAccounts: alteredAccounts, + HeaderDataWithBody: &outport.HeaderDataWithBody{ + Body: genesisBody, + Header: genesisBlockHeader, + HeaderHash: genesisBlockHash, + }, + } + errOutport := pcf.statusComponents.OutportHandler().SaveBlock(arg) + if errOutport != nil { + log.Error("indexGenesisBlocks.outportHandler.SaveBlock cannot save block", "error", errOutport) } - pcf.statusComponents.OutportHandler().SaveBlock(arg) } - log.Info("indexGenesisBlocks(): historyRepo.RecordBlock", "shardID", currentShardId, "hash", genesisBlockHash) - if txsPoolPerShard[currentShardId] != nil { + log.Info("indexGenesisBlocks(): historyRepo.RecordBlock", "shardID", currentShardID, "hash", genesisBlockHash) + if txsPoolPerShard[currentShardID] != nil { err = pcf.historyRepo.RecordBlock( genesisBlockHash, originalGenesisBlockHeader, genesisBody, - unwrapTxs(txsPoolPerShard[currentShardId].Scrs), - unwrapTxs(txsPoolPerShard[currentShardId].Receipts), + wrapSCRsInfo(txsPoolPerShard[currentShardID].SmartContractResults), + wrapReceipts(txsPoolPerShard[currentShardID].Receipts), intraShardMiniBlocks, - txsPoolPerShard[currentShardId].Logs) + wrapLogs(txsPoolPerShard[currentShardID].Logs)) if err != nil { return err } @@ -1171,31 +1158,26 @@ func (pcf *processComponentsFactory) indexGenesisBlocks( return err } - if txsPoolPerShard[currentShardId] != nil { - err = pcf.saveGenesisTxsToStorage(unwrapTxs(txsPoolPerShard[currentShardId].Txs)) + if txsPoolPerShard[currentShardID] != nil { + err = pcf.saveGenesisTxsToStorage(wrapTxsInfo(txsPoolPerShard[currentShardID].Transactions)) if err != nil { return err } } - nonceByHashDataUnit := dataRetriever.GetHdrNonceHashDataUnit(currentShardId) + nonceByHashDataUnit := dataRetriever.GetHdrNonceHashDataUnit(currentShardID) nonceAsBytes := pcf.coreData.Uint64ByteSliceConverter().ToByteSlice(genesisBlockHeader.GetNonce()) err = pcf.data.StorageService().Put(nonceByHashDataUnit, nonceAsBytes, genesisBlockHash) if err != nil { return err } - err = pcf.saveAlteredGenesisHeaderToStorage( + return pcf.saveAlteredGenesisHeaderToStorage( genesisBlockHeader, genesisBlockHash, genesisBody, intraShardMiniBlocks, txsPoolPerShard) - if err != nil { - return err - } - - return nil } func (pcf *processComponentsFactory) saveAlteredGenesisHeaderToStorage( @@ -1203,7 +1185,7 @@ func (pcf *processComponentsFactory) saveAlteredGenesisHeaderToStorage( genesisBlockHash []byte, genesisBody *dataBlock.Body, intraShardMiniBlocks []*dataBlock.MiniBlock, - txsPoolPerShard map[uint32]*outport.Pool, + txsPoolPerShard map[uint32]*outport.TransactionPool, ) error { currentShardId := pcf.bootstrapComponents.ShardCoordinator().SelfId() @@ -1230,10 +1212,10 @@ func (pcf *processComponentsFactory) saveAlteredGenesisHeaderToStorage( genesisBlockHash, genesisBlockHeader, genesisBody, - unwrapTxs(txsPoolPerShard[currentShardId].Scrs), - 
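// ---------------------------------------------------------------------------
// Review note (illustrative sketch, not part of the patch): SaveBlock now receives an
// outport.OutportBlockWithHeaderAndBody, where the header/body pair travels separately
// from the OutportBlock and BlockData is left nil for the outport handler to fill in,
// and the returned error is logged rather than ignored. Field names mirror the hunk above.
// ---------------------------------------------------------------------------
package processingnotes

import (
	"github.com/multiversx/mx-chain-core-go/data"
	"github.com/multiversx/mx-chain-core-go/data/alteredAccount"
	dataBlock "github.com/multiversx/mx-chain-core-go/data/block"
	"github.com/multiversx/mx-chain-core-go/data/outport"
)

func buildGenesisSaveBlockArgs(
	header data.HeaderHandler,
	body *dataBlock.Body,
	headerHash []byte,
	pool *outport.TransactionPool,
	accounts map[string]*alteredAccount.AlteredAccount,
	maxGasPerBlock uint64,
) *outport.OutportBlockWithHeaderAndBody {
	return &outport.OutportBlockWithHeaderAndBody{
		OutportBlock: &outport.OutportBlock{
			BlockData: nil, // filled in by the outport handler, as noted in the hunk above
			HeaderGasConsumption: &outport.HeaderGasConsumption{
				MaxGasPerBlock: maxGasPerBlock,
			},
			TransactionPool: pool,
			AlteredAccounts: accounts,
		},
		HeaderDataWithBody: &outport.HeaderDataWithBody{
			Body:       body,
			Header:     header,
			HeaderHash: headerHash,
		},
	}
}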
unwrapTxs(txsPoolPerShard[currentShardId].Receipts), + wrapSCRsInfo(txsPoolPerShard[currentShardId].SmartContractResults), + wrapReceipts(txsPoolPerShard[currentShardId].Receipts), intraShardMiniBlocks, - txsPoolPerShard[currentShardId].Logs) + wrapLogs(txsPoolPerShard[currentShardId].Logs)) if err != nil { return err } @@ -1284,13 +1266,14 @@ func (pcf *processComponentsFactory) newBlockTracker( requestHandler process.RequestHandler, genesisBlocks map[uint32]data.HeaderHandler, ) (process.BlockTracker, error) { + shardCoordinator := pcf.bootstrapComponents.ShardCoordinator() argBaseTracker := track.ArgBaseTracker{ Hasher: pcf.coreData.Hasher(), HeaderValidator: headerValidator, Marshalizer: pcf.coreData.InternalMarshalizer(), RequestHandler: requestHandler, RoundHandler: pcf.coreData.RoundHandler(), - ShardCoordinator: pcf.bootstrapComponents.ShardCoordinator(), + ShardCoordinator: shardCoordinator, Store: pcf.data.StorageService(), StartHeaders: genesisBlocks, PoolsHolder: pcf.data.Datapool(), @@ -1298,7 +1281,7 @@ func (pcf *processComponentsFactory) newBlockTracker( FeeHandler: pcf.coreData.EconomicsData(), } - if pcf.bootstrapComponents.ShardCoordinator().SelfId() < pcf.bootstrapComponents.ShardCoordinator().NumberOfShards() { + if shardCoordinator.SelfId() < shardCoordinator.NumberOfShards() { arguments := track.ArgShardTracker{ ArgBaseTracker: argBaseTracker, } @@ -1306,7 +1289,7 @@ func (pcf *processComponentsFactory) newBlockTracker( return track.NewShardBlockTrack(arguments) } - if pcf.bootstrapComponents.ShardCoordinator().SelfId() == core.MetachainShardId { + if shardCoordinator.SelfId() == core.MetachainShardId { arguments := track.ArgMetaTracker{ ArgBaseTracker: argBaseTracker, } @@ -1362,7 +1345,7 @@ func (pcf *processComponentsFactory) newShardResolverContainerFactory( InputAntifloodHandler: pcf.network.InputAntiFloodHandler(), OutputAntifloodHandler: pcf.network.OutputAntiFloodHandler(), NumConcurrentResolvingJobs: pcf.config.Antiflood.NumConcurrentResolverJobs, - IsFullHistoryNode: pcf.prefConfigs.FullArchive, + IsFullHistoryNode: pcf.prefConfigs.Preferences.FullArchive, PreferredPeersHolder: pcf.network.PreferredPeersHolderHandler(), PayloadValidator: payloadValidator, } @@ -1396,15 +1379,12 @@ func (pcf *processComponentsFactory) newMetaResolverContainerFactory( InputAntifloodHandler: pcf.network.InputAntiFloodHandler(), OutputAntifloodHandler: pcf.network.OutputAntiFloodHandler(), NumConcurrentResolvingJobs: pcf.config.Antiflood.NumConcurrentResolverJobs, - IsFullHistoryNode: pcf.prefConfigs.FullArchive, + IsFullHistoryNode: pcf.prefConfigs.Preferences.FullArchive, PreferredPeersHolder: pcf.network.PreferredPeersHolderHandler(), PayloadValidator: payloadValidator, } - resolversContainerFactory, err := resolverscontainer.NewMetaResolversContainerFactory(resolversContainerFactoryArgs) - if err != nil { - return nil, err - } - return resolversContainerFactory, nil + + return resolverscontainer.NewMetaResolversContainerFactory(resolversContainerFactoryArgs) } func (pcf *processComponentsFactory) newRequestersContainerFactory( @@ -1416,9 +1396,10 @@ func (pcf *processComponentsFactory) newRequestersContainerFactory( return pcf.newStorageRequesters() } + shardCoordinator := pcf.bootstrapComponents.ShardCoordinator() requestersContainerFactoryArgs := requesterscontainer.FactoryArgs{ RequesterConfig: pcf.config.Requesters, - ShardCoordinator: pcf.bootstrapComponents.ShardCoordinator(), + ShardCoordinator: shardCoordinator, Messenger: pcf.network.NetworkMessenger(), Marshaller: 
pcf.coreData.InternalMarshalizer(), Uint64ByteSliceConverter: pcf.coreData.Uint64ByteSliceConverter(), @@ -1429,10 +1410,10 @@ func (pcf *processComponentsFactory) newRequestersContainerFactory( SizeCheckDelta: pcf.config.Marshalizer.SizeCheckDelta, } - if pcf.bootstrapComponents.ShardCoordinator().SelfId() < pcf.bootstrapComponents.ShardCoordinator().NumberOfShards() { + if shardCoordinator.SelfId() < shardCoordinator.NumberOfShards() { return requesterscontainer.NewShardRequestersContainerFactory(requestersContainerFactoryArgs) } - if pcf.bootstrapComponents.ShardCoordinator().SelfId() == core.MetachainShardId { + if shardCoordinator.SelfId() == core.MetachainShardId { return requesterscontainer.NewMetaRequestersContainerFactory(requestersContainerFactoryArgs) } @@ -1448,7 +1429,8 @@ func (pcf *processComponentsFactory) newInterceptorContainerFactory( peerShardMapper *networksharding.PeerShardMapper, hardforkTrigger factory.HardforkTrigger, ) (process.InterceptorsContainerFactory, process.TimeCacher, error) { - if pcf.bootstrapComponents.ShardCoordinator().SelfId() < pcf.bootstrapComponents.ShardCoordinator().NumberOfShards() { + shardCoordinator := pcf.bootstrapComponents.ShardCoordinator() + if shardCoordinator.SelfId() < shardCoordinator.NumberOfShards() { return pcf.newShardInterceptorContainerFactory( headerSigVerifier, headerIntegrityVerifier, @@ -1459,7 +1441,7 @@ func (pcf *processComponentsFactory) newInterceptorContainerFactory( hardforkTrigger, ) } - if pcf.bootstrapComponents.ShardCoordinator().SelfId() == core.MetachainShardId { + if shardCoordinator.SelfId() == core.MetachainShardId { return pcf.newMetaInterceptorContainerFactory( headerSigVerifier, headerIntegrityVerifier, @@ -1496,7 +1478,7 @@ func (pcf *processComponentsFactory) newStorageRequesters() (dataRetriever.Reque storageServiceCreator, err := storageFactory.NewStorageServiceFactory( storageFactory.StorageServiceFactoryArgs{ Config: pcf.config, - PrefsConfig: pcf.prefConfigs, + PrefsConfig: pcf.prefConfigs.Preferences, ShardCoordinator: pcf.bootstrapComponents.ShardCoordinator(), PathManager: pathManager, EpochStartNotifier: manualEpochStartNotifier, @@ -1505,7 +1487,8 @@ func (pcf *processComponentsFactory) newStorageRequesters() (dataRetriever.Reque StorageType: storageFactory.ProcessStorageService, CreateTrieEpochRootHashStorer: false, NodeProcessingMode: common.GetNodeProcessingMode(&pcf.importDBConfig), - SnapshotsEnabled: pcf.snapshotsEnabled, + SnapshotsEnabled: pcf.flagsConfig.SnapshotsEnabled, + RepopulateTokensSupplies: pcf.flagsConfig.RepopulateTokensSupplies, ManagedPeersHolder: pcf.crypto.ManagedPeersHolder(), }, ) @@ -1559,14 +1542,10 @@ func (pcf *processComponentsFactory) createStorageRequestersForMeta( DataPacker: dataPacker, ManualEpochStartNotifier: manualEpochStartNotifier, ChanGracefullyClose: pcf.coreData.ChanStopNodeProcess(), - SnapshotsEnabled: pcf.snapshotsEnabled, - } - requestersContainerFactory, err := storagerequesterscontainer.NewMetaRequestersContainerFactory(requestersContainerFactoryArgs) - if err != nil { - return nil, err + SnapshotsEnabled: pcf.flagsConfig.SnapshotsEnabled, } - return requestersContainerFactory, nil + return storagerequesterscontainer.NewMetaRequestersContainerFactory(requestersContainerFactoryArgs) } func (pcf *processComponentsFactory) createStorageRequestersForShard( @@ -1592,14 +1571,10 @@ func (pcf *processComponentsFactory) createStorageRequestersForShard( DataPacker: dataPacker, ManualEpochStartNotifier: manualEpochStartNotifier, ChanGracefullyClose: 
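// ---------------------------------------------------------------------------
// Review note (illustrative sketch, not part of the patch): the flags previously passed
// to the factory one by one (WorkingDir, Version, SnapshotsEnabled) are now read from a
// single config.ContextFlagsConfig, which also carries the new RepopulateTokensSupplies
// flag used by the storage requesters above. Field names are the ones used in this diff.
// ---------------------------------------------------------------------------
package processingnotes

import "github.com/multiversx/mx-chain-go/config"

func storageRelatedFlags(flags config.ContextFlagsConfig) (snapshotsEnabled bool, repopulateTokensSupplies bool, workingDir string) {
	return flags.SnapshotsEnabled, flags.RepopulateTokensSupplies, flags.WorkingDir
}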
pcf.coreData.ChanStopNodeProcess(), - SnapshotsEnabled: pcf.snapshotsEnabled, - } - requestersContainerFactory, err := storagerequesterscontainer.NewShardRequestersContainerFactory(requestersContainerFactoryArgs) - if err != nil { - return nil, err + SnapshotsEnabled: pcf.flagsConfig.SnapshotsEnabled, } - return requestersContainerFactory, nil + return storagerequesterscontainer.NewShardRequestersContainerFactory(requestersContainerFactoryArgs) } func (pcf *processComponentsFactory) newShardInterceptorContainerFactory( @@ -1702,10 +1677,11 @@ func (pcf *processComponentsFactory) newForkDetector( headerBlackList process.TimeCacher, blockTracker process.BlockTracker, ) (process.ForkDetector, error) { - if pcf.bootstrapComponents.ShardCoordinator().SelfId() < pcf.bootstrapComponents.ShardCoordinator().NumberOfShards() { + shardCoordinator := pcf.bootstrapComponents.ShardCoordinator() + if shardCoordinator.SelfId() < shardCoordinator.NumberOfShards() { return sync.NewShardForkDetector(pcf.coreData.RoundHandler(), headerBlackList, blockTracker, pcf.coreData.GenesisNodesSetup().GetStartTime()) } - if pcf.bootstrapComponents.ShardCoordinator().SelfId() == core.MetachainShardId { + if shardCoordinator.SelfId() == core.MetachainShardId { return sync.NewMetaForkDetector(pcf.coreData.RoundHandler(), headerBlackList, blockTracker, pcf.coreData.GenesisNodesSetup().GetStartTime()) } @@ -1753,7 +1729,7 @@ func (pcf *processComponentsFactory) createExportFactoryHandler( accountsDBs := make(map[state.AccountsDbIdentifier]state.AccountsAdapter) accountsDBs[state.UserAccountsState] = pcf.state.AccountsAdapter() accountsDBs[state.PeerAccountsState] = pcf.state.PeerAccounts() - exportFolder := filepath.Join(pcf.workingDir, hardforkConfig.ImportFolder) + exportFolder := filepath.Join(pcf.flagsConfig.WorkingDir, hardforkConfig.ImportFolder) argsExporter := updateFactory.ArgsExporter{ CoreComponents: pcf.coreData, CryptoComponents: pcf.crypto, @@ -1848,12 +1824,7 @@ func createNetworkShardingCollector( NodesCoordinator: nodesCoordinator, PreferredPeersHolder: preferredPeersHolder, } - psm, err := networksharding.NewPeerShardMapper(arg) - if err != nil { - return nil, err - } - - return psm, nil + return networksharding.NewPeerShardMapper(arg) } func createCache(cacheConfig config.CacheConfig) (storage.Cacher, error) { @@ -1863,85 +1834,91 @@ func createCache(cacheConfig config.CacheConfig) (storage.Cacher, error) { func checkProcessComponentsArgs(args ProcessComponentsFactoryArgs) error { baseErrMessage := "error creating process components" if check.IfNil(args.AccountsParser) { - return fmt.Errorf("%s: %w", baseErrMessage, errErd.ErrNilAccountsParser) + return fmt.Errorf("%s: %w", baseErrMessage, errorsMx.ErrNilAccountsParser) } - if check.IfNil(args.SmartContractParser) { - return fmt.Errorf("%s: %w", baseErrMessage, errErd.ErrNilSmartContractParser) + if check.IfNil(args.GasSchedule) { + return fmt.Errorf("%s: %w", baseErrMessage, errorsMx.ErrNilGasSchedule) } - if args.GasSchedule == nil { - return fmt.Errorf("%s: %w", baseErrMessage, errErd.ErrNilGasSchedule) + if check.IfNil(args.Data) { + return fmt.Errorf("%s: %w", baseErrMessage, errorsMx.ErrNilDataComponentsHolder) } - if check.IfNil(args.NodesCoordinator) { - return fmt.Errorf("%s: %w", baseErrMessage, errErd.ErrNilNodesCoordinator) + if check.IfNil(args.Data.Blockchain()) { + return fmt.Errorf("%s: %w", baseErrMessage, errorsMx.ErrNilBlockChainHandler) } - if check.IfNil(args.Data) { - return fmt.Errorf("%s: %w", baseErrMessage, 
errErd.ErrNilDataComponentsHolder) + if check.IfNil(args.Data.Datapool()) { + return fmt.Errorf("%s: %w", baseErrMessage, errorsMx.ErrNilDataPoolsHolder) + } + if check.IfNil(args.Data.StorageService()) { + return fmt.Errorf("%s: %w", baseErrMessage, errorsMx.ErrNilStorageService) } if check.IfNil(args.CoreData) { - return fmt.Errorf("%s: %w", baseErrMessage, errErd.ErrNilCoreComponentsHolder) + return fmt.Errorf("%s: %w", baseErrMessage, errorsMx.ErrNilCoreComponentsHolder) } - if args.CoreData.EconomicsData() == nil { - return fmt.Errorf("%s: %w", baseErrMessage, errErd.ErrNilEconomicsData) + if check.IfNil(args.CoreData.EconomicsData()) { + return fmt.Errorf("%s: %w", baseErrMessage, errorsMx.ErrNilEconomicsData) } - if check.IfNil(args.CoreData.RoundHandler()) { - return fmt.Errorf("%s: %w", baseErrMessage, errErd.ErrNilRoundHandler) + if check.IfNil(args.CoreData.GenesisNodesSetup()) { + return fmt.Errorf("%s: %w", baseErrMessage, errorsMx.ErrNilGenesisNodesSetupHandler) } - if check.IfNil(args.Crypto) { - return fmt.Errorf("%s: %w", baseErrMessage, errErd.ErrNilCryptoComponentsHolder) + if check.IfNil(args.CoreData.AddressPubKeyConverter()) { + return fmt.Errorf("%s: %w", baseErrMessage, errorsMx.ErrNilAddressPublicKeyConverter) } - if check.IfNil(args.State) { - return fmt.Errorf("%s: %w", baseErrMessage, errErd.ErrNilStateComponentsHolder) + if check.IfNil(args.CoreData.EpochNotifier()) { + return fmt.Errorf("%s: %w", baseErrMessage, errorsMx.ErrNilEpochNotifier) } - if check.IfNil(args.Network) { - return fmt.Errorf("%s: %w", baseErrMessage, errErd.ErrNilNetworkComponentsHolder) + if check.IfNil(args.CoreData.ValidatorPubKeyConverter()) { + return fmt.Errorf("%s: %w", baseErrMessage, errorsMx.ErrNilPubKeyConverter) } - if check.IfNil(args.RequestedItemsHandler) { - return fmt.Errorf("%s: %w", baseErrMessage, errErd.ErrNilRequestedItemHandler) + if check.IfNil(args.CoreData.InternalMarshalizer()) { + return fmt.Errorf("%s: %w", baseErrMessage, errorsMx.ErrNilInternalMarshalizer) } - if check.IfNil(args.WhiteListHandler) { - return fmt.Errorf("%s: %w", baseErrMessage, errErd.ErrNilWhiteListHandler) + if check.IfNil(args.CoreData.Uint64ByteSliceConverter()) { + return fmt.Errorf("%s: %w", baseErrMessage, errorsMx.ErrNilUint64ByteSliceConverter) } - if check.IfNil(args.WhiteListerVerifiedTxs) { - return fmt.Errorf("%s: %w", baseErrMessage, errErd.ErrNilWhiteListVerifiedTxs) + if check.IfNil(args.Crypto) { + return fmt.Errorf("%s: %w", baseErrMessage, errorsMx.ErrNilCryptoComponentsHolder) } - if check.IfNil(args.CoreData.EpochStartNotifierWithConfirm()) { - return fmt.Errorf("%s: %w", baseErrMessage, errErd.ErrNilEpochStartNotifier) + if check.IfNil(args.Crypto.BlockSignKeyGen()) { + return fmt.Errorf("%s: %w", baseErrMessage, errorsMx.ErrNilBlockSignKeyGen) } - if check.IfNil(args.CoreData.Rater()) { - return fmt.Errorf("%s: %w", baseErrMessage, errErd.ErrNilRater) + if check.IfNil(args.State) { + return fmt.Errorf("%s: %w", baseErrMessage, errorsMx.ErrNilStateComponentsHolder) } - if check.IfNil(args.CoreData.RatingsData()) { - return fmt.Errorf("%s: %w", baseErrMessage, errErd.ErrNilRatingData) + if check.IfNil(args.State.AccountsAdapter()) { + return fmt.Errorf("%s: %w", baseErrMessage, errorsMx.ErrNilAccountsAdapter) } - if check.IfNil(args.CoreData.ValidatorPubKeyConverter()) { - return fmt.Errorf("%s: %w", baseErrMessage, errErd.ErrNilPubKeyConverter) + if check.IfNil(args.Network) { + return fmt.Errorf("%s: %w", baseErrMessage, errorsMx.ErrNilNetworkComponentsHolder) } - if 
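// ---------------------------------------------------------------------------
// Review note (illustrative sketch, not part of the patch): checkProcessComponentsArgs
// above now validates the nested holders as well (blockchain, data pool, storage service,
// converters, key generators, ...), always wrapping the dedicated error into the same
// base message. The helper below isolates the data-components part of that pattern.
// ---------------------------------------------------------------------------
package processingnotes

import (
	"fmt"

	"github.com/multiversx/mx-chain-core-go/core/check"
	errorsMx "github.com/multiversx/mx-chain-go/errors"
	"github.com/multiversx/mx-chain-go/factory"
)

func checkDataComponents(baseErrMessage string, dataComponents factory.DataComponentsHolder) error {
	if check.IfNil(dataComponents) {
		return fmt.Errorf("%s: %w", baseErrMessage, errorsMx.ErrNilDataComponentsHolder)
	}
	if check.IfNil(dataComponents.Blockchain()) {
		return fmt.Errorf("%s: %w", baseErrMessage, errorsMx.ErrNilBlockChainHandler)
	}
	if check.IfNil(dataComponents.Datapool()) {
		return fmt.Errorf("%s: %w", baseErrMessage, errorsMx.ErrNilDataPoolsHolder)
	}
	if check.IfNil(dataComponents.StorageService()) {
		return fmt.Errorf("%s: %w", baseErrMessage, errorsMx.ErrNilStorageService)
	}

	return nil
}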
args.SystemSCConfig == nil { - return fmt.Errorf("%s: %w", baseErrMessage, errErd.ErrNilSystemSCConfig) + if check.IfNil(args.Network.NetworkMessenger()) { + return fmt.Errorf("%s: %w", baseErrMessage, errorsMx.ErrNilMessenger) } - if check.IfNil(args.CoreData.EpochNotifier()) { - return fmt.Errorf("%s: %w", baseErrMessage, errErd.ErrNilEpochNotifier) + if check.IfNil(args.Network.InputAntiFloodHandler()) { + return fmt.Errorf("%s: %w", baseErrMessage, errorsMx.ErrNilInputAntiFloodHandler) } - if check.IfNil(args.CoreData.EnableEpochsHandler()) { - return fmt.Errorf("%s: %w", baseErrMessage, errErd.ErrNilEnableEpochsHandler) + if args.SystemSCConfig == nil { + return fmt.Errorf("%s: %w", baseErrMessage, errorsMx.ErrNilSystemSCConfig) } if check.IfNil(args.BootstrapComponents) { - return fmt.Errorf("%s: %w", baseErrMessage, errErd.ErrNilBootstrapComponentsHolder) + return fmt.Errorf("%s: %w", baseErrMessage, errorsMx.ErrNilBootstrapComponentsHolder) } if check.IfNil(args.BootstrapComponents.ShardCoordinator()) { - return fmt.Errorf("%s: %w", baseErrMessage, errErd.ErrNilShardCoordinator) + return fmt.Errorf("%s: %w", baseErrMessage, errorsMx.ErrNilShardCoordinator) + } + if check.IfNil(args.BootstrapComponents.EpochBootstrapParams()) { + return fmt.Errorf("%s: %w", baseErrMessage, errorsMx.ErrNilBootstrapParamsHandler) } if check.IfNil(args.StatusComponents) { - return fmt.Errorf("%s: %w", baseErrMessage, errErd.ErrNilStatusComponentsHolder) + return fmt.Errorf("%s: %w", baseErrMessage, errorsMx.ErrNilStatusComponentsHolder) } - if check.IfNil(args.StatusCoreComponents) { - return fmt.Errorf("%s: %w", baseErrMessage, errErd.ErrNilStatusCoreComponents) + if check.IfNil(args.StatusComponents.OutportHandler()) { + return fmt.Errorf("%s: %w", baseErrMessage, errorsMx.ErrNilOutportHandler) } - if check.IfNil(args.StatusCoreComponents.AppStatusHandler()) { - return fmt.Errorf("%s: %w", baseErrMessage, errErd.ErrNilAppStatusHandler) + if check.IfNil(args.HistoryRepo) { + return fmt.Errorf("%s: %w", baseErrMessage, errorsMx.ErrNilHistoryRepository) } - if check.IfNil(args.Crypto.ManagedPeersHolder()) { - return fmt.Errorf("%s: %w", baseErrMessage, errErd.ErrNilManagedPeersHolder) + if check.IfNil(args.StatusCoreComponents) { + return fmt.Errorf("%s: %w", baseErrMessage, errorsMx.ErrNilStatusCoreComponents) } return nil @@ -1983,11 +1960,42 @@ func (pc *processComponents) Close() error { return nil } -func unwrapTxs(txs map[string]data.TransactionHandlerWithGasUsedAndFee) map[string]data.TransactionHandler { - output := make(map[string]data.TransactionHandler) - for hash, wrappedTx := range txs { - output[hash] = wrappedTx.GetTxHandler() +func wrapTxsInfo(txs map[string]*outport.TxInfo) map[string]data.TransactionHandler { + ret := make(map[string]data.TransactionHandler, len(txs)) + for hash, tx := range txs { + ret[hash] = tx.Transaction + } + + return ret +} + +func wrapSCRsInfo(scrs map[string]*outport.SCRInfo) map[string]data.TransactionHandler { + ret := make(map[string]data.TransactionHandler, len(scrs)) + for hash, scr := range scrs { + ret[hash] = scr.SmartContractResult + } + + return ret +} + +func wrapReceipts(receipts map[string]*receipt.Receipt) map[string]data.TransactionHandler { + ret := make(map[string]data.TransactionHandler, len(receipts)) + for hash, r := range receipts { + ret[hash] = r + } + + return ret +} + +func wrapLogs(logs []*outport.LogData) []*data.LogData { + ret := make([]*data.LogData, len(logs)) + + for idx, logData := range logs { + ret[idx] = &data.LogData{ + 
LogHandler: logData.Log, + TxHash: logData.TxHash, + } } - return output + return ret } diff --git a/factory/processing/processComponentsHandler_test.go b/factory/processing/processComponentsHandler_test.go index d762b551d5a..534fef02ec8 100644 --- a/factory/processing/processComponentsHandler_test.go +++ b/factory/processing/processComponentsHandler_test.go @@ -3,155 +3,175 @@ package processing_test import ( "testing" - "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" - "github.com/multiversx/mx-chain-go/factory/mock" + errorsMx "github.com/multiversx/mx-chain-go/errors" + "github.com/multiversx/mx-chain-go/factory" processComp "github.com/multiversx/mx-chain-go/factory/processing" - componentsMock "github.com/multiversx/mx-chain-go/testscommon/components" "github.com/stretchr/testify/require" ) -// ------------ Test TestManagedProcessComponents -------------------- -func TestManagedProcessComponents_CreateWithInvalidArgsShouldErr(t *testing.T) { +func TestNewManagedProcessComponents(t *testing.T) { t.Parallel() - shardCoordinator := mock.NewMultiShardsCoordinatorMock(2) - processArgs := componentsMock.GetProcessComponentsFactoryArgs(shardCoordinator) - _ = processArgs.CoreData.SetInternalMarshalizer(nil) - processComponentsFactory, _ := processComp.NewProcessComponentsFactory(processArgs) - managedProcessComponents, err := processComp.NewManagedProcessComponents(processComponentsFactory) - require.NoError(t, err) - err = managedProcessComponents.Create() - require.Error(t, err) - require.Nil(t, managedProcessComponents.NodesCoordinator()) + t.Run("nil factory should error", func(t *testing.T) { + t.Parallel() + + managedProcessComponents, err := processComp.NewManagedProcessComponents(nil) + require.Equal(t, errorsMx.ErrNilProcessComponentsFactory, err) + require.Nil(t, managedProcessComponents) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + processComponentsFactory, _ := processComp.NewProcessComponentsFactory(createMockProcessComponentsFactoryArgs()) + managedProcessComponents, err := processComp.NewManagedProcessComponents(processComponentsFactory) + require.NoError(t, err) + require.NotNil(t, managedProcessComponents) + }) } -func TestManagedProcessComponents_CreateShouldWork(t *testing.T) { +func TestManagedProcessComponents_Create(t *testing.T) { t.Parallel() - coreComponents := componentsMock.GetCoreComponents() - shardCoordinator := mock.NewMultiShardsCoordinatorMock(1) - shardCoordinator.SelfIDCalled = func() uint32 { - return core.MetachainShardId - } - shardCoordinator.ComputeIdCalled = func(address []byte) uint32 { - if core.IsSmartContractOnMetachain(address[len(address)-1:], address) { - return core.MetachainShardId - } - - return 0 - } - - shardCoordinator.CurrentShard = core.MetachainShardId - dataComponents := componentsMock.GetDataComponents(coreComponents, shardCoordinator) - cryptoComponents := componentsMock.GetCryptoComponents(coreComponents) - networkComponents := componentsMock.GetNetworkComponents(cryptoComponents) - stateComponents := componentsMock.GetStateComponents(coreComponents) - processArgs := componentsMock.GetProcessArgs( - shardCoordinator, - coreComponents, - dataComponents, - cryptoComponents, - stateComponents, - networkComponents, - ) - - componentsMock.SetShardCoordinator(t, processArgs.BootstrapComponents, shardCoordinator) - - processComponentsFactory, err := processComp.NewProcessComponentsFactory(processArgs) - require.Nil(t, err) - managedProcessComponents, err := 
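// ---------------------------------------------------------------------------
// Review note (illustrative sketch, not part of the patch): the new wrap* helpers above
// unwrap the outport wrapper types (TxInfo, SCRInfo, LogData) back into the plain
// data.TransactionHandler / data.LogData values expected by the history repository.
// The example below shows the same unwrapping for transactions; the hash and nonce
// are made-up sample values.
// ---------------------------------------------------------------------------
package processingnotes

import (
	"github.com/multiversx/mx-chain-core-go/data"
	"github.com/multiversx/mx-chain-core-go/data/outport"
	"github.com/multiversx/mx-chain-core-go/data/transaction"
)

func exampleUnwrapTransactions() map[string]data.TransactionHandler {
	pool := map[string]*outport.TxInfo{
		"sample-tx-hash": {Transaction: &transaction.Transaction{Nonce: 7}},
	}

	unwrapped := make(map[string]data.TransactionHandler, len(pool))
	for hash, txInfo := range pool {
		// same field access as wrapTxsInfo above
		unwrapped[hash] = txInfo.Transaction
	}

	return unwrapped
}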
processComp.NewManagedProcessComponents(processComponentsFactory) - require.NoError(t, err) - require.True(t, check.IfNil(managedProcessComponents.NodesCoordinator())) - require.True(t, check.IfNil(managedProcessComponents.InterceptorsContainer())) - require.True(t, check.IfNil(managedProcessComponents.ResolversContainer())) - require.True(t, check.IfNil(managedProcessComponents.RequestersFinder())) - require.True(t, check.IfNil(managedProcessComponents.RoundHandler())) - require.True(t, check.IfNil(managedProcessComponents.ForkDetector())) - require.True(t, check.IfNil(managedProcessComponents.BlockProcessor())) - require.True(t, check.IfNil(managedProcessComponents.EpochStartTrigger())) - require.True(t, check.IfNil(managedProcessComponents.EpochStartNotifier())) - require.True(t, check.IfNil(managedProcessComponents.BlackListHandler())) - require.True(t, check.IfNil(managedProcessComponents.BootStorer())) - require.True(t, check.IfNil(managedProcessComponents.HeaderSigVerifier())) - require.True(t, check.IfNil(managedProcessComponents.ValidatorsStatistics())) - require.True(t, check.IfNil(managedProcessComponents.ValidatorsProvider())) - require.True(t, check.IfNil(managedProcessComponents.BlockTracker())) - require.True(t, check.IfNil(managedProcessComponents.PendingMiniBlocksHandler())) - require.True(t, check.IfNil(managedProcessComponents.RequestHandler())) - require.True(t, check.IfNil(managedProcessComponents.TxLogsProcessor())) - require.True(t, check.IfNil(managedProcessComponents.HeaderConstructionValidator())) - require.True(t, check.IfNil(managedProcessComponents.HeaderIntegrityVerifier())) - require.True(t, check.IfNil(managedProcessComponents.CurrentEpochProvider())) - require.True(t, check.IfNil(managedProcessComponents.NodeRedundancyHandler())) - require.True(t, check.IfNil(managedProcessComponents.WhiteListHandler())) - require.True(t, check.IfNil(managedProcessComponents.WhiteListerVerifiedTxs())) - require.True(t, check.IfNil(managedProcessComponents.RequestedItemsHandler())) - require.True(t, check.IfNil(managedProcessComponents.ImportStartHandler())) - require.True(t, check.IfNil(managedProcessComponents.HistoryRepository())) - require.True(t, check.IfNil(managedProcessComponents.TransactionSimulatorProcessor())) - require.True(t, check.IfNil(managedProcessComponents.FallbackHeaderValidator())) - require.True(t, check.IfNil(managedProcessComponents.PeerShardMapper())) - require.True(t, check.IfNil(managedProcessComponents.ShardCoordinator())) - require.True(t, check.IfNil(managedProcessComponents.TxsSenderHandler())) - require.True(t, check.IfNil(managedProcessComponents.HardforkTrigger())) - require.True(t, check.IfNil(managedProcessComponents.ProcessedMiniBlocksTracker())) - - err = managedProcessComponents.Create() + t.Run("invalid params should error", func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + args.Config.PublicKeyPeerId.Type = "invalid" + processComponentsFactory, _ := processComp.NewProcessComponentsFactory(args) + managedProcessComponents, _ := processComp.NewManagedProcessComponents(processComponentsFactory) + require.NotNil(t, managedProcessComponents) + + err := managedProcessComponents.Create() + require.Error(t, err) + }) + t.Run("should work with getters", func(t *testing.T) { + t.Parallel() + + processComponentsFactory, _ := processComp.NewProcessComponentsFactory(createMockProcessComponentsFactoryArgs()) + managedProcessComponents, _ := processComp.NewManagedProcessComponents(processComponentsFactory) + 
require.NotNil(t, managedProcessComponents) + + require.True(t, check.IfNil(managedProcessComponents.NodesCoordinator())) + require.True(t, check.IfNil(managedProcessComponents.InterceptorsContainer())) + require.True(t, check.IfNil(managedProcessComponents.ResolversContainer())) + require.True(t, check.IfNil(managedProcessComponents.RequestersFinder())) + require.True(t, check.IfNil(managedProcessComponents.RoundHandler())) + require.True(t, check.IfNil(managedProcessComponents.ForkDetector())) + require.True(t, check.IfNil(managedProcessComponents.BlockProcessor())) + require.True(t, check.IfNil(managedProcessComponents.EpochStartTrigger())) + require.True(t, check.IfNil(managedProcessComponents.EpochStartNotifier())) + require.True(t, check.IfNil(managedProcessComponents.BlackListHandler())) + require.True(t, check.IfNil(managedProcessComponents.BootStorer())) + require.True(t, check.IfNil(managedProcessComponents.HeaderSigVerifier())) + require.True(t, check.IfNil(managedProcessComponents.ValidatorsStatistics())) + require.True(t, check.IfNil(managedProcessComponents.ValidatorsProvider())) + require.True(t, check.IfNil(managedProcessComponents.BlockTracker())) + require.True(t, check.IfNil(managedProcessComponents.PendingMiniBlocksHandler())) + require.True(t, check.IfNil(managedProcessComponents.RequestHandler())) + require.True(t, check.IfNil(managedProcessComponents.TxLogsProcessor())) + require.True(t, check.IfNil(managedProcessComponents.HeaderConstructionValidator())) + require.True(t, check.IfNil(managedProcessComponents.HeaderIntegrityVerifier())) + require.True(t, check.IfNil(managedProcessComponents.CurrentEpochProvider())) + require.True(t, check.IfNil(managedProcessComponents.NodeRedundancyHandler())) + require.True(t, check.IfNil(managedProcessComponents.WhiteListHandler())) + require.True(t, check.IfNil(managedProcessComponents.WhiteListerVerifiedTxs())) + require.True(t, check.IfNil(managedProcessComponents.RequestedItemsHandler())) + require.True(t, check.IfNil(managedProcessComponents.ImportStartHandler())) + require.True(t, check.IfNil(managedProcessComponents.HistoryRepository())) + require.True(t, check.IfNil(managedProcessComponents.TransactionSimulatorProcessor())) + require.True(t, check.IfNil(managedProcessComponents.FallbackHeaderValidator())) + require.True(t, check.IfNil(managedProcessComponents.PeerShardMapper())) + require.True(t, check.IfNil(managedProcessComponents.ShardCoordinator())) + require.True(t, check.IfNil(managedProcessComponents.TxsSenderHandler())) + require.True(t, check.IfNil(managedProcessComponents.HardforkTrigger())) + require.True(t, check.IfNil(managedProcessComponents.ProcessedMiniBlocksTracker())) + require.True(t, check.IfNil(managedProcessComponents.AccountsParser())) + require.True(t, check.IfNil(managedProcessComponents.ScheduledTxsExecutionHandler())) + require.True(t, check.IfNil(managedProcessComponents.ESDTDataStorageHandlerForAPI())) + require.True(t, check.IfNil(managedProcessComponents.ReceiptsRepository())) + + err := managedProcessComponents.Create() + require.NoError(t, err) + require.False(t, check.IfNil(managedProcessComponents.NodesCoordinator())) + require.False(t, check.IfNil(managedProcessComponents.InterceptorsContainer())) + require.False(t, check.IfNil(managedProcessComponents.ResolversContainer())) + require.False(t, check.IfNil(managedProcessComponents.RequestersFinder())) + require.False(t, check.IfNil(managedProcessComponents.RoundHandler())) + require.False(t, 
check.IfNil(managedProcessComponents.ForkDetector())) + require.False(t, check.IfNil(managedProcessComponents.BlockProcessor())) + require.False(t, check.IfNil(managedProcessComponents.EpochStartTrigger())) + require.False(t, check.IfNil(managedProcessComponents.EpochStartNotifier())) + require.False(t, check.IfNil(managedProcessComponents.BlackListHandler())) + require.False(t, check.IfNil(managedProcessComponents.BootStorer())) + require.False(t, check.IfNil(managedProcessComponents.HeaderSigVerifier())) + require.False(t, check.IfNil(managedProcessComponents.ValidatorsStatistics())) + require.False(t, check.IfNil(managedProcessComponents.ValidatorsProvider())) + require.False(t, check.IfNil(managedProcessComponents.BlockTracker())) + require.False(t, check.IfNil(managedProcessComponents.PendingMiniBlocksHandler())) + require.False(t, check.IfNil(managedProcessComponents.RequestHandler())) + require.False(t, check.IfNil(managedProcessComponents.TxLogsProcessor())) + require.False(t, check.IfNil(managedProcessComponents.HeaderConstructionValidator())) + require.False(t, check.IfNil(managedProcessComponents.HeaderIntegrityVerifier())) + require.False(t, check.IfNil(managedProcessComponents.CurrentEpochProvider())) + require.False(t, check.IfNil(managedProcessComponents.NodeRedundancyHandler())) + require.False(t, check.IfNil(managedProcessComponents.WhiteListHandler())) + require.False(t, check.IfNil(managedProcessComponents.WhiteListerVerifiedTxs())) + require.False(t, check.IfNil(managedProcessComponents.RequestedItemsHandler())) + require.False(t, check.IfNil(managedProcessComponents.ImportStartHandler())) + require.False(t, check.IfNil(managedProcessComponents.HistoryRepository())) + require.False(t, check.IfNil(managedProcessComponents.TransactionSimulatorProcessor())) + require.False(t, check.IfNil(managedProcessComponents.FallbackHeaderValidator())) + require.False(t, check.IfNil(managedProcessComponents.PeerShardMapper())) + require.False(t, check.IfNil(managedProcessComponents.ShardCoordinator())) + require.False(t, check.IfNil(managedProcessComponents.TxsSenderHandler())) + require.False(t, check.IfNil(managedProcessComponents.HardforkTrigger())) + require.False(t, check.IfNil(managedProcessComponents.ProcessedMiniBlocksTracker())) + require.False(t, check.IfNil(managedProcessComponents.AccountsParser())) + require.False(t, check.IfNil(managedProcessComponents.ScheduledTxsExecutionHandler())) + require.False(t, check.IfNil(managedProcessComponents.ESDTDataStorageHandlerForAPI())) + require.False(t, check.IfNil(managedProcessComponents.ReceiptsRepository())) + + require.Equal(t, factory.ProcessComponentsName, managedProcessComponents.String()) + }) +} + +func TestManagedProcessComponents_CheckSubcomponents(t *testing.T) { + t.Parallel() + + processComponentsFactory, _ := processComp.NewProcessComponentsFactory(createMockProcessComponentsFactoryArgs()) + managedProcessComponents, _ := processComp.NewManagedProcessComponents(processComponentsFactory) + require.NotNil(t, managedProcessComponents) + require.Equal(t, errorsMx.ErrNilProcessComponents, managedProcessComponents.CheckSubcomponents()) + + err := managedProcessComponents.Create() require.NoError(t, err) - require.False(t, check.IfNil(managedProcessComponents.NodesCoordinator())) - require.False(t, check.IfNil(managedProcessComponents.InterceptorsContainer())) - require.False(t, check.IfNil(managedProcessComponents.ResolversContainer())) - require.False(t, check.IfNil(managedProcessComponents.RequestersFinder())) - 
require.False(t, check.IfNil(managedProcessComponents.RoundHandler())) - require.False(t, check.IfNil(managedProcessComponents.ForkDetector())) - require.False(t, check.IfNil(managedProcessComponents.BlockProcessor())) - require.False(t, check.IfNil(managedProcessComponents.EpochStartTrigger())) - require.False(t, check.IfNil(managedProcessComponents.EpochStartNotifier())) - require.False(t, check.IfNil(managedProcessComponents.BlackListHandler())) - require.False(t, check.IfNil(managedProcessComponents.BootStorer())) - require.False(t, check.IfNil(managedProcessComponents.HeaderSigVerifier())) - require.False(t, check.IfNil(managedProcessComponents.ValidatorsStatistics())) - require.False(t, check.IfNil(managedProcessComponents.ValidatorsProvider())) - require.False(t, check.IfNil(managedProcessComponents.BlockTracker())) - require.False(t, check.IfNil(managedProcessComponents.PendingMiniBlocksHandler())) - require.False(t, check.IfNil(managedProcessComponents.RequestHandler())) - require.False(t, check.IfNil(managedProcessComponents.TxLogsProcessor())) - require.False(t, check.IfNil(managedProcessComponents.HeaderConstructionValidator())) - require.False(t, check.IfNil(managedProcessComponents.HeaderIntegrityVerifier())) - require.False(t, check.IfNil(managedProcessComponents.CurrentEpochProvider())) - require.False(t, check.IfNil(managedProcessComponents.NodeRedundancyHandler())) - require.False(t, check.IfNil(managedProcessComponents.WhiteListHandler())) - require.False(t, check.IfNil(managedProcessComponents.WhiteListerVerifiedTxs())) - require.False(t, check.IfNil(managedProcessComponents.RequestedItemsHandler())) - require.False(t, check.IfNil(managedProcessComponents.ImportStartHandler())) - require.False(t, check.IfNil(managedProcessComponents.HistoryRepository())) - require.False(t, check.IfNil(managedProcessComponents.TransactionSimulatorProcessor())) - require.False(t, check.IfNil(managedProcessComponents.FallbackHeaderValidator())) - require.False(t, check.IfNil(managedProcessComponents.PeerShardMapper())) - require.False(t, check.IfNil(managedProcessComponents.ShardCoordinator())) - require.False(t, check.IfNil(managedProcessComponents.TxsSenderHandler())) - require.False(t, check.IfNil(managedProcessComponents.HardforkTrigger())) - require.False(t, check.IfNil(managedProcessComponents.ProcessedMiniBlocksTracker())) - - nodeSkBytes, err := cryptoComponents.PrivateKey().ToByteArray() - require.Nil(t, err) - observerSkBytes, err := managedProcessComponents.NodeRedundancyHandler().ObserverPrivateKey().ToByteArray() - require.Nil(t, err) - require.NotEqual(t, nodeSkBytes, observerSkBytes) + + require.Nil(t, managedProcessComponents.CheckSubcomponents()) } func TestManagedProcessComponents_Close(t *testing.T) { t.Parallel() - shardCoordinator := mock.NewMultiShardsCoordinatorMock(2) - processArgs := componentsMock.GetProcessComponentsFactoryArgs(shardCoordinator) - processComponentsFactory, _ := processComp.NewProcessComponentsFactory(processArgs) + processComponentsFactory, _ := processComp.NewProcessComponentsFactory(createMockProcessComponentsFactoryArgs()) managedProcessComponents, _ := processComp.NewManagedProcessComponents(processComponentsFactory) err := managedProcessComponents.Create() require.NoError(t, err) err = managedProcessComponents.Close() require.NoError(t, err) - require.Nil(t, managedProcessComponents.NodesCoordinator()) + + err = managedProcessComponents.Close() + require.NoError(t, err) +} + +func TestManagedProcessComponents_IsInterfaceNil(t *testing.T) { + 
t.Parallel() + + managedProcessComponents, _ := processComp.NewManagedProcessComponents(nil) + require.True(t, managedProcessComponents.IsInterfaceNil()) + + processComponentsFactory, _ := processComp.NewProcessComponentsFactory(createMockProcessComponentsFactoryArgs()) + managedProcessComponents, _ = processComp.NewManagedProcessComponents(processComponentsFactory) + require.False(t, managedProcessComponents.IsInterfaceNil()) } diff --git a/factory/processing/processComponents_test.go b/factory/processing/processComponents_test.go index ebae3a2c893..5287ceb46ff 100644 --- a/factory/processing/processComponents_test.go +++ b/factory/processing/processComponents_test.go @@ -1,102 +1,1027 @@ package processing_test import ( + "bytes" + "context" + "errors" + "math/big" "strings" "sync" "testing" + "github.com/multiversx/mx-chain-core-go/core/keyValStorage" coreData "github.com/multiversx/mx-chain-core-go/data" - "github.com/multiversx/mx-chain-core-go/data/block" dataBlock "github.com/multiversx/mx-chain-core-go/data/block" + "github.com/multiversx/mx-chain-core-go/data/endProcess" outportCore "github.com/multiversx/mx-chain-core-go/data/outport" + "github.com/multiversx/mx-chain-core-go/hashing/blake2b" + "github.com/multiversx/mx-chain-core-go/hashing/keccak" + "github.com/multiversx/mx-chain-core-go/marshal" + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/common/factory" + "github.com/multiversx/mx-chain-go/config" + errorsMx "github.com/multiversx/mx-chain-go/errors" "github.com/multiversx/mx-chain-go/factory/mock" processComp "github.com/multiversx/mx-chain-go/factory/processing" "github.com/multiversx/mx-chain-go/genesis" + genesisMocks "github.com/multiversx/mx-chain-go/genesis/mock" + testsMocks "github.com/multiversx/mx-chain-go/integrationTests/mock" + "github.com/multiversx/mx-chain-go/p2p" "github.com/multiversx/mx-chain-go/process" - componentsMock "github.com/multiversx/mx-chain-go/testscommon/components" + "github.com/multiversx/mx-chain-go/sharding" + "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" + mxState "github.com/multiversx/mx-chain-go/state" + "github.com/multiversx/mx-chain-go/testscommon" + "github.com/multiversx/mx-chain-go/testscommon/bootstrapMocks" + "github.com/multiversx/mx-chain-go/testscommon/components" + "github.com/multiversx/mx-chain-go/testscommon/cryptoMocks" + "github.com/multiversx/mx-chain-go/testscommon/dataRetriever" + "github.com/multiversx/mx-chain-go/testscommon/dblookupext" + "github.com/multiversx/mx-chain-go/testscommon/economicsmocks" + "github.com/multiversx/mx-chain-go/testscommon/epochNotifier" + factoryMocks "github.com/multiversx/mx-chain-go/testscommon/factory" + "github.com/multiversx/mx-chain-go/testscommon/genericMocks" + "github.com/multiversx/mx-chain-go/testscommon/guardianMocks" "github.com/multiversx/mx-chain-go/testscommon/mainFactoryMocks" + "github.com/multiversx/mx-chain-go/testscommon/nodeTypeProviderMock" "github.com/multiversx/mx-chain-go/testscommon/outport" - storageStubs "github.com/multiversx/mx-chain-go/testscommon/storage" - "github.com/stretchr/testify/assert" + "github.com/multiversx/mx-chain-go/testscommon/p2pmocks" + "github.com/multiversx/mx-chain-go/testscommon/shardingMocks" + "github.com/multiversx/mx-chain-go/testscommon/state" + "github.com/multiversx/mx-chain-go/testscommon/statusHandler" + updateMocks "github.com/multiversx/mx-chain-go/update/mock" "github.com/stretchr/testify/require" ) -// ------------ Test TestProcessComponents -------------------- 
-func TestProcessComponents_CloseShouldWork(t *testing.T) { - t.Parallel() +const ( + testingProtocolSustainabilityAddress = "erd1932eft30w753xyvme8d49qejgkjc09n5e49w4mwdjtm0neld797su0dlxp" +) + +var ( + gasSchedule, _ = common.LoadGasScheduleConfig("../../cmd/node/config/gasSchedules/gasScheduleV1.toml") + addrPubKeyConv, _ = factory.NewPubkeyConverter(config.PubkeyConfig{ + Length: 32, + Type: "bech32", + SignatureLength: 0, + Hrp: "erd", + }) + valPubKeyConv, _ = factory.NewPubkeyConverter(config.PubkeyConfig{ + Length: 96, + Type: "hex", + SignatureLength: 48, + }) +) - shardCoordinator := mock.NewMultiShardsCoordinatorMock(2) - processArgs := componentsMock.GetProcessComponentsFactoryArgs(shardCoordinator) - pcf, err := processComp.NewProcessComponentsFactory(processArgs) - require.Nil(t, err) +func createMockProcessComponentsFactoryArgs() processComp.ProcessComponentsFactoryArgs { + + args := processComp.ProcessComponentsFactoryArgs{ + Config: testscommon.GetGeneralConfig(), + EpochConfig: config.EpochConfig{}, + PrefConfigs: config.Preferences{}, + ImportDBConfig: config.ImportDbConfig{}, + FlagsConfig: config.ContextFlagsConfig{ + Version: "v1.0.0", + }, + AccountsParser: &mock.AccountsParserStub{ + GenerateInitialTransactionsCalled: func(shardCoordinator sharding.Coordinator, initialIndexingData map[uint32]*genesis.IndexingData) ([]*dataBlock.MiniBlock, map[uint32]*outportCore.TransactionPool, error) { + return []*dataBlock.MiniBlock{ + {}, + }, + map[uint32]*outportCore.TransactionPool{ + 0: {}, + }, nil + }, + }, + SmartContractParser: &mock.SmartContractParserStub{}, + GasSchedule: &testscommon.GasScheduleNotifierMock{ + GasSchedule: gasSchedule, + }, + NodesCoordinator: &shardingMocks.NodesCoordinatorStub{}, + RequestedItemsHandler: &testscommon.RequestedItemsHandlerStub{}, + WhiteListHandler: &testscommon.WhiteListHandlerStub{}, + WhiteListerVerifiedTxs: &testscommon.WhiteListHandlerStub{}, + MaxRating: 100, + SystemSCConfig: &config.SystemSmartContractsConfig{ + ESDTSystemSCConfig: config.ESDTSystemSCConfig{ + BaseIssuingCost: "1000", + OwnerAddress: "erd1fpkcgel4gcmh8zqqdt043yfcn5tyx8373kg6q2qmkxzu4dqamc0swts65c", + }, + GovernanceSystemSCConfig: config.GovernanceSystemSCConfig{ + V1: config.GovernanceSystemSCConfigV1{ + ProposalCost: "500", + NumNodes: 100, + MinQuorum: 50, + MinPassThreshold: 50, + MinVetoThreshold: 50, + }, + Active: config.GovernanceSystemSCConfigActive{ + ProposalCost: "500", + MinQuorum: 0.5, + MinPassThreshold: 0.5, + MinVetoThreshold: 0.5, + }, + OwnerAddress: "erd1vxy22x0fj4zv6hktmydg8vpfh6euv02cz4yg0aaws6rrad5a5awqgqky80", + }, + StakingSystemSCConfig: config.StakingSystemSCConfig{ + GenesisNodePrice: "2500000000000000000000", + MinStakeValue: "1", + UnJailValue: "1", + MinStepValue: "1", + UnBondPeriod: 0, + NumRoundsWithoutBleed: 0, + MaximumPercentageToBleed: 0, + BleedPercentagePerRound: 0, + MaxNumberOfNodesForStake: 10, + ActivateBLSPubKeyMessageVerification: false, + MinUnstakeTokensValue: "1", + }, + DelegationManagerSystemSCConfig: config.DelegationManagerSystemSCConfig{ + MinCreationDeposit: "100", + MinStakeAmount: "100", + ConfigChangeAddress: "erd1vxy22x0fj4zv6hktmydg8vpfh6euv02cz4yg0aaws6rrad5a5awqgqky80", + }, + DelegationSystemSCConfig: config.DelegationSystemSCConfig{ + MinServiceFee: 0, + MaxServiceFee: 100, + }, + }, + ImportStartHandler: &testscommon.ImportStartHandlerStub{}, + HistoryRepo: &dblookupext.HistoryRepositoryStub{}, + Data: &testsMocks.DataComponentsStub{ + DataPool: dataRetriever.NewPoolsHolderMock(), + BlockChain: 
&testscommon.ChainHandlerStub{ + GetGenesisHeaderHashCalled: func() []byte { + return []byte("genesis hash") + }, + GetGenesisHeaderCalled: func() coreData.HeaderHandler { + return &testscommon.HeaderHandlerStub{} + }, + }, + MbProvider: &testsMocks.MiniBlocksProviderStub{}, + Store: genericMocks.NewChainStorerMock(0), + }, + CoreData: &mock.CoreComponentsMock{ + IntMarsh: &marshal.GogoProtoMarshalizer{}, + TxMarsh: &marshal.JsonMarshalizer{}, + UInt64ByteSliceConv: &testsMocks.Uint64ByteSliceConverterMock{}, + AddrPubKeyConv: addrPubKeyConv, + ValPubKeyConv: valPubKeyConv, + NodesConfig: &testscommon.NodesSetupStub{ + GetShardConsensusGroupSizeCalled: func() uint32 { + return 2 + }, + GetMetaConsensusGroupSizeCalled: func() uint32 { + return 2 + }, + }, + EpochChangeNotifier: &epochNotifier.EpochNotifierStub{}, + EconomicsHandler: &economicsmocks.EconomicsHandlerStub{ + ProtocolSustainabilityAddressCalled: func() string { + return testingProtocolSustainabilityAddress + }, + }, + Hash: blake2b.NewBlake2b(), + TxVersionCheckHandler: &testscommon.TxVersionCheckerStub{}, + RatingHandler: &testscommon.RaterMock{}, + EnableEpochsHandlerField: &testscommon.EnableEpochsHandlerStub{}, + EnableRoundsHandlerField: &testscommon.EnableRoundsHandlerStub{}, + EpochNotifierWithConfirm: &updateMocks.EpochStartNotifierStub{}, + RoundHandlerField: &testscommon.RoundHandlerMock{}, + ChanStopProcess: make(chan endProcess.ArgEndProcess, 1), + TxSignHasherField: keccak.NewKeccak(), + HardforkTriggerPubKeyField: []byte("hardfork pub key"), + WasmVMChangeLockerInternal: &sync.RWMutex{}, + NodeTypeProviderField: &nodeTypeProviderMock.NodeTypeProviderStub{}, + RatingsConfig: &testscommon.RatingsInfoMock{}, + PathHdl: &testscommon.PathManagerStub{}, + ProcessStatusHandlerInternal: &testscommon.ProcessStatusHandlerStub{}, + }, + Crypto: &testsMocks.CryptoComponentsStub{ + BlKeyGen: &cryptoMocks.KeyGenStub{}, + BlockSig: &cryptoMocks.SingleSignerStub{}, + MultiSigContainer: &cryptoMocks.MultiSignerContainerMock{ + MultiSigner: &cryptoMocks.MultisignerMock{}, + }, + PrivKey: &cryptoMocks.PrivateKeyStub{}, + PubKey: &cryptoMocks.PublicKeyStub{}, + PubKeyString: "pub key string", + PubKeyBytes: []byte("pub key bytes"), + TxKeyGen: &cryptoMocks.KeyGenStub{}, + TxSig: &cryptoMocks.SingleSignerStub{}, + PeerSignHandler: &cryptoMocks.PeerSignatureHandlerStub{}, + MsgSigVerifier: &testscommon.MessageSignVerifierMock{}, + ManagedPeersHolderField: &testscommon.ManagedPeersHolderStub{}, + }, + Network: &testsMocks.NetworkComponentsStub{ + Messenger: &p2pmocks.MessengerStub{}, + InputAntiFlood: &testsMocks.P2PAntifloodHandlerStub{}, + OutputAntiFlood: &testsMocks.P2PAntifloodHandlerStub{}, + PreferredPeersHolder: &p2pmocks.PeersHolderStub{}, + PeersRatingHandlerField: &p2pmocks.PeersRatingHandlerStub{}, + }, + BootstrapComponents: &mainFactoryMocks.BootstrapComponentsStub{ + ShCoordinator: mock.NewMultiShardsCoordinatorMock(2), + BootstrapParams: &bootstrapMocks.BootstrapParamsHandlerMock{}, + HdrIntegrityVerifier: &mock.HeaderIntegrityVerifierStub{}, + GuardedAccountHandlerField: &guardianMocks.GuardedAccountHandlerStub{}, + VersionedHdrFactory: &testscommon.VersionedHeaderFactoryStub{}, + }, + StatusComponents: &testsMocks.StatusComponentsStub{ + Outport: &outport.OutportStub{}, + }, + StatusCoreComponents: &factoryMocks.StatusCoreComponentsStub{ + AppStatusHandlerField: &statusHandler.AppStatusHandlerStub{}, + }, + } - pc, err := pcf.Create() - require.Nil(t, err) + args.State = components.GetStateComponents(args.CoreData) - 
err = pc.Close() - require.NoError(t, err) + return args } -func TestProcessComponentsFactory_CreateWithInvalidTxAccumulatorTimeExpectError(t *testing.T) { +func TestNewProcessComponentsFactory(t *testing.T) { t.Parallel() - shardCoordinator := mock.NewMultiShardsCoordinatorMock(2) - processArgs := componentsMock.GetProcessComponentsFactoryArgs(shardCoordinator) - processArgs.Config.Antiflood.TxAccumulator.MaxAllowedTimeInMilliseconds = 0 - pcf, err := processComp.NewProcessComponentsFactory(processArgs) - require.Nil(t, err) + t.Run("nil AccountsParser should error", func(t *testing.T) { + t.Parallel() - instance, err := pcf.Create() - require.Nil(t, instance) - require.Error(t, err) - require.True(t, strings.Contains(err.Error(), process.ErrInvalidValue.Error())) + args := createMockProcessComponentsFactoryArgs() + args.AccountsParser = nil + pcf, err := processComp.NewProcessComponentsFactory(args) + require.True(t, errors.Is(err, errorsMx.ErrNilAccountsParser)) + require.Nil(t, pcf) + }) + t.Run("nil GasSchedule should error", func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + args.GasSchedule = nil + pcf, err := processComp.NewProcessComponentsFactory(args) + require.True(t, errors.Is(err, errorsMx.ErrNilGasSchedule)) + require.Nil(t, pcf) + }) + t.Run("nil Data should error", func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + args.Data = nil + pcf, err := processComp.NewProcessComponentsFactory(args) + require.True(t, errors.Is(err, errorsMx.ErrNilDataComponentsHolder)) + require.Nil(t, pcf) + }) + t.Run("nil BlockChain should error", func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + args.Data = &testsMocks.DataComponentsStub{ + BlockChain: nil, + } + pcf, err := processComp.NewProcessComponentsFactory(args) + require.True(t, errors.Is(err, errorsMx.ErrNilBlockChainHandler)) + require.Nil(t, pcf) + }) + t.Run("nil DataPool should error", func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + args.Data = &testsMocks.DataComponentsStub{ + BlockChain: &testscommon.ChainHandlerStub{}, + DataPool: nil, + } + pcf, err := processComp.NewProcessComponentsFactory(args) + require.True(t, errors.Is(err, errorsMx.ErrNilDataPoolsHolder)) + require.Nil(t, pcf) + }) + t.Run("nil StorageService should error", func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + args.Data = &testsMocks.DataComponentsStub{ + BlockChain: &testscommon.ChainHandlerStub{}, + DataPool: &dataRetriever.PoolsHolderStub{}, + Store: nil, + } + pcf, err := processComp.NewProcessComponentsFactory(args) + require.True(t, errors.Is(err, errorsMx.ErrNilStorageService)) + require.Nil(t, pcf) + }) + t.Run("nil CoreData should error", func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + args.CoreData = nil + pcf, err := processComp.NewProcessComponentsFactory(args) + require.True(t, errors.Is(err, errorsMx.ErrNilCoreComponentsHolder)) + require.Nil(t, pcf) + }) + t.Run("nil EconomicsData should error", func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + args.CoreData = &mock.CoreComponentsMock{ + EconomicsHandler: nil, + } + pcf, err := processComp.NewProcessComponentsFactory(args) + require.True(t, errors.Is(err, errorsMx.ErrNilEconomicsData)) + require.Nil(t, pcf) + }) + t.Run("nil GenesisNodesSetup should error", func(t *testing.T) { + t.Parallel() + + args := 
createMockProcessComponentsFactoryArgs() + args.CoreData = &mock.CoreComponentsMock{ + EconomicsHandler: &economicsmocks.EconomicsHandlerStub{}, + NodesConfig: nil, + } + pcf, err := processComp.NewProcessComponentsFactory(args) + require.True(t, errors.Is(err, errorsMx.ErrNilGenesisNodesSetupHandler)) + require.Nil(t, pcf) + }) + t.Run("nil AddressPubKeyConverter should error", func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + args.CoreData = &mock.CoreComponentsMock{ + EconomicsHandler: &economicsmocks.EconomicsHandlerStub{}, + NodesConfig: &testscommon.NodesSetupStub{}, + AddrPubKeyConv: nil, + } + pcf, err := processComp.NewProcessComponentsFactory(args) + require.True(t, errors.Is(err, errorsMx.ErrNilAddressPublicKeyConverter)) + require.Nil(t, pcf) + }) + t.Run("nil EpochNotifier should error", func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + args.CoreData = &mock.CoreComponentsMock{ + EconomicsHandler: &economicsmocks.EconomicsHandlerStub{}, + NodesConfig: &testscommon.NodesSetupStub{}, + AddrPubKeyConv: &testscommon.PubkeyConverterStub{}, + EpochChangeNotifier: nil, + } + pcf, err := processComp.NewProcessComponentsFactory(args) + require.True(t, errors.Is(err, errorsMx.ErrNilEpochNotifier)) + require.Nil(t, pcf) + }) + t.Run("nil ValidatorPubKeyConverter should error", func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + args.CoreData = &mock.CoreComponentsMock{ + EconomicsHandler: &economicsmocks.EconomicsHandlerStub{}, + NodesConfig: &testscommon.NodesSetupStub{}, + AddrPubKeyConv: &testscommon.PubkeyConverterStub{}, + EpochChangeNotifier: &epochNotifier.EpochNotifierStub{}, + ValPubKeyConv: nil, + } + pcf, err := processComp.NewProcessComponentsFactory(args) + require.True(t, errors.Is(err, errorsMx.ErrNilPubKeyConverter)) + require.Nil(t, pcf) + }) + t.Run("nil InternalMarshalizer should error", func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + args.CoreData = &mock.CoreComponentsMock{ + EconomicsHandler: &economicsmocks.EconomicsHandlerStub{}, + NodesConfig: &testscommon.NodesSetupStub{}, + AddrPubKeyConv: &testscommon.PubkeyConverterStub{}, + EpochChangeNotifier: &epochNotifier.EpochNotifierStub{}, + ValPubKeyConv: &testscommon.PubkeyConverterStub{}, + IntMarsh: nil, + } + pcf, err := processComp.NewProcessComponentsFactory(args) + require.True(t, errors.Is(err, errorsMx.ErrNilInternalMarshalizer)) + require.Nil(t, pcf) + }) + t.Run("nil Uint64ByteSliceConverter should error", func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + args.CoreData = &mock.CoreComponentsMock{ + EconomicsHandler: &economicsmocks.EconomicsHandlerStub{}, + NodesConfig: &testscommon.NodesSetupStub{}, + AddrPubKeyConv: &testscommon.PubkeyConverterStub{}, + EpochChangeNotifier: &epochNotifier.EpochNotifierStub{}, + ValPubKeyConv: &testscommon.PubkeyConverterStub{}, + IntMarsh: &testscommon.MarshalizerStub{}, + UInt64ByteSliceConv: nil, + } + pcf, err := processComp.NewProcessComponentsFactory(args) + require.True(t, errors.Is(err, errorsMx.ErrNilUint64ByteSliceConverter)) + require.Nil(t, pcf) + }) + t.Run("nil Crypto should error", func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + args.Crypto = nil + pcf, err := processComp.NewProcessComponentsFactory(args) + require.True(t, errors.Is(err, errorsMx.ErrNilCryptoComponentsHolder)) + require.Nil(t, pcf) + }) + 
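// Illustrative sketch, not part of the patch: the nil-dependency subtests above and
// below all follow the same shape -- start from createMockProcessComponentsFactoryArgs(),
// clear a single field, then assert that processComp.NewProcessComponentsFactory returns
// the matching errorsMx sentinel. A hypothetical table-driven variant of the same checks,
// reusing only identifiers already present in this file, could look like this:
func TestNewProcessComponentsFactory_NilDependenciesTable(t *testing.T) {
	t.Parallel()

	testCases := []struct {
		name        string
		mutate      func(args *processComp.ProcessComponentsFactoryArgs)
		expectedErr error
	}{
		{
			name:        "nil AccountsParser",
			mutate:      func(args *processComp.ProcessComponentsFactoryArgs) { args.AccountsParser = nil },
			expectedErr: errorsMx.ErrNilAccountsParser,
		},
		{
			name:        "nil GasSchedule",
			mutate:      func(args *processComp.ProcessComponentsFactoryArgs) { args.GasSchedule = nil },
			expectedErr: errorsMx.ErrNilGasSchedule,
		},
	}

	for _, tc := range testCases {
		tc := tc
		t.Run(tc.name, func(t *testing.T) {
			t.Parallel()

			// same pattern as the explicit subtests: mutate a copy of the valid mock args
			args := createMockProcessComponentsFactoryArgs()
			tc.mutate(&args)

			pcf, err := processComp.NewProcessComponentsFactory(args)
			require.True(t, errors.Is(err, tc.expectedErr))
			require.Nil(t, pcf)
		})
	}
}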
t.Run("nil BlockSignKeyGen should error", func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + args.Crypto = &testsMocks.CryptoComponentsStub{ + BlKeyGen: nil, + } + pcf, err := processComp.NewProcessComponentsFactory(args) + require.True(t, errors.Is(err, errorsMx.ErrNilBlockSignKeyGen)) + require.Nil(t, pcf) + }) + t.Run("nil State should error", func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + args.State = nil + pcf, err := processComp.NewProcessComponentsFactory(args) + require.True(t, errors.Is(err, errorsMx.ErrNilStateComponentsHolder)) + require.Nil(t, pcf) + }) + t.Run("nil AccountsAdapter should error", func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + args.State = &factoryMocks.StateComponentsMock{ + Accounts: nil, + } + pcf, err := processComp.NewProcessComponentsFactory(args) + require.True(t, errors.Is(err, errorsMx.ErrNilAccountsAdapter)) + require.Nil(t, pcf) + }) + t.Run("nil Network should error", func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + args.Network = nil + pcf, err := processComp.NewProcessComponentsFactory(args) + require.True(t, errors.Is(err, errorsMx.ErrNilNetworkComponentsHolder)) + require.Nil(t, pcf) + }) + t.Run("nil NetworkMessenger should error", func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + args.Network = &testsMocks.NetworkComponentsStub{ + Messenger: nil, + } + pcf, err := processComp.NewProcessComponentsFactory(args) + require.True(t, errors.Is(err, errorsMx.ErrNilMessenger)) + require.Nil(t, pcf) + }) + t.Run("nil InputAntiFloodHandler should error", func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + args.Network = &testsMocks.NetworkComponentsStub{ + Messenger: &p2pmocks.MessengerStub{}, + InputAntiFlood: nil, + } + pcf, err := processComp.NewProcessComponentsFactory(args) + require.True(t, errors.Is(err, errorsMx.ErrNilInputAntiFloodHandler)) + require.Nil(t, pcf) + }) + t.Run("nil SystemSCConfig should error", func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + args.SystemSCConfig = nil + pcf, err := processComp.NewProcessComponentsFactory(args) + require.True(t, errors.Is(err, errorsMx.ErrNilSystemSCConfig)) + require.Nil(t, pcf) + }) + t.Run("nil BootstrapComponents should error", func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + args.BootstrapComponents = nil + pcf, err := processComp.NewProcessComponentsFactory(args) + require.True(t, errors.Is(err, errorsMx.ErrNilBootstrapComponentsHolder)) + require.Nil(t, pcf) + }) + t.Run("nil ShardCoordinator should error", func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + args.BootstrapComponents = &mainFactoryMocks.BootstrapComponentsStub{ + ShCoordinator: nil, + } + pcf, err := processComp.NewProcessComponentsFactory(args) + require.True(t, errors.Is(err, errorsMx.ErrNilShardCoordinator)) + require.Nil(t, pcf) + }) + t.Run("nil EpochBootstrapParams should error", func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + args.BootstrapComponents = &mainFactoryMocks.BootstrapComponentsStub{ + ShCoordinator: &testscommon.ShardsCoordinatorMock{}, + BootstrapParams: nil, + } + pcf, err := processComp.NewProcessComponentsFactory(args) + require.True(t, errors.Is(err, errorsMx.ErrNilBootstrapParamsHandler)) + 
require.Nil(t, pcf) + }) + t.Run("nil StatusComponents should error", func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + args.StatusComponents = nil + pcf, err := processComp.NewProcessComponentsFactory(args) + require.True(t, errors.Is(err, errorsMx.ErrNilStatusComponentsHolder)) + require.Nil(t, pcf) + }) + t.Run("nil OutportHandler should error", func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + args.StatusComponents = &testsMocks.StatusComponentsStub{ + Outport: nil, + } + pcf, err := processComp.NewProcessComponentsFactory(args) + require.True(t, errors.Is(err, errorsMx.ErrNilOutportHandler)) + require.Nil(t, pcf) + }) + t.Run("nil HistoryRepo should error", func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + args.HistoryRepo = nil + pcf, err := processComp.NewProcessComponentsFactory(args) + require.True(t, errors.Is(err, errorsMx.ErrNilHistoryRepository)) + require.Nil(t, pcf) + }) + t.Run("nil StatusCoreComponents should error", func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + args.StatusCoreComponents = nil + pcf, err := processComp.NewProcessComponentsFactory(args) + require.True(t, errors.Is(err, errorsMx.ErrNilStatusCoreComponents)) + require.Nil(t, pcf) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + pcf, err := processComp.NewProcessComponentsFactory(createMockProcessComponentsFactoryArgs()) + require.NoError(t, err) + require.NotNil(t, pcf) + }) } -func TestProcessComponents_IndexGenesisBlocks(t *testing.T) { +func TestProcessComponentsFactory_Create(t *testing.T) { t.Parallel() - shardCoordinator := mock.NewMultiShardsCoordinatorMock(1) - processArgs := componentsMock.GetProcessComponentsFactoryArgs(shardCoordinator) - processArgs.Data = &mock.DataComponentsMock{ - Storage: &storageStubs.ChainStorerStub{}, - } + expectedErr := errors.New("expected error") + t.Run("CreateCurrentEpochProvider fails should error", func(t *testing.T) { + t.Parallel() - saveBlockCalledMutex := sync.Mutex{} + args := createMockProcessComponentsFactoryArgs() + args.Config.EpochStartConfig.RoundsPerEpoch = 0 + args.PrefConfigs.Preferences.FullArchive = true + testCreateWithArgs(t, args, "rounds per epoch") + }) + t.Run("createNetworkShardingCollector fails due to invalid PublicKeyPeerId config should error", func(t *testing.T) { + t.Parallel() - outportHandler := &outport.OutportStub{ - HasDriversCalled: func() bool { - return true - }, - SaveBlockCalled: func(args *outportCore.ArgsSaveBlockData) { - saveBlockCalledMutex.Lock() - require.NotNil(t, args) + args := createMockProcessComponentsFactoryArgs() + args.Config.PublicKeyPeerId.Type = "invalid" + testCreateWithArgs(t, args, "cache type") + }) + t.Run("createNetworkShardingCollector fails due to invalid PublicKeyShardId config should error", func(t *testing.T) { + t.Parallel() - bodyRequired := &dataBlock.Body{ - MiniBlocks: make([]*block.MiniBlock, 4), + args := createMockProcessComponentsFactoryArgs() + args.Config.PublicKeyShardId.Type = "invalid" + testCreateWithArgs(t, args, "cache type") + }) + t.Run("createNetworkShardingCollector fails due to invalid PeerIdShardId config should error", func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + args.Config.PeerIdShardId.Type = "invalid" + testCreateWithArgs(t, args, "cache type") + }) + t.Run("prepareNetworkShardingCollector fails due to SetPeerShardResolver failure should 
error", func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + netwCompStub, ok := args.Network.(*testsMocks.NetworkComponentsStub) + require.True(t, ok) + netwCompStub.Messenger = &p2pmocks.MessengerStub{ + SetPeerShardResolverCalled: func(peerShardResolver p2p.PeerShardResolver) error { + return expectedErr + }, + } + testCreateWithArgs(t, args, expectedErr.Error()) + }) + t.Run("prepareNetworkShardingCollector fails due to SetPeerValidatorMapper failure should error", func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + netwCompStub, ok := args.Network.(*testsMocks.NetworkComponentsStub) + require.True(t, ok) + netwCompStub.InputAntiFlood = &testsMocks.P2PAntifloodHandlerStub{ + SetPeerValidatorMapperCalled: func(validatorMapper process.PeerValidatorMapper) error { + return expectedErr + }, + } + testCreateWithArgs(t, args, expectedErr.Error()) + }) + t.Run("newStorageRequester fails due to NewStorageServiceFactory failure should error", func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + args.ImportDBConfig.IsImportDBMode = true + args.Config.StoragePruning.NumActivePersisters = 0 + testCreateWithArgs(t, args, "active persisters") + }) + t.Run("newResolverContainerFactory fails due to NewPeerAuthenticationPayloadValidator failure should error", func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + args.Config.HeartbeatV2.HeartbeatExpiryTimespanInSec = 0 + testCreateWithArgs(t, args, "expiry timespan") + }) + t.Run("generateGenesisHeadersAndApplyInitialBalances fails due to invalid GenesisNodePrice should error", func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + args.Config.LogsAndEvents.SaveInStorageEnabled = false // coverage + args.Config.DbLookupExtensions.Enabled = true // coverage + args.SystemSCConfig.StakingSystemSCConfig.GenesisNodePrice = "invalid" + testCreateWithArgs(t, args, "invalid genesis node price") + }) + t.Run("newValidatorStatisticsProcessor fails due to nil genesis header should error", func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + args.ImportDBConfig.IsImportDBMode = true // coverage + dataCompStub, ok := args.Data.(*testsMocks.DataComponentsStub) + require.True(t, ok) + blockChainStub, ok := dataCompStub.BlockChain.(*testscommon.ChainHandlerStub) + require.True(t, ok) + blockChainStub.GetGenesisHeaderCalled = func() coreData.HeaderHandler { + return nil + } + testCreateWithArgs(t, args, errorsMx.ErrGenesisBlockNotInitialized.Error()) + }) + t.Run("indexGenesisBlocks fails due to GenerateInitialTransactions failure should error", func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + args.AccountsParser = &mock.AccountsParserStub{ + GenerateInitialTransactionsCalled: func(shardCoordinator sharding.Coordinator, initialIndexingData map[uint32]*genesis.IndexingData) ([]*dataBlock.MiniBlock, map[uint32]*outportCore.TransactionPool, error) { + return nil, nil, expectedErr + }, + } + testCreateWithArgs(t, args, expectedErr.Error()) + }) + t.Run("NewMiniBlocksPoolsCleaner fails should error", func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + args.Config.PoolsCleanersConfig.MaxRoundsToKeepUnprocessedMiniBlocks = 0 + testCreateWithArgs(t, args, "MaxRoundsToKeepUnprocessedData") + }) + t.Run("NewTxsPoolsCleaner fails should error", func(t *testing.T) { + t.Parallel() 
+ + args := createMockProcessComponentsFactoryArgs() + args.Config.PoolsCleanersConfig.MaxRoundsToKeepUnprocessedTransactions = 0 + testCreateWithArgs(t, args, "MaxRoundsToKeepUnprocessedData") + }) + t.Run("createHardforkTrigger fails due to Decode failure should error", func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + args.Config.Hardfork.PublicKeyToListenFrom = "invalid key" + testCreateWithArgs(t, args, "PublicKeyToListenFrom") + }) + t.Run("NewCache fails for vmOutput should error", func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + args.Config.VMOutputCacher.Type = "invalid" + testCreateWithArgs(t, args, "cache type") + }) + t.Run("newShardBlockProcessor: attachProcessDebugger fails should error", func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + args.Config.Debug.Process.Enabled = true + args.Config.Debug.Process.PollingTimeInSeconds = 0 + testCreateWithArgs(t, args, "PollingTimeInSeconds") + }) + t.Run("nodesSetupChecker.Check fails should error", func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + coreCompStub := factoryMocks.NewCoreComponentsHolderStubFromRealComponent(args.CoreData) + coreCompStub.GenesisNodesSetupCalled = func() sharding.GenesisNodesSetupHandler { + return &testscommon.NodesSetupStub{ + AllInitialNodesCalled: func() []nodesCoordinator.GenesisNodeInfoHandler { + return []nodesCoordinator.GenesisNodeInfoHandler{ + &genesisMocks.GenesisNodeInfoHandlerMock{ + PubKeyBytesValue: []byte("no stake"), + }, + } + }, + GetShardConsensusGroupSizeCalled: func() uint32 { + return 2 + }, + GetMetaConsensusGroupSizeCalled: func() uint32 { + return 2 + }, } + } + args.CoreData = coreCompStub + testCreateWithArgs(t, args, "no one staked") + }) + t.Run("should work with indexAndReturnGenesisAccounts failing due to RootHash failure", func(t *testing.T) { + t.Parallel() - txsPoolRequired := &outportCore.Pool{} + args := createMockProcessComponentsFactoryArgs() + statusCompStub, ok := args.StatusComponents.(*testsMocks.StatusComponentsStub) + require.True(t, ok) + statusCompStub.Outport = &outport.OutportStub{ + HasDriversCalled: func() bool { + return true + }, + } + stateCompMock := factoryMocks.NewStateComponentsMockFromRealComponent(args.State) + realAccounts := stateCompMock.AccountsAdapter() + stateCompMock.Accounts = &state.AccountsStub{ + GetAllLeavesCalled: realAccounts.GetAllLeaves, + RootHashCalled: func() ([]byte, error) { + return nil, expectedErr + }, + CommitCalled: realAccounts.Commit, + } + args.State = stateCompMock - assert.Equal(t, txsPoolRequired, args.TransactionsPool) - assert.Equal(t, bodyRequired, args.Body) - saveBlockCalledMutex.Unlock() - }, - } + pcf, _ := processComp.NewProcessComponentsFactory(args) + require.NotNil(t, pcf) - processArgs.StatusComponents = &mainFactoryMocks.StatusComponentsStub{ - Outport: outportHandler, - } + instance, err := pcf.Create() + require.Nil(t, err) + require.NotNil(t, instance) + + err = instance.Close() + require.NoError(t, err) + _ = args.State.Close() + }) + t.Run("should work with indexAndReturnGenesisAccounts failing due to GetAllLeaves failure", func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + statusCompStub, ok := args.StatusComponents.(*testsMocks.StatusComponentsStub) + require.True(t, ok) + statusCompStub.Outport = &outport.OutportStub{ + HasDriversCalled: func() bool { + return true + }, + } + stateCompMock := 
factoryMocks.NewStateComponentsMockFromRealComponent(args.State) + realAccounts := stateCompMock.AccountsAdapter() + stateCompMock.Accounts = &state.AccountsStub{ + GetAllLeavesCalled: func(leavesChannels *common.TrieIteratorChannels, ctx context.Context, rootHash []byte) error { + close(leavesChannels.LeavesChan) + leavesChannels.ErrChan.Close() + return expectedErr + }, + RootHashCalled: realAccounts.RootHash, + CommitCalled: realAccounts.Commit, + } + args.State = stateCompMock + + pcf, _ := processComp.NewProcessComponentsFactory(args) + require.NotNil(t, pcf) + + instance, err := pcf.Create() + require.Nil(t, err) + require.NotNil(t, instance) + + err = instance.Close() + require.NoError(t, err) + _ = args.State.Close() + }) + t.Run("should work with indexAndReturnGenesisAccounts failing due to Unmarshal failure", func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + statusCompStub, ok := args.StatusComponents.(*testsMocks.StatusComponentsStub) + require.True(t, ok) + statusCompStub.Outport = &outport.OutportStub{ + HasDriversCalled: func() bool { + return true + }, + } + stateCompMock := factoryMocks.NewStateComponentsMockFromRealComponent(args.State) + realAccounts := stateCompMock.AccountsAdapter() + stateCompMock.Accounts = &state.AccountsStub{ + GetAllLeavesCalled: func(leavesChannels *common.TrieIteratorChannels, ctx context.Context, rootHash []byte) error { + addrOk, _ := addrPubKeyConv.Decode("erd17c4fs6mz2aa2hcvva2jfxdsrdknu4220496jmswer9njznt22eds0rxlr4") + addrNOK, _ := addrPubKeyConv.Decode("erd1ulhw20j7jvgfgak5p05kv667k5k9f320sgef5ayxkt9784ql0zssrzyhjp") + leavesChannels.LeavesChan <- keyValStorage.NewKeyValStorage(addrOk, []byte("value")) // coverage + leavesChannels.LeavesChan <- keyValStorage.NewKeyValStorage(addrNOK, []byte("value")) + close(leavesChannels.LeavesChan) + leavesChannels.ErrChan.Close() + return nil + }, + RootHashCalled: realAccounts.RootHash, + CommitCalled: realAccounts.Commit, + } + args.State = stateCompMock + + coreCompStub := factoryMocks.NewCoreComponentsHolderStubFromRealComponent(args.CoreData) + cnt := 0 + coreCompStub.InternalMarshalizerCalled = func() marshal.Marshalizer { + return &testscommon.MarshalizerStub{ + UnmarshalCalled: func(obj interface{}, buff []byte) error { + cnt++ + if cnt == 1 { + return nil // coverage, key_ok + } + return expectedErr + }, + } + } + args.CoreData = coreCompStub + pcf, _ := processComp.NewProcessComponentsFactory(args) + require.NotNil(t, pcf) - pcf, err := processComp.NewProcessComponentsFactory(processArgs) - require.Nil(t, err) + instance, err := pcf.Create() + require.Nil(t, err) + require.NotNil(t, instance) - genesisBlocks := make(map[uint32]coreData.HeaderHandler) - indexingData := make(map[uint32]*genesis.IndexingData) + err = instance.Close() + require.NoError(t, err) + _ = args.State.Close() + }) + t.Run("should work with indexAndReturnGenesisAccounts failing due to error on GetAllLeaves", func(t *testing.T) { + t.Parallel() - for i := uint32(0); i < shardCoordinator.NumberOfShards(); i++ { - genesisBlocks[i] = &block.Header{} + args := createMockProcessComponentsFactoryArgs() + statusCompStub, ok := args.StatusComponents.(*testsMocks.StatusComponentsStub) + require.True(t, ok) + statusCompStub.Outport = &outport.OutportStub{ + HasDriversCalled: func() bool { + return true + }, + } + realStateComp := args.State + args.State = &factoryMocks.StateComponentsMock{ + Accounts: &state.AccountsStub{ + GetAllLeavesCalled: func(leavesChannels 
*common.TrieIteratorChannels, ctx context.Context, rootHash []byte) error { + close(leavesChannels.LeavesChan) + leavesChannels.ErrChan.WriteInChanNonBlocking(expectedErr) + leavesChannels.ErrChan.Close() + return nil + }, + CommitCalled: realStateComp.AccountsAdapter().Commit, + RootHashCalled: realStateComp.AccountsAdapter().RootHash, + }, + PeersAcc: realStateComp.PeerAccounts(), + Tries: realStateComp.TriesContainer(), + AccountsAPI: realStateComp.AccountsAdapterAPI(), + StorageManagers: realStateComp.TrieStorageManagers(), + MissingNodesNotifier: realStateComp.MissingTrieNodesNotifier(), + } + + pcf, _ := processComp.NewProcessComponentsFactory(args) + require.NotNil(t, pcf) + + instance, err := pcf.Create() + require.Nil(t, err) + require.NotNil(t, instance) + + err = instance.Close() + require.NoError(t, err) + _ = args.State.Close() + }) + t.Run("should work with indexAndReturnGenesisAccounts failing due to error on Encode", func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + statusCompStub, ok := args.StatusComponents.(*testsMocks.StatusComponentsStub) + require.True(t, ok) + statusCompStub.Outport = &outport.OutportStub{ + HasDriversCalled: func() bool { + return true + }, + } + realStateComp := args.State + args.State = &factoryMocks.StateComponentsMock{ + Accounts: &state.AccountsStub{ + GetAllLeavesCalled: func(leavesChannels *common.TrieIteratorChannels, ctx context.Context, rootHash []byte) error { + leavesChannels.LeavesChan <- keyValStorage.NewKeyValStorage([]byte("invalid addr"), []byte("value")) + close(leavesChannels.LeavesChan) + leavesChannels.ErrChan.Close() + return nil + }, + CommitCalled: realStateComp.AccountsAdapter().Commit, + RootHashCalled: realStateComp.AccountsAdapter().RootHash, + }, + PeersAcc: realStateComp.PeerAccounts(), + Tries: realStateComp.TriesContainer(), + AccountsAPI: realStateComp.AccountsAdapterAPI(), + StorageManagers: realStateComp.TrieStorageManagers(), + MissingNodesNotifier: realStateComp.MissingTrieNodesNotifier(), + } + coreCompStub := factoryMocks.NewCoreComponentsHolderStubFromRealComponent(args.CoreData) + coreCompStub.InternalMarshalizerCalled = func() marshal.Marshalizer { + return &testscommon.MarshalizerStub{ + UnmarshalCalled: func(obj interface{}, buff []byte) error { + return nil + }, + } + } + args.CoreData = coreCompStub + + pcf, _ := processComp.NewProcessComponentsFactory(args) + require.NotNil(t, pcf) + + instance, err := pcf.Create() + require.Nil(t, err) + require.NotNil(t, instance) + + err = instance.Close() + require.NoError(t, err) + _ = args.State.Close() + }) + t.Run("should work - shard", func(t *testing.T) { + shardCoordinator := mock.NewMultiShardsCoordinatorMock(2) + processArgs := components.GetProcessComponentsFactoryArgs(shardCoordinator) + pcf, _ := processComp.NewProcessComponentsFactory(processArgs) + require.NotNil(t, pcf) + + instance, err := pcf.Create() + require.NoError(t, err) + require.NotNil(t, instance) + + err = instance.Close() + require.NoError(t, err) + _ = processArgs.State.Close() + }) + t.Run("should work - meta", func(t *testing.T) { + shardCoordinator := mock.NewMultiShardsCoordinatorMock(2) + shardCoordinator.CurrentShard = common.MetachainShardId + processArgs := components.GetProcessComponentsFactoryArgs(shardCoordinator) + + shardCoordinator.ComputeIdCalled = func(address []byte) uint32 { + protocolSustainabilityAddr, err := processArgs.CoreData.AddressPubKeyConverter().Decode(testingProtocolSustainabilityAddress) + require.NoError(t, err) + 
if bytes.Equal(protocolSustainabilityAddr, address) { + return 0 + } + return shardCoordinator.CurrentShard + } + fundGenesisWallets(t, processArgs) + + pcf, _ := processComp.NewProcessComponentsFactory(processArgs) + require.NotNil(t, pcf) + + instance, err := pcf.Create() + require.NoError(t, err) + require.NotNil(t, instance) + + err = instance.Close() + require.NoError(t, err) + _ = processArgs.State.Close() + }) +} + +func fundGenesisWallets(t *testing.T, args processComp.ProcessComponentsFactoryArgs) { + accounts := args.State.AccountsAdapter() + initialNodes := args.CoreData.GenesisNodesSetup().AllInitialNodes() + nodePrice, ok := big.NewInt(0).SetString(args.SystemSCConfig.StakingSystemSCConfig.GenesisNodePrice, 10) + require.True(t, ok) + for _, node := range initialNodes { + account, err := accounts.LoadAccount(node.AddressBytes()) + require.NoError(t, err) + + userAccount := account.(mxState.UserAccountHandler) + err = userAccount.AddToBalance(nodePrice) + require.NoError(t, err) + + require.NoError(t, accounts.SaveAccount(userAccount)) + _, err = accounts.Commit() + require.NoError(t, err) } +} + +func testCreateWithArgs(t *testing.T, args processComp.ProcessComponentsFactoryArgs, expectedErrSubstr string) { + pcf, _ := processComp.NewProcessComponentsFactory(args) + require.NotNil(t, pcf) + + instance, err := pcf.Create() + require.Error(t, err) + require.True(t, strings.Contains(err.Error(), expectedErrSubstr)) + require.Nil(t, instance) - err = pcf.IndexGenesisBlocks(genesisBlocks, indexingData) - require.Nil(t, err) + _ = args.State.Close() } diff --git a/factory/processing/txSimulatorProcessComponents.go b/factory/processing/txSimulatorProcessComponents.go new file mode 100644 index 00000000000..023495f6cb5 --- /dev/null +++ b/factory/processing/txSimulatorProcessComponents.go @@ -0,0 +1,375 @@ +package processing + +import ( + "github.com/multiversx/mx-chain-core-go/core" + dataBlock "github.com/multiversx/mx-chain-core-go/data/block" + bootstrapDisabled "github.com/multiversx/mx-chain-go/epochStart/bootstrap/disabled" + "github.com/multiversx/mx-chain-go/factory" + "github.com/multiversx/mx-chain-go/genesis" + processDisabled "github.com/multiversx/mx-chain-go/genesis/process/disabled" + "github.com/multiversx/mx-chain-go/process" + "github.com/multiversx/mx-chain-go/process/block/preprocess" + "github.com/multiversx/mx-chain-go/process/coordinator" + "github.com/multiversx/mx-chain-go/process/factory/shard" + "github.com/multiversx/mx-chain-go/process/smartContract" + "github.com/multiversx/mx-chain-go/process/transaction" + "github.com/multiversx/mx-chain-go/process/transactionLog" + "github.com/multiversx/mx-chain-go/process/txsimulator" + "github.com/multiversx/mx-chain-go/state" + "github.com/multiversx/mx-chain-go/state/syncer" + "github.com/multiversx/mx-chain-go/storage" + storageFactory "github.com/multiversx/mx-chain-go/storage/factory" + "github.com/multiversx/mx-chain-go/storage/storageunit" + vmcommon "github.com/multiversx/mx-chain-vm-common-go" + "github.com/multiversx/mx-chain-vm-common-go/parsers" +) + +func (pcf *processComponentsFactory) createTxSimulatorProcessor() (factory.TransactionSimulatorProcessor, process.VirtualMachinesContainerFactory, error) { + readOnlyAccountsDB, err := txsimulator.NewReadOnlyAccountsDB(pcf.state.AccountsAdapterAPI()) + if err != nil { + return nil, nil, err + } + + vmOutputCacherConfig := storageFactory.GetCacherFromConfig(pcf.config.VMOutputCacher) + vmOutputCacher, err := storageunit.NewCache(vmOutputCacherConfig) + if 
err != nil { + return nil, nil, err + } + + txLogsProcessor, err := transactionLog.NewTxLogProcessor(transactionLog.ArgTxLogProcessor{ + Marshalizer: pcf.coreData.InternalMarshalizer(), + SaveInStorageEnabled: false, // no storer needed for tx simulator + }) + if err != nil { + return nil, nil, err + } + + txSimulatorProcessorArgs, vmContainerFactory, err := pcf.createArgsTxSimulatorProcessor(readOnlyAccountsDB, vmOutputCacher, txLogsProcessor) + if err != nil { + return nil, nil, err + } + + txSimulatorProcessorArgs.VMOutputCacher = vmOutputCacher + txSimulatorProcessorArgs.AddressPubKeyConverter = pcf.coreData.AddressPubKeyConverter() + txSimulatorProcessorArgs.ShardCoordinator = pcf.bootstrapComponents.ShardCoordinator() + txSimulatorProcessorArgs.Hasher = pcf.coreData.Hasher() + txSimulatorProcessorArgs.Marshalizer = pcf.coreData.InternalMarshalizer() + + txSimulator, err := txsimulator.NewTransactionSimulator(txSimulatorProcessorArgs) + + return txSimulator, vmContainerFactory, err +} + +func (pcf *processComponentsFactory) createArgsTxSimulatorProcessor( + accountsAdapter state.AccountsAdapter, + vmOutputCacher storage.Cacher, + txLogsProcessor process.TransactionLogProcessor, +) (txsimulator.ArgsTxSimulator, process.VirtualMachinesContainerFactory, error) { + shardID := pcf.bootstrapComponents.ShardCoordinator().SelfId() + if shardID == core.MetachainShardId { + return pcf.createArgsTxSimulatorProcessorForMeta(accountsAdapter, vmOutputCacher, txLogsProcessor) + } else { + return pcf.createArgsTxSimulatorProcessorShard(accountsAdapter, vmOutputCacher, txLogsProcessor) + } +} + +func (pcf *processComponentsFactory) createArgsTxSimulatorProcessorForMeta( + accountsAdapter state.AccountsAdapter, + vmOutputCacher storage.Cacher, + txLogsProcessor process.TransactionLogProcessor, +) (txsimulator.ArgsTxSimulator, process.VirtualMachinesContainerFactory, error) { + args := txsimulator.ArgsTxSimulator{} + + argsFactory := shard.ArgsNewIntermediateProcessorsContainerFactory{ + ShardCoordinator: pcf.bootstrapComponents.ShardCoordinator(), + Marshalizer: pcf.coreData.InternalMarshalizer(), + Hasher: pcf.coreData.Hasher(), + PubkeyConverter: pcf.coreData.AddressPubKeyConverter(), + Store: bootstrapDisabled.NewChainStorer(), + PoolsHolder: pcf.data.Datapool(), + EconomicsFee: &processDisabled.FeeHandler{}, + EnableEpochsHandler: pcf.coreData.EnableEpochsHandler(), + } + intermediateProcessorsFactory, err := shard.NewIntermediateProcessorsContainerFactory(argsFactory) + if err != nil { + return args, nil, err + } + + intermediateProcessorsContainer, err := intermediateProcessorsFactory.Create() + if err != nil { + return args, nil, err + } + + builtInFuncFactory, err := pcf.createBuiltInFunctionContainer(accountsAdapter, make(map[string]struct{})) + if err != nil { + return args, nil, err + } + + vmContainerFactory, err := pcf.createVMFactoryMeta( + accountsAdapter, + builtInFuncFactory.BuiltInFunctionContainer(), + pcf.config.SmartContractsStorageSimulate, + builtInFuncFactory.NFTStorageHandler(), + builtInFuncFactory.ESDTGlobalSettingsHandler(), + ) + if err != nil { + return args, nil, err + } + + vmContainer, err := vmContainerFactory.Create() + if err != nil { + return args, nil, err + } + + txTypeHandler, err := pcf.createTxTypeHandler(builtInFuncFactory) + if err != nil { + return args, nil, err + } + + gasHandler, err := preprocess.NewGasComputation( + pcf.coreData.EconomicsData(), + txTypeHandler, + pcf.coreData.EnableEpochsHandler(), + ) + if err != nil { + return args, nil, err + } + + 
scForwarder, err := intermediateProcessorsContainer.Get(dataBlock.SmartContractResultBlock) + if err != nil { + return args, nil, err + } + badTxInterim, err := intermediateProcessorsContainer.Get(dataBlock.InvalidBlock) + if err != nil { + return args, nil, err + } + + scProcArgs := smartContract.ArgsNewSmartContractProcessor{ + VmContainer: vmContainer, + ArgsParser: smartContract.NewArgumentParser(), + Hasher: pcf.coreData.Hasher(), + Marshalizer: pcf.coreData.InternalMarshalizer(), + AccountsDB: accountsAdapter, + BlockChainHook: vmContainerFactory.BlockChainHookImpl(), + BuiltInFunctions: builtInFuncFactory.BuiltInFunctionContainer(), + PubkeyConv: pcf.coreData.AddressPubKeyConverter(), + ShardCoordinator: pcf.bootstrapComponents.ShardCoordinator(), + ScrForwarder: scForwarder, + TxFeeHandler: &processDisabled.FeeHandler{}, + EconomicsFee: pcf.coreData.EconomicsData(), + TxTypeHandler: txTypeHandler, + GasHandler: gasHandler, + GasSchedule: pcf.gasSchedule, + TxLogsProcessor: txLogsProcessor, + EnableEpochsHandler: pcf.coreData.EnableEpochsHandler(), + BadTxForwarder: badTxInterim, + VMOutputCacher: vmOutputCacher, + WasmVMChangeLocker: pcf.coreData.WasmVMChangeLocker(), + IsGenesisProcessing: false, + } + + scProcessor, err := smartContract.NewSmartContractProcessor(scProcArgs) + if err != nil { + return args, nil, err + } + + argsTxProcessor := transaction.ArgsNewMetaTxProcessor{ + Hasher: pcf.coreData.Hasher(), + Marshalizer: pcf.coreData.InternalMarshalizer(), + Accounts: accountsAdapter, + PubkeyConv: pcf.coreData.AddressPubKeyConverter(), + ShardCoordinator: pcf.bootstrapComponents.ShardCoordinator(), + ScProcessor: scProcessor, + TxTypeHandler: txTypeHandler, + EconomicsFee: pcf.coreData.EconomicsData(), + EnableEpochsHandler: pcf.coreData.EnableEpochsHandler(), + TxVersionChecker: pcf.coreData.TxVersionChecker(), + GuardianChecker: pcf.bootstrapComponents.GuardedAccountHandler(), + } + + txProcessor, err := transaction.NewMetaTxProcessor(argsTxProcessor) + if err != nil { + return args, nil, err + } + + args.TransactionProcessor = txProcessor + args.IntermediateProcContainer = intermediateProcessorsContainer + + return args, vmContainerFactory, nil +} + +func (pcf *processComponentsFactory) createTxTypeHandler(builtInFuncFactory vmcommon.BuiltInFunctionFactory) (process.TxTypeHandler, error) { + esdtTransferParser, err := parsers.NewESDTTransferParser(pcf.coreData.InternalMarshalizer()) + if err != nil { + return nil, err + } + + argsTxTypeHandler := coordinator.ArgNewTxTypeHandler{ + PubkeyConverter: pcf.coreData.AddressPubKeyConverter(), + ShardCoordinator: pcf.bootstrapComponents.ShardCoordinator(), + BuiltInFunctions: builtInFuncFactory.BuiltInFunctionContainer(), + ArgumentParser: parsers.NewCallArgsParser(), + ESDTTransferParser: esdtTransferParser, + EnableEpochsHandler: pcf.coreData.EnableEpochsHandler(), + } + + return coordinator.NewTxTypeHandler(argsTxTypeHandler) +} + +func (pcf *processComponentsFactory) createArgsTxSimulatorProcessorShard( + accountsAdapter state.AccountsAdapter, + vmOutputCacher storage.Cacher, + txLogsProcessor process.TransactionLogProcessor, +) (txsimulator.ArgsTxSimulator, process.VirtualMachinesContainerFactory, error) { + args := txsimulator.ArgsTxSimulator{} + + argsFactory := shard.ArgsNewIntermediateProcessorsContainerFactory{ + ShardCoordinator: pcf.bootstrapComponents.ShardCoordinator(), + Marshalizer: pcf.coreData.InternalMarshalizer(), + Hasher: pcf.coreData.Hasher(), + PubkeyConverter: pcf.coreData.AddressPubKeyConverter(), + 
Store: bootstrapDisabled.NewChainStorer(), + PoolsHolder: pcf.data.Datapool(), + EconomicsFee: &processDisabled.FeeHandler{}, + EnableEpochsHandler: pcf.coreData.EnableEpochsHandler(), + } + + intermediateProcessorsFactory, err := shard.NewIntermediateProcessorsContainerFactory(argsFactory) + if err != nil { + return args, nil, err + } + + intermediateProcessorsContainer, err := intermediateProcessorsFactory.Create() + if err != nil { + return args, nil, err + } + + mapDNSAddresses, err := pcf.smartContractParser.GetDeployedSCAddresses(genesis.DNSType) + if err != nil { + return args, nil, err + } + + builtInFuncFactory, err := pcf.createBuiltInFunctionContainer(accountsAdapter, mapDNSAddresses) + if err != nil { + return args, nil, err + } + + smartContractStorageSimulate := pcf.config.SmartContractsStorageSimulate + esdtTransferParser, err := parsers.NewESDTTransferParser(pcf.coreData.InternalMarshalizer()) + if err != nil { + return args, nil, err + } + + vmContainerFactory, err := pcf.createVMFactoryShard( + accountsAdapter, + syncer.NewMissingTrieNodesNotifier(), + builtInFuncFactory.BuiltInFunctionContainer(), + esdtTransferParser, + pcf.coreData.WasmVMChangeLocker(), + smartContractStorageSimulate, + builtInFuncFactory.NFTStorageHandler(), + builtInFuncFactory.ESDTGlobalSettingsHandler(), + ) + if err != nil { + return args, nil, err + } + + err = builtInFuncFactory.SetPayableHandler(vmContainerFactory.BlockChainHookImpl()) + if err != nil { + return args, nil, err + } + + vmContainer, err := vmContainerFactory.Create() + if err != nil { + return args, nil, err + } + + txTypeHandler, err := pcf.createTxTypeHandler(builtInFuncFactory) + if err != nil { + return args, nil, err + } + txFeeHandler := &processDisabled.FeeHandler{} + + gasHandler, err := preprocess.NewGasComputation( + pcf.coreData.EconomicsData(), + txTypeHandler, + pcf.coreData.EnableEpochsHandler(), + ) + if err != nil { + return args, nil, err + } + + scForwarder, err := intermediateProcessorsContainer.Get(dataBlock.SmartContractResultBlock) + if err != nil { + return args, nil, err + } + badTxInterim, err := intermediateProcessorsContainer.Get(dataBlock.InvalidBlock) + if err != nil { + return args, nil, err + } + receiptTxInterim, err := intermediateProcessorsContainer.Get(dataBlock.ReceiptBlock) + if err != nil { + return args, nil, err + } + + argsParser := smartContract.NewArgumentParser() + + scProcArgs := smartContract.ArgsNewSmartContractProcessor{ + VmContainer: vmContainer, + ArgsParser: argsParser, + Hasher: pcf.coreData.Hasher(), + Marshalizer: pcf.coreData.InternalMarshalizer(), + AccountsDB: accountsAdapter, + BlockChainHook: vmContainerFactory.BlockChainHookImpl(), + BuiltInFunctions: builtInFuncFactory.BuiltInFunctionContainer(), + PubkeyConv: pcf.coreData.AddressPubKeyConverter(), + ShardCoordinator: pcf.bootstrapComponents.ShardCoordinator(), + ScrForwarder: scForwarder, + TxFeeHandler: &processDisabled.FeeHandler{}, + EconomicsFee: pcf.coreData.EconomicsData(), + TxTypeHandler: txTypeHandler, + GasHandler: gasHandler, + GasSchedule: pcf.gasSchedule, + TxLogsProcessor: txLogsProcessor, + EnableEpochsHandler: pcf.coreData.EnableEpochsHandler(), + BadTxForwarder: badTxInterim, + VMOutputCacher: vmOutputCacher, + WasmVMChangeLocker: pcf.coreData.WasmVMChangeLocker(), + IsGenesisProcessing: false, + } + + scProcessor, err := smartContract.NewSmartContractProcessor(scProcArgs) + if err != nil { + return args, nil, err + } + + argsTxProcessor := transaction.ArgsNewTxProcessor{ + Accounts: accountsAdapter, 
+ Hasher: pcf.coreData.Hasher(), + PubkeyConv: pcf.coreData.AddressPubKeyConverter(), + Marshalizer: pcf.coreData.InternalMarshalizer(), + SignMarshalizer: pcf.coreData.TxMarshalizer(), + ShardCoordinator: pcf.bootstrapComponents.ShardCoordinator(), + ScProcessor: scProcessor, + TxFeeHandler: txFeeHandler, + TxTypeHandler: txTypeHandler, + EconomicsFee: pcf.coreData.EconomicsData(), + ReceiptForwarder: receiptTxInterim, + BadTxForwarder: badTxInterim, + ArgsParser: argsParser, + ScrForwarder: scForwarder, + EnableEpochsHandler: pcf.coreData.EnableEpochsHandler(), + TxVersionChecker: pcf.coreData.TxVersionChecker(), + GuardianChecker: pcf.bootstrapComponents.GuardedAccountHandler(), + } + + txProcessor, err := transaction.NewTxProcessor(argsTxProcessor) + if err != nil { + return args, nil, err + } + + args.TransactionProcessor = txProcessor + args.IntermediateProcContainer = intermediateProcessorsContainer + + return args, vmContainerFactory, nil +} diff --git a/factory/processing/txSimulatorProcessComponents_test.go b/factory/processing/txSimulatorProcessComponents_test.go new file mode 100644 index 00000000000..94a55251978 --- /dev/null +++ b/factory/processing/txSimulatorProcessComponents_test.go @@ -0,0 +1,53 @@ +package processing_test + +import ( + "testing" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-go/factory/processing" + "github.com/multiversx/mx-chain-go/process/mock" + "github.com/multiversx/mx-chain-go/testscommon/components" + "github.com/stretchr/testify/assert" +) + +func TestManagedProcessComponents_createTxSimulatorProcessor(t *testing.T) { + t.Parallel() + + shardCoordinatorForShardID2 := mock.NewMultiShardsCoordinatorMock(3) + shardCoordinatorForShardID2.CurrentShard = 2 + + shardCoordinatorForMetachain := mock.NewMultiShardsCoordinatorMock(3) + shardCoordinatorForMetachain.CurrentShard = core.MetachainShardId + + // no further t.Parallel as these tests are quite heavy (they open netMessengers and other components that start a lot of goroutines) + t.Run("invalid VMOutputCacher config should error", func(t *testing.T) { + processArgs := components.GetProcessComponentsFactoryArgs(shardCoordinatorForShardID2) + processArgs.Config.VMOutputCacher.Type = "invalid" + pcf, _ := processing.NewProcessComponentsFactory(processArgs) + + txSimulator, vmContainerFactory, err := pcf.CreateTxSimulatorProcessor() + assert.NotNil(t, err) + assert.True(t, check.IfNil(txSimulator)) + assert.True(t, check.IfNil(vmContainerFactory)) + assert.Contains(t, err.Error(), "not supported cache type") + }) + t.Run("should work for shard", func(t *testing.T) { + processArgs := components.GetProcessComponentsFactoryArgs(shardCoordinatorForShardID2) + pcf, _ := processing.NewProcessComponentsFactory(processArgs) + + txSimulator, vmContainerFactory, err := pcf.CreateTxSimulatorProcessor() + assert.Nil(t, err) + assert.False(t, check.IfNil(txSimulator)) + assert.False(t, check.IfNil(vmContainerFactory)) + }) + t.Run("should work for metachain", func(t *testing.T) { + processArgs := components.GetProcessComponentsFactoryArgs(shardCoordinatorForMetachain) + pcf, _ := processing.NewProcessComponentsFactory(processArgs) + + txSimulator, vmContainerFactory, err := pcf.CreateTxSimulatorProcessor() + assert.Nil(t, err) + assert.False(t, check.IfNil(txSimulator)) + assert.False(t, check.IfNil(vmContainerFactory)) + }) +} diff --git a/factory/state/stateComponents.go b/factory/state/stateComponents.go index 
671f78b8bd1..1778f0e103c 100644 --- a/factory/state/stateComponents.go +++ b/factory/state/stateComponents.go @@ -14,6 +14,7 @@ import ( factoryState "github.com/multiversx/mx-chain-go/state/factory" "github.com/multiversx/mx-chain-go/state/storagePruningManager" "github.com/multiversx/mx-chain-go/state/storagePruningManager/evictionWaitingList" + "github.com/multiversx/mx-chain-go/state/syncer" trieFactory "github.com/multiversx/mx-chain-go/trie/factory" ) @@ -44,12 +45,13 @@ type stateComponentsFactory struct { // stateComponents struct holds the state components of the MultiversX protocol type stateComponents struct { - peerAccounts state.AccountsAdapter - accountsAdapter state.AccountsAdapter - accountsAdapterAPI state.AccountsAdapter - accountsRepository state.AccountsRepository - triesContainer common.TriesHolder - trieStorageManagers map[string]common.StorageManager + peerAccounts state.AccountsAdapter + accountsAdapter state.AccountsAdapter + accountsAdapterAPI state.AccountsAdapter + accountsRepository state.AccountsRepository + triesContainer common.TriesHolder + trieStorageManagers map[string]common.StorageManager + missingTrieNodesNotifier common.MissingTrieNodesNotifier } // NewStateComponentsFactory will return a new instance of stateComponentsFactory @@ -99,18 +101,19 @@ func (scf *stateComponentsFactory) Create() (*stateComponents, error) { } return &stateComponents{ - peerAccounts: peerAdapter, - accountsAdapter: accountsAdapter, - accountsAdapterAPI: accountsAdapterAPI, - accountsRepository: accountsRepository, - triesContainer: triesContainer, - trieStorageManagers: trieStorageManagers, + peerAccounts: peerAdapter, + accountsAdapter: accountsAdapter, + accountsAdapterAPI: accountsAdapterAPI, + accountsRepository: accountsRepository, + triesContainer: triesContainer, + trieStorageManagers: trieStorageManagers, + missingTrieNodesNotifier: syncer.NewMissingTrieNodesNotifier(), }, nil } func (scf *stateComponentsFactory) createAccountsAdapters(triesContainer common.TriesHolder) (state.AccountsAdapter, state.AccountsAdapter, state.AccountsRepository, error) { accountFactory := factoryState.NewAccountCreator() - merkleTrie := triesContainer.Get([]byte(trieFactory.UserAccountTrie)) + merkleTrie := triesContainer.Get([]byte(dataRetriever.UserAccountsUnit.String())) storagePruning, err := scf.newStoragePruningManager() if err != nil { return nil, nil, nil, err @@ -176,7 +179,7 @@ func (scf *stateComponentsFactory) createAccountsAdapters(triesContainer common. 
func (scf *stateComponentsFactory) createPeerAdapter(triesContainer common.TriesHolder) (state.AccountsAdapter, error) { accountFactory := factoryState.NewPeerAccountCreator() - merkleTrie := triesContainer.Get([]byte(trieFactory.PeerAccountTrie)) + merkleTrie := triesContainer.Get([]byte(dataRetriever.PeerAccountsUnit.String())) storagePruning, err := scf.newStoragePruningManager() if err != nil { return nil, err diff --git a/factory/state/stateComponentsHandler.go b/factory/state/stateComponentsHandler.go index 0fe1465d450..78271a28ffe 100644 --- a/factory/state/stateComponentsHandler.go +++ b/factory/state/stateComponentsHandler.go @@ -90,6 +90,9 @@ func (msc *managedStateComponents) CheckSubcomponents() error { return errors.ErrNilTrieStorageManager } } + if check.IfNil(msc.missingTrieNodesNotifier) { + return errors.ErrNilMissingTrieNodesNotifier + } return nil } @@ -199,6 +202,18 @@ func (msc *managedStateComponents) SetTriesStorageManagers(managers map[string]c return nil } +// MissingTrieNodesNotifier returns the missing trie nodes notifier +func (msc *managedStateComponents) MissingTrieNodesNotifier() common.MissingTrieNodesNotifier { + msc.mutStateComponents.RLock() + defer msc.mutStateComponents.RUnlock() + + if msc.stateComponents == nil { + return nil + } + + return msc.stateComponents.missingTrieNodesNotifier +} + // IsInterfaceNil returns true if the interface is nil func (msc *managedStateComponents) IsInterfaceNil() bool { return msc == nil diff --git a/factory/state/stateComponentsHandler_test.go b/factory/state/stateComponentsHandler_test.go index dd429650df1..ba552ed416a 100644 --- a/factory/state/stateComponentsHandler_test.go +++ b/factory/state/stateComponentsHandler_test.go @@ -7,8 +7,8 @@ import ( errorsMx "github.com/multiversx/mx-chain-go/errors" "github.com/multiversx/mx-chain-go/factory" stateComp "github.com/multiversx/mx-chain-go/factory/state" - "github.com/multiversx/mx-chain-go/testscommon" componentsMock "github.com/multiversx/mx-chain-go/testscommon/components" + "github.com/multiversx/mx-chain-go/testscommon/storageManager" trieMock "github.com/multiversx/mx-chain-go/testscommon/trie" "github.com/stretchr/testify/require" ) @@ -66,6 +66,7 @@ func TestManagedStateComponents_Create(t *testing.T) { require.Nil(t, managedStateComponents.TrieStorageManagers()) require.Nil(t, managedStateComponents.AccountsAdapterAPI()) require.Nil(t, managedStateComponents.AccountsRepository()) + require.Nil(t, managedStateComponents.MissingTrieNodesNotifier()) err = managedStateComponents.Create() require.NoError(t, err) @@ -75,6 +76,7 @@ func TestManagedStateComponents_Create(t *testing.T) { require.NotNil(t, managedStateComponents.TrieStorageManagers()) require.NotNil(t, managedStateComponents.AccountsAdapterAPI()) require.NotNil(t, managedStateComponents.AccountsRepository()) + require.NotNil(t, managedStateComponents.MissingTrieNodesNotifier()) require.Equal(t, factory.StateComponentsName, managedStateComponents.String()) require.NoError(t, managedStateComponents.Close()) @@ -126,7 +128,7 @@ func TestManagedStateComponents_Setters(t *testing.T) { require.NoError(t, err) triesContainer := &trieMock.TriesHolderStub{} - triesStorageManagers := map[string]common.StorageManager{"a": &testscommon.StorageManagerStub{}} + triesStorageManagers := map[string]common.StorageManager{"a": &storageManager.StorageManagerStub{}} err = managedStateComponents.SetTriesContainer(nil) require.Equal(t, errorsMx.ErrNilTriesContainer, err) diff --git a/factory/status/statusComponents.go 
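The state components bundle now carries a missing-trie-nodes notifier next to the tries and adapters, and the managed wrapper exposes it behind the usual nil guard. A minimal consumer-side sketch, assuming the StateComponentsHolder interface gains the same MissingTrieNodesNotifier() getter that managedStateComponents implements here:

package example

import (
	"github.com/multiversx/mx-chain-core-go/core/check"
	"github.com/multiversx/mx-chain-go/common"
	"github.com/multiversx/mx-chain-go/errors"
	"github.com/multiversx/mx-chain-go/factory"
)

// getMissingTrieNodesNotifier mirrors the nil guard that CheckSubcomponents performs above.
// It assumes StateComponentsHolder exposes the getter added to managedStateComponents in this diff.
func getMissingTrieNodesNotifier(stateComponents factory.StateComponentsHolder) (common.MissingTrieNodesNotifier, error) {
	notifier := stateComponents.MissingTrieNodesNotifier()
	if check.IfNil(notifier) {
		return nil, errors.ErrNilMissingTrieNodesNotifier
	}

	return notifier, nil
}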
b/factory/status/statusComponents.go index c2898767ef3..44c2c0b4b2f 100644 --- a/factory/status/statusComponents.go +++ b/factory/status/statusComponents.go @@ -6,9 +6,8 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" nodeData "github.com/multiversx/mx-chain-core-go/data" + outportCore "github.com/multiversx/mx-chain-core-go/data/outport" factoryMarshalizer "github.com/multiversx/mx-chain-core-go/marshal/factory" - "github.com/multiversx/mx-chain-core-go/websocketOutportDriver/data" - wsDriverFactory "github.com/multiversx/mx-chain-core-go/websocketOutportDriver/factory" indexerFactory "github.com/multiversx/mx-chain-es-indexer-go/process/factory" "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/common/statistics" @@ -162,7 +161,10 @@ func (pc *statusComponents) epochStartEventHandler() epochStart.ActionHandler { "error", err.Error()) } - pc.outportHandler.SaveValidatorsPubKeys(validatorsPubKeys, currentEpoch) + pc.outportHandler.SaveValidatorsPubKeys(&outportCore.ValidatorsPubKeys{ + ShardValidatorsPubKeys: outportCore.ConvertPubKeys(validatorsPubKeys), + Epoch: currentEpoch, + }) }, func(_ nodeData.HeaderHandler) {}, common.IndexerOrder) @@ -188,7 +190,12 @@ func (pc *statusComponents) Close() error { // createOutportDriver creates a new outport.OutportHandler which is used to register outport drivers // once a driver is subscribed it will receive data through the implemented outport.Driver methods func (scf *statusComponentsFactory) createOutportDriver() (outport.OutportHandler, error) { - webSocketSenderDriverFactoryArgs, err := scf.makeWebSocketDriverArgs() + hostDriverArgs, err := scf.makeHostDriverArgs() + if err != nil { + return nil, err + } + + eventNotifierArgs, err := scf.makeEventNotifierArgs() if err != nil { return nil, err } @@ -196,11 +203,8 @@ func (scf *statusComponentsFactory) createOutportDriver() (outport.OutportHandle outportFactoryArgs := &outportDriverFactory.OutportFactoryArgs{ RetrialInterval: common.RetrialIntervalForOutportDriver, ElasticIndexerFactoryArgs: scf.makeElasticIndexerArgs(), - EventNotifierFactoryArgs: scf.makeEventNotifierArgs(), - WebSocketSenderDriverFactoryArgs: outportDriverFactory.WrappedOutportDriverWebSocketSenderFactoryArgs{ - Enabled: scf.externalConfig.WebSocketConnector.Enabled, - OutportDriverWebSocketSenderFactoryArgs: webSocketSenderDriverFactoryArgs, - }, + EventNotifierFactoryArgs: eventNotifierArgs, + HostDriverArgs: hostDriverArgs, } return outportDriverFactory.CreateOutport(outportFactoryArgs) @@ -222,11 +226,19 @@ func (scf *statusComponentsFactory) makeElasticIndexerArgs() indexerFactory.Args EnabledIndexes: elasticSearchConfig.EnabledIndexes, Denomination: scf.economicsConfig.GlobalSettings.Denomination, UseKibana: elasticSearchConfig.UseKibana, + ImportDB: scf.isInImportMode, + HeaderMarshaller: scf.coreComponents.InternalMarshalizer(), } } -func (scf *statusComponentsFactory) makeEventNotifierArgs() *outportDriverFactory.EventNotifierFactoryArgs { +func (scf *statusComponentsFactory) makeEventNotifierArgs() (*outportDriverFactory.EventNotifierFactoryArgs, error) { eventNotifierConfig := scf.externalConfig.EventNotifierConnector + + marshaller, err := factoryMarshalizer.NewMarshalizer(eventNotifierConfig.MarshallerType) + if err != nil { + return &outportDriverFactory.EventNotifierFactoryArgs{}, err + } + return &outportDriverFactory.EventNotifierFactoryArgs{ Enabled: eventNotifierConfig.Enabled, UseAuthorization: 
eventNotifierConfig.UseAuthorization, @@ -234,30 +246,22 @@ func (scf *statusComponentsFactory) makeEventNotifierArgs() *outportDriverFactor Username: eventNotifierConfig.Username, Password: eventNotifierConfig.Password, RequestTimeoutSec: eventNotifierConfig.RequestTimeoutSec, - Marshaller: scf.coreComponents.InternalMarshalizer(), - Hasher: scf.coreComponents.Hasher(), - PubKeyConverter: scf.coreComponents.AddressPubKeyConverter(), - } + Marshaller: marshaller, + }, nil } -func (scf *statusComponentsFactory) makeWebSocketDriverArgs() (wsDriverFactory.OutportDriverWebSocketSenderFactoryArgs, error) { - if !scf.externalConfig.WebSocketConnector.Enabled { - return wsDriverFactory.OutportDriverWebSocketSenderFactoryArgs{}, nil +func (scf *statusComponentsFactory) makeHostDriverArgs() (outportDriverFactory.ArgsHostDriverFactory, error) { + if !scf.externalConfig.HostDriverConfig.Enabled { + return outportDriverFactory.ArgsHostDriverFactory{}, nil } - marshaller, err := factoryMarshalizer.NewMarshalizer(scf.externalConfig.WebSocketConnector.MarshallerType) + marshaller, err := factoryMarshalizer.NewMarshalizer(scf.externalConfig.HostDriverConfig.MarshallerType) if err != nil { - return wsDriverFactory.OutportDriverWebSocketSenderFactoryArgs{}, err + return outportDriverFactory.ArgsHostDriverFactory{}, err } - return wsDriverFactory.OutportDriverWebSocketSenderFactoryArgs{ + return outportDriverFactory.ArgsHostDriverFactory{ Marshaller: marshaller, - WebSocketConfig: data.WebSocketConfig{ - URL: scf.externalConfig.WebSocketConnector.URL, - WithAcknowledge: scf.externalConfig.WebSocketConnector.WithAcknowledge, - }, - Uint64ByteSliceConverter: scf.coreComponents.Uint64ByteSliceConverter(), - Log: log, - WithAcknowledge: scf.externalConfig.WebSocketConnector.WithAcknowledge, + HostConfig: scf.externalConfig.HostDriverConfig, }, nil } diff --git a/factory/status/statusComponents_test.go b/factory/status/statusComponents_test.go index 5240fc11ba7..c27489057e0 100644 --- a/factory/status/statusComponents_test.go +++ b/factory/status/statusComponents_test.go @@ -30,7 +30,10 @@ func createMockStatusComponentsFactoryArgs() statusComp.StatusComponentsFactoryA Password: "pass", EnabledIndexes: []string{"transactions", "blocks"}, }, - WebSocketConnector: config.WebSocketDriverConfig{ + HostDriverConfig: config.HostDriverConfig{ + MarshallerType: "json", + }, + EventNotifierConnector: config.EventNotifierConfig{ MarshallerType: "json", }, }, @@ -184,8 +187,8 @@ func TestStatusComponentsFactory_Create(t *testing.T) { t.Parallel() args := createMockStatusComponentsFactoryArgs() - args.ExternalConfig.WebSocketConnector.Enabled = true - args.ExternalConfig.WebSocketConnector.MarshallerType = "invalid type" + args.ExternalConfig.HostDriverConfig.Enabled = true + args.ExternalConfig.HostDriverConfig.MarshallerType = "invalid type" scf, _ := statusComp.NewStatusComponentsFactory(args) require.NotNil(t, scf) @@ -201,7 +204,7 @@ func TestStatusComponentsFactory_Create(t *testing.T) { return core.MetachainShardId // coverage } args, _ := componentsMock.GetStatusComponentsFactoryArgsAndProcessComponents(shardCoordinator) - args.ExternalConfig.WebSocketConnector.Enabled = true // coverage + args.ExternalConfig.HostDriverConfig.Enabled = true // coverage scf, err := statusComp.NewStatusComponentsFactory(args) require.Nil(t, err) diff --git a/genesis/errors.go b/genesis/errors.go index 2553b9650aa..1c0330e4cad 100644 --- a/genesis/errors.go +++ b/genesis/errors.go @@ -167,8 +167,8 @@ var ErrBLSKeyNotStaked = 
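The former WebSocketConnector wiring is replaced by a generic host driver: the outport factory now receives only a marshaller built from the configured MarshallerType plus the raw HostDriverConfig, and the event notifier builds its own marshaller the same way. A hedged sketch of the external config shape exercised by the updated tests; any fields beyond Enabled and MarshallerType are not shown in this diff and are left at their zero values:

package example

import "github.com/multiversx/mx-chain-go/config"

// newExternalConfig builds the minimal external config used by the updated status components tests.
func newExternalConfig() config.ExternalConfig {
	return config.ExternalConfig{
		HostDriverConfig: config.HostDriverConfig{
			Enabled:        true,
			MarshallerType: "json",
		},
		EventNotifierConnector: config.EventNotifierConfig{
			MarshallerType: "json",
		},
	}
}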
errors.New("bls key not staked") // ErrMissingDeployedSC signals that a delegation referenced an un-deployed contract var ErrMissingDeployedSC = errors.New("missing deployed SC") -// ErrNilGeneralSettingsConfig signals that a nil general settings config was provided -var ErrNilGeneralSettingsConfig = errors.New("nil general settings config") - // ErrNilEpochConfig signals that a nil epoch config was provided var ErrNilEpochConfig = errors.New("nil epoch config") + +// ErrNilGasSchedule signals that an operation has been attempted with a nil gas schedule +var ErrNilGasSchedule = errors.New("nil gas schedule") diff --git a/genesis/interface.go b/genesis/interface.go index 8396e9845a5..1a618a44efe 100644 --- a/genesis/interface.go +++ b/genesis/interface.go @@ -34,7 +34,7 @@ type AccountsParser interface { GenesisMintingAddress() string GetTotalStakedForDelegationAddress(delegationAddress string) *big.Int GetInitialAccountsForDelegated(addressBytes []byte) []InitialAccountHandler - GenerateInitialTransactions(shardCoordinator sharding.Coordinator, initialIndexingData map[uint32]*IndexingData) ([]*block.MiniBlock, map[uint32]*outport.Pool, error) + GenerateInitialTransactions(shardCoordinator sharding.Coordinator, initialIndexingData map[uint32]*IndexingData) ([]*block.MiniBlock, map[uint32]*outport.TransactionPool, error) IsInterfaceNil() bool } diff --git a/genesis/mock/accountsParserStub.go b/genesis/mock/accountsParserStub.go index 27066140982..436a8a418de 100644 --- a/genesis/mock/accountsParserStub.go +++ b/genesis/mock/accountsParserStub.go @@ -16,7 +16,7 @@ type AccountsParserStub struct { InitialAccountsCalled func() []genesis.InitialAccountHandler GetTotalStakedForDelegationAddressCalled func(delegationAddress string) *big.Int GetInitialAccountsForDelegatedCalled func(addressBytes []byte) []genesis.InitialAccountHandler - GenerateInitialTransactionsCalled func(shardCoordinator sharding.Coordinator, initialIndexingData map[uint32]*genesis.IndexingData) ([]*block.MiniBlock, map[uint32]*outport.Pool, error) + GenerateInitialTransactionsCalled func(shardCoordinator sharding.Coordinator, initialIndexingData map[uint32]*genesis.IndexingData) ([]*block.MiniBlock, map[uint32]*outport.TransactionPool, error) GenesisMintingAddressCalled func() string } @@ -75,12 +75,12 @@ func (aps *AccountsParserStub) InitialAccounts() []genesis.InitialAccountHandler } // GenerateInitialTransactions - -func (aps *AccountsParserStub) GenerateInitialTransactions(shardCoordinator sharding.Coordinator, initialIndexingData map[uint32]*genesis.IndexingData) ([]*block.MiniBlock, map[uint32]*outport.Pool, error) { +func (aps *AccountsParserStub) GenerateInitialTransactions(shardCoordinator sharding.Coordinator, initialIndexingData map[uint32]*genesis.IndexingData) ([]*block.MiniBlock, map[uint32]*outport.TransactionPool, error) { if aps.GenerateInitialTransactionsCalled != nil { return aps.GenerateInitialTransactionsCalled(shardCoordinator, initialIndexingData) } - return make([]*block.MiniBlock, 0), make(map[uint32]*outport.Pool), nil + return make([]*block.MiniBlock, 0), make(map[uint32]*outport.TransactionPool), nil } // IsInterfaceNil - diff --git a/genesis/mock/storageManagerStub.go b/genesis/mock/storageManagerStub.go index e6b686b726c..d881d8e3b2f 100644 --- a/genesis/mock/storageManagerStub.go +++ b/genesis/mock/storageManagerStub.go @@ -7,13 +7,13 @@ import ( // StorageManagerStub - type StorageManagerStub struct { - DatabaseCalled func() common.DBWriteCacher + DatabaseCalled func() common.BaseStorer 
TakeSnapshotCalled func([]byte) SetCheckpointCalled func([]byte) PruneCalled func([]byte) CancelPruneCalled func([]byte) MarkForEvictionCalled func([]byte, common.ModifiedHashes) error - GetDbThatContainsHashCalled func([]byte) common.DBWriteCacher + GetDbThatContainsHashCalled func([]byte) common.BaseStorer GetSnapshotThatContainsHashCalled func(rootHash []byte) common.SnapshotDbHandler IsPruningEnabledCalled func() bool EnterSnapshotModeCalled func() @@ -22,7 +22,7 @@ type StorageManagerStub struct { } // Database - -func (sms *StorageManagerStub) Database() common.DBWriteCacher { +func (sms *StorageManagerStub) Database() common.BaseStorer { if sms.DatabaseCalled != nil { return sms.DatabaseCalled() } diff --git a/genesis/parsing/accountsParser.go b/genesis/parsing/accountsParser.go index 1dd7f7b1966..a4a64ffb343 100644 --- a/genesis/parsing/accountsParser.go +++ b/genesis/parsing/accountsParser.go @@ -302,13 +302,13 @@ func (ap *accountsParser) GetInitialAccountsForDelegated(addressBytes []byte) [] return list } -func (ap *accountsParser) createIndexerPools(shardIDs []uint32) map[uint32]*outportcore.Pool { - txsPoolPerShard := make(map[uint32]*outportcore.Pool) +func (ap *accountsParser) createIndexerPools(shardIDs []uint32) map[uint32]*outportcore.TransactionPool { + txsPoolPerShard := make(map[uint32]*outportcore.TransactionPool) for _, id := range shardIDs { - txsPoolPerShard[id] = &outportcore.Pool{ - Txs: make(map[string]coreData.TransactionHandlerWithGasUsedAndFee), - Scrs: make(map[string]coreData.TransactionHandlerWithGasUsedAndFee), + txsPoolPerShard[id] = &outportcore.TransactionPool{ + Transactions: make(map[string]*outportcore.TxInfo), + SmartContractResults: make(map[string]*outportcore.SCRInfo), } } @@ -388,7 +388,7 @@ func (ap *accountsParser) getAllTxs( func (ap *accountsParser) setScrsTxsPool( shardCoordinator sharding.Coordinator, indexingData map[uint32]*genesis.IndexingData, - txsPoolPerShard map[uint32]*outportcore.Pool, + txsPoolPerShard map[uint32]*outportcore.TransactionPool, ) { for _, id := range indexingData { for txHash, tx := range id.ScrsTxs { @@ -401,8 +401,14 @@ func (ap *accountsParser) setScrsTxsPool( } scrTx.GasLimit = uint64(0) - txsPoolPerShard[senderShardID].Scrs[txHash] = outportcore.NewTransactionHandlerWithGasAndFee(scrTx, 0, big.NewInt(0)) - txsPoolPerShard[receiverShardID].Scrs[txHash] = outportcore.NewTransactionHandlerWithGasAndFee(scrTx, 0, big.NewInt(0)) + txsPoolPerShard[senderShardID].SmartContractResults[txHash] = &outportcore.SCRInfo{ + SmartContractResult: scrTx, + FeeInfo: &outportcore.FeeInfo{Fee: big.NewInt(0)}, + } + txsPoolPerShard[receiverShardID].SmartContractResults[txHash] = &outportcore.SCRInfo{ + SmartContractResult: scrTx, + FeeInfo: &outportcore.FeeInfo{Fee: big.NewInt(0)}, + } } } } @@ -410,7 +416,7 @@ func (ap *accountsParser) setScrsTxsPool( func (ap *accountsParser) setTxsPoolAndMiniBlocks( shardCoordinator sharding.Coordinator, allTxs []coreData.TransactionHandler, - txsPoolPerShard map[uint32]*outportcore.Pool, + txsPoolPerShard map[uint32]*outportcore.TransactionPool, miniBlocks []*block.MiniBlock, ) error { @@ -436,8 +442,15 @@ func (ap *accountsParser) setTxsPoolAndMiniBlocks( tx.Signature = []byte(common.GenesisTxSignatureString) tx.GasLimit = uint64(0) - txsPoolPerShard[senderShardID].Txs[string(txHash)] = outportcore.NewTransactionHandlerWithGasAndFee(tx, 0, big.NewInt(0)) - txsPoolPerShard[receiverShardID].Txs[string(txHash)] = outportcore.NewTransactionHandlerWithGasAndFee(tx, 0, big.NewInt(0)) + 
txsPoolPerShard[senderShardID].Transactions[string(txHash)] = &outportcore.TxInfo{ + Transaction: tx, + FeeInfo: &outportcore.FeeInfo{Fee: big.NewInt(0)}, + } + + txsPoolPerShard[receiverShardID].Transactions[string(txHash)] = &outportcore.TxInfo{ + Transaction: tx, + FeeInfo: &outportcore.FeeInfo{Fee: big.NewInt(0)}, + } for _, miniBlock := range miniBlocks { if senderShardID == miniBlock.GetSenderShardID() && @@ -466,7 +479,7 @@ func getNonEmptyMiniBlocks(miniBlocks []*block.MiniBlock) []*block.MiniBlock { func (ap *accountsParser) GenerateInitialTransactions( shardCoordinator sharding.Coordinator, indexingData map[uint32]*genesis.IndexingData, -) ([]*block.MiniBlock, map[uint32]*outportcore.Pool, error) { +) ([]*block.MiniBlock, map[uint32]*outportcore.TransactionPool, error) { if check.IfNil(shardCoordinator) { return nil, nil, genesis.ErrNilShardCoordinator } diff --git a/genesis/parsing/accountsParser_test.go b/genesis/parsing/accountsParser_test.go index ca45b0f3649..53253086210 100644 --- a/genesis/parsing/accountsParser_test.go +++ b/genesis/parsing/accountsParser_test.go @@ -593,17 +593,17 @@ func TestAccountsParser_setScrsTxsPool(t *testing.T) { indexingDataMap[i] = indexingData } - txsPoolPerShard := make(map[uint32]*outport.Pool) + txsPoolPerShard := make(map[uint32]*outport.TransactionPool) for i := uint32(0); i < sharder.NumOfShards; i++ { - txsPoolPerShard[i] = &outport.Pool{ - Scrs: map[string]coreData.TransactionHandlerWithGasUsedAndFee{}, + txsPoolPerShard[i] = &outport.TransactionPool{ + SmartContractResults: map[string]*outport.SCRInfo{}, } } ap.SetScrsTxsPool(sharder, indexingDataMap, txsPoolPerShard) assert.Equal(t, 1, len(txsPoolPerShard)) - assert.Equal(t, uint64(0), txsPoolPerShard[0].Scrs["hash"].GetGasLimit()) - assert.Equal(t, uint64(1), txsPoolPerShard[0].Scrs["hash"].GetNonce()) + assert.Equal(t, uint64(0), txsPoolPerShard[0].SmartContractResults["hash"].SmartContractResult.GetGasLimit()) + assert.Equal(t, uint64(1), txsPoolPerShard[0].SmartContractResults["hash"].SmartContractResult.GetNonce()) } func TestAccountsParser_GenerateInitialTransactionsTxsPool(t *testing.T) { @@ -645,21 +645,21 @@ func TestAccountsParser_GenerateInitialTransactionsTxsPool(t *testing.T) { assert.Equal(t, 2, len(miniBlocks)) assert.Equal(t, 3, len(txsPoolPerShard)) - assert.Equal(t, 1, len(txsPoolPerShard[0].Txs)) - assert.Equal(t, 1, len(txsPoolPerShard[1].Txs)) - assert.Equal(t, len(ibs), len(txsPoolPerShard[core.MetachainShardId].Txs)) - assert.Equal(t, 0, len(txsPoolPerShard[0].Scrs)) - assert.Equal(t, 0, len(txsPoolPerShard[1].Scrs)) - assert.Equal(t, 0, len(txsPoolPerShard[core.MetachainShardId].Scrs)) - - for _, tx := range txsPoolPerShard[1].Txs { - assert.Equal(t, ibs[0].GetSupply(), tx.GetValue()) - assert.Equal(t, ibs[0].AddressBytes(), tx.GetRcvAddr()) + assert.Equal(t, 1, len(txsPoolPerShard[0].Transactions)) + assert.Equal(t, 1, len(txsPoolPerShard[1].Transactions)) + assert.Equal(t, len(ibs), len(txsPoolPerShard[core.MetachainShardId].Transactions)) + assert.Equal(t, 0, len(txsPoolPerShard[0].SmartContractResults)) + assert.Equal(t, 0, len(txsPoolPerShard[1].SmartContractResults)) + assert.Equal(t, 0, len(txsPoolPerShard[core.MetachainShardId].SmartContractResults)) + + for _, tx := range txsPoolPerShard[1].Transactions { + assert.Equal(t, ibs[0].GetSupply(), tx.Transaction.GetValue()) + assert.Equal(t, ibs[0].AddressBytes(), tx.Transaction.GetRcvAddr()) } - for _, tx := range txsPoolPerShard[0].Txs { - assert.Equal(t, ibs[1].GetSupply(), tx.GetValue()) - 
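The genesis indexing pools move from the old outport.Pool, where gas and fee were baked into the transaction handler, to outport.TransactionPool, where each entry pairs the transaction or smart contract result with an explicit FeeInfo. A small sketch of filling one pool the way the parser now does, using only types visible in this diff; the hash and transaction values are placeholders:

package example

import (
	"math/big"

	outportcore "github.com/multiversx/mx-chain-core-go/data/outport"
	"github.com/multiversx/mx-chain-core-go/data/transaction"
)

// newGenesisPool shows the per-shard TransactionPool layout the parser builds for genesis transactions.
func newGenesisPool(txHash string, genesisTx *transaction.Transaction) *outportcore.TransactionPool {
	pool := &outportcore.TransactionPool{
		Transactions:         make(map[string]*outportcore.TxInfo),
		SmartContractResults: make(map[string]*outportcore.SCRInfo),
	}
	pool.Transactions[txHash] = &outportcore.TxInfo{
		Transaction: genesisTx,
		FeeInfo:     &outportcore.FeeInfo{Fee: big.NewInt(0)},
	}

	return pool
}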
assert.Equal(t, ibs[1].AddressBytes(), tx.GetRcvAddr()) + for _, tx := range txsPoolPerShard[0].Transactions { + assert.Equal(t, ibs[1].GetSupply(), tx.Transaction.GetValue()) + assert.Equal(t, ibs[1].AddressBytes(), tx.Transaction.GetRcvAddr()) } } @@ -692,8 +692,8 @@ func TestAccountsParser_GenerateInitialTransactionsZeroGasLimitShouldWork(t *tes require.Nil(t, err) for i := uint32(0); i < sharder.NumberOfShards(); i++ { - for _, tx := range txsPoolPerShard[i].Txs { - assert.Equal(t, uint64(0), tx.GetGasLimit()) + for _, tx := range txsPoolPerShard[i].Transactions { + assert.Equal(t, uint64(0), tx.Transaction.GetGasLimit()) } } } @@ -737,10 +737,10 @@ func TestAccountsParser_GenerateInitialTransactionsVerifyTxsHashes(t *testing.T) assert.Equal(t, 1, len(miniBlocks)) assert.Equal(t, 2, len(txsPoolPerShard)) - assert.Equal(t, 1, len(txsPoolPerShard[0].Txs)) + assert.Equal(t, 1, len(txsPoolPerShard[0].Transactions)) - for hashString, v := range txsPoolPerShard[0].Txs { + for hashString, v := range txsPoolPerShard[0].Transactions { assert.Equal(t, txHash, []byte(hashString)) - assert.Equal(t, tx, v.GetTxHandler()) + assert.Equal(t, tx, v.Transaction) } } diff --git a/genesis/parsing/export_test.go b/genesis/parsing/export_test.go index 58f21b1aa76..e1bbcdcc8d4 100644 --- a/genesis/parsing/export_test.go +++ b/genesis/parsing/export_test.go @@ -43,7 +43,7 @@ func (ap *accountsParser) CreateMintTransactions() []coreData.TransactionHandler func (ap *accountsParser) SetScrsTxsPool( shardCoordinator sharding.Coordinator, indexingData map[uint32]*genesis.IndexingData, - txsPoolPerShard map[uint32]*outport.Pool, + txsPoolPerShard map[uint32]*outport.TransactionPool, ) { ap.setScrsTxsPool(shardCoordinator, indexingData, txsPoolPerShard) } diff --git a/genesis/process/genesisBlockCreator.go b/genesis/process/genesisBlockCreator.go index 2939fa6014f..2fde795be1f 100644 --- a/genesis/process/genesisBlockCreator.go +++ b/genesis/process/genesisBlockCreator.go @@ -24,11 +24,11 @@ import ( "github.com/multiversx/mx-chain-go/process/smartContract/hooks/counters" "github.com/multiversx/mx-chain-go/sharding" factoryState "github.com/multiversx/mx-chain-go/state/factory" + "github.com/multiversx/mx-chain-go/state/syncer" "github.com/multiversx/mx-chain-go/statusHandler" "github.com/multiversx/mx-chain-go/storage" "github.com/multiversx/mx-chain-go/storage/factory" "github.com/multiversx/mx-chain-go/storage/storageunit" - triesFactory "github.com/multiversx/mx-chain-go/trie/factory" "github.com/multiversx/mx-chain-go/update" hardfork "github.com/multiversx/mx-chain-go/update/genesis" hardForkProcess "github.com/multiversx/mx-chain-go/update/process" @@ -204,6 +204,9 @@ func checkArgumentsForBlockCreator(arg ArgsGenesisBlockCreator) error { if arg.EpochConfig == nil { return genesis.ErrNilEpochConfig } + if arg.GasSchedule == nil { + return genesis.ErrNilGasSchedule + } return nil } @@ -432,23 +435,24 @@ func (gbc *genesisBlockCreator) computeDNSAddresses(enableEpochsConfig config.En builtInFuncs := vmcommonBuiltInFunctions.NewBuiltInFunctionContainer() argsHook := hooks.ArgBlockChainHook{ - Accounts: gbc.arg.Accounts, - PubkeyConv: gbc.arg.Core.AddressPubKeyConverter(), - StorageService: gbc.arg.Data.StorageService(), - BlockChain: gbc.arg.Data.Blockchain(), - ShardCoordinator: gbc.arg.ShardCoordinator, - Marshalizer: gbc.arg.Core.InternalMarshalizer(), - Uint64Converter: gbc.arg.Core.Uint64ByteSliceConverter(), - BuiltInFunctions: builtInFuncs, - NFTStorageHandler: &disabled.SimpleNFTStorage{}, - 
GlobalSettingsHandler: &disabled.ESDTGlobalSettingsHandler{}, - DataPool: gbc.arg.Data.Datapool(), - CompiledSCPool: gbc.arg.Data.Datapool().SmartContracts(), - EpochNotifier: epochNotifier, - EnableEpochsHandler: enableEpochsHandler, - NilCompiledSCStore: true, - GasSchedule: gbc.arg.GasSchedule, - Counter: counters.NewDisabledCounter(), + Accounts: gbc.arg.Accounts, + PubkeyConv: gbc.arg.Core.AddressPubKeyConverter(), + StorageService: gbc.arg.Data.StorageService(), + BlockChain: gbc.arg.Data.Blockchain(), + ShardCoordinator: gbc.arg.ShardCoordinator, + Marshalizer: gbc.arg.Core.InternalMarshalizer(), + Uint64Converter: gbc.arg.Core.Uint64ByteSliceConverter(), + BuiltInFunctions: builtInFuncs, + NFTStorageHandler: &disabled.SimpleNFTStorage{}, + GlobalSettingsHandler: &disabled.ESDTGlobalSettingsHandler{}, + DataPool: gbc.arg.Data.Datapool(), + CompiledSCPool: gbc.arg.Data.Datapool().SmartContracts(), + EpochNotifier: epochNotifier, + EnableEpochsHandler: enableEpochsHandler, + NilCompiledSCStore: true, + GasSchedule: gbc.arg.GasSchedule, + Counter: counters.NewDisabledCounter(), + MissingTrieNodesNotifier: syncer.NewMissingTrieNodesNotifier(), } blockChainHook, err := hooks.NewBlockChainHookImpl(argsHook) if err != nil { @@ -493,7 +497,7 @@ func (gbc *genesisBlockCreator) getNewArgForShard(shardID uint32) (ArgsGenesisBl newArgument.Core.InternalMarshalizer(), newArgument.Core.Hasher(), factoryState.NewAccountCreator(), - gbc.arg.TrieStorageManagers[triesFactory.UserAccountTrie], + gbc.arg.TrieStorageManagers[dataRetriever.UserAccountsUnit.String()], gbc.arg.Core.AddressPubKeyConverter(), ) if err != nil { diff --git a/genesis/process/genesisBlockCreator_test.go b/genesis/process/genesisBlockCreator_test.go index 07ea7f986db..db4e29072d8 100644 --- a/genesis/process/genesisBlockCreator_test.go +++ b/genesis/process/genesisBlockCreator_test.go @@ -33,7 +33,6 @@ import ( stateMock "github.com/multiversx/mx-chain-go/testscommon/state" storageCommon "github.com/multiversx/mx-chain-go/testscommon/storage" "github.com/multiversx/mx-chain-go/trie" - "github.com/multiversx/mx-chain-go/trie/factory" "github.com/multiversx/mx-chain-go/update" updateMock "github.com/multiversx/mx-chain-go/update/mock" "github.com/multiversx/mx-chain-go/vm/systemSmartContracts/defaults" @@ -53,12 +52,12 @@ func createMockArgument( entireSupply *big.Int, ) ArgsGenesisBlockCreator { - storageManagerArgs, options := storageCommon.GetStorageManagerArgsAndOptions() - storageManager, _ := trie.CreateTrieStorageManager(storageManagerArgs, options) + storageManagerArgs := storageCommon.GetStorageManagerArgs() + storageManager, _ := trie.CreateTrieStorageManager(storageManagerArgs, storageCommon.GetStorageManagerOptions()) trieStorageManagers := make(map[string]common.StorageManager) - trieStorageManagers[factory.UserAccountTrie] = storageManager - trieStorageManagers[factory.PeerAccountTrie] = storageManager + trieStorageManagers[dataRetriever.UserAccountsUnit.String()] = storageManager + trieStorageManagers[dataRetriever.PeerAccountsUnit.String()] = storageManager arg := ArgsGenesisBlockCreator{ GenesisTime: 0, @@ -70,7 +69,7 @@ func createMockArgument( UInt64ByteSliceConv: &mock.Uint64ByteSliceConverterMock{}, AddrPubKeyConv: testscommon.NewPubkeyConverterMock(32), Chain: "chainID", - TxVersionCheck: &testscommon.TxVersionCheckerStub{}, + TxVersionCheck: &testscommon.TxVersionCheckerStub{}, MinTxVersion: 1, EnableEpochsHandlerField: &testscommon.EnableEpochsHandlerStub{}, }, @@ -105,8 +104,9 @@ func createMockArgument( 
MinQuorum: 0.5, MinPassThreshold: 0.5, MinVetoThreshold: 0.5, + LostProposalFee: "1", }, - ChangeConfigAddress: "3132333435363738393031323334353637383930313233343536373839303234", + OwnerAddress: "3132333435363738393031323334353637383930313233343536373839303234", }, StakingSystemSCConfig: config.StakingSystemSCConfig{ GenesisNodePrice: nodePrice.Text(10), @@ -155,7 +155,7 @@ func createMockArgument( &mock.MarshalizerMock{}, &hashingMocks.HasherMock{}, factoryState.NewAccountCreator(), - trieStorageManagers[factory.UserAccountTrie], + trieStorageManagers[dataRetriever.UserAccountsUnit.String()], &testscommon.PubkeyConverterMock{}, ) require.Nil(t, err) diff --git a/genesis/process/metaGenesisBlockCreator.go b/genesis/process/metaGenesisBlockCreator.go index 2470562b6e5..26a7aa18b45 100644 --- a/genesis/process/metaGenesisBlockCreator.go +++ b/genesis/process/metaGenesisBlockCreator.go @@ -34,6 +34,7 @@ import ( "github.com/multiversx/mx-chain-go/process/smartContract/hooks/counters" syncDisabled "github.com/multiversx/mx-chain-go/process/sync/disabled" processTransaction "github.com/multiversx/mx-chain-go/process/transaction" + "github.com/multiversx/mx-chain-go/state/syncer" "github.com/multiversx/mx-chain-go/storage/txcache" "github.com/multiversx/mx-chain-go/update" hardForkProcess "github.com/multiversx/mx-chain-go/update/process" @@ -303,23 +304,24 @@ func createProcessorsForMetaGenesisBlock(arg ArgsGenesisBlockCreator, enableEpoc builtInFuncs := vmcommonBuiltInFunctions.NewBuiltInFunctionContainer() argsHook := hooks.ArgBlockChainHook{ - Accounts: arg.Accounts, - PubkeyConv: arg.Core.AddressPubKeyConverter(), - StorageService: arg.Data.StorageService(), - BlockChain: arg.Data.Blockchain(), - ShardCoordinator: arg.ShardCoordinator, - Marshalizer: arg.Core.InternalMarshalizer(), - Uint64Converter: arg.Core.Uint64ByteSliceConverter(), - BuiltInFunctions: builtInFuncs, - NFTStorageHandler: &disabled.SimpleNFTStorage{}, - GlobalSettingsHandler: &disabled.ESDTGlobalSettingsHandler{}, - DataPool: arg.Data.Datapool(), - CompiledSCPool: arg.Data.Datapool().SmartContracts(), - EpochNotifier: epochNotifier, - EnableEpochsHandler: enableEpochsHandler, - NilCompiledSCStore: true, - GasSchedule: arg.GasSchedule, - Counter: counters.NewDisabledCounter(), + Accounts: arg.Accounts, + PubkeyConv: arg.Core.AddressPubKeyConverter(), + StorageService: arg.Data.StorageService(), + BlockChain: arg.Data.Blockchain(), + ShardCoordinator: arg.ShardCoordinator, + Marshalizer: arg.Core.InternalMarshalizer(), + Uint64Converter: arg.Core.Uint64ByteSliceConverter(), + BuiltInFunctions: builtInFuncs, + NFTStorageHandler: &disabled.SimpleNFTStorage{}, + GlobalSettingsHandler: &disabled.ESDTGlobalSettingsHandler{}, + DataPool: arg.Data.Datapool(), + CompiledSCPool: arg.Data.Datapool().SmartContracts(), + EpochNotifier: epochNotifier, + EnableEpochsHandler: enableEpochsHandler, + NilCompiledSCStore: true, + GasSchedule: arg.GasSchedule, + Counter: counters.NewDisabledCounter(), + MissingTrieNodesNotifier: syncer.NewMissingTrieNodesNotifier(), } pubKeyVerifier, err := disabled.NewMessageSignVerifier(arg.BlockSignKeyGen) diff --git a/genesis/process/shardGenesisBlockCreator.go b/genesis/process/shardGenesisBlockCreator.go index 7b30e2e7ebc..9f8152bd8ab 100644 --- a/genesis/process/shardGenesisBlockCreator.go +++ b/genesis/process/shardGenesisBlockCreator.go @@ -32,6 +32,7 @@ import ( syncDisabled "github.com/multiversx/mx-chain-go/process/sync/disabled" "github.com/multiversx/mx-chain-go/process/transaction" 
"github.com/multiversx/mx-chain-go/state" + "github.com/multiversx/mx-chain-go/state/syncer" "github.com/multiversx/mx-chain-go/storage/txcache" "github.com/multiversx/mx-chain-go/update" hardForkProcess "github.com/multiversx/mx-chain-go/update/process" @@ -392,6 +393,7 @@ func createProcessorsForShardGenesisBlock(arg ArgsGenesisBlockCreator, enableEpo argsBuiltIn := builtInFunctions.ArgsCreateBuiltInFunctionContainer{ GasSchedule: arg.GasSchedule, MapDNSAddresses: make(map[string]struct{}), + MapDNSV2Addresses: make(map[string]struct{}), EnableUserNameChange: false, Marshalizer: arg.Core.InternalMarshalizer(), Accounts: arg.Accounts, @@ -408,23 +410,24 @@ func createProcessorsForShardGenesisBlock(arg ArgsGenesisBlockCreator, enableEpo } argsHook := hooks.ArgBlockChainHook{ - Accounts: arg.Accounts, - PubkeyConv: arg.Core.AddressPubKeyConverter(), - StorageService: arg.Data.StorageService(), - BlockChain: arg.Data.Blockchain(), - ShardCoordinator: arg.ShardCoordinator, - Marshalizer: arg.Core.InternalMarshalizer(), - Uint64Converter: arg.Core.Uint64ByteSliceConverter(), - BuiltInFunctions: builtInFuncFactory.BuiltInFunctionContainer(), - NFTStorageHandler: builtInFuncFactory.NFTStorageHandler(), - GlobalSettingsHandler: builtInFuncFactory.ESDTGlobalSettingsHandler(), - DataPool: arg.Data.Datapool(), - CompiledSCPool: arg.Data.Datapool().SmartContracts(), - EpochNotifier: epochNotifier, - EnableEpochsHandler: enableEpochsHandler, - NilCompiledSCStore: true, - GasSchedule: arg.GasSchedule, - Counter: counters.NewDisabledCounter(), + Accounts: arg.Accounts, + PubkeyConv: arg.Core.AddressPubKeyConverter(), + StorageService: arg.Data.StorageService(), + BlockChain: arg.Data.Blockchain(), + ShardCoordinator: arg.ShardCoordinator, + Marshalizer: arg.Core.InternalMarshalizer(), + Uint64Converter: arg.Core.Uint64ByteSliceConverter(), + BuiltInFunctions: builtInFuncFactory.BuiltInFunctionContainer(), + NFTStorageHandler: builtInFuncFactory.NFTStorageHandler(), + GlobalSettingsHandler: builtInFuncFactory.ESDTGlobalSettingsHandler(), + DataPool: arg.Data.Datapool(), + CompiledSCPool: arg.Data.Datapool().SmartContracts(), + EpochNotifier: epochNotifier, + EnableEpochsHandler: enableEpochsHandler, + NilCompiledSCStore: true, + GasSchedule: arg.GasSchedule, + Counter: counters.NewDisabledCounter(), + MissingTrieNodesNotifier: syncer.NewMissingTrieNodesNotifier(), } esdtTransferParser, err := parsers.NewESDTTransferParser(arg.Core.InternalMarshalizer()) if err != nil { diff --git a/go.mod b/go.mod index bba09a4e3c7..7e6cebc7912 100644 --- a/go.mod +++ b/go.mod @@ -13,20 +13,20 @@ require ( github.com/google/gops v0.3.18 github.com/gorilla/websocket v1.5.0 github.com/mitchellh/mapstructure v1.5.0 - github.com/multiversx/mx-chain-core-go v1.2.1 + github.com/multiversx/mx-chain-communication-go v1.0.2 + github.com/multiversx/mx-chain-core-go v1.2.5 github.com/multiversx/mx-chain-crypto-go v1.2.6 - github.com/multiversx/mx-chain-es-indexer-go v1.4.1 + github.com/multiversx/mx-chain-es-indexer-go v1.4.4 github.com/multiversx/mx-chain-logger-go v1.0.11 - github.com/multiversx/mx-chain-p2p-go v1.0.16 - github.com/multiversx/mx-chain-storage-go v1.0.8 - github.com/multiversx/mx-chain-vm-common-go v1.4.1 - github.com/multiversx/mx-chain-vm-v1_2-go v1.2.53 - github.com/multiversx/mx-chain-vm-v1_3-go v1.3.54 - github.com/multiversx/mx-chain-vm-v1_4-go v1.4.80 + github.com/multiversx/mx-chain-storage-go v1.0.10 + github.com/multiversx/mx-chain-vm-common-go v1.4.5 + github.com/multiversx/mx-chain-vm-v1_2-go v1.2.55 + 
github.com/multiversx/mx-chain-vm-v1_3-go v1.3.56 + github.com/multiversx/mx-chain-vm-v1_4-go v1.4.82 github.com/pelletier/go-toml v1.9.3 github.com/pkg/errors v0.9.1 github.com/shirou/gopsutil v3.21.11+incompatible - github.com/stretchr/testify v1.8.1 + github.com/stretchr/testify v1.8.2 github.com/urfave/cli v1.22.10 golang.org/x/crypto v0.7.0 gopkg.in/go-playground/validator.v8 v8.18.2 @@ -150,7 +150,7 @@ require ( github.com/raulk/go-watchdog v1.3.0 // indirect github.com/russross/blackfriday/v2 v2.0.1 // indirect github.com/shurcooL/sanitized_anchor_name v1.0.0 // indirect - github.com/smartystreets/assertions v1.13.0 // indirect + github.com/smartystreets/assertions v1.13.1 // indirect github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572 // indirect github.com/spaolacci/murmur3 v1.1.0 // indirect github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 // indirect diff --git a/go.sum b/go.sum index 3b5e4647240..82a1022faa0 100644 --- a/go.sum +++ b/go.sum @@ -616,27 +616,33 @@ github.com/multiformats/go-varint v0.0.6 h1:gk85QWKxh3TazbLxED/NlDVv8+q+ReFJk7Y2 github.com/multiformats/go-varint v0.0.6/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= github.com/multiversx/concurrent-map v0.1.4 h1:hdnbM8VE4b0KYJaGY5yJS2aNIW9TFFsUYwbO0993uPI= github.com/multiversx/concurrent-map v0.1.4/go.mod h1:8cWFRJDOrWHOTNSqgYCUvwT7c7eFQ4U2vKMOp4A/9+o= +github.com/multiversx/mx-chain-communication-go v1.0.2 h1:1AKdqFZNmigt1kcwYMl+L8fzolsb+WpeTX6yzpmvbV4= +github.com/multiversx/mx-chain-communication-go v1.0.2/go.mod h1:OnnSUjnSP87H5MtQtxn33FGnTVRVgo2Huo3ijmCgN2M= github.com/multiversx/mx-chain-core-go v1.1.30/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= -github.com/multiversx/mx-chain-core-go v1.2.1 h1:kmDfK7Znl3S0IJlDEE4sFuBOmA2rZkBudxlGhI1bvQc= +github.com/multiversx/mx-chain-core-go v1.2.1-0.20230510143029-ab37792342df/go.mod h1:jzYFSiYBuO0dGpGFXnZWSwcwcKP7Flyn/X41y4zIQrQ= github.com/multiversx/mx-chain-core-go v1.2.1/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= +github.com/multiversx/mx-chain-core-go v1.2.4/go.mod h1:jzYFSiYBuO0dGpGFXnZWSwcwcKP7Flyn/X41y4zIQrQ= +github.com/multiversx/mx-chain-core-go v1.2.5 h1:uIZSqRygJAxv+pGuZnoSMwS4t10C/paasuwps5nxrIQ= +github.com/multiversx/mx-chain-core-go v1.2.5/go.mod h1:jzYFSiYBuO0dGpGFXnZWSwcwcKP7Flyn/X41y4zIQrQ= github.com/multiversx/mx-chain-crypto-go v1.2.6 h1:yxsjAQGh62los+iYmORMfh3w9qen0xbYlmwU0juNSeg= github.com/multiversx/mx-chain-crypto-go v1.2.6/go.mod h1:rOj0Rr19HTOYt9YTeym7RKxlHt91NXln3LVKjHKVmA0= -github.com/multiversx/mx-chain-es-indexer-go v1.4.1 h1:gD/D7xZP7OL8L/ZZ3SoOfKjVHrU0iUxIG2AbidHFTUc= -github.com/multiversx/mx-chain-es-indexer-go v1.4.1/go.mod h1:o+LWvL+UEKx1lrFhkV2SfxoaFybKro3ZLN4HOMGXDos= +github.com/multiversx/mx-chain-es-indexer-go v1.4.4 h1:3k8pB1AEILlNXL2ggSnP43uqVBQQg3hbx7351IcGbh0= +github.com/multiversx/mx-chain-es-indexer-go v1.4.4/go.mod h1:IAFuU3LhjVfs3+Sf4T3BlNjY1TmZHWovHRhV7tfR8cw= github.com/multiversx/mx-chain-logger-go v1.0.11 h1:DFsHa+sc5fKwhDR50I8uBM99RTDTEW68ESyr5ALRDwE= github.com/multiversx/mx-chain-logger-go v1.0.11/go.mod h1:1srDkP0DQucWQ+rYfaq0BX2qLnULsUdRPADpYUTM6dA= -github.com/multiversx/mx-chain-p2p-go v1.0.16 h1:iMK8KUi006/avVcmecnk7lqbDCRL0BN04vgepoVLlyY= -github.com/multiversx/mx-chain-p2p-go v1.0.16/go.mod h1:7piVUb5Z7UHK6n3JW8yIc32RdDFZu7GivRY29q0r2gI= -github.com/multiversx/mx-chain-storage-go v1.0.8 h1:PB9OAwZs3rWz7nybBOxVCxgrd785FUUUAsVc5JWXYCw= github.com/multiversx/mx-chain-storage-go v1.0.8/go.mod h1:lEkFYFe6taKYxqO1einNgT1esY3K9Pj6vPnoCwV9C3U= 
-github.com/multiversx/mx-chain-vm-common-go v1.4.1 h1:HHZF9zU4WsMbfLrCarx3ESM95caWUrPBleGHKdsbzgc= +github.com/multiversx/mx-chain-storage-go v1.0.10 h1:5rzPMME+CEJyoGGJ1tAb6ISnPmr68VFvGoKo0hF0WtU= +github.com/multiversx/mx-chain-storage-go v1.0.10/go.mod h1:VP9fwyFBmbmDzahUuu0IeGX/dKG3iBWjN6FSQ6YtVaI= github.com/multiversx/mx-chain-vm-common-go v1.4.1/go.mod h1:K6yCdro8VohzYI6GwjGzTO+fJiPgO5coo2sgQb+zA24= -github.com/multiversx/mx-chain-vm-v1_2-go v1.2.53 h1:HGbatzgIhVPJASN3ADnygH4MrKypAidOVtZkHkY/jKw= -github.com/multiversx/mx-chain-vm-v1_2-go v1.2.53/go.mod h1:STVJW9m/TUJ9Q64W/T4P/KuhP5fPb+cCb6Q4gSehWJg= -github.com/multiversx/mx-chain-vm-v1_3-go v1.3.54 h1:bl2essObOEDwVWci71hJ2QO5AYTsKk6IlzRK0i8y63w= -github.com/multiversx/mx-chain-vm-v1_3-go v1.3.54/go.mod h1:1rgU8wXdn76S7rZx+4YS6ObK+M1XiSdPoPmXVq8fuZE= -github.com/multiversx/mx-chain-vm-v1_4-go v1.4.80 h1:iiOXTcwvfjQXlchlVnSdNeqHYKVn/k7s/MsHfk/wrr0= -github.com/multiversx/mx-chain-vm-v1_4-go v1.4.80/go.mod h1:Be8y+QBPSKacW2TJaaQSeKYNGtCenFt4dpBOAnICAcc= +github.com/multiversx/mx-chain-vm-common-go v1.4.4/go.mod h1:+AjDwO/RJwQ75dzHJ/gBxmi5uTdICdhAo8bGNHTf7Yk= +github.com/multiversx/mx-chain-vm-common-go v1.4.5 h1:/pIMGSGqNJXbfAKOqigU2yapuBlosMCJiu6r+r+XcHE= +github.com/multiversx/mx-chain-vm-common-go v1.4.5/go.mod h1:+AjDwO/RJwQ75dzHJ/gBxmi5uTdICdhAo8bGNHTf7Yk= +github.com/multiversx/mx-chain-vm-v1_2-go v1.2.55 h1:jvBLu7JoitavahMDCkfOGYWjgXGBOe+3JJ0hNxj9AZM= +github.com/multiversx/mx-chain-vm-v1_2-go v1.2.55/go.mod h1:jCNgHGyj0JoLAsmijOSVK0G+yphccp9gIKsp/mRguF4= +github.com/multiversx/mx-chain-vm-v1_3-go v1.3.56 h1:VXveqaT/wdipfhIdUHXxFderY3+KxtFEbrDkF+zirr8= +github.com/multiversx/mx-chain-vm-v1_3-go v1.3.56/go.mod h1:guKkvnEDwGPaysZOVa+SaHEyiFDRJkFSVu0VE7jbk4k= +github.com/multiversx/mx-chain-vm-v1_4-go v1.4.82 h1:f0jL0jMPayN+/J/ZoK9sDRLggqvUp+/DJmu0dVTQNq8= +github.com/multiversx/mx-chain-vm-v1_4-go v1.4.82/go.mod h1:tKdkDQXDPFE5vAYOAJOq2iiTibi9KeiasNWsmA4nEmk= github.com/multiversx/mx-components-big-int v0.1.1 h1:695mYPKYOrmGEGgRH4/pZruDoe3CPP1LHrBxKfvj5l4= github.com/multiversx/mx-components-big-int v0.1.1/go.mod h1:0QrcFdfeLgJ/am10HGBeH0G0DNF+0Qx1E4DS/iozQls= github.com/multiversx/protobuf v1.3.2 h1:RaNkxvGTGbA0lMcnHAN24qE1G1i+Xs5yHA6MDvQ4mSM= @@ -760,8 +766,8 @@ github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrf github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= -github.com/smartystreets/assertions v1.13.0 h1:Dx1kYM01xsSqKPno3aqLnrwac2LetPvN23diwyr69Qs= -github.com/smartystreets/assertions v1.13.0/go.mod h1:wDmR7qL282YbGsPy6H/yAsesrxfxaaSlJazyFLYVFx8= +github.com/smartystreets/assertions v1.13.1 h1:Ef7KhSmjZcK6AVf9YbJdvPYG9avaF0ZxudX+ThRdWfU= +github.com/smartystreets/assertions v1.13.1/go.mod h1:cXr/IwVfSo/RbCSPhoAPv73p3hlSdrBH/b3SdnW/LMY= github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/smola/gocompat v0.2.0/go.mod h1:1B0MlxbmoZNo3h8guHp8HztB3BSYR5itql9qtVc0ypY= @@ -791,8 +797,9 @@ github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod 
h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= +github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ= github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY= github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc= diff --git a/heartbeat/sender/baseSender.go b/heartbeat/sender/baseSender.go index ac438148d31..cf7a7787c1f 100644 --- a/heartbeat/sender/baseSender.go +++ b/heartbeat/sender/baseSender.go @@ -80,7 +80,7 @@ func checkBaseSenderArgs(args argBaseSender) error { return fmt.Errorf("%w for timeBetweenSendsWhenError", heartbeat.ErrInvalidTimeDuration) } if args.thresholdBetweenSends < minThresholdBetweenSends || args.thresholdBetweenSends > maxThresholdBetweenSends { - return fmt.Errorf("%w for thresholdBetweenSends, receieved %f, min allowed %f, max allowed %f", + return fmt.Errorf("%w for thresholdBetweenSends, received %f, min allowed %f, max allowed %f", heartbeat.ErrInvalidThreshold, args.thresholdBetweenSends, minThresholdBetweenSends, maxThresholdBetweenSends) } if check.IfNil(args.privKey) { diff --git a/integrationTests/benchmarks/loadFromTrie_test.go b/integrationTests/benchmarks/loadFromTrie_test.go index 487ced6b447..711ddeba293 100644 --- a/integrationTests/benchmarks/loadFromTrie_test.go +++ b/integrationTests/benchmarks/loadFromTrie_test.go @@ -12,11 +12,10 @@ import ( "github.com/multiversx/mx-chain-core-go/hashing/blake2b" "github.com/multiversx/mx-chain-core-go/marshal" "github.com/multiversx/mx-chain-go/common" - "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/storage" "github.com/multiversx/mx-chain-go/storage/database" "github.com/multiversx/mx-chain-go/storage/storageunit" - "github.com/multiversx/mx-chain-go/testscommon" + testStorage "github.com/multiversx/mx-chain-go/testscommon/storage" "github.com/multiversx/mx-chain-go/trie" "github.com/multiversx/mx-chain-go/trie/hashesHolder/disabled" "github.com/stretchr/testify/require" @@ -163,20 +162,12 @@ func generateRandHexString(size int) string { } func getTrieStorageManager(store storage.Storer, marshaller marshal.Marshalizer, hasher hashing.Hasher) common.StorageManager { - generalCfg := config.TrieStorageManagerConfig{ - PruningBufferLen: 1000, - SnapshotsBufferLen: 10, - SnapshotsGoroutineNum: 1, - } - args := trie.NewTrieStorageManagerArgs{ - MainStorer: store, - CheckpointsStorer: database.NewMemDB(), - Marshalizer: marshaller, - Hasher: hasher, - GeneralConfig: generalCfg, - CheckpointHashesHolder: disabled.NewDisabledCheckpointHashesHolder(), - IdleProvider: &testscommon.ProcessStatusHandlerStub{}, - } + args := testStorage.GetStorageManagerArgs() + args.MainStorer = store + args.Marshalizer = marshaller + args.Hasher = hasher + args.CheckpointHashesHolder = disabled.NewDisabledCheckpointHashesHolder() + trieStorageManager, _ := trie.NewTrieStorageManager(args) return trieStorageManager diff --git a/integrationTests/interface.go b/integrationTests/interface.go index fb61587b644..7a6de790497 
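The GetStorageManagerArgsAndOptions test helper is split in two, so call sites override only the pieces they care about. A sketch of the new pattern shared by the updated tests; the storer passed in is whatever backing store the test already has at hand:

package example

import (
	"github.com/multiversx/mx-chain-go/common"
	"github.com/multiversx/mx-chain-go/storage"
	testStorage "github.com/multiversx/mx-chain-go/testscommon/storage"
	"github.com/multiversx/mx-chain-go/trie"
)

// newTestTrieStorageManager mirrors the split-helper pattern: take the default args and options,
// override selectively, then build the trie storage manager.
func newTestTrieStorageManager(store storage.Storer) (common.StorageManager, error) {
	args := testStorage.GetStorageManagerArgs()
	args.MainStorer = store

	options := testStorage.GetStorageManagerOptions()
	options.PruningEnabled = false

	return trie.CreateTrieStorageManager(args, options)
}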
100644 --- a/integrationTests/interface.go +++ b/integrationTests/interface.go @@ -4,10 +4,10 @@ import ( "math/big" "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/data/alteredAccount" "github.com/multiversx/mx-chain-core-go/data/api" dataApi "github.com/multiversx/mx-chain-core-go/data/api" "github.com/multiversx/mx-chain-core-go/data/esdt" - "github.com/multiversx/mx-chain-core-go/data/outport" "github.com/multiversx/mx-chain-core-go/data/transaction" "github.com/multiversx/mx-chain-core-go/data/vm" "github.com/multiversx/mx-chain-go/common" @@ -109,6 +109,6 @@ type Facade interface { GetTransactionsPoolForSender(sender, fields string) (*common.TransactionsPoolForSenderApiResponse, error) GetLastPoolNonceForSender(sender string) (uint64, error) GetTransactionsPoolNonceGapsForSender(sender string) (*common.TransactionsPoolNonceGapsForSenderApiResponse, error) - GetAlteredAccountsForBlock(options dataApi.GetAlteredAccountsForBlockOptions) ([]*outport.AlteredAccount, error) + GetAlteredAccountsForBlock(options dataApi.GetAlteredAccountsForBlockOptions) ([]*alteredAccount.AlteredAccount, error) IsInterfaceNil() bool } diff --git a/integrationTests/longTests/storage/storage_test.go b/integrationTests/longTests/storage/storage_test.go index 88269350c68..56474f26978 100644 --- a/integrationTests/longTests/storage/storage_test.go +++ b/integrationTests/longTests/storage/storage_test.go @@ -105,10 +105,12 @@ func TestWriteContinuouslyInTree(t *testing.T) { nbTxsWrite := 1000000 testStorage := integrationTests.NewTestStorage() store := testStorage.CreateStorageLevelDB() - storageManagerArgs, options := storage.GetStorageManagerArgsAndOptions() + storageManagerArgs := storage.GetStorageManagerArgs() storageManagerArgs.MainStorer = store storageManagerArgs.Marshalizer = &marshal.JsonMarshalizer{} storageManagerArgs.Hasher = blake2b.NewBlake2b() + + options := storage.GetStorageManagerOptions() options.CheckpointsEnabled = false options.PruningEnabled = false diff --git a/integrationTests/mock/accountsDBSyncerStub.go b/integrationTests/mock/accountsDBSyncerStub.go index 9ff9abb9017..39477bdc70a 100644 --- a/integrationTests/mock/accountsDBSyncerStub.go +++ b/integrationTests/mock/accountsDBSyncerStub.go @@ -7,7 +7,7 @@ import ( // AccountsDBSyncerStub - type AccountsDBSyncerStub struct { GetSyncedTriesCalled func() map[string]common.Trie - SyncAccountsCalled func(rootHash []byte) error + SyncAccountsCalled func(rootHash []byte, storageMarker common.StorageMarker) error } // GetSyncedTries - @@ -19,9 +19,9 @@ func (a *AccountsDBSyncerStub) GetSyncedTries() map[string]common.Trie { } // SyncAccounts - -func (a *AccountsDBSyncerStub) SyncAccounts(rootHash []byte) error { +func (a *AccountsDBSyncerStub) SyncAccounts(rootHash []byte, storageMarker common.StorageMarker) error { if a.SyncAccountsCalled != nil { - return a.SyncAccountsCalled(rootHash) + return a.SyncAccountsCalled(rootHash, storageMarker) } return nil } diff --git a/integrationTests/mock/nilOutport.go b/integrationTests/mock/nilOutport.go deleted file mode 100644 index 67d4ca6b797..00000000000 --- a/integrationTests/mock/nilOutport.go +++ /dev/null @@ -1,62 +0,0 @@ -package mock - -import ( - "github.com/multiversx/mx-chain-core-go/data" - outportcore "github.com/multiversx/mx-chain-core-go/data/outport" - "github.com/multiversx/mx-chain-go/outport" -) - -type nilOutport struct{} - -// NewNilOutport - -func NewNilOutport() *nilOutport { - return new(nilOutport) -} - -// SaveBlock - -func (n 
*nilOutport) SaveBlock(_ *outportcore.ArgsSaveBlockData) { -} - -// RevertIndexedBlock - -func (n *nilOutport) RevertIndexedBlock(_ data.HeaderHandler, _ data.BodyHandler) { -} - -// SaveRoundsInfo - -func (n *nilOutport) SaveRoundsInfo(_ []*outportcore.RoundInfo) { -} - -// SaveValidatorsPubKeys - -func (n *nilOutport) SaveValidatorsPubKeys(_ map[uint32][][]byte, _ uint32) { -} - -// SaveValidatorsRating - -func (n *nilOutport) SaveValidatorsRating(_ string, _ []*outportcore.ValidatorRatingInfo) { -} - -// SaveAccounts - -func (n *nilOutport) SaveAccounts(_ uint64, _ map[string]*outportcore.AlteredAccount, _ uint32) { -} - -// FinalizedBlock - -func (n *nilOutport) FinalizedBlock(_ []byte) { -} - -// Close - -func (n *nilOutport) Close() error { - return nil -} - -// IsInterfaceNil - -func (n *nilOutport) IsInterfaceNil() bool { - return n == nil -} - -// SubscribeDriver - -func (n *nilOutport) SubscribeDriver(_ outport.Driver) error { - return nil -} - -// HasDrivers - -func (n *nilOutport) HasDrivers() bool { - return false -} diff --git a/integrationTests/mock/p2pAntifloodHandlerStub.go b/integrationTests/mock/p2pAntifloodHandlerStub.go index bda3da406d5..c181d10909d 100644 --- a/integrationTests/mock/p2pAntifloodHandlerStub.go +++ b/integrationTests/mock/p2pAntifloodHandlerStub.go @@ -16,81 +16,85 @@ type P2PAntifloodHandlerStub struct { SetDebuggerCalled func(debugger process.AntifloodDebugger) error BlacklistPeerCalled func(peer core.PeerID, reason string, duration time.Duration) IsOriginatorEligibleForTopicCalled func(pid core.PeerID, topic string) error + SetPeerValidatorMapperCalled func(validatorMapper process.PeerValidatorMapper) error } // CanProcessMessage - -func (p2pahs *P2PAntifloodHandlerStub) CanProcessMessage(message p2p.MessageP2P, fromConnectedPeer core.PeerID) error { - if p2pahs.CanProcessMessageCalled == nil { +func (stub *P2PAntifloodHandlerStub) CanProcessMessage(message p2p.MessageP2P, fromConnectedPeer core.PeerID) error { + if stub.CanProcessMessageCalled == nil { return nil } - return p2pahs.CanProcessMessageCalled(message, fromConnectedPeer) + return stub.CanProcessMessageCalled(message, fromConnectedPeer) } // IsOriginatorEligibleForTopic - -func (p2pahs *P2PAntifloodHandlerStub) IsOriginatorEligibleForTopic(pid core.PeerID, topic string) error { - if p2pahs.IsOriginatorEligibleForTopicCalled != nil { - return p2pahs.IsOriginatorEligibleForTopicCalled(pid, topic) +func (stub *P2PAntifloodHandlerStub) IsOriginatorEligibleForTopic(pid core.PeerID, topic string) error { + if stub.IsOriginatorEligibleForTopicCalled != nil { + return stub.IsOriginatorEligibleForTopicCalled(pid, topic) } return nil } // CanProcessMessagesOnTopic - -func (p2pahs *P2PAntifloodHandlerStub) CanProcessMessagesOnTopic(peer core.PeerID, topic string, numMessages uint32, totalSize uint64, sequence []byte) error { - if p2pahs.CanProcessMessagesOnTopicCalled == nil { +func (stub *P2PAntifloodHandlerStub) CanProcessMessagesOnTopic(peer core.PeerID, topic string, numMessages uint32, totalSize uint64, sequence []byte) error { + if stub.CanProcessMessagesOnTopicCalled == nil { return nil } - return p2pahs.CanProcessMessagesOnTopicCalled(peer, topic, numMessages, totalSize, sequence) + return stub.CanProcessMessagesOnTopicCalled(peer, topic, numMessages, totalSize, sequence) } // ApplyConsensusSize - -func (p2pahs *P2PAntifloodHandlerStub) ApplyConsensusSize(size int) { - if p2pahs.ApplyConsensusSizeCalled != nil { - p2pahs.ApplyConsensusSizeCalled(size) +func (stub *P2PAntifloodHandlerStub) 
ApplyConsensusSize(size int) { + if stub.ApplyConsensusSizeCalled != nil { + stub.ApplyConsensusSizeCalled(size) } } // SetDebugger - -func (p2pahs *P2PAntifloodHandlerStub) SetDebugger(debugger process.AntifloodDebugger) error { - if p2pahs.SetDebuggerCalled != nil { - return p2pahs.SetDebuggerCalled(debugger) +func (stub *P2PAntifloodHandlerStub) SetDebugger(debugger process.AntifloodDebugger) error { + if stub.SetDebuggerCalled != nil { + return stub.SetDebuggerCalled(debugger) } return nil } // BlacklistPeer - -func (p2pahs *P2PAntifloodHandlerStub) BlacklistPeer(peer core.PeerID, reason string, duration time.Duration) { - if p2pahs.BlacklistPeerCalled != nil { - p2pahs.BlacklistPeerCalled(peer, reason, duration) +func (stub *P2PAntifloodHandlerStub) BlacklistPeer(peer core.PeerID, reason string, duration time.Duration) { + if stub.BlacklistPeerCalled != nil { + stub.BlacklistPeerCalled(peer, reason, duration) } } // ResetForTopic - -func (p2pahs *P2PAntifloodHandlerStub) ResetForTopic(_ string) { +func (stub *P2PAntifloodHandlerStub) ResetForTopic(_ string) { } // SetMaxMessagesForTopic - -func (p2pahs *P2PAntifloodHandlerStub) SetMaxMessagesForTopic(_ string, _ uint32) { +func (stub *P2PAntifloodHandlerStub) SetMaxMessagesForTopic(_ string, _ uint32) { } // SetPeerValidatorMapper - -func (p2pahs *P2PAntifloodHandlerStub) SetPeerValidatorMapper(_ process.PeerValidatorMapper) error { +func (stub *P2PAntifloodHandlerStub) SetPeerValidatorMapper(validatorMapper process.PeerValidatorMapper) error { + if stub.SetPeerValidatorMapperCalled != nil { + return stub.SetPeerValidatorMapperCalled(validatorMapper) + } return nil } // SetTopicsForAll - -func (p2pahs *P2PAntifloodHandlerStub) SetTopicsForAll(_ ...string) { +func (stub *P2PAntifloodHandlerStub) SetTopicsForAll(_ ...string) { } // Close - -func (p2pahs *P2PAntifloodHandlerStub) Close() error { +func (stub *P2PAntifloodHandlerStub) Close() error { return nil } // IsInterfaceNil - -func (p2pahs *P2PAntifloodHandlerStub) IsInterfaceNil() bool { - return p2pahs == nil +func (stub *P2PAntifloodHandlerStub) IsInterfaceNil() bool { + return stub == nil } diff --git a/integrationTests/mock/storageManagerStub.go b/integrationTests/mock/storageManagerStub.go deleted file mode 100644 index 83b88c88abb..00000000000 --- a/integrationTests/mock/storageManagerStub.go +++ /dev/null @@ -1,91 +0,0 @@ -package mock - -import ( - "github.com/multiversx/mx-chain-go/common" -) - -// StorageManagerStub - -type StorageManagerStub struct { - DatabaseCalled func() common.DBWriteCacher - TakeSnapshotCalled func([]byte) - SetCheckpointCalled func([]byte) - GetDbThatContainsHashCalled func([]byte) common.DBWriteCacher - GetSnapshotThatContainsHashCalled func(rootHash []byte) common.SnapshotDbHandler - IsPruningEnabledCalled func() bool - IsPruningBlockedCalled func() bool - EnterPruningBufferingModeCalled func() - ExitPruningBufferingModeCalled func() - IsInterfaceNilCalled func() bool -} - -// Database - -func (sms *StorageManagerStub) Database() common.DBWriteCacher { - if sms.DatabaseCalled != nil { - return sms.DatabaseCalled() - } - return nil -} - -// TakeSnapshot - -func (sms *StorageManagerStub) TakeSnapshot([]byte) { - -} - -// SetCheckpoint - -func (sms *StorageManagerStub) SetCheckpoint([]byte) { - -} - -// GetSnapshotThatContainsHash - -func (sms *StorageManagerStub) GetSnapshotThatContainsHash(d []byte) common.SnapshotDbHandler { - if sms.GetSnapshotThatContainsHashCalled != nil { - return sms.GetSnapshotThatContainsHashCalled(d) - } - - return nil -} 
- -// IsPruningEnabled - -func (sms *StorageManagerStub) IsPruningEnabled() bool { - if sms.IsPruningEnabledCalled != nil { - return sms.IsPruningEnabledCalled() - } - return false -} - -// IsPruningBlocked - -func (sms *StorageManagerStub) IsPruningBlocked() bool { - if sms.IsPruningBlockedCalled != nil { - return sms.IsPruningBlockedCalled() - } - return false -} - -// GetSnapshotDbBatchDelay - -func (sms *StorageManagerStub) GetSnapshotDbBatchDelay() int { - return 0 -} - -// Close - -func (sms *StorageManagerStub) Close() error { - return nil -} - -// EnterPruningBufferingMode - -func (sms *StorageManagerStub) EnterPruningBufferingMode() { - if sms.EnterPruningBufferingModeCalled != nil { - sms.EnterPruningBufferingModeCalled() - } -} - -// ExitPruningBufferingMode - -func (sms *StorageManagerStub) ExitPruningBufferingMode() { - if sms.ExitPruningBufferingModeCalled != nil { - sms.ExitPruningBufferingModeCalled() - } -} - -// IsInterfaceNil - -func (sms *StorageManagerStub) IsInterfaceNil() bool { - return sms == nil -} diff --git a/integrationTests/multiShard/hardFork/hardFork_test.go b/integrationTests/multiShard/hardFork/hardFork_test.go index 929d6afc1b9..69a5ccfbdcf 100644 --- a/integrationTests/multiShard/hardFork/hardFork_test.go +++ b/integrationTests/multiShard/hardFork/hardFork_test.go @@ -443,8 +443,9 @@ func hardForkImport( MinQuorum: 0.5, MinPassThreshold: 0.5, MinVetoThreshold: 0.5, + LostProposalFee: "1", }, - ChangeConfigAddress: integrationTests.DelegationManagerConfigChangeAddress, + OwnerAddress: integrationTests.DelegationManagerConfigChangeAddress, }, StakingSystemSCConfig: config.StakingSystemSCConfig{ GenesisNodePrice: "1000", diff --git a/integrationTests/multiShard/smartContract/dns/dns_test.go b/integrationTests/multiShard/smartContract/dns/dns_test.go index 63d7ce13a60..4fdf6eb52f5 100644 --- a/integrationTests/multiShard/smartContract/dns/dns_test.go +++ b/integrationTests/multiShard/smartContract/dns/dns_test.go @@ -129,11 +129,15 @@ func prepareNodesAndPlayers() ([]*integrationTests.TestProcessorNode, []*integra numMetachainNodes := 1 genesisFile := "smartcontracts.json" - nodes, _ := integrationTests.CreateNodesWithFullGenesis( + enableEpochsConfig := integrationTests.GetDefaultEnableEpochsConfig() + enableEpochsConfig.StakingV2EnableEpoch = integrationTests.UnreachableEpoch + enableEpochsConfig.ChangeUsernameEnableEpoch = integrationTests.UnreachableEpoch + nodes, _ := integrationTests.CreateNodesWithFullGenesisCustomEnableEpochs( numOfShards, nodesPerShard, numMetachainNodes, genesisFile, + enableEpochsConfig, ) for _, node := range nodes { diff --git a/integrationTests/realcomponents/processorRunner.go b/integrationTests/realcomponents/processorRunner.go new file mode 100644 index 00000000000..6176a54858e --- /dev/null +++ b/integrationTests/realcomponents/processorRunner.go @@ -0,0 +1,587 @@ +package realcomponents + +import ( + "crypto/rand" + "io" + "math/big" + "path/filepath" + "strings" + "testing" + "time" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/data/endProcess" + "github.com/multiversx/mx-chain-core-go/data/esdt" + "github.com/multiversx/mx-chain-core-go/data/transaction" + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/common/forking" + "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/dataRetriever" + dbLookupFactory "github.com/multiversx/mx-chain-go/dblookupext/factory" + "github.com/multiversx/mx-chain-go/factory" + 
factoryBootstrap "github.com/multiversx/mx-chain-go/factory/bootstrap" + factoryCore "github.com/multiversx/mx-chain-go/factory/core" + factoryCrypto "github.com/multiversx/mx-chain-go/factory/crypto" + factoryData "github.com/multiversx/mx-chain-go/factory/data" + factoryNetwork "github.com/multiversx/mx-chain-go/factory/network" + factoryProcessing "github.com/multiversx/mx-chain-go/factory/processing" + factoryState "github.com/multiversx/mx-chain-go/factory/state" + factoryStatus "github.com/multiversx/mx-chain-go/factory/status" + factoryStatusCore "github.com/multiversx/mx-chain-go/factory/statusCore" + "github.com/multiversx/mx-chain-go/genesis" + "github.com/multiversx/mx-chain-go/genesis/parsing" + "github.com/multiversx/mx-chain-go/integrationTests/vm" + "github.com/multiversx/mx-chain-go/integrationTests/vm/wasm" + "github.com/multiversx/mx-chain-go/p2p" + "github.com/multiversx/mx-chain-go/process/interceptors" + nodesCoord "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" + "github.com/multiversx/mx-chain-go/state" + "github.com/multiversx/mx-chain-go/storage/cache" + storageFactory "github.com/multiversx/mx-chain-go/storage/factory" + "github.com/multiversx/mx-chain-go/storage/storageunit" + "github.com/multiversx/mx-chain-go/update/trigger" + "github.com/stretchr/testify/require" +) + +// ProcessorRunner is a test emulation to the nodeRunner component +type ProcessorRunner struct { + closers []io.Closer + Config config.Configs + CoreComponents factory.CoreComponentsHolder + CryptoComponents factory.CryptoComponentsHandler + StatusCoreComponents factory.StatusCoreComponentsHolder + NetworkComponents factory.NetworkComponentsHolder + BootstrapComponents factory.BootstrapComponentsHolder + DataComponents factory.DataComponentsHolder + StateComponents factory.StateComponentsHolder + NodesCoordinator nodesCoord.NodesCoordinator + StatusComponents factory.StatusComponentsHolder + ProcessComponents factory.ProcessComponentsHolder +} + +// NewProcessorRunner returns a new instance of ProcessorRunner +func NewProcessorRunner(tb testing.TB, config config.Configs) *ProcessorRunner { + pr := &ProcessorRunner{ + Config: config, + closers: make([]io.Closer, 0), + } + + pr.createComponents(tb) + + return pr +} + +func (pr *ProcessorRunner) createComponents(tb testing.TB) { + pr.createCoreComponents(tb) + pr.createCryptoComponents(tb) + pr.createStatusCoreComponents(tb) + pr.createNetworkComponents(tb) + pr.createBootstrapComponents(tb) + pr.createDataComponents(tb) + pr.createStateComponents(tb) + pr.createStatusComponents(tb) + pr.createProcessComponents(tb) +} + +func (pr *ProcessorRunner) createCoreComponents(tb testing.TB) { + argsCore := factoryCore.CoreComponentsFactoryArgs{ + Config: *pr.Config.GeneralConfig, + ConfigPathsHolder: *pr.Config.ConfigurationPathsHolder, + EpochConfig: *pr.Config.EpochConfig, + RoundConfig: *pr.Config.RoundConfig, + RatingsConfig: *pr.Config.RatingsConfig, + EconomicsConfig: *pr.Config.EconomicsConfig, + ImportDbConfig: *pr.Config.ImportDbConfig, + NodesFilename: pr.Config.ConfigurationPathsHolder.Nodes, + WorkingDirectory: pr.Config.FlagsConfig.WorkingDir, + ChanStopNodeProcess: make(chan endProcess.ArgEndProcess), + } + coreFactory, err := factoryCore.NewCoreComponentsFactory(argsCore) + require.Nil(tb, err) + + coreComp, err := factoryCore.NewManagedCoreComponents(coreFactory) + require.Nil(tb, err) + + err = coreComp.Create() + require.Nil(tb, err) + require.Nil(tb, coreComp.CheckSubcomponents()) + + pr.closers = append(pr.closers, 
coreComp) + pr.CoreComponents = coreComp +} + +func (pr *ProcessorRunner) createCryptoComponents(tb testing.TB) { + argsCrypto := factoryCrypto.CryptoComponentsFactoryArgs{ + ValidatorKeyPemFileName: pr.Config.ConfigurationPathsHolder.ValidatorKey, + AllValidatorKeysPemFileName: pr.Config.ConfigurationPathsHolder.AllValidatorKeys, + SkIndex: 0, + Config: *pr.Config.GeneralConfig, + EnableEpochs: pr.Config.EpochConfig.EnableEpochs, + PrefsConfig: *pr.Config.PreferencesConfig, + CoreComponentsHolder: pr.CoreComponents, + KeyLoader: core.NewKeyLoader(), + ActivateBLSPubKeyMessageVerification: false, + IsInImportMode: false, + ImportModeNoSigCheck: false, + NoKeyProvided: true, + P2pKeyPemFileName: "", + } + + cryptoFactory, err := factoryCrypto.NewCryptoComponentsFactory(argsCrypto) + require.Nil(tb, err) + + cryptoComp, err := factoryCrypto.NewManagedCryptoComponents(cryptoFactory) + require.Nil(tb, err) + + err = cryptoComp.Create() + require.Nil(tb, err) + require.Nil(tb, cryptoComp.CheckSubcomponents()) + + pr.closers = append(pr.closers, cryptoComp) + pr.CryptoComponents = cryptoComp +} + +func (pr *ProcessorRunner) createStatusCoreComponents(tb testing.TB) { + argsStatusCore := factoryStatusCore.StatusCoreComponentsFactoryArgs{ + Config: *pr.Config.GeneralConfig, + EpochConfig: *pr.Config.EpochConfig, + RoundConfig: *pr.Config.RoundConfig, + RatingsConfig: *pr.Config.RatingsConfig, + EconomicsConfig: *pr.Config.EconomicsConfig, + CoreComp: pr.CoreComponents, + } + + statusCoreFactory, err := factoryStatusCore.NewStatusCoreComponentsFactory(argsStatusCore) + require.Nil(tb, err) + + statusCoreComp, err := factoryStatusCore.NewManagedStatusCoreComponents(statusCoreFactory) + require.Nil(tb, err) + + err = statusCoreComp.Create() + require.Nil(tb, err) + require.Nil(tb, statusCoreComp.CheckSubcomponents()) + + pr.closers = append(pr.closers, statusCoreComp) + pr.StatusCoreComponents = statusCoreComp +} + +func (pr *ProcessorRunner) createNetworkComponents(tb testing.TB) { + argsNetwork := factoryNetwork.NetworkComponentsFactoryArgs{ + P2pConfig: *pr.Config.P2pConfig, + MainConfig: *pr.Config.GeneralConfig, + RatingsConfig: *pr.Config.RatingsConfig, + StatusHandler: pr.StatusCoreComponents.AppStatusHandler(), + Marshalizer: pr.CoreComponents.InternalMarshalizer(), + Syncer: pr.CoreComponents.SyncTimer(), + PreferredPeersSlices: make([]string, 0), + BootstrapWaitTime: 1, + NodeOperationMode: p2p.NormalOperation, + ConnectionWatcherType: "", + CryptoComponents: pr.CryptoComponents, + } + + networkFactory, err := factoryNetwork.NewNetworkComponentsFactory(argsNetwork) + require.Nil(tb, err) + + networkComp, err := factoryNetwork.NewManagedNetworkComponents(networkFactory) + require.Nil(tb, err) + + err = networkComp.Create() + require.Nil(tb, err) + require.Nil(tb, networkComp.CheckSubcomponents()) + + pr.closers = append(pr.closers, networkComp) + pr.NetworkComponents = networkComp +} + +func (pr *ProcessorRunner) createBootstrapComponents(tb testing.TB) { + argsBootstrap := factoryBootstrap.BootstrapComponentsFactoryArgs{ + Config: *pr.Config.GeneralConfig, + RoundConfig: *pr.Config.RoundConfig, + PrefConfig: *pr.Config.PreferencesConfig, + ImportDbConfig: *pr.Config.ImportDbConfig, + FlagsConfig: *pr.Config.FlagsConfig, + WorkingDir: pr.Config.FlagsConfig.WorkingDir, + CoreComponents: pr.CoreComponents, + CryptoComponents: pr.CryptoComponents, + NetworkComponents: pr.NetworkComponents, + StatusCoreComponents: pr.StatusCoreComponents, + } + + bootstrapFactory, err := 
factoryBootstrap.NewBootstrapComponentsFactory(argsBootstrap) + require.Nil(tb, err) + + bootstrapComp, err := factoryBootstrap.NewManagedBootstrapComponents(bootstrapFactory) + require.Nil(tb, err) + + err = bootstrapComp.Create() + require.Nil(tb, err) + require.Nil(tb, bootstrapComp.CheckSubcomponents()) + + pr.closers = append(pr.closers, bootstrapComp) + pr.BootstrapComponents = bootstrapComp +} + +func (pr *ProcessorRunner) createDataComponents(tb testing.TB) { + argsData := factoryData.DataComponentsFactoryArgs{ + Config: *pr.Config.GeneralConfig, + PrefsConfig: pr.Config.PreferencesConfig.Preferences, + ShardCoordinator: pr.BootstrapComponents.ShardCoordinator(), + Core: pr.CoreComponents, + StatusCore: pr.StatusCoreComponents, + Crypto: pr.CryptoComponents, + CurrentEpoch: 0, + CreateTrieEpochRootHashStorer: false, + NodeProcessingMode: common.Normal, + FlagsConfigs: config.ContextFlagsConfig{ + SnapshotsEnabled: false, + }, + } + + dataFactory, err := factoryData.NewDataComponentsFactory(argsData) + require.Nil(tb, err) + + dataComp, err := factoryData.NewManagedDataComponents(dataFactory) + require.Nil(tb, err) + + err = dataComp.Create() + require.Nil(tb, err) + require.Nil(tb, dataComp.CheckSubcomponents()) + + pr.closers = append(pr.closers, dataComp) + pr.DataComponents = dataComp +} + +func (pr *ProcessorRunner) createStateComponents(tb testing.TB) { + argsState := factoryState.StateComponentsFactoryArgs{ + Config: *pr.Config.GeneralConfig, + Core: pr.CoreComponents, + StatusCore: pr.StatusCoreComponents, + StorageService: pr.DataComponents.StorageService(), + ProcessingMode: common.Normal, + ShouldSerializeSnapshots: false, + SnapshotsEnabled: false, + ChainHandler: pr.DataComponents.Blockchain(), + } + + stateFactory, err := factoryState.NewStateComponentsFactory(argsState) + require.Nil(tb, err) + + stateComp, err := factoryState.NewManagedStateComponents(stateFactory) + require.Nil(tb, err) + + err = stateComp.Create() + require.Nil(tb, err) + require.Nil(tb, stateComp.CheckSubcomponents()) + + pr.closers = append(pr.closers, stateComp) + pr.StateComponents = stateComp +} + +func (pr *ProcessorRunner) createStatusComponents(tb testing.TB) { + nodesShufflerOut, err := factoryBootstrap.CreateNodesShuffleOut( + pr.CoreComponents.GenesisNodesSetup(), + pr.Config.GeneralConfig.EpochStartConfig, + pr.CoreComponents.ChanStopNodeProcess(), + ) + require.Nil(tb, err) + + bootstrapStorer, err := pr.DataComponents.StorageService().GetStorer(dataRetriever.BootstrapUnit) + require.Nil(tb, err) + + pr.NodesCoordinator, err = factoryBootstrap.CreateNodesCoordinator( + nodesShufflerOut, + pr.CoreComponents.GenesisNodesSetup(), + pr.Config.PreferencesConfig.Preferences, + pr.CoreComponents.EpochStartNotifierWithConfirm(), + pr.CryptoComponents.PublicKey(), + pr.CoreComponents.InternalMarshalizer(), + pr.CoreComponents.Hasher(), + pr.CoreComponents.Rater(), + bootstrapStorer, + pr.CoreComponents.NodesShuffler(), + pr.BootstrapComponents.ShardCoordinator().SelfId(), + pr.BootstrapComponents.EpochBootstrapParams(), + pr.BootstrapComponents.EpochBootstrapParams().Epoch(), + pr.CoreComponents.ChanStopNodeProcess(), + pr.CoreComponents.NodeTypeProvider(), + pr.CoreComponents.EnableEpochsHandler(), + pr.DataComponents.Datapool().CurrentEpochValidatorInfo(), + ) + require.Nil(tb, err) + + argsStatus := factoryStatus.StatusComponentsFactoryArgs{ + Config: *pr.Config.GeneralConfig, + ExternalConfig: *pr.Config.ExternalConfig, + EconomicsConfig: *pr.Config.EconomicsConfig, + ShardCoordinator: 
pr.BootstrapComponents.ShardCoordinator(), + NodesCoordinator: pr.NodesCoordinator, + EpochStartNotifier: pr.CoreComponents.EpochStartNotifierWithConfirm(), + CoreComponents: pr.CoreComponents, + StatusCoreComponents: pr.StatusCoreComponents, + NetworkComponents: pr.NetworkComponents, + StateComponents: pr.StateComponents, + IsInImportMode: false, + } + + statusFactory, err := factoryStatus.NewStatusComponentsFactory(argsStatus) + require.Nil(tb, err) + + statusComp, err := factoryStatus.NewManagedStatusComponents(statusFactory) + require.Nil(tb, err) + + err = statusComp.Create() + require.Nil(tb, err) + require.Nil(tb, statusComp.CheckSubcomponents()) + + pr.closers = append(pr.closers, statusComp) + pr.StatusComponents = statusComp +} + +func (pr *ProcessorRunner) createProcessComponents(tb testing.TB) { + totalSupply, ok := big.NewInt(0).SetString(pr.Config.EconomicsConfig.GlobalSettings.GenesisTotalSupply, 10) + require.True(tb, ok) + + args := genesis.AccountsParserArgs{ + GenesisFilePath: pr.Config.ConfigurationPathsHolder.Genesis, + EntireSupply: totalSupply, + MinterAddress: pr.Config.EconomicsConfig.GlobalSettings.GenesisMintingSenderAddress, + PubkeyConverter: pr.CoreComponents.AddressPubKeyConverter(), + KeyGenerator: pr.CryptoComponents.TxSignKeyGen(), + Hasher: pr.CoreComponents.Hasher(), + Marshalizer: pr.CoreComponents.InternalMarshalizer(), + } + + accountsParser, err := parsing.NewAccountsParser(args) + require.Nil(tb, err) + + whiteListCache, err := storageunit.NewCache(storageFactory.GetCacherFromConfig(pr.Config.GeneralConfig.WhiteListPool)) + require.Nil(tb, err) + + whiteListRequest, err := interceptors.NewWhiteListDataVerifier(whiteListCache) + require.Nil(tb, err) + + whiteListCacheVerified, err := storageunit.NewCache(storageFactory.GetCacherFromConfig(pr.Config.GeneralConfig.WhiteListerVerifiedTxs)) + require.Nil(tb, err) + + whiteListerVerifiedTxs, err := interceptors.NewWhiteListDataVerifier(whiteListCacheVerified) + require.Nil(tb, err) + + smartContractParser, err := parsing.NewSmartContractsParser( + pr.Config.ConfigurationPathsHolder.SmartContracts, + pr.CoreComponents.AddressPubKeyConverter(), + pr.CryptoComponents.TxSignKeyGen(), + ) + require.Nil(tb, err) + + argsGasScheduleNotifier := forking.ArgsNewGasScheduleNotifier{ + GasScheduleConfig: pr.Config.EpochConfig.GasSchedule, + ConfigDir: pr.Config.ConfigurationPathsHolder.GasScheduleDirectoryName, + EpochNotifier: pr.CoreComponents.EpochNotifier(), + WasmVMChangeLocker: pr.CoreComponents.WasmVMChangeLocker(), + } + gasScheduleNotifier, err := forking.NewGasScheduleNotifier(argsGasScheduleNotifier) + require.Nil(tb, err) + + historyRepoFactoryArgs := &dbLookupFactory.ArgsHistoryRepositoryFactory{ + SelfShardID: pr.BootstrapComponents.ShardCoordinator().SelfId(), + Config: pr.Config.GeneralConfig.DbLookupExtensions, + Hasher: pr.CoreComponents.Hasher(), + Marshalizer: pr.CoreComponents.InternalMarshalizer(), + Store: pr.DataComponents.StorageService(), + Uint64ByteSliceConverter: pr.CoreComponents.Uint64ByteSliceConverter(), + } + historyRepositoryFactory, err := dbLookupFactory.NewHistoryRepositoryFactory(historyRepoFactoryArgs) + require.Nil(tb, err) + + historyRepository, err := historyRepositoryFactory.Create() + require.Nil(tb, err) + + requestedItemsHandler := cache.NewTimeCache( + time.Duration(uint64(time.Millisecond) * pr.CoreComponents.GenesisNodesSetup().GetRoundDuration())) + + importStartHandler, err := trigger.NewImportStartHandler(filepath.Join(pr.Config.FlagsConfig.DbDir, 
common.DefaultDBPath), pr.Config.FlagsConfig.Version) + require.Nil(tb, err) + + argsProcess := factoryProcessing.ProcessComponentsFactoryArgs{ + Config: *pr.Config.GeneralConfig, + EpochConfig: *pr.Config.EpochConfig, + PrefConfigs: *pr.Config.PreferencesConfig, + ImportDBConfig: *pr.Config.ImportDbConfig, + FlagsConfig: config.ContextFlagsConfig{ + Version: "test", + WorkingDir: pr.Config.FlagsConfig.WorkingDir, + SnapshotsEnabled: false, + }, + AccountsParser: accountsParser, + SmartContractParser: smartContractParser, + GasSchedule: gasScheduleNotifier, + NodesCoordinator: pr.NodesCoordinator, + RequestedItemsHandler: requestedItemsHandler, + WhiteListHandler: whiteListRequest, + WhiteListerVerifiedTxs: whiteListerVerifiedTxs, + MaxRating: pr.Config.RatingsConfig.General.MaxRating, + SystemSCConfig: pr.Config.SystemSCConfig, + ImportStartHandler: importStartHandler, + HistoryRepo: historyRepository, + Data: pr.DataComponents, + CoreData: pr.CoreComponents, + Crypto: pr.CryptoComponents, + State: pr.StateComponents, + Network: pr.NetworkComponents, + BootstrapComponents: pr.BootstrapComponents, + StatusComponents: pr.StatusComponents, + StatusCoreComponents: pr.StatusCoreComponents, + } + + processFactory, err := factoryProcessing.NewProcessComponentsFactory(argsProcess) + require.Nil(tb, err) + + processComp, err := factoryProcessing.NewManagedProcessComponents(processFactory) + require.Nil(tb, err) + + err = processComp.Create() + require.Nil(tb, err) + require.Nil(tb, processComp.CheckSubcomponents()) + + pr.closers = append(pr.closers, processComp) + pr.ProcessComponents = processComp +} + +// Close will close all inner components +func (pr *ProcessorRunner) Close(tb testing.TB) { + for i := len(pr.closers) - 1; i >= 0; i-- { + err := pr.closers[i].Close() + require.Nil(tb, err) + } +} + +// GenerateAddress will generate an address for the given shardID +func (pr *ProcessorRunner) GenerateAddress(shardID uint32) []byte { + address := make([]byte, 32) + + for { + _, _ = rand.Read(address) + if pr.BootstrapComponents.ShardCoordinator().ComputeId(address) == shardID { + return address + } + } +} + +// AddBalanceToAccount will add the provided balance to the account +func (pr *ProcessorRunner) AddBalanceToAccount(tb testing.TB, address []byte, balanceToAdd *big.Int) { + userAccount := pr.GetUserAccount(tb, address) + + err := userAccount.AddToBalance(balanceToAdd) + require.Nil(tb, err) + + err = pr.StateComponents.AccountsAdapter().SaveAccount(userAccount) + require.Nil(tb, err) +} + +// GetUserAccount will return the user account for the provided address +func (pr *ProcessorRunner) GetUserAccount(tb testing.TB, address []byte) state.UserAccountHandler { + acc, err := pr.StateComponents.AccountsAdapter().LoadAccount(address) + require.Nil(tb, err) + + userAccount, ok := acc.(state.UserAccountHandler) + require.True(tb, ok) + + return userAccount +} + +// SetESDTForAccount will set the provided ESDT balance to the account +func (pr *ProcessorRunner) SetESDTForAccount( + tb testing.TB, + address []byte, + tokenIdentifier string, + esdtNonce uint64, + esdtValue *big.Int, +) { + userAccount := pr.GetUserAccount(tb, address) + + esdtData := &esdt.ESDigitalToken{ + Value: esdtValue, + Properties: []byte{}, + } + + esdtDataBytes, err := pr.CoreComponents.InternalMarshalizer().Marshal(esdtData) + require.Nil(tb, err) + + key := append([]byte(core.ProtectedKeyPrefix), []byte(core.ESDTKeyIdentifier)...) + key = append(key, tokenIdentifier...) 
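+ // descriptive note on the key assembled above: protected key prefix + ESDT key identifier + token identifier; + // the big-endian nonce bytes are appended below only when esdtNonce > 0 (non-fungible/semi-fungible entries)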
+ if esdtNonce > 0 { + key = append(key, big.NewInt(0).SetUint64(esdtNonce).Bytes()...) + } + + err = userAccount.SaveKeyValue(key, esdtDataBytes) + require.Nil(tb, err) + + err = pr.StateComponents.AccountsAdapter().SaveAccount(userAccount) + require.Nil(tb, err) + + pr.saveNewTokenOnSystemAccount(tb, key, esdtData) + + _, err = pr.StateComponents.AccountsAdapter().Commit() + require.Nil(tb, err) +} + +func (pr *ProcessorRunner) saveNewTokenOnSystemAccount(tb testing.TB, tokenKey []byte, esdtData *esdt.ESDigitalToken) { + esdtDataOnSystemAcc := esdtData + esdtDataOnSystemAcc.Properties = nil + esdtDataOnSystemAcc.Reserved = []byte{1} + esdtDataOnSystemAcc.Value.Set(esdtData.Value) + + esdtDataBytes, err := pr.CoreComponents.InternalMarshalizer().Marshal(esdtData) + require.Nil(tb, err) + + sysAccount, err := pr.StateComponents.AccountsAdapter().LoadAccount(core.SystemAccountAddress) + require.Nil(tb, err) + + sysUserAccount, ok := sysAccount.(state.UserAccountHandler) + require.True(tb, ok) + + err = sysUserAccount.SaveKeyValue(tokenKey, esdtDataBytes) + require.Nil(tb, err) + + err = pr.StateComponents.AccountsAdapter().SaveAccount(sysAccount) + require.Nil(tb, err) +} + +// ExecuteTransactionAsScheduled will execute the provided transaction as scheduled +func (pr *ProcessorRunner) ExecuteTransactionAsScheduled(tb testing.TB, tx *transaction.Transaction) error { + hash, err := core.CalculateHash(pr.CoreComponents.InternalMarshalizer(), pr.CoreComponents.Hasher(), tx) + require.Nil(tb, err) + pr.ProcessComponents.ScheduledTxsExecutionHandler().AddScheduledTx(hash, tx) + + return pr.ProcessComponents.ScheduledTxsExecutionHandler().Execute(hash) +} + +// CreateDeploySCTx will return the transaction and the hash for the deployment smart-contract transaction +func (pr *ProcessorRunner) CreateDeploySCTx( + tb testing.TB, + owner []byte, + contractPath string, + gasLimit uint64, + initialHexParameters []string, +) (*transaction.Transaction, []byte) { + scCode := wasm.GetSCCode(contractPath) + ownerAccount := pr.GetUserAccount(tb, owner) + + txDataComponents := append([]string{wasm.CreateDeployTxData(scCode)}, initialHexParameters...) 
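+ // the components are joined with "@" below to form the Data field of the deploy transaction: + // the deploy payload built by wasm.CreateDeployTxData followed by any hex-encoded init parameters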
+ + tx := &transaction.Transaction{ + Nonce: ownerAccount.GetNonce(), + Value: big.NewInt(0), + RcvAddr: vm.CreateEmptyAddress(), + SndAddr: owner, + GasPrice: pr.CoreComponents.EconomicsData().MinGasPrice(), + GasLimit: gasLimit, + Data: []byte(strings.Join(txDataComponents, "@")), + } + + hash, err := core.CalculateHash(pr.CoreComponents.InternalMarshalizer(), pr.CoreComponents.Hasher(), tx) + require.Nil(tb, err) + + return tx, hash +} diff --git a/integrationTests/realcomponents/processorRunner_test.go b/integrationTests/realcomponents/processorRunner_test.go new file mode 100644 index 00000000000..55951b63831 --- /dev/null +++ b/integrationTests/realcomponents/processorRunner_test.go @@ -0,0 +1,17 @@ +package realcomponents + +import ( + "testing" + + "github.com/multiversx/mx-chain-go/testscommon" +) + +func TestNewProcessorRunnerAndClose(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + cfg := testscommon.CreateTestConfigs(t, "../../cmd/node/config") + pr := NewProcessorRunner(t, *cfg) + pr.Close(t) +} diff --git a/integrationTests/realcomponents/testdata/adder/adder.abi.json b/integrationTests/realcomponents/testdata/adder/adder.abi.json new file mode 100644 index 00000000000..4f529e58e47 --- /dev/null +++ b/integrationTests/realcomponents/testdata/adder/adder.abi.json @@ -0,0 +1,61 @@ +{ + "buildInfo": { + "rustc": { + "version": "1.60.0-nightly", + "commitHash": "c5c610aad0a012a9228ecb83cc19e77111a52140", + "commitDate": "2022-02-14", + "channel": "Nightly", + "short": "rustc 1.60.0-nightly (c5c610aad 2022-02-14)" + }, + "contractCrate": { + "name": "adder", + "version": "0.0.0" + }, + "framework": { + "name": "elrond-wasm", + "version": "0.29.3" + } + }, + "docs": [ + "One of the simplest smart contracts possible,", + "it holds a single variable in storage, which anyone can increment." + ], + "name": "Adder", + "constructor": { + "inputs": [ + { + "name": "initial_value", + "type": "BigUint" + } + ], + "outputs": [] + }, + "endpoints": [ + { + "name": "getSum", + "mutability": "readonly", + "inputs": [], + "outputs": [ + { + "type": "BigUint" + } + ] + }, + { + "docs": [ + "Add desired amount to the storage variable." 
+ ], + "name": "add", + "mutability": "mutable", + "inputs": [ + { + "name": "value", + "type": "BigUint" + } + ], + "outputs": [] + } + ], + "hasCallback": false, + "types": {} +} diff --git a/integrationTests/realcomponents/testdata/adder/adder.wasm b/integrationTests/realcomponents/testdata/adder/adder.wasm new file mode 100755 index 00000000000..bcf3b797f3f Binary files /dev/null and b/integrationTests/realcomponents/testdata/adder/adder.wasm differ diff --git a/integrationTests/realcomponents/txsimulator/componentConstruction_test.go b/integrationTests/realcomponents/txsimulator/componentConstruction_test.go new file mode 100644 index 00000000000..c7c30cf6d32 --- /dev/null +++ b/integrationTests/realcomponents/txsimulator/componentConstruction_test.go @@ -0,0 +1,137 @@ +package txsimulator + +import ( + "fmt" + "math/big" + "testing" + + "github.com/multiversx/mx-chain-core-go/data/block" + "github.com/multiversx/mx-chain-core-go/data/transaction" + "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/integrationTests/realcomponents" + "github.com/multiversx/mx-chain-go/testscommon" + "github.com/multiversx/mx-chain-go/vm" + logger "github.com/multiversx/mx-chain-logger-go" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +var log = logger.GetOrCreate("integrationTests/realcomponents/txsimulator") + +func TestTransactionSimulationComponentConstructionOnMetachain(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + cfg := testscommon.CreateTestConfigs(t, "../../../cmd/node/config") + cfg.EpochConfig.EnableEpochs.ESDTEnableEpoch = 0 + cfg.EpochConfig.EnableEpochs.BuiltInFunctionsEnableEpoch = 0 + cfg.PreferencesConfig.Preferences.DestinationShardAsObserver = "metachain" // the problem was only on the metachain + + pr := realcomponents.NewProcessorRunner(t, *cfg) + defer pr.Close(t) + + senderShardID := uint32(0) // doesn't matter + alice := pr.GenerateAddress(senderShardID) + log.Info("generated address", + "alice", pr.CoreComponents.AddressPubKeyConverter().SilentEncode(alice, log), + "shard", senderShardID, + ) + + rootHash, err := pr.StateComponents.AccountsAdapter().Commit() + require.Nil(t, err) + + err = pr.DataComponents.Blockchain().SetCurrentBlockHeaderAndRootHash( + &block.MetaBlock{ + Nonce: 1, + RootHash: rootHash, + }, rootHash) + require.Nil(t, err) + + issueCost, _ := big.NewInt(0).SetString(pr.Config.SystemSCConfig.ESDTSystemSCConfig.BaseIssuingCost, 10) + + txForSimulation := &transaction.Transaction{ + Nonce: pr.GetUserAccount(t, alice).GetNonce(), + Value: issueCost, + RcvAddr: vm.ESDTSCAddress, + SndAddr: alice, + GasPrice: pr.CoreComponents.EconomicsData().MinGasPrice(), + GasLimit: 60_000_000, + Data: []byte(fmt.Sprintf("issue@%x@%x@0100@02", "token", "tkn")), + ChainID: []byte(pr.CoreComponents.ChainID()), + Version: 1, + } + + _, err = pr.ProcessComponents.TransactionSimulatorProcessor().ProcessTx(txForSimulation) + assert.Nil(t, err) + assert.Equal(t, 0, pr.StateComponents.AccountsAdapter().JournalLen()) // state for processing should not be dirtied +} + +func TestTransactionSimulationComponentConstructionOnShard(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + cfg := testscommon.CreateTestConfigs(t, "../../../cmd/node/config") + cfg.EpochConfig.EnableEpochs.SCDeployEnableEpoch = 0 + cfg.PreferencesConfig.Preferences.DestinationShardAsObserver = "0" + cfg.GeneralConfig.VirtualMachine.Execution.WasmVMVersions = 
[]config.WasmVMVersionByEpoch{ + { + StartEpoch: 0, + Version: "v1.4", + }, + } + + pr := realcomponents.NewProcessorRunner(t, *cfg) + defer pr.Close(t) + + senderShardID := pr.ProcessComponents.ShardCoordinator().SelfId() + alice := pr.GenerateAddress(senderShardID) + log.Info("generated address", + "alice", pr.CoreComponents.AddressPubKeyConverter().SilentEncode(alice, log), + "shard", senderShardID, + ) + + // mint some tokens for alice + mintValue, _ := big.NewInt(0).SetString("1000000000000000000", 10) // 1 EGLD + pr.AddBalanceToAccount(t, alice, mintValue) + + // deploy the contract + txDeploy, hash := pr.CreateDeploySCTx(t, alice, "../testdata/adder/adder.wasm", 3000000, []string{"01"}) + err := pr.ExecuteTransactionAsScheduled(t, txDeploy) + require.Nil(t, err) + + // get the contract address from logs + logs, found := pr.ProcessComponents.TxLogsProcessor().GetLogFromCache(hash) + require.True(t, found) + events := logs.GetLogEvents() + require.Equal(t, 1, len(events)) + require.Equal(t, "SCDeploy", string(events[0].GetIdentifier())) + contractAddress := events[0].GetAddress() + + rootHash, err := pr.StateComponents.AccountsAdapter().Commit() + require.Nil(t, err) + + err = pr.DataComponents.Blockchain().SetCurrentBlockHeaderAndRootHash( + &block.Header{ + Nonce: 1, + RootHash: rootHash, + }, rootHash) + require.Nil(t, err) + + txForSimulation := &transaction.Transaction{ + Nonce: pr.GetUserAccount(t, alice).GetNonce(), + Value: big.NewInt(0), + RcvAddr: contractAddress, + SndAddr: alice, + GasPrice: pr.CoreComponents.EconomicsData().MinGasPrice(), + GasLimit: 3_000_000, + Data: []byte("add@06"), + ChainID: []byte(pr.CoreComponents.ChainID()), + Version: 1, + } + + _, err = pr.ProcessComponents.TransactionSimulatorProcessor().ProcessTx(txForSimulation) + assert.Nil(t, err) + assert.Equal(t, 0, pr.StateComponents.AccountsAdapter().JournalLen()) // state for processing should not be dirtied +} diff --git a/integrationTests/state/stateTrie/stateTrie_test.go b/integrationTests/state/stateTrie/stateTrie_test.go index 2b9267de06f..f653c917308 100644 --- a/integrationTests/state/stateTrie/stateTrie_test.go +++ b/integrationTests/state/stateTrie/stateTrie_test.go @@ -5,13 +5,13 @@ import ( "encoding/base64" "encoding/binary" "encoding/hex" - "errors" "fmt" "math" "math/big" "math/rand" "runtime" "strconv" + "strings" "sync" "sync/atomic" "testing" @@ -37,9 +37,8 @@ import ( "github.com/multiversx/mx-chain-go/storage/storageunit" "github.com/multiversx/mx-chain-go/testscommon" "github.com/multiversx/mx-chain-go/testscommon/statusHandler" - trieMock "github.com/multiversx/mx-chain-go/testscommon/trie" + testStorage "github.com/multiversx/mx-chain-go/testscommon/storage" "github.com/multiversx/mx-chain-go/trie" - trieFactory "github.com/multiversx/mx-chain-go/trie/factory" vmcommon "github.com/multiversx/mx-chain-vm-common-go" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -47,18 +46,6 @@ import ( const denomination = "000000000000000000" -func getNewTrieStorageManagerArgs() trie.NewTrieStorageManagerArgs { - return trie.NewTrieStorageManagerArgs{ - MainStorer: integrationTests.CreateMemUnit(), - CheckpointsStorer: integrationTests.CreateMemUnit(), - Marshalizer: integrationTests.TestMarshalizer, - Hasher: integrationTests.TestHasher, - GeneralConfig: config.TrieStorageManagerConfig{SnapshotsGoroutineNum: 1}, - CheckpointHashesHolder: &trieMock.CheckpointHashesHolderStub{}, - IdleProvider: &testscommon.ProcessStatusHandlerStub{}, - } -} - func 
TestAccountsDB_RetrieveDataWithSomeValuesShouldWork(t *testing.T) { // test simulates creation of a new account, data trie retrieval, // adding a (key, value) pair in that data trie, committing changes @@ -270,7 +257,7 @@ func TestAccountsDB_CommitTwoOkAccountsShouldWork(t *testing.T) { func TestTrieDB_RecreateFromStorageShouldWork(t *testing.T) { hasher := integrationTests.TestHasher store := integrationTests.CreateMemUnit() - args := getNewTrieStorageManagerArgs() + args := testStorage.GetStorageManagerArgs() args.MainStorer = store args.Hasher = hasher trieStorage, _ := trie.NewTrieStorageManager(args) @@ -1055,7 +1042,7 @@ func createAccounts( HashesSize: evictionWaitListSize * 100, } ewl, _ := evictionWaitingList.NewMemoryEvictionWaitingList(ewlArgs) - args := getNewTrieStorageManagerArgs() + args := testStorage.GetStorageManagerArgs() args.MainStorer = store trieStorage, _ := trie.NewTrieStorageManager(args) maxTrieLevelInMemory := uint(5) @@ -1368,7 +1355,7 @@ func TestRollbackBlockAndCheckThatPruningIsCancelledOnAccountsTrie(t *testing.T) if !bytes.Equal(rootHash, rootHashOfRollbackedBlock) { time.Sleep(time.Second * 6) err = shardNode.AccntState.RecreateTrie(rootHashOfRollbackedBlock) - require.True(t, errors.Is(err, trie.ErrKeyNotFound)) + require.True(t, strings.Contains(err.Error(), trie.ErrKeyNotFound.Error())) } nonces := []*uint64{new(uint64), new(uint64)} @@ -1529,7 +1516,7 @@ func TestTriePruningWhenBlockIsFinal(t *testing.T) { require.Equal(t, uint64(17), nodes[1].BlockChain.GetCurrentBlockHeader().GetNonce()) err := shardNode.AccntState.RecreateTrie(rootHashOfFirstBlock) - require.True(t, errors.Is(err, trie.ErrKeyNotFound)) + require.True(t, strings.Contains(err.Error(), trie.ErrKeyNotFound.Error())) } func TestStatePruningIsNotBuffered(t *testing.T) { @@ -1674,7 +1661,7 @@ func checkTrieCanBeRecreated(tb testing.TB, node *integrationTests.TestProcessor return } - stateTrie := node.TrieContainer.Get([]byte(trieFactory.UserAccountTrie)) + stateTrie := node.TrieContainer.Get([]byte(dataRetriever.UserAccountsUnit.String())) roothash := node.BlockChain.GetCurrentBlockRootHash() tr, err := stateTrie.Recreate(roothash) require.Nil(tb, err) @@ -1852,7 +1839,7 @@ func testNodeStateCheckpointSnapshotAndPruning( prunedRootHashes [][]byte, ) { - stateTrie := node.TrieContainer.Get([]byte(trieFactory.UserAccountTrie)) + stateTrie := node.TrieContainer.Get([]byte(dataRetriever.UserAccountsUnit.String())) assert.Equal(t, 6, len(checkpointsRootHashes)) for i := range checkpointsRootHashes { tr, err := stateTrie.Recreate(checkpointsRootHashes[i]) @@ -2033,7 +2020,7 @@ func checkCodeConsistency( ) { for code := range codeMap { codeHash := integrationTests.TestHasher.Compute(code) - tr := shardNode.TrieContainer.Get([]byte(trieFactory.UserAccountTrie)) + tr := shardNode.TrieContainer.Get([]byte(dataRetriever.UserAccountsUnit.String())) if codeMap[code] != 0 { val, _, err := tr.Get(codeHash) @@ -2487,7 +2474,7 @@ func createAccountsDBTestSetup() *state.AccountsDB { HashesSize: evictionWaitListSize * 100, } ewl, _ := evictionWaitingList.NewMemoryEvictionWaitingList(ewlArgs) - args := getNewTrieStorageManagerArgs() + args := testStorage.GetStorageManagerArgs() args.GeneralConfig = generalCfg trieStorage, _ := trie.NewTrieStorageManager(args) maxTrieLevelInMemory := uint(5) diff --git a/integrationTests/state/stateTrieClose/stateTrieClose_test.go b/integrationTests/state/stateTrieClose/stateTrieClose_test.go index 985f49c660a..7b96f2b39b1 100644 --- 
a/integrationTests/state/stateTrieClose/stateTrieClose_test.go +++ b/integrationTests/state/stateTrieClose/stateTrieClose_test.go @@ -10,13 +10,10 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/common/errChan" - "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/integrationTests" - "github.com/multiversx/mx-chain-go/testscommon" "github.com/multiversx/mx-chain-go/testscommon/goroutines" - "github.com/multiversx/mx-chain-go/testscommon/hashingMocks" + "github.com/multiversx/mx-chain-go/testscommon/storage" "github.com/multiversx/mx-chain-go/trie" - "github.com/multiversx/mx-chain-go/trie/hashesHolder" "github.com/multiversx/mx-chain-go/trie/keyBuilder" "github.com/stretchr/testify/assert" ) @@ -116,15 +113,7 @@ func TestPatriciaMerkleTrie_Close(t *testing.T) { } func TestTrieStorageManager_Close(t *testing.T) { - args := trie.NewTrieStorageManagerArgs{ - MainStorer: testscommon.CreateMemUnit(), - CheckpointsStorer: testscommon.CreateMemUnit(), - Marshalizer: &testscommon.MarshalizerMock{}, - Hasher: &hashingMocks.HasherMock{}, - GeneralConfig: config.TrieStorageManagerConfig{SnapshotsGoroutineNum: 1}, - CheckpointHashesHolder: hashesHolder.NewCheckpointHashesHolder(10, 32), - IdleProvider: &testscommon.ProcessStatusHandlerStub{}, - } + args := storage.GetStorageManagerArgs() gc := goroutines.NewGoCounter(goroutines.TestsRelevantGoRoutines) idxInitial, _ := gc.Snapshot() diff --git a/integrationTests/state/stateTrieSync/stateTrieSync_test.go b/integrationTests/state/stateTrieSync/stateTrieSync_test.go index b48d8c10dd4..6ef9c6e5d9a 100644 --- a/integrationTests/state/stateTrieSync/stateTrieSync_test.go +++ b/integrationTests/state/stateTrieSync/stateTrieSync_test.go @@ -12,6 +12,7 @@ import ( "github.com/multiversx/mx-chain-core-go/core/throttler" "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/common/errChan" + "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/epochStart/notifier" "github.com/multiversx/mx-chain-go/integrationTests" "github.com/multiversx/mx-chain-go/process/factory" @@ -21,7 +22,6 @@ import ( "github.com/multiversx/mx-chain-go/testscommon" testStorage "github.com/multiversx/mx-chain-go/testscommon/state" "github.com/multiversx/mx-chain-go/trie" - trieFactory "github.com/multiversx/mx-chain-go/trie/factory" "github.com/multiversx/mx-chain-go/trie/keyBuilder" "github.com/multiversx/mx-chain-go/trie/statistics" "github.com/multiversx/mx-chain-go/trie/storageMarker" @@ -95,7 +95,7 @@ func testNodeRequestInterceptTrieNodesWithMessenger(t *testing.T, version int) { time.Sleep(integrationTests.SyncDelay) - resolverTrie := nResolver.TrieContainer.Get([]byte(trieFactory.UserAccountTrie)) + resolverTrie := nResolver.TrieContainer.Get([]byte(dataRetriever.UserAccountsUnit.String())) // we have tested even with the 1000000 value and found out that it worked in a reasonable amount of time ~3.5 minutes numTrieLeaves := 10000 for i := 0; i < numTrieLeaves; i++ { @@ -108,7 +108,7 @@ func testNodeRequestInterceptTrieNodesWithMessenger(t *testing.T, version int) { numLeaves := getNumLeaves(t, resolverTrie, rootHash) assert.Equal(t, numTrieLeaves, numLeaves) - requesterTrie := nRequester.TrieContainer.Get([]byte(trieFactory.UserAccountTrie)) + requesterTrie := nRequester.TrieContainer.Get([]byte(dataRetriever.UserAccountsUnit.String())) nilRootHash, _ := requesterTrie.RootHash() tss := 
statistics.NewTrieSyncStatistics() @@ -216,7 +216,7 @@ func testNodeRequestInterceptTrieNodesWithMessengerNotSyncingShouldErr(t *testin time.Sleep(integrationTests.SyncDelay) - resolverTrie := nResolver.TrieContainer.Get([]byte(trieFactory.UserAccountTrie)) + resolverTrie := nResolver.TrieContainer.Get([]byte(dataRetriever.UserAccountsUnit.String())) // we have tested even with the 1000000 value and found out that it worked in a reasonable amount of time ~3.5 minutes numTrieLeaves := 100000 for i := 0; i < numTrieLeaves; i++ { @@ -229,7 +229,7 @@ func testNodeRequestInterceptTrieNodesWithMessengerNotSyncingShouldErr(t *testin numLeaves := getNumLeaves(t, resolverTrie, rootHash) assert.Equal(t, numTrieLeaves, numLeaves) - requesterTrie := nRequester.TrieContainer.Get([]byte(trieFactory.UserAccountTrie)) + requesterTrie := nRequester.TrieContainer.Get([]byte(dataRetriever.UserAccountsUnit.String())) tss := statistics.NewTrieSyncStatistics() arg := trie.ArgTrieSyncer{ @@ -339,7 +339,7 @@ func testMultipleDataTriesSync(t *testing.T, numAccounts int, numDataTrieLeaves err = leavesChannel.ErrChan.ReadFromChanNonBlocking() require.Nil(t, err) - requesterTrie := nRequester.TrieContainer.Get([]byte(trieFactory.UserAccountTrie)) + requesterTrie := nRequester.TrieContainer.Get([]byte(dataRetriever.UserAccountsUnit.String())) nilRootHash, _ := requesterTrie.RootHash() syncerArgs := getUserAccountSyncerArgs(nRequester, version) @@ -347,7 +347,7 @@ func testMultipleDataTriesSync(t *testing.T, numAccounts int, numDataTrieLeaves userAccSyncer, err := syncer.NewUserAccountsSyncer(syncerArgs) assert.Nil(t, err) - err = userAccSyncer.SyncAccounts(rootHash) + err = userAccSyncer.SyncAccounts(rootHash, storageMarker.NewDisabledStorageMarker()) assert.Nil(t, err) _ = nRequester.AccntState.RecreateTrie(rootHash) @@ -466,14 +466,14 @@ func testSyncMissingSnapshotNodes(t *testing.T, version int) { time.Sleep(integrationTests.StepDelay) } - resolverTrie := nResolver.TrieContainer.Get([]byte(trieFactory.UserAccountTrie)) + resolverTrie := nResolver.TrieContainer.Get([]byte(dataRetriever.UserAccountsUnit.String())) accState := nResolver.AccntState dataTrieRootHashes := addAccountsToState(t, numAccounts, numDataTrieLeaves, accState, valSize) rootHash, _ := accState.RootHash() numLeaves := getNumLeaves(t, resolverTrie, rootHash) require.Equal(t, numAccounts+numSystemAccounts, numLeaves) - requesterTrie := nRequester.TrieContainer.Get([]byte(trieFactory.UserAccountTrie)) + requesterTrie := nRequester.TrieContainer.Get([]byte(dataRetriever.UserAccountsUnit.String())) nilRootHash, _ := requesterTrie.RootHash() copyPartialState(t, nResolver, nRequester, dataTrieRootHashes) @@ -487,7 +487,7 @@ func testSyncMissingSnapshotNodes(t *testing.T, version int) { err = nRequester.AccntState.StartSnapshotIfNeeded() assert.Nil(t, err) - tsm := nRequester.TrieStorageManagers[trieFactory.UserAccountTrie] + tsm := nRequester.TrieStorageManagers[dataRetriever.UserAccountsUnit.String()] _ = tsm.PutInEpoch([]byte(common.ActiveDBKey), []byte(common.ActiveDBVal), 0) nRequester.AccntState.SnapshotState(rootHash) for tsm.IsPruningBlocked() { @@ -505,12 +505,12 @@ func testSyncMissingSnapshotNodes(t *testing.T, version int) { } func copyPartialState(t *testing.T, sourceNode, destinationNode *integrationTests.TestProcessorNode, dataTriesRootHashes [][]byte) { - resolverTrie := sourceNode.TrieContainer.Get([]byte(trieFactory.UserAccountTrie)) + resolverTrie := sourceNode.TrieContainer.Get([]byte(dataRetriever.UserAccountsUnit.String())) hashes, _ 
:= resolverTrie.GetAllHashes() assert.NotEqual(t, 0, len(hashes)) hashes = append(hashes, getDataTriesHashes(t, resolverTrie, dataTriesRootHashes)...) - destStorage := destinationNode.TrieContainer.Get([]byte(trieFactory.UserAccountTrie)).GetStorageManager() + destStorage := destinationNode.TrieContainer.Get([]byte(dataRetriever.UserAccountsUnit.String())).GetStorageManager() for i, hash := range hashes { if i%1000 == 0 { @@ -582,14 +582,13 @@ func getUserAccountSyncerArgs(node *integrationTests.TestProcessorNode, version ArgsNewBaseAccountsSyncer: syncer.ArgsNewBaseAccountsSyncer{ Hasher: integrationTests.TestHasher, Marshalizer: integrationTests.TestMarshalizer, - TrieStorageManager: node.TrieStorageManagers[trieFactory.UserAccountTrie], + TrieStorageManager: node.TrieStorageManagers[dataRetriever.UserAccountsUnit.String()], RequestHandler: node.RequestHandler, Timeout: common.TimeoutGettingTrieNodes, Cacher: node.DataPool.TrieNodes(), MaxTrieLevelInMemory: 200, MaxHardCapForMissingNodes: 5000, TrieSyncerVersion: version, - StorageMarker: storageMarker.NewTrieStorageMarker(), UserAccountsSyncStatisticsHandler: statistics.NewTrieSyncStatistics(), AppStatusHandler: integrationTests.TestAppStatusHandler, }, diff --git a/integrationTests/testInitializer.go b/integrationTests/testInitializer.go index dac26a1b4be..b1c860ff006 100644 --- a/integrationTests/testInitializer.go +++ b/integrationTests/testInitializer.go @@ -62,6 +62,7 @@ import ( testStorage "github.com/multiversx/mx-chain-go/testscommon/state" "github.com/multiversx/mx-chain-go/testscommon/statusHandler" statusHandlerMock "github.com/multiversx/mx-chain-go/testscommon/statusHandler" + testcommonStorage "github.com/multiversx/mx-chain-go/testscommon/storage" "github.com/multiversx/mx-chain-go/testscommon/txDataBuilder" "github.com/multiversx/mx-chain-go/trie" "github.com/multiversx/mx-chain-go/trie/hashesHolder" @@ -396,12 +397,6 @@ func CreateStore(numOfShards uint32) dataRetriever.StorageService { // CreateTrieStorageManagerWithPruningStorer creates the trie storage manager for the tests func CreateTrieStorageManagerWithPruningStorer(coordinator sharding.Coordinator, notifier pruning.EpochStartNotifier) common.StorageManager { - generalCfg := config.TrieStorageManagerConfig{ - PruningBufferLen: 1000, - SnapshotsBufferLen: 10, - SnapshotsGoroutineNum: 1, - } - mainStorer, _, err := testStorage.CreateTestingTriePruningStorer(coordinator, notifier) if err != nil { fmt.Println("err creating main storer" + err.Error()) @@ -410,15 +405,14 @@ func CreateTrieStorageManagerWithPruningStorer(coordinator sharding.Coordinator, if err != nil { fmt.Println("err creating checkpoints storer" + err.Error()) } - args := trie.NewTrieStorageManagerArgs{ - MainStorer: mainStorer, - CheckpointsStorer: checkpointsStorer, - Marshalizer: TestMarshalizer, - Hasher: TestHasher, - GeneralConfig: generalCfg, - CheckpointHashesHolder: hashesHolder.NewCheckpointHashesHolder(10000000, uint64(TestHasher.Size())), - IdleProvider: &testscommon.ProcessStatusHandlerStub{}, - } + + args := testcommonStorage.GetStorageManagerArgs() + args.MainStorer = mainStorer + args.CheckpointsStorer = checkpointsStorer + args.Marshalizer = TestMarshalizer + args.Hasher = TestHasher + args.CheckpointHashesHolder = hashesHolder.NewCheckpointHashesHolder(10000000, uint64(TestHasher.Size())) + trieStorageManager, _ := trie.NewTrieStorageManager(args) return trieStorageManager @@ -426,20 +420,12 @@ func CreateTrieStorageManagerWithPruningStorer(coordinator sharding.Coordinator, // 
CreateTrieStorageManager creates the trie storage manager for the tests func CreateTrieStorageManager(store storage.Storer) (common.StorageManager, storage.Storer) { - generalCfg := config.TrieStorageManagerConfig{ - PruningBufferLen: 1000, - SnapshotsBufferLen: 10, - SnapshotsGoroutineNum: 1, - } - args := trie.NewTrieStorageManagerArgs{ - MainStorer: store, - CheckpointsStorer: CreateMemUnit(), - Marshalizer: TestMarshalizer, - Hasher: TestHasher, - GeneralConfig: generalCfg, - CheckpointHashesHolder: hashesHolder.NewCheckpointHashesHolder(10000000, uint64(TestHasher.Size())), - IdleProvider: &testscommon.ProcessStatusHandlerStub{}, - } + args := testcommonStorage.GetStorageManagerArgs() + args.MainStorer = store + args.Marshalizer = TestMarshalizer + args.Hasher = TestHasher + args.CheckpointHashesHolder = hashesHolder.NewCheckpointHashesHolder(10000000, uint64(TestHasher.Size())) + trieStorageManager, _ := trie.NewTrieStorageManager(args) return trieStorageManager, store @@ -667,7 +653,7 @@ func CreateFullGenesisBlocks( OwnerAddress: "aaaaaa", }, GovernanceSystemSCConfig: config.GovernanceSystemSCConfig{ - ChangeConfigAddress: DelegationManagerConfigChangeAddress, + OwnerAddress: DelegationManagerConfigChangeAddress, V1: config.GovernanceSystemSCConfigV1{ ProposalCost: "500", }, @@ -676,6 +662,7 @@ func CreateFullGenesisBlocks( MinQuorum: 0.5, MinPassThreshold: 0.5, MinVetoThreshold: 0.5, + LostProposalFee: "1", }, }, StakingSystemSCConfig: config.StakingSystemSCConfig{ @@ -783,8 +770,9 @@ func CreateGenesisMetaBlock( MinQuorum: 0.5, MinPassThreshold: 0.5, MinVetoThreshold: 0.5, + LostProposalFee: "1", }, - ChangeConfigAddress: DelegationManagerConfigChangeAddress, + OwnerAddress: DelegationManagerConfigChangeAddress, }, StakingSystemSCConfig: config.StakingSystemSCConfig{ GenesisNodePrice: "1000", @@ -1022,20 +1010,11 @@ func CreateSimpleTxProcessor(accnts state.AccountsAdapter) process.TransactionPr // CreateNewDefaultTrie returns a new trie with test hasher and marsahalizer func CreateNewDefaultTrie() common.Trie { - generalCfg := config.TrieStorageManagerConfig{ - PruningBufferLen: 1000, - SnapshotsBufferLen: 10, - SnapshotsGoroutineNum: 1, - } - args := trie.NewTrieStorageManagerArgs{ - MainStorer: CreateMemUnit(), - CheckpointsStorer: CreateMemUnit(), - Marshalizer: TestMarshalizer, - Hasher: TestHasher, - GeneralConfig: generalCfg, - CheckpointHashesHolder: hashesHolder.NewCheckpointHashesHolder(10000000, uint64(TestHasher.Size())), - IdleProvider: &testscommon.ProcessStatusHandlerStub{}, - } + args := testcommonStorage.GetStorageManagerArgs() + args.Marshalizer = TestMarshalizer + args.Hasher = TestHasher + args.CheckpointHashesHolder = hashesHolder.NewCheckpointHashesHolder(10000000, uint64(TestHasher.Size())) + trieStorage, _ := trie.NewTrieStorageManager(args) tr, _ := trie.NewTrie(trieStorage, TestMarshalizer, TestHasher, maxTrieLevelInMemory) @@ -1462,11 +1441,21 @@ func CreateNodesWithFullGenesis( numMetaChainNodes int, genesisFile string, ) ([]*TestProcessorNode, *TestProcessorNode) { - nodes := make([]*TestProcessorNode, numOfShards*nodesPerShard+numMetaChainNodes) - connectableNodes := make([]Connectable, len(nodes)) - enableEpochsConfig := GetDefaultEnableEpochsConfig() enableEpochsConfig.StakingV2EnableEpoch = UnreachableEpoch + return CreateNodesWithFullGenesisCustomEnableEpochs(numOfShards, nodesPerShard, numMetaChainNodes, genesisFile, enableEpochsConfig) +} + +// CreateNodesWithFullGenesisCustomEnableEpochs creates multiple nodes in different shards +func 
CreateNodesWithFullGenesisCustomEnableEpochs( + numOfShards int, + nodesPerShard int, + numMetaChainNodes int, + genesisFile string, + enableEpochsConfig *config.EnableEpochs, +) ([]*TestProcessorNode, *TestProcessorNode) { + nodes := make([]*TestProcessorNode, numOfShards*nodesPerShard+numMetaChainNodes) + connectableNodes := make([]Connectable, len(nodes)) economicsConfig := createDefaultEconomicsConfig() economicsConfig.GlobalSettings.YearSettings = append( diff --git a/integrationTests/testProcessorNode.go b/integrationTests/testProcessorNode.go index fb55a30cb98..dbfa6bc4b72 100644 --- a/integrationTests/testProcessorNode.go +++ b/integrationTests/testProcessorNode.go @@ -58,6 +58,7 @@ import ( "github.com/multiversx/mx-chain-go/node" "github.com/multiversx/mx-chain-go/node/external" "github.com/multiversx/mx-chain-go/node/nodeDebugFactory" + disabledOutport "github.com/multiversx/mx-chain-go/outport/disabled" "github.com/multiversx/mx-chain-go/p2p" p2pFactory "github.com/multiversx/mx-chain-go/p2p/factory" "github.com/multiversx/mx-chain-go/process" @@ -113,8 +114,8 @@ import ( stateMock "github.com/multiversx/mx-chain-go/testscommon/state" statusHandlerMock "github.com/multiversx/mx-chain-go/testscommon/statusHandler" storageStubs "github.com/multiversx/mx-chain-go/testscommon/storage" + "github.com/multiversx/mx-chain-go/testscommon/storageManager" trieMock "github.com/multiversx/mx-chain-go/testscommon/trie" - trieFactory "github.com/multiversx/mx-chain-go/trie/factory" "github.com/multiversx/mx-chain-go/trie/keyBuilder" "github.com/multiversx/mx-chain-go/update" "github.com/multiversx/mx-chain-go/update/trigger" @@ -572,15 +573,15 @@ func (tpn *TestProcessorNode) initAccountDBsWithPruningStorer() { tpn.TrieContainer = state.NewDataTriesHolder() var stateTrie common.Trie tpn.AccntState, stateTrie = CreateAccountsDB(UserAccount, trieStorageManager) - tpn.TrieContainer.Put([]byte(trieFactory.UserAccountTrie), stateTrie) + tpn.TrieContainer.Put([]byte(dataRetriever.UserAccountsUnit.String()), stateTrie) var peerTrie common.Trie tpn.PeerState, peerTrie = CreateAccountsDB(ValidatorAccount, trieStorageManager) - tpn.TrieContainer.Put([]byte(trieFactory.PeerAccountTrie), peerTrie) + tpn.TrieContainer.Put([]byte(dataRetriever.PeerAccountsUnit.String()), peerTrie) tpn.TrieStorageManagers = make(map[string]common.StorageManager) - tpn.TrieStorageManagers[trieFactory.UserAccountTrie] = trieStorageManager - tpn.TrieStorageManagers[trieFactory.PeerAccountTrie] = trieStorageManager + tpn.TrieStorageManagers[dataRetriever.UserAccountsUnit.String()] = trieStorageManager + tpn.TrieStorageManagers[dataRetriever.PeerAccountsUnit.String()] = trieStorageManager } func (tpn *TestProcessorNode) initAccountDBs(store storage.Storer) { @@ -588,15 +589,15 @@ func (tpn *TestProcessorNode) initAccountDBs(store storage.Storer) { tpn.TrieContainer = state.NewDataTriesHolder() var stateTrie common.Trie tpn.AccntState, stateTrie = CreateAccountsDB(UserAccount, trieStorageManager) - tpn.TrieContainer.Put([]byte(trieFactory.UserAccountTrie), stateTrie) + tpn.TrieContainer.Put([]byte(dataRetriever.UserAccountsUnit.String()), stateTrie) var peerTrie common.Trie tpn.PeerState, peerTrie = CreateAccountsDB(ValidatorAccount, trieStorageManager) - tpn.TrieContainer.Put([]byte(trieFactory.PeerAccountTrie), peerTrie) + tpn.TrieContainer.Put([]byte(dataRetriever.PeerAccountsUnit.String()), peerTrie) tpn.TrieStorageManagers = make(map[string]common.StorageManager) - tpn.TrieStorageManagers[trieFactory.UserAccountTrie] = 
trieStorageManager - tpn.TrieStorageManagers[trieFactory.PeerAccountTrie] = trieStorageManager + tpn.TrieStorageManagers[dataRetriever.UserAccountsUnit.String()] = trieStorageManager + tpn.TrieStorageManagers[dataRetriever.PeerAccountsUnit.String()] = trieStorageManager } func (tpn *TestProcessorNode) initValidatorStatistics() { @@ -796,6 +797,7 @@ func (tpn *TestProcessorNode) createFullSCQueryService(gasMap map[string]map[str argsBuiltIn := builtInFunctions.ArgsCreateBuiltInFunctionContainer{ GasSchedule: gasSchedule, MapDNSAddresses: make(map[string]struct{}), + MapDNSV2Addresses: make(map[string]struct{}), Marshalizer: TestMarshalizer, Accounts: tpn.AccntState, ShardCoordinator: tpn.ShardCoordinator, @@ -810,23 +812,24 @@ func (tpn *TestProcessorNode) createFullSCQueryService(gasMap map[string]map[str smartContractsCache := testscommon.NewCacherMock() argsHook := hooks.ArgBlockChainHook{ - Accounts: tpn.AccntState, - PubkeyConv: TestAddressPubkeyConverter, - StorageService: tpn.Storage, - BlockChain: tpn.BlockChain, - ShardCoordinator: tpn.ShardCoordinator, - Marshalizer: TestMarshalizer, - Uint64Converter: TestUint64Converter, - BuiltInFunctions: builtInFuncFactory.BuiltInFunctionContainer(), - NFTStorageHandler: builtInFuncFactory.NFTStorageHandler(), - GlobalSettingsHandler: builtInFuncFactory.ESDTGlobalSettingsHandler(), - DataPool: tpn.DataPool, - CompiledSCPool: smartContractsCache, - EpochNotifier: tpn.EpochNotifier, - EnableEpochsHandler: tpn.EnableEpochsHandler, - NilCompiledSCStore: true, - GasSchedule: gasSchedule, - Counter: counters.NewDisabledCounter(), + Accounts: tpn.AccntState, + PubkeyConv: TestAddressPubkeyConverter, + StorageService: tpn.Storage, + BlockChain: tpn.BlockChain, + ShardCoordinator: tpn.ShardCoordinator, + Marshalizer: TestMarshalizer, + Uint64Converter: TestUint64Converter, + BuiltInFunctions: builtInFuncFactory.BuiltInFunctionContainer(), + NFTStorageHandler: builtInFuncFactory.NFTStorageHandler(), + GlobalSettingsHandler: builtInFuncFactory.ESDTGlobalSettingsHandler(), + DataPool: tpn.DataPool, + CompiledSCPool: smartContractsCache, + EpochNotifier: tpn.EpochNotifier, + EnableEpochsHandler: tpn.EnableEpochsHandler, + NilCompiledSCStore: true, + GasSchedule: gasSchedule, + Counter: counters.NewDisabledCounter(), + MissingTrieNodesNotifier: &testscommon.MissingTrieNodesNotifierStub{}, } if tpn.ShardCoordinator.SelfId() == core.MetachainShardId { @@ -861,8 +864,9 @@ func (tpn *TestProcessorNode) createFullSCQueryService(gasMap map[string]map[str MinQuorum: 0.5, MinPassThreshold: 0.5, MinVetoThreshold: 0.5, + LostProposalFee: "1", }, - ChangeConfigAddress: DelegationManagerConfigChangeAddress, + OwnerAddress: DelegationManagerConfigChangeAddress, }, StakingSystemSCConfig: config.StakingSystemSCConfig{ GenesisNodePrice: "1000", @@ -1474,6 +1478,7 @@ func (tpn *TestProcessorNode) initInnerProcessors(gasMap map[string]map[string]u argsBuiltIn := builtInFunctions.ArgsCreateBuiltInFunctionContainer{ GasSchedule: gasSchedule, MapDNSAddresses: mapDNSAddresses, + MapDNSV2Addresses: mapDNSAddresses, Marshalizer: TestMarshalizer, Accounts: tpn.AccntState, ShardCoordinator: tpn.ShardCoordinator, @@ -1495,23 +1500,24 @@ func (tpn *TestProcessorNode) initInnerProcessors(gasMap map[string]map[string]u log.LogIfError(err) argsHook := hooks.ArgBlockChainHook{ - Accounts: tpn.AccntState, - PubkeyConv: TestAddressPubkeyConverter, - StorageService: tpn.Storage, - BlockChain: tpn.BlockChain, - ShardCoordinator: tpn.ShardCoordinator, - Marshalizer: TestMarshalizer, - 
Uint64Converter: TestUint64Converter, - BuiltInFunctions: builtInFuncFactory.BuiltInFunctionContainer(), - NFTStorageHandler: builtInFuncFactory.NFTStorageHandler(), - GlobalSettingsHandler: builtInFuncFactory.ESDTGlobalSettingsHandler(), - DataPool: tpn.DataPool, - CompiledSCPool: tpn.DataPool.SmartContracts(), - EpochNotifier: tpn.EpochNotifier, - EnableEpochsHandler: tpn.EnableEpochsHandler, - NilCompiledSCStore: true, - GasSchedule: gasSchedule, - Counter: counter, + Accounts: tpn.AccntState, + PubkeyConv: TestAddressPubkeyConverter, + StorageService: tpn.Storage, + BlockChain: tpn.BlockChain, + ShardCoordinator: tpn.ShardCoordinator, + Marshalizer: TestMarshalizer, + Uint64Converter: TestUint64Converter, + BuiltInFunctions: builtInFuncFactory.BuiltInFunctionContainer(), + NFTStorageHandler: builtInFuncFactory.NFTStorageHandler(), + GlobalSettingsHandler: builtInFuncFactory.ESDTGlobalSettingsHandler(), + DataPool: tpn.DataPool, + CompiledSCPool: tpn.DataPool.SmartContracts(), + EpochNotifier: tpn.EpochNotifier, + EnableEpochsHandler: tpn.EnableEpochsHandler, + NilCompiledSCStore: true, + GasSchedule: gasSchedule, + Counter: counter, + MissingTrieNodesNotifier: &testscommon.MissingTrieNodesNotifierStub{}, } maxGasLimitPerBlock := uint64(0xFFFFFFFFFFFFFFFF) @@ -1699,6 +1705,7 @@ func (tpn *TestProcessorNode) initMetaInnerProcessors(gasMap map[string]map[stri argsBuiltIn := builtInFunctions.ArgsCreateBuiltInFunctionContainer{ GasSchedule: gasSchedule, MapDNSAddresses: make(map[string]struct{}), + MapDNSV2Addresses: make(map[string]struct{}), Marshalizer: TestMarshalizer, Accounts: tpn.AccntState, ShardCoordinator: tpn.ShardCoordinator, @@ -1710,23 +1717,24 @@ func (tpn *TestProcessorNode) initMetaInnerProcessors(gasMap map[string]map[stri argsBuiltIn.AutomaticCrawlerAddresses = GenerateOneAddressPerShard(argsBuiltIn.ShardCoordinator) builtInFuncFactory, _ := builtInFunctions.CreateBuiltInFunctionsFactory(argsBuiltIn) argsHook := hooks.ArgBlockChainHook{ - Accounts: tpn.AccntState, - PubkeyConv: TestAddressPubkeyConverter, - StorageService: tpn.Storage, - BlockChain: tpn.BlockChain, - ShardCoordinator: tpn.ShardCoordinator, - Marshalizer: TestMarshalizer, - Uint64Converter: TestUint64Converter, - BuiltInFunctions: builtInFuncFactory.BuiltInFunctionContainer(), - NFTStorageHandler: builtInFuncFactory.NFTStorageHandler(), - GlobalSettingsHandler: builtInFuncFactory.ESDTGlobalSettingsHandler(), - DataPool: tpn.DataPool, - CompiledSCPool: tpn.DataPool.SmartContracts(), - EpochNotifier: tpn.EpochNotifier, - EnableEpochsHandler: tpn.EnableEpochsHandler, - NilCompiledSCStore: true, - GasSchedule: gasSchedule, - Counter: counters.NewDisabledCounter(), + Accounts: tpn.AccntState, + PubkeyConv: TestAddressPubkeyConverter, + StorageService: tpn.Storage, + BlockChain: tpn.BlockChain, + ShardCoordinator: tpn.ShardCoordinator, + Marshalizer: TestMarshalizer, + Uint64Converter: TestUint64Converter, + BuiltInFunctions: builtInFuncFactory.BuiltInFunctionContainer(), + NFTStorageHandler: builtInFuncFactory.NFTStorageHandler(), + GlobalSettingsHandler: builtInFuncFactory.ESDTGlobalSettingsHandler(), + DataPool: tpn.DataPool, + CompiledSCPool: tpn.DataPool.SmartContracts(), + EpochNotifier: tpn.EpochNotifier, + EnableEpochsHandler: tpn.EnableEpochsHandler, + NilCompiledSCStore: true, + GasSchedule: gasSchedule, + Counter: counters.NewDisabledCounter(), + MissingTrieNodesNotifier: &testscommon.MissingTrieNodesNotifierStub{}, } var signVerifier vm.MessageSignVerifier @@ -1762,8 +1770,9 @@ func (tpn 
*TestProcessorNode) initMetaInnerProcessors(gasMap map[string]map[stri MinQuorum: 0.5, MinPassThreshold: 0.5, MinVetoThreshold: 0.5, + LostProposalFee: "1", }, - ChangeConfigAddress: DelegationManagerConfigChangeAddress, + OwnerAddress: DelegationManagerConfigChangeAddress, }, StakingSystemSCConfig: config.StakingSystemSCConfig{ GenesisNodePrice: "1000", @@ -2089,6 +2098,7 @@ func (tpn *TestProcessorNode) initBlockProcessor(stateCheckpointModulus uint) { ProcessedMiniBlocksTracker: &testscommon.ProcessedMiniBlocksTrackerStub{}, ReceiptsRepository: &testscommon.ReceiptsRepositoryStub{}, OutportDataProvider: &outport.OutportDataProviderStub{}, + BlockProcessingCutoffHandler: &testscommon.BlockProcessingCutoffStub{}, } if check.IfNil(tpn.EpochStartNotifier) { @@ -2491,9 +2501,7 @@ func (tpn *TestProcessorNode) StartSync() error { return errors.New("no bootstrapper available") } - tpn.Bootstrapper.StartSyncingBlocks() - - return nil + return tpn.Bootstrapper.StartSyncingBlocks() } // LoadTxSignSkBytes alters the already generated sk/pk pair @@ -3205,17 +3213,18 @@ func GetDefaultCryptoComponents() *mock.CryptoComponentsStub { } // GetDefaultStateComponents - -func GetDefaultStateComponents() *testscommon.StateComponentsMock { - return &testscommon.StateComponentsMock{ +func GetDefaultStateComponents() *testFactory.StateComponentsMock { + return &testFactory.StateComponentsMock{ PeersAcc: &stateMock.AccountsStub{}, Accounts: &stateMock.AccountsStub{}, AccountsRepo: &stateMock.AccountsRepositoryStub{}, Tries: &trieMock.TriesHolderStub{}, StorageManagers: map[string]common.StorageManager{ - "0": &testscommon.StorageManagerStub{}, - trieFactory.UserAccountTrie: &testscommon.StorageManagerStub{}, - trieFactory.PeerAccountTrie: &testscommon.StorageManagerStub{}, + "0": &storageManager.StorageManagerStub{}, + dataRetriever.UserAccountsUnit.String(): &storageManager.StorageManagerStub{}, + dataRetriever.PeerAccountsUnit.String(): &storageManager.StorageManagerStub{}, }, + MissingNodesNotifier: &testscommon.MissingTrieNodesNotifierStub{}, } } @@ -3234,7 +3243,7 @@ func GetDefaultNetworkComponents() *mock.NetworkComponentsStub { // GetDefaultStatusComponents - func GetDefaultStatusComponents() *mock.StatusComponentsStub { return &mock.StatusComponentsStub{ - Outport: mock.NewNilOutport(), + Outport: disabledOutport.NewDisabledOutport(), SoftwareVersionCheck: &mock.SoftwareVersionCheckerMock{}, } } @@ -3252,7 +3261,7 @@ func getDefaultBootstrapComponents(shardCoordinator sharding.Coordinator) *mainF return &mainFactoryMocks.BootstrapComponentsStub{ Bootstrapper: &bootstrapMocks.EpochStartBootstrapperStub{ TrieHolder: &trieMock.TriesHolderStub{}, - StorageManagers: map[string]common.StorageManager{"0": &testscommon.StorageManagerStub{}}, + StorageManagers: map[string]common.StorageManager{"0": &storageManager.StorageManagerStub{}}, BootstrapCalled: nil, }, BootstrapParams: &bootstrapMocks.BootstrapParamsHandlerMock{}, diff --git a/integrationTests/testProcessorNodeWithTestWebServer.go b/integrationTests/testProcessorNodeWithTestWebServer.go index e76dd3fd464..aa07a327720 100644 --- a/integrationTests/testProcessorNodeWithTestWebServer.go +++ b/integrationTests/testProcessorNodeWithTestWebServer.go @@ -142,6 +142,7 @@ func createFacadeComponents(tpn *TestProcessorNode) (nodeFacade.ApiResolver, nod argsBuiltIn := builtInFunctions.ArgsCreateBuiltInFunctionContainer{ GasSchedule: gasScheduleNotifier, MapDNSAddresses: make(map[string]struct{}), + MapDNSV2Addresses: make(map[string]struct{}), Marshalizer: 
TestMarshalizer, Accounts: tpn.AccntState, ShardCoordinator: tpn.ShardCoordinator, diff --git a/integrationTests/testSyncNode.go b/integrationTests/testSyncNode.go index 8b2b72d5419..74e6595d2f8 100644 --- a/integrationTests/testSyncNode.go +++ b/integrationTests/testSyncNode.go @@ -9,6 +9,7 @@ import ( "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/dataRetriever/provider" "github.com/multiversx/mx-chain-go/integrationTests/mock" + "github.com/multiversx/mx-chain-go/outport/disabled" "github.com/multiversx/mx-chain-go/process/block" "github.com/multiversx/mx-chain-go/process/block/bootstrapStorage" "github.com/multiversx/mx-chain-go/process/sync" @@ -100,6 +101,7 @@ func (tpn *TestProcessorNode) initBlockProcessorWithSync() { ProcessedMiniBlocksTracker: &testscommon.ProcessedMiniBlocksTrackerStub{}, ReceiptsRepository: &testscommon.ReceiptsRepositoryStub{}, OutportDataProvider: &outport.OutportDataProviderStub{}, + BlockProcessingCutoffHandler: &testscommon.BlockProcessingCutoffStub{}, } if tpn.ShardCoordinator.SelfId() == core.MetachainShardId { @@ -163,13 +165,14 @@ func (tpn *TestProcessorNode) createShardBootstrapper() (TestBootstrapper, error MiniblocksProvider: tpn.MiniblocksProvider, Uint64Converter: TestUint64Converter, AppStatusHandler: TestAppStatusHandler, - OutportHandler: mock.NewNilOutport(), + OutportHandler: disabled.NewDisabledOutport(), AccountsDBSyncer: &mock.AccountsDBSyncerStub{}, CurrentEpochProvider: &testscommon.CurrentEpochProviderStub{}, IsInImportMode: false, HistoryRepo: &dblookupext.HistoryRepositoryStub{}, ScheduledTxsExecutionHandler: &testscommon.ScheduledTxsExecutionStub{}, ProcessWaitTime: tpn.RoundHandler.TimeDuration(), + RepopulateTokensSupplies: false, } argsShardBootstrapper := sync.ArgShardBootstrapper{ @@ -208,13 +211,14 @@ func (tpn *TestProcessorNode) createMetaChainBootstrapper() (TestBootstrapper, e MiniblocksProvider: tpn.MiniblocksProvider, Uint64Converter: TestUint64Converter, AppStatusHandler: TestAppStatusHandler, - OutportHandler: mock.NewNilOutport(), + OutportHandler: disabled.NewDisabledOutport(), AccountsDBSyncer: &mock.AccountsDBSyncerStub{}, CurrentEpochProvider: &testscommon.CurrentEpochProviderStub{}, IsInImportMode: false, HistoryRepo: &dblookupext.HistoryRepositoryStub{}, ScheduledTxsExecutionHandler: &testscommon.ScheduledTxsExecutionStub{}, ProcessWaitTime: tpn.RoundHandler.TimeDuration(), + RepopulateTokensSupplies: false, } argsMetaBootstrapper := sync.ArgMetaBootstrapper{ diff --git a/integrationTests/vm/testInitializer.go b/integrationTests/vm/testInitializer.go index 0633bb3e546..cc306a99b51 100644 --- a/integrationTests/vm/testInitializer.go +++ b/integrationTests/vm/testInitializer.go @@ -55,7 +55,6 @@ import ( dataRetrieverMock "github.com/multiversx/mx-chain-go/testscommon/dataRetriever" "github.com/multiversx/mx-chain-go/testscommon/economicsmocks" "github.com/multiversx/mx-chain-go/testscommon/epochNotifier" - "github.com/multiversx/mx-chain-go/testscommon/hashingMocks" "github.com/multiversx/mx-chain-go/testscommon/integrationtests" "github.com/multiversx/mx-chain-go/testscommon/shardingMocks" storageStubs "github.com/multiversx/mx-chain-go/testscommon/storage" @@ -76,6 +75,15 @@ const minTransactionVersion = 1 var dnsAddr = []byte{0, 0, 0, 0, 0, 0, 0, 0, 5, 0, 137, 17, 46, 56, 127, 47, 62, 172, 4, 126, 190, 242, 221, 230, 209, 243, 105, 104, 242, 66, 49, 49} +// DNSV2Address defines the address for the new DNS contract +const DNSV2Address = 
"erd1qqqqqqqqqqqqqpgqcy67yanvwpepqmerkq6m8pgav0tlvgwxjmdq4hukxw" + +// DNSV2DeployerAddress defines the address of the deployer for the DNS v2 contracts +const DNSV2DeployerAddress = "erd1uzk2g5rhvg8prk9y50d0q7qsxg7tm7f320q0q4qlpmfu395wjmdqqy0n9q" + +// TestAddressPubkeyConverter represents an address public key converter +var TestAddressPubkeyConverter, _ = pubkeyConverter.NewBech32PubkeyConverter(32, "erd") + // TODO: Merge test utilities from this file with the ones from "wasmvm/utils.go" var globalEpochNotifier = forking.NewGenericEpochNotifier() @@ -377,24 +385,25 @@ func CreateTxProcessorWithOneSCExecutorMockVM( builtInFuncs := vmcommonBuiltInFunctions.NewBuiltInFunctionContainer() datapool := dataRetrieverMock.NewPoolsHolderMock() args := hooks.ArgBlockChainHook{ - Accounts: accnts, - PubkeyConv: pubkeyConv, - StorageService: &storageStubs.ChainStorerStub{}, - BlockChain: &testscommon.ChainHandlerStub{}, - ShardCoordinator: mock.NewMultiShardsCoordinatorMock(2), - Marshalizer: integrationtests.TestMarshalizer, - Uint64Converter: &mock.Uint64ByteSliceConverterMock{}, - BuiltInFunctions: builtInFuncs, - NFTStorageHandler: &testscommon.SimpleNFTStorageHandlerStub{}, - GlobalSettingsHandler: &testscommon.ESDTGlobalSettingsHandlerStub{}, - DataPool: datapool, - CompiledSCPool: datapool.SmartContracts(), - NilCompiledSCStore: true, - ConfigSCStorage: *defaultStorageConfig(), - EpochNotifier: genericEpochNotifier, - EnableEpochsHandler: enableEpochsHandler, - GasSchedule: gasScheduleNotifier, - Counter: &testscommon.BlockChainHookCounterStub{}, + Accounts: accnts, + PubkeyConv: pubkeyConv, + StorageService: &storageStubs.ChainStorerStub{}, + BlockChain: &testscommon.ChainHandlerStub{}, + ShardCoordinator: mock.NewMultiShardsCoordinatorMock(2), + Marshalizer: integrationtests.TestMarshalizer, + Uint64Converter: &mock.Uint64ByteSliceConverterMock{}, + BuiltInFunctions: builtInFuncs, + NFTStorageHandler: &testscommon.SimpleNFTStorageHandlerStub{}, + GlobalSettingsHandler: &testscommon.ESDTGlobalSettingsHandlerStub{}, + DataPool: datapool, + CompiledSCPool: datapool.SmartContracts(), + NilCompiledSCStore: true, + ConfigSCStorage: *defaultStorageConfig(), + EpochNotifier: genericEpochNotifier, + EnableEpochsHandler: enableEpochsHandler, + GasSchedule: gasScheduleNotifier, + Counter: &testscommon.BlockChainHookCounterStub{}, + MissingTrieNodesNotifier: &testscommon.MissingTrieNodesNotifierStub{}, } blockChainHook, _ := hooks.NewBlockChainHookImpl(args) @@ -480,24 +489,25 @@ func CreateTxProcessorWithOneSCExecutorMockVM( func CreateOneSCExecutorMockVM(accnts state.AccountsAdapter) vmcommon.VMExecutionHandler { datapool := dataRetrieverMock.NewPoolsHolderMock() args := hooks.ArgBlockChainHook{ - Accounts: accnts, - PubkeyConv: pubkeyConv, - StorageService: &storageStubs.ChainStorerStub{}, - BlockChain: &testscommon.ChainHandlerStub{}, - ShardCoordinator: mock.NewMultiShardsCoordinatorMock(2), - Marshalizer: integrationtests.TestMarshalizer, - Uint64Converter: &mock.Uint64ByteSliceConverterMock{}, - BuiltInFunctions: vmcommonBuiltInFunctions.NewBuiltInFunctionContainer(), - NFTStorageHandler: &testscommon.SimpleNFTStorageHandlerStub{}, - GlobalSettingsHandler: &testscommon.ESDTGlobalSettingsHandlerStub{}, - DataPool: datapool, - CompiledSCPool: datapool.SmartContracts(), - NilCompiledSCStore: true, - ConfigSCStorage: *defaultStorageConfig(), - EpochNotifier: &epochNotifier.EpochNotifierStub{}, - EnableEpochsHandler: &testscommon.EnableEpochsHandlerStub{}, - GasSchedule: 
CreateMockGasScheduleNotifier(), - Counter: &testscommon.BlockChainHookCounterStub{}, + Accounts: accnts, + PubkeyConv: pubkeyConv, + StorageService: &storageStubs.ChainStorerStub{}, + BlockChain: &testscommon.ChainHandlerStub{}, + ShardCoordinator: mock.NewMultiShardsCoordinatorMock(2), + Marshalizer: integrationtests.TestMarshalizer, + Uint64Converter: &mock.Uint64ByteSliceConverterMock{}, + BuiltInFunctions: vmcommonBuiltInFunctions.NewBuiltInFunctionContainer(), + NFTStorageHandler: &testscommon.SimpleNFTStorageHandlerStub{}, + GlobalSettingsHandler: &testscommon.ESDTGlobalSettingsHandlerStub{}, + DataPool: datapool, + CompiledSCPool: datapool.SmartContracts(), + NilCompiledSCStore: true, + ConfigSCStorage: *defaultStorageConfig(), + EpochNotifier: &epochNotifier.EpochNotifierStub{}, + EnableEpochsHandler: &testscommon.EnableEpochsHandlerStub{}, + GasSchedule: CreateMockGasScheduleNotifier(), + Counter: &testscommon.BlockChainHookCounterStub{}, + MissingTrieNodesNotifier: &testscommon.MissingTrieNodesNotifierStub{}, } blockChainHook, _ := hooks.NewBlockChainHookImpl(args) vm, _ := mock.NewOneSCExecutorMockVM(blockChainHook, integrationtests.TestHasher) @@ -523,11 +533,17 @@ func CreateVMAndBlockchainHookAndDataPool( gasSchedule = mock.NewGasScheduleNotifierMock(testGasSchedule) } + dnsV2Decoded, _ := TestAddressPubkeyConverter.Decode(DNSV2Address) + argsBuiltIn := builtInFunctions.ArgsCreateBuiltInFunctionContainer{ GasSchedule: gasSchedule, MapDNSAddresses: map[string]struct{}{ string(dnsAddr): {}, }, + MapDNSV2Addresses: map[string]struct{}{ + string(dnsV2Decoded): {}, + string(dnsAddr): {}, + }, Marshalizer: integrationtests.TestMarshalizer, Accounts: accnts, ShardCoordinator: shardCoordinator, @@ -544,27 +560,27 @@ func CreateVMAndBlockchainHookAndDataPool( datapool := dataRetrieverMock.NewPoolsHolderMock() args := hooks.ArgBlockChainHook{ - Accounts: accnts, - PubkeyConv: pubkeyConv, - StorageService: &storageStubs.ChainStorerStub{}, - BlockChain: chainHandler, - ShardCoordinator: shardCoordinator, - Marshalizer: integrationtests.TestMarshalizer, - Uint64Converter: &mock.Uint64ByteSliceConverterMock{}, - BuiltInFunctions: builtInFuncFactory.BuiltInFunctionContainer(), - NFTStorageHandler: builtInFuncFactory.NFTStorageHandler(), - GlobalSettingsHandler: builtInFuncFactory.ESDTGlobalSettingsHandler(), - DataPool: datapool, - CompiledSCPool: datapool.SmartContracts(), - NilCompiledSCStore: true, - ConfigSCStorage: *defaultStorageConfig(), - EpochNotifier: epochNotifierInstance, - EnableEpochsHandler: enableEpochsHandler, - GasSchedule: gasSchedule, - Counter: counter, - } - - hasher := &hashingMocks.HasherMock{} + Accounts: accnts, + PubkeyConv: pubkeyConv, + StorageService: &storageStubs.ChainStorerStub{}, + BlockChain: chainHandler, + ShardCoordinator: shardCoordinator, + Marshalizer: integrationtests.TestMarshalizer, + Uint64Converter: &mock.Uint64ByteSliceConverterMock{}, + BuiltInFunctions: builtInFuncFactory.BuiltInFunctionContainer(), + NFTStorageHandler: builtInFuncFactory.NFTStorageHandler(), + GlobalSettingsHandler: builtInFuncFactory.ESDTGlobalSettingsHandler(), + DataPool: datapool, + CompiledSCPool: datapool.SmartContracts(), + NilCompiledSCStore: true, + ConfigSCStorage: *defaultStorageConfig(), + EpochNotifier: epochNotifierInstance, + EnableEpochsHandler: enableEpochsHandler, + GasSchedule: gasSchedule, + Counter: counter, + MissingTrieNodesNotifier: &testscommon.MissingTrieNodesNotifierStub{}, + } + maxGasLimitPerBlock := uint64(0xFFFFFFFFFFFFFFFF) 
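// A minimal sketch of the DNS wiring introduced above: the new MapDNSV2Addresses whitelist is built
// by decoding the bech32 DNSV2Address constant with the pubkey converter and is handed to
// ArgsCreateBuiltInFunctionContainer next to the legacy map. The helper name and package below are
// illustrative; only the Decode call and the map shape come from this diff.
package example

import (
	"github.com/multiversx/mx-chain-core-go/core"
)

// buildDNSAddressMaps returns the legacy DNS whitelist and the DNS v2 whitelist; as in
// CreateVMAndBlockchainHookAndDataPool above, the v2 map also keeps the legacy address.
func buildDNSAddressMaps(legacyDNSAddr []byte, dnsV2Bech32 string, converter core.PubkeyConverter) (map[string]struct{}, map[string]struct{}, error) {
	dnsV2Decoded, err := converter.Decode(dnsV2Bech32)
	if err != nil {
		return nil, nil, err
	}

	mapDNS := map[string]struct{}{
		string(legacyDNSAddr): {},
	}
	mapDNSV2 := map[string]struct{}{
		string(dnsV2Decoded):  {},
		string(legacyDNSAddr): {},
	}

	return mapDNS, mapDNSV2, nil
}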
blockChainHookImpl, _ := hooks.NewBlockChainHookImpl(args) argsNewVMFactory := shard.ArgVMContainerFactory{ @@ -577,7 +593,7 @@ func CreateVMAndBlockchainHookAndDataPool( EnableEpochsHandler: enableEpochsHandler, WasmVMChangeLocker: wasmVMChangeLocker, ESDTTransferParser: esdtTransferParser, - Hasher: hasher, + Hasher: integrationtests.TestHasher, } vmFactory, err := shard.NewVMContainerFactory(argsNewVMFactory) if err != nil { @@ -608,18 +624,18 @@ func CreateVMAndBlockchainHookMeta( gasSchedule = mock.NewGasScheduleNotifierMock(testGasSchedule) } - var err error - guardedAccountHandler, err := guardian.NewGuardedAccount(integrationtests.TestMarshalizer, globalEpochNotifier, EpochGuardianDelay) - if err != nil { - panic(err) - } + guardedAccountHandler, _ := guardian.NewGuardedAccount(integrationtests.TestMarshalizer, globalEpochNotifier, EpochGuardianDelay) + dnsV2Decoded, _ := TestAddressPubkeyConverter.Decode(DNSV2Address) enableEpochsHandler, _ := enablers.NewEnableEpochsHandler(enableEpochsConfig, globalEpochNotifier) argsBuiltIn := builtInFunctions.ArgsCreateBuiltInFunctionContainer{ GasSchedule: gasSchedule, MapDNSAddresses: map[string]struct{}{ string(dnsAddr): {}, }, + MapDNSV2Addresses: map[string]struct{}{ + string(dnsV2Decoded): {}, + }, Marshalizer: integrationtests.TestMarshalizer, Accounts: accnts, ShardCoordinator: shardCoordinator, @@ -633,23 +649,24 @@ func CreateVMAndBlockchainHookMeta( datapool := dataRetrieverMock.NewPoolsHolderMock() args := hooks.ArgBlockChainHook{ - Accounts: accnts, - PubkeyConv: pubkeyConv, - StorageService: &storageStubs.ChainStorerStub{}, - BlockChain: &testscommon.ChainHandlerStub{}, - ShardCoordinator: shardCoordinator, - Marshalizer: integrationtests.TestMarshalizer, - Uint64Converter: &mock.Uint64ByteSliceConverterMock{}, - BuiltInFunctions: builtInFuncFactory.BuiltInFunctionContainer(), - NFTStorageHandler: builtInFuncFactory.NFTStorageHandler(), - GlobalSettingsHandler: builtInFuncFactory.ESDTGlobalSettingsHandler(), - DataPool: datapool, - CompiledSCPool: datapool.SmartContracts(), - NilCompiledSCStore: true, - EpochNotifier: globalEpochNotifier, - EnableEpochsHandler: enableEpochsHandler, - GasSchedule: gasSchedule, - Counter: &testscommon.BlockChainHookCounterStub{}, + Accounts: accnts, + PubkeyConv: pubkeyConv, + StorageService: &storageStubs.ChainStorerStub{}, + BlockChain: &testscommon.ChainHandlerStub{}, + ShardCoordinator: shardCoordinator, + Marshalizer: integrationtests.TestMarshalizer, + Uint64Converter: &mock.Uint64ByteSliceConverterMock{}, + BuiltInFunctions: builtInFuncFactory.BuiltInFunctionContainer(), + NFTStorageHandler: builtInFuncFactory.NFTStorageHandler(), + GlobalSettingsHandler: builtInFuncFactory.ESDTGlobalSettingsHandler(), + DataPool: datapool, + CompiledSCPool: datapool.SmartContracts(), + NilCompiledSCStore: true, + EpochNotifier: globalEpochNotifier, + EnableEpochsHandler: enableEpochsHandler, + GasSchedule: gasSchedule, + Counter: &testscommon.BlockChainHookCounterStub{}, + MissingTrieNodesNotifier: &testscommon.MissingTrieNodesNotifierStub{}, } economicsData, err := createEconomicsData(config.EnableEpochs{}) @@ -708,8 +725,9 @@ func createSystemSCConfig() *config.SystemSmartContractsConfig { MinQuorum: 0.5, MinPassThreshold: 0.5, MinVetoThreshold: 0.5, + LostProposalFee: "1", }, - ChangeConfigAddress: "3132333435363738393031323334353637383930313233343536373839303234", + OwnerAddress: "3132333435363738393031323334353637383930313233343536373839303234", }, StakingSystemSCConfig: config.StakingSystemSCConfig{ 
GenesisNodePrice: "2500000000000000000000", @@ -1403,6 +1421,21 @@ func TestAccount( return senderRecovShardAccount.GetBalance() } +// TestAccountUsername - +func TestAccountUsername( + t *testing.T, + accnts state.AccountsAdapter, + senderAddressBytes []byte, + username []byte, +) { + + senderRecovAccount, _ := accnts.GetExistingAccount(senderAddressBytes) + require.False(t, check.IfNil(senderRecovAccount)) + + senderRecovShardAccount := senderRecovAccount.(state.UserAccountHandler) + require.Equal(t, senderRecovShardAccount.GetUserName(), username) +} + // ComputeExpectedBalance - func ComputeExpectedBalance( existing *big.Int, diff --git a/integrationTests/vm/txsFee/asyncCall_test.go b/integrationTests/vm/txsFee/asyncCall_test.go index ffa618c256d..33e05a6a95e 100644 --- a/integrationTests/vm/txsFee/asyncCall_test.go +++ b/integrationTests/vm/txsFee/asyncCall_test.go @@ -173,12 +173,12 @@ func TestAsyncCallsOnInitFunctionOnUpgrade(t *testing.T) { } func testAsyncCallsOnInitFunctionOnUpgrade(t *testing.T, enableEpochs config.EnableEpochs, expectedGasLimit uint64, gasScheduleNotifier core.GasScheduleNotifier) { - shardCoordinatorForShard0, _ := sharding.NewMultiShardCoordinator(3, 1) + shardCoordinatorForShard1, _ := sharding.NewMultiShardCoordinator(3, 1) shardCoordinatorForShardMeta, _ := sharding.NewMultiShardCoordinator(3, core.MetachainShardId) - testContextShard0, err := vm.CreatePreparedTxProcessorWithVMsWithShardCoordinatorDBAndGas( + testContextShard1, err := vm.CreatePreparedTxProcessorWithVMsWithShardCoordinatorDBAndGas( enableEpochs, - shardCoordinatorForShard0, + shardCoordinatorForShard1, integrationtests.CreateMemUnit(), gasScheduleNotifier, ) @@ -194,7 +194,7 @@ func testAsyncCallsOnInitFunctionOnUpgrade(t *testing.T, enableEpochs config.Ena // step 1. deploy the first contract scAddress, owner := utils.DoDeployWithCustomParams( t, - testContextShard0, + testContextShard1, "./testdata/first/first.wasm", big.NewInt(100000000000), 2000, @@ -203,20 +203,20 @@ func testAsyncCallsOnInitFunctionOnUpgrade(t *testing.T, enableEpochs config.Ena assert.Equal(t, 32, len(owner)) assert.Equal(t, 32, len(scAddress)) - intermediates := testContextShard0.GetIntermediateTransactions(t) + intermediates := testContextShard1.GetIntermediateTransactions(t) assert.Equal(t, 1, len(intermediates)) - testContextShard0.CleanIntermediateTransactions(t) + testContextShard1.CleanIntermediateTransactions(t) // step 2. call a dummy function on the first version of the contract tx := utils.CreateSmartContractCall(1, owner, scAddress, 10, 2000, "callMe", nil) - code, err := testContextShard0.TxProcessor.ProcessTransaction(tx) + code, err := testContextShard1.TxProcessor.ProcessTransaction(tx) require.Nil(t, err) require.Equal(t, vmcommon.Ok, code) - intermediates = testContextShard0.GetIntermediateTransactions(t) + intermediates = testContextShard1.GetIntermediateTransactions(t) assert.Equal(t, 1, len(intermediates)) - testContextShard0.CleanIntermediateTransactions(t) + testContextShard1.CleanIntermediateTransactions(t) // step 3. 
upgrade to the second contract @@ -230,13 +230,13 @@ func testAsyncCallsOnInitFunctionOnUpgrade(t *testing.T, enableEpochs config.Ena hex.EncodeToString([]byte("dummyArg")), }, "@") tx = utils.CreateSmartContractCall(2, owner, scAddress, 10, 10000000, txData, nil) - code, err = testContextShard0.TxProcessor.ProcessTransaction(tx) + code, err = testContextShard1.TxProcessor.ProcessTransaction(tx) assert.Nil(t, err) assert.Equal(t, vmcommon.Ok, code) - intermediates = testContextShard0.GetIntermediateTransactions(t) + intermediates = testContextShard1.GetIntermediateTransactions(t) assert.Equal(t, 1, len(intermediates)) - testContextShard0.CleanIntermediateTransactions(t) + testContextShard1.CleanIntermediateTransactions(t) // step 4. execute scr on metachain, should fail @@ -251,7 +251,7 @@ func testAsyncCallsOnInitFunctionOnUpgrade(t *testing.T, enableEpochs config.Ena // step 5. execute generated metachain scr on the contract scr = intermediates[0].(*smartContractResult.SmartContractResult) - code, err = testContextShard0.ScProcessor.ProcessSmartContractResult(scr) + code, err = testContextShard1.ScProcessor.ProcessSmartContractResult(scr) assert.Nil(t, err) assert.Equal(t, vmcommon.Ok, code) @@ -295,12 +295,12 @@ func TestAsyncCallsOnInitFunctionOnDeploy(t *testing.T) { } func testAsyncCallsOnInitFunctionOnDeploy(t *testing.T, enableEpochs config.EnableEpochs, expectedGasLimit uint64, gasScheduleNotifier core.GasScheduleNotifier) { - shardCoordinatorForShard0, _ := sharding.NewMultiShardCoordinator(3, 1) + shardCoordinatorForShard1, _ := sharding.NewMultiShardCoordinator(3, 1) shardCoordinatorForShardMeta, _ := sharding.NewMultiShardCoordinator(3, core.MetachainShardId) - testContextShard0, err := vm.CreatePreparedTxProcessorWithVMsWithShardCoordinatorDBAndGas( + testContextShard1, err := vm.CreatePreparedTxProcessorWithVMsWithShardCoordinatorDBAndGas( enableEpochs, - shardCoordinatorForShard0, + shardCoordinatorForShard1, integrationtests.CreateMemUnit(), gasScheduleNotifier, ) @@ -316,7 +316,7 @@ func testAsyncCallsOnInitFunctionOnDeploy(t *testing.T, enableEpochs config.Enab // step 1. deploy the first contract scAddressFirst, firstOwner := utils.DoDeployWithCustomParams( t, - testContextShard0, + testContextShard1, "./testdata/first/first.wasm", big.NewInt(100000000000), 2000, @@ -325,26 +325,26 @@ func testAsyncCallsOnInitFunctionOnDeploy(t *testing.T, enableEpochs config.Enab assert.Equal(t, 32, len(firstOwner)) assert.Equal(t, 32, len(scAddressFirst)) - intermediates := testContextShard0.GetIntermediateTransactions(t) + intermediates := testContextShard1.GetIntermediateTransactions(t) assert.Equal(t, 1, len(intermediates)) - testContextShard0.CleanIntermediateTransactions(t) + testContextShard1.CleanIntermediateTransactions(t) // step 2. call a dummy function on the first contract tx := utils.CreateSmartContractCall(1, firstOwner, scAddressFirst, 10, 2000, "callMe", nil) - code, err := testContextShard0.TxProcessor.ProcessTransaction(tx) + code, err := testContextShard1.TxProcessor.ProcessTransaction(tx) require.Nil(t, err) require.Equal(t, vmcommon.Ok, code) - intermediates = testContextShard0.GetIntermediateTransactions(t) + intermediates = testContextShard1.GetIntermediateTransactions(t) assert.Equal(t, 1, len(intermediates)) - testContextShard0.CleanIntermediateTransactions(t) + testContextShard1.CleanIntermediateTransactions(t) // step 3. 
deploy the second contract that does an async on init function scAddressSecond, secondOwner := utils.DoDeployWithCustomParams( t, - testContextShard0, + testContextShard1, "./testdata/asyncOnInit/asyncOnInit.wasm", big.NewInt(100000000000), 10000000, @@ -357,9 +357,9 @@ func testAsyncCallsOnInitFunctionOnDeploy(t *testing.T, enableEpochs config.Enab assert.Equal(t, 32, len(secondOwner)) assert.Equal(t, 32, len(scAddressSecond)) - intermediates = testContextShard0.GetIntermediateTransactions(t) + intermediates = testContextShard1.GetIntermediateTransactions(t) assert.Equal(t, 1, len(intermediates)) - testContextShard0.CleanIntermediateTransactions(t) + testContextShard1.CleanIntermediateTransactions(t) // step 4. execute scr on metachain, should fail @@ -374,7 +374,7 @@ func testAsyncCallsOnInitFunctionOnDeploy(t *testing.T, enableEpochs config.Enab // step 5. execute generated metachain scr on the contract scr = intermediates[0].(*smartContractResult.SmartContractResult) - code, err = testContextShard0.ScProcessor.ProcessSmartContractResult(scr) + code, err = testContextShard1.ScProcessor.ProcessSmartContractResult(scr) assert.Nil(t, err) assert.Equal(t, vmcommon.Ok, code) diff --git a/integrationTests/vm/txsFee/dns_test.go b/integrationTests/vm/txsFee/dns_test.go index 1d4a439a692..cda8aaece8c 100644 --- a/integrationTests/vm/txsFee/dns_test.go +++ b/integrationTests/vm/txsFee/dns_test.go @@ -10,15 +10,24 @@ import ( "fmt" "math/big" "testing" + "unicode/utf8" + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/data/smartContractResult" + "github.com/multiversx/mx-chain-core-go/data/transaction" "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/integrationTests" "github.com/multiversx/mx-chain-go/integrationTests/vm" "github.com/multiversx/mx-chain-go/integrationTests/vm/txsFee/utils" "github.com/multiversx/mx-chain-go/state" + logger "github.com/multiversx/mx-chain-logger-go" vmcommon "github.com/multiversx/mx-chain-vm-common-go" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) +const returnOkData = "@6f6b" + func TestDeployDNSContract_TestRegisterAndResolveAndSendTxWithSndAndRcvUserName(t *testing.T) { testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) require.Nil(t, err) @@ -37,7 +46,7 @@ func TestDeployDNSContract_TestRegisterAndResolveAndSendTxWithSndAndRcvUserName( _, _ = vm.CreateAccount(testContext.Accounts, sndAddr, 0, senderBalance) _, _ = vm.CreateAccount(testContext.Accounts, rcvAddr, 0, senderBalance) - userName := utils.GenerateUserNameForMyDNSContract() + userName := utils.GenerateUserNameForDefaultDNSContract() txData := []byte("register@" + hex.EncodeToString(userName)) // create username for sender tx := vm.CreateTransaction(0, big.NewInt(0), sndAddr, scAddress, gasPrice, gasLimit, txData) @@ -55,7 +64,7 @@ func TestDeployDNSContract_TestRegisterAndResolveAndSendTxWithSndAndRcvUserName( utils.CleanAccumulatedIntermediateTransactions(t, testContext) // create username for receiver - rcvUserName := utils.GenerateUserNameForMyDNSContract() + rcvUserName := utils.GenerateUserNameForDefaultDNSContract() txData = []byte("register@" + hex.EncodeToString(rcvUserName)) tx = vm.CreateTransaction(0, big.NewInt(0), rcvAddr, scAddress, gasPrice, gasLimit, txData) retCode, err = testContext.TxProcessor.ProcessTransaction(tx) @@ -100,3 +109,304 @@ func TestDeployDNSContract_TestRegisterAndResolveAndSendTxWithSndAndRcvUserName( require.Equal(t, vmcommon.Ok, 
retCode) require.Nil(t, err) } + +// relayer address is in shard 2, creates a transaction on the behalf of the user from shard 2, that will call the DNS contract +// from shard 1. +func TestDeployDNSContract_TestGasWhenSaveUsernameFailsCrossShardBackwardsCompatibility(t *testing.T) { + enableEpochs := config.EnableEpochs{ + ChangeUsernameEnableEpoch: 1000, // flag disabled, backwards compatibility + } + + testContextForDNSContract, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(1, enableEpochs) + require.Nil(t, err) + defer testContextForDNSContract.Close() + + testContextForRelayerAndUser, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(2, enableEpochs) + require.Nil(t, err) + defer testContextForRelayerAndUser.Close() + + scAddress, _ := utils.DoDeployDNS(t, testContextForDNSContract, "../../multiShard/smartContract/dns/dns.wasm") + fmt.Println(scAddress) + utils.CleanAccumulatedIntermediateTransactions(t, testContextForDNSContract) + require.Equal(t, uint32(1), testContextForDNSContract.ShardCoordinator.ComputeId(scAddress)) + + relayerAddress := []byte("relayer-901234567890123456789112") + require.Equal(t, uint32(2), testContextForRelayerAndUser.ShardCoordinator.ComputeId(relayerAddress)) + userAddress := []byte("user-678901234567890123456789112") + require.Equal(t, uint32(2), testContextForRelayerAndUser.ShardCoordinator.ComputeId(userAddress)) + + initialBalance := big.NewInt(10000000000) + _, _ = vm.CreateAccount(testContextForRelayerAndUser.Accounts, relayerAddress, 0, initialBalance) + + firstUsername := utils.GenerateUserNameForDNSContract(scAddress) + args := argsProcessRegister{ + relayerAddress: relayerAddress, + userAddress: userAddress, + scAddress: scAddress, + testContextForRelayerAndUser: testContextForRelayerAndUser, + testContextForDNSContract: testContextForDNSContract, + username: firstUsername, + gasPrice: 10, + } + scrs, retCode, err := processRegisterThroughRelayedTxs(t, args) + require.Nil(t, err) + require.Equal(t, vmcommon.Ok, retCode) + assert.Equal(t, 4, len(scrs)) + + expectedTotalBalance := big.NewInt(0).Set(initialBalance) + expectedTotalBalance.Sub(expectedTotalBalance, big.NewInt(10)) // due to a bug, some fees were burnt + + // check username + acc, _ := testContextForRelayerAndUser.Accounts.GetExistingAccount(userAddress) + account, _ := acc.(state.UserAccountHandler) + require.Equal(t, firstUsername, account.GetUserName()) + checkBalances(t, args, expectedTotalBalance) + + secondUsername := utils.GenerateUserNameForDNSContract(scAddress) + args.username = secondUsername + + _, retCode, err = processRegisterThroughRelayedTxs(t, args) + require.Nil(t, err) + require.Equal(t, vmcommon.UserError, retCode) + + // check username hasn't changed + acc, _ = testContextForRelayerAndUser.Accounts.GetExistingAccount(userAddress) + account, _ = acc.(state.UserAccountHandler) + require.Equal(t, firstUsername, account.GetUserName()) + checkBalances(t, args, expectedTotalBalance) +} + +func TestDeployDNSContract_TestGasWhenSaveUsernameAfterDNSv2IsActivated(t *testing.T) { + testContextForDNSContract, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(1, config.EnableEpochs{}) + require.Nil(t, err) + defer testContextForDNSContract.Close() + + testContextForRelayerAndUser, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(2, config.EnableEpochs{}) + require.Nil(t, err) + defer testContextForRelayerAndUser.Close() + + scAddress, _ := utils.DoDeployDNS(t, testContextForDNSContract, "../../multiShard/smartContract/dns/dns.wasm") + fmt.Println(scAddress) + 
utils.CleanAccumulatedIntermediateTransactions(t, testContextForDNSContract) + require.Equal(t, uint32(1), testContextForDNSContract.ShardCoordinator.ComputeId(scAddress)) + + relayerAddress := []byte("relayer-901234567890123456789112") + require.Equal(t, uint32(2), testContextForRelayerAndUser.ShardCoordinator.ComputeId(relayerAddress)) + userAddress := []byte("user-678901234567890123456789112") + require.Equal(t, uint32(2), testContextForRelayerAndUser.ShardCoordinator.ComputeId(userAddress)) + + initialBalance := big.NewInt(10000000000) + _, _ = vm.CreateAccount(testContextForRelayerAndUser.Accounts, relayerAddress, 0, initialBalance) + + firstUsername := utils.GenerateUserNameForDNSContract(scAddress) + args := argsProcessRegister{ + relayerAddress: relayerAddress, + userAddress: userAddress, + scAddress: scAddress, + testContextForRelayerAndUser: testContextForRelayerAndUser, + testContextForDNSContract: testContextForDNSContract, + username: firstUsername, + gasPrice: 10, + } + scrs, retCode, err := processRegisterThroughRelayedTxs(t, args) + require.Nil(t, err) + require.Equal(t, vmcommon.Ok, retCode) + assert.Equal(t, 4, len(scrs)) + + // check username + acc, _ := testContextForRelayerAndUser.Accounts.GetExistingAccount(userAddress) + account, _ := acc.(state.UserAccountHandler) + require.Equal(t, firstUsername, account.GetUserName()) + checkBalances(t, args, initialBalance) + + secondUsername := utils.GenerateUserNameForDNSContract(scAddress) + args.username = secondUsername + + _, retCode, err = processRegisterThroughRelayedTxs(t, args) + require.Nil(t, err) + require.Equal(t, vmcommon.Ok, retCode) + + // check username has changed + acc, _ = testContextForRelayerAndUser.Accounts.GetExistingAccount(userAddress) + account, _ = acc.(state.UserAccountHandler) + require.Equal(t, secondUsername, account.GetUserName()) + checkBalances(t, args, initialBalance) +} + +type argsProcessRegister struct { + relayerAddress []byte + userAddress []byte + scAddress []byte + testContextForRelayerAndUser *vm.VMTestContext + testContextForDNSContract *vm.VMTestContext + username []byte + gasPrice uint64 +} + +func processRegisterThroughRelayedTxs(tb testing.TB, args argsProcessRegister) ([]*smartContractResult.SmartContractResult, vmcommon.ReturnCode, error) { + scrs := make([]*smartContractResult.SmartContractResult, 0) + + // generate the user transaction + userTxData := []byte("register@" + hex.EncodeToString(args.username)) + userTxGasLimit := uint64(200000) + userTx := vm.CreateTransaction( + getNonce(args.testContextForRelayerAndUser, args.userAddress), + big.NewInt(0), + args.userAddress, + args.scAddress, + args.gasPrice, + userTxGasLimit, + userTxData, + ) + + log.Info("user tx", "tx", txToString(userTx)) + + // generate the relayed transaction + relayedTxData := integrationTests.PrepareRelayedTxDataV1(userTx) // v1 will suffice + relayedTxGasLimit := userTxGasLimit + 1 + uint64(len(relayedTxData)) + relayedTx := vm.CreateTransaction( + getNonce(args.testContextForRelayerAndUser, args.relayerAddress), + big.NewInt(0), + args.relayerAddress, + args.userAddress, + args.gasPrice, + relayedTxGasLimit, + relayedTxData, + ) + + log.Info("executing relayed tx", "tx", txToString(relayedTx)) + + // start executing relayed transaction + retCode, err := args.testContextForRelayerAndUser.TxProcessor.ProcessTransaction(relayedTx) + if err != nil { + return scrs, retCode, err + } + + intermediateTxs := args.testContextForRelayerAndUser.GetIntermediateTransactions(tb) + 
args.testContextForRelayerAndUser.CleanIntermediateTransactions(tb) + if len(intermediateTxs) == 0 { + return scrs, retCode, err // execution finished + } + testContexts := []*vm.VMTestContext{args.testContextForRelayerAndUser, args.testContextForDNSContract} + + globalReturnCode := vmcommon.Ok + + for { + scr := intermediateTxs[0].(*smartContractResult.SmartContractResult) + scrs = append(scrs, scr) + + context := chooseVMTestContexts(scr, testContexts) + require.NotNil(tb, context) + + // execute the smart contract result + log.Info("executing scr", "on shard", context.ShardCoordinator.SelfId(), "scr", scrToString(scr)) + + retCode, err = context.ScProcessor.ProcessSmartContractResult(scr) + if err != nil { + return scrs, retCode, err + } + if retCode != vmcommon.Ok { + globalReturnCode = retCode + } + if string(scr.Data) == returnOkData { + return scrs, globalReturnCode, err // execution finished + } + + intermediateTxs = context.GetIntermediateTransactions(tb) + context.CleanIntermediateTransactions(tb) + if len(intermediateTxs) == 0 { + return scrs, globalReturnCode, err // execution finished + } + } +} + +func chooseVMTestContexts(scr *smartContractResult.SmartContractResult, contexts []*vm.VMTestContext) *vm.VMTestContext { + for _, context := range contexts { + if context.ShardCoordinator.ComputeId(scr.RcvAddr) == context.ShardCoordinator.SelfId() { + return context + } + } + + return nil +} + +func scrToString(scr *smartContractResult.SmartContractResult) string { + data := string(scr.Data) + if !isASCII(data) { + data = hex.EncodeToString(scr.Data) + } + + hash, _ := core.CalculateHash(integrationTests.TestMarshalizer, integrationTests.TestHasher, scr) + + rcv, _ := integrationTests.TestAddressPubkeyConverter.Encode(scr.RcvAddr) + snd, _ := integrationTests.TestAddressPubkeyConverter.Encode(scr.SndAddr) + return fmt.Sprintf("hash: %s, nonce: %d, value: %s, rcvAddr: %s, sender: %s, gasLimit: %d, gasPrice: %d, data: %s", + hex.EncodeToString(hash), + scr.Nonce, scr.Value.String(), + rcv, + snd, + scr.GasLimit, scr.GasPrice, data, + ) +} + +func txToString(tx *transaction.Transaction) string { + data := string(tx.Data) + if !isASCII(data) { + data = hex.EncodeToString(tx.Data) + } + + hash, _ := core.CalculateHash(integrationTests.TestMarshalizer, integrationTests.TestHasher, tx) + rcv, _ := integrationTests.TestAddressPubkeyConverter.Encode(tx.RcvAddr) + snd, _ := integrationTests.TestAddressPubkeyConverter.Encode(tx.SndAddr) + return fmt.Sprintf("hash: %s, nonce: %d, value: %s, rcvAddr: %s, sender: %s, gasLimit: %d, gasPrice: %d, data: %s", + hex.EncodeToString(hash), + tx.Nonce, tx.Value.String(), + rcv, + snd, + tx.GasLimit, tx.GasPrice, data, + ) +} + +func isASCII(data string) bool { + for i := 0; i < len(data); i++ { + if data[i] >= utf8.RuneSelf { + return false + } + + if data[i] >= logger.ASCIISpace { + continue + } + + if data[i] == logger.ASCIITab || data[i] == logger.ASCIILineFeed || data[i] == logger.ASCIINewLine { + continue + } + + return false + } + + return true +} + +func checkBalances(tb testing.TB, args argsProcessRegister, initialBalance *big.Int) { + entireBalance := big.NewInt(0) + relayerBalance := getBalance(args.testContextForRelayerAndUser, args.relayerAddress) + userBalance := getBalance(args.testContextForRelayerAndUser, args.userAddress) + scBalance := getBalance(args.testContextForDNSContract, args.scAddress) + accumulatedFees := big.NewInt(0).Set(args.testContextForRelayerAndUser.TxFeeHandler.GetAccumulatedFees()) + 
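// The loop above stops as soon as an SCR carries returnOkData; that constant is simply "@" followed
// by the hex encoding of "ok", as this small standalone snippet shows.
package example

import (
	"encoding/hex"
	"fmt"
)

func main() {
	fmt.Println("@" + hex.EncodeToString([]byte("ok"))) // prints "@6f6b"
}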
accumulatedFees.Add(accumulatedFees, args.testContextForDNSContract.TxFeeHandler.GetAccumulatedFees()) + + entireBalance.Add(entireBalance, relayerBalance) + entireBalance.Add(entireBalance, userBalance) + entireBalance.Add(entireBalance, scBalance) + entireBalance.Add(entireBalance, accumulatedFees) + + log.Info("checkBalances", + "relayerBalance", relayerBalance.String(), + "userBalance", userBalance.String(), + "scBalance", scBalance.String(), + "accumulated fees", accumulatedFees.String(), + "total", entireBalance.String(), + ) + + assert.Equal(tb, initialBalance, entireBalance) +} diff --git a/integrationTests/vm/txsFee/multiShard/esdt_test.go b/integrationTests/vm/txsFee/multiShard/esdt_test.go index 60941642343..f224b528ef6 100644 --- a/integrationTests/vm/txsFee/multiShard/esdt_test.go +++ b/integrationTests/vm/txsFee/multiShard/esdt_test.go @@ -1,9 +1,13 @@ package multiShard import ( + "encoding/hex" "math/big" + "strings" "testing" + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/data/transaction" "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/integrationTests/vm" "github.com/multiversx/mx-chain-go/integrationTests/vm/txsFee/utils" @@ -40,3 +44,145 @@ func TestESDTTransferShouldWork(t *testing.T) { expectedReceiverBalance := big.NewInt(100) utils.CheckESDTBalance(t, testContext, rcvAddr, token, expectedReceiverBalance) } + +func TestMultiESDTNFTTransferViaRelayedV2(t *testing.T) { + tokenID1 := []byte("MYNFT1") + tokenID2 := []byte("MYNFT2") + sh0Addr := []byte("12345678901234567890123456789010") + sh1Addr := []byte("12345678901234567890123456789011") + + relayerSh0 := []byte("12345678901234567890123456789110") + relayerSh1 := []byte("12345678901234567890123456789111") + sh0Context, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(0, config.EnableEpochs{}) + require.Nil(t, err) + defer sh0Context.Close() + + sh1Context, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(1, config.EnableEpochs{}) + require.Nil(t, err) + defer sh1Context.Close() + _, _ = vm.CreateAccount(sh1Context.Accounts, sh1Addr, 0, big.NewInt(10000000000)) + _, _ = vm.CreateAccount(sh0Context.Accounts, relayerSh0, 0, big.NewInt(1000000000)) + _, _ = vm.CreateAccount(sh1Context.Accounts, relayerSh1, 0, big.NewInt(1000000000)) + + // create the nfts, add the liquidity to the system accounts and check for balances + utils.CreateAccountWithESDTBalance(t, sh0Context.Accounts, sh0Addr, big.NewInt(100000000), tokenID1, 1, big.NewInt(1)) + utils.CreateAccountWithESDTBalance(t, sh0Context.Accounts, sh0Addr, big.NewInt(100000000), tokenID2, 1, big.NewInt(1)) + + sh0Accnt, _ := sh0Context.Accounts.LoadAccount(sh0Addr) + sh1Accnt, _ := sh1Context.Accounts.LoadAccount(sh1Addr) + + transfers := []*utils.TransferESDTData{ + { + Token: tokenID1, + Nonce: 1, + Value: big.NewInt(1), + }, + { + Token: tokenID2, + Nonce: 1, + Value: big.NewInt(1), + }, + } + + // + // Step 1: transfer the NFTs sh0->sh1 via multi transfer with a shard 0 relayer + // + + innerTx := utils.CreateMultiTransferTX(sh0Accnt.GetNonce(), sh0Addr, sh1Addr, 10, 10000000, transfers...) 
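// utils.CreateMultiTransferTX (its body is not shown in this diff) builds the multi ESDT/NFT
// transfer used throughout this test. For reference, a sketch of the standard MultiESDTNFTTransfer
// call-data encoding is given below; the encoding is an assumption here, not taken from the patch,
// and the helper name is illustrative.
package example

import (
	"encoding/hex"
	"math/big"
	"strings"
)

// buildMultiESDTNFTTransferData joins: function name, destination address, number of transfers and
// then one (token identifier, nonce, quantity) triplet per transfer, all hex encoded; the resulting
// transaction is sent to the sender's own address. Zero nonces or values encode as empty arguments.
func buildMultiESDTNFTTransferData(destination []byte, tokens [][]byte, nonces []uint64, values []*big.Int) string {
	parts := []string{
		"MultiESDTNFTTransfer",
		hex.EncodeToString(destination),
		hex.EncodeToString(big.NewInt(int64(len(tokens))).Bytes()),
	}
	for i := range tokens {
		parts = append(parts,
			hex.EncodeToString(tokens[i]),
			hex.EncodeToString(new(big.Int).SetUint64(nonces[i]).Bytes()),
			hex.EncodeToString(values[i].Bytes()),
		)
	}

	return strings.Join(parts, "@")
}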
+ relayedTx := createRelayedV2FromInnerTx(0, relayerSh0, innerTx) + + retCode, err := sh0Context.TxProcessor.ProcessTransaction(relayedTx) + require.Equal(t, vmcommon.Ok, retCode) + require.NoError(t, err) + + scrs := sh0Context.GetIntermediateTransactions(t) + + shard1Scr := scrs[0] + for _, scr := range scrs { + if scr.GetRcvAddr()[len(scr.GetRcvAddr())-1] == byte(0) { + shard1Scr = scr + break + } + } + // check the balances after the transfer, as well as the liquidity + utils.ProcessSCRResult(t, sh1Context, shard1Scr, vmcommon.Ok, nil) + utils.CheckESDTNFTBalance(t, sh0Context, sh0Addr, tokenID1, 1, big.NewInt(0)) + utils.CheckESDTNFTBalance(t, sh0Context, core.SystemAccountAddress, tokenID1, 1, big.NewInt(0)) + utils.CheckESDTNFTBalance(t, sh1Context, sh1Addr, tokenID1, 1, big.NewInt(1)) + utils.CheckESDTNFTBalance(t, sh1Context, core.SystemAccountAddress, tokenID1, 1, big.NewInt(1)) + + // + // Step 2: transfer the NFTs sh1->sh0 via multi transfer with a shard 1 relayer + // + + sh0Context.CleanIntermediateTransactions(t) + sh1Context.CleanIntermediateTransactions(t) + + innerTx = utils.CreateMultiTransferTX(sh1Accnt.GetNonce(), sh1Addr, sh0Addr, 10, 10000000, transfers...) + relayedTx = createRelayedV2FromInnerTx(0, relayerSh1, innerTx) + + retCode, err = sh1Context.TxProcessor.ProcessTransaction(relayedTx) + require.Equal(t, vmcommon.Ok, retCode) + require.NoError(t, err) + + scrs = sh1Context.GetIntermediateTransactions(t) + shard0Scr := scrs[0] + for _, scr := range scrs { + if scr.GetRcvAddr()[len(scr.GetRcvAddr())-1] == byte(0) { + shard0Scr = scr + break + } + } + // check the balances after the transfer, as well as the liquidity + utils.ProcessSCRResult(t, sh0Context, shard0Scr, vmcommon.Ok, nil) + utils.CheckESDTNFTBalance(t, sh0Context, sh0Addr, tokenID1, 1, big.NewInt(1)) + utils.CheckESDTNFTBalance(t, sh0Context, core.SystemAccountAddress, tokenID1, 1, big.NewInt(1)) + utils.CheckESDTNFTBalance(t, sh1Context, sh1Addr, tokenID1, 1, big.NewInt(0)) + utils.CheckESDTNFTBalance(t, sh1Context, core.SystemAccountAddress, tokenID1, 1, big.NewInt(0)) + + // + // Step 3: transfer the NFTs sh0->s1 via multi transfer with a shard 1 relayer + // + + sh0Context.CleanIntermediateTransactions(t) + sh1Context.CleanIntermediateTransactions(t) + + innerTx = utils.CreateMultiTransferTX(sh0Accnt.GetNonce()+1, sh0Addr, sh1Addr, 10, 10000000, transfers...) 
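// createRelayedV2FromInnerTx, defined at the end of this test file, wraps each of these inner
// transactions. Its payload layout is restated below as a self-contained sketch (helper and
// package names are illustrative):
package example

import (
	"encoding/hex"
	"math/big"
	"strings"

	"github.com/multiversx/mx-chain-core-go/data/transaction"
)

// relayedV2Payload mirrors the data field of a relayed v2 transaction:
// relayedTxV2@<hex(inner receiver)>@<hex(inner nonce)>@<hex(inner data)>@<hex(inner signature)>
// The relayed transaction itself carries zero value and is addressed to the inner sender.
func relayedV2Payload(innerTx *transaction.Transaction) string {
	nonceHex := "00"
	if innerTx.Nonce > 0 {
		nonceHex = hex.EncodeToString(big.NewInt(int64(innerTx.Nonce)).Bytes())
	}

	return strings.Join([]string{
		"relayedTxV2",
		hex.EncodeToString(innerTx.RcvAddr),
		nonceHex,
		hex.EncodeToString(innerTx.Data),
		hex.EncodeToString(innerTx.Signature),
	}, "@")
}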
+ relayedTx = createRelayedV2FromInnerTx(1, relayerSh1, innerTx) + + retCode, err = sh0Context.TxProcessor.ProcessTransaction(relayedTx) + require.Equal(t, vmcommon.Ok, retCode) + require.NoError(t, err) + + scrs = sh0Context.GetIntermediateTransactions(t) + shard1Scr = scrs[0] + for _, scr := range scrs { + if scr.GetRcvAddr()[len(scr.GetRcvAddr())-1] == byte(1) { + shard1Scr = scr + break + } + } + // check the balances after the transfer, as well as the liquidity + utils.ProcessSCRResult(t, sh1Context, shard1Scr, vmcommon.Ok, nil) + utils.CheckESDTNFTBalance(t, sh0Context, sh0Addr, tokenID1, 1, big.NewInt(0)) + utils.CheckESDTNFTBalance(t, sh0Context, core.SystemAccountAddress, tokenID1, 1, big.NewInt(0)) + utils.CheckESDTNFTBalance(t, sh1Context, sh1Addr, tokenID1, 1, big.NewInt(1)) + utils.CheckESDTNFTBalance(t, sh1Context, core.SystemAccountAddress, tokenID1, 1, big.NewInt(1)) +} + +func createRelayedV2FromInnerTx(relayerNonce uint64, relayer []byte, innerTx *transaction.Transaction) *transaction.Transaction { + nonceHex := "00" + if innerTx.Nonce > 0 { + nonceHex = hex.EncodeToString(big.NewInt(int64(innerTx.Nonce)).Bytes()) + } + data := strings.Join([]string{"relayedTxV2", hex.EncodeToString(innerTx.RcvAddr), nonceHex, hex.EncodeToString(innerTx.Data), hex.EncodeToString(innerTx.Signature)}, "@") + return &transaction.Transaction{ + Nonce: relayerNonce, + Value: big.NewInt(0), + SndAddr: relayer, + RcvAddr: innerTx.SndAddr, + GasPrice: innerTx.GasPrice, + GasLimit: innerTx.GasLimit + 1_000_000, + Data: []byte(data), + } +} diff --git a/integrationTests/vm/txsFee/relayedDns_test.go b/integrationTests/vm/txsFee/relayedDns_test.go index 37d1633a53f..761c566f5a0 100644 --- a/integrationTests/vm/txsFee/relayedDns_test.go +++ b/integrationTests/vm/txsFee/relayedDns_test.go @@ -35,7 +35,7 @@ func TestRelayedTxDnsTransaction_ShouldWork(t *testing.T) { _, _ = vm.CreateAccount(testContext.Accounts, rcvAddr, 0, big.NewInt(0)) _, _ = vm.CreateAccount(testContext.Accounts, relayerAddr, 0, big.NewInt(100000000)) - sndAddrUserName := utils.GenerateUserNameForMyDNSContract() + sndAddrUserName := utils.GenerateUserNameForDefaultDNSContract() txData := []byte("register@" + hex.EncodeToString(sndAddrUserName)) // create user name for sender innerTx := vm.CreateTransaction(0, big.NewInt(0), sndAddr, scAddress, gasPrice, gasLimit, txData) @@ -57,7 +57,7 @@ func TestRelayedTxDnsTransaction_ShouldWork(t *testing.T) { dnsUserNameAddr := ret.ReturnData[0] require.Equal(t, sndAddr, dnsUserNameAddr) - rcvAddrUserName := utils.GenerateUserNameForMyDNSContract() + rcvAddrUserName := utils.GenerateUserNameForDefaultDNSContract() txData = []byte("register@" + hex.EncodeToString(rcvAddrUserName)) // create user name for receiver innerTx = vm.CreateTransaction(0, big.NewInt(0), rcvAddr, scAddress, gasPrice, gasLimit, txData) diff --git a/integrationTests/vm/txsFee/utils/utils.go b/integrationTests/vm/txsFee/utils/utils.go index 88ba54a0fc2..423c6bc15ae 100644 --- a/integrationTests/vm/txsFee/utils/utils.go +++ b/integrationTests/vm/txsFee/utils/utils.go @@ -355,10 +355,15 @@ func randStringBytes(n int) string { return string(b) } -// GenerateUserNameForMyDNSContract - -func GenerateUserNameForMyDNSContract() []byte { +// GenerateUserNameForDefaultDNSContract - +func GenerateUserNameForDefaultDNSContract() []byte { + return GenerateUserNameForDNSContract([]byte{49}) +} + +// GenerateUserNameForDNSContract - +func GenerateUserNameForDNSContract(contractAddress []byte) []byte { testHasher := keccak.NewKeccak() - 
contractLastByte := byte(49) + contractLastByte := contractAddress[len(contractAddress)-1] for { userName := randStringBytes(10) diff --git a/integrationTests/vm/txsFee/utils/utilsESDT.go b/integrationTests/vm/txsFee/utils/utilsESDT.go index dbc2a6665e1..68fe255a1ba 100644 --- a/integrationTests/vm/txsFee/utils/utilsESDT.go +++ b/integrationTests/vm/txsFee/utils/utilsESDT.go @@ -3,6 +3,7 @@ package utils import ( "bytes" "encoding/hex" + "fmt" "math/big" "strings" "testing" @@ -43,7 +44,10 @@ func CreateAccountWithESDTBalance( } if esdtNonce > 0 { esdtData.TokenMetaData = &esdt.MetaData{ - Nonce: esdtNonce, + Name: []byte(fmt.Sprintf("Token %d", esdtNonce)), + URIs: [][]byte{[]byte(fmt.Sprintf("URI for token %d", esdtNonce))}, + Creator: pubKey, + Nonce: esdtNonce, } } diff --git a/integrationTests/vm/wasm/testdata/manage-user-contract.wasm b/integrationTests/vm/wasm/testdata/manage-user-contract.wasm new file mode 100644 index 00000000000..0d41f70927b Binary files /dev/null and b/integrationTests/vm/wasm/testdata/manage-user-contract.wasm differ diff --git a/integrationTests/vm/wasm/utils.go b/integrationTests/vm/wasm/utils.go index d6abad6b22b..bc6a7fdae2f 100644 --- a/integrationTests/vm/wasm/utils.go +++ b/integrationTests/vm/wasm/utils.go @@ -245,6 +245,7 @@ func (context *TestContext) initVMAndBlockchainHook() { argsBuiltIn := builtInFunctions.ArgsCreateBuiltInFunctionContainer{ GasSchedule: gasSchedule, MapDNSAddresses: DNSAddresses, + MapDNSV2Addresses: DNSAddresses, Marshalizer: marshalizer, Accounts: context.Accounts, ShardCoordinator: oneShardCoordinator, @@ -290,8 +291,9 @@ func (context *TestContext) initVMAndBlockchainHook() { MaxBatchSize: 100, }, }, - GasSchedule: gasSchedule, - Counter: &testscommon.BlockChainHookCounterStub{}, + GasSchedule: gasSchedule, + Counter: &testscommon.BlockChainHookCounterStub{}, + MissingTrieNodesNotifier: &testscommon.MissingTrieNodesNotifierStub{}, } vmFactoryConfig := config.VirtualMachineConfig{ diff --git a/integrationTests/vm/wasm/wasmvm/wasmVM_test.go b/integrationTests/vm/wasm/wasmvm/wasmVM_test.go index cb6fbaf3717..e8d4cdce9e3 100644 --- a/integrationTests/vm/wasm/wasmvm/wasmVM_test.go +++ b/integrationTests/vm/wasm/wasmvm/wasmVM_test.go @@ -933,3 +933,113 @@ func TestCommunityContract_CrossShard_TxProcessor(t *testing.T) { utils.TestAccount(t, testContextParentSC.Accounts, parentAddress, 0, zero) utils.TestAccount(t, testContextFunderSC.Accounts, funderAddress, 0, transferEGLD) } + +func TestDeployDNSV2SetDeleteUserNames(t *testing.T) { + senderAddressBytes, _ := vm.TestAddressPubkeyConverter.Decode(vm.DNSV2DeployerAddress) + senderNonce := uint64(0) + senderBalance := big.NewInt(100000000) + gasPrice := uint64(1) + gasLimit := uint64(10000000) + + scCode := wasm.GetSCCode("../testdata/manage-user-contract.wasm") + + tx := vm.CreateTx( + senderAddressBytes, + vm.CreateEmptyAddress(), + senderNonce, + big.NewInt(0), + gasPrice, + gasLimit, + wasm.CreateDeployTxData(scCode), + ) + + testContext, err := vm.CreatePreparedTxProcessorAndAccountsWithVMs( + senderNonce, + senderAddressBytes, + senderBalance, + config.EnableEpochs{}, + ) + require.Nil(t, err) + defer testContext.Close() + + returnCode, err := testContext.TxProcessor.ProcessTransaction(tx) + require.Nil(t, err) + require.Equal(t, returnCode, vmcommon.Ok) + + _, err = testContext.Accounts.Commit() + require.Nil(t, err) + + expectedBalance := big.NewInt(90000000) + + vm.TestAccount( + t, + testContext.Accounts, + senderAddressBytes, + senderNonce+1, + expectedBalance) + + 
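// GenerateUserNameForDNSContract above keeps drawing random 10-character names until one maps to
// the wanted DNS contract; its loop body is cut off in this diff, so the sketch below only captures
// the idea, assuming the match is done on the last byte of the keccak hash of the candidate name.
// The keccak import path and the helper name are also assumptions.
package example

import (
	"crypto/rand"

	"github.com/multiversx/mx-chain-core-go/hashing/keccak"
)

// generateNameForContractLastByte draws random lowercase names until the keccak hash of the
// candidate ends in wantedLastByte, i.e. the last byte of the target DNS contract address.
func generateNameForContractLastByte(wantedLastByte byte) string {
	hasher := keccak.NewKeccak()
	const letters = "abcdefghijklmnopqrstuvwxyz"

	for {
		raw := make([]byte, 10)
		_, _ = rand.Read(raw)

		candidate := make([]byte, len(raw))
		for i, b := range raw {
			candidate[i] = letters[int(b)%len(letters)]
		}

		hash := hasher.Compute(string(candidate))
		if hash[len(hash)-1] == wantedLastByte {
			return string(candidate)
		}
	}
}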
dnsV2Address, _ := vm.TestAddressPubkeyConverter.Decode(vm.DNSV2Address) + senderNonce++ + tx = vm.CreateTx( + senderAddressBytes, + dnsV2Address, + senderNonce, + big.NewInt(0), + gasPrice, + gasLimit, + "saveName@"+hex.EncodeToString(senderAddressBytes)+"@"+hex.EncodeToString([]byte("userName1")), + ) + + returnCode, err = testContext.TxProcessor.ProcessTransaction(tx) + require.Nil(t, err) + require.Equal(t, returnCode, vmcommon.Ok) + vm.TestAccountUsername(t, testContext.Accounts, senderAddressBytes, []byte("userName1")) + + senderNonce++ + tx = vm.CreateTx( + senderAddressBytes, + dnsV2Address, + senderNonce, + big.NewInt(0), + gasPrice, + gasLimit, + "removeName@"+hex.EncodeToString(senderAddressBytes), + ) + + returnCode, err = testContext.TxProcessor.ProcessTransaction(tx) + require.Nil(t, err) + require.Equal(t, returnCode, vmcommon.Ok) + vm.TestAccountUsername(t, testContext.Accounts, senderAddressBytes, nil) + + senderNonce++ + tx = vm.CreateTx( + senderAddressBytes, + dnsV2Address, + senderNonce, + big.NewInt(0), + gasPrice, + gasLimit, + "saveName@"+hex.EncodeToString(senderAddressBytes)+"@"+hex.EncodeToString([]byte("userName2")), + ) + + returnCode, err = testContext.TxProcessor.ProcessTransaction(tx) + require.Nil(t, err) + require.Equal(t, returnCode, vmcommon.Ok) + vm.TestAccountUsername(t, testContext.Accounts, senderAddressBytes, []byte("userName2")) + + senderNonce++ + tx = vm.CreateTx( + senderAddressBytes, + dnsV2Address, + senderNonce, + big.NewInt(0), + gasPrice, + gasLimit, + "saveName@"+hex.EncodeToString(senderAddressBytes)+"@"+hex.EncodeToString([]byte("userName3")), + ) + + returnCode, err = testContext.TxProcessor.ProcessTransaction(tx) + require.Nil(t, err) + require.Equal(t, returnCode, vmcommon.Ok) + vm.TestAccountUsername(t, testContext.Accounts, senderAddressBytes, []byte("userName3")) +} diff --git a/node/external/blockAPI/baseBlock.go b/node/external/blockAPI/baseBlock.go index 2c0867ef2ac..de881108c64 100644 --- a/node/external/blockAPI/baseBlock.go +++ b/node/external/blockAPI/baseBlock.go @@ -10,6 +10,7 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-core-go/data/alteredAccount" "github.com/multiversx/mx-chain-core-go/data/api" "github.com/multiversx/mx-chain-core-go/data/block" "github.com/multiversx/mx-chain-core-go/data/outport" @@ -359,11 +360,11 @@ func bigIntToStr(value *big.Int) string { return value.String() } -func alteredAccountsMapToAPIResponse(alteredAccounts map[string]*outport.AlteredAccount, tokensFilter string) []*outport.AlteredAccount { - response := make([]*outport.AlteredAccount, 0, len(alteredAccounts)) +func alteredAccountsMapToAPIResponse(alteredAccounts map[string]*alteredAccount.AlteredAccount, tokensFilter string) []*alteredAccount.AlteredAccount { + response := make([]*alteredAccount.AlteredAccount, 0, len(alteredAccounts)) for address, altAccount := range alteredAccounts { - apiAlteredAccount := &outport.AlteredAccount{ + apiAlteredAccount := &alteredAccount.AlteredAccount{ Address: address, Balance: altAccount.Balance, Nonce: altAccount.Nonce, @@ -379,13 +380,13 @@ func alteredAccountsMapToAPIResponse(alteredAccounts map[string]*outport.Altered return response } -func attachTokensToAlteredAccount(apiAlteredAccount *outport.AlteredAccount, altAccount *outport.AlteredAccount, tokensFilter string) { +func attachTokensToAlteredAccount(apiAlteredAccount *alteredAccount.AlteredAccount, 
altAccount *alteredAccount.AlteredAccount, tokensFilter string) { for _, token := range altAccount.Tokens { if !shouldAddTokenToResult(token.Identifier, tokensFilter) { continue } - apiAlteredAccount.Tokens = append(apiAlteredAccount.Tokens, &outport.AccountTokenData{ + apiAlteredAccount.Tokens = append(apiAlteredAccount.Tokens, &alteredAccount.AccountTokenData{ Identifier: token.Identifier, Balance: token.Balance, Nonce: token.Nonce, @@ -408,7 +409,7 @@ func shouldIncludeAllTokens(tokensFilter string) bool { return tokensFilter == "*" || tokensFilter == "all" } -func (bap *baseAPIBlockProcessor) apiBlockToAlteredAccounts(apiBlock *api.Block, options api.GetAlteredAccountsForBlockOptions) ([]*outport.AlteredAccount, error) { +func (bap *baseAPIBlockProcessor) apiBlockToAlteredAccounts(apiBlock *api.Block, options api.GetAlteredAccountsForBlockOptions) ([]*alteredAccount.AlteredAccount, error) { blockHash, err := hex.DecodeString(apiBlock.Hash) if err != nil { return nil, err @@ -448,13 +449,13 @@ func (bap *baseAPIBlockProcessor) apiBlockToAlteredAccounts(apiBlock *api.Block, return alteredAccountsAPI, nil } -func (bap *baseAPIBlockProcessor) apiBlockToOutportPool(apiBlock *api.Block) (*outport.Pool, error) { - pool := &outport.Pool{ - Txs: make(map[string]data.TransactionHandlerWithGasUsedAndFee), - Scrs: make(map[string]data.TransactionHandlerWithGasUsedAndFee), - Invalid: make(map[string]data.TransactionHandlerWithGasUsedAndFee), - Rewards: make(map[string]data.TransactionHandlerWithGasUsedAndFee), - Logs: make([]*data.LogData, 0), +func (bap *baseAPIBlockProcessor) apiBlockToOutportPool(apiBlock *api.Block) (*outport.TransactionPool, error) { + pool := &outport.TransactionPool{ + Transactions: make(map[string]*outport.TxInfo), + SmartContractResults: make(map[string]*outport.SCRInfo), + InvalidTxs: make(map[string]*outport.TxInfo), + Rewards: make(map[string]*outport.RewardInfo), + Logs: make([]*outport.LogData, 0), } var err error @@ -475,7 +476,7 @@ func (bap *baseAPIBlockProcessor) apiBlockToOutportPool(apiBlock *api.Block) (*o return pool, nil } -func (bap *baseAPIBlockProcessor) addLogsToPool(tx *transaction.ApiTransactionResult, pool *outport.Pool) error { +func (bap *baseAPIBlockProcessor) addLogsToPool(tx *transaction.ApiTransactionResult, pool *outport.TransactionPool) error { if tx.Logs == nil { return nil } @@ -500,18 +501,18 @@ func (bap *baseAPIBlockProcessor) addLogsToPool(tx *transaction.ApiTransactionRe }) } - pool.Logs = append(pool.Logs, &data.LogData{ - LogHandler: &transaction.Log{ + pool.Logs = append(pool.Logs, &outport.LogData{ + TxHash: tx.Hash, + Log: &transaction.Log{ Address: logAddressBytes, Events: logsEvents, }, - TxHash: tx.Hash, }) return nil } -func (bap *baseAPIBlockProcessor) addTxToPool(tx *transaction.ApiTransactionResult, pool *outport.Pool) error { +func (bap *baseAPIBlockProcessor) addTxToPool(tx *transaction.ApiTransactionResult, pool *outport.TransactionPool) error { senderBytes, err := bap.addressPubKeyConverter.Decode(tx.Sender) if err != nil && tx.Type != string(transaction.TxTypeReward) { return fmt.Errorf("error while decoding the sender address. address=%s, error=%s", tx.Sender, err.Error()) @@ -521,7 +522,6 @@ func (bap *baseAPIBlockProcessor) addTxToPool(tx *transaction.ApiTransactionResu return fmt.Errorf("error while decoding the receiver address. 
address=%s, error=%s", tx.Receiver, err.Error()) } - zeroBigInt := big.NewInt(0) txValueString := tx.Value if len(txValueString) == 0 { txValueString = "0" @@ -533,49 +533,54 @@ func (bap *baseAPIBlockProcessor) addTxToPool(tx *transaction.ApiTransactionResu switch tx.Type { case string(transaction.TxTypeNormal): - pool.Txs[tx.Hash] = outport.NewTransactionHandlerWithGasAndFee( - &transaction.Transaction{ + pool.Transactions[tx.Hash] = &outport.TxInfo{ + Transaction: &transaction.Transaction{ SndAddr: senderBytes, RcvAddr: receiverBytes, Value: txValue, }, - 0, - zeroBigInt, - ) + FeeInfo: newFeeInfo(), + } + case string(transaction.TxTypeUnsigned): - pool.Scrs[tx.Hash] = outport.NewTransactionHandlerWithGasAndFee( - &smartContractResult.SmartContractResult{ + pool.SmartContractResults[tx.Hash] = &outport.SCRInfo{ + SmartContractResult: &smartContractResult.SmartContractResult{ SndAddr: senderBytes, RcvAddr: receiverBytes, Value: txValue, }, - 0, - zeroBigInt, - ) + FeeInfo: newFeeInfo(), + } case string(transaction.TxTypeInvalid): - pool.Invalid[tx.Hash] = outport.NewTransactionHandlerWithGasAndFee( - &transaction.Transaction{ + pool.InvalidTxs[tx.Hash] = &outport.TxInfo{ + Transaction: &transaction.Transaction{ SndAddr: senderBytes, // do not set the receiver since the cost is only on sender's side in case of invalid txs Value: txValue, }, - 0, - zeroBigInt, - ) + FeeInfo: newFeeInfo(), + } + case string(transaction.TxTypeReward): - pool.Rewards[tx.Hash] = outport.NewTransactionHandlerWithGasAndFee( - &rewardTx.RewardTx{ + pool.Rewards[tx.Hash] = &outport.RewardInfo{ + Reward: &rewardTx.RewardTx{ RcvAddr: receiverBytes, Value: txValue, }, - 0, - zeroBigInt, - ) + } } return nil } +func newFeeInfo() *outport.FeeInfo { + return &outport.FeeInfo{ + GasUsed: 0, + Fee: big.NewInt(0), + InitialPaidFee: big.NewInt(0), + } +} + func createAlteredBlockHash(hash []byte) []byte { alteredHash := make([]byte, 0) alteredHash = append(alteredHash, hash...) 
diff --git a/node/external/blockAPI/baseBlock_test.go b/node/external/blockAPI/baseBlock_test.go index e65e10ccbd1..d4b5e46d822 100644 --- a/node/external/blockAPI/baseBlock_test.go +++ b/node/external/blockAPI/baseBlock_test.go @@ -87,7 +87,7 @@ func TestBaseBlockGetIntraMiniblocksSCRS(t *testing.T) { intraMbs, err := baseAPIBlockProc.getIntrashardMiniblocksFromReceiptsStorage(blockHeader, []byte{}, api.BlockQueryOptions{WithTransactions: true}) require.Nil(t, err) require.Equal(t, &api.MiniBlock{ - Hash: "7630a217810d1ad3ea67e32dbff0e8f3ea6d970191f03d3c71761b3b60e57b91", + Hash: "f4add7b23eb83cf290422b0f6b770e3007b8ed3cd9683797fc90c8b4881f27bd", Type: "SmartContractResultBlock", Transactions: []*transaction.ApiTransactionResult{ { @@ -97,7 +97,7 @@ func TestBaseBlockGetIntraMiniblocksSCRS(t *testing.T) { Receiver: "726376", Data: []byte("doSomething"), MiniBlockType: "SmartContractResultBlock", - MiniBlockHash: "7630a217810d1ad3ea67e32dbff0e8f3ea6d970191f03d3c71761b3b60e57b91", + MiniBlockHash: "f4add7b23eb83cf290422b0f6b770e3007b8ed3cd9683797fc90c8b4881f27bd", }, }, ProcessingType: block.Normal.String(), @@ -152,7 +152,7 @@ func TestBaseBlockGetIntraMiniblocksReceipts(t *testing.T) { intraMbs, err := baseAPIBlockProc.getIntrashardMiniblocksFromReceiptsStorage(blockHeader, []byte{}, api.BlockQueryOptions{WithTransactions: true}) require.Nil(t, err) require.Equal(t, &api.MiniBlock{ - Hash: "262b3023ca9ba61e90a60932b4db7f8b0d1dec7c2a00261cf0c5d43785f17f6f", + Hash: "596545f64319f2fcf8e0ebae06f40f3353d603f6070255588a48018c7b30c951", Type: "ReceiptBlock", Receipts: []*transaction.ApiReceipt{ { diff --git a/node/external/blockAPI/interface.go b/node/external/blockAPI/interface.go index 8519c8c4963..75707d828c7 100644 --- a/node/external/blockAPI/interface.go +++ b/node/external/blockAPI/interface.go @@ -2,8 +2,8 @@ package blockAPI import ( "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-core-go/data/alteredAccount" "github.com/multiversx/mx-chain-core-go/data/api" - "github.com/multiversx/mx-chain-core-go/data/outport" "github.com/multiversx/mx-chain-core-go/data/transaction" "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/state" @@ -22,7 +22,7 @@ type APIBlockHandler interface { GetBlockByNonce(nonce uint64, options api.BlockQueryOptions) (*api.Block, error) GetBlockByHash(hash []byte, options api.BlockQueryOptions) (*api.Block, error) GetBlockByRound(round uint64, options api.BlockQueryOptions) (*api.Block, error) - GetAlteredAccountsForBlock(options api.GetAlteredAccountsForBlockOptions) ([]*outport.AlteredAccount, error) + GetAlteredAccountsForBlock(options api.GetAlteredAccountsForBlockOptions) ([]*alteredAccount.AlteredAccount, error) IsInterfaceNil() bool } diff --git a/node/external/blockAPI/metaBlock.go b/node/external/blockAPI/metaBlock.go index 970b918eb02..9367153e11d 100644 --- a/node/external/blockAPI/metaBlock.go +++ b/node/external/blockAPI/metaBlock.go @@ -5,9 +5,9 @@ import ( "time" "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/data/alteredAccount" "github.com/multiversx/mx-chain-core-go/data/api" "github.com/multiversx/mx-chain-core-go/data/block" - "github.com/multiversx/mx-chain-core-go/data/outport" "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/dataRetriever" ) @@ -112,7 +112,7 @@ func (mbp *metaAPIBlockProcessor) GetBlockByRound(round uint64, options api.Bloc } // GetAlteredAccountsForBlock returns the altered accounts for the desired 
meta block -func (mbp *metaAPIBlockProcessor) GetAlteredAccountsForBlock(options api.GetAlteredAccountsForBlockOptions) ([]*outport.AlteredAccount, error) { +func (mbp *metaAPIBlockProcessor) GetAlteredAccountsForBlock(options api.GetAlteredAccountsForBlockOptions) ([]*alteredAccount.AlteredAccount, error) { headerHash, blockBytes, err := mbp.getHashAndBlockBytesFromStorer(options.GetBlockParameters) if err != nil { return nil, err diff --git a/node/external/blockAPI/metaBlock_test.go b/node/external/blockAPI/metaBlock_test.go index 205f4716b2d..b3cbb6d4ffb 100644 --- a/node/external/blockAPI/metaBlock_test.go +++ b/node/external/blockAPI/metaBlock_test.go @@ -7,6 +7,7 @@ import ( "testing" "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/data/alteredAccount" "github.com/multiversx/mx-chain-core-go/data/api" "github.com/multiversx/mx-chain-core-go/data/block" outportcore "github.com/multiversx/mx-chain-core-go/data/outport" @@ -892,11 +893,11 @@ func TestMetaAPIBlockProcessor_GetAlteredAccountsForBlock(t *testing.T) { metaAPIBlockProc.logsFacade = &testscommon.LogsFacadeStub{} metaAPIBlockProc.alteredAccountsProvider = &testscommon.AlteredAccountsProviderStub{ - ExtractAlteredAccountsFromPoolCalled: func(outportPool *outportcore.Pool, options shared.AlteredAccountsOptions) (map[string]*outportcore.AlteredAccount, error) { - retMap := map[string]*outportcore.AlteredAccount{} - for _, tx := range outportPool.Txs { - retMap[string(tx.GetSndAddr())] = &outportcore.AlteredAccount{ - Address: string(tx.GetSndAddr()), + ExtractAlteredAccountsFromPoolCalled: func(outportPool *outportcore.TransactionPool, options shared.AlteredAccountsOptions) (map[string]*alteredAccount.AlteredAccount, error) { + retMap := map[string]*alteredAccount.AlteredAccount{} + for _, tx := range outportPool.Transactions { + retMap[string(tx.Transaction.GetSndAddr())] = &alteredAccount.AlteredAccount{ + Address: string(tx.Transaction.GetSndAddr()), Balance: "10", } } @@ -912,7 +913,7 @@ func TestMetaAPIBlockProcessor_GetAlteredAccountsForBlock(t *testing.T) { }, }) require.NoError(t, err) - require.True(t, areAlteredAccountsResponsesTheSame([]*outportcore.AlteredAccount{ + require.True(t, areAlteredAccountsResponsesTheSame([]*alteredAccount.AlteredAccount{ { Address: "addr0", Balance: "10", diff --git a/node/external/blockAPI/shardBlock.go b/node/external/blockAPI/shardBlock.go index eab2e51ed9e..4f97f1af026 100644 --- a/node/external/blockAPI/shardBlock.go +++ b/node/external/blockAPI/shardBlock.go @@ -4,9 +4,9 @@ import ( "encoding/hex" "time" + "github.com/multiversx/mx-chain-core-go/data/alteredAccount" "github.com/multiversx/mx-chain-core-go/data/api" "github.com/multiversx/mx-chain-core-go/data/block" - "github.com/multiversx/mx-chain-core-go/data/outport" "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/node/filters" @@ -114,7 +114,7 @@ func (sbp *shardAPIBlockProcessor) GetBlockByRound(round uint64, options api.Blo } // GetAlteredAccountsForBlock will return the altered accounts for the desired shard block -func (sbp *shardAPIBlockProcessor) GetAlteredAccountsForBlock(options api.GetAlteredAccountsForBlockOptions) ([]*outport.AlteredAccount, error) { +func (sbp *shardAPIBlockProcessor) GetAlteredAccountsForBlock(options api.GetAlteredAccountsForBlockOptions) ([]*alteredAccount.AlteredAccount, error) { headerHash, blockBytes, err := sbp.getHashAndBlockBytesFromStorer(options.GetBlockParameters) if err 
!= nil { return nil, err diff --git a/node/external/blockAPI/shardBlock_test.go b/node/external/blockAPI/shardBlock_test.go index a32a9fe4cdf..5dd2f2bf8fb 100644 --- a/node/external/blockAPI/shardBlock_test.go +++ b/node/external/blockAPI/shardBlock_test.go @@ -6,6 +6,7 @@ import ( "math/big" "testing" + "github.com/multiversx/mx-chain-core-go/data/alteredAccount" "github.com/multiversx/mx-chain-core-go/data/api" "github.com/multiversx/mx-chain-core-go/data/block" outportcore "github.com/multiversx/mx-chain-core-go/data/outport" @@ -662,11 +663,11 @@ func TestShardAPIBlockProcessor_GetAlteredAccountsForBlock(t *testing.T) { metaAPIBlockProc.logsFacade = &testscommon.LogsFacadeStub{} metaAPIBlockProc.alteredAccountsProvider = &testscommon.AlteredAccountsProviderStub{ - ExtractAlteredAccountsFromPoolCalled: func(txPool *outportcore.Pool, options shared.AlteredAccountsOptions) (map[string]*outportcore.AlteredAccount, error) { - retMap := map[string]*outportcore.AlteredAccount{} - for _, tx := range txPool.Txs { - retMap[string(tx.GetSndAddr())] = &outportcore.AlteredAccount{ - Address: string(tx.GetSndAddr()), + ExtractAlteredAccountsFromPoolCalled: func(txPool *outportcore.TransactionPool, options shared.AlteredAccountsOptions) (map[string]*alteredAccount.AlteredAccount, error) { + retMap := map[string]*alteredAccount.AlteredAccount{} + for _, tx := range txPool.Transactions { + retMap[string(tx.Transaction.GetSndAddr())] = &alteredAccount.AlteredAccount{ + Address: string(tx.Transaction.GetSndAddr()), Balance: "10", } } @@ -682,7 +683,7 @@ func TestShardAPIBlockProcessor_GetAlteredAccountsForBlock(t *testing.T) { }, }) require.NoError(t, err) - require.True(t, areAlteredAccountsResponsesTheSame([]*outportcore.AlteredAccount{ + require.True(t, areAlteredAccountsResponsesTheSame([]*alteredAccount.AlteredAccount{ { Address: "addr0", Balance: "10", @@ -696,7 +697,7 @@ func TestShardAPIBlockProcessor_GetAlteredAccountsForBlock(t *testing.T) { }) } -func areAlteredAccountsResponsesTheSame(first []*outportcore.AlteredAccount, second []*outportcore.AlteredAccount) bool { +func areAlteredAccountsResponsesTheSame(first []*alteredAccount.AlteredAccount, second []*alteredAccount.AlteredAccount) bool { if len(first) != len(second) { return false } diff --git a/node/external/nodeApiResolver.go b/node/external/nodeApiResolver.go index acdb5e97fe6..3720e8a2293 100644 --- a/node/external/nodeApiResolver.go +++ b/node/external/nodeApiResolver.go @@ -7,8 +7,8 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-core-go/data/alteredAccount" "github.com/multiversx/mx-chain-core-go/data/api" - "github.com/multiversx/mx-chain-core-go/data/outport" "github.com/multiversx/mx-chain-core-go/data/transaction" "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/genesis" @@ -197,7 +197,7 @@ func (nar *nodeApiResolver) GetBlockByRound(round uint64, options api.BlockQuery } // GetAlteredAccountsForBlock will return the altered accounts for the desired block -func (nar *nodeApiResolver) GetAlteredAccountsForBlock(options api.GetAlteredAccountsForBlockOptions) ([]*outport.AlteredAccount, error) { +func (nar *nodeApiResolver) GetAlteredAccountsForBlock(options api.GetAlteredAccountsForBlockOptions) ([]*alteredAccount.AlteredAccount, error) { return nar.apiBlockHandler.GetAlteredAccountsForBlock(options) } diff --git a/node/mock/apiBlockHandlerStub.go b/node/mock/apiBlockHandlerStub.go index 
691072e0ca0..54bbed8c29c 100644 --- a/node/mock/apiBlockHandlerStub.go +++ b/node/mock/apiBlockHandlerStub.go @@ -1,8 +1,8 @@ package mock import ( + "github.com/multiversx/mx-chain-core-go/data/alteredAccount" "github.com/multiversx/mx-chain-core-go/data/api" - "github.com/multiversx/mx-chain-core-go/data/outport" ) // BlockAPIHandlerStub - @@ -10,7 +10,7 @@ type BlockAPIHandlerStub struct { GetBlockByNonceCalled func(nonce uint64, options api.BlockQueryOptions) (*api.Block, error) GetBlockByHashCalled func(hash []byte, options api.BlockQueryOptions) (*api.Block, error) GetBlockByRoundCalled func(round uint64, options api.BlockQueryOptions) (*api.Block, error) - GetAlteredAccountsForBlockCalled func(options api.GetAlteredAccountsForBlockOptions) ([]*outport.AlteredAccount, error) + GetAlteredAccountsForBlockCalled func(options api.GetAlteredAccountsForBlockOptions) ([]*alteredAccount.AlteredAccount, error) } // GetBlockByNonce - @@ -41,7 +41,7 @@ func (bah *BlockAPIHandlerStub) GetBlockByRound(round uint64, options api.BlockQ } // GetAlteredAccountsForBlock - -func (bah *BlockAPIHandlerStub) GetAlteredAccountsForBlock(options api.GetAlteredAccountsForBlockOptions) ([]*outport.AlteredAccount, error) { +func (bah *BlockAPIHandlerStub) GetAlteredAccountsForBlock(options api.GetAlteredAccountsForBlockOptions) ([]*alteredAccount.AlteredAccount, error) { if bah.GetAlteredAccountsForBlockCalled != nil { return bah.GetAlteredAccountsForBlockCalled(options) } diff --git a/node/mock/indexerStub.go b/node/mock/indexerStub.go deleted file mode 100644 index 6c8aaad7b6c..00000000000 --- a/node/mock/indexerStub.go +++ /dev/null @@ -1,62 +0,0 @@ -package mock - -import ( - "github.com/multiversx/mx-chain-core-go/data" - outportcore "github.com/multiversx/mx-chain-core-go/data/outport" - "github.com/multiversx/mx-chain-go/process" - "github.com/multiversx/mx-chain-go/state" -) - -// IndexerStub is a mock implementation fot the Indexer interface -type IndexerStub struct { - SaveBlockCalled func(args *outportcore.ArgsSaveBlockData) -} - -// SaveBlock - -func (im *IndexerStub) SaveBlock(args *outportcore.ArgsSaveBlockData) { - if im.SaveBlockCalled != nil { - im.SaveBlockCalled(args) - } -} - -// Close will do nothing -func (im *IndexerStub) Close() error { - return nil -} - -// SetTxLogsProcessor will do nothing -func (im *IndexerStub) SetTxLogsProcessor(_ process.TransactionLogProcessorDatabase) { -} - -// SaveRoundsInfo - -func (im *IndexerStub) SaveRoundsInfo(_ []*outportcore.RoundInfo) { - panic("implement me") -} - -// SaveValidatorsRating - -func (im *IndexerStub) SaveValidatorsRating(_ string, _ []*outportcore.ValidatorRatingInfo) { - -} - -// SaveValidatorsPubKeys - -func (im *IndexerStub) SaveValidatorsPubKeys(_ map[uint32][][]byte, _ uint32) { - panic("implement me") -} - -// RevertIndexedBlock - -func (im *IndexerStub) RevertIndexedBlock(_ data.HeaderHandler, _ data.BodyHandler) { -} - -// SaveAccounts - -func (im *IndexerStub) SaveAccounts(_ uint64, _ []state.UserAccountHandler) { -} - -// IsInterfaceNil returns true if there is no value under the interface -func (im *IndexerStub) IsInterfaceNil() bool { - return im == nil -} - -// IsNilIndexer - -func (im *IndexerStub) IsNilIndexer() bool { - return false -} diff --git a/node/node.go b/node/node.go index 3de7c9610a4..1d5cc001adb 100644 --- a/node/node.go +++ b/node/node.go @@ -577,9 +577,10 @@ func (n *Node) GetTokenSupply(token string) (*api.ESDTSupply, error) { } return &api.ESDTSupply{ - Supply: bigToString(esdtSupply.Supply), - Burned: 
bigToString(esdtSupply.Burned), - Minted: bigToString(esdtSupply.Minted), + Supply: bigToString(esdtSupply.Supply), + Burned: bigToString(esdtSupply.Burned), + Minted: bigToString(esdtSupply.Minted), + RecomputedSupply: esdtSupply.RecomputedSupply, }, nil } diff --git a/node/nodeRunner.go b/node/nodeRunner.go index acad2c629ad..db89809d43b 100644 --- a/node/nodeRunner.go +++ b/node/nodeRunner.go @@ -20,6 +20,7 @@ import ( "github.com/multiversx/mx-chain-core-go/core/closing" "github.com/multiversx/mx-chain-core-go/core/throttler" "github.com/multiversx/mx-chain-core-go/data/endProcess" + outportCore "github.com/multiversx/mx-chain-core-go/data/outport" "github.com/multiversx/mx-chain-go/api/gin" "github.com/multiversx/mx-chain-go/api/shared" "github.com/multiversx/mx-chain-go/common" @@ -59,9 +60,7 @@ import ( "github.com/multiversx/mx-chain-go/storage/cache" storageFactory "github.com/multiversx/mx-chain-go/storage/factory" "github.com/multiversx/mx-chain-go/storage/storageunit" - trieFactory "github.com/multiversx/mx-chain-go/trie/factory" trieStatistics "github.com/multiversx/mx-chain-go/trie/statistics" - "github.com/multiversx/mx-chain-go/trie/storageMarker" "github.com/multiversx/mx-chain-go/update/trigger" logger "github.com/multiversx/mx-chain-logger-go" ) @@ -611,7 +610,7 @@ func getUserAccountSyncer( processComponents mainFactory.ProcessComponentsHolder, ) (process.AccountsDBSyncer, error) { maxTrieLevelInMemory := config.StateTriesConfig.MaxStateTrieLevelInMemory - userTrie := stateComponents.TriesContainer().Get([]byte(trieFactory.UserAccountTrie)) + userTrie := stateComponents.TriesContainer().Get([]byte(dataRetriever.UserAccountsUnit.String())) storageManager := userTrie.GetStorageManager() thr, err := throttler.NewNumGoRoutinesThrottler(int32(config.TrieSync.NumConcurrentTrieSyncers)) @@ -644,7 +643,7 @@ func getValidatorAccountSyncer( processComponents mainFactory.ProcessComponentsHolder, ) (process.AccountsDBSyncer, error) { maxTrieLevelInMemory := config.StateTriesConfig.MaxPeerTrieLevelInMemory - peerTrie := stateComponents.TriesContainer().Get([]byte(trieFactory.PeerAccountTrie)) + peerTrie := stateComponents.TriesContainer().Get([]byte(dataRetriever.PeerAccountsUnit.String())) storageManager := peerTrie.GetStorageManager() args := syncer.ArgsNewValidatorAccountsSyncer{ @@ -679,7 +678,6 @@ func getBaseAccountSyncerArgs( MaxTrieLevelInMemory: maxTrieLevelInMemory, MaxHardCapForMissingNodes: config.TrieSync.MaxHardCapForMissingNodes, TrieSyncerVersion: config.TrieSync.TrieSyncerVersion, - StorageMarker: storageMarker.NewDisabledStorageMarker(), CheckNodesOnDisk: true, UserAccountsSyncStatisticsHandler: trieStatistics.NewTrieSyncStatistics(), AppStatusHandler: disabled.NewAppStatusHandler(), @@ -863,6 +861,7 @@ func (nr *nodeRunner) CreateManagedConsensusComponents( consensusArgs := consensusComp.ConsensusComponentsFactoryArgs{ Config: *nr.configs.GeneralConfig, + FlagsConfig: *nr.configs.FlagsConfig, BootstrapRoundIndex: nr.configs.FlagsConfig.BootstrapRoundIndex, CoreComponents: coreComponents, NetworkComponents: networkComponents, @@ -1209,7 +1208,7 @@ func (nr *nodeRunner) CreateManagedProcessComponents( processArgs := processComp.ProcessComponentsFactoryArgs{ Config: *configs.GeneralConfig, EpochConfig: *configs.EpochConfig, - PrefConfigs: configs.PreferencesConfig.Preferences, + PrefConfigs: *configs.PreferencesConfig, ImportDBConfig: *configs.ImportDbConfig, AccountsParser: accountsParser, SmartContractParser: smartContractParser, @@ -1228,11 +1227,9 @@ func (nr 
*nodeRunner) CreateManagedProcessComponents( WhiteListerVerifiedTxs: whiteListerVerifiedTxs, MaxRating: configs.RatingsConfig.General.MaxRating, SystemSCConfig: configs.SystemSCConfig, - Version: configs.FlagsConfig.Version, ImportStartHandler: importStartHandler, - WorkingDir: configs.FlagsConfig.WorkingDir, HistoryRepo: historyRepository, - SnapshotsEnabled: configs.FlagsConfig.SnapshotsEnabled, + FlagsConfig: *configs.FlagsConfig, } processComponentsFactory, err := processComp.NewProcessComponentsFactory(processArgs) if err != nil { @@ -1276,8 +1273,8 @@ func (nr *nodeRunner) CreateManagedDataComponents( Crypto: crypto, CurrentEpoch: storerEpoch, CreateTrieEpochRootHashStorer: configs.ImportDbConfig.ImportDbSaveTrieEpochRootHash, + FlagsConfigs: *configs.FlagsConfig, NodeProcessingMode: common.GetNodeProcessingMode(nr.configs.ImportDbConfig), - SnapshotsEnabled: configs.FlagsConfig.SnapshotsEnabled, } dataComponentsFactory, err := dataComp.NewDataComponentsFactory(dataArgs) @@ -1677,7 +1674,10 @@ func indexValidatorsListIfNeeded( } if len(validatorsPubKeys) > 0 { - outportHandler.SaveValidatorsPubKeys(validatorsPubKeys, epoch) + outportHandler.SaveValidatorsPubKeys(&outportCore.ValidatorsPubKeys{ + ShardValidatorsPubKeys: outportCore.ConvertPubKeys(validatorsPubKeys), + Epoch: epoch, + }) } } diff --git a/node/nodeRunner_test.go b/node/nodeRunner_test.go index ce0242db3f7..231dfc96c73 100644 --- a/node/nodeRunner_test.go +++ b/node/nodeRunner_test.go @@ -6,112 +6,22 @@ package node import ( "io/ioutil" "os" - "os/exec" "path" - "strings" "syscall" "testing" "time" "github.com/multiversx/mx-chain-core-go/data/endProcess" "github.com/multiversx/mx-chain-go/common" - "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/node/mock" + "github.com/multiversx/mx-chain-go/testscommon" "github.com/multiversx/mx-chain-go/testscommon/api" logger "github.com/multiversx/mx-chain-logger-go" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) -func createConfigs(tb testing.TB) *config.Configs { - tempDir := tb.TempDir() - - originalConfigsPath := "../cmd/node/config" - newConfigsPath := path.Join(tempDir, "config") - - cmd := exec.Command("cp", "-r", originalConfigsPath, newConfigsPath) - err := cmd.Run() - require.Nil(tb, err) - - newGenesisSmartContractsFilename := path.Join(newConfigsPath, "genesisSmartContracts.json") - correctTestPathInGenesisSmartContracts(tb, tempDir, newGenesisSmartContractsFilename) - - apiConfig, err := common.LoadApiConfig(path.Join(newConfigsPath, "api.toml")) - require.Nil(tb, err) - - generalConfig, err := common.LoadMainConfig(path.Join(newConfigsPath, "config.toml")) - require.Nil(tb, err) - - ratingsConfig, err := common.LoadRatingsConfig(path.Join(newConfigsPath, "ratings.toml")) - require.Nil(tb, err) - - economicsConfig, err := common.LoadEconomicsConfig(path.Join(newConfigsPath, "economics.toml")) - require.Nil(tb, err) - - prefsConfig, err := common.LoadPreferencesConfig(path.Join(newConfigsPath, "prefs.toml")) - require.Nil(tb, err) - - p2pConfig, err := common.LoadP2PConfig(path.Join(newConfigsPath, "p2p.toml")) - require.Nil(tb, err) - - externalConfig, err := common.LoadExternalConfig(path.Join(newConfigsPath, "external.toml")) - require.Nil(tb, err) - - systemSCConfig, err := common.LoadSystemSmartContractsConfig(path.Join(newConfigsPath, "systemSmartContractsConfig.toml")) - require.Nil(tb, err) - - epochConfig, err := common.LoadEpochConfig(path.Join(newConfigsPath, "enableEpochs.toml")) - require.Nil(tb, 
err) - - roundConfig, err := common.LoadRoundConfig(path.Join(newConfigsPath, "enableRounds.toml")) - require.Nil(tb, err) - - // make the node pass the network wait constraints - p2pConfig.Node.MinNumPeersToWaitForOnBootstrap = 0 - p2pConfig.Node.ThresholdMinConnectedPeers = 0 - - return &config.Configs{ - GeneralConfig: generalConfig, - ApiRoutesConfig: apiConfig, - EconomicsConfig: economicsConfig, - SystemSCConfig: systemSCConfig, - RatingsConfig: ratingsConfig, - PreferencesConfig: prefsConfig, - ExternalConfig: externalConfig, - P2pConfig: p2pConfig, - FlagsConfig: &config.ContextFlagsConfig{ - WorkingDir: tempDir, - NoKeyProvided: true, - Version: "test version", - DbDir: path.Join(tempDir, "db"), - }, - ImportDbConfig: &config.ImportDbConfig{}, - ConfigurationPathsHolder: &config.ConfigurationPathsHolder{ - GasScheduleDirectoryName: path.Join(newConfigsPath, "gasSchedules"), - Nodes: path.Join(newConfigsPath, "nodesSetup.json"), - Genesis: path.Join(newConfigsPath, "genesis.json"), - SmartContracts: newGenesisSmartContractsFilename, - ValidatorKey: "validatorKey.pem", - }, - EpochConfig: epochConfig, - RoundConfig: roundConfig, - } -} - -func correctTestPathInGenesisSmartContracts(tb testing.TB, tempDir string, newGenesisSmartContractsFilename string) { - input, err := ioutil.ReadFile(newGenesisSmartContractsFilename) - require.Nil(tb, err) - - lines := strings.Split(string(input), "\n") - for i, line := range lines { - if strings.Contains(line, "./config") { - lines[i] = strings.Replace(line, "./config", path.Join(tempDir, "config"), 1) - } - } - output := strings.Join(lines, "\n") - err = ioutil.WriteFile(newGenesisSmartContractsFilename, []byte(output), 0644) - require.Nil(tb, err) -} +const originalConfigsPath = "../cmd/node/config" func TestNewNodeRunner(t *testing.T) { t.Parallel() @@ -127,7 +37,7 @@ func TestNewNodeRunner(t *testing.T) { t.Run("with valid configs should work", func(t *testing.T) { t.Parallel() - configs := createConfigs(t) + configs := testscommon.CreateTestConfigs(t, originalConfigsPath) runner, err := NewNodeRunner(configs) assert.NotNil(t, runner) assert.Nil(t, err) @@ -137,7 +47,7 @@ func TestNewNodeRunner(t *testing.T) { func TestNodeRunner_StartAndCloseNodeUsingSIGINT(t *testing.T) { t.Parallel() - configs := createConfigs(t) + configs := testscommon.CreateTestConfigs(t, originalConfigsPath) runner, _ := NewNodeRunner(configs) trigger := mock.NewApplicationRunningTrigger() diff --git a/node/nodeTesting_test.go b/node/nodeTesting_test.go index e18d26ba218..8718ea5c8ea 100644 --- a/node/nodeTesting_test.go +++ b/node/nodeTesting_test.go @@ -21,8 +21,10 @@ import ( "github.com/multiversx/mx-chain-go/testscommon" "github.com/multiversx/mx-chain-go/testscommon/cryptoMocks" dataRetrieverMock "github.com/multiversx/mx-chain-go/testscommon/dataRetriever" + factoryMocks "github.com/multiversx/mx-chain-go/testscommon/factory" "github.com/multiversx/mx-chain-go/testscommon/p2pmocks" stateMock "github.com/multiversx/mx-chain-go/testscommon/state" + "github.com/multiversx/mx-chain-go/testscommon/storageManager" trieMock "github.com/multiversx/mx-chain-go/testscommon/trie" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -396,7 +398,7 @@ func getDefaultCryptoComponents() *factoryMock.CryptoComponentsMock { PubKeyBytes: []byte("pubKey"), BlockSig: &mock.SingleSignerMock{}, TxSig: &mock.SingleSignerMock{}, - MultiSigContainer: cryptoMocks.NewMultiSignerContainerMock( cryptoMocks.NewMultiSigner()), + MultiSigContainer: 
cryptoMocks.NewMultiSignerContainerMock(cryptoMocks.NewMultiSigner()), PeerSignHandler: &mock.PeerSignatureHandler{}, BlKeyGen: &mock.KeyGenMock{}, TxKeyGen: &mock.KeyGenMock{}, @@ -407,14 +409,14 @@ func getDefaultCryptoComponents() *factoryMock.CryptoComponentsMock { } } -func getDefaultStateComponents() *testscommon.StateComponentsMock { - return &testscommon.StateComponentsMock{ +func getDefaultStateComponents() *factoryMocks.StateComponentsMock { + return &factoryMocks.StateComponentsMock{ PeersAcc: &stateMock.AccountsStub{}, Accounts: &stateMock.AccountsStub{}, AccountsAPI: &stateMock.AccountsStub{}, AccountsRepo: &stateMock.AccountsRepositoryStub{}, Tries: &trieMock.TriesHolderStub{}, - StorageManagers: map[string]common.StorageManager{"0": &testscommon.StorageManagerStub{}}, + StorageManagers: map[string]common.StorageManager{"0": &storageManager.StorageManagerStub{}}, } } diff --git a/node/node_test.go b/node/node_test.go index e8309d770dd..a22e98e1b58 100644 --- a/node/node_test.go +++ b/node/node_test.go @@ -58,6 +58,7 @@ import ( stateMock "github.com/multiversx/mx-chain-go/testscommon/state" statusHandlerMock "github.com/multiversx/mx-chain-go/testscommon/statusHandler" mockStorage "github.com/multiversx/mx-chain-go/testscommon/storage" + "github.com/multiversx/mx-chain-go/testscommon/storageManager" trieMock "github.com/multiversx/mx-chain-go/testscommon/trie" "github.com/multiversx/mx-chain-go/testscommon/txsSenderMock" "github.com/multiversx/mx-chain-go/vm/systemSmartContracts" @@ -5017,7 +5018,7 @@ func getDefaultBootstrapComponents() *mainFactoryMocks.BootstrapComponentsStub { return &mainFactoryMocks.BootstrapComponentsStub{ Bootstrapper: &bootstrapMocks.EpochStartBootstrapperStub{ TrieHolder: &trieMock.TriesHolderStub{}, - StorageManagers: map[string]common.StorageManager{"0": &testscommon.StorageManagerStub{}}, + StorageManagers: map[string]common.StorageManager{"0": &storageManager.StorageManagerStub{}}, BootstrapCalled: nil, }, BootstrapParams: &bootstrapMocks.BootstrapParamsHandlerMock{}, diff --git a/outport/disabled/disabledOutport.go b/outport/disabled/disabledOutport.go index c97ecb91d83..97be7894c9b 100644 --- a/outport/disabled/disabledOutport.go +++ b/outport/disabled/disabledOutport.go @@ -1,7 +1,6 @@ package disabled import ( - "github.com/multiversx/mx-chain-core-go/data" outportcore "github.com/multiversx/mx-chain-core-go/data/outport" "github.com/multiversx/mx-chain-go/outport" ) @@ -14,31 +13,33 @@ func NewDisabledOutport() *disabledOutport { } // SaveBlock does nothing -func (n *disabledOutport) SaveBlock(_ *outportcore.ArgsSaveBlockData) { +func (n *disabledOutport) SaveBlock(_ *outportcore.OutportBlockWithHeaderAndBody) error { + return nil } // RevertIndexedBlock does nothing -func (n *disabledOutport) RevertIndexedBlock(_ data.HeaderHandler, _ data.BodyHandler) { +func (n *disabledOutport) RevertIndexedBlock(_ *outportcore.HeaderDataWithBody) error { + return nil } // SaveRoundsInfo does nothing -func (n *disabledOutport) SaveRoundsInfo(_ []*outportcore.RoundInfo) { +func (n *disabledOutport) SaveRoundsInfo(_ *outportcore.RoundsInfo) { } // SaveValidatorsPubKeys does nothing -func (n *disabledOutport) SaveValidatorsPubKeys(_ map[uint32][][]byte, _ uint32) { +func (n *disabledOutport) SaveValidatorsPubKeys(_ *outportcore.ValidatorsPubKeys) { } // SaveValidatorsRating does nothing -func (n *disabledOutport) SaveValidatorsRating(_ string, _ []*outportcore.ValidatorRatingInfo) { +func (n *disabledOutport) SaveValidatorsRating(_ 
*outportcore.ValidatorsRating) { } // SaveAccounts does nothing -func (n *disabledOutport) SaveAccounts(_ uint64, _ map[string]*outportcore.AlteredAccount, _ uint32) { +func (n *disabledOutport) SaveAccounts(_ *outportcore.Accounts) { } // FinalizedBlock does nothing -func (n *disabledOutport) FinalizedBlock(_ []byte) { +func (n *disabledOutport) FinalizedBlock(_ *outportcore.FinalizedBlock) { } // Close does nothing diff --git a/outport/errors.go b/outport/errors.go index eb44d2c671f..8c7ce22bb98 100644 --- a/outport/errors.go +++ b/outport/errors.go @@ -13,3 +13,7 @@ var ErrInvalidRetrialInterval = errors.New("invalid retrial interval") // ErrNilPubKeyConverter signals that a nil pubkey converter has been provided var ErrNilPubKeyConverter = errors.New("nil pub key converter") + +var errNilSaveBlockArgs = errors.New("nil save blocks args provided") + +var errNilHeaderAndBodyArgs = errors.New("nil header and body args provided") diff --git a/outport/factory/hostDriverFactory.go b/outport/factory/hostDriverFactory.go new file mode 100644 index 00000000000..78dbac7db18 --- /dev/null +++ b/outport/factory/hostDriverFactory.go @@ -0,0 +1,42 @@ +package factory + +import ( + "github.com/multiversx/mx-chain-communication-go/websocket/data" + "github.com/multiversx/mx-chain-communication-go/websocket/factory" + "github.com/multiversx/mx-chain-core-go/marshal" + "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/outport" + "github.com/multiversx/mx-chain-go/outport/host" + logger "github.com/multiversx/mx-chain-logger-go" +) + +type ArgsHostDriverFactory struct { + HostConfig config.HostDriverConfig + Marshaller marshal.Marshalizer +} + +var log = logger.GetOrCreate("outport/factory/hostdriver") + +// CreateHostDriver will create a new instance of outport.Driver +func CreateHostDriver(args ArgsHostDriverFactory) (outport.Driver, error) { + wsHost, err := factory.CreateWebSocketHost(factory.ArgsWebSocketHost{ + WebSocketConfig: data.WebSocketConfig{ + URL: args.HostConfig.URL, + WithAcknowledge: args.HostConfig.WithAcknowledge, + Mode: args.HostConfig.Mode, + RetryDurationInSec: args.HostConfig.RetryDurationInSec, + BlockingAckOnError: args.HostConfig.BlockingAckOnError, + }, + Marshaller: args.Marshaller, + Log: log, + }) + if err != nil { + return nil, err + } + + return host.NewHostDriver(host.ArgsHostDriver{ + Marshaller: args.Marshaller, + SenderHost: wsHost, + Log: log, + }) +} diff --git a/outport/factory/hostDriverFactory_test.go b/outport/factory/hostDriverFactory_test.go new file mode 100644 index 00000000000..834fa793b6c --- /dev/null +++ b/outport/factory/hostDriverFactory_test.go @@ -0,0 +1,30 @@ +package factory + +import ( + "fmt" + "testing" + + "github.com/multiversx/mx-chain-communication-go/websocket/data" + "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/testscommon" + "github.com/stretchr/testify/require" +) + +func TestCreateHostDriver(t *testing.T) { + t.Parallel() + + args := ArgsHostDriverFactory{ + HostConfig: config.HostDriverConfig{ + URL: "localhost", + RetryDurationInSec: 1, + MarshallerType: "json", + Mode: data.ModeClient, + }, + Marshaller: &testscommon.MarshalizerMock{}, + } + + driver, err := CreateHostDriver(args) + require.Nil(t, err) + require.NotNil(t, driver) + require.Equal(t, "*host.hostDriver", fmt.Sprintf("%T", driver)) +} diff --git a/outport/factory/notifierFactory.go b/outport/factory/notifierFactory.go index d1ceb412230..a4cc572f491 100644 --- a/outport/factory/notifierFactory.go +++ 
b/outport/factory/notifierFactory.go @@ -3,7 +3,7 @@ package factory import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" - "github.com/multiversx/mx-chain-core-go/hashing" + "github.com/multiversx/mx-chain-core-go/data/block" "github.com/multiversx/mx-chain-core-go/marshal" "github.com/multiversx/mx-chain-go/outport" "github.com/multiversx/mx-chain-go/outport/notifier" @@ -18,8 +18,6 @@ type EventNotifierFactoryArgs struct { Password string RequestTimeoutSec int Marshaller marshal.Marshalizer - Hasher hashing.Hasher - PubKeyConverter core.PubkeyConverter } // CreateEventNotifier will create a new event notifier client instance @@ -40,11 +38,15 @@ func CreateEventNotifier(args *EventNotifierFactoryArgs) (outport.Driver, error) return nil, err } + blockContainer, err := createBlockCreatorsContainer() + if err != nil { + return nil, err + } + notifierArgs := notifier.ArgsEventNotifier{ - HttpClient: httpClient, - Marshaller: args.Marshaller, - Hasher: args.Hasher, - PubKeyConverter: args.PubKeyConverter, + HttpClient: httpClient, + Marshaller: args.Marshaller, + BlockContainer: blockContainer, } return notifier.NewEventNotifier(notifierArgs) @@ -54,12 +56,24 @@ func checkInputArgs(args *EventNotifierFactoryArgs) error { if check.IfNil(args.Marshaller) { return core.ErrNilMarshalizer } - if check.IfNil(args.Hasher) { - return core.ErrNilHasher + + return nil +} + +func createBlockCreatorsContainer() (notifier.BlockContainerHandler, error) { + container := block.NewEmptyBlockCreatorsContainer() + err := container.Add(core.ShardHeaderV1, block.NewEmptyHeaderCreator()) + if err != nil { + return nil, err + } + err = container.Add(core.ShardHeaderV2, block.NewEmptyHeaderV2Creator()) + if err != nil { + return nil, err } - if check.IfNil(args.PubKeyConverter) { - return outport.ErrNilPubKeyConverter + err = container.Add(core.MetaHeader, block.NewEmptyMetaBlockCreator()) + if err != nil { + return nil, err } - return nil + return container, nil } diff --git a/outport/factory/notifierFactory_test.go b/outport/factory/notifierFactory_test.go index c588e586c83..ae38fe7964b 100644 --- a/outport/factory/notifierFactory_test.go +++ b/outport/factory/notifierFactory_test.go @@ -4,10 +4,8 @@ import ( "testing" "github.com/multiversx/mx-chain-core-go/core" - "github.com/multiversx/mx-chain-go/outport" "github.com/multiversx/mx-chain-go/outport/factory" "github.com/multiversx/mx-chain-go/testscommon" - "github.com/multiversx/mx-chain-go/testscommon/hashingMocks" "github.com/stretchr/testify/require" ) @@ -20,8 +18,6 @@ func createMockNotifierFactoryArgs() *factory.EventNotifierFactoryArgs { Password: "", RequestTimeoutSec: 1, Marshaller: &testscommon.MarshalizerMock{}, - Hasher: &hashingMocks.HasherMock{}, - PubKeyConverter: &testscommon.PubkeyConverterMock{}, } } @@ -39,28 +35,6 @@ func TestCreateEventNotifier(t *testing.T) { require.Equal(t, core.ErrNilMarshalizer, err) }) - t.Run("nil hasher", func(t *testing.T) { - t.Parallel() - - args := createMockNotifierFactoryArgs() - args.Hasher = nil - - en, err := factory.CreateEventNotifier(args) - require.Nil(t, en) - require.Equal(t, core.ErrNilHasher, err) - }) - - t.Run("nil pub key converter", func(t *testing.T) { - t.Parallel() - - args := createMockNotifierFactoryArgs() - args.PubKeyConverter = nil - - en, err := factory.CreateEventNotifier(args) - require.Nil(t, en) - require.Equal(t, outport.ErrNilPubKeyConverter, err) - }) - t.Run("should work", func(t *testing.T) { t.Parallel() diff --git 
a/outport/factory/outportFactory.go b/outport/factory/outportFactory.go index 82465385efe..d7414ec1662 100644 --- a/outport/factory/outportFactory.go +++ b/outport/factory/outportFactory.go @@ -3,23 +3,16 @@ package factory import ( "time" - wsDriverFactory "github.com/multiversx/mx-chain-core-go/websocketOutportDriver/factory" indexerFactory "github.com/multiversx/mx-chain-es-indexer-go/process/factory" "github.com/multiversx/mx-chain-go/outport" ) -// WrappedOutportDriverWebSocketSenderFactoryArgs extends the wsDriverFactory.OutportDriverWebSocketSenderFactoryArgs structure with the Enabled field -type WrappedOutportDriverWebSocketSenderFactoryArgs struct { - Enabled bool - wsDriverFactory.OutportDriverWebSocketSenderFactoryArgs -} - // OutportFactoryArgs holds the factory arguments of different outport drivers type OutportFactoryArgs struct { - RetrialInterval time.Duration - ElasticIndexerFactoryArgs indexerFactory.ArgsIndexerFactory - EventNotifierFactoryArgs *EventNotifierFactoryArgs - WebSocketSenderDriverFactoryArgs WrappedOutportDriverWebSocketSenderFactoryArgs + RetrialInterval time.Duration + ElasticIndexerFactoryArgs indexerFactory.ArgsIndexerFactory + EventNotifierFactoryArgs *EventNotifierFactoryArgs + HostDriverArgs ArgsHostDriverFactory } // CreateOutport will create a new instance of OutportHandler @@ -53,7 +46,7 @@ func createAndSubscribeDrivers(outport outport.OutportHandler, args *OutportFact return err } - return createAndSubscribeWebSocketDriver(outport, args.WebSocketSenderDriverFactoryArgs) + return createAndSubscribeHostDriverIfNeeded(outport, args.HostDriverArgs) } func createAndSubscribeElasticDriverIfNeeded( @@ -96,23 +89,18 @@ func checkArguments(args *OutportFactoryArgs) error { return nil } -func createAndSubscribeWebSocketDriver( +func createAndSubscribeHostDriverIfNeeded( outport outport.OutportHandler, - args WrappedOutportDriverWebSocketSenderFactoryArgs, + args ArgsHostDriverFactory, ) error { - if !args.Enabled { + if !args.HostConfig.Enabled { return nil } - wsFactory, err := wsDriverFactory.NewOutportDriverWebSocketSenderFactory(args.OutportDriverWebSocketSenderFactoryArgs) - if err != nil { - return err - } - - wsDriver, err := wsFactory.Create() + hostDriver, err := CreateHostDriver(args) if err != nil { return err } - return outport.SubscribeDriver(wsDriver) + return outport.SubscribeDriver(hostDriver) } diff --git a/outport/factory/outportFactory_test.go b/outport/factory/outportFactory_test.go index 04dd4fe6633..29446844737 100644 --- a/outport/factory/outportFactory_test.go +++ b/outport/factory/outportFactory_test.go @@ -10,8 +10,6 @@ import ( "github.com/multiversx/mx-chain-go/outport/factory" notifierFactory "github.com/multiversx/mx-chain-go/outport/factory" "github.com/multiversx/mx-chain-go/process/mock" - "github.com/multiversx/mx-chain-go/testscommon" - "github.com/multiversx/mx-chain-go/testscommon/hashingMocks" "github.com/stretchr/testify/require" ) @@ -96,8 +94,6 @@ func TestCreateOutport_SubscribeNotifierDriver(t *testing.T) { args := createMockArgsOutportHandler(false, true) args.EventNotifierFactoryArgs.Marshaller = &mock.MarshalizerMock{} - args.EventNotifierFactoryArgs.Hasher = &hashingMocks.HasherMock{} - args.EventNotifierFactoryArgs.PubKeyConverter = &testscommon.PubkeyConverterMock{} args.EventNotifierFactoryArgs.RequestTimeoutSec = 1 outPort, err := factory.CreateOutport(args) require.Nil(t, err) diff --git a/outport/host/driver.go b/outport/host/driver.go new file mode 100644 index 00000000000..c8f428fd331 --- /dev/null 
+++ b/outport/host/driver.go @@ -0,0 +1,114 @@ +package host + +import ( + "fmt" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/core/atomic" + "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-core-go/data/outport" + "github.com/multiversx/mx-chain-core-go/marshal" +) + +// ArgsHostDriver holds the arguments needed for creating a new hostDriver +type ArgsHostDriver struct { + Marshaller marshal.Marshalizer + SenderHost SenderHost + Log core.Logger +} + +type hostDriver struct { + marshaller marshal.Marshalizer + senderHost SenderHost + isClosed atomic.Flag + log core.Logger +} + +// NewHostDriver will create a new instance of hostDriver +func NewHostDriver(args ArgsHostDriver) (*hostDriver, error) { + if check.IfNil(args.SenderHost) { + return nil, ErrNilHost + } + if check.IfNil(args.Marshaller) { + return nil, core.ErrNilMarshalizer + } + if check.IfNil(args.Log) { + return nil, core.ErrNilLogger + } + + return &hostDriver{ + marshaller: args.Marshaller, + senderHost: args.SenderHost, + log: args.Log, + isClosed: atomic.Flag{}, + }, nil +} + +// SaveBlock will handle the saving of block +func (o *hostDriver) SaveBlock(outportBlock *outport.OutportBlock) error { + return o.handleAction(outportBlock, outport.TopicSaveBlock) +} + +// RevertIndexedBlock will handle the action of reverting the indexed block +func (o *hostDriver) RevertIndexedBlock(blockData *outport.BlockData) error { + return o.handleAction(blockData, outport.TopicRevertIndexedBlock) +} + +// SaveRoundsInfo will handle the saving of rounds +func (o *hostDriver) SaveRoundsInfo(roundsInfos *outport.RoundsInfo) error { + return o.handleAction(roundsInfos, outport.TopicSaveRoundsInfo) +} + +// SaveValidatorsPubKeys will handle the saving of the validators' public keys +func (o *hostDriver) SaveValidatorsPubKeys(validatorsPubKeys *outport.ValidatorsPubKeys) error { + return o.handleAction(validatorsPubKeys, outport.TopicSaveValidatorsPubKeys) +} + +// SaveValidatorsRating will handle the saving of the validators' rating +func (o *hostDriver) SaveValidatorsRating(validatorsRating *outport.ValidatorsRating) error { + return o.handleAction(validatorsRating, outport.TopicSaveValidatorsRating) +} + +// SaveAccounts will handle the accounts' saving +func (o *hostDriver) SaveAccounts(accounts *outport.Accounts) error { + return o.handleAction(accounts, outport.TopicSaveAccounts) +} + +// FinalizedBlock will handle the finalized block +func (o *hostDriver) FinalizedBlock(finalizedBlock *outport.FinalizedBlock) error { + return o.handleAction(finalizedBlock, outport.TopicFinalizedBlock) +} + +// GetMarshaller returns the internal marshaller +func (o *hostDriver) GetMarshaller() marshal.Marshalizer { + return o.marshaller +} + +func (o *hostDriver) handleAction(args interface{}, topic string) error { + if o.isClosed.IsSet() { + return ErrHostIsClosed + } + + marshalledPayload, err := o.marshaller.Marshal(args) + if err != nil { + return fmt.Errorf("%w while marshaling block for topic %s", err, topic) + } + + err = o.senderHost.Send(marshalledPayload, topic) + if err != nil { + return fmt.Errorf("%w while sending data on route for topic %s", err, topic) + } + + return nil +} + +// Close will handle the closing of the outport driver web socket sender +func (o *hostDriver) Close() error { + o.isClosed.SetValue(true) + return o.senderHost.Close() +} + +// IsInterfaceNil returns true if there is no value under the interface +func (o *hostDriver) IsInterfaceNil() 
bool { + return o == nil +} diff --git a/outport/host/driver_test.go b/outport/host/driver_test.go new file mode 100644 index 00000000000..6e595206c07 --- /dev/null +++ b/outport/host/driver_test.go @@ -0,0 +1,333 @@ +package host + +import ( + "errors" + "testing" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/data/block" + "github.com/multiversx/mx-chain-core-go/data/outport" + "github.com/multiversx/mx-chain-core-go/marshal" + outportStubs "github.com/multiversx/mx-chain-go/testscommon/outport" + logger "github.com/multiversx/mx-chain-logger-go" + "github.com/stretchr/testify/require" +) + +var cannotSendOnRouteErr = errors.New("cannot send on route") + +var log = logger.GetOrCreate("test") + +func getMockArgs() ArgsHostDriver { + return ArgsHostDriver{ + Marshaller: &marshal.JsonMarshalizer{}, + SenderHost: &outportStubs.SenderHostStub{}, + Log: log, + } +} + +func TestNewWebsocketOutportDriverNodePart(t *testing.T) { + t.Parallel() + + t.Run("nil marshaller", func(t *testing.T) { + t.Parallel() + + args := getMockArgs() + args.Marshaller = nil + + o, err := NewHostDriver(args) + require.Nil(t, o) + require.Equal(t, core.ErrNilMarshalizer, err) + }) + + t.Run("nil logger", func(t *testing.T) { + t.Parallel() + + args := getMockArgs() + args.Log = nil + + o, err := NewHostDriver(args) + require.Nil(t, o) + require.Equal(t, core.ErrNilLogger, err) + }) + + t.Run("should work", func(t *testing.T) { + t.Parallel() + + args := getMockArgs() + + o, err := NewHostDriver(args) + require.NotNil(t, o) + require.NoError(t, err) + require.False(t, o.IsInterfaceNil()) + }) +} + +func TestWebsocketOutportDriverNodePart_SaveBlock(t *testing.T) { + t.Parallel() + + t.Run("SaveBlock - should error", func(t *testing.T) { + t.Parallel() + + args := getMockArgs() + args.SenderHost = &outportStubs.SenderHostStub{ + SendCalled: func(_ []byte, _ string) error { + return cannotSendOnRouteErr + }, + } + o, err := NewHostDriver(args) + require.NoError(t, err) + + err = o.SaveBlock(&outport.OutportBlock{}) + require.True(t, errors.Is(err, cannotSendOnRouteErr)) + }) + + t.Run("SaveBlock - should work", func(t *testing.T) { + t.Parallel() + + defer func() { + r := recover() + require.Nil(t, r) + }() + args := getMockArgs() + o, err := NewHostDriver(args) + require.NoError(t, err) + + err = o.SaveBlock(&outport.OutportBlock{}) + require.NoError(t, err) + }) +} + +func TestWebsocketOutportDriverNodePart_FinalizedBlock(t *testing.T) { + t.Parallel() + + t.Run("Finalized block - should error", func(t *testing.T) { + args := getMockArgs() + args.SenderHost = &outportStubs.SenderHostStub{ + SendCalled: func(_ []byte, _ string) error { + return cannotSendOnRouteErr + }, + } + o, err := NewHostDriver(args) + require.NoError(t, err) + + err = o.FinalizedBlock(&outport.FinalizedBlock{HeaderHash: []byte("header hash")}) + require.True(t, errors.Is(err, cannotSendOnRouteErr)) + }) + + t.Run("Finalized block - should work", func(t *testing.T) { + args := getMockArgs() + args.SenderHost = &outportStubs.SenderHostStub{ + SendCalled: func(_ []byte, _ string) error { + return nil + }, + } + o, err := NewHostDriver(args) + require.NoError(t, err) + + err = o.FinalizedBlock(&outport.FinalizedBlock{HeaderHash: []byte("header hash")}) + require.NoError(t, err) + }) +} + +func TestWebsocketOutportDriverNodePart_RevertIndexedBlock(t *testing.T) { + t.Parallel() + + t.Run("RevertIndexedBlock - should error", func(t *testing.T) { + args := getMockArgs() + args.SenderHost = 
&outportStubs.SenderHostStub{ + SendCalled: func(_ []byte, _ string) error { + return cannotSendOnRouteErr + }, + } + o, err := NewHostDriver(args) + require.NoError(t, err) + + err = o.RevertIndexedBlock(nil) + require.True(t, errors.Is(err, cannotSendOnRouteErr)) + }) + + t.Run("RevertIndexedBlock block - should work", func(t *testing.T) { + args := getMockArgs() + args.SenderHost = &outportStubs.SenderHostStub{ + SendCalled: func(_ []byte, _ string) error { + return nil + }, + } + o, err := NewHostDriver(args) + require.NoError(t, err) + + err = o.RevertIndexedBlock(nil) + require.NoError(t, err) + }) +} + +func TestWebsocketOutportDriverNodePart_SaveAccounts(t *testing.T) { + t.Parallel() + + t.Run("SaveAccounts - should error", func(t *testing.T) { + args := getMockArgs() + args.SenderHost = &outportStubs.SenderHostStub{ + SendCalled: func(_ []byte, _ string) error { + return cannotSendOnRouteErr + }, + } + o, err := NewHostDriver(args) + require.NoError(t, err) + + err = o.SaveAccounts(nil) + require.True(t, errors.Is(err, cannotSendOnRouteErr)) + }) + + t.Run("SaveAccounts block - should work", func(t *testing.T) { + args := getMockArgs() + args.SenderHost = &outportStubs.SenderHostStub{ + SendCalled: func(_ []byte, _ string) error { + return nil + }, + } + o, err := NewHostDriver(args) + require.NoError(t, err) + + err = o.SaveAccounts(nil) + require.NoError(t, err) + }) +} + +func TestWebsocketOutportDriverNodePart_SaveRoundsInfo(t *testing.T) { + t.Parallel() + + t.Run("SaveRoundsInfo - should error", func(t *testing.T) { + args := getMockArgs() + args.SenderHost = &outportStubs.SenderHostStub{ + SendCalled: func(_ []byte, _ string) error { + return cannotSendOnRouteErr + }, + } + o, err := NewHostDriver(args) + require.NoError(t, err) + + err = o.SaveRoundsInfo(nil) + require.True(t, errors.Is(err, cannotSendOnRouteErr)) + }) + + t.Run("SaveRoundsInfo block - should work", func(t *testing.T) { + args := getMockArgs() + args.SenderHost = &outportStubs.SenderHostStub{ + SendCalled: func(_ []byte, _ string) error { + return nil + }, + } + o, err := NewHostDriver(args) + require.NoError(t, err) + + err = o.SaveRoundsInfo(nil) + require.NoError(t, err) + }) +} + +func TestWebsocketOutportDriverNodePart_SaveValidatorsPubKeys(t *testing.T) { + t.Parallel() + + t.Run("SaveValidatorsPubKeys - should error", func(t *testing.T) { + args := getMockArgs() + args.SenderHost = &outportStubs.SenderHostStub{ + SendCalled: func(_ []byte, _ string) error { + return cannotSendOnRouteErr + }, + } + o, err := NewHostDriver(args) + require.NoError(t, err) + + err = o.SaveValidatorsPubKeys(nil) + require.True(t, errors.Is(err, cannotSendOnRouteErr)) + }) + + t.Run("SaveValidatorsPubKeys block - should work", func(t *testing.T) { + args := getMockArgs() + args.SenderHost = &outportStubs.SenderHostStub{ + SendCalled: func(_ []byte, _ string) error { + return nil + }, + } + o, err := NewHostDriver(args) + require.NoError(t, err) + + err = o.SaveValidatorsPubKeys(nil) + require.NoError(t, err) + }) +} + +func TestWebsocketOutportDriverNodePart_SaveValidatorsRating(t *testing.T) { + t.Parallel() + + t.Run("SaveValidatorsRating - should error", func(t *testing.T) { + args := getMockArgs() + args.SenderHost = &outportStubs.SenderHostStub{ + SendCalled: func(_ []byte, _ string) error { + return cannotSendOnRouteErr + }, + } + o, err := NewHostDriver(args) + require.NoError(t, err) + + err = o.SaveValidatorsRating(nil) + require.True(t, errors.Is(err, cannotSendOnRouteErr)) + }) + + t.Run("SaveValidatorsRating 
block - should work", func(t *testing.T) { + args := getMockArgs() + args.SenderHost = &outportStubs.SenderHostStub{ + SendCalled: func(_ []byte, _ string) error { + return nil + }, + } + o, err := NewHostDriver(args) + require.NoError(t, err) + + err = o.SaveValidatorsRating(nil) + require.NoError(t, err) + }) +} + +func TestWebsocketOutportDriverNodePart_SaveBlock_PayloadCheck(t *testing.T) { + t.Parallel() + + mockArgs := getMockArgs() + + outportBlock := &outport.OutportBlock{BlockData: &outport.BlockData{Body: &block.Body{}}} + marshaledData, err := mockArgs.Marshaller.Marshal(outportBlock) + require.Nil(t, err) + + mockArgs.SenderHost = &outportStubs.SenderHostStub{ + SendCalled: func(payload []byte, _ string) error { + require.Equal(t, marshaledData, payload) + + return nil + }, + } + o, err := NewHostDriver(mockArgs) + require.NoError(t, err) + + err = o.SaveBlock(outportBlock) + require.NoError(t, err) +} + +func TestWebsocketOutportDriverNodePart_Close(t *testing.T) { + t.Parallel() + + closedWasCalled := false + args := getMockArgs() + args.SenderHost = &outportStubs.SenderHostStub{ + CloseCalled: func() error { + closedWasCalled = true + return nil + }, + } + + o, err := NewHostDriver(args) + require.NoError(t, err) + + err = o.Close() + require.NoError(t, err) + require.True(t, closedWasCalled) +} diff --git a/outport/host/errors.go b/outport/host/errors.go new file mode 100644 index 00000000000..de45f08acef --- /dev/null +++ b/outport/host/errors.go @@ -0,0 +1,9 @@ +package host + +import "errors" + +// ErrHostIsClosed signals that the host was closed while trying to perform actions +var ErrHostIsClosed = errors.New("server is closed") + +// ErrNilHost signals that a nil host has been provided +var ErrNilHost = errors.New("nil host provided") diff --git a/outport/host/interface.go b/outport/host/interface.go new file mode 100644 index 00000000000..8d0931abd9f --- /dev/null +++ b/outport/host/interface.go @@ -0,0 +1,8 @@ +package host + +// SenderHost defines the actions that a host sender should do +type SenderHost interface { + Send(payload []byte, topic string) error + Close() error + IsInterfaceNil() bool +} diff --git a/outport/interface.go b/outport/interface.go index 7efc264de51..07dc8e9ce58 100644 --- a/outport/interface.go +++ b/outport/interface.go @@ -1,21 +1,22 @@ package outport import ( - "github.com/multiversx/mx-chain-core-go/data" outportcore "github.com/multiversx/mx-chain-core-go/data/outport" + "github.com/multiversx/mx-chain-core-go/marshal" "github.com/multiversx/mx-chain-go/outport/process" ) // Driver is an interface for saving node specific data to other storage. // This could be an elastic search index, a MySql database or any other external services. 
type Driver interface { - SaveBlock(args *outportcore.ArgsSaveBlockData) error - RevertIndexedBlock(header data.HeaderHandler, body data.BodyHandler) error - SaveRoundsInfo(roundsInfos []*outportcore.RoundInfo) error - SaveValidatorsPubKeys(validatorsPubKeys map[uint32][][]byte, epoch uint32) error - SaveValidatorsRating(indexID string, infoRating []*outportcore.ValidatorRatingInfo) error - SaveAccounts(blockTimestamp uint64, acc map[string]*outportcore.AlteredAccount, shardID uint32) error - FinalizedBlock(headerHash []byte) error + SaveBlock(outportBlock *outportcore.OutportBlock) error + RevertIndexedBlock(blockData *outportcore.BlockData) error + SaveRoundsInfo(roundsInfos *outportcore.RoundsInfo) error + SaveValidatorsPubKeys(validatorsPubKeys *outportcore.ValidatorsPubKeys) error + SaveValidatorsRating(validatorsRating *outportcore.ValidatorsRating) error + SaveAccounts(accounts *outportcore.Accounts) error + FinalizedBlock(finalizedBlock *outportcore.FinalizedBlock) error + GetMarshaller() marshal.Marshalizer Close() error IsInterfaceNil() bool } @@ -23,13 +24,13 @@ type Driver interface { // OutportHandler is interface that defines what a proxy implementation should be able to do // The node is able to talk only with this interface type OutportHandler interface { - SaveBlock(args *outportcore.ArgsSaveBlockData) - RevertIndexedBlock(header data.HeaderHandler, body data.BodyHandler) - SaveRoundsInfo(roundsInfos []*outportcore.RoundInfo) - SaveValidatorsPubKeys(validatorsPubKeys map[uint32][][]byte, epoch uint32) - SaveValidatorsRating(indexID string, infoRating []*outportcore.ValidatorRatingInfo) - SaveAccounts(blockTimestamp uint64, acc map[string]*outportcore.AlteredAccount, shardID uint32) - FinalizedBlock(headerHash []byte) + SaveBlock(outportBlock *outportcore.OutportBlockWithHeaderAndBody) error + RevertIndexedBlock(blockData *outportcore.HeaderDataWithBody) error + SaveRoundsInfo(roundsInfos *outportcore.RoundsInfo) + SaveValidatorsPubKeys(validatorsPubKeys *outportcore.ValidatorsPubKeys) + SaveValidatorsRating(validatorsRating *outportcore.ValidatorsRating) + SaveAccounts(accounts *outportcore.Accounts) + FinalizedBlock(finalizedBlock *outportcore.FinalizedBlock) SubscribeDriver(driver Driver) error HasDrivers() bool Close() error @@ -38,6 +39,6 @@ type OutportHandler interface { // DataProviderOutport is an interface that defines what an implementation of data provider outport should be able to do type DataProviderOutport interface { - PrepareOutportSaveBlockData(arg process.ArgPrepareOutportSaveBlockData) (*outportcore.ArgsSaveBlockData, error) + PrepareOutportSaveBlockData(arg process.ArgPrepareOutportSaveBlockData) (*outportcore.OutportBlockWithHeaderAndBody, error) IsInterfaceNil() bool } diff --git a/outport/mock/driverStub.go b/outport/mock/driverStub.go index 7bf27d343fc..e9f4e4a56ab 100644 --- a/outport/mock/driverStub.go +++ b/outport/mock/driverStub.go @@ -1,24 +1,25 @@ package mock import ( - "github.com/multiversx/mx-chain-core-go/data" outportcore "github.com/multiversx/mx-chain-core-go/data/outport" + "github.com/multiversx/mx-chain-core-go/marshal" + "github.com/multiversx/mx-chain-go/testscommon" ) // DriverStub - type DriverStub struct { - SaveBlockCalled func(args *outportcore.ArgsSaveBlockData) error - RevertBlockCalled func(header data.HeaderHandler, body data.BodyHandler) error - SaveRoundsInfoCalled func(roundsInfos []*outportcore.RoundInfo) error - SaveValidatorsPubKeysCalled func(validatorsPubKeys map[uint32][][]byte, epoch uint32) error - 
SaveValidatorsRatingCalled func(indexID string, infoRating []*outportcore.ValidatorRatingInfo) error - SaveAccountsCalled func(timestamp uint64, acc map[string]*outportcore.AlteredAccount) error - FinalizedBlockCalled func(headerHash []byte) error + SaveBlockCalled func(outportBlock *outportcore.OutportBlock) error + RevertIndexedBlockCalled func(blockData *outportcore.BlockData) error + SaveRoundsInfoCalled func(roundsInfos *outportcore.RoundsInfo) error + SaveValidatorsPubKeysCalled func(validatorsPubKeys *outportcore.ValidatorsPubKeys) error + SaveValidatorsRatingCalled func(validatorsRating *outportcore.ValidatorsRating) error + SaveAccountsCalled func(accounts *outportcore.Accounts) error + FinalizedBlockCalled func(finalizedBlock *outportcore.FinalizedBlock) error CloseCalled func() error } // SaveBlock - -func (d *DriverStub) SaveBlock(args *outportcore.ArgsSaveBlockData) error { +func (d *DriverStub) SaveBlock(args *outportcore.OutportBlock) error { if d.SaveBlockCalled != nil { return d.SaveBlockCalled(args) } @@ -27,16 +28,16 @@ func (d *DriverStub) SaveBlock(args *outportcore.ArgsSaveBlockData) error { } // RevertIndexedBlock - -func (d *DriverStub) RevertIndexedBlock(header data.HeaderHandler, body data.BodyHandler) error { - if d.RevertBlockCalled != nil { - return d.RevertBlockCalled(header, body) +func (d *DriverStub) RevertIndexedBlock(blockData *outportcore.BlockData) error { + if d.RevertIndexedBlockCalled != nil { + return d.RevertIndexedBlockCalled(blockData) } return nil } // SaveRoundsInfo - -func (d *DriverStub) SaveRoundsInfo(roundsInfos []*outportcore.RoundInfo) error { +func (d *DriverStub) SaveRoundsInfo(roundsInfos *outportcore.RoundsInfo) error { if d.SaveRoundsInfoCalled != nil { return d.SaveRoundsInfoCalled(roundsInfos) } @@ -45,41 +46,46 @@ func (d *DriverStub) SaveRoundsInfo(roundsInfos []*outportcore.RoundInfo) error } // SaveValidatorsPubKeys - -func (d *DriverStub) SaveValidatorsPubKeys(validatorsPubKeys map[uint32][][]byte, epoch uint32) error { +func (d *DriverStub) SaveValidatorsPubKeys(validatorsPubKeys *outportcore.ValidatorsPubKeys) error { if d.SaveValidatorsPubKeysCalled != nil { - return d.SaveValidatorsPubKeysCalled(validatorsPubKeys, epoch) + return d.SaveValidatorsPubKeysCalled(validatorsPubKeys) } return nil } // SaveValidatorsRating - -func (d *DriverStub) SaveValidatorsRating(indexID string, infoRating []*outportcore.ValidatorRatingInfo) error { +func (d *DriverStub) SaveValidatorsRating(validatorsRating *outportcore.ValidatorsRating) error { if d.SaveValidatorsRatingCalled != nil { - return d.SaveValidatorsRatingCalled(indexID, infoRating) + return d.SaveValidatorsRatingCalled(validatorsRating) } return nil } // SaveAccounts - -func (d *DriverStub) SaveAccounts(timestamp uint64, acc map[string]*outportcore.AlteredAccount, _ uint32) error { +func (d *DriverStub) SaveAccounts(accounts *outportcore.Accounts) error { if d.SaveAccountsCalled != nil { - return d.SaveAccountsCalled(timestamp, acc) + return d.SaveAccountsCalled(accounts) } return nil } // FinalizedBlock - -func (d *DriverStub) FinalizedBlock(headerHash []byte) error { +func (d *DriverStub) FinalizedBlock(finalizedBlock *outportcore.FinalizedBlock) error { if d.FinalizedBlockCalled != nil { - return d.FinalizedBlockCalled(headerHash) + return d.FinalizedBlockCalled(finalizedBlock) } return nil } +// GetMarshaller - +func (d *DriverStub) GetMarshaller() marshal.Marshalizer { + return testscommon.MarshalizerMock{} +} + // Close - func (d *DriverStub) Close() error { if 
d.CloseCalled != nil { diff --git a/outport/mock/executionOrderHandlerStub.go b/outport/mock/executionOrderHandlerStub.go index 9d7805b65cf..96db500f223 100644 --- a/outport/mock/executionOrderHandlerStub.go +++ b/outport/mock/executionOrderHandlerStub.go @@ -11,7 +11,7 @@ type ExecutionOrderHandlerStub struct { // PutExecutionOrderInTransactionPool - func (e *ExecutionOrderHandlerStub) PutExecutionOrderInTransactionPool( - _ *outport.Pool, + _ *outport.TransactionPool, _ data.HeaderHandler, _ data.BodyHandler, _ data.HeaderHandler, diff --git a/outport/notifier/errors.go b/outport/notifier/errors.go index 7c6fff363ac..154faf4c1dd 100644 --- a/outport/notifier/errors.go +++ b/outport/notifier/errors.go @@ -4,9 +4,6 @@ import ( "errors" ) -// ErrNilTransactionsPool signals that a nil transactions pool was provided -var ErrNilTransactionsPool = errors.New("nil transactions pool") - // ErrInvalidValue signals that an invalid value has been provided var ErrInvalidValue = errors.New("invalid value") @@ -16,8 +13,5 @@ var ErrNilHTTPClientWrapper = errors.New("nil http client wrapper") // ErrNilMarshaller signals that a nil marshaller has been provided var ErrNilMarshaller = errors.New("nil marshaller") -// ErrNilPubKeyConverter signals that a nil pubkey converter has been provided -var ErrNilPubKeyConverter = errors.New("nil pub key converter") - -// ErrNilHasher is raised when a valid hasher is expected but nil used -var ErrNilHasher = errors.New("hasher is nil") +// ErrNilBlockContainerHandler signals that a nil block container handler has been provided +var ErrNilBlockContainerHandler = errors.New("nil block container handler") diff --git a/outport/notifier/eventNotifier.go b/outport/notifier/eventNotifier.go index 158c11c2841..b8e8b007258 100644 --- a/outport/notifier/eventNotifier.go +++ b/outport/notifier/eventNotifier.go @@ -6,12 +6,10 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" - nodeData "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-core-go/data/block" "github.com/multiversx/mx-chain-core-go/data/outport" - "github.com/multiversx/mx-chain-core-go/hashing" "github.com/multiversx/mx-chain-core-go/marshal" - "github.com/multiversx/mx-chain-core-go/websocketOutportDriver" - outportSenderData "github.com/multiversx/mx-chain-core-go/websocketOutportDriver/data" logger "github.com/multiversx/mx-chain-logger-go" ) @@ -31,24 +29,17 @@ type RevertBlock struct { Epoch uint32 `json:"epoch"` } -// FinalizedBlock holds finalized block data -type FinalizedBlock struct { - Hash string `json:"hash"` -} - type eventNotifier struct { - httpClient httpClientHandler - marshalizer marshal.Marshalizer - hasher hashing.Hasher - pubKeyConverter core.PubkeyConverter + httpClient httpClientHandler + marshalizer marshal.Marshalizer + blockContainer BlockContainerHandler } // ArgsEventNotifier defines the arguments needed for event notifier creation type ArgsEventNotifier struct { - HttpClient httpClientHandler - Marshaller marshal.Marshalizer - Hasher hashing.Hasher - PubKeyConverter core.PubkeyConverter + HttpClient httpClientHandler + Marshaller marshal.Marshalizer + BlockContainer BlockContainerHandler } // NewEventNotifier creates a new instance of the eventNotifier @@ -60,10 +51,9 @@ func NewEventNotifier(args ArgsEventNotifier) (*eventNotifier, error) { } return &eventNotifier{ - httpClient: args.HttpClient, - marshalizer: args.Marshaller, - hasher: args.Hasher, -
pubKeyConverter: args.PubKeyConverter, + httpClient: args.HttpClient, + marshalizer: args.Marshaller, + blockContainer: args.BlockContainer, }, nil } @@ -74,29 +64,18 @@ func checkEventNotifierArgs(args ArgsEventNotifier) error { if check.IfNil(args.Marshaller) { return ErrNilMarshaller } - if check.IfNil(args.Hasher) { - return ErrNilHasher - } - if check.IfNil(args.PubKeyConverter) { - return ErrNilPubKeyConverter + if check.IfNilReflect(args.BlockContainer) { + return ErrNilBlockContainerHandler } return nil } // SaveBlock converts block data in order to be pushed to subscribers -func (en *eventNotifier) SaveBlock(args *outport.ArgsSaveBlockData) error { - log.Debug("eventNotifier: SaveBlock called at block", "block hash", args.HeaderHash) - if args.TransactionsPool == nil { - return ErrNilTransactionsPool - } +func (en *eventNotifier) SaveBlock(args *outport.OutportBlock) error { + log.Debug("eventNotifier: SaveBlock called at block", "block hash", args.BlockData.HeaderHash) - argsSaveBlock := outportSenderData.ArgsSaveBlock{ - HeaderType: core.GetHeaderType(args.Header), - ArgsSaveBlockData: websocketOutportDriver.PrepareArgsSaveBlock(*args), - } - - err := en.httpClient.Post(pushEventEndpoint, argsSaveBlock) + err := en.httpClient.Post(pushEventEndpoint, args) if err != nil { return fmt.Errorf("%w in eventNotifier.SaveBlock while posting block data", err) } @@ -105,17 +84,17 @@ func (en *eventNotifier) SaveBlock(args *outport.ArgsSaveBlockData) error { } // RevertIndexedBlock converts revert data in order to be pushed to subscribers -func (en *eventNotifier) RevertIndexedBlock(header nodeData.HeaderHandler, _ nodeData.BodyHandler) error { - blockHash, err := core.CalculateHash(en.marshalizer, en.hasher, header) +func (en *eventNotifier) RevertIndexedBlock(blockData *outport.BlockData) error { + headerHandler, err := en.getHeaderFromBytes(core.HeaderType(blockData.HeaderType), blockData.HeaderBytes) if err != nil { - return fmt.Errorf("%w in eventNotifier.RevertIndexedBlock while computing the block hash", err) + return err } revertBlock := RevertBlock{ - Hash: hex.EncodeToString(blockHash), - Nonce: header.GetNonce(), - Round: header.GetRound(), - Epoch: header.GetEpoch(), + Hash: hex.EncodeToString(blockData.HeaderHash), + Nonce: headerHandler.GetNonce(), + Round: headerHandler.GetRound(), + Epoch: headerHandler.GetEpoch(), } err = en.httpClient.Post(revertEventsEndpoint, revertBlock) @@ -127,11 +106,7 @@ func (en *eventNotifier) RevertIndexedBlock(header nodeData.HeaderHandler, _ nod } // FinalizedBlock converts finalized block data in order to push it to subscribers -func (en *eventNotifier) FinalizedBlock(headerHash []byte) error { - finalizedBlock := FinalizedBlock{ - Hash: hex.EncodeToString(headerHash), - } - +func (en *eventNotifier) FinalizedBlock(finalizedBlock *outport.FinalizedBlock) error { err := en.httpClient.Post(finalizedEventsEndpoint, finalizedBlock) if err != nil { return fmt.Errorf("%w in eventNotifier.FinalizedBlock while posting event data", err) @@ -141,25 +116,30 @@ func (en *eventNotifier) FinalizedBlock(headerHash []byte) error { } // SaveRoundsInfo returns nil -func (en *eventNotifier) SaveRoundsInfo(_ []*outport.RoundInfo) error { +func (en *eventNotifier) SaveRoundsInfo(_ *outport.RoundsInfo) error { return nil } // SaveValidatorsRating returns nil -func (en *eventNotifier) SaveValidatorsRating(_ string, _ []*outport.ValidatorRatingInfo) error { +func (en *eventNotifier) SaveValidatorsRating(_ *outport.ValidatorsRating) error { return nil } // 
SaveValidatorsPubKeys returns nil -func (en *eventNotifier) SaveValidatorsPubKeys(_ map[uint32][][]byte, _ uint32) error { +func (en *eventNotifier) SaveValidatorsPubKeys(_ *outport.ValidatorsPubKeys) error { return nil } // SaveAccounts does nothing -func (en *eventNotifier) SaveAccounts(_ uint64, _ map[string]*outport.AlteredAccount, _ uint32) error { +func (en *eventNotifier) SaveAccounts(_ *outport.Accounts) error { return nil } +// GetMarshaller returns internal marshaller +func (en *eventNotifier) GetMarshaller() marshal.Marshalizer { + return en.marshalizer +} + // IsInterfaceNil returns whether the interface is nil func (en *eventNotifier) IsInterfaceNil() bool { return en == nil @@ -169,3 +149,12 @@ func (en *eventNotifier) IsInterfaceNil() bool { func (en *eventNotifier) Close() error { return nil } + +func (en *eventNotifier) getHeaderFromBytes(headerType core.HeaderType, headerBytes []byte) (header data.HeaderHandler, err error) { + creator, err := en.blockContainer.Get(headerType) + if err != nil { + return nil, err + } + + return block.GetHeaderFromBytes(en.marshalizer, creator, headerBytes) +} diff --git a/outport/notifier/eventNotifier_test.go b/outport/notifier/eventNotifier_test.go index b30766b59e8..60a3d354206 100644 --- a/outport/notifier/eventNotifier_test.go +++ b/outport/notifier/eventNotifier_test.go @@ -1,28 +1,26 @@ package notifier_test import ( - "encoding/hex" "fmt" "testing" - "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/data/block" "github.com/multiversx/mx-chain-core-go/data/outport" - outportSenderData "github.com/multiversx/mx-chain-core-go/websocketOutportDriver/data" + "github.com/multiversx/mx-chain-core-go/data/transaction" "github.com/multiversx/mx-chain-go/outport/mock" "github.com/multiversx/mx-chain-go/outport/notifier" "github.com/multiversx/mx-chain-go/testscommon" - "github.com/multiversx/mx-chain-go/testscommon/hashingMocks" + outportStub "github.com/multiversx/mx-chain-go/testscommon/outport" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func createMockEventNotifierArgs() notifier.ArgsEventNotifier { return notifier.ArgsEventNotifier{ - HttpClient: &mock.HTTPClientStub{}, - Marshaller: &testscommon.MarshalizerMock{}, - Hasher: &hashingMocks.HasherMock{}, - PubKeyConverter: &testscommon.PubkeyConverterMock{}, + HttpClient: &mock.HTTPClientStub{}, + Marshaller: &testscommon.MarshalizerMock{}, + BlockContainer: &outportStub.BlockContainerStub{}, } } @@ -51,26 +49,15 @@ func TestNewEventNotifier(t *testing.T) { require.Equal(t, notifier.ErrNilMarshaller, err) }) - t.Run("nil hasher", func(t *testing.T) { + t.Run("nil block container", func(t *testing.T) { t.Parallel() args := createMockEventNotifierArgs() - args.Hasher = nil + args.BlockContainer = nil en, err := notifier.NewEventNotifier(args) require.Nil(t, en) - require.Equal(t, notifier.ErrNilHasher, err) - }) - - t.Run("nil pub key converter", func(t *testing.T) { - t.Parallel() - - args := createMockEventNotifierArgs() - args.PubKeyConverter = nil - - en, err := notifier.NewEventNotifier(args) - require.Nil(t, en) - require.Equal(t, notifier.ErrNilPubKeyConverter, err) + require.Equal(t, notifier.ErrNilBlockContainerHandler, err) }) t.Run("should work", func(t *testing.T) { @@ -93,15 +80,15 @@ func TestSaveBlock(t *testing.T) { wasCalled := false args.HttpClient = &mock.HTTPClientStub{ PostCalled: func(route string, payload interface{}) error { - saveBlockData := 
payload.(outportSenderData.ArgsSaveBlock) + saveBlockData := payload.(*outport.OutportBlock) - require.Equal(t, hex.EncodeToString([]byte(txHash1)), saveBlockData.TransactionsPool.Logs[0].TxHash) - for txHash := range saveBlockData.TransactionsPool.Txs { - require.Equal(t, hex.EncodeToString([]byte(txHash1)), txHash) + require.Equal(t, saveBlockData.TransactionPool.Logs[0].TxHash, txHash1) + for txHash := range saveBlockData.TransactionPool.Transactions { + require.Equal(t, txHash1, txHash) } - for scrHash := range saveBlockData.TransactionsPool.Scrs { - require.Equal(t, hex.EncodeToString([]byte(scrHash1)), scrHash) + for scrHash := range saveBlockData.TransactionPool.SmartContractResults { + require.Equal(t, scrHash1, scrHash) } wasCalled = true @@ -111,18 +98,21 @@ func TestSaveBlock(t *testing.T) { en, _ := notifier.NewEventNotifier(args) - saveBlockData := &outport.ArgsSaveBlockData{ - HeaderHash: []byte{}, - TransactionsPool: &outport.Pool{ - Txs: map[string]data.TransactionHandlerWithGasUsedAndFee{ + saveBlockData := &outport.OutportBlock{ + BlockData: &outport.BlockData{ + HeaderHash: []byte{}, + }, + TransactionPool: &outport.TransactionPool{ + Transactions: map[string]*outport.TxInfo{ txHash1: nil, }, - Scrs: map[string]data.TransactionHandlerWithGasUsedAndFee{ + SmartContractResults: map[string]*outport.SCRInfo{ scrHash1: nil, }, - Logs: []*data.LogData{ + Logs: []*outport.LogData{ { TxHash: txHash1, + Log: &transaction.Log{}, }, }, }, @@ -146,6 +136,11 @@ func TestRevertIndexedBlock(t *testing.T) { return nil }, } + args.BlockContainer = &outportStub.BlockContainerStub{ + GetCalled: func(headerType core.HeaderType) (block.EmptyBlockCreator, error) { + return block.NewEmptyHeaderCreator(), nil + }, + } en, _ := notifier.NewEventNotifier(args) @@ -154,9 +149,15 @@ func TestRevertIndexedBlock(t *testing.T) { Round: 2, Epoch: 3, } - err := en.RevertIndexedBlock(header, &block.Body{}) + headerBytes, _ := args.Marshaller.Marshal(header) + + err := en.RevertIndexedBlock(&outport.BlockData{ + HeaderBytes: headerBytes, + Body: &block.Body{}, + HeaderType: string(core.ShardHeaderV1), + }, + ) require.Nil(t, err) - require.True(t, wasCalled) } @@ -176,7 +177,7 @@ func TestFinalizedBlock(t *testing.T) { en, _ := notifier.NewEventNotifier(args) hash := []byte("headerHash") - err := en.FinalizedBlock(hash) + err := en.FinalizedBlock(&outport.FinalizedBlock{HeaderHash: hash}) require.Nil(t, err) require.True(t, wasCalled) @@ -199,13 +200,13 @@ func TestMockFunctions(t *testing.T) { err = en.SaveRoundsInfo(nil) require.Nil(t, err) - err = en.SaveValidatorsRating("", nil) + err = en.SaveValidatorsRating(nil) require.Nil(t, err) - err = en.SaveValidatorsPubKeys(nil, 0) + err = en.SaveValidatorsPubKeys(nil) require.Nil(t, err) - err = en.SaveAccounts(0, nil, 0) + err = en.SaveAccounts(nil) require.Nil(t, err) err = en.Close() diff --git a/outport/notifier/interface.go b/outport/notifier/interface.go index 52bdf53eb52..2fd931d0295 100644 --- a/outport/notifier/interface.go +++ b/outport/notifier/interface.go @@ -1,6 +1,16 @@ package notifier +import ( + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/data/block" +) + type httpClientHandler interface { Post(route string, payload interface{}) error IsInterfaceNil() bool } + +// BlockContainerHandler defines what a block container should be able to do +type BlockContainerHandler interface { + Get(headerType core.HeaderType) (block.EmptyBlockCreator, error) +} diff --git a/outport/outport.go b/outport/outport.go 
index b0366e49c44..542c867b28f 100644 --- a/outport/outport.go +++ b/outport/outport.go @@ -7,7 +7,6 @@ import ( "time" "github.com/multiversx/mx-chain-core-go/core/check" - "github.com/multiversx/mx-chain-core-go/data" outportcore "github.com/multiversx/mx-chain-core-go/data/outport" logger "github.com/multiversx/mx-chain-logger-go" ) @@ -44,13 +43,51 @@ func NewOutport(retrialInterval time.Duration) (*outport, error) { } // SaveBlock will save block for every driver -func (o *outport) SaveBlock(args *outportcore.ArgsSaveBlockData) { +func (o *outport) SaveBlock(args *outportcore.OutportBlockWithHeaderAndBody) error { o.mutex.RLock() defer o.mutex.RUnlock() + if args == nil { + return fmt.Errorf("outport.SaveBlock error: %w", errNilSaveBlockArgs) + } + for _, driver := range o.drivers { - o.saveBlockBlocking(args, driver) + blockData, err := prepareBlockData(args.HeaderDataWithBody, driver) + if err != nil { + return err + } + + args.OutportBlock.BlockData = blockData + o.saveBlockBlocking(args.OutportBlock, driver) + } + + return nil +} + +func prepareBlockData( + headerBodyData *outportcore.HeaderDataWithBody, + driver Driver, +) (*outportcore.BlockData, error) { + if headerBodyData == nil { + return nil, fmt.Errorf("outport.prepareBlockData error: %w", errNilHeaderAndBodyArgs) } + + marshaller := driver.GetMarshaller() + headerBytes, headerType, err := outportcore.GetHeaderBytesAndType(marshaller, headerBodyData.Header) + if err != nil { + return nil, err + } + body, err := outportcore.GetBody(headerBodyData.Body) + if err != nil { + return nil, err + } + + return &outportcore.BlockData{ + HeaderBytes: headerBytes, + HeaderType: string(headerType), + HeaderHash: headerBodyData.HeaderHash, + Body: body, + }, nil } func (o *outport) monitorCompletionOnDriver(function string, driver Driver) chan struct{} { @@ -77,7 +114,7 @@ func (o *outport) monitorCompletionOnDriver(function string, driver Driver) chan return ch } -func (o *outport) saveBlockBlocking(args *outportcore.ArgsSaveBlockData, driver Driver) { +func (o *outport) saveBlockBlocking(args *outportcore.OutportBlock, driver Driver) { ch := o.monitorCompletionOnDriver("saveBlockBlocking", driver) defer close(ch) @@ -108,21 +145,28 @@ func (o *outport) shouldTerminate() bool { } // RevertIndexedBlock will revert block for every driver -func (o *outport) RevertIndexedBlock(header data.HeaderHandler, body data.BodyHandler) { +func (o *outport) RevertIndexedBlock(headerDataWithBody *outportcore.HeaderDataWithBody) error { o.mutex.RLock() defer o.mutex.RUnlock() for _, driver := range o.drivers { - o.revertIndexedBlockBlocking(header, body, driver) + blockData, err := prepareBlockData(headerDataWithBody, driver) + if err != nil { + return err + } + + o.revertIndexedBlockBlocking(blockData, driver) } + + return nil } -func (o *outport) revertIndexedBlockBlocking(header data.HeaderHandler, body data.BodyHandler, driver Driver) { +func (o *outport) revertIndexedBlockBlocking(blockData *outportcore.BlockData, driver Driver) { ch := o.monitorCompletionOnDriver("revertIndexedBlockBlocking", driver) defer close(ch) for { - err := driver.RevertIndexedBlock(header, body) + err := driver.RevertIndexedBlock(blockData) if err == nil { return } @@ -139,7 +183,7 @@ func (o *outport) revertIndexedBlockBlocking(header data.HeaderHandler, body dat } // SaveRoundsInfo will save rounds information for every driver -func (o *outport) SaveRoundsInfo(roundsInfo []*outportcore.RoundInfo) { +func (o *outport) SaveRoundsInfo(roundsInfo *outportcore.RoundsInfo) { 
o.mutex.RLock() defer o.mutex.RUnlock() @@ -148,7 +192,7 @@ func (o *outport) SaveRoundsInfo(roundsInfo []*outportcore.RoundInfo) { } } -func (o *outport) saveRoundsInfoBlocking(roundsInfo []*outportcore.RoundInfo, driver Driver) { +func (o *outport) saveRoundsInfoBlocking(roundsInfo *outportcore.RoundsInfo, driver Driver) { ch := o.monitorCompletionOnDriver("saveRoundsInfoBlocking", driver) defer close(ch) @@ -170,21 +214,21 @@ func (o *outport) saveRoundsInfoBlocking(roundsInfo []*outportcore.RoundInfo, dr } // SaveValidatorsPubKeys will save validators public keys for every driver -func (o *outport) SaveValidatorsPubKeys(validatorsPubKeys map[uint32][][]byte, epoch uint32) { +func (o *outport) SaveValidatorsPubKeys(validatorsPubKeys *outportcore.ValidatorsPubKeys) { o.mutex.RLock() defer o.mutex.RUnlock() for _, driver := range o.drivers { - o.saveValidatorsPubKeysBlocking(validatorsPubKeys, epoch, driver) + o.saveValidatorsPubKeysBlocking(validatorsPubKeys, driver) } } -func (o *outport) saveValidatorsPubKeysBlocking(validatorsPubKeys map[uint32][][]byte, epoch uint32, driver Driver) { +func (o *outport) saveValidatorsPubKeysBlocking(validatorsPubKeys *outportcore.ValidatorsPubKeys, driver Driver) { ch := o.monitorCompletionOnDriver("saveValidatorsPubKeysBlocking", driver) defer close(ch) for { - err := driver.SaveValidatorsPubKeys(validatorsPubKeys, epoch) + err := driver.SaveValidatorsPubKeys(validatorsPubKeys) if err == nil { return } @@ -201,21 +245,21 @@ func (o *outport) saveValidatorsPubKeysBlocking(validatorsPubKeys map[uint32][][ } // SaveValidatorsRating will save validators rating for every driver -func (o *outport) SaveValidatorsRating(indexID string, infoRating []*outportcore.ValidatorRatingInfo) { +func (o *outport) SaveValidatorsRating(validatorsRating *outportcore.ValidatorsRating) { o.mutex.RLock() defer o.mutex.RUnlock() for _, driver := range o.drivers { - o.saveValidatorsRatingBlocking(indexID, infoRating, driver) + o.saveValidatorsRatingBlocking(validatorsRating, driver) } } -func (o *outport) saveValidatorsRatingBlocking(indexID string, infoRating []*outportcore.ValidatorRatingInfo, driver Driver) { +func (o *outport) saveValidatorsRatingBlocking(validatorsRating *outportcore.ValidatorsRating, driver Driver) { ch := o.monitorCompletionOnDriver("saveValidatorsRatingBlocking", driver) defer close(ch) for { - err := driver.SaveValidatorsRating(indexID, infoRating) + err := driver.SaveValidatorsRating(validatorsRating) if err == nil { return } @@ -232,21 +276,21 @@ func (o *outport) saveValidatorsRatingBlocking(indexID string, infoRating []*out } // SaveAccounts will save accounts for every driver -func (o *outport) SaveAccounts(blockTimestamp uint64, acc map[string]*outportcore.AlteredAccount, shardID uint32) { +func (o *outport) SaveAccounts(accounts *outportcore.Accounts) { o.mutex.RLock() defer o.mutex.RUnlock() for _, driver := range o.drivers { - o.saveAccountsBlocking(blockTimestamp, acc, shardID, driver) + o.saveAccountsBlocking(accounts, driver) } } -func (o *outport) saveAccountsBlocking(blockTimestamp uint64, acc map[string]*outportcore.AlteredAccount, shardID uint32, driver Driver) { +func (o *outport) saveAccountsBlocking(accounts *outportcore.Accounts, driver Driver) { ch := o.monitorCompletionOnDriver("saveAccountsBlocking", driver) defer close(ch) for { - err := driver.SaveAccounts(blockTimestamp, acc, shardID) + err := driver.SaveAccounts(accounts) if err == nil { return } @@ -263,21 +307,21 @@ func (o *outport) saveAccountsBlocking(blockTimestamp 
uint64, acc map[string]*ou } // FinalizedBlock will call all the drivers that a block is finalized -func (o *outport) FinalizedBlock(headerHash []byte) { +func (o *outport) FinalizedBlock(finalizedBlock *outportcore.FinalizedBlock) { o.mutex.RLock() defer o.mutex.RUnlock() for _, driver := range o.drivers { - o.finalizedBlockBlocking(headerHash, driver) + o.finalizedBlockBlocking(finalizedBlock, driver) } } -func (o *outport) finalizedBlockBlocking(headerHash []byte, driver Driver) { +func (o *outport) finalizedBlockBlocking(finalizedBlock *outportcore.FinalizedBlock, driver Driver) { ch := o.monitorCompletionOnDriver("finalizedBlockBlocking", driver) defer close(ch) for { - err := driver.FinalizedBlock(headerHash) + err := driver.FinalizedBlock(finalizedBlock) if err == nil { return } diff --git a/outport/outport_test.go b/outport/outport_test.go index e55b7203765..b83603c24af 100644 --- a/outport/outport_test.go +++ b/outport/outport_test.go @@ -9,7 +9,7 @@ import ( "github.com/multiversx/mx-chain-core-go/core/atomic" "github.com/multiversx/mx-chain-core-go/core/check" - "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-core-go/data/block" outportcore "github.com/multiversx/mx-chain-core-go/data/outport" "github.com/multiversx/mx-chain-go/outport/mock" logger "github.com/multiversx/mx-chain-logger-go" @@ -19,6 +19,17 @@ import ( const counterPositionInLogMessage = 5 +func createSaveBlockArgs() *outportcore.OutportBlockWithHeaderAndBody { + return &outportcore.OutportBlockWithHeaderAndBody{ + OutportBlock: &outportcore.OutportBlock{}, + HeaderDataWithBody: &outportcore.HeaderDataWithBody{ + Body: &block.Body{}, + Header: &block.HeaderV2{}, + HeaderHash: []byte("hash"), + }, + } +} + func TestNewOutport(t *testing.T) { t.Parallel() @@ -43,7 +54,7 @@ func TestOutport_SaveAccounts(t *testing.T) { numCalled1 := 0 numCalled2 := 0 driver1 := &mock.DriverStub{ - SaveAccountsCalled: func(blockTimestamp uint64, accs map[string]*outportcore.AlteredAccount) error { + SaveAccountsCalled: func(accounts *outportcore.Accounts) error { numCalled1++ if numCalled1 < 10 { return expectedError @@ -53,7 +64,7 @@ func TestOutport_SaveAccounts(t *testing.T) { }, } driver2 := &mock.DriverStub{ - SaveAccountsCalled: func(blockTimestamp uint64, accs map[string]*outportcore.AlteredAccount) error { + SaveAccountsCalled: func(accounts *outportcore.Accounts) error { numCalled2++ return nil }, @@ -69,12 +80,12 @@ func TestOutport_SaveAccounts(t *testing.T) { } } - outportHandler.SaveAccounts(0, map[string]*outportcore.AlteredAccount{}, 0) + outportHandler.SaveAccounts(&outportcore.Accounts{}) time.Sleep(time.Second) _ = outportHandler.SubscribeDriver(driver1) _ = outportHandler.SubscribeDriver(driver2) - outportHandler.SaveAccounts(0, map[string]*outportcore.AlteredAccount{}, 0) + outportHandler.SaveAccounts(&outportcore.Accounts{}) time.Sleep(time.Second) assert.Equal(t, 10, numCalled1) @@ -89,7 +100,7 @@ func TestOutport_SaveBlock(t *testing.T) { numCalled1 := 0 numCalled2 := 0 driver1 := &mock.DriverStub{ - SaveBlockCalled: func(args *outportcore.ArgsSaveBlockData) error { + SaveBlockCalled: func(args *outportcore.OutportBlock) error { numCalled1++ if numCalled1 < 10 { return expectedError @@ -99,7 +110,7 @@ func TestOutport_SaveBlock(t *testing.T) { }, } driver2 := &mock.DriverStub{ - SaveBlockCalled: func(args *outportcore.ArgsSaveBlockData) error { + SaveBlockCalled: func(args *outportcore.OutportBlock) error { numCalled2++ return nil }, @@ -115,11 +126,12 @@ func 
TestOutport_SaveBlock(t *testing.T) { } } - outportHandler.SaveBlock(nil) + args := createSaveBlockArgs() + _ = outportHandler.SaveBlock(args) _ = outportHandler.SubscribeDriver(driver1) _ = outportHandler.SubscribeDriver(driver2) - outportHandler.SaveBlock(nil) + _ = outportHandler.SaveBlock(args) time.Sleep(time.Second) assert.Equal(t, 10, numCalled1) @@ -134,7 +146,7 @@ func TestOutport_SaveRoundsInfo(t *testing.T) { numCalled1 := 0 numCalled2 := 0 driver1 := &mock.DriverStub{ - SaveRoundsInfoCalled: func(roundsInfos []*outportcore.RoundInfo) error { + SaveRoundsInfoCalled: func(roundsInfos *outportcore.RoundsInfo) error { numCalled1++ if numCalled1 < 10 { return expectedError @@ -144,7 +156,7 @@ func TestOutport_SaveRoundsInfo(t *testing.T) { }, } driver2 := &mock.DriverStub{ - SaveRoundsInfoCalled: func(roundsInfos []*outportcore.RoundInfo) error { + SaveRoundsInfoCalled: func(roundsInfos *outportcore.RoundsInfo) error { numCalled2++ return nil }, @@ -179,7 +191,7 @@ func TestOutport_SaveValidatorsPubKeys(t *testing.T) { numCalled1 := 0 numCalled2 := 0 driver1 := &mock.DriverStub{ - SaveValidatorsPubKeysCalled: func(validatorsPubKeys map[uint32][][]byte, epoch uint32) error { + SaveValidatorsPubKeysCalled: func(validatorsPubKeys *outportcore.ValidatorsPubKeys) error { numCalled1++ if numCalled1 < 10 { return expectedError @@ -189,7 +201,7 @@ func TestOutport_SaveValidatorsPubKeys(t *testing.T) { }, } driver2 := &mock.DriverStub{ - SaveValidatorsPubKeysCalled: func(validatorsPubKeys map[uint32][][]byte, epoch uint32) error { + SaveValidatorsPubKeysCalled: func(validatorsPubKeys *outportcore.ValidatorsPubKeys) error { numCalled2++ return nil }, @@ -205,13 +217,13 @@ func TestOutport_SaveValidatorsPubKeys(t *testing.T) { } } - outportHandler.SaveValidatorsPubKeys(nil, 0) + outportHandler.SaveValidatorsPubKeys(&outportcore.ValidatorsPubKeys{}) time.Sleep(time.Second) _ = outportHandler.SubscribeDriver(driver1) _ = outportHandler.SubscribeDriver(driver2) - outportHandler.SaveValidatorsPubKeys(nil, 0) + outportHandler.SaveValidatorsPubKeys(&outportcore.ValidatorsPubKeys{}) time.Sleep(time.Second) assert.Equal(t, 10, numCalled1) @@ -226,7 +238,7 @@ func TestOutport_SaveValidatorsRating(t *testing.T) { numCalled1 := 0 numCalled2 := 0 driver1 := &mock.DriverStub{ - SaveValidatorsRatingCalled: func(indexID string, infoRating []*outportcore.ValidatorRatingInfo) error { + SaveValidatorsRatingCalled: func(validatorsRating *outportcore.ValidatorsRating) error { numCalled1++ if numCalled1 < 10 { return expectedError @@ -236,7 +248,7 @@ func TestOutport_SaveValidatorsRating(t *testing.T) { }, } driver2 := &mock.DriverStub{ - SaveValidatorsRatingCalled: func(indexID string, infoRating []*outportcore.ValidatorRatingInfo) error { + SaveValidatorsRatingCalled: func(validatorsRating *outportcore.ValidatorsRating) error { numCalled2++ return nil }, @@ -252,13 +264,13 @@ func TestOutport_SaveValidatorsRating(t *testing.T) { } } - outportHandler.SaveValidatorsRating("", nil) + outportHandler.SaveValidatorsRating(&outportcore.ValidatorsRating{}) time.Sleep(time.Second) _ = outportHandler.SubscribeDriver(driver1) _ = outportHandler.SubscribeDriver(driver2) - outportHandler.SaveValidatorsRating("", nil) + outportHandler.SaveValidatorsRating(&outportcore.ValidatorsRating{}) time.Sleep(time.Second) assert.Equal(t, 10, numCalled1) @@ -273,7 +285,7 @@ func TestOutport_RevertIndexedBlock(t *testing.T) { numCalled1 := 0 numCalled2 := 0 driver1 := &mock.DriverStub{ - RevertBlockCalled: func(header data.HeaderHandler,
body data.BodyHandler) error { + RevertIndexedBlockCalled: func(blockData *outportcore.BlockData) error { numCalled1++ if numCalled1 < 10 { return expectedError @@ -283,7 +295,7 @@ func TestOutport_RevertIndexedBlock(t *testing.T) { }, } driver2 := &mock.DriverStub{ - RevertBlockCalled: func(header data.HeaderHandler, body data.BodyHandler) error { + RevertIndexedBlockCalled: func(blockData *outportcore.BlockData) error { numCalled2++ return nil }, @@ -299,13 +311,14 @@ func TestOutport_RevertIndexedBlock(t *testing.T) { } } - outportHandler.RevertIndexedBlock(nil, nil) + args := createSaveBlockArgs() + _ = outportHandler.RevertIndexedBlock(args.HeaderDataWithBody) time.Sleep(time.Second) _ = outportHandler.SubscribeDriver(driver1) _ = outportHandler.SubscribeDriver(driver2) - outportHandler.RevertIndexedBlock(nil, nil) + _ = outportHandler.RevertIndexedBlock(args.HeaderDataWithBody) time.Sleep(time.Second) assert.Equal(t, 10, numCalled1) @@ -320,7 +333,7 @@ func TestOutport_FinalizedBlock(t *testing.T) { numCalled1 := 0 numCalled2 := 0 driver1 := &mock.DriverStub{ - FinalizedBlockCalled: func(headerHash []byte) error { + FinalizedBlockCalled: func(finalizedBlock *outportcore.FinalizedBlock) error { numCalled1++ if numCalled1 < 10 { return expectedError @@ -330,7 +343,7 @@ func TestOutport_FinalizedBlock(t *testing.T) { }, } driver2 := &mock.DriverStub{ - FinalizedBlockCalled: func(headerHash []byte) error { + FinalizedBlockCalled: func(finalizedBlock *outportcore.FinalizedBlock) error { numCalled2++ return nil }, @@ -414,25 +427,25 @@ func TestOutport_CloseWhileDriverIsStuckInContinuousErrors(t *testing.T) { localErr := errors.New("driver stuck in error") driver1 := &mock.DriverStub{ - SaveBlockCalled: func(args *outportcore.ArgsSaveBlockData) error { + SaveBlockCalled: func(args *outportcore.OutportBlock) error { return localErr }, - RevertBlockCalled: func(header data.HeaderHandler, body data.BodyHandler) error { + RevertIndexedBlockCalled: func(blockData *outportcore.BlockData) error { return localErr }, - SaveRoundsInfoCalled: func(roundsInfos []*outportcore.RoundInfo) error { + SaveRoundsInfoCalled: func(roundsInfos *outportcore.RoundsInfo) error { return localErr }, - SaveValidatorsPubKeysCalled: func(validatorsPubKeys map[uint32][][]byte, epoch uint32) error { + SaveValidatorsPubKeysCalled: func(validatorsPubKeys *outportcore.ValidatorsPubKeys) error { return localErr }, - SaveValidatorsRatingCalled: func(indexID string, infoRating []*outportcore.ValidatorRatingInfo) error { + SaveValidatorsRatingCalled: func(validatorsRating *outportcore.ValidatorsRating) error { return localErr }, - SaveAccountsCalled: func(timestamp uint64, accs map[string]*outportcore.AlteredAccount) error { + SaveAccountsCalled: func(accounts *outportcore.Accounts) error { return localErr }, - FinalizedBlockCalled: func(headerHash []byte) error { + FinalizedBlockCalled: func(finalizedBlock *outportcore.FinalizedBlock) error { return localErr }, CloseCalled: func() error { @@ -445,15 +458,15 @@ func TestOutport_CloseWhileDriverIsStuckInContinuousErrors(t *testing.T) { wg := &sync.WaitGroup{} wg.Add(9) go func() { - outportHandler.SaveAccounts(0, nil, 0) + outportHandler.SaveAccounts(nil) wg.Done() }() go func() { - outportHandler.SaveBlock(nil) + _ = outportHandler.SaveBlock(nil) wg.Done() }() go func() { - outportHandler.RevertIndexedBlock(nil, nil) + _ = outportHandler.RevertIndexedBlock(nil) wg.Done() }() go func() { @@ -461,15 +474,15 @@ func TestOutport_CloseWhileDriverIsStuckInContinuousErrors(t 
*testing.T) { wg.Done() }() go func() { - outportHandler.SaveValidatorsPubKeys(nil, 0) + outportHandler.SaveValidatorsPubKeys(nil) wg.Done() }() go func() { - outportHandler.SaveValidatorsRating("", nil) + outportHandler.SaveValidatorsRating(nil) wg.Done() }() go func() { - outportHandler.SaveAccounts(0, nil, 0) + outportHandler.SaveAccounts(nil) wg.Done() }() go func() { @@ -516,13 +529,14 @@ func TestOutport_SaveBlockDriverStuck(t *testing.T) { } _ = outportHandler.SubscribeDriver(&mock.DriverStub{ - SaveBlockCalled: func(args *outportcore.ArgsSaveBlockData) error { + SaveBlockCalled: func(args *outportcore.OutportBlock) error { time.Sleep(time.Second * 5) return nil }, }) - outportHandler.SaveBlock(nil) + args := createSaveBlockArgs() + _ = outportHandler.SaveBlock(args) assert.True(t, logErrorCalled.IsSet()) assert.Equal(t, uint32(1), atomicGo.LoadUint32(&numLogDebugCalled)) @@ -555,12 +569,13 @@ func TestOutport_SaveBlockDriverIsNotStuck(t *testing.T) { } _ = outportHandler.SubscribeDriver(&mock.DriverStub{ - SaveBlockCalled: func(args *outportcore.ArgsSaveBlockData) error { + SaveBlockCalled: func(args *outportcore.OutportBlock) error { return nil }, }) - outportHandler.SaveBlock(nil) + args := createSaveBlockArgs() + _ = outportHandler.SaveBlock(args) time.Sleep(time.Second) assert.Equal(t, uint32(2), atomicGo.LoadUint32(&numLogDebugCalled)) diff --git a/outport/process/alteredaccounts/alteredAccountsProvider.go b/outport/process/alteredaccounts/alteredAccountsProvider.go index 6e9588b2422..b3ba2418de3 100644 --- a/outport/process/alteredaccounts/alteredAccountsProvider.go +++ b/outport/process/alteredaccounts/alteredAccountsProvider.go @@ -9,6 +9,7 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-core-go/data/alteredAccount" "github.com/multiversx/mx-chain-core-go/data/esdt" outportcore "github.com/multiversx/mx-chain-core-go/data/outport" "github.com/multiversx/mx-chain-go/outport/process/alteredaccounts/shared" @@ -69,7 +70,7 @@ func NewAlteredAccountsProvider(args ArgsAlteredAccountsProvider) (*alteredAccou } // ExtractAlteredAccountsFromPool will extract and return altered accounts from the pool -func (aap *alteredAccountsProvider) ExtractAlteredAccountsFromPool(txPool *outportcore.Pool, options shared.AlteredAccountsOptions) (map[string]*outportcore.AlteredAccount, error) { +func (aap *alteredAccountsProvider) ExtractAlteredAccountsFromPool(txPool *outportcore.TransactionPool, options shared.AlteredAccountsOptions) (map[string]*alteredAccount.AlteredAccount, error) { if err := options.Verify(); err != nil { return nil, err } @@ -79,7 +80,7 @@ func (aap *alteredAccountsProvider) ExtractAlteredAccountsFromPool(txPool *outpo if txPool == nil { log.Warn("alteredAccountsProvider: ExtractAlteredAccountsFromPool", "txPool is nil", "will return") - return map[string]*outportcore.AlteredAccount{}, nil + return map[string]*alteredAccount.AlteredAccount{}, nil } markedAccounts := make(map[string]*markedAlteredAccount) @@ -92,8 +93,8 @@ func (aap *alteredAccountsProvider) ExtractAlteredAccountsFromPool(txPool *outpo return aap.fetchDataForMarkedAccounts(markedAccounts, options) } -func (aap *alteredAccountsProvider) fetchDataForMarkedAccounts(markedAccounts map[string]*markedAlteredAccount, options shared.AlteredAccountsOptions) (map[string]*outportcore.AlteredAccount, error) { - alteredAccounts := make(map[string]*outportcore.AlteredAccount) +func 
(aap *alteredAccountsProvider) fetchDataForMarkedAccounts(markedAccounts map[string]*markedAlteredAccount, options shared.AlteredAccountsOptions) (map[string]*alteredAccount.AlteredAccount, error) { + alteredAccounts := make(map[string]*alteredAccount.AlteredAccount) var err error for address, markedAccount := range markedAccounts { err = aap.processMarkedAccountData(address, markedAccount, alteredAccounts, options) @@ -108,7 +109,7 @@ func (aap *alteredAccountsProvider) fetchDataForMarkedAccounts(markedAccounts ma func (aap *alteredAccountsProvider) processMarkedAccountData( addressStr string, markedAccount *markedAlteredAccount, - alteredAccounts map[string]*outportcore.AlteredAccount, + alteredAccounts map[string]*alteredAccount.AlteredAccount, options shared.AlteredAccountsOptions, ) error { addressBytes := []byte(addressStr) @@ -135,8 +136,8 @@ func (aap *alteredAccountsProvider) processMarkedAccountData( return nil } -func (aap *alteredAccountsProvider) addAdditionalDataInAlteredAccount(alteredAccount *outportcore.AlteredAccount, userAccount state.UserAccountHandler, markedAccount *markedAlteredAccount) { - alteredAccount.AdditionalData = &outportcore.AdditionalAccountData{ +func (aap *alteredAccountsProvider) addAdditionalDataInAlteredAccount(alteredAcc *alteredAccount.AlteredAccount, userAccount state.UserAccountHandler, markedAccount *markedAlteredAccount) { + alteredAcc.AdditionalData = &alteredAccount.AdditionalAccountData{ IsSender: markedAccount.isSender, BalanceChanged: markedAccount.balanceChanged, UserName: string(userAccount.GetUserName()), @@ -144,22 +145,20 @@ func (aap *alteredAccountsProvider) addAdditionalDataInAlteredAccount(alteredAcc ownerAddressBytes := userAccount.GetOwnerAddress() if core.IsSmartContractAddress(userAccount.AddressBytes()) && len(ownerAddressBytes) == aap.addressConverter.Len() { - alteredAccount.AdditionalData.CurrentOwner = aap.addressConverter.SilentEncode(ownerAddressBytes, log) + alteredAcc.AdditionalData.CurrentOwner = aap.addressConverter.SilentEncode(ownerAddressBytes, log) } developerRewards := userAccount.GetDeveloperReward() if developerRewards != nil { - alteredAccount.AdditionalData.DeveloperRewards = developerRewards.String() + alteredAcc.AdditionalData.DeveloperRewards = developerRewards.String() } } -func (aap *alteredAccountsProvider) getAlteredAccountFromUserAccounts(userEncodedAddress string, userAccount state.UserAccountHandler) *outportcore.AlteredAccount { - alteredAccount := &outportcore.AlteredAccount{ +func (aap *alteredAccountsProvider) getAlteredAccountFromUserAccounts(userEncodedAddress string, userAccount state.UserAccountHandler) *alteredAccount.AlteredAccount { + return &alteredAccount.AlteredAccount{ Address: userEncodedAddress, Balance: userAccount.GetBalance().String(), Nonce: userAccount.GetNonce(), } - - return alteredAccount } func (aap *alteredAccountsProvider) loadUserAccount(addressBytes []byte, options shared.AlteredAccountsOptions) (state.UserAccountHandler, error) { @@ -188,7 +187,7 @@ func (aap *alteredAccountsProvider) addTokensDataForMarkedAccount( encodedAddress string, userAccount state.UserAccountHandler, markedAccountToken *markedAlteredAccountToken, - alteredAccounts map[string]*outportcore.AlteredAccount, + alteredAccounts map[string]*alteredAccount.AlteredAccount, options shared.AlteredAccountsOptions, ) error { nonce := markedAccountToken.nonce @@ -211,7 +210,7 @@ func (aap *alteredAccountsProvider) addTokensDataForMarkedAccount( return nil } - accountTokenData := 
&outportcore.AccountTokenData{ + accountTokenData := &alteredAccount.AccountTokenData{ Identifier: tokenID, Balance: esdtToken.Value.String(), Nonce: nonce, @@ -219,25 +218,24 @@ func (aap *alteredAccountsProvider) addTokensDataForMarkedAccount( MetaData: aap.convertMetaData(esdtToken.TokenMetaData), } if options.WithAdditionalOutportData { - accountTokenData.AdditionalData = &outportcore.AdditionalAccountTokenData{ + accountTokenData.AdditionalData = &alteredAccount.AdditionalAccountTokenData{ IsNFTCreate: markedAccountToken.isNFTCreate, } } - alteredAccount := alteredAccounts[encodedAddress] - alteredAccount.Tokens = append(alteredAccounts[encodedAddress].Tokens, accountTokenData) + alteredAcc := alteredAccounts[encodedAddress] + alteredAcc.Tokens = append(alteredAccounts[encodedAddress].Tokens, accountTokenData) return nil } -func (aap *alteredAccountsProvider) convertMetaData(metaData *esdt.MetaData) *outportcore.TokenMetaData { +func (aap *alteredAccountsProvider) convertMetaData(metaData *esdt.MetaData) *alteredAccount.TokenMetaData { if metaData == nil { return nil } metaDataCreatorAddr := aap.addressConverter.SilentEncode(metaData.Creator, log) - - return &outportcore.TokenMetaData{ + return &alteredAccount.TokenMetaData{ Nonce: metaData.Nonce, Name: string(metaData.Name), Creator: metaDataCreatorAddr, @@ -249,20 +247,60 @@ func (aap *alteredAccountsProvider) convertMetaData(metaData *esdt.MetaData) *ou } func (aap *alteredAccountsProvider) extractAddressesWithBalanceChange( - txPool *outportcore.Pool, + txPool *outportcore.TransactionPool, markedAlteredAccounts map[string]*markedAlteredAccount, ) { selfShardID := aap.shardCoordinator.SelfId() - aap.extractAddressesFromTxsHandlers(selfShardID, txPool.Txs, markedAlteredAccounts, process.MoveBalance) - aap.extractAddressesFromTxsHandlers(selfShardID, txPool.Scrs, markedAlteredAccounts, process.SCInvoking) - aap.extractAddressesFromTxsHandlers(selfShardID, txPool.Rewards, markedAlteredAccounts, process.RewardTx) - aap.extractAddressesFromTxsHandlers(selfShardID, txPool.Invalid, markedAlteredAccounts, process.InvalidTransaction) + txs := txsMapToTxHandlerSlice(txPool.Transactions) + scrs := scrsMapToTxHandlerSlice(txPool.SmartContractResults) + rewards := rewardsMapToTxHandlerSlice(txPool.Rewards) + invalidTxs := txsMapToTxHandlerSlice(txPool.InvalidTxs) + + aap.extractAddressesFromTxsHandlers(selfShardID, txs, markedAlteredAccounts, process.MoveBalance) + aap.extractAddressesFromTxsHandlers(selfShardID, scrs, markedAlteredAccounts, process.SCInvoking) + aap.extractAddressesFromTxsHandlers(selfShardID, rewards, markedAlteredAccounts, process.RewardTx) + aap.extractAddressesFromTxsHandlers(selfShardID, invalidTxs, markedAlteredAccounts, process.InvalidTransaction) +} + +func txsMapToTxHandlerSlice(txs map[string]*outportcore.TxInfo) []data.TransactionHandler { + ret := make([]data.TransactionHandler, len(txs)) + + idx := 0 + for _, tx := range txs { + ret[idx] = tx.Transaction + idx++ + } + + return ret +} + +func scrsMapToTxHandlerSlice(scrs map[string]*outportcore.SCRInfo) []data.TransactionHandler { + ret := make([]data.TransactionHandler, len(scrs)) + + idx := 0 + for _, scr := range scrs { + ret[idx] = scr.SmartContractResult + idx++ + } + + return ret +} +func rewardsMapToTxHandlerSlice(rewards map[string]*outportcore.RewardInfo) []data.TransactionHandler { + ret := make([]data.TransactionHandler, len(rewards)) + + idx := 0 + for _, reward := range rewards { + ret[idx] = reward.Reward + idx++ + } + + return ret } func (aap 
*alteredAccountsProvider) extractAddressesFromTxsHandlers( selfShardID uint32, - txsHandlers map[string]data.TransactionHandlerWithGasUsedAndFee, + txsHandlers []data.TransactionHandler, markedAlteredAccounts map[string]*markedAlteredAccount, txType process.TransactionType, ) { diff --git a/outport/process/alteredaccounts/alteredAccountsProvider_test.go b/outport/process/alteredaccounts/alteredAccountsProvider_test.go index 63a924ffe6a..64275bfff81 100644 --- a/outport/process/alteredaccounts/alteredAccountsProvider_test.go +++ b/outport/process/alteredaccounts/alteredAccountsProvider_test.go @@ -8,7 +8,7 @@ import ( "testing" "github.com/multiversx/mx-chain-core-go/core" - "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-core-go/data/alteredAccount" "github.com/multiversx/mx-chain-core-go/data/esdt" outportcore "github.com/multiversx/mx-chain-core-go/data/outport" "github.com/multiversx/mx-chain-core-go/data/rewardTx" @@ -95,16 +95,16 @@ func TestGetAlteredAccountFromUserAccount(t *testing.T) { Address: []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, } - res := &outportcore.AlteredAccount{ + res := &alteredAccount.AlteredAccount{ Address: "addr", Balance: "1000", } aap.addAdditionalDataInAlteredAccount(res, userAccount, &markedAlteredAccount{}) - require.Equal(t, &outportcore.AlteredAccount{ + require.Equal(t, &alteredAccount.AlteredAccount{ Address: "addr", Balance: "1000", - AdditionalData: &outportcore.AdditionalAccountData{ + AdditionalData: &alteredAccount.AdditionalAccountData{ DeveloperRewards: "100", CurrentOwner: "6f776e6572", UserName: "contract", @@ -118,16 +118,16 @@ func TestGetAlteredAccountFromUserAccount(t *testing.T) { Address: []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, } - res = &outportcore.AlteredAccount{ + res = &alteredAccount.AlteredAccount{ Address: "addr", Balance: "5000", } aap.addAdditionalDataInAlteredAccount(res, userAccount, &markedAlteredAccount{}) - require.Equal(t, &outportcore.AlteredAccount{ + require.Equal(t, &alteredAccount.AlteredAccount{ Address: "addr", Balance: "5000", - AdditionalData: &outportcore.AdditionalAccountData{ + AdditionalData: &alteredAccount.AdditionalAccountData{ DeveloperRewards: "5000", }, }, res) @@ -162,7 +162,7 @@ func testExtractAlteredAccountsFromPoolNoTransaction(t *testing.T) { args := getMockArgs() aap, _ := NewAlteredAccountsProvider(args) - res, err := aap.ExtractAlteredAccountsFromPool(&outportcore.Pool{}, shared.AlteredAccountsOptions{}) + res, err := aap.ExtractAlteredAccountsFromPool(&outportcore.TransactionPool{}, shared.AlteredAccountsOptions{}) require.NoError(t, err) require.Empty(t, res) } @@ -186,18 +186,28 @@ func testExtractAlteredAccountsFromPoolSenderShard(t *testing.T) { args.AddressConverter = testscommon.NewPubkeyConverterMock(20) aap, _ := NewAlteredAccountsProvider(args) - res, err := aap.ExtractAlteredAccountsFromPool(&outportcore.Pool{ - Txs: map[string]data.TransactionHandlerWithGasUsedAndFee{ - "hash0": outportcore.NewTransactionHandlerWithGasAndFee(&transaction.Transaction{ - SndAddr: []byte("sender shard - tx0 "), - RcvAddr: []byte("receiver shard - tx0"), - Value: big.NewInt(1), - }, 0, big.NewInt(0)), - "hash1": outportcore.NewTransactionHandlerWithGasAndFee(&transaction.Transaction{ - SndAddr: []byte("sender shard - tx1 "), - RcvAddr: []byte("receiver shard - tx1"), - Value: big.NewInt(1), - }, 0, big.NewInt(0)), + res, err := aap.ExtractAlteredAccountsFromPool(&outportcore.TransactionPool{ + Transactions: map[string]*outportcore.TxInfo{ + "hash0": { + Transaction: 
&transaction.Transaction{ + SndAddr: []byte("sender shard - tx0 "), + RcvAddr: []byte("receiver shard - tx0"), + Value: big.NewInt(1), + }, + FeeInfo: &outportcore.FeeInfo{ + Fee: big.NewInt(0), + }, + }, + "hash1": { + Transaction: &transaction.Transaction{ + SndAddr: []byte("sender shard - tx1 "), + RcvAddr: []byte("receiver shard - tx1"), + Value: big.NewInt(1), + }, + FeeInfo: &outportcore.FeeInfo{ + Fee: big.NewInt(0), + }, + }, }, }, shared.AlteredAccountsOptions{ WithAdditionalOutportData: true, @@ -232,18 +242,28 @@ func testExtractAlteredAccountsFromPoolReceiverShard(t *testing.T) { args.AddressConverter = testscommon.NewPubkeyConverterMock(20) aap, _ := NewAlteredAccountsProvider(args) - res, err := aap.ExtractAlteredAccountsFromPool(&outportcore.Pool{ - Txs: map[string]data.TransactionHandlerWithGasUsedAndFee{ - "hash0": outportcore.NewTransactionHandlerWithGasAndFee(&transaction.Transaction{ - SndAddr: []byte("sender shard - tx0 "), - RcvAddr: []byte("receiver shard - tx0"), - Value: big.NewInt(1), - }, 0, big.NewInt(0)), - "hash1": outportcore.NewTransactionHandlerWithGasAndFee(&transaction.Transaction{ - SndAddr: []byte("sender shard - tx1 "), - RcvAddr: []byte("receiver shard - tx1"), - Value: big.NewInt(1), - }, 0, big.NewInt(0)), + res, err := aap.ExtractAlteredAccountsFromPool(&outportcore.TransactionPool{ + Transactions: map[string]*outportcore.TxInfo{ + "hash0": { + Transaction: &transaction.Transaction{ + SndAddr: []byte("sender shard - tx0 "), + RcvAddr: []byte("receiver shard - tx0"), + Value: big.NewInt(1), + }, + FeeInfo: &outportcore.FeeInfo{ + Fee: big.NewInt(0), + }, + }, + "hash1": { + Transaction: &transaction.Transaction{ + SndAddr: []byte("sender shard - tx1 "), + RcvAddr: []byte("receiver shard - tx1"), + Value: big.NewInt(1), + }, + FeeInfo: &outportcore.FeeInfo{ + Fee: big.NewInt(0), + }, + }, }, }, shared.AlteredAccountsOptions{ WithAdditionalOutportData: true, @@ -276,33 +296,58 @@ func testExtractAlteredAccountsFromPoolBothSenderAndReceiverShards(t *testing.T) args.AddressConverter = testscommon.NewPubkeyConverterMock(19) aap, _ := NewAlteredAccountsProvider(args) - res, err := aap.ExtractAlteredAccountsFromPool(&outportcore.Pool{ - Txs: map[string]data.TransactionHandlerWithGasUsedAndFee{ - "hash0": outportcore.NewTransactionHandlerWithGasAndFee(&transaction.Transaction{ // intra-shard 0, different addresses - SndAddr: []byte("shard0 addr - tx0 "), - RcvAddr: []byte("shard0 addr 2 - tx0"), - Value: big.NewInt(1), - }, 0, big.NewInt(0)), - "hash1": outportcore.NewTransactionHandlerWithGasAndFee(&transaction.Transaction{ // intra-shard 0, same addresses - SndAddr: []byte("shard0 addr 3 - tx1"), - RcvAddr: []byte("shard0 addr 3 - tx1"), - Value: big.NewInt(1), - }, 0, big.NewInt(0)), - "hash2": outportcore.NewTransactionHandlerWithGasAndFee(&transaction.Transaction{ // cross-shard, sender in shard 0 - SndAddr: []byte("shard0 addr - tx2 "), - RcvAddr: []byte("shard1 - tx2 "), - Value: big.NewInt(1), - }, 0, big.NewInt(0)), - "hash3": outportcore.NewTransactionHandlerWithGasAndFee(&transaction.Transaction{ // cross-shard, receiver in shard 0 - SndAddr: []byte("shard1 addr - tx3 "), - RcvAddr: []byte("shard0 addr - tx3 "), - Value: big.NewInt(1), - }, 0, big.NewInt(0)), - "hash4": outportcore.NewTransactionHandlerWithGasAndFee(&transaction.Transaction{ // cross-shard, no address in shard 0 - SndAddr: []byte("shard2 addr - tx4 "), - RcvAddr: []byte("shard2 addr - tx3 "), - Value: big.NewInt(1), - }, 0, big.NewInt(0)), + res, err := 
aap.ExtractAlteredAccountsFromPool(&outportcore.TransactionPool{ + Transactions: map[string]*outportcore.TxInfo{ + "hash0": { + Transaction: &transaction.Transaction{ + SndAddr: []byte("shard0 addr - tx0 "), + RcvAddr: []byte("shard0 addr 2 - tx0"), + Value: big.NewInt(1), + }, + FeeInfo: &outportcore.FeeInfo{ + Fee: big.NewInt(0), + }, + }, + "hash1": { + Transaction: &transaction.Transaction{ + SndAddr: []byte("shard0 addr 3 - tx1"), + RcvAddr: []byte("shard0 addr 3 - tx1"), + Value: big.NewInt(1), + }, + FeeInfo: &outportcore.FeeInfo{ + Fee: big.NewInt(0), + }, + }, + "hash2": { + Transaction: &transaction.Transaction{ + SndAddr: []byte("shard0 addr - tx2 "), + RcvAddr: []byte("shard1 - tx2 "), + Value: big.NewInt(1), + }, + FeeInfo: &outportcore.FeeInfo{ + Fee: big.NewInt(0), + }, + }, + "hash3": { + Transaction: &transaction.Transaction{ + SndAddr: []byte("shard1 addr - tx3 "), + RcvAddr: []byte("shard0 addr - tx3 "), + Value: big.NewInt(1), + }, + FeeInfo: &outportcore.FeeInfo{ + Fee: big.NewInt(0), + }, + }, + "hash4": { + Transaction: &transaction.Transaction{ + SndAddr: []byte("shard2 addr - tx4 "), + RcvAddr: []byte("shard2 addr - tx3 "), + Value: big.NewInt(1), + }, + FeeInfo: &outportcore.FeeInfo{ + Fee: big.NewInt(0), + }, + }, }, }, shared.AlteredAccountsOptions{}) require.NoError(t, err) @@ -315,16 +360,12 @@ func testExtractAlteredAccountsFromPoolBothSenderAndReceiverShards(t *testing.T) shard0AddrTx0, _ := args.AddressConverter.Encode([]byte("shard0 addr - tx0 ")) require.Contains(t, res, shard0AddrTx0) - shard0Addr2Tx0, _ := args.AddressConverter.Encode([]byte("shard0 addr 2 - tx0")) require.Contains(t, res, shard0Addr2Tx0) - shard0Addr3Tx1, _ := args.AddressConverter.Encode([]byte("shard0 addr 3 - tx1")) require.Contains(t, res, shard0Addr3Tx1) - shard0AddrTx2, _ := args.AddressConverter.Encode([]byte("shard0 addr - tx2 ")) require.Contains(t, res, shard0AddrTx2) - shard0AddrTx3, _ := args.AddressConverter.Encode([]byte("shard0 addr - tx3 ")) require.Contains(t, res, shard0AddrTx3) } @@ -357,13 +398,18 @@ func testExtractAlteredAccountsFromPoolTrieDataChecks(t *testing.T) { args.AddressConverter = testscommon.NewPubkeyConverterMock(19) aap, _ := NewAlteredAccountsProvider(args) - res, err := aap.ExtractAlteredAccountsFromPool(&outportcore.Pool{ - Txs: map[string]data.TransactionHandlerWithGasUsedAndFee{ - "hash0": outportcore.NewTransactionHandlerWithGasAndFee(&transaction.Transaction{ - SndAddr: []byte("sender in shard 0 "), - RcvAddr: []byte(receiverInSelfShard), - Value: big.NewInt(1), - }, 0, big.NewInt(0)), + res, err := aap.ExtractAlteredAccountsFromPool(&outportcore.TransactionPool{ + Transactions: map[string]*outportcore.TxInfo{ + "hash0": { + Transaction: &transaction.Transaction{ + SndAddr: []byte("sender in shard 0 "), + RcvAddr: []byte(receiverInSelfShard), + Value: big.NewInt(1), + }, + FeeInfo: &outportcore.FeeInfo{ + Fee: big.NewInt(0), + }, + }, }, }, shared.AlteredAccountsOptions{}) require.NoError(t, err) @@ -404,32 +450,49 @@ func testExtractAlteredAccountsFromPoolScrsInvalidRewards(t *testing.T) { args.AddressConverter = testscommon.NewPubkeyConverterMock(26) aap, _ := NewAlteredAccountsProvider(args) - res, err := aap.ExtractAlteredAccountsFromPool(&outportcore.Pool{ - Txs: map[string]data.TransactionHandlerWithGasUsedAndFee{ - "hash0": outportcore.NewTransactionHandlerWithGasAndFee(&transaction.Transaction{ - SndAddr: []byte("sender in shard 0 - tx 0 "), - Value: big.NewInt(1), - }, 0, big.NewInt(0)), - }, - Rewards: 
map[string]data.TransactionHandlerWithGasUsedAndFee{ - "hash1": outportcore.NewTransactionHandlerWithGasAndFee(&rewardTx.RewardTx{ - RcvAddr: []byte("receiver in shard 0 - tx 1"), - Value: big.NewInt(1), - }, 0, big.NewInt(0)), - }, - Scrs: map[string]data.TransactionHandlerWithGasUsedAndFee{ - "hash2": outportcore.NewTransactionHandlerWithGasAndFee(&smartContractResult.SmartContractResult{ - SndAddr: []byte("sender in shard 0 - tx 2 "), - RcvAddr: []byte("receiver in shard 0 - tx 2"), - Value: big.NewInt(1), - }, 0, big.NewInt(0)), - }, - Invalid: map[string]data.TransactionHandlerWithGasUsedAndFee{ - "hash3": outportcore.NewTransactionHandlerWithGasAndFee(&transaction.Transaction{ - SndAddr: []byte("sender in shard 0 - tx 3 "), - RcvAddr: []byte("receiver in shard 0 - tx 3"), // receiver for invalid txs should not be included - Value: big.NewInt(1), - }, 0, big.NewInt(0)), + res, err := aap.ExtractAlteredAccountsFromPool(&outportcore.TransactionPool{ + Transactions: map[string]*outportcore.TxInfo{ + "hash0": { + Transaction: &transaction.Transaction{ + SndAddr: []byte("sender in shard 0 - tx 0 "), + Value: big.NewInt(1), + }, + FeeInfo: &outportcore.FeeInfo{ + Fee: big.NewInt(0), + }, + }, + }, + Rewards: map[string]*outportcore.RewardInfo{ + "hash1": { + Reward: &rewardTx.RewardTx{ + RcvAddr: []byte("receiver in shard 0 - tx 1"), + Value: big.NewInt(1), + }, + }, + }, + SmartContractResults: map[string]*outportcore.SCRInfo{ + "hash2": { + SmartContractResult: &smartContractResult.SmartContractResult{ + SndAddr: []byte("sender in shard 0 - tx 2 "), + RcvAddr: []byte("receiver in shard 0 - tx 2"), + Value: big.NewInt(1), + }, + FeeInfo: &outportcore.FeeInfo{ + Fee: big.NewInt(0), + }, + }, + }, + InvalidTxs: map[string]*outportcore.TxInfo{ + "hash3": { + Transaction: &transaction.Transaction{ + SndAddr: []byte("sender in shard 0 - tx 3 "), + RcvAddr: []byte("receiver in shard 0 - tx 3"), // receiver for invalid txs should not be included + Value: big.NewInt(1), + }, + FeeInfo: &outportcore.FeeInfo{ + Fee: big.NewInt(0), + }, + }, }, }, shared.AlteredAccountsOptions{}) require.NoError(t, err) @@ -456,10 +519,11 @@ func testExtractAlteredAccountsFromPoolShouldReturnErrorWhenCastingToVmCommonUse } aap, _ := NewAlteredAccountsProvider(args) - res, err := aap.ExtractAlteredAccountsFromPool(&outportcore.Pool{ - Logs: []*data.LogData{ + res, err := aap.ExtractAlteredAccountsFromPool(&outportcore.TransactionPool{ + Logs: []*outportcore.LogData{ { - LogHandler: &transaction.Log{ + TxHash: "hash", + Log: &transaction.Log{ Address: []byte("addr"), Events: []*transaction.Event{ { @@ -507,10 +571,11 @@ func testExtractAlteredAccountsFromPoolShouldIncludeESDT(t *testing.T) { } aap, _ := NewAlteredAccountsProvider(args) - res, err := aap.ExtractAlteredAccountsFromPool(&outportcore.Pool{ - Logs: []*data.LogData{ + res, err := aap.ExtractAlteredAccountsFromPool(&outportcore.TransactionPool{ + Logs: []*outportcore.LogData{ { - LogHandler: &transaction.Log{ + TxHash: "hash", + Log: &transaction.Log{ Address: []byte("addr"), Events: []*transaction.Event{ { @@ -539,7 +604,7 @@ func testExtractAlteredAccountsFromPoolShouldIncludeESDT(t *testing.T) { encodedAddr, _ := args.AddressConverter.Encode([]byte("addr")) require.Len(t, res, 1) require.Len(t, res[encodedAddr].Tokens, 1) - require.Equal(t, &outportcore.AccountTokenData{ + require.Equal(t, &alteredAccount.AccountTokenData{ Identifier: "token0", Balance: expectedToken.Value.String(), Nonce: 0, @@ -570,10 +635,11 @@ func 
testExtractAlteredAccountsFromPoolShouldIncludeNFT(t *testing.T) { } aap, _ := NewAlteredAccountsProvider(args) - res, err := aap.ExtractAlteredAccountsFromPool(&outportcore.Pool{ - Logs: []*data.LogData{ + res, err := aap.ExtractAlteredAccountsFromPool(&outportcore.TransactionPool{ + Logs: []*outportcore.LogData{ { - LogHandler: &transaction.Log{ + TxHash: "hash", + Log: &transaction.Log{ Address: []byte("addr"), Events: []*transaction.Event{ { @@ -592,11 +658,11 @@ func testExtractAlteredAccountsFromPoolShouldIncludeNFT(t *testing.T) { require.NoError(t, err) encodedAddr, _ := args.AddressConverter.Encode([]byte("addr")) - require.Equal(t, &outportcore.AccountTokenData{ + require.Equal(t, &alteredAccount.AccountTokenData{ Identifier: "token0", Balance: expectedToken.Value.String(), Nonce: expectedToken.TokenMetaData.Nonce, - MetaData: &outportcore.TokenMetaData{Nonce: expectedToken.TokenMetaData.Nonce}, + MetaData: &alteredAccount.TokenMetaData{Nonce: expectedToken.TokenMetaData.Nonce}, }, res[encodedAddr].Tokens[0]) } @@ -625,17 +691,23 @@ func testExtractAlteredAccountsFromPoolShouldNotIncludeReceiverAddressIfNftCreat } aap, _ := NewAlteredAccountsProvider(args) - res, err := aap.ExtractAlteredAccountsFromPool(&outportcore.Pool{ - Txs: map[string]data.TransactionHandlerWithGasUsedAndFee{ - "hh": outportcore.NewTransactionHandlerWithGasAndFee(&transaction.Transaction{ - SndAddr: sendAddrShard0, - RcvAddr: sendAddrShard0, - Value: big.NewInt(0), - }, 0, big.NewInt(0)), + res, err := aap.ExtractAlteredAccountsFromPool(&outportcore.TransactionPool{ + Transactions: map[string]*outportcore.TxInfo{ + "hh": { + Transaction: &transaction.Transaction{ + SndAddr: sendAddrShard0, + RcvAddr: sendAddrShard0, + Value: big.NewInt(0), + }, + FeeInfo: &outportcore.FeeInfo{ + Fee: big.NewInt(0), + }, + }, }, - Logs: []*data.LogData{ + Logs: []*outportcore.LogData{ { - LogHandler: &transaction.Log{ + TxHash: "hh", + Log: &transaction.Log{ Address: sendAddrShard0, Events: []*transaction.Event{ { @@ -698,10 +770,11 @@ func testExtractAlteredAccountsFromPoolShouldIncludeDestinationFromTokensLogsTop } aap, _ := NewAlteredAccountsProvider(args) - res, err := aap.ExtractAlteredAccountsFromPool(&outportcore.Pool{ - Logs: []*data.LogData{ + res, err := aap.ExtractAlteredAccountsFromPool(&outportcore.TransactionPool{ + Logs: []*outportcore.LogData{ { - LogHandler: &transaction.Log{ + TxHash: "hash0", + Log: &transaction.Log{ Address: []byte("addr"), Events: []*transaction.Event{ { @@ -728,11 +801,11 @@ func testExtractAlteredAccountsFromPoolShouldIncludeDestinationFromTokensLogsTop creator, err := args.AddressConverter.Encode(expectedToken.TokenMetaData.Creator) require.Nil(t, err) require.Len(t, res[mapKeyToSearch].Tokens, 1) - require.Equal(t, res[mapKeyToSearch].Tokens[0], &outportcore.AccountTokenData{ + require.Equal(t, res[mapKeyToSearch].Tokens[0], &alteredAccount.AccountTokenData{ Identifier: "token0", Balance: "37", Nonce: 38, - MetaData: &outportcore.TokenMetaData{ + MetaData: &alteredAccount.TokenMetaData{ Nonce: 38, Name: "name", Creator: creator, @@ -766,16 +839,22 @@ func testExtractAlteredAccountsFromPoolAddressHasBalanceChangeEsdtAndfNft(t *tes } aap, _ := NewAlteredAccountsProvider(args) - res, err := aap.ExtractAlteredAccountsFromPool(&outportcore.Pool{ - Txs: map[string]data.TransactionHandlerWithGasUsedAndFee{ - "hash0": outportcore.NewTransactionHandlerWithGasAndFee(&transaction.Transaction{ - SndAddr: []byte("addr"), - Value: big.NewInt(0), - }, 0, big.NewInt(0)), + res, err := 
aap.ExtractAlteredAccountsFromPool(&outportcore.TransactionPool{ + Transactions: map[string]*outportcore.TxInfo{ + "hash0": { + Transaction: &transaction.Transaction{ + SndAddr: []byte("addr"), + Value: big.NewInt(0), + }, + FeeInfo: &outportcore.FeeInfo{ + Fee: big.NewInt(0), + }, + }, }, - Logs: []*data.LogData{ + Logs: []*outportcore.LogData{ { - LogHandler: &transaction.Log{ + TxHash: "hash0", + Log: &transaction.Log{ Address: []byte("addr"), Events: []*transaction.Event{ { @@ -877,16 +956,22 @@ func testExtractAlteredAccountsFromPoolAddressHasMultipleNfts(t *testing.T) { } aap, _ := NewAlteredAccountsProvider(args) - res, err := aap.ExtractAlteredAccountsFromPool(&outportcore.Pool{ - Txs: map[string]data.TransactionHandlerWithGasUsedAndFee{ - "hash0": outportcore.NewTransactionHandlerWithGasAndFee(&transaction.Transaction{ - SndAddr: []byte("addr"), - Value: big.NewInt(0), - }, 0, big.NewInt(0)), + res, err := aap.ExtractAlteredAccountsFromPool(&outportcore.TransactionPool{ + Transactions: map[string]*outportcore.TxInfo{ + "hash0": { + Transaction: &transaction.Transaction{ + SndAddr: []byte("addr"), + Value: big.NewInt(0), + }, + FeeInfo: &outportcore.FeeInfo{ + Fee: big.NewInt(0), + }, + }, }, - Logs: []*data.LogData{ + Logs: []*outportcore.LogData{ { - LogHandler: &transaction.Log{ + TxHash: "hash0", + Log: &transaction.Log{ Address: []byte("addr"), Events: []*transaction.Event{ { @@ -924,28 +1009,28 @@ func testExtractAlteredAccountsFromPoolAddressHasMultipleNfts(t *testing.T) { require.Len(t, res, 1) require.Len(t, res[encodedAddr].Tokens, 3) - require.Contains(t, res[encodedAddr].Tokens, &outportcore.AccountTokenData{ + require.Contains(t, res[encodedAddr].Tokens, &alteredAccount.AccountTokenData{ Identifier: "esdttoken", Balance: expectedToken0.Value.String(), Nonce: 0, MetaData: nil, }) - require.Contains(t, res[encodedAddr].Tokens, &outportcore.AccountTokenData{ + require.Contains(t, res[encodedAddr].Tokens, &alteredAccount.AccountTokenData{ Identifier: string(expectedToken1.TokenMetaData.Name), Balance: expectedToken1.Value.String(), Nonce: expectedToken1.TokenMetaData.Nonce, - MetaData: &outportcore.TokenMetaData{ + MetaData: &alteredAccount.TokenMetaData{ Nonce: expectedToken1.TokenMetaData.Nonce, Name: string(expectedToken1.TokenMetaData.Name), }, }) - require.Contains(t, res[encodedAddr].Tokens, &outportcore.AccountTokenData{ + require.Contains(t, res[encodedAddr].Tokens, &alteredAccount.AccountTokenData{ Identifier: string(expectedToken2.TokenMetaData.Name), Balance: expectedToken2.Value.String(), Nonce: expectedToken2.TokenMetaData.Nonce, - MetaData: &outportcore.TokenMetaData{ + MetaData: &alteredAccount.TokenMetaData{ Nonce: expectedToken2.TokenMetaData.Nonce, Name: string(expectedToken2.TokenMetaData.Name), }, @@ -976,17 +1061,23 @@ func testExtractAlteredAccountsFromPoolESDTTransferBalanceNotChanged(t *testing. 
args.AddressConverter = testscommon.NewPubkeyConverterMock(3) aap, _ := NewAlteredAccountsProvider(args) - res, err := aap.ExtractAlteredAccountsFromPool(&outportcore.Pool{ - Txs: map[string]data.TransactionHandlerWithGasUsedAndFee{ - "txHash": outportcore.NewTransactionHandlerWithGasAndFee(&transaction.Transaction{ - SndAddr: []byte("snd"), - RcvAddr: []byte("rcv"), - Value: big.NewInt(0), - }, 0, big.NewInt(0)), + res, err := aap.ExtractAlteredAccountsFromPool(&outportcore.TransactionPool{ + Transactions: map[string]*outportcore.TxInfo{ + "txHash": { + Transaction: &transaction.Transaction{ + SndAddr: []byte("snd"), + RcvAddr: []byte("rcv"), + Value: big.NewInt(0), + }, + FeeInfo: &outportcore.FeeInfo{ + Fee: big.NewInt(0), + }, + }, }, - Logs: []*data.LogData{ + Logs: []*outportcore.LogData{ { - LogHandler: &transaction.Log{ + TxHash: "txHash", + Log: &transaction.Log{ Address: []byte("snd"), Events: []*transaction.Event{ { @@ -1007,23 +1098,23 @@ func testExtractAlteredAccountsFromPoolESDTTransferBalanceNotChanged(t *testing. encodedAddrSnd, _ := args.AddressConverter.Encode([]byte("snd")) encodedAddrRcv, _ := args.AddressConverter.Encode([]byte("rcv")) - require.Equal(t, map[string]*outportcore.AlteredAccount{ + require.Equal(t, map[string]*alteredAccount.AlteredAccount{ encodedAddrSnd: { Address: encodedAddrSnd, Balance: "10", - Tokens: []*outportcore.AccountTokenData{ + Tokens: []*alteredAccount.AccountTokenData{ { Identifier: "token0", Balance: expectedToken.Value.String(), Nonce: 0, Properties: "6f6b", MetaData: nil, - AdditionalData: &outportcore.AdditionalAccountTokenData{ + AdditionalData: &alteredAccount.AdditionalAccountTokenData{ IsNFTCreate: false, }, }, }, - AdditionalData: &outportcore.AdditionalAccountData{ + AdditionalData: &alteredAccount.AdditionalAccountData{ BalanceChanged: true, IsSender: true, }, @@ -1031,18 +1122,18 @@ func testExtractAlteredAccountsFromPoolESDTTransferBalanceNotChanged(t *testing. 
encodedAddrRcv: { Address: encodedAddrRcv, Balance: "10", - Tokens: []*outportcore.AccountTokenData{ + Tokens: []*alteredAccount.AccountTokenData{ { Identifier: "token0", Balance: expectedToken.Value.String(), Nonce: 0, Properties: "6f6b", - AdditionalData: &outportcore.AdditionalAccountTokenData{ + AdditionalData: &alteredAccount.AdditionalAccountTokenData{ IsNFTCreate: false, }, }, }, - AdditionalData: &outportcore.AdditionalAccountData{ + AdditionalData: &alteredAccount.AdditionalAccountData{ IsSender: false, BalanceChanged: false, }, @@ -1064,18 +1155,28 @@ func testExtractAlteredAccountsFromPoolReceiverShouldHaveBalanceChanged(t *testi args.AddressConverter = testscommon.NewPubkeyConverterMock(3) aap, _ := NewAlteredAccountsProvider(args) - res, err := aap.ExtractAlteredAccountsFromPool(&outportcore.Pool{ - Txs: map[string]data.TransactionHandlerWithGasUsedAndFee{ - "txHash": outportcore.NewTransactionHandlerWithGasAndFee(&transaction.Transaction{ - SndAddr: []byte("snd"), - RcvAddr: []byte("rcv"), - Value: big.NewInt(0), - }, 0, big.NewInt(0)), - "txHash2": outportcore.NewTransactionHandlerWithGasAndFee(&transaction.Transaction{ - SndAddr: []byte("snd"), - RcvAddr: []byte("rcv"), - Value: big.NewInt(2), - }, 0, big.NewInt(0)), + res, err := aap.ExtractAlteredAccountsFromPool(&outportcore.TransactionPool{ + Transactions: map[string]*outportcore.TxInfo{ + "txHash": { + Transaction: &transaction.Transaction{ + SndAddr: []byte("snd"), + RcvAddr: []byte("rcv"), + Value: big.NewInt(0), + }, + FeeInfo: &outportcore.FeeInfo{ + Fee: big.NewInt(0), + }, + }, + "txHash2": { + Transaction: &transaction.Transaction{ + SndAddr: []byte("snd"), + RcvAddr: []byte("rcv"), + Value: big.NewInt(2), + }, + FeeInfo: &outportcore.FeeInfo{ + Fee: big.NewInt(0), + }, + }, }, }, shared.AlteredAccountsOptions{ WithAdditionalOutportData: true, @@ -1084,14 +1185,12 @@ func testExtractAlteredAccountsFromPoolReceiverShouldHaveBalanceChanged(t *testi require.NoError(t, err) encodedAddrSnd, _ := args.AddressConverter.Encode([]byte("snd")) - encodedAddrRcv, _ := args.AddressConverter.Encode([]byte("rcv")) - - require.Equal(t, map[string]*outportcore.AlteredAccount{ + require.Equal(t, map[string]*alteredAccount.AlteredAccount{ encodedAddrSnd: { Address: encodedAddrSnd, Balance: "15", - AdditionalData: &outportcore.AdditionalAccountData{ + AdditionalData: &alteredAccount.AdditionalAccountData{ BalanceChanged: true, IsSender: true, }, @@ -1099,7 +1198,7 @@ func testExtractAlteredAccountsFromPoolReceiverShouldHaveBalanceChanged(t *testi encodedAddrRcv: { Address: encodedAddrRcv, Balance: "15", - AdditionalData: &outportcore.AdditionalAccountData{ + AdditionalData: &alteredAccount.AdditionalAccountData{ IsSender: false, BalanceChanged: true, }, @@ -1119,13 +1218,18 @@ func testExtractAlteredAccountsFromPoolOnlySenderShouldHaveBalanceChanged(t *tes args.AddressConverter = testscommon.NewPubkeyConverterMock(3) aap, _ := NewAlteredAccountsProvider(args) - res, err := aap.ExtractAlteredAccountsFromPool(&outportcore.Pool{ - Txs: map[string]data.TransactionHandlerWithGasUsedAndFee{ - "txHash": outportcore.NewTransactionHandlerWithGasAndFee(&transaction.Transaction{ - SndAddr: []byte("snd"), - RcvAddr: []byte("rcv"), - Value: big.NewInt(0), - }, 0, big.NewInt(0)), + res, err := aap.ExtractAlteredAccountsFromPool(&outportcore.TransactionPool{ + Transactions: map[string]*outportcore.TxInfo{ + "txHash": { + Transaction: &transaction.Transaction{ + SndAddr: []byte("snd"), + RcvAddr: []byte("rcv"), + Value: big.NewInt(0), + }, + 
FeeInfo: &outportcore.FeeInfo{ + Fee: big.NewInt(0), + }, + }, }, }, shared.AlteredAccountsOptions{ WithAdditionalOutportData: true, @@ -1133,11 +1237,11 @@ func testExtractAlteredAccountsFromPoolOnlySenderShouldHaveBalanceChanged(t *tes require.NoError(t, err) encodedAddrSnd, _ := args.AddressConverter.Encode([]byte("snd")) - require.Equal(t, map[string]*outportcore.AlteredAccount{ + require.Equal(t, map[string]*alteredAccount.AlteredAccount{ encodedAddrSnd: { Address: encodedAddrSnd, Balance: "15", - AdditionalData: &outportcore.AdditionalAccountData{ + AdditionalData: &alteredAccount.AdditionalAccountData{ BalanceChanged: true, IsSender: true, }, @@ -1168,17 +1272,23 @@ func textExtractAlteredAccountsFromPoolNftCreate(t *testing.T) { args.AddressConverter = testscommon.NewPubkeyConverterMock(3) aap, _ := NewAlteredAccountsProvider(args) - res, err := aap.ExtractAlteredAccountsFromPool(&outportcore.Pool{ - Txs: map[string]data.TransactionHandlerWithGasUsedAndFee{ - "txHash": outportcore.NewTransactionHandlerWithGasAndFee(&transaction.Transaction{ - SndAddr: []byte("snd"), - RcvAddr: []byte("snd"), - Value: big.NewInt(0), - }, 0, big.NewInt(0)), + res, err := aap.ExtractAlteredAccountsFromPool(&outportcore.TransactionPool{ + Transactions: map[string]*outportcore.TxInfo{ + "txHash": { + Transaction: &transaction.Transaction{ + SndAddr: []byte("snd"), + RcvAddr: []byte("snd"), + Value: big.NewInt(0), + }, + FeeInfo: &outportcore.FeeInfo{ + Fee: big.NewInt(0), + }, + }, }, - Logs: []*data.LogData{ + Logs: []*outportcore.LogData{ { - LogHandler: &transaction.Log{ + TxHash: "txHash", + Log: &transaction.Log{ Address: []byte("snd"), Events: []*transaction.Event{ { @@ -1198,23 +1308,23 @@ func textExtractAlteredAccountsFromPoolNftCreate(t *testing.T) { require.NoError(t, err) encodedAddrSnd, _ := args.AddressConverter.Encode([]byte("snd")) - require.Equal(t, map[string]*outportcore.AlteredAccount{ + require.Equal(t, map[string]*alteredAccount.AlteredAccount{ encodedAddrSnd: { Address: encodedAddrSnd, Balance: "10", - Tokens: []*outportcore.AccountTokenData{ + Tokens: []*alteredAccount.AccountTokenData{ { Identifier: "token0", Balance: expectedToken.Value.String(), Nonce: 0, Properties: "6f6b", MetaData: nil, - AdditionalData: &outportcore.AdditionalAccountTokenData{ + AdditionalData: &alteredAccount.AdditionalAccountTokenData{ IsNFTCreate: true, }, }, }, - AdditionalData: &outportcore.AdditionalAccountData{ + AdditionalData: &alteredAccount.AdditionalAccountData{ BalanceChanged: true, IsSender: true, }, @@ -1236,13 +1346,18 @@ func textExtractAlteredAccountsFromPoolTransactionValueNil(t *testing.T) { args.AddressConverter = testscommon.NewPubkeyConverterMock(3) aap, _ := NewAlteredAccountsProvider(args) - res, err := aap.ExtractAlteredAccountsFromPool(&outportcore.Pool{ - Txs: map[string]data.TransactionHandlerWithGasUsedAndFee{ - "txHash": outportcore.NewTransactionHandlerWithGasAndFee(&transaction.Transaction{ - SndAddr: []byte("snd"), - RcvAddr: []byte("rcv"), - Value: nil, - }, 0, big.NewInt(0)), + res, err := aap.ExtractAlteredAccountsFromPool(&outportcore.TransactionPool{ + Transactions: map[string]*outportcore.TxInfo{ + "txHash": { + Transaction: &transaction.Transaction{ + SndAddr: []byte("snd"), + RcvAddr: []byte("rcv"), + Value: nil, + }, + FeeInfo: &outportcore.FeeInfo{ + Fee: big.NewInt(0), + }, + }, }, }, shared.AlteredAccountsOptions{ WithAdditionalOutportData: true, @@ -1251,11 +1366,11 @@ func textExtractAlteredAccountsFromPoolTransactionValueNil(t *testing.T) { 
require.NoError(t, err) encodedAddrSnd, _ := args.AddressConverter.Encode([]byte("snd")) - require.Equal(t, map[string]*outportcore.AlteredAccount{ + require.Equal(t, map[string]*alteredAccount.AlteredAccount{ encodedAddrSnd: { Address: encodedAddrSnd, Balance: "15", - AdditionalData: &outportcore.AdditionalAccountData{ + AdditionalData: &alteredAccount.AdditionalAccountData{ BalanceChanged: true, IsSender: true, }, diff --git a/outport/process/alteredaccounts/tokensProcessor.go b/outport/process/alteredaccounts/tokensProcessor.go index 7807e25d73e..a6f0ef4b276 100644 --- a/outport/process/alteredaccounts/tokensProcessor.go +++ b/outport/process/alteredaccounts/tokensProcessor.go @@ -41,12 +41,12 @@ func newTokensProcessor(shardCoordinator sharding.Coordinator) *tokensProcessor } func (tp *tokensProcessor) extractESDTAccounts( - txPool *outportcore.Pool, + txPool *outportcore.TransactionPool, markedAlteredAccounts map[string]*markedAlteredAccount, ) error { var err error for _, txLog := range txPool.Logs { - for _, event := range txLog.LogHandler.GetLogEvents() { + for _, event := range txLog.Log.Events { err = tp.processEvent(event, markedAlteredAccounts) if err != nil { return err diff --git a/outport/process/disabled/disabledOutportDataProvider.go b/outport/process/disabled/disabledOutportDataProvider.go index 28e0fd4d7c9..777ea1bcc5b 100644 --- a/outport/process/disabled/disabledOutportDataProvider.go +++ b/outport/process/disabled/disabledOutportDataProvider.go @@ -13,8 +13,8 @@ func NewDisabledOutportDataProvider() *disabledOutportDataProvider { } // PrepareOutportSaveBlockData wil do nothing -func (d *disabledOutportDataProvider) PrepareOutportSaveBlockData(_ process.ArgPrepareOutportSaveBlockData) (*outportcore.ArgsSaveBlockData, error) { - return &outportcore.ArgsSaveBlockData{}, nil +func (d *disabledOutportDataProvider) PrepareOutportSaveBlockData(_ process.ArgPrepareOutportSaveBlockData) (*outportcore.OutportBlockWithHeaderAndBody, error) { + return &outportcore.OutportBlockWithHeaderAndBody{}, nil } // IsInterfaceNil returns true if there is no value under the interface diff --git a/outport/process/errors.go b/outport/process/errors.go index 42ccbdac3a8..e7881c29a59 100644 --- a/outport/process/errors.go +++ b/outport/process/errors.go @@ -17,6 +17,16 @@ var errNilHeaderHandler = errors.New("nil header handler") // errNilBodyHandler signal that provided body handler is nil var errNilBodyHandler = errors.New("nil body handler") +var errCannotCastTransaction = errors.New("cannot cast transaction") + +var errCannotCastSCR = errors.New("cannot cast smart contract result") + +var errCannotCastReward = errors.New("cannot cast reward transaction") + +var errCannotCastReceipt = errors.New("cannot cast receipt transaction") + +var errCannotCastLog = errors.New("cannot cast log") + // ErrNilHasher signals that a nil hasher has been provided var ErrNilHasher = errors.New("nil hasher provided") diff --git a/outport/process/executionOrder/dtos.go b/outport/process/executionOrder/dtos.go index d1cbba8ebd2..dec622f236c 100644 --- a/outport/process/executionOrder/dtos.go +++ b/outport/process/executionOrder/dtos.go @@ -17,13 +17,13 @@ type ArgSorter struct { } type resultsTransactionsToMe struct { - transactionsToMe []data.TransactionHandlerWithGasUsedAndFee - scheduledTransactionsToMe []data.TransactionHandlerWithGasUsedAndFee - scrsToMe map[string]data.TransactionHandlerWithGasUsedAndFee + transactionsToMe []data.TxWithExecutionOrderHandler + scheduledTransactionsToMe 
[]data.TxWithExecutionOrderHandler + scrsToMe map[string]data.TxWithExecutionOrderHandler } type resultsTransactionsFromMe struct { - transactionsFromMe []data.TransactionHandlerWithGasUsedAndFee - scheduledTransactionsFromMe []data.TransactionHandlerWithGasUsedAndFee + transactionsFromMe []data.TxWithExecutionOrderHandler + scheduledTransactionsFromMe []data.TxWithExecutionOrderHandler scheduledExecutedInvalidTxsHashesPrevBlock []string } diff --git a/outport/process/executionOrder/scrs.go b/outport/process/executionOrder/scrs.go index 0a22a6fad0e..1eae7a00a53 100644 --- a/outport/process/executionOrder/scrs.go +++ b/outport/process/executionOrder/scrs.go @@ -4,10 +4,9 @@ import ( "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-core-go/data/block" "github.com/multiversx/mx-chain-core-go/data/outport" - "github.com/multiversx/mx-chain-core-go/data/smartContractResult" ) -func setOrderSmartContractResults(pool *outport.Pool, scheduledMbsFromPreviousBlock []*block.MiniBlock, scrsToMe map[string]data.TransactionHandlerWithGasUsedAndFee) []string { +func setOrderSmartContractResults(pool *outport.TransactionPool, scheduledMbsFromPreviousBlock []*block.MiniBlock, scrsToMe map[string]data.TxWithExecutionOrderHandler) []string { scheduledExecutedTxsPrevBlockMap := make(map[string]struct{}) for _, mb := range scheduledMbsFromPreviousBlock { for _, txHash := range mb.TxHashes { @@ -16,25 +15,22 @@ func setOrderSmartContractResults(pool *outport.Pool, scheduledMbsFromPreviousBl } scheduledExecutedSCRsPrevBlock := make([]string, 0) - scrsWithNoTxInCurrentShard := make(map[string]map[string]data.TransactionHandlerWithGasUsedAndFee) - for scrHash, scrHandler := range pool.Scrs { - scr, ok := scrHandler.GetTxHandler().(*smartContractResult.SmartContractResult) - if !ok { - continue - } + scrsWithNoTxInCurrentShard := make(map[string]map[string]data.TxWithExecutionOrderHandler) + for scrHash, scrHandler := range pool.SmartContractResults { + scr := scrHandler.SmartContractResult _, originalTxWasScheduledExecuted := scheduledExecutedTxsPrevBlockMap[string(scr.OriginalTxHash)] if originalTxWasScheduledExecuted { scheduledExecutedSCRsPrevBlock = append(scheduledExecutedSCRsPrevBlock, scrHash) } - tx, found := pool.Txs[string(scr.OriginalTxHash)] + tx, found := pool.Transactions[string(scr.OriginalTxHash)] if !found { groupScrsWithNoTxInCurrentShard(scrsWithNoTxInCurrentShard, string(scr.OriginalTxHash), scrHandler, scrHash) continue } - scrHandler.SetExecutionOrder(tx.GetExecutionOrder()) + scrHandler.ExecutionOrder = tx.GetExecutionOrder() } setExecutionOrderScrsWithNoTxInCurrentShard(scrsWithNoTxInCurrentShard, scrsToMe) @@ -42,18 +38,23 @@ func setOrderSmartContractResults(pool *outport.Pool, scheduledMbsFromPreviousBl return scheduledExecutedSCRsPrevBlock } -func groupScrsWithNoTxInCurrentShard(scrsWithNoTxInCurrentShard map[string]map[string]data.TransactionHandlerWithGasUsedAndFee, originalTxHash string, scrHandler data.TransactionHandlerWithGasUsedAndFee, scrHash string) { +func groupScrsWithNoTxInCurrentShard( + scrsWithNoTxInCurrentShard map[string]map[string]data.TxWithExecutionOrderHandler, + originalTxHash string, + scrHandler data.TxWithExecutionOrderHandler, + scrHash string, +) { _, ok := scrsWithNoTxInCurrentShard[originalTxHash] if !ok { - scrsWithNoTxInCurrentShard[originalTxHash] = make(map[string]data.TransactionHandlerWithGasUsedAndFee, 0) + scrsWithNoTxInCurrentShard[originalTxHash] = make(map[string]data.TxWithExecutionOrderHandler, 0) } 
scrsWithNoTxInCurrentShard[originalTxHash][scrHash] = scrHandler } -func setExecutionOrderScrsWithNoTxInCurrentShard(groupedScrsByOriginalTxHash map[string]map[string]data.TransactionHandlerWithGasUsedAndFee, scrsToMe map[string]data.TransactionHandlerWithGasUsedAndFee) { +func setExecutionOrderScrsWithNoTxInCurrentShard(groupedScrsByOriginalTxHash map[string]map[string]data.TxWithExecutionOrderHandler, scrsToMe map[string]data.TxWithExecutionOrderHandler) { for _, scrsGrouped := range groupedScrsByOriginalTxHash { - maxOrder := 0 + maxOrder := uint32(0) for _, scr := range scrsGrouped { if maxOrder < scr.GetExecutionOrder() { maxOrder = scr.GetExecutionOrder() diff --git a/outport/process/executionOrder/scrs_test.go b/outport/process/executionOrder/scrs_test.go index 2bf139eb923..2a615237267 100644 --- a/outport/process/executionOrder/scrs_test.go +++ b/outport/process/executionOrder/scrs_test.go @@ -11,9 +11,9 @@ import ( "github.com/stretchr/testify/require" ) -func newScr(nonce uint64, originalTxHash string, execOrder int) data.TransactionHandlerWithGasUsedAndFee { - return &outport.TransactionHandlerWithGasAndFee{ - TransactionHandler: &smartContractResult.SmartContractResult{ +func newScr(nonce uint64, originalTxHash string, execOrder uint32) *outport.SCRInfo { + return &outport.SCRInfo{ + SmartContractResult: &smartContractResult.SmartContractResult{ Nonce: nonce, OriginalTxHash: []byte(originalTxHash), }, @@ -25,11 +25,11 @@ func TestSetOrderSmartContractResults(t *testing.T) { t.Parallel() txHash, txHashNotInPool, scrHash1, scrsHash2, scrsHash3, scrHashToMe := "tx", "txHashNotInPool", "scr1", "scr2", "scr3", "scrHashToMe" - pool := &outport.Pool{ - Txs: map[string]data.TransactionHandlerWithGasUsedAndFee{ - txHash: &outport.TransactionHandlerWithGasAndFee{TransactionHandler: &transaction.Transaction{}, ExecutionOrder: 1}, + pool := &outport.TransactionPool{ + Transactions: map[string]*outport.TxInfo{ + txHash: {Transaction: &transaction.Transaction{}, ExecutionOrder: 1}, }, - Scrs: map[string]data.TransactionHandlerWithGasUsedAndFee{ + SmartContractResults: map[string]*outport.SCRInfo{ scrHash1: newScr(0, txHash, 0), scrsHash2: newScr(1, txHashNotInPool, 0), scrsHash3: newScr(2, txHashNotInPool, 2), @@ -37,15 +37,14 @@ func TestSetOrderSmartContractResults(t *testing.T) { }, } - setOrderSmartContractResults(pool, []*block.MiniBlock{}, map[string]data.TransactionHandlerWithGasUsedAndFee{ - scrHashToMe: newScr(3, txHashNotInPool, 1), - }) + setOrderSmartContractResults(pool, []*block.MiniBlock{}, map[string]data.TxWithExecutionOrderHandler{ + scrHashToMe: newScr(3, txHashNotInPool, 1)}) - require.Equal(t, &outport.Pool{ - Txs: map[string]data.TransactionHandlerWithGasUsedAndFee{ - txHash: &outport.TransactionHandlerWithGasAndFee{TransactionHandler: &transaction.Transaction{}, ExecutionOrder: 1}, + require.Equal(t, &outport.TransactionPool{ + Transactions: map[string]*outport.TxInfo{ + txHash: {Transaction: &transaction.Transaction{}, ExecutionOrder: 1}, }, - Scrs: map[string]data.TransactionHandlerWithGasUsedAndFee{ + SmartContractResults: map[string]*outport.SCRInfo{ scrHash1: newScr(0, txHash, 1), scrsHash2: newScr(1, txHashNotInPool, 2), scrsHash3: newScr(2, txHashNotInPool, 2), diff --git a/outport/process/executionOrder/transactionsExecutionOrder.go b/outport/process/executionOrder/transactionsExecutionOrder.go index 06fe9958968..15603a7e2fa 100644 --- a/outport/process/executionOrder/transactionsExecutionOrder.go +++ 
b/outport/process/executionOrder/transactionsExecutionOrder.go @@ -49,7 +49,7 @@ func NewSorter(arg ArgSorter) (*sorter, error) { // PutExecutionOrderInTransactionPool will put the execution order for every transaction and smart contract result func (s *sorter) PutExecutionOrderInTransactionPool( - pool *outport.Pool, + pool *outport.TransactionPool, header data.HeaderHandler, body data.BodyHandler, prevHeader data.HeaderHandler, @@ -93,7 +93,7 @@ func (s *sorter) PutExecutionOrderInTransactionPool( allTransaction = append(allTransaction, resultTxsFromMe.scheduledTransactionsFromMe...) for idx, tx := range allTransaction { - tx.SetExecutionOrder(idx) + tx.SetExecutionOrder(uint32(idx)) } scheduledExecutedSCRSHashesPrevBlock := setOrderSmartContractResults(pool, scheduledMbsFromPreviousBlock, resultsTxsToMe.scrsToMe) @@ -101,7 +101,7 @@ func (s *sorter) PutExecutionOrderInTransactionPool( return scheduledExecutedSCRSHashesPrevBlock, resultTxsFromMe.scheduledExecutedInvalidTxsHashesPrevBlock, nil } -func (s *sorter) sortTransactions(transactions []data.TransactionHandlerWithGasUsedAndFee, header data.HeaderHandler) { +func (s *sorter) sortTransactions(transactions []data.TxWithExecutionOrderHandler, header data.HeaderHandler) { if s.enableEpochsHandler.IsFrontRunningProtectionFlagEnabled() { txsSort.SortTransactionsBySenderAndNonceWithFrontRunningProtectionExtendedTransactions(transactions, s.hasher, header.GetPrevRandSeed()) } else { @@ -110,14 +110,14 @@ func (s *sorter) sortTransactions(transactions []data.TransactionHandlerWithGasU } func (s *sorter) extractTransactionsGroupedFromMe( - pool *outport.Pool, blockBody *block.Body, header data.HeaderHandler, scheduledMbsFromPreviousBlock []*block.MiniBlock, + pool *outport.TransactionPool, blockBody *block.Body, header data.HeaderHandler, scheduledMbsFromPreviousBlock []*block.MiniBlock, ) (*resultsTransactionsFromMe, error) { - transactionsFromMe := make([]data.TransactionHandlerWithGasUsedAndFee, 0) - scheduledTransactionsFromMe := make([]data.TransactionHandlerWithGasUsedAndFee, 0) + transactionsFromMe := make([]data.TxWithExecutionOrderHandler, 0) + scheduledTransactionsFromMe := make([]data.TxWithExecutionOrderHandler, 0) scheduledExecutedInvalidTxsHashesPrevBlock := make([]string, 0) for mbIndex, mb := range blockBody.MiniBlocks { - var txs []data.TransactionHandlerWithGasUsedAndFee + var txs []data.TxWithExecutionOrderHandler var err error if isScheduledMBProcessed(header, mbIndex) { continue @@ -129,7 +129,7 @@ func (s *sorter) extractTransactionsGroupedFromMe( } if mb.Type == block.TxBlock { - txs, err = extractTxsFromMap(mb.TxHashes, pool.Txs) + txs, err = extractTxsFromMap(mb.TxHashes, pool.Transactions) } if mb.Type == block.InvalidBlock { var scheduledExecutedInvalidTxsHashesCurrentMB []string @@ -155,9 +155,9 @@ func (s *sorter) extractTransactionsGroupedFromMe( }, nil } -func (s *sorter) getInvalidTxsExecutedInCurrentBlock(scheduledMbsFromPreviousBlock []*block.MiniBlock, mb *block.MiniBlock, pool *outport.Pool) ([]data.TransactionHandlerWithGasUsedAndFee, []string, error) { +func (s *sorter) getInvalidTxsExecutedInCurrentBlock(scheduledMbsFromPreviousBlock []*block.MiniBlock, mb *block.MiniBlock, pool *outport.TransactionPool) ([]data.TxWithExecutionOrderHandler, []string, error) { if len(scheduledMbsFromPreviousBlock) == 0 { - txs, err := extractTxsFromMap(mb.TxHashes, pool.Invalid) + txs, err := extractTxsFromMap(mb.TxHashes, pool.InvalidTxs) return txs, []string{}, err } @@ -173,24 +173,24 @@ func (s *sorter) 
getInvalidTxsExecutedInCurrentBlock(scheduledMbsFromPreviousBlo for _, hash := range mb.TxHashes { _, found := allScheduledTxs[string(hash)] if found { - scheduledExecutedInvalidTxsHashesPrevBlock = append(scheduledExecutedInvalidTxsHashesPrevBlock, string(hash)) + scheduledExecutedInvalidTxsHashesPrevBlock = append(scheduledExecutedInvalidTxsHashesPrevBlock, hex.EncodeToString(hash)) continue } invalidTxHashes = append(invalidTxHashes, hash) } - txs, err := extractTxsFromMap(invalidTxHashes, pool.Invalid) + txs, err := extractTxsFromMap(invalidTxHashes, pool.InvalidTxs) return txs, scheduledExecutedInvalidTxsHashesPrevBlock, err } -func extractNormalTransactionAndScrsToMe(pool *outport.Pool, blockBody *block.Body, header data.HeaderHandler) (*resultsTransactionsToMe, error) { - transactionsToMe := make([]data.TransactionHandlerWithGasUsedAndFee, 0) - scheduledTransactionsToMe := make([]data.TransactionHandlerWithGasUsedAndFee, 0) - scrsToMe := make(map[string]data.TransactionHandlerWithGasUsedAndFee) +func extractNormalTransactionAndScrsToMe(pool *outport.TransactionPool, blockBody *block.Body, header data.HeaderHandler) (*resultsTransactionsToMe, error) { + transactionsToMe := make([]data.TxWithExecutionOrderHandler, 0) + scheduledTransactionsToMe := make([]data.TxWithExecutionOrderHandler, 0) + scrsToMe := make(map[string]data.TxWithExecutionOrderHandler) for mbIndex, mb := range blockBody.MiniBlocks { var err error - var txs []data.TransactionHandlerWithGasUsedAndFee + var txs []data.TxWithExecutionOrderHandler if isScheduledMBProcessed(header, mbIndex) { continue } @@ -202,14 +202,14 @@ func extractNormalTransactionAndScrsToMe(pool *outport.Pool, blockBody *block.Bo executedTxsHashes := extractExecutedTxHashes(mbIndex, mb.TxHashes, header) if mb.Type == block.TxBlock { - txs, err = extractTxsFromMap(executedTxsHashes, pool.Txs) + txs, err = extractTxsFromMap(executedTxsHashes, pool.Transactions) } if mb.Type == block.SmartContractResultBlock { - txs, err = extractTxsFromMap(executedTxsHashes, pool.Scrs) - extractAndPutScrsToDestinationMap(executedTxsHashes, pool.Scrs, scrsToMe) + txs, err = extractSCRsFromMap(executedTxsHashes, pool.SmartContractResults) + extractAndPutScrsToDestinationMap(executedTxsHashes, pool.SmartContractResults, scrsToMe) } if mb.Type == block.RewardsBlock { - txs, err = extractTxsFromMap(executedTxsHashes, pool.Rewards) + txs, err = extractRewardsFromMap(executedTxsHashes, pool.Rewards) } if err != nil { return nil, err @@ -229,9 +229,9 @@ func extractNormalTransactionAndScrsToMe(pool *outport.Pool, blockBody *block.Bo }, nil } -func getRewardsTxsFromMe(pool *outport.Pool, blockBody *block.Body, header data.HeaderHandler) ([]data.TransactionHandlerWithGasUsedAndFee, error) { +func getRewardsTxsFromMe(pool *outport.TransactionPool, blockBody *block.Body, header data.HeaderHandler) ([]data.TxWithExecutionOrderHandler, error) { rewardsTxsHashes := make([][]byte, 0) - rewardsTxs := make([]data.TransactionHandlerWithGasUsedAndFee, 0) + rewardsTxs := make([]data.TxWithExecutionOrderHandler, 0) if header.GetShardID() != core.MetachainShardId { return rewardsTxs, nil } @@ -243,15 +243,16 @@ func getRewardsTxsFromMe(pool *outport.Pool, blockBody *block.Body, header data. rewardsTxsHashes = append(rewardsTxsHashes, mb.TxHashes...) 
} - return extractTxsFromMap(rewardsTxsHashes, pool.Rewards) + return extractRewardsFromMap(rewardsTxsHashes, pool.Rewards) } -func extractTxsFromMap(txsHashes [][]byte, txs map[string]data.TransactionHandlerWithGasUsedAndFee) ([]data.TransactionHandlerWithGasUsedAndFee, error) { - result := make([]data.TransactionHandlerWithGasUsedAndFee, 0, len(txsHashes)) +func extractTxsFromMap(txsHashes [][]byte, txs map[string]*outport.TxInfo) ([]data.TxWithExecutionOrderHandler, error) { + result := make([]data.TxWithExecutionOrderHandler, 0, len(txsHashes)) for _, txHash := range txsHashes { - tx, found := txs[string(txHash)] + txHashHex := hex.EncodeToString(txHash) + tx, found := txs[txHashHex] if !found { - return nil, fmt.Errorf("cannot find transaction in pool, txHash: %s", hex.EncodeToString(txHash)) + return nil, fmt.Errorf("cannot find transaction in pool, txHash: %s", txHashHex) } result = append(result, tx) } @@ -259,6 +260,34 @@ func extractTxsFromMap(txsHashes [][]byte, txs map[string]data.TransactionHandle return result, nil } +func extractSCRsFromMap(txsHashes [][]byte, scrs map[string]*outport.SCRInfo) ([]data.TxWithExecutionOrderHandler, error) { + result := make([]data.TxWithExecutionOrderHandler, 0, len(txsHashes)) + for _, txHash := range txsHashes { + txHashHex := hex.EncodeToString(txHash) + scr, found := scrs[txHashHex] + if !found { + return nil, fmt.Errorf("cannot find scr in pool, txHash: %s", txHashHex) + } + result = append(result, scr) + } + + return result, nil +} + +func extractRewardsFromMap(txsHashes [][]byte, rewards map[string]*outport.RewardInfo) ([]data.TxWithExecutionOrderHandler, error) { + result := make([]data.TxWithExecutionOrderHandler, 0, len(txsHashes)) + for _, txHash := range txsHashes { + txHashHex := hex.EncodeToString(txHash) + reward, found := rewards[txHashHex] + if !found { + return nil, fmt.Errorf("cannot find reward in pool, txHash: %s", txHashHex) + } + result = append(result, reward) + } + + return result, nil +} + func extractExecutedTxHashes(mbIndex int, mbTxHashes [][]byte, header data.HeaderHandler) [][]byte { miniblockHeaders := header.GetMiniBlockHeaderHandlers() if len(miniblockHeaders) <= mbIndex { @@ -271,9 +300,10 @@ func extractExecutedTxHashes(mbIndex int, mbTxHashes [][]byte, header data.Heade return mbTxHashes[firstProcessed : lastProcessed+1] } -func extractAndPutScrsToDestinationMap(scrsHashes [][]byte, scrsMap, destinationMap map[string]data.TransactionHandlerWithGasUsedAndFee) { +func extractAndPutScrsToDestinationMap(scrsHashes [][]byte, scrsMap map[string]*outport.SCRInfo, destinationMap map[string]data.TxWithExecutionOrderHandler) { for _, scrHash := range scrsHashes { - scr, found := scrsMap[string(scrHash)] + scrHashHex := hex.EncodeToString(scrHash) + scr, found := scrsMap[scrHashHex] if !found { continue } diff --git a/outport/process/executionOrder/transactionsExecutionOrder_test.go b/outport/process/executionOrder/transactionsExecutionOrder_test.go index 5fe94392217..b2e09e47da2 100644 --- a/outport/process/executionOrder/transactionsExecutionOrder_test.go +++ b/outport/process/executionOrder/transactionsExecutionOrder_test.go @@ -1,12 +1,13 @@ package executionOrder import ( + "encoding/hex" "testing" "github.com/multiversx/mx-chain-core-go/core" - "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-core-go/data/block" "github.com/multiversx/mx-chain-core-go/data/outport" + "github.com/multiversx/mx-chain-core-go/data/receipt" "github.com/multiversx/mx-chain-core-go/data/rewardTx" 
"github.com/multiversx/mx-chain-core-go/data/smartContractResult" "github.com/multiversx/mx-chain-core-go/data/transaction" @@ -128,79 +129,79 @@ func TestAddExecutionOrderInTransactionPool(t *testing.T) { }, } - pool := &outport.Pool{ - Txs: map[string]data.TransactionHandlerWithGasUsedAndFee{ - string(txHashToMe): &outport.TransactionHandlerWithGasAndFee{TransactionHandler: &transaction.Transaction{Nonce: 1}}, - string(txHashFromMe): &outport.TransactionHandlerWithGasAndFee{TransactionHandler: &transaction.Transaction{Nonce: 2}}, + pool := &outport.TransactionPool{ + Transactions: map[string]*outport.TxInfo{ + hex.EncodeToString(txHashToMe): {Transaction: &transaction.Transaction{Nonce: 1}}, + hex.EncodeToString(txHashFromMe): {Transaction: &transaction.Transaction{Nonce: 2}}, }, - Scrs: map[string]data.TransactionHandlerWithGasUsedAndFee{ - string(scrHashToMe): &outport.TransactionHandlerWithGasAndFee{TransactionHandler: &smartContractResult.SmartContractResult{Nonce: 3}}, - string(scrHashFromMe): &outport.TransactionHandlerWithGasAndFee{TransactionHandler: &smartContractResult.SmartContractResult{ + SmartContractResults: map[string]*outport.SCRInfo{ + hex.EncodeToString(scrHashToMe): {SmartContractResult: &smartContractResult.SmartContractResult{Nonce: 3}}, + hex.EncodeToString(scrHashFromMe): {SmartContractResult: &smartContractResult.SmartContractResult{ Nonce: 4, OriginalTxHash: txHashToMe, }}, - string(scrHashIntra): &outport.TransactionHandlerWithGasAndFee{TransactionHandler: &smartContractResult.SmartContractResult{ + hex.EncodeToString(scrHashIntra): {SmartContractResult: &smartContractResult.SmartContractResult{ Nonce: 0, OriginalTxHash: txHashToMe, }}, }, - Rewards: map[string]data.TransactionHandlerWithGasUsedAndFee{ - string(rewardTxHash): &outport.TransactionHandlerWithGasAndFee{TransactionHandler: &rewardTx.RewardTx{}}, + Rewards: map[string]*outport.RewardInfo{ + hex.EncodeToString(rewardTxHash): {Reward: &rewardTx.RewardTx{}}, }, - Invalid: map[string]data.TransactionHandlerWithGasUsedAndFee{ - string(invalidTxHash): &outport.TransactionHandlerWithGasAndFee{TransactionHandler: &transaction.Transaction{Nonce: 5}}, + InvalidTxs: map[string]*outport.TxInfo{ + hex.EncodeToString(invalidTxHash): {Transaction: &transaction.Transaction{Nonce: 5}}, }, - Receipts: map[string]data.TransactionHandlerWithGasUsedAndFee{}, + Receipts: map[string]*receipt.Receipt{}, Logs: nil, } _, _, err := s.PutExecutionOrderInTransactionPool(pool, header, blockBody, &block.Header{}) require.Nil(t, err) - require.Equal(t, &outport.Pool{ - Txs: map[string]data.TransactionHandlerWithGasUsedAndFee{ - string(txHashToMe): &outport.TransactionHandlerWithGasAndFee{ - TransactionHandler: &transaction.Transaction{Nonce: 1}, - ExecutionOrder: 0, + require.Equal(t, &outport.TransactionPool{ + Transactions: map[string]*outport.TxInfo{ + hex.EncodeToString(txHashToMe): { + Transaction: &transaction.Transaction{Nonce: 1}, + ExecutionOrder: 0, }, - string(txHashFromMe): &outport.TransactionHandlerWithGasAndFee{ - TransactionHandler: &transaction.Transaction{Nonce: 2}, - ExecutionOrder: 3, + hex.EncodeToString(txHashFromMe): { + Transaction: &transaction.Transaction{Nonce: 2}, + ExecutionOrder: 3, }, }, - Scrs: map[string]data.TransactionHandlerWithGasUsedAndFee{ - string(scrHashToMe): &outport.TransactionHandlerWithGasAndFee{ - TransactionHandler: &smartContractResult.SmartContractResult{Nonce: 3}, - ExecutionOrder: 1, + SmartContractResults: map[string]*outport.SCRInfo{ + hex.EncodeToString(scrHashToMe): { + 
SmartContractResult: &smartContractResult.SmartContractResult{Nonce: 3}, + ExecutionOrder: 1, }, - string(scrHashFromMe): &outport.TransactionHandlerWithGasAndFee{ - TransactionHandler: &smartContractResult.SmartContractResult{ + hex.EncodeToString(scrHashFromMe): { + SmartContractResult: &smartContractResult.SmartContractResult{ Nonce: 4, OriginalTxHash: txHashToMe, }, ExecutionOrder: 0, }, - string(scrHashIntra): &outport.TransactionHandlerWithGasAndFee{ - TransactionHandler: &smartContractResult.SmartContractResult{ + hex.EncodeToString(scrHashIntra): { + SmartContractResult: &smartContractResult.SmartContractResult{ Nonce: 0, OriginalTxHash: txHashToMe, }, ExecutionOrder: 0, }, }, - Rewards: map[string]data.TransactionHandlerWithGasUsedAndFee{ - string(rewardTxHash): &outport.TransactionHandlerWithGasAndFee{ - TransactionHandler: &rewardTx.RewardTx{}, - ExecutionOrder: 2, + Rewards: map[string]*outport.RewardInfo{ + hex.EncodeToString(rewardTxHash): { + Reward: &rewardTx.RewardTx{}, + ExecutionOrder: 2, }, }, - Invalid: map[string]data.TransactionHandlerWithGasUsedAndFee{ - string(invalidTxHash): &outport.TransactionHandlerWithGasAndFee{ - TransactionHandler: &transaction.Transaction{Nonce: 5}, - ExecutionOrder: 4, + InvalidTxs: map[string]*outport.TxInfo{ + hex.EncodeToString(invalidTxHash): { + Transaction: &transaction.Transaction{Nonce: 5}, + ExecutionOrder: 4, }, }, - Receipts: map[string]data.TransactionHandlerWithGasUsedAndFee{}, + Receipts: map[string]*receipt.Receipt{}, Logs: nil, }, pool) } @@ -254,25 +255,25 @@ func TestAddExecutionOrderInTransactionPoolFromMeTransactionAndScheduled(t *test }, } - pool := &outport.Pool{ - Txs: map[string]data.TransactionHandlerWithGasUsedAndFee{ - string(firstTxHash): &outport.TransactionHandlerWithGasAndFee{TransactionHandler: &transaction.Transaction{Nonce: 1}}, - string(secondTxHash): &outport.TransactionHandlerWithGasAndFee{TransactionHandler: &transaction.Transaction{Nonce: 2}}, + pool := &outport.TransactionPool{ + Transactions: map[string]*outport.TxInfo{ + hex.EncodeToString(firstTxHash): {Transaction: &transaction.Transaction{Nonce: 1}}, + hex.EncodeToString(secondTxHash): {Transaction: &transaction.Transaction{Nonce: 2}}, }, } _, _, err := s.PutExecutionOrderInTransactionPool(pool, header, blockBody, &block.Header{}) require.Nil(t, err) - require.Equal(t, &outport.Pool{ - Txs: map[string]data.TransactionHandlerWithGasUsedAndFee{ - string(firstTxHash): &outport.TransactionHandlerWithGasAndFee{ - TransactionHandler: &transaction.Transaction{Nonce: 1}, - ExecutionOrder: 0, + require.Equal(t, &outport.TransactionPool{ + Transactions: map[string]*outport.TxInfo{ + hex.EncodeToString(firstTxHash): { + Transaction: &transaction.Transaction{Nonce: 1}, + ExecutionOrder: 0, }, - string(secondTxHash): &outport.TransactionHandlerWithGasAndFee{ - TransactionHandler: &transaction.Transaction{Nonce: 2}, - ExecutionOrder: 1, + hex.EncodeToString(secondTxHash): { + Transaction: &transaction.Transaction{Nonce: 2}, + ExecutionOrder: 1, }, }, }, pool) @@ -367,15 +368,15 @@ func TestAddExecutionOrderInTransactionPoolFromMeTransactionAndScheduledInvalid( }, } - pool := &outport.Pool{ - Txs: map[string]data.TransactionHandlerWithGasUsedAndFee{ - string(secondTxHash): &outport.TransactionHandlerWithGasAndFee{TransactionHandler: &transaction.Transaction{Nonce: 2}}, + pool := &outport.TransactionPool{ + Transactions: map[string]*outport.TxInfo{ + hex.EncodeToString(secondTxHash): {Transaction: &transaction.Transaction{Nonce: 2}}, }, - Invalid: 
map[string]data.TransactionHandlerWithGasUsedAndFee{ - string(firstTxHash): &outport.TransactionHandlerWithGasAndFee{TransactionHandler: &transaction.Transaction{Nonce: 1}}, + InvalidTxs: map[string]*outport.TxInfo{ + hex.EncodeToString(firstTxHash): {Transaction: &transaction.Transaction{Nonce: 1}}, }, - Scrs: map[string]data.TransactionHandlerWithGasUsedAndFee{ - string(scrHash): &outport.TransactionHandlerWithGasAndFee{TransactionHandler: &smartContractResult.SmartContractResult{ + SmartContractResults: map[string]*outport.SCRInfo{ + hex.EncodeToString(scrHash): {SmartContractResult: &smartContractResult.SmartContractResult{ Nonce: 3, OriginalTxHash: scheduledTx, }}, @@ -384,22 +385,22 @@ func TestAddExecutionOrderInTransactionPoolFromMeTransactionAndScheduledInvalid( scrsHashes, invalidTxsHashes, err := s.PutExecutionOrderInTransactionPool(pool, header, blockBody, prevHeader) require.Nil(t, err) - require.Equal(t, &outport.Pool{ - Txs: map[string]data.TransactionHandlerWithGasUsedAndFee{ - string(secondTxHash): &outport.TransactionHandlerWithGasAndFee{ - TransactionHandler: &transaction.Transaction{Nonce: 2}, - ExecutionOrder: 1, + require.Equal(t, &outport.TransactionPool{ + Transactions: map[string]*outport.TxInfo{ + hex.EncodeToString(secondTxHash): { + Transaction: &transaction.Transaction{Nonce: 2}, + ExecutionOrder: 1, }, }, - Invalid: map[string]data.TransactionHandlerWithGasUsedAndFee{ - string(firstTxHash): &outport.TransactionHandlerWithGasAndFee{ - TransactionHandler: &transaction.Transaction{Nonce: 1}, - ExecutionOrder: 0, + InvalidTxs: map[string]*outport.TxInfo{ + hex.EncodeToString(firstTxHash): { + Transaction: &transaction.Transaction{Nonce: 1}, + ExecutionOrder: 0, }, }, - Scrs: map[string]data.TransactionHandlerWithGasUsedAndFee{ - string(scrHash): &outport.TransactionHandlerWithGasAndFee{ - TransactionHandler: &smartContractResult.SmartContractResult{ + SmartContractResults: map[string]*outport.SCRInfo{ + hex.EncodeToString(scrHash): { + SmartContractResult: &smartContractResult.SmartContractResult{ Nonce: 3, OriginalTxHash: scheduledTx, }, @@ -408,6 +409,6 @@ func TestAddExecutionOrderInTransactionPoolFromMeTransactionAndScheduledInvalid( }, }, pool) - require.Equal(t, []string{string(scrHash)}, scrsHashes) - require.Equal(t, []string{string(scheduledInvalidTxHash)}, invalidTxsHashes) + require.Equal(t, []string{hex.EncodeToString(scrHash)}, scrsHashes) + require.Equal(t, []string{hex.EncodeToString(scheduledInvalidTxHash)}, invalidTxsHashes) } diff --git a/outport/process/interface.go b/outport/process/interface.go index bdf8e3efc73..25f675975ce 100644 --- a/outport/process/interface.go +++ b/outport/process/interface.go @@ -4,19 +4,20 @@ import ( "math/big" "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-core-go/data/alteredAccount" "github.com/multiversx/mx-chain-core-go/data/outport" "github.com/multiversx/mx-chain-go/outport/process/alteredaccounts/shared" ) // AlteredAccountsProviderHandler defines the functionality needed for provisioning of altered accounts when indexing data type AlteredAccountsProviderHandler interface { - ExtractAlteredAccountsFromPool(txPool *outport.Pool, options shared.AlteredAccountsOptions) (map[string]*outport.AlteredAccount, error) + ExtractAlteredAccountsFromPool(txPool *outport.TransactionPool, options shared.AlteredAccountsOptions) (map[string]*alteredAccount.AlteredAccount, error) IsInterfaceNil() bool } // TransactionsFeeHandler defines the functionality needed for computation of the 
transaction fee and gas used type TransactionsFeeHandler interface { - PutFeeAndGasUsed(pool *outport.Pool) error + PutFeeAndGasUsed(pool *outport.TransactionPool) error IsInterfaceNil() bool } @@ -41,7 +42,7 @@ type EconomicsDataHandler interface { // ExecutionOrderHandler defines the interface for the execution order handler type ExecutionOrderHandler interface { PutExecutionOrderInTransactionPool( - pool *outport.Pool, + pool *outport.TransactionPool, header data.HeaderHandler, body data.BodyHandler, prevHeader data.HeaderHandler, diff --git a/outport/process/outportDataProvider.go b/outport/process/outportDataProvider.go index 6c16ee99d07..6250bfa0960 100644 --- a/outport/process/outportDataProvider.go +++ b/outport/process/outportDataProvider.go @@ -1,6 +1,7 @@ package process import ( + "encoding/hex" "fmt" "math/big" @@ -9,6 +10,11 @@ import ( "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-core-go/data/block" outportcore "github.com/multiversx/mx-chain-core-go/data/outport" + "github.com/multiversx/mx-chain-core-go/data/receipt" + "github.com/multiversx/mx-chain-core-go/data/rewardTx" + "github.com/multiversx/mx-chain-core-go/data/smartContractResult" + "github.com/multiversx/mx-chain-core-go/data/transaction" + "github.com/multiversx/mx-chain-core-go/marshal" "github.com/multiversx/mx-chain-go/outport/process/alteredaccounts/shared" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/sharding" @@ -26,16 +32,21 @@ type ArgOutportDataProvider struct { GasConsumedProvider GasConsumedProvider EconomicsData EconomicsDataHandler ExecutionOrderHandler ExecutionOrderHandler + Marshaller marshal.Marshalizer } // ArgPrepareOutportSaveBlockData holds the arguments needed for prepare outport save block data type ArgPrepareOutportSaveBlockData struct { HeaderHash []byte Header data.HeaderHandler + HeaderBytes []byte + HeaderType string Body data.BodyHandler PreviousHeader data.HeaderHandler RewardsTxs map[string]data.TransactionHandler NotarizedHeadersHashes []string + HighestFinalBlockNonce uint64 + HighestFinalBlockHash []byte } type outportDataProvider struct { @@ -49,6 +60,7 @@ type outportDataProvider struct { gasConsumedProvider GasConsumedProvider economicsData EconomicsDataHandler executionOrderHandler ExecutionOrderHandler + marshaller marshal.Marshalizer } // NewOutportDataProvider will create a new instance of outportDataProvider @@ -63,11 +75,12 @@ func NewOutportDataProvider(arg ArgOutportDataProvider) (*outportDataProvider, e gasConsumedProvider: arg.GasConsumedProvider, economicsData: arg.EconomicsData, executionOrderHandler: arg.ExecutionOrderHandler, + marshaller: arg.Marshaller, }, nil } // PrepareOutportSaveBlockData will prepare the provided data in a format that will be accepted by an outport driver -func (odp *outportDataProvider) PrepareOutportSaveBlockData(arg ArgPrepareOutportSaveBlockData) (*outportcore.ArgsSaveBlockData, error) { +func (odp *outportDataProvider) PrepareOutportSaveBlockData(arg ArgPrepareOutportSaveBlockData) (*outportcore.OutportBlockWithHeaderAndBody, error) { if check.IfNil(arg.Header) { return nil, errNilHeaderHandler } @@ -75,8 +88,12 @@ func (odp *outportDataProvider) PrepareOutportSaveBlockData(arg ArgPrepareOutpor return nil, errNilBodyHandler } - pool := odp.createPool(arg.RewardsTxs) - err := odp.transactionsFeeProcessor.PutFeeAndGasUsed(pool) + pool, err := odp.createPool(arg.RewardsTxs) + if err != nil { + return nil, err + } + + err = 
odp.transactionsFeeProcessor.PutFeeAndGasUsed(pool) if err != nil { return nil, fmt.Errorf("transactionsFeeProcessor.PutFeeAndGasUsed %w", err) } @@ -101,22 +118,29 @@ func (odp *outportDataProvider) PrepareOutportSaveBlockData(arg ArgPrepareOutpor return nil, err } - return &outportcore.ArgsSaveBlockData{ - HeaderHash: arg.HeaderHash, - Body: arg.Body, - Header: arg.Header, - SignersIndexes: signersIndexes, - HeaderGasConsumption: outportcore.HeaderGasConsumption{ - GasProvided: odp.gasConsumedProvider.TotalGasProvidedWithScheduled(), - GasRefunded: odp.gasConsumedProvider.TotalGasRefunded(), - GasPenalized: odp.gasConsumedProvider.TotalGasPenalized(), - MaxGasPerBlock: odp.economicsData.MaxGasLimitPerBlock(odp.shardID), + return &outportcore.OutportBlockWithHeaderAndBody{ + OutportBlock: &outportcore.OutportBlock{ + BlockData: nil, // this will be filled with specific data for each driver + TransactionPool: pool, + HeaderGasConsumption: &outportcore.HeaderGasConsumption{ + GasProvided: odp.gasConsumedProvider.TotalGasProvidedWithScheduled(), + GasRefunded: odp.gasConsumedProvider.TotalGasRefunded(), + GasPenalized: odp.gasConsumedProvider.TotalGasPenalized(), + MaxGasPerBlock: odp.economicsData.MaxGasLimitPerBlock(odp.shardID), + }, + AlteredAccounts: alteredAccounts, + NotarizedHeadersHashes: arg.NotarizedHeadersHashes, + NumberOfShards: odp.numOfShards, + SignersIndexes: signersIndexes, + + HighestFinalBlockNonce: arg.HighestFinalBlockNonce, + HighestFinalBlockHash: arg.HighestFinalBlockHash, + }, + HeaderDataWithBody: &outportcore.HeaderDataWithBody{ + Body: arg.Body, + Header: arg.Header, + HeaderHash: arg.HeaderHash, }, - NotarizedHeadersHashes: arg.NotarizedHeadersHashes, - TransactionsPool: pool, - AlteredAccounts: alteredAccounts, - NumberOfShards: odp.numOfShards, - IsImportDB: odp.isImportDBMode, }, nil } @@ -150,7 +174,7 @@ func (odp *outportDataProvider) getSignersIndexes(header data.HeaderHandler) ([] return signersIndexes, nil } -func (odp *outportDataProvider) createPool(rewardsTxs map[string]data.TransactionHandler) *outportcore.Pool { +func (odp *outportDataProvider) createPool(rewardsTxs map[string]data.TransactionHandler) (*outportcore.TransactionPool, error) { if odp.shardID == core.MetachainShardId { return odp.createPoolForMeta(rewardsTxs) } @@ -158,33 +182,177 @@ func (odp *outportDataProvider) createPool(rewardsTxs map[string]data.Transactio return odp.createPoolForShard() } -func (odp *outportDataProvider) createPoolForShard() *outportcore.Pool { - return &outportcore.Pool{ - Txs: WrapTxsMap(odp.txCoordinator.GetAllCurrentUsedTxs(block.TxBlock)), - Scrs: WrapTxsMap(odp.txCoordinator.GetAllCurrentUsedTxs(block.SmartContractResultBlock)), - Rewards: WrapTxsMap(odp.txCoordinator.GetAllCurrentUsedTxs(block.RewardsBlock)), - Invalid: WrapTxsMap(odp.txCoordinator.GetAllCurrentUsedTxs(block.InvalidBlock)), - Receipts: WrapTxsMap(odp.txCoordinator.GetAllCurrentUsedTxs(block.ReceiptBlock)), - Logs: odp.txCoordinator.GetAllCurrentLogs(), +func (odp *outportDataProvider) createPoolForShard() (*outportcore.TransactionPool, error) { + txs, err := getTxs(odp.txCoordinator.GetAllCurrentUsedTxs(block.TxBlock)) + if err != nil { + return nil, err + } + + scrs, err := getScrs(odp.txCoordinator.GetAllCurrentUsedTxs(block.SmartContractResultBlock)) + if err != nil { + return nil, err + } + + rewards, err := getRewards(odp.txCoordinator.GetAllCurrentUsedTxs(block.RewardsBlock)) + if err != nil { + return nil, err + } + + invalidTxs, err := 
getTxs(odp.txCoordinator.GetAllCurrentUsedTxs(block.InvalidBlock)) + if err != nil { + return nil, err + } + + receipts, err := getReceipts(odp.txCoordinator.GetAllCurrentUsedTxs(block.ReceiptBlock)) + if err != nil { + return nil, err + } + + logs, err := getLogs(odp.txCoordinator.GetAllCurrentLogs()) + if err != nil { + return nil, err + } + + return &outportcore.TransactionPool{ + Transactions: txs, + SmartContractResults: scrs, + Rewards: rewards, + InvalidTxs: invalidTxs, + Receipts: receipts, + Logs: logs, + }, nil +} + +func (odp *outportDataProvider) createPoolForMeta(rewardsTxs map[string]data.TransactionHandler) (*outportcore.TransactionPool, error) { + txs, err := getTxs(odp.txCoordinator.GetAllCurrentUsedTxs(block.TxBlock)) + if err != nil { + return nil, err + } + + scrs, err := getScrs(odp.txCoordinator.GetAllCurrentUsedTxs(block.SmartContractResultBlock)) + if err != nil { + return nil, err + } + + rewards, err := getRewards(rewardsTxs) + if err != nil { + return nil, err + } + + logs, err := getLogs(odp.txCoordinator.GetAllCurrentLogs()) + if err != nil { + return nil, err + } + + return &outportcore.TransactionPool{ + Transactions: txs, + SmartContractResults: scrs, + Rewards: rewards, + Logs: logs, + }, nil +} + +func getTxs(txs map[string]data.TransactionHandler) (map[string]*outportcore.TxInfo, error) { + ret := make(map[string]*outportcore.TxInfo, len(txs)) + + for txHash, txHandler := range txs { + tx, castOk := txHandler.(*transaction.Transaction) + txHashHex := getHexEncodedHash(txHash) + if !castOk { + return nil, fmt.Errorf("%w, hash: %s", errCannotCastTransaction, txHashHex) + } + + ret[txHashHex] = &outportcore.TxInfo{ + Transaction: tx, + FeeInfo: newFeeInfo(), + } + } + + return ret, nil +} + +func getHexEncodedHash(txHash string) string { + txHashBytes := []byte(txHash) + return hex.EncodeToString(txHashBytes) +} + +func newFeeInfo() *outportcore.FeeInfo { + return &outportcore.FeeInfo{ + GasUsed: 0, + Fee: big.NewInt(0), + InitialPaidFee: big.NewInt(0), } } -func (odp *outportDataProvider) createPoolForMeta(rewardsTxs map[string]data.TransactionHandler) *outportcore.Pool { - return &outportcore.Pool{ - Txs: WrapTxsMap(odp.txCoordinator.GetAllCurrentUsedTxs(block.TxBlock)), - Scrs: WrapTxsMap(odp.txCoordinator.GetAllCurrentUsedTxs(block.SmartContractResultBlock)), - Rewards: WrapTxsMap(rewardsTxs), - Logs: odp.txCoordinator.GetAllCurrentLogs(), +func getScrs(scrs map[string]data.TransactionHandler) (map[string]*outportcore.SCRInfo, error) { + ret := make(map[string]*outportcore.SCRInfo, len(scrs)) + + for scrHash, txHandler := range scrs { + scr, castOk := txHandler.(*smartContractResult.SmartContractResult) + scrHashHex := getHexEncodedHash(scrHash) + if !castOk { + return nil, fmt.Errorf("%w, hash: %s", errCannotCastSCR, scrHashHex) + } + + ret[scrHashHex] = &outportcore.SCRInfo{ + SmartContractResult: scr, + FeeInfo: newFeeInfo(), + } } + + return ret, nil } -func WrapTxsMap(txs map[string]data.TransactionHandler) map[string]data.TransactionHandlerWithGasUsedAndFee { - newMap := make(map[string]data.TransactionHandlerWithGasUsedAndFee, len(txs)) - for txHash, tx := range txs { - newMap[txHash] = outportcore.NewTransactionHandlerWithGasAndFee(tx, 0, big.NewInt(0)) +func getRewards(rewards map[string]data.TransactionHandler) (map[string]*outportcore.RewardInfo, error) { + ret := make(map[string]*outportcore.RewardInfo, len(rewards)) + + for hash, txHandler := range rewards { + reward, castOk := txHandler.(*rewardTx.RewardTx) + hexHex := 
getHexEncodedHash(hash) + if !castOk { + return nil, fmt.Errorf("%w, hash: %s", errCannotCastReward, hexHex) + } + + ret[hexHex] = &outportcore.RewardInfo{ + Reward: reward, + } } - return newMap + return ret, nil +} + +func getReceipts(receipts map[string]data.TransactionHandler) (map[string]*receipt.Receipt, error) { + ret := make(map[string]*receipt.Receipt, len(receipts)) + + for hash, receiptHandler := range receipts { + tx, castOk := receiptHandler.(*receipt.Receipt) + hashHex := getHexEncodedHash(hash) + if !castOk { + return nil, fmt.Errorf("%w, hash: %s", errCannotCastReceipt, hashHex) + } + + ret[hashHex] = tx + } + + return ret, nil +} + +func getLogs(logs []*data.LogData) ([]*outportcore.LogData, error) { + ret := make([]*outportcore.LogData, len(logs)) + + for idx, logData := range logs { + txHashHex := getHexEncodedHash(logData.TxHash) + log, castOk := logData.LogHandler.(*transaction.Log) + if !castOk { + return nil, fmt.Errorf("%w, hash: %s", errCannotCastLog, txHashHex) + } + + ret[idx] = &outportcore.LogData{ + TxHash: txHashHex, + Log: log, + } + } + return ret, nil } // IsInterfaceNil returns true if there is no value under the interface diff --git a/outport/process/outportDataProvider_test.go b/outport/process/outportDataProvider_test.go index 0d7d4c75201..3aa79ef774a 100644 --- a/outport/process/outportDataProvider_test.go +++ b/outport/process/outportDataProvider_test.go @@ -29,6 +29,7 @@ func createArgOutportDataProvider() ArgOutportDataProvider { EconomicsData: &mock.EconomicsHandlerMock{}, ShardCoordinator: &testscommon.ShardsCoordinatorMock{}, ExecutionOrderHandler: &mock.ExecutionOrderHandlerStub{}, + Marshaller: &testscommon.MarshalizerMock{}, } } @@ -84,10 +85,10 @@ func TestPrepareOutportSaveBlockData(t *testing.T) { }) require.Nil(t, err) require.NotNil(t, res) - require.NotNil(t, res.HeaderHash) - require.NotNil(t, res.Body) - require.NotNil(t, res.Header) + require.NotNil(t, res.HeaderDataWithBody.HeaderHash) + require.NotNil(t, res.HeaderDataWithBody.Body) + require.NotNil(t, res.HeaderDataWithBody.Header) require.NotNil(t, res.SignersIndexes) require.NotNil(t, res.HeaderGasConsumption) - require.NotNil(t, res.TransactionsPool) + require.NotNil(t, res.TransactionPool) } diff --git a/outport/process/transactionsfee/dataHolders.go b/outport/process/transactionsfee/dataHolders.go index c0f8b518afc..ef2f1a3bba4 100644 --- a/outport/process/transactionsfee/dataHolders.go +++ b/outport/process/transactionsfee/dataHolders.go @@ -1,58 +1,65 @@ package transactionsfee import ( + "encoding/hex" + "github.com/multiversx/mx-chain-core-go/data" outportcore "github.com/multiversx/mx-chain-core-go/data/outport" - "github.com/multiversx/mx-chain-core-go/data/smartContractResult" ) +type txHandlerWithFeeInfo interface { + GetTxHandler() data.TransactionHandler + GetFeeInfo() *outportcore.FeeInfo +} + type transactionWithResults struct { - data.TransactionHandlerWithGasUsedAndFee - scrs []data.TransactionHandlerWithGasUsedAndFee + txHandlerWithFeeInfo + scrs []txHandlerWithFeeInfo log *data.LogData } type transactionsAndScrsHolder struct { txsWithResults map[string]*transactionWithResults - scrsNoTx map[string]data.TransactionHandlerWithGasUsedAndFee + scrsNoTx map[string]txHandlerWithFeeInfo } func newTransactionsAndScrsHolder(nrTxs, nrScrs int) *transactionsAndScrsHolder { return &transactionsAndScrsHolder{ txsWithResults: make(map[string]*transactionWithResults, nrTxs), - scrsNoTx: make(map[string]data.TransactionHandlerWithGasUsedAndFee, nrScrs), + scrsNoTx: 
make(map[string]txHandlerWithFeeInfo, nrScrs), } } -func prepareTransactionsAndScrs(txPool *outportcore.Pool) *transactionsAndScrsHolder { - totalTxs := len(txPool.Txs) + len(txPool.Invalid) + len(txPool.Rewards) - if totalTxs == 0 && len(txPool.Scrs) == 0 { +func prepareTransactionsAndScrs(txPool *outportcore.TransactionPool) *transactionsAndScrsHolder { + totalTxs := len(txPool.Transactions) + len(txPool.InvalidTxs) + len(txPool.Rewards) + if totalTxs == 0 && len(txPool.SmartContractResults) == 0 { return newTransactionsAndScrsHolder(0, 0) } - transactionsAndScrs := newTransactionsAndScrsHolder(totalTxs, len(txPool.Scrs)) - for txHash, tx := range txPool.Txs { + transactionsAndScrs := newTransactionsAndScrsHolder(totalTxs, len(txPool.SmartContractResults)) + for txHash, tx := range txPool.Transactions { transactionsAndScrs.txsWithResults[txHash] = &transactionWithResults{ - TransactionHandlerWithGasUsedAndFee: tx, + txHandlerWithFeeInfo: tx, } } for _, txLog := range txPool.Logs { - txWithResults, ok := transactionsAndScrs.txsWithResults[txLog.TxHash] + txHash := txLog.TxHash + txWithResults, ok := transactionsAndScrs.txsWithResults[txHash] if !ok { continue } - txWithResults.log = txLog + txWithResults.log = &data.LogData{ + LogHandler: txLog.Log, + TxHash: txHash, + } } - for scrHash, scrHandler := range txPool.Scrs { - scr, ok := scrHandler.GetTxHandler().(*smartContractResult.SmartContractResult) - if !ok { - continue - } + for scrHash, scrHandler := range txPool.SmartContractResults { + scr := scrHandler.SmartContractResult - txWithResults, ok := transactionsAndScrs.txsWithResults[string(scr.OriginalTxHash)] + txWithResults, ok := transactionsAndScrs.txsWithResults[hex.EncodeToString(scr.OriginalTxHash)] if !ok { transactionsAndScrs.scrsNoTx[scrHash] = scrHandler continue diff --git a/outport/process/transactionsfee/dataHolders_test.go b/outport/process/transactionsfee/dataHolders_test.go index fbee9eec883..00ddb466575 100644 --- a/outport/process/transactionsfee/dataHolders_test.go +++ b/outport/process/transactionsfee/dataHolders_test.go @@ -1,10 +1,10 @@ package transactionsfee import ( + "encoding/hex" "math/big" "testing" - "github.com/multiversx/mx-chain-core-go/data" outportcore "github.com/multiversx/mx-chain-core-go/data/outport" "github.com/multiversx/mx-chain-core-go/data/smartContractResult" "github.com/multiversx/mx-chain-core-go/data/transaction" @@ -14,37 +14,56 @@ import ( func TestTransactionsAndScrsHolder(t *testing.T) { t.Parallel() - txHash := "txHash" - scrHash1 := "scrHash1" - scrHash2 := "scrHash2" - scrHash3 := "scrHash3" - pool := &outportcore.Pool{ - Txs: map[string]data.TransactionHandlerWithGasUsedAndFee{ - txHash: outportcore.NewTransactionHandlerWithGasAndFee(&transaction.Transaction{ - Nonce: 1, - }, 0, big.NewInt(0)), + txHash := []byte("txHash") + scrHash1 := []byte("scrHash1") + scrHash2 := []byte("scrHash2") + scrHash3 := []byte("scrHash3") + pool := &outportcore.TransactionPool{ + Transactions: map[string]*outportcore.TxInfo{ + hex.EncodeToString(txHash): { + Transaction: &transaction.Transaction{ + Nonce: 1, + }, + FeeInfo: &outportcore.FeeInfo{ + Fee: big.NewInt(0), + }, + }, }, - Scrs: map[string]data.TransactionHandlerWithGasUsedAndFee{ - scrHash1: outportcore.NewTransactionHandlerWithGasAndFee(&smartContractResult.SmartContractResult{ - Nonce: 2, - OriginalTxHash: []byte(txHash), - }, 0, big.NewInt(0)), + SmartContractResults: map[string]*outportcore.SCRInfo{ + hex.EncodeToString(scrHash1): { + SmartContractResult: 
&smartContractResult.SmartContractResult{ + Nonce: 2, + OriginalTxHash: txHash, + }, + FeeInfo: &outportcore.FeeInfo{ + Fee: big.NewInt(0), + }, + }, - scrHash2: outportcore.NewTransactionHandlerWithGasAndFee(&smartContractResult.SmartContractResult{}, 0, big.NewInt(0)), - scrHash3: outportcore.NewTransactionHandlerWithGasAndFee(&smartContractResult.SmartContractResult{ - Nonce: 3, - OriginalTxHash: []byte(txHash), - }, 0, big.NewInt(0)), + hex.EncodeToString(scrHash2): { + SmartContractResult: &smartContractResult.SmartContractResult{}, + FeeInfo: &outportcore.FeeInfo{ + Fee: big.NewInt(0), + }, + }, + hex.EncodeToString(scrHash3): { + SmartContractResult: &smartContractResult.SmartContractResult{ + Nonce: 3, + OriginalTxHash: txHash, + }, + FeeInfo: &outportcore.FeeInfo{ + Fee: big.NewInt(0), + }, + }, }, - Logs: []*data.LogData{ + Logs: []*outportcore.LogData{ { - TxHash: "hash", + Log: &transaction.Log{Address: []byte("addr")}, + TxHash: hex.EncodeToString(txHash), }, { - TxHash: txHash, - LogHandler: &transaction.Log{ - Address: []byte("addr"), - }, + Log: &transaction.Log{}, + TxHash: "hash", }, }, } @@ -52,7 +71,7 @@ func TestTransactionsAndScrsHolder(t *testing.T) { res := prepareTransactionsAndScrs(pool) require.NotNil(t, res) require.Equal(t, 1, len(res.txsWithResults)) - require.Equal(t, 2, len(res.txsWithResults[txHash].scrs)) - require.NotNil(t, res.txsWithResults[txHash].log) + require.Equal(t, 2, len(res.txsWithResults[hex.EncodeToString(txHash)].scrs)) + require.NotNil(t, res.txsWithResults[hex.EncodeToString(txHash)].log) require.Equal(t, 1, len(res.scrsNoTx)) } diff --git a/outport/process/transactionsfee/transactionChecker.go b/outport/process/transactionsfee/transactionChecker.go index 593ab51a08c..546fdd9f432 100644 --- a/outport/process/transactionsfee/transactionChecker.go +++ b/outport/process/transactionsfee/transactionChecker.go @@ -12,7 +12,7 @@ import ( vmcommon "github.com/multiversx/mx-chain-vm-common-go" ) -func (tep *transactionsFeeProcessor) isESDTOperationWithSCCall(tx data.TransactionHandlerWithGasUsedAndFee) bool { +func (tep *transactionsFeeProcessor) isESDTOperationWithSCCall(tx data.TransactionHandler) bool { res := tep.dataFieldParser.Parse(tx.GetData(), tx.GetSndAddr(), tx.GetRcvAddr(), tep.shardCoordinator.NumberOfShards()) isESDTTransferOperation := res.Operation == core.BuiltInFunctionESDTTransfer || @@ -37,16 +37,16 @@ func (tep *transactionsFeeProcessor) isESDTOperationWithSCCall(tx data.Transacti return isESDTTransferOperation && isReceiverSC && hasFunction } -func isSCRForSenderWithRefund(scr *smartContractResult.SmartContractResult, txHash []byte, tx data.TransactionHandlerWithGasUsedAndFee) bool { +func isSCRForSenderWithRefund(scr *smartContractResult.SmartContractResult, txHashHex string, tx data.TransactionHandler) bool { isForSender := bytes.Equal(scr.RcvAddr, tx.GetSndAddr()) isRightNonce := scr.Nonce == tx.GetNonce()+1 - isFromCurrentTx := bytes.Equal(scr.PrevTxHash, txHash) + isFromCurrentTx := hex.EncodeToString(scr.PrevTxHash) == txHashHex isScrDataOk := isDataOk(scr.Data) return isFromCurrentTx && isForSender && isRightNonce && isScrDataOk } -func isRefundForRelayed(dbScResult *smartContractResult.SmartContractResult, tx data.TransactionHandlerWithGasUsedAndFee) bool { +func isRefundForRelayed(dbScResult *smartContractResult.SmartContractResult, tx data.TransactionHandler) bool { isForRelayed := string(dbScResult.ReturnMessage) == core.GasRefundForRelayerMessage isForSender := bytes.Equal(dbScResult.RcvAddr, tx.GetSndAddr()) 
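// The refund for a relayed transaction is attached to an intermediate SCR rather than the
// original user transaction, so its OriginalTxHash and PrevTxHash are expected to differ.
// e.g. (hypothetical hashes): a relayed tx "aa.." spawns an inner SCR "bb.."; the relayer
// refund SCR then carries PrevTxHash "bb.." and OriginalTxHash "aa..", so the two are not equal.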
differentHash := !bytes.Equal(dbScResult.OriginalTxHash, dbScResult.PrevTxHash) @@ -73,7 +73,7 @@ func isSCRWithRefundNoTx(scr *smartContractResult.SmartContractResult) bool { } func isRelayedTx(tx *transactionWithResults) bool { - txData := string(tx.GetData()) + txData := string(tx.GetTxHandler().GetData()) isRelayed := strings.HasPrefix(txData, core.RelayedTransaction) || strings.HasPrefix(txData, core.RelayedTransactionV2) return isRelayed && len(tx.scrs) > 0 } diff --git a/outport/process/transactionsfee/transactionsFeeProcessor.go b/outport/process/transactionsfee/transactionsFeeProcessor.go index 1047e2ddc3a..593a5d6b83b 100644 --- a/outport/process/transactionsfee/transactionsFeeProcessor.go +++ b/outport/process/transactionsfee/transactionsFeeProcessor.go @@ -79,7 +79,7 @@ func checkArg(arg ArgTransactionsFeeProcessor) error { } // PutFeeAndGasUsed will compute and set in transactions pool fee and gas used -func (tep *transactionsFeeProcessor) PutFeeAndGasUsed(pool *outportcore.Pool) error { +func (tep *transactionsFeeProcessor) PutFeeAndGasUsed(pool *outportcore.TransactionPool) error { tep.prepareInvalidTxs(pool) txsWithResultsMap := prepareTransactionsAndScrs(pool) @@ -88,35 +88,38 @@ func (tep *transactionsFeeProcessor) PutFeeAndGasUsed(pool *outportcore.Pool) er return tep.prepareScrsNoTx(txsWithResultsMap) } -func (tep *transactionsFeeProcessor) prepareInvalidTxs(pool *outportcore.Pool) { - for _, invalidTx := range pool.Invalid { - fee := tep.txFeeCalculator.ComputeTxFeeBasedOnGasUsed(invalidTx, invalidTx.GetGasLimit()) - invalidTx.SetGasUsed(invalidTx.GetGasLimit()) - invalidTx.SetFee(fee) - invalidTx.SetInitialPaidFee(fee) +func (tep *transactionsFeeProcessor) prepareInvalidTxs(pool *outportcore.TransactionPool) { + for _, invalidTx := range pool.InvalidTxs { + fee := tep.txFeeCalculator.ComputeTxFeeBasedOnGasUsed(invalidTx.Transaction, invalidTx.Transaction.GasLimit) + invalidTx.FeeInfo.SetGasUsed(invalidTx.Transaction.GetGasLimit()) + invalidTx.FeeInfo.SetFee(fee) + invalidTx.FeeInfo.SetInitialPaidFee(fee) } } func (tep *transactionsFeeProcessor) prepareNormalTxs(transactionsAndScrs *transactionsAndScrsHolder) { - for txHash, txWithResult := range transactionsAndScrs.txsWithResults { - gasUsed := tep.txFeeCalculator.ComputeGasLimit(txWithResult) - fee := tep.txFeeCalculator.ComputeTxFeeBasedOnGasUsed(txWithResult, gasUsed) - initialPaidFee := tep.txFeeCalculator.ComputeTxFeeBasedOnGasUsed(txWithResult, txWithResult.GetGasLimit()) - - txWithResult.SetGasUsed(gasUsed) - txWithResult.SetFee(fee) - txWithResult.SetInitialPaidFee(initialPaidFee) - - if isRelayedTx(txWithResult) || tep.isESDTOperationWithSCCall(txWithResult) { - txWithResult.SetGasUsed(txWithResult.GetGasLimit()) - txWithResult.SetFee(initialPaidFee) + for txHashHex, txWithResult := range transactionsAndScrs.txsWithResults { + txHandler := txWithResult.GetTxHandler() + + gasUsed := tep.txFeeCalculator.ComputeGasLimit(txHandler) + fee := tep.txFeeCalculator.ComputeTxFeeBasedOnGasUsed(txHandler, gasUsed) + initialPaidFee := tep.txFeeCalculator.ComputeTxFeeBasedOnGasUsed(txHandler, txHandler.GetGasLimit()) + + feeInfo := txWithResult.GetFeeInfo() + feeInfo.SetGasUsed(gasUsed) + feeInfo.SetFee(fee) + feeInfo.SetInitialPaidFee(initialPaidFee) + + if isRelayedTx(txWithResult) || tep.isESDTOperationWithSCCall(txHandler) { + feeInfo.SetGasUsed(txWithResult.GetTxHandler().GetGasLimit()) + feeInfo.SetFee(initialPaidFee) } - tep.prepareTxWithResults([]byte(txHash), txWithResult) + tep.prepareTxWithResults(txHashHex, 
txWithResult) } } -func (tep *transactionsFeeProcessor) prepareTxWithResults(txHash []byte, txWithResults *transactionWithResults) { +func (tep *transactionsFeeProcessor) prepareTxWithResults(txHashHex string, txWithResults *transactionWithResults) { hasRefund := false for _, scrHandler := range txWithResults.scrs { scr, ok := scrHandler.GetTxHandler().(*smartContractResult.SmartContractResult) @@ -124,11 +127,11 @@ func (tep *transactionsFeeProcessor) prepareTxWithResults(txHash []byte, txWithR continue } - if isSCRForSenderWithRefund(scr, txHash, txWithResults) || isRefundForRelayed(scr, txWithResults) { - gasUsed, fee := tep.txFeeCalculator.ComputeGasUsedAndFeeBasedOnRefundValue(txWithResults, scr.Value) + if isSCRForSenderWithRefund(scr, txHashHex, txWithResults.GetTxHandler()) || isRefundForRelayed(scr, txWithResults.GetTxHandler()) { + gasUsed, fee := tep.txFeeCalculator.ComputeGasUsedAndFeeBasedOnRefundValue(txWithResults.GetTxHandler(), scr.Value) - txWithResults.SetGasUsed(gasUsed) - txWithResults.SetFee(fee) + txWithResults.GetFeeInfo().SetGasUsed(gasUsed) + txWithResults.GetFeeInfo().SetFee(fee) hasRefund = true break } @@ -148,16 +151,16 @@ func (tep *transactionsFeeProcessor) prepareTxWithResultsBasedOnLogs( for _, event := range txWithResults.log.GetLogEvents() { if core.WriteLogIdentifier == string(event.GetIdentifier()) && !hasRefund { - gasUsed, fee := tep.txFeeCalculator.ComputeGasUsedAndFeeBasedOnRefundValue(txWithResults, big.NewInt(0)) - txWithResults.SetGasUsed(gasUsed) - txWithResults.SetFee(fee) + gasUsed, fee := tep.txFeeCalculator.ComputeGasUsedAndFeeBasedOnRefundValue(txWithResults.GetTxHandler(), big.NewInt(0)) + txWithResults.GetFeeInfo().SetGasUsed(gasUsed) + txWithResults.GetFeeInfo().SetFee(fee) continue } if core.SignalErrorOperation == string(event.GetIdentifier()) { - fee := tep.txFeeCalculator.ComputeTxFeeBasedOnGasUsed(txWithResults, txWithResults.GetGasLimit()) - txWithResults.SetGasUsed(txWithResults.GetGasLimit()) - txWithResults.SetFee(fee) + fee := tep.txFeeCalculator.ComputeTxFeeBasedOnGasUsed(txWithResults.GetTxHandler(), txWithResults.GetTxHandler().GetGasLimit()) + txWithResults.GetFeeInfo().SetGasUsed(txWithResults.GetTxHandler().GetGasLimit()) + txWithResults.GetFeeInfo().SetFee(fee) } } @@ -192,8 +195,8 @@ func (tep *transactionsFeeProcessor) prepareScrsNoTx(transactionsAndScrs *transa gasUsed, fee := tep.txFeeCalculator.ComputeGasUsedAndFeeBasedOnRefundValue(txFromStorage, scr.Value) - scrHandler.SetGasUsed(gasUsed) - scrHandler.SetFee(fee) + scrHandler.GetFeeInfo().SetGasUsed(gasUsed) + scrHandler.GetFeeInfo().SetFee(fee) } return nil diff --git a/outport/process/transactionsfee/transactionsFeeProcessor_test.go b/outport/process/transactionsfee/transactionsFeeProcessor_test.go index 65bc9622b1e..4495b1d0c75 100644 --- a/outport/process/transactionsfee/transactionsFeeProcessor_test.go +++ b/outport/process/transactionsfee/transactionsFeeProcessor_test.go @@ -1,12 +1,12 @@ package transactionsfee import ( + "encoding/hex" "math/big" "testing" "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/pubkeyConverter" - coreData "github.com/multiversx/mx-chain-core-go/data" outportcore "github.com/multiversx/mx-chain-core-go/data/outport" "github.com/multiversx/mx-chain-core-go/data/smartContractResult" "github.com/multiversx/mx-chain-core-go/data/transaction" @@ -66,15 +66,18 @@ func TestPutFeeAndGasUsedTx1(t *testing.T) { scrHash1 := []byte("scrHash1") scrWithRefund := []byte("scrWithRefund") refundValueBig, 
_ := big.NewInt(0).SetString("86271830000000", 10) - initialTx := outportcore.NewTransactionHandlerWithGasAndFee(&transaction.Transaction{ - Nonce: 1196667, - SndAddr: []byte("erd1k7j6ewjsla4zsgv8v6f6fe3dvrkgv3d0d9jerczw45hzedhyed8sh2u34u"), - RcvAddr: []byte("erd14eyayfrvlrhzfrwg5zwleua25mkzgncggn35nvc6xhv5yxwml2es0f3dht"), - GasLimit: 16610000, - GasPrice: 1000000000, - Data: []byte("relayedTx@7b226e6f6e6365223a322c2276616c7565223a302c227265636569766572223a22414141414141414141414146414974673738352f736c73554148686b57334569624c6e47524b76496f4e4d3d222c2273656e646572223a22726b6e534a477a343769534e794b43642f504f717075776b5477684534306d7a476a585a51686e622b724d3d222c226761735072696365223a313030303030303030302c226761734c696d6974223a31353030303030302c2264617461223a22633246325a5546306447567a644746306157397551444668597a49314d6a5935596d51335a44497759324a6959544d31596d566c4f4459314d4464684f574e6a4e7a677a5a4755774f445a694e4445334e546b345a54517a59544e6b5a6a566a593245795a5468684d6a6c414d6a51344e54677a4d574e6d4d5445304d54566d596a41354d6a63774e4451324e5755324e7a597a59574d314f4445345a5467314e4751345957526d4e54417a596a63354d6a6c6b4f54526c4e6d49794e6a49775a673d3d222c22636861696e4944223a224d513d3d222c2276657273696f6e223a312c227369676e6174757265223a225239462b34546352415a386d7771324559303163596c337662716c46657176387a76474a775a6833594d4f556b4234643451574e66376744626c484832576b71614a76614845744356617049713365356562384e41773d3d227d"), - Value: big.NewInt(0), - }, 0, big.NewInt(0)) + initialTx := &outportcore.TxInfo{ + Transaction: &transaction.Transaction{ + Nonce: 1196667, + SndAddr: []byte("erd1k7j6ewjsla4zsgv8v6f6fe3dvrkgv3d0d9jerczw45hzedhyed8sh2u34u"), + RcvAddr: []byte("erd14eyayfrvlrhzfrwg5zwleua25mkzgncggn35nvc6xhv5yxwml2es0f3dht"), + GasLimit: 16610000, + GasPrice: 1000000000, + Data: []byte("relayedTx@7b226e6f6e6365223a322c2276616c7565223a302c227265636569766572223a22414141414141414141414146414974673738352f736c73554148686b57334569624c6e47524b76496f4e4d3d222c2273656e646572223a22726b6e534a477a343769534e794b43642f504f717075776b5477684534306d7a476a585a51686e622b724d3d222c226761735072696365223a313030303030303030302c226761734c696d6974223a31353030303030302c2264617461223a22633246325a5546306447567a644746306157397551444668597a49314d6a5935596d51335a44497759324a6959544d31596d566c4f4459314d4464684f574e6a4e7a677a5a4755774f445a694e4445334e546b345a54517a59544e6b5a6a566a593245795a5468684d6a6c414d6a51344e54677a4d574e6d4d5445304d54566d596a41354d6a63774e4451324e5755324e7a597a59574d314f4445345a5467314e4751345957526d4e54417a596a63354d6a6c6b4f54526c4e6d49794e6a49775a673d3d222c22636861696e4944223a224d513d3d222c2276657273696f6e223a312c227369676e6174757265223a225239462b34546352415a386d7771324559303163596c337662716c46657176387a76474a775a6833594d4f556b4234643451574e66376744626c484832576b71614a76614845744356617049713365356562384e41773d3d227d"), + Value: big.NewInt(0), + }, + FeeInfo: &outportcore.FeeInfo{Fee: big.NewInt(0)}, + } scr1 := &smartContractResult.SmartContractResult{ Nonce: 2, @@ -87,23 +90,32 @@ func TestPutFeeAndGasUsedTx1(t *testing.T) { OriginalTxHash: txHash, } - pool := &outportcore.Pool{ - Txs: map[string]coreData.TransactionHandlerWithGasUsedAndFee{ - string(txHash): initialTx, + pool := &outportcore.TransactionPool{ + Transactions: map[string]*outportcore.TxInfo{ + hex.EncodeToString(txHash): initialTx, }, - Scrs: map[string]coreData.TransactionHandlerWithGasUsedAndFee{ - "wrong": outportcore.NewTransactionHandlerWithGasAndFee(&transaction.Transaction{}, 0, big.NewInt(0)), - string(scrHash1): 
outportcore.NewTransactionHandlerWithGasAndFee(scr1, 0, big.NewInt(0)), - string(scrWithRefund): outportcore.NewTransactionHandlerWithGasAndFee(&smartContractResult.SmartContractResult{ - Nonce: 3, - SndAddr: []byte("erd1qqqqqqqqqqqqqpgq3dswlnnlkfd3gqrcv3dhzgnvh8ryf27g5rfsecnn2s"), - RcvAddr: []byte("erd1k7j6ewjsla4zsgv8v6f6fe3dvrkgv3d0d9jerczw45hzedhyed8sh2u34u"), - PrevTxHash: []byte("f639cb7a0231191e04ec19dcb1359bd93a03fe8dc4a28a80d00835c5d1c988f8"), - OriginalTxHash: txHash, - Value: refundValueBig, - Data: []byte(""), - ReturnMessage: []byte("gas refund for relayer"), - }, 0, big.NewInt(0)), + SmartContractResults: map[string]*outportcore.SCRInfo{ + hex.EncodeToString(scrHash1): { + SmartContractResult: scr1, + FeeInfo: &outportcore.FeeInfo{ + Fee: big.NewInt(0), + }, + }, + hex.EncodeToString(scrWithRefund): { + SmartContractResult: &smartContractResult.SmartContractResult{ + Nonce: 3, + SndAddr: []byte("erd1qqqqqqqqqqqqqpgq3dswlnnlkfd3gqrcv3dhzgnvh8ryf27g5rfsecnn2s"), + RcvAddr: []byte("erd1k7j6ewjsla4zsgv8v6f6fe3dvrkgv3d0d9jerczw45hzedhyed8sh2u34u"), + PrevTxHash: []byte("f639cb7a0231191e04ec19dcb1359bd93a03fe8dc4a28a80d00835c5d1c988f8"), + OriginalTxHash: txHash, + Value: refundValueBig, + Data: []byte(""), + ReturnMessage: []byte("gas refund for relayer"), + }, + FeeInfo: &outportcore.FeeInfo{ + Fee: big.NewInt(0), + }, + }, }, } @@ -114,9 +126,9 @@ func TestPutFeeAndGasUsedTx1(t *testing.T) { err = txsFeeProc.PutFeeAndGasUsed(pool) require.Nil(t, err) - require.Equal(t, big.NewInt(1673728170000000), initialTx.GetFee()) - require.Equal(t, uint64(7982817), initialTx.GetGasUsed()) - require.Equal(t, "1760000000000000", initialTx.GetInitialPaidFee().String()) + require.Equal(t, big.NewInt(1673728170000000), initialTx.GetFeeInfo().GetFee()) + require.Equal(t, uint64(7982817), initialTx.GetFeeInfo().GetGasUsed()) + require.Equal(t, "1760000000000000", initialTx.GetFeeInfo().GetInitialPaidFee().String()) } func TestPutFeeAndGasUsedScrNoTx(t *testing.T) { @@ -127,20 +139,22 @@ func TestPutFeeAndGasUsedScrNoTx(t *testing.T) { refundValueBig, _ := big.NewInt(0).SetString("226498540000000", 10) - scr := outportcore.NewTransactionHandlerWithGasAndFee(&smartContractResult.SmartContractResult{ - Nonce: 3, - SndAddr: []byte("erd1qqqqqqqqqqqqqpgq3dswlnnlkfd3gqrcv3dhzgnvh8ryf27g5rfsecnn2s"), - RcvAddr: []byte("erd1k7j6ewjsla4zsgv8v6f6fe3dvrkgv3d0d9jerczw45hzedhyed8sh2u34u"), - PrevTxHash: []byte("f639cb7a0231191e04ec19dcb1359bd93a03fe8dc4a28a80d00835c5d1c988f8"), - OriginalTxHash: txHash, - Value: refundValueBig, - Data: []byte(""), - ReturnMessage: []byte("gas refund for relayer"), - }, 0, big.NewInt(0)) - - pool := &outportcore.Pool{ - Scrs: map[string]coreData.TransactionHandlerWithGasUsedAndFee{ - "wrong": outportcore.NewTransactionHandlerWithGasAndFee(&transaction.Transaction{}, 0, big.NewInt(0)), + scr := &outportcore.SCRInfo{ + SmartContractResult: &smartContractResult.SmartContractResult{ + Nonce: 3, + SndAddr: []byte("erd1qqqqqqqqqqqqqpgq3dswlnnlkfd3gqrcv3dhzgnvh8ryf27g5rfsecnn2s"), + RcvAddr: []byte("erd1k7j6ewjsla4zsgv8v6f6fe3dvrkgv3d0d9jerczw45hzedhyed8sh2u34u"), + PrevTxHash: []byte("f639cb7a0231191e04ec19dcb1359bd93a03fe8dc4a28a80d00835c5d1c988f8"), + OriginalTxHash: txHash, + Value: refundValueBig, + Data: []byte(""), + ReturnMessage: []byte("gas refund for relayer"), + }, + FeeInfo: &outportcore.FeeInfo{Fee: big.NewInt(0)}, + } + + pool := &outportcore.TransactionPool{ + SmartContractResults: map[string]*outportcore.SCRInfo{ string(scrWithRefund): scr, }, } @@ -162,20 +176,23 @@ 
func TestPutFeeAndGasUsedScrNoTx(t *testing.T) { err = txsFeeProc.PutFeeAndGasUsed(pool) require.Nil(t, err) - require.Equal(t, big.NewInt(123001460000000), scr.GetFee()) - require.Equal(t, uint64(7350146), scr.GetGasUsed()) + require.Equal(t, big.NewInt(123001460000000), scr.GetFeeInfo().GetFee()) + require.Equal(t, uint64(7350146), scr.GetFeeInfo().GetGasUsed()) } func TestPutFeeAndGasUsedInvalidTxs(t *testing.T) { t.Parallel() - tx := outportcore.NewTransactionHandlerWithGasAndFee(&transaction.Transaction{ - GasLimit: 30000000, - GasPrice: 1000000000, - }, 0, big.NewInt(0)) + tx := &outportcore.TxInfo{ + Transaction: &transaction.Transaction{ + GasLimit: 30000000, + GasPrice: 1000000000, + }, + FeeInfo: &outportcore.FeeInfo{Fee: big.NewInt(0)}, + } - pool := &outportcore.Pool{ - Invalid: map[string]coreData.TransactionHandlerWithGasUsedAndFee{ + pool := &outportcore.TransactionPool{ + InvalidTxs: map[string]*outportcore.TxInfo{ "tx": tx, }, } @@ -187,43 +204,49 @@ func TestPutFeeAndGasUsedInvalidTxs(t *testing.T) { err = txsFeeProc.PutFeeAndGasUsed(pool) require.Nil(t, err) - require.Equal(t, big.NewInt(349500000000000), tx.GetFee()) - require.Equal(t, tx.GetGasLimit(), tx.GetGasUsed()) + require.Equal(t, big.NewInt(349500000000000), tx.GetFeeInfo().GetFee()) + require.Equal(t, tx.GetTxHandler().GetGasLimit(), tx.GetFeeInfo().GetGasUsed()) } func TestPutFeeAndGasUsedLogWithErrorAndInformative(t *testing.T) { t.Parallel() tx1Hash := "h1" - tx1 := outportcore.NewTransactionHandlerWithGasAndFee(&transaction.Transaction{ - GasLimit: 30000000, - GasPrice: 1000000000, - }, 0, big.NewInt(0)) + tx1 := &outportcore.TxInfo{ + Transaction: &transaction.Transaction{ + GasLimit: 30000000, + GasPrice: 1000000000, + }, + FeeInfo: &outportcore.FeeInfo{Fee: big.NewInt(0)}, + } tx2Hash := "h2" - tx2 := outportcore.NewTransactionHandlerWithGasAndFee(&transaction.Transaction{ - GasLimit: 50000000, - GasPrice: 1000000000, - }, 0, big.NewInt(0)) + tx2 := &outportcore.TxInfo{ + Transaction: &transaction.Transaction{ + GasLimit: 50000000, + GasPrice: 1000000000, + }, + FeeInfo: &outportcore.FeeInfo{Fee: big.NewInt(0)}, + } - pool := &outportcore.Pool{ - Txs: map[string]coreData.TransactionHandlerWithGasUsedAndFee{ + pool := &outportcore.TransactionPool{ + Transactions: map[string]*outportcore.TxInfo{ tx1Hash: tx1, tx2Hash: tx2, - "t3": outportcore.NewTransactionHandlerWithGasAndFee(&transaction.Transaction{}, 0, big.NewInt(0)), - }, - Logs: []*coreData.LogData{ + "t3": {Transaction: &transaction.Transaction{}, FeeInfo: &outportcore.FeeInfo{Fee: big.NewInt(0)}}}, + Logs: []*outportcore.LogData{ { - LogHandler: &transaction.Log{ + Log: &transaction.Log{ Events: []*transaction.Event{ { Identifier: []byte("ignore"), }, }, }, + TxHash: "hhh", }, { - LogHandler: &transaction.Log{ + Log: &transaction.Log{ Events: []*transaction.Event{ { Identifier: []byte("ignore"), @@ -236,7 +259,7 @@ func TestPutFeeAndGasUsedLogWithErrorAndInformative(t *testing.T) { TxHash: tx1Hash, }, { - LogHandler: &transaction.Log{ + Log: &transaction.Log{ Events: []*transaction.Event{ { Identifier: []byte(core.WriteLogIdentifier), @@ -256,8 +279,8 @@ func TestPutFeeAndGasUsedLogWithErrorAndInformative(t *testing.T) { err = txsFeeProc.PutFeeAndGasUsed(pool) require.Nil(t, err) - require.Equal(t, tx1.GetGasLimit(), tx1.GetGasUsed()) - require.Equal(t, tx2.GetGasLimit(), tx2.GetGasUsed()) + require.Equal(t, tx1.GetTxHandler().GetGasLimit(), tx1.GetFeeInfo().GetGasUsed()) + require.Equal(t, tx2.GetTxHandler().GetGasLimit(), 
tx2.GetFeeInfo().GetGasUsed()) } func TestPutFeeAndGasUsedWrongRelayedTx(t *testing.T) { @@ -265,31 +288,37 @@ func TestPutFeeAndGasUsedWrongRelayedTx(t *testing.T) { txHash := []byte("relayedTx") scrHash1 := []byte("scrHash1") - initialTx := outportcore.NewTransactionHandlerWithGasAndFee(&transaction.Transaction{ - Nonce: 1011, - SndAddr: []byte("erd1dglncxk6sl9a3xumj78n6z2xux4ghp5c92cstv5zsn56tjgtdwpsk46qrs"), - RcvAddr: []byte("erd1xlrw5j482m3fwl72fsu9saj984rxqdrjd860e02tcz0qakvqrp6q2pjqgg"), - GasLimit: 550000000, - GasPrice: 1000000000, - Data: []byte("relayedTxV2@000000000000000005005eaf5311cedc6fa17f08f33e156926f8f3816d8ed8dc@06e2@7472616e73666572546f6b656e4064633132346163313733323937623836623936316362636663363339326231643130303533326533336530663933313838373634396336613935636236633931403031@ba26daf1353b8fa62d183b7d7df8db48846ea982a0cb26450b703e16720c77b9d7d4e47b652d270b160ae6866ca7b04aae38ca83a58ce508bf660db07d5b6401"), - Value: big.NewInt(0), - }, 0, big.NewInt(0)) + initialTx := &outportcore.TxInfo{ + Transaction: &transaction.Transaction{ + Nonce: 1011, + SndAddr: []byte("erd1dglncxk6sl9a3xumj78n6z2xux4ghp5c92cstv5zsn56tjgtdwpsk46qrs"), + RcvAddr: []byte("erd1xlrw5j482m3fwl72fsu9saj984rxqdrjd860e02tcz0qakvqrp6q2pjqgg"), + GasLimit: 550000000, + GasPrice: 1000000000, + Data: []byte("relayedTxV2@000000000000000005005eaf5311cedc6fa17f08f33e156926f8f3816d8ed8dc@06e2@7472616e73666572546f6b656e4064633132346163313733323937623836623936316362636663363339326231643130303533326533336530663933313838373634396336613935636236633931403031@ba26daf1353b8fa62d183b7d7df8db48846ea982a0cb26450b703e16720c77b9d7d4e47b652d270b160ae6866ca7b04aae38ca83a58ce508bf660db07d5b6401"), + Value: big.NewInt(0), + }, + FeeInfo: &outportcore.FeeInfo{Fee: big.NewInt(0)}, + } - scr1 := &smartContractResult.SmartContractResult{ - Nonce: 1011, - SndAddr: []byte("erd1xlrw5j482m3fwl72fsu9saj984rxqdrjd860e02tcz0qakvqrp6q2pjqgg"), - RcvAddr: []byte("erd1dglncxk6sl9a3xumj78n6z2xux4ghp5c92cstv5zsn56tjgtdwpsk46qrs"), - PrevTxHash: txHash, - OriginalTxHash: txHash, - ReturnMessage: []byte("higher nonce in transaction"), + scr1 := &outportcore.SCRInfo{ + SmartContractResult: &smartContractResult.SmartContractResult{ + Nonce: 1011, + SndAddr: []byte("erd1xlrw5j482m3fwl72fsu9saj984rxqdrjd860e02tcz0qakvqrp6q2pjqgg"), + RcvAddr: []byte("erd1dglncxk6sl9a3xumj78n6z2xux4ghp5c92cstv5zsn56tjgtdwpsk46qrs"), + PrevTxHash: txHash, + OriginalTxHash: txHash, + ReturnMessage: []byte("higher nonce in transaction"), + }, + FeeInfo: &outportcore.FeeInfo{Fee: big.NewInt(0)}, } - pool := &outportcore.Pool{ - Txs: map[string]coreData.TransactionHandlerWithGasUsedAndFee{ - string(txHash): initialTx, + pool := &outportcore.TransactionPool{ + Transactions: map[string]*outportcore.TxInfo{ + hex.EncodeToString(txHash): initialTx, }, - Scrs: map[string]coreData.TransactionHandlerWithGasUsedAndFee{ - string(scrHash1): outportcore.NewTransactionHandlerWithGasAndFee(scr1, 0, big.NewInt(0)), + SmartContractResults: map[string]*outportcore.SCRInfo{ + hex.EncodeToString(scrHash1): scr1, }, } @@ -300,27 +329,30 @@ func TestPutFeeAndGasUsedWrongRelayedTx(t *testing.T) { err = txsFeeProc.PutFeeAndGasUsed(pool) require.Nil(t, err) - require.Equal(t, big.NewInt(6103405000000000), initialTx.GetFee()) - require.Equal(t, uint64(550000000), initialTx.GetGasUsed()) - require.Equal(t, "6103405000000000", initialTx.GetInitialPaidFee().String()) + require.Equal(t, big.NewInt(6103405000000000), initialTx.GetFeeInfo().GetFee()) + require.Equal(t, uint64(550000000), 
initialTx.GetFeeInfo().GetGasUsed()) + require.Equal(t, "6103405000000000", initialTx.GetFeeInfo().GetInitialPaidFee().String()) } func TestPutFeeAndGasUsedESDTWithScCall(t *testing.T) { t.Parallel() txHash := []byte("tx") - tx := outportcore.NewTransactionHandlerWithGasAndFee(&transaction.Transaction{ - Nonce: 1011, - SndAddr: silentDecodeAddress("erd1dglncxk6sl9a3xumj78n6z2xux4ghp5c92cstv5zsn56tjgtdwpsk46qrs"), - RcvAddr: silentDecodeAddress("erd1dglncxk6sl9a3xumj78n6z2xux4ghp5c92cstv5zsn56tjgtdwpsk46qrs"), - GasLimit: 55_000_000, - GasPrice: 1000000000, - Data: []byte("ESDTNFTTransfer@434f572d636434363364@080c@01@00000000000000000500d3b28828d62052124f07dcd50ed31b0825f60eee1526@616363657074476c6f62616c4f66666572@c3e5"), - Value: big.NewInt(0), - }, 0, big.NewInt(0)) + tx := &outportcore.TxInfo{ + Transaction: &transaction.Transaction{ + Nonce: 1011, + SndAddr: silentDecodeAddress("erd1dglncxk6sl9a3xumj78n6z2xux4ghp5c92cstv5zsn56tjgtdwpsk46qrs"), + RcvAddr: silentDecodeAddress("erd1dglncxk6sl9a3xumj78n6z2xux4ghp5c92cstv5zsn56tjgtdwpsk46qrs"), + GasLimit: 55_000_000, + GasPrice: 1000000000, + Data: []byte("ESDTNFTTransfer@434f572d636434363364@080c@01@00000000000000000500d3b28828d62052124f07dcd50ed31b0825f60eee1526@616363657074476c6f62616c4f66666572@c3e5"), + Value: big.NewInt(0), + }, + FeeInfo: &outportcore.FeeInfo{Fee: big.NewInt(0)}, + } - pool := &outportcore.Pool{ - Txs: map[string]coreData.TransactionHandlerWithGasUsedAndFee{ + pool := &outportcore.TransactionPool{ + Transactions: map[string]*outportcore.TxInfo{ string(txHash): tx, }, } @@ -332,9 +364,9 @@ func TestPutFeeAndGasUsedESDTWithScCall(t *testing.T) { err = txsFeeProc.PutFeeAndGasUsed(pool) require.Nil(t, err) - require.Equal(t, big.NewInt(820765000000000), tx.GetFee()) - require.Equal(t, uint64(55_000_000), tx.GetGasUsed()) - require.Equal(t, "820765000000000", tx.GetInitialPaidFee().String()) + require.Equal(t, big.NewInt(820765000000000), tx.GetFeeInfo().GetFee()) + require.Equal(t, uint64(55_000_000), tx.GetFeeInfo().GetGasUsed()) + require.Equal(t, "820765000000000", tx.GetFeeInfo().GetInitialPaidFee().String()) } func silentDecodeAddress(address string) []byte { @@ -352,19 +384,21 @@ func TestPutFeeAndGasUsedScrWithRefundNoTx(t *testing.T) { refundValueBig, _ := big.NewInt(0).SetString("226498540000000", 10) - scr := outportcore.NewTransactionHandlerWithGasAndFee(&smartContractResult.SmartContractResult{ - Nonce: 3, - SndAddr: []byte("erd1qqqqqqqqqqqqqpgq3dswlnnlkfd3gqrcv3dhzgnvh8ryf27g5rfsecnn2s"), - RcvAddr: []byte("erd1k7j6ewjsla4zsgv8v6f6fe3dvrkgv3d0d9jerczw45hzedhyed8sh2u34u"), - PrevTxHash: []byte("f639cb7a0231191e04ec19dcb1359bd93a03fe8dc4a28a80d00835c5d1c988f8"), - OriginalTxHash: txHash, - Value: refundValueBig, - Data: []byte("@ok"), - }, 0, big.NewInt(0)) + scr := &outportcore.SCRInfo{ + SmartContractResult: &smartContractResult.SmartContractResult{ + Nonce: 3, + SndAddr: []byte("erd1qqqqqqqqqqqqqpgq3dswlnnlkfd3gqrcv3dhzgnvh8ryf27g5rfsecnn2s"), + RcvAddr: []byte("erd1k7j6ewjsla4zsgv8v6f6fe3dvrkgv3d0d9jerczw45hzedhyed8sh2u34u"), + PrevTxHash: []byte("f639cb7a0231191e04ec19dcb1359bd93a03fe8dc4a28a80d00835c5d1c988f8"), + OriginalTxHash: txHash, + Value: refundValueBig, + Data: []byte("@ok"), + }, + FeeInfo: &outportcore.FeeInfo{Fee: big.NewInt(0)}, + } - pool := &outportcore.Pool{ - Scrs: map[string]coreData.TransactionHandlerWithGasUsedAndFee{ - "wrong": outportcore.NewTransactionHandlerWithGasAndFee(&transaction.Transaction{}, 0, big.NewInt(0)), + pool := &outportcore.TransactionPool{ + SmartContractResults: 
map[string]*outportcore.SCRInfo{ string(scrWithRefund): scr, }, } @@ -385,8 +419,8 @@ func TestPutFeeAndGasUsedScrWithRefundNoTx(t *testing.T) { err = txsFeeProc.PutFeeAndGasUsed(pool) require.Nil(t, err) - require.Equal(t, big.NewInt(0), scr.GetFee()) - require.Equal(t, uint64(0), scr.GetGasUsed()) + require.Equal(t, big.NewInt(0), scr.GetFeeInfo().GetFee()) + require.Equal(t, uint64(0), scr.GetFeeInfo().GetGasUsed()) require.True(t, wasCalled) } @@ -398,19 +432,21 @@ func TestPutFeeAndGasUsedScrWithRefundNotForInitialSender(t *testing.T) { refundValueBig, _ := big.NewInt(0).SetString("226498540000000", 10) - scr := outportcore.NewTransactionHandlerWithGasAndFee(&smartContractResult.SmartContractResult{ - Nonce: 3, - SndAddr: []byte("erd1qqqqqqqqqqqqqpgq3dswlnnlkfd3gqrcv3dhzgnvh8ryf27g5rfsecnn2s"), - RcvAddr: []byte("erd1k7j6ewjsla4zsgv8v6f6fe3dvrkgv3d0d9jerczw45hzedhyed8sh2u34u"), - PrevTxHash: []byte("f639cb7a0231191e04ec19dcb1359bd93a03fe8dc4a28a80d00835c5d1c988f8"), - OriginalTxHash: txHash, - Value: refundValueBig, - Data: []byte(""), - }, 0, big.NewInt(0)) + scr := &outportcore.SCRInfo{ + SmartContractResult: &smartContractResult.SmartContractResult{ + Nonce: 3, + SndAddr: []byte("erd1qqqqqqqqqqqqqpgq3dswlnnlkfd3gqrcv3dhzgnvh8ryf27g5rfsecnn2s"), + RcvAddr: []byte("erd1k7j6ewjsla4zsgv8v6f6fe3dvrkgv3d0d9jerczw45hzedhyed8sh2u34u"), + PrevTxHash: []byte("f639cb7a0231191e04ec19dcb1359bd93a03fe8dc4a28a80d00835c5d1c988f8"), + OriginalTxHash: txHash, + Value: refundValueBig, + Data: []byte(""), + }, + FeeInfo: &outportcore.FeeInfo{Fee: big.NewInt(0)}, + } - pool := &outportcore.Pool{ - Scrs: map[string]coreData.TransactionHandlerWithGasUsedAndFee{ - "wrong": outportcore.NewTransactionHandlerWithGasAndFee(&transaction.Transaction{}, 0, big.NewInt(0)), + pool := &outportcore.TransactionPool{ + SmartContractResults: map[string]*outportcore.SCRInfo{ string(scrWithRefund): scr, }, } @@ -432,6 +468,54 @@ func TestPutFeeAndGasUsedScrWithRefundNotForInitialSender(t *testing.T) { err = txsFeeProc.PutFeeAndGasUsed(pool) require.Nil(t, err) - require.Equal(t, big.NewInt(0), scr.GetFee()) - require.Equal(t, uint64(0), scr.GetGasUsed()) + require.Equal(t, big.NewInt(0), scr.GetFeeInfo().GetFee()) + require.Equal(t, uint64(0), scr.GetFeeInfo().GetGasUsed()) +} + +func TestPutFeeAndGasUsedScrWithRefund(t *testing.T) { + t.Parallel() + + txHash := []byte("e3cdb8b4936fdbee2d3b1244b4c49959df5f90ada683d650019d244e5a64afaf") + scrWithRefund := []byte("scrWithRefund") + + initialTx := &outportcore.TxInfo{Transaction: &transaction.Transaction{ + Nonce: 1004, + GasLimit: 60_000_000, + GasPrice: 1000000000, + SndAddr: []byte("erd1s8jr8e8hsvv7c9ehmshcjlpzf9ua5l50qeswa8feshrp6xlz9c7quacmtx"), + }, FeeInfo: &outportcore.FeeInfo{Fee: big.NewInt(0)}} + + refundValueBig, _ := big.NewInt(0).SetString("96635000000000", 10) + + scr := &outportcore.SCRInfo{ + SmartContractResult: &smartContractResult.SmartContractResult{ + Nonce: 1005, + SndAddr: []byte("erd1qqqqqqqqqqqqqqqpqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqzllls8a5w6u"), + RcvAddr: []byte("erd1s8jr8e8hsvv7c9ehmshcjlpzf9ua5l50qeswa8feshrp6xlz9c7quacmtx"), + PrevTxHash: txHash, + OriginalTxHash: txHash, + Value: refundValueBig, + Data: []byte("@6f6b"), + }, + FeeInfo: &outportcore.FeeInfo{Fee: big.NewInt(0)}, + } + + pool := &outportcore.TransactionPool{ + SmartContractResults: map[string]*outportcore.SCRInfo{ + hex.EncodeToString(scrWithRefund): scr, + }, + Transactions: map[string]*outportcore.TxInfo{ + hex.EncodeToString(txHash): initialTx, + }, + } + + arg := 
prepareMockArg() + txsFeeProc, err := NewTransactionsFeeProcessor(arg) + require.NotNil(t, txsFeeProc) + require.Nil(t, err) + + err = txsFeeProc.PutFeeAndGasUsed(pool) + require.Nil(t, err) + require.Equal(t, big.NewInt(552865000000000), initialTx.GetFeeInfo().GetFee()) + require.Equal(t, uint64(50_336_500), initialTx.GetFeeInfo().GetGasUsed()) } diff --git a/p2p/config/config.go b/p2p/config/config.go index 00ee4b5775b..eb2bf95d07c 100644 --- a/p2p/config/config.go +++ b/p2p/config/config.go @@ -1,6 +1,6 @@ package config -import "github.com/multiversx/mx-chain-p2p-go/config" +import "github.com/multiversx/mx-chain-communication-go/p2p/config" // P2PConfig will hold all the P2P settings type P2PConfig = config.P2PConfig diff --git a/p2p/constants.go b/p2p/constants.go index 271cae06736..4f0807484b7 100644 --- a/p2p/constants.go +++ b/p2p/constants.go @@ -1,7 +1,7 @@ package p2p import ( - p2p "github.com/multiversx/mx-chain-p2p-go" + "github.com/multiversx/mx-chain-communication-go/p2p" ) // NodeOperation defines the p2p node operation diff --git a/p2p/errors.go b/p2p/errors.go index 4f841161bbc..d80b9445433 100644 --- a/p2p/errors.go +++ b/p2p/errors.go @@ -3,7 +3,7 @@ package p2p import ( "errors" - p2p "github.com/multiversx/mx-chain-p2p-go" + "github.com/multiversx/mx-chain-communication-go/p2p" ) // ErrNilMessage signals that a nil message has been received diff --git a/p2p/factory/factory.go b/p2p/factory/factory.go index cfc6c6f6e6f..c64ee34775c 100644 --- a/p2p/factory/factory.go +++ b/p2p/factory/factory.go @@ -1,13 +1,13 @@ package factory import ( + "github.com/multiversx/mx-chain-communication-go/p2p/libp2p" + "github.com/multiversx/mx-chain-communication-go/p2p/libp2p/crypto" + "github.com/multiversx/mx-chain-communication-go/p2p/message" + messagecheck "github.com/multiversx/mx-chain-communication-go/p2p/messageCheck" + "github.com/multiversx/mx-chain-communication-go/p2p/peersHolder" + "github.com/multiversx/mx-chain-communication-go/p2p/rating" "github.com/multiversx/mx-chain-go/p2p" - "github.com/multiversx/mx-chain-p2p-go/libp2p" - p2pCrypto "github.com/multiversx/mx-chain-p2p-go/libp2p/crypto" - "github.com/multiversx/mx-chain-p2p-go/message" - messagecheck "github.com/multiversx/mx-chain-p2p-go/messageCheck" - "github.com/multiversx/mx-chain-p2p-go/peersHolder" - "github.com/multiversx/mx-chain-p2p-go/rating" ) // ArgsNetworkMessenger defines the options used to create a p2p wrapper @@ -53,7 +53,7 @@ func NewPeersHolder(preferredConnectionAddresses []string) (p2p.PreferredPeersHo // NewP2PKeyConverter returns a new instance of p2pKeyConverter func NewP2PKeyConverter() p2p.P2PKeyConverter { - return p2pCrypto.NewP2PKeyConverter() + return crypto.NewP2PKeyConverter() } // NewMessageVerifier will return a new instance of messages verifier diff --git a/p2p/interface.go b/p2p/interface.go index 8f57650b85c..f643852dc32 100644 --- a/p2p/interface.go +++ b/p2p/interface.go @@ -4,9 +4,9 @@ import ( "encoding/hex" "time" + "github.com/multiversx/mx-chain-communication-go/p2p" "github.com/multiversx/mx-chain-core-go/core" crypto "github.com/multiversx/mx-chain-crypto-go" - p2p "github.com/multiversx/mx-chain-p2p-go" ) // MessageProcessor is the interface used to describe what a receive message processor should do diff --git a/process/block/argProcessor.go b/process/block/argProcessor.go index ccacd4aa0c5..5e97ac59686 100644 --- a/process/block/argProcessor.go +++ b/process/block/argProcessor.go @@ -14,6 +14,7 @@ import ( "github.com/multiversx/mx-chain-go/dblookupext" 
"github.com/multiversx/mx-chain-go/outport" "github.com/multiversx/mx-chain-go/process" + "github.com/multiversx/mx-chain-go/process/block/cutoff" "github.com/multiversx/mx-chain-go/sharding" "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" "github.com/multiversx/mx-chain-go/state" @@ -65,6 +66,7 @@ type ArgBaseProcessor struct { StatusCoreComponents statusCoreComponentsHolder Config config.Config + PrefsConfig config.Preferences AccountsDB map[state.AccountsDbIdentifier]state.AccountsAdapter ForkDetector process.ForkDetector NodesCoordinator nodesCoordinator.NodesCoordinator @@ -88,6 +90,7 @@ type ArgBaseProcessor struct { ScheduledMiniBlocksEnableEpoch uint32 ProcessedMiniBlocksTracker process.ProcessedMiniBlocksTracker ReceiptsRepository receiptsRepository + BlockProcessingCutoffHandler cutoff.BlockProcessingCutoffHandler } // ArgShardProcessor holds all dependencies required by the process data factory in order to create diff --git a/process/block/baseProcess.go b/process/block/baseProcess.go index c37f8fa65eb..d966615e378 100644 --- a/process/block/baseProcess.go +++ b/process/block/baseProcess.go @@ -14,6 +14,7 @@ import ( "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-core-go/data/block" + outportcore "github.com/multiversx/mx-chain-core-go/data/outport" "github.com/multiversx/mx-chain-core-go/data/scheduled" "github.com/multiversx/mx-chain-core-go/data/typeConverters" "github.com/multiversx/mx-chain-core-go/display" @@ -29,10 +30,10 @@ import ( "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/dblookupext" debugFactory "github.com/multiversx/mx-chain-go/debug/factory" - "github.com/multiversx/mx-chain-go/errors" "github.com/multiversx/mx-chain-go/outport" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/process/block/bootstrapStorage" + "github.com/multiversx/mx-chain-go/process/block/cutoff" "github.com/multiversx/mx-chain-go/process/block/processedMb" "github.com/multiversx/mx-chain-go/sharding" "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" @@ -89,6 +90,7 @@ type baseProcessor struct { versionedHeaderFactory nodeFactory.VersionedHeaderFactory headerIntegrityVerifier process.HeaderIntegrityVerifier scheduledTxsExecutionHandler process.ScheduledTxsExecutionHandler + blockProcessingCutoffHandler cutoff.BlockProcessingCutoffHandler appStatusHandler core.AppStatusHandler stateCheckpointModulus uint @@ -417,8 +419,8 @@ func displayHeader(headerHandler data.HeaderHandler) []*display.LineData { } } -// checkProcessorNilParameters will check the input parameters for nil values -func checkProcessorNilParameters(arguments ArgBaseProcessor) error { +// checkProcessorParameters will check the input parameters values +func checkProcessorParameters(arguments ArgBaseProcessor) error { for key := range arguments.AccountsDB { if check.IfNil(arguments.AccountsDB[key]) { @@ -536,6 +538,9 @@ func checkProcessorNilParameters(arguments ArgBaseProcessor) error { if check.IfNil(arguments.ReceiptsRepository) { return process.ErrNilReceiptsRepository } + if check.IfNil(arguments.BlockProcessingCutoffHandler) { + return process.ErrNilBlockProcessingCutoffHandler + } return nil } @@ -1377,9 +1382,9 @@ func getLastSelfNotarizedHeaderByItself(chainHandler data.ChainHandler) (data.He } func (bp *baseProcessor) setFinalizedHeaderHashInIndexer(hdrHash []byte) { - log.Debug("baseProcessor.setFinalizedBlockInIndexer", "finalized header 
hash", hdrHash) + log.Debug("baseProcessor.setFinalizedHeaderHashInIndexer", "finalized header hash", hdrHash) - bp.outportHandler.FinalizedBlock(hdrHash) + bp.outportHandler.FinalizedBlock(&outportcore.FinalizedBlock{HeaderHash: hdrHash}) } func (bp *baseProcessor) updateStateStorage( @@ -1686,7 +1691,7 @@ func (bp *baseProcessor) recordBlockInHistory(blockHeaderHash []byte, blockHeade err := bp.historyRepo.RecordBlock(blockHeaderHash, blockHeader, blockBody, scrResultsFromPool, receiptsFromPool, intraMiniBlocks, logs) if err != nil { logLevel := logger.LogError - if errors.IsClosingError(err) { + if core.IsClosingError(err) { logLevel = logger.LogDebug } log.Log(logLevel, "historyRepo.RecordBlock()", "blockHeaderHash", blockHeaderHash, "error", err.Error()) diff --git a/process/block/baseProcess_test.go b/process/block/baseProcess_test.go index b86791c52ef..3b93fb7a465 100644 --- a/process/block/baseProcess_test.go +++ b/process/block/baseProcess_test.go @@ -122,6 +122,7 @@ func createArgBaseProcessor( ScheduledMiniBlocksEnableEpoch: 2, ProcessedMiniBlocksTracker: &testscommon.ProcessedMiniBlocksTrackerStub{}, ReceiptsRepository: &testscommon.ReceiptsRepositoryStub{}, + BlockProcessingCutoffHandler: &testscommon.BlockProcessingCutoffStub{}, } } diff --git a/process/block/cutoff/blockProcessingCutoffFactory.go b/process/block/cutoff/blockProcessingCutoffFactory.go new file mode 100644 index 00000000000..e2a53f02caa --- /dev/null +++ b/process/block/cutoff/blockProcessingCutoffFactory.go @@ -0,0 +1,12 @@ +package cutoff + +import "github.com/multiversx/mx-chain-go/config" + +// CreateBlockProcessingCutoffHandler will create the desired block processing cutoff handler based on configuration +func CreateBlockProcessingCutoffHandler(cfg config.BlockProcessingCutoffConfig) (BlockProcessingCutoffHandler, error) { + if !cfg.Enabled { + return NewDisabledBlockProcessingCutoff(), nil + } + + return NewBlockProcessingCutoffHandler(cfg) +} diff --git a/process/block/cutoff/blockProcessingCutoffFactory_test.go b/process/block/cutoff/blockProcessingCutoffFactory_test.go new file mode 100644 index 00000000000..1e259a182c4 --- /dev/null +++ b/process/block/cutoff/blockProcessingCutoffFactory_test.go @@ -0,0 +1,40 @@ +package cutoff + +import ( + "fmt" + "testing" + + "github.com/multiversx/mx-chain-go/config" + "github.com/stretchr/testify/require" +) + +func TestCreateBlockProcessingCutoffHandler(t *testing.T) { + t.Parallel() + + t.Run("should create disabled instance", func(t *testing.T) { + t.Parallel() + + cfg := config.BlockProcessingCutoffConfig{ + Enabled: false, + } + + instance, err := CreateBlockProcessingCutoffHandler(cfg) + require.NoError(t, err) + require.Equal(t, "*cutoff.disabledBlockProcessingCutoff", fmt.Sprintf("%T", instance)) + }) + + t.Run("should create regular instance", func(t *testing.T) { + t.Parallel() + + cfg := config.BlockProcessingCutoffConfig{ + Enabled: true, + Mode: "pause", + CutoffTrigger: "nonce", + Value: 37, + } + + instance, err := CreateBlockProcessingCutoffHandler(cfg) + require.NoError(t, err) + require.Equal(t, "*cutoff.blockProcessingCutoffHandler", fmt.Sprintf("%T", instance)) + }) +} diff --git a/process/block/cutoff/blockProcessingCutoffHandler.go b/process/block/cutoff/blockProcessingCutoffHandler.go new file mode 100644 index 00000000000..90eec668507 --- /dev/null +++ b/process/block/cutoff/blockProcessingCutoffHandler.go @@ -0,0 +1,124 @@ +package cutoff + +import ( + "fmt" + "math" + "time" + + "github.com/multiversx/mx-chain-core-go/core/check" + 
"github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/config" + logger "github.com/multiversx/mx-chain-logger-go" +) + +var log = logger.GetOrCreate("process/block/cutoff") + +type blockProcessingCutoffHandler struct { + config config.BlockProcessingCutoffConfig + stopRound uint64 + stopNonce uint64 + stopEpoch uint32 +} + +// NewBlockProcessingCutoffHandler will return a new instance of blockProcessingCutoffHandler +func NewBlockProcessingCutoffHandler(cfg config.BlockProcessingCutoffConfig) (*blockProcessingCutoffHandler, error) { + b := &blockProcessingCutoffHandler{ + config: cfg, + stopEpoch: math.MaxUint32, + stopNonce: math.MaxUint64, + stopRound: math.MaxUint64, + } + + err := b.applyConfig(cfg) + if err != nil { + return nil, err + } + + log.Warn("node is started by using block processing cutoff and will pause/error at the provided coordinate", "mode", cfg.Mode, cfg.CutoffTrigger, cfg.Value) + return b, nil +} + +func (b *blockProcessingCutoffHandler) applyConfig(cfg config.BlockProcessingCutoffConfig) error { + switch common.BlockProcessingCutoffMode(cfg.Mode) { + case common.BlockProcessingCutoffModeProcessError: + case common.BlockProcessingCutoffModePause: + default: + return fmt.Errorf("%w, provided value=%s", errInvalidBlockProcessingCutOffMode, cfg.Mode) + } + + switch common.BlockProcessingCutoffTrigger(cfg.CutoffTrigger) { + case common.BlockProcessingCutoffByRound: + b.stopRound = cfg.Value + case common.BlockProcessingCutoffByNonce: + b.stopNonce = cfg.Value + case common.BlockProcessingCutoffByEpoch: + b.stopEpoch = uint32(cfg.Value) + default: + return fmt.Errorf("%w, provided value=%s", errInvalidBlockProcessingCutOffTrigger, cfg.CutoffTrigger) + } + + return nil +} + +// HandlePauseCutoff will pause the processing if the required coordinates are met +func (b *blockProcessingCutoffHandler) HandlePauseCutoff(header data.HeaderHandler) { + shouldSkip := !b.config.Enabled || + check.IfNil(header) || + b.config.Mode != common.BlockProcessingCutoffModePause + if shouldSkip { + return + } + + trigger, value, isTriggered := b.isTriggered(header) + if !isTriggered { + return + } + + log.Info("cutting off the block processing. 
The node will not advance", trigger, value) + go func() { + for { + time.Sleep(time.Minute) + log.Info("node is in block processing cut-off mode", trigger, value) + } + }() + neverEndingChannel := make(chan struct{}) + <-neverEndingChannel +} + +// HandleProcessErrorCutoff will return error if the processing block matches the required coordinates +func (b *blockProcessingCutoffHandler) HandleProcessErrorCutoff(header data.HeaderHandler) error { + shouldSkip := !b.config.Enabled || + check.IfNil(header) || + b.config.Mode != common.BlockProcessingCutoffModeProcessError + if shouldSkip { + return nil + } + + trigger, value, isTriggered := b.isTriggered(header) + if !isTriggered { + return nil + } + + log.Info("block processing cutoff - return err", trigger, value) + return errProcess +} + +func (b *blockProcessingCutoffHandler) isTriggered(header data.HeaderHandler) (common.BlockProcessingCutoffTrigger, uint64, bool) { + if header.GetRound() >= b.stopRound { + return common.BlockProcessingCutoffByRound, header.GetRound(), true + } + if header.GetNonce() >= b.stopNonce { + return common.BlockProcessingCutoffByNonce, header.GetNonce(), true + } + if header.GetEpoch() >= b.stopEpoch { + return common.BlockProcessingCutoffByEpoch, uint64(header.GetEpoch()), true + } + + return "", 0, false +} + +// IsInterfaceNil returns true if there is no value under the interface +func (b *blockProcessingCutoffHandler) IsInterfaceNil() bool { + return b == nil +} diff --git a/process/block/cutoff/blockProcessingCutoffHandler_test.go b/process/block/cutoff/blockProcessingCutoffHandler_test.go new file mode 100644 index 00000000000..84cd4403ecc --- /dev/null +++ b/process/block/cutoff/blockProcessingCutoffHandler_test.go @@ -0,0 +1,183 @@ +package cutoff + +import ( + "testing" + "time" + + "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-core-go/data/block" + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/config" + "github.com/stretchr/testify/require" +) + +func TestNewBlockProcessingCutoffHandler(t *testing.T) { + t.Parallel() + + t.Run("invalid mode - should error", func(t *testing.T) { + t.Parallel() + + cfg := config.BlockProcessingCutoffConfig{ + Enabled: true, + Mode: "invalid", + } + b, err := NewBlockProcessingCutoffHandler(cfg) + require.Equal(t, "invalid block processing cutoff mode, provided value=invalid", err.Error()) + require.Nil(t, b) + }) + + t.Run("invalid cutoff trigger - should error", func(t *testing.T) { + t.Parallel() + + cfg := config.BlockProcessingCutoffConfig{ + Enabled: true, + Mode: "pause", + CutoffTrigger: "invalid", + } + b, err := NewBlockProcessingCutoffHandler(cfg) + require.Equal(t, "invalid block processing cutoff trigger, provided value=invalid", err.Error()) + require.Nil(t, b) + }) + + t.Run("should work", func(t *testing.T) { + t.Parallel() + + cfg := config.BlockProcessingCutoffConfig{ + Enabled: true, + Mode: "pause", + CutoffTrigger: "epoch", + } + b, err := NewBlockProcessingCutoffHandler(cfg) + require.NoError(t, err) + require.False(t, check.IfNil(b)) + }) +} + +func TestBlockProcessingCutoffHandler_HandlePauseBackoff(t *testing.T) { + t.Parallel() + + t.Run("bad config - should skip", func(t *testing.T) { + t.Parallel() + + defer func() { + r := recover() + require.Nil(t, r) + }() + + cfg := config.BlockProcessingCutoffConfig{ + Enabled: false, + Mode: "pause", + CutoffTrigger: "nonce", + } + b, err := NewBlockProcessingCutoffHandler(cfg) + require.NoError(t, err) + + b.HandlePauseCutoff(nil) 
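// The handler is disabled via config here, so every call below must return immediately
// instead of blocking; nil headers are also tolerated through the check.IfNil guard.
// e.g. a config like {Enabled: false, Mode: "pause", CutoffTrigger: "nonce"} never triggers,
// regardless of the header passed in.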
+ b.HandlePauseCutoff(&block.MetaBlock{}) + b.config.CutoffTrigger = common.BlockProcessingCutoffModeProcessError + b.HandlePauseCutoff(&block.MetaBlock{}) + }) + + t.Run("pause via round - should work", testHandlePauseCutoff(string(common.BlockProcessingCutoffByRound))) + t.Run("pause via nonce - should work", testHandlePauseCutoff(string(common.BlockProcessingCutoffByNonce))) + t.Run("pause via epoch - should work", testHandlePauseCutoff(string(common.BlockProcessingCutoffByEpoch))) +} + +func testHandlePauseCutoff(trigger string) func(t *testing.T) { + return func(t *testing.T) { + t.Parallel() + + cfg := config.BlockProcessingCutoffConfig{ + Enabled: true, + Mode: common.BlockProcessingCutoffModePause, + CutoffTrigger: trigger, + Value: 20, + } + b, err := NewBlockProcessingCutoffHandler(cfg) + require.NoError(t, err) + + b.HandlePauseCutoff(&block.MetaBlock{ + Epoch: 19, // not the desired epoch + Nonce: 19, // not the desired nonce + Round: 19, // not the desired round + }) + + done := make(chan struct{}) + go func() { + b.HandlePauseCutoff(&block.MetaBlock{ + Epoch: 20, + Nonce: 20, + Round: 20, + }) + done <- struct{}{} + }() + + select { + case <-done: + require.Fail(t, "should have not advanced") + case <-time.After(100 * time.Millisecond): + } + } +} + +func TestBlockProcessingCutoffHandler_HandleProcessErrorBackoff(t *testing.T) { + t.Parallel() + + t.Run("bad config - should skip", func(t *testing.T) { + t.Parallel() + + defer func() { + r := recover() + require.Nil(t, r) + }() + + cfg := config.BlockProcessingCutoffConfig{ + Enabled: false, + Mode: "pause", + CutoffTrigger: "nonce", + } + b, err := NewBlockProcessingCutoffHandler(cfg) + require.NoError(t, err) + + err = b.HandleProcessErrorCutoff(nil) + require.NoError(t, err) + err = b.HandleProcessErrorCutoff(&block.MetaBlock{}) + require.NoError(t, err) + b.config.CutoffTrigger = "pause" + err = b.HandleProcessErrorCutoff(&block.MetaBlock{}) + require.NoError(t, err) + }) + + t.Run("process error via round - should work", testHandleProcessErrorCutoff(string(common.BlockProcessingCutoffByRound))) + t.Run("process error via nonce - should work", testHandleProcessErrorCutoff(string(common.BlockProcessingCutoffByNonce))) + t.Run("process error via epoch - should work", testHandleProcessErrorCutoff(string(common.BlockProcessingCutoffByEpoch))) +} + +func testHandleProcessErrorCutoff(trigger string) func(t *testing.T) { + return func(t *testing.T) { + t.Parallel() + + cfg := config.BlockProcessingCutoffConfig{ + Enabled: true, + Mode: common.BlockProcessingCutoffModeProcessError, + CutoffTrigger: trigger, + Value: 20, + } + b, err := NewBlockProcessingCutoffHandler(cfg) + require.NoError(t, err) + + err = b.HandleProcessErrorCutoff(&block.MetaBlock{ + Epoch: 19, // not the desired epoch + Nonce: 19, // not the desired nonce + Round: 19, // not the desired round + }) + require.NoError(t, err) + + err = b.HandleProcessErrorCutoff(&block.MetaBlock{ + Epoch: 20, + Nonce: 20, + Round: 20, + }) + require.Equal(t, errProcess, err) + } +} diff --git a/process/block/cutoff/disabledBlockProcessingCutoff.go b/process/block/cutoff/disabledBlockProcessingCutoff.go new file mode 100644 index 00000000000..b909e88b964 --- /dev/null +++ b/process/block/cutoff/disabledBlockProcessingCutoff.go @@ -0,0 +1,25 @@ +package cutoff + +import "github.com/multiversx/mx-chain-core-go/data" + +type disabledBlockProcessingCutoff struct { +} + +// NewDisabledBlockProcessingCutoff will return a new instance of disabledBlockProcessingCutoff +func 
NewDisabledBlockProcessingCutoff() *disabledBlockProcessingCutoff { + return &disabledBlockProcessingCutoff{} +} + +// HandleProcessErrorCutoff returns nil +func (d *disabledBlockProcessingCutoff) HandleProcessErrorCutoff(_ data.HeaderHandler) error { + return nil +} + +// HandlePauseCutoff does nothing +func (d *disabledBlockProcessingCutoff) HandlePauseCutoff(_ data.HeaderHandler) { +} + +// IsInterfaceNil returns true since this structure uses value receivers +func (d *disabledBlockProcessingCutoff) IsInterfaceNil() bool { + return d == nil +} diff --git a/process/block/cutoff/disabledBlockProcessingCutoff_test.go b/process/block/cutoff/disabledBlockProcessingCutoff_test.go new file mode 100644 index 00000000000..47bbc422062 --- /dev/null +++ b/process/block/cutoff/disabledBlockProcessingCutoff_test.go @@ -0,0 +1,26 @@ +package cutoff + +import ( + "testing" + + "github.com/multiversx/mx-chain-core-go/data/block" + "github.com/stretchr/testify/require" +) + +func TestDisabledBlockProcessingCutoff_FunctionsShouldNotPanic(t *testing.T) { + t.Parallel() + + defer func() { + r := recover() + require.Nil(t, r) + }() + d := NewDisabledBlockProcessingCutoff() + + d.HandlePauseCutoff(&block.MetaBlock{Nonce: 37}) + err := d.HandleProcessErrorCutoff(&block.MetaBlock{Round: 37}) + require.NoError(t, err) + require.False(t, d.IsInterfaceNil()) + + var nilObj *disabledBlockProcessingCutoff + require.True(t, nilObj.IsInterfaceNil()) +} diff --git a/process/block/cutoff/errors.go b/process/block/cutoff/errors.go new file mode 100644 index 00000000000..f91484088e9 --- /dev/null +++ b/process/block/cutoff/errors.go @@ -0,0 +1,9 @@ +package cutoff + +import "errors" + +var errProcess = errors.New("block processing cutoff - intended processing error") + +var errInvalidBlockProcessingCutOffMode = errors.New("invalid block processing cutoff mode") + +var errInvalidBlockProcessingCutOffTrigger = errors.New("invalid block processing cutoff trigger") diff --git a/process/block/cutoff/interface.go b/process/block/cutoff/interface.go new file mode 100644 index 00000000000..849c4f88c5d --- /dev/null +++ b/process/block/cutoff/interface.go @@ -0,0 +1,10 @@ +package cutoff + +import "github.com/multiversx/mx-chain-core-go/data" + +// BlockProcessingCutoffHandler defines the actions that a block processing handler has to take care of +type BlockProcessingCutoffHandler interface { + HandleProcessErrorCutoff(header data.HeaderHandler) error + HandlePauseCutoff(header data.HeaderHandler) + IsInterfaceNil() bool +} diff --git a/process/block/export_test.go b/process/block/export_test.go index a47d9851500..a382ac21519 100644 --- a/process/block/export_test.go +++ b/process/block/export_test.go @@ -163,6 +163,7 @@ func NewShardProcessorEmptyWith3shards( ScheduledTxsExecutionHandler: &testscommon.ScheduledTxsExecutionStub{}, ProcessedMiniBlocksTracker: &testscommon.ProcessedMiniBlocksTrackerStub{}, ReceiptsRepository: &testscommon.ReceiptsRepositoryStub{}, + BlockProcessingCutoffHandler: &testscommon.BlockProcessingCutoffStub{}, }, } shardProc, err := NewShardProcessor(arguments) @@ -495,7 +496,7 @@ func (mp *metaProcessor) GetFinalMiniBlockHeaders(miniBlockHeaderHandlers []data } func CheckProcessorNilParameters(arguments ArgBaseProcessor) error { - return checkProcessorNilParameters(arguments) + return checkProcessorParameters(arguments) } func (bp *baseProcessor) SetIndexOfFirstTxProcessed(miniBlockHeaderHandler data.MiniBlockHeaderHandler) error { diff --git a/process/block/metablock.go b/process/block/metablock.go 
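Before the metaProcessor integration below, a short sketch of how the BlockProcessingCutoffHandler interface lets wiring code pick between the real handler and the disabled no-op one. The helper name and its package are hypothetical; the actual selection happens in the node's factories, not in this hunk.

package factory

import (
	"github.com/multiversx/mx-chain-go/config"
	"github.com/multiversx/mx-chain-go/process/block/cutoff"
)

// createBlockProcessingCutoffHandler is an assumed helper: both constructors shown above
// satisfy cutoff.BlockProcessingCutoffHandler, so callers only depend on the interface.
func createBlockProcessingCutoffHandler(cfg config.BlockProcessingCutoffConfig) (cutoff.BlockProcessingCutoffHandler, error) {
	if !cfg.Enabled {
		return cutoff.NewDisabledBlockProcessingCutoff(), nil
	}
	return cutoff.NewBlockProcessingCutoffHandler(cfg)
}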
index 768970cb44b..534ddb367d4 100644 --- a/process/block/metablock.go +++ b/process/block/metablock.go @@ -46,7 +46,7 @@ type metaProcessor struct { // NewMetaProcessor creates a new metaProcessor object func NewMetaProcessor(arguments ArgMetaProcessor) (*metaProcessor, error) { - err := checkProcessorNilParameters(arguments.ArgBaseProcessor) + err := checkProcessorParameters(arguments.ArgBaseProcessor) if err != nil { return nil, err } @@ -133,6 +133,7 @@ func NewMetaProcessor(arguments ArgMetaProcessor) (*metaProcessor, error) { processDebugger: processDebugger, outportDataProvider: arguments.OutportDataProvider, processStatusHandler: arguments.CoreComponents.ProcessStatusHandler(), + blockProcessingCutoffHandler: arguments.BlockProcessingCutoffHandler, } mp := metaProcessor{ @@ -396,6 +397,11 @@ func (mp *metaProcessor) ProcessBlock( return err } + err = mp.blockProcessingCutoffHandler.HandleProcessErrorCutoff(header) + if err != nil { + return err + } + return nil } @@ -617,15 +623,24 @@ func (mp *metaProcessor) indexBlock( HeaderHash: headerHash, Header: metaBlock, Body: body, + PreviousHeader: lastMetaBlock, RewardsTxs: rewardsTxs, NotarizedHeadersHashes: notarizedHeadersHashes, - PreviousHeader: lastMetaBlock, + HighestFinalBlockNonce: mp.forkDetector.GetHighestFinalBlockNonce(), + HighestFinalBlockHash: mp.forkDetector.GetHighestFinalBlockHash(), }) if err != nil { - log.Error("metaProcessor.indexBlock cannot prepare argSaveBlock", "error", err.Error()) + log.Error("metaProcessor.indexBlock cannot prepare argSaveBlock", "error", err.Error(), + "hash", headerHash, "nonce", metaBlock.GetNonce(), "round", metaBlock.GetRound()) return } - mp.outportHandler.SaveBlock(argSaveBlock) + err = mp.outportHandler.SaveBlock(argSaveBlock) + if err != nil { + log.Error("metaProcessor.outportHandler.SaveBlock cannot save block", "error", err, + "hash", headerHash, "nonce", metaBlock.GetNonce(), "round", metaBlock.GetRound()) + return + } + log.Debug("indexed block", "hash", headerHash, "nonce", metaBlock.GetNonce(), "round", metaBlock.GetRound()) indexRoundInfo(mp.outportHandler, mp.nodesCoordinator, core.MetachainShardId, metaBlock, lastMetaBlock, argSaveBlock.SignersIndexes) @@ -1333,6 +1348,8 @@ func (mp *metaProcessor) CommitBlock( mp.cleanupPools(headerHandler) + mp.blockProcessingCutoffHandler.HandlePauseCutoff(header) + return nil } diff --git a/process/block/metablock_test.go b/process/block/metablock_test.go index 02e3f607814..0916ebc80b6 100644 --- a/process/block/metablock_test.go +++ b/process/block/metablock_test.go @@ -145,6 +145,7 @@ func createMockMetaArguments( ProcessedMiniBlocksTracker: &testscommon.ProcessedMiniBlocksTrackerStub{}, ReceiptsRepository: &testscommon.ReceiptsRepositoryStub{}, OutportDataProvider: &outport.OutportDataProviderStub{}, + BlockProcessingCutoffHandler: &testscommon.BlockProcessingCutoffStub{}, }, SCToProtocol: &mock.SCToProtocolStub{}, PendingMiniBlocksHandler: &mock.PendingMiniBlocksHandlerStub{}, @@ -520,6 +521,17 @@ func TestNewMetaProcessor_NilScheduledTxsExecutionHandlerShouldErr(t *testing.T) assert.Nil(t, be) } +func TestNewMetaProcessor_NilBlockProcessingCutoffHandlerShouldErr(t *testing.T) { + t.Parallel() + + arguments := createMockMetaArguments(createMockComponentHolders()) + arguments.BlockProcessingCutoffHandler = nil + + be, err := blproc.NewMetaProcessor(arguments) + assert.Equal(t, process.ErrNilBlockProcessingCutoffHandler, err) + assert.Nil(t, be) +} + func TestNewMetaProcessor_OkValsShouldWork(t *testing.T) { t.Parallel() diff --git 
a/process/block/metrics.go b/process/block/metrics.go index 06bef33a096..a97e60e7602 100644 --- a/process/block/metrics.go +++ b/process/block/metrics.go @@ -208,16 +208,16 @@ func indexRoundInfo( signersIndexes []uint64, ) { roundInfo := &outportcore.RoundInfo{ - Index: header.GetRound(), + Round: header.GetRound(), SignersIndexes: signersIndexes, BlockWasProposed: true, ShardId: shardId, Epoch: header.GetEpoch(), - Timestamp: time.Duration(header.GetTimeStamp()), + Timestamp: uint64(time.Duration(header.GetTimeStamp())), } if check.IfNil(lastHeader) { - outportHandler.SaveRoundsInfo([]*outportcore.RoundInfo{roundInfo}) + outportHandler.SaveRoundsInfo(&outportcore.RoundsInfo{RoundsInfo: []*outportcore.RoundInfo{roundInfo}}) return } @@ -239,18 +239,18 @@ func indexRoundInfo( } roundInfo = &outportcore.RoundInfo{ - Index: i, + Round: i, SignersIndexes: signersIndexes, BlockWasProposed: false, ShardId: shardId, Epoch: header.GetEpoch(), - Timestamp: time.Duration(header.GetTimeStamp() - ((currentBlockRound - i) * roundDuration)), + Timestamp: uint64(time.Duration(header.GetTimeStamp() - ((currentBlockRound - i) * roundDuration))), } roundsInfo = append(roundsInfo, roundInfo) } - outportHandler.SaveRoundsInfo(roundsInfo) + outportHandler.SaveRoundsInfo(&outportcore.RoundsInfo{RoundsInfo: roundsInfo}) } func indexValidatorsRating( @@ -269,7 +269,6 @@ func indexValidatorsRating( return } - shardValidatorsRating := make(map[string][]*outportcore.ValidatorRatingInfo) for shardID, validatorInfosInShard := range validators { validatorsInfos := make([]*outportcore.ValidatorRatingInfo, 0) for _, validatorInfo := range validatorInfosInShard { @@ -279,19 +278,11 @@ func indexValidatorsRating( }) } - indexID := fmt.Sprintf("%d_%d", shardID, metaBlock.GetEpoch()) - shardValidatorsRating[indexID] = validatorsInfos - } - - indexShardValidatorsRating(outportHandler, shardValidatorsRating) -} - -func indexShardValidatorsRating( - outportHandler outport.OutportHandler, - shardValidatorsRating map[string][]*outportcore.ValidatorRatingInfo, -) { - for indexID, validatorsInfos := range shardValidatorsRating { - outportHandler.SaveValidatorsRating(indexID, validatorsInfos) + outportHandler.SaveValidatorsRating(&outportcore.ValidatorsRating{ + ShardID: shardID, + Epoch: metaBlock.GetEpoch(), + ValidatorsRatingInfo: validatorsInfos, + }) } } diff --git a/process/block/preprocess/basePreProcess.go b/process/block/preprocess/basePreProcess.go index d6363496de3..08f3e4cfa37 100644 --- a/process/block/preprocess/basePreProcess.go +++ b/process/block/preprocess/basePreProcess.go @@ -406,17 +406,21 @@ func (bpp *basePreProcess) requestMissingTxsForShard( return requestedTxs } -func (bpp *basePreProcess) saveAccountBalanceForAddress(address []byte) { +func (bpp *basePreProcess) saveAccountBalanceForAddress(address []byte) error { if bpp.balanceComputation.IsAddressSet(address) { - return + return nil } balance, err := bpp.getBalanceForAddress(address) if err != nil { + if core.IsGetNodeFromDBError(err) { + return err + } balance = big.NewInt(0) } bpp.balanceComputation.SetBalanceToAddress(address, balance) + return nil } func (bpp *basePreProcess) getBalanceForAddress(address []byte) (*big.Int, error) { diff --git a/process/block/preprocess/rewardTxPreProcessor.go b/process/block/preprocess/rewardTxPreProcessor.go index b563c1deeef..ab6f29656e2 100644 --- a/process/block/preprocess/rewardTxPreProcessor.go +++ b/process/block/preprocess/rewardTxPreProcessor.go @@ -265,9 +265,12 @@ func (rtp *rewardTxPreprocessor) 
ProcessBlockTransactions( return process.ErrWrongTypeAssertion } - rtp.saveAccountBalanceForAddress(rTx.GetRcvAddr()) + err = rtp.saveAccountBalanceForAddress(rTx.GetRcvAddr()) + if err != nil { + return err + } - err := rtp.rewardsProcessor.ProcessRewardTransaction(rTx) + err = rtp.rewardsProcessor.ProcessRewardTransaction(rTx) if err != nil { return err } @@ -491,7 +494,10 @@ func (rtp *rewardTxPreprocessor) ProcessMiniBlock( break } - rtp.saveAccountBalanceForAddress(miniBlockRewardTxs[txIndex].GetRcvAddr()) + err = rtp.saveAccountBalanceForAddress(miniBlockRewardTxs[txIndex].GetRcvAddr()) + if err != nil { + break + } snapshot := rtp.handleProcessTransactionInit(preProcessorExecutionInfoHandler, miniBlockTxHashes[txIndex]) err = rtp.rewardsProcessor.ProcessRewardTransaction(miniBlockRewardTxs[txIndex]) diff --git a/process/block/preprocess/rewardTxPreProcessor_test.go b/process/block/preprocess/rewardTxPreProcessor_test.go index 325c7a178fd..80b29223e34 100644 --- a/process/block/preprocess/rewardTxPreProcessor_test.go +++ b/process/block/preprocess/rewardTxPreProcessor_test.go @@ -1,6 +1,7 @@ package preprocess import ( + "fmt" "testing" "time" @@ -16,6 +17,7 @@ import ( "github.com/multiversx/mx-chain-go/testscommon/hashingMocks" stateMock "github.com/multiversx/mx-chain-go/testscommon/state" storageStubs "github.com/multiversx/mx-chain-go/testscommon/storage" + vmcommon "github.com/multiversx/mx-chain-vm-common-go" "github.com/stretchr/testify/assert" ) @@ -678,6 +680,59 @@ func TestRewardTxPreprocessor_ProcessBlockTransactions(t *testing.T) { assert.Nil(t, err) } +func TestRewardTxPreprocessor_ProcessBlockTransactionsMissingTrieNode(t *testing.T) { + t.Parallel() + + missingNodeErr := fmt.Errorf(core.GetNodeFromDBErrorString) + txHash := testTxHash + tdp := initDataPool() + rtp, _ := NewRewardTxPreprocessor( + tdp.RewardTransactions(), + &storageStubs.ChainStorerStub{}, + &hashingMocks.HasherMock{}, + &mock.MarshalizerMock{}, + &testscommon.RewardTxProcessorMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &stateMock.AccountsStub{ + GetExistingAccountCalled: func(_ []byte) (vmcommon.AccountHandler, error) { + return nil, missingNodeErr + }, + }, + func(shardID uint32, txHashes [][]byte) {}, + &testscommon.GasHandlerStub{}, + createMockPubkeyConverter(), + &testscommon.BlockSizeComputationStub{}, + &testscommon.BalanceComputationStub{}, + &testscommon.ProcessedMiniBlocksTrackerStub{}, + ) + + txHashes := [][]byte{[]byte(txHash)} + txs := []data.TransactionHandler{&rewardTx.RewardTx{}} + rtp.AddTxs(txHashes, txs) + + mb1 := block.MiniBlock{ + TxHashes: txHashes, + ReceiverShardID: 1, + SenderShardID: 0, + Type: block.RewardsBlock, + } + mb2 := block.MiniBlock{ + TxHashes: txHashes, + ReceiverShardID: 0, + SenderShardID: 1, + Type: block.RewardsBlock, + } + + mbHash1, _ := core.CalculateHash(rtp.marshalizer, rtp.hasher, &mb1) + mbHash2, _ := core.CalculateHash(rtp.marshalizer, rtp.hasher, &mb2) + + var blockBody block.Body + blockBody.MiniBlocks = append(blockBody.MiniBlocks, &mb1, &mb2) + + err := rtp.ProcessBlockTransactions(&block.Header{MiniBlockHeaders: []block.MiniBlockHeader{{TxCount: 1, Hash: mbHash1}, {TxCount: 1, Hash: mbHash2}}}, &blockBody, haveTimeTrue) + assert.Equal(t, missingNodeErr, err) +} + func TestRewardTxPreprocessor_IsDataPreparedShouldErr(t *testing.T) { t.Parallel() diff --git a/process/block/preprocess/smartContractResults.go b/process/block/preprocess/smartContractResults.go index 4a6b313b777..67fb2706edf 100644 --- 
a/process/block/preprocess/smartContractResults.go +++ b/process/block/preprocess/smartContractResults.go @@ -329,9 +329,12 @@ func (scr *smartContractResults) ProcessBlockTransactions( scr.gasHandler.SetGasProvided(gasProvidedByTxInSelfShard, txHash) } - scr.saveAccountBalanceForAddress(currScr.GetRcvAddr()) + err = scr.saveAccountBalanceForAddress(currScr.GetRcvAddr()) + if err != nil { + return err + } - _, err := scr.scrProcessor.ProcessSmartContractResult(currScr) + _, err = scr.scrProcessor.ProcessSmartContractResult(currScr) if err != nil { return err } @@ -611,7 +614,10 @@ func (scr *smartContractResults) ProcessMiniBlock( } } - scr.saveAccountBalanceForAddress(miniBlockScrs[txIndex].GetRcvAddr()) + err = scr.saveAccountBalanceForAddress(miniBlockScrs[txIndex].GetRcvAddr()) + if err != nil { + break + } snapshot := scr.handleProcessTransactionInit(preProcessorExecutionInfoHandler, miniBlockTxHashes[txIndex]) _, err = scr.scrProcessor.ProcessSmartContractResult(miniBlockScrs[txIndex]) diff --git a/process/block/preprocess/smartContractResults_test.go b/process/block/preprocess/smartContractResults_test.go index 8985c30c105..af8b9ffaa14 100644 --- a/process/block/preprocess/smartContractResults_test.go +++ b/process/block/preprocess/smartContractResults_test.go @@ -2,6 +2,7 @@ package preprocess import ( "encoding/json" + "fmt" "reflect" "testing" "time" @@ -1195,6 +1196,68 @@ func TestScrsPreprocessor_ProcessBlockTransactionsShouldWork(t *testing.T) { assert.Nil(t, err) } +func TestScrsPreprocessor_ProcessBlockTransactionsMissingTrieNode(t *testing.T) { + t.Parallel() + + missingNodeErr := fmt.Errorf(core.GetNodeFromDBErrorString) + tdp := initDataPool() + requestTransaction := func(shardID uint32, txHashes [][]byte) {} + scrPreproc, _ := NewSmartContractResultPreprocessor( + tdp.UnsignedTransactions(), + &storageStubs.ChainStorerStub{}, + &hashingMocks.HasherMock{}, + &mock.MarshalizerMock{}, + &testscommon.TxProcessorMock{ + ProcessSmartContractResultCalled: func(_ *smartContractResult.SmartContractResult) (vmcommon.ReturnCode, error) { + return 0, nil + }, + }, + mock.NewMultiShardsCoordinatorMock(3), + &stateMock.AccountsStub{ + GetExistingAccountCalled: func(_ []byte) (vmcommon.AccountHandler, error) { + return nil, missingNodeErr + }, + }, + requestTransaction, + &testscommon.GasHandlerStub{}, + feeHandlerMock(), + createMockPubkeyConverter(), + &testscommon.BlockSizeComputationStub{}, + &testscommon.BalanceComputationStub{}, + &testscommon.EnableEpochsHandlerStub{}, + &testscommon.ProcessedMiniBlocksTrackerStub{}, + ) + + body := &block.Body{} + + txHash := []byte("txHash") + txHashes := make([][]byte, 0) + txHashes = append(txHashes, txHash) + + miniblock := block.MiniBlock{ + ReceiverShardID: 0, + SenderShardID: 1, + TxHashes: txHashes, + Type: block.SmartContractResultBlock, + } + + miniblockHash, _ := core.CalculateHash(scrPreproc.marshalizer, scrPreproc.hasher, &miniblock) + + body.MiniBlocks = append(body.MiniBlocks, &miniblock) + + scrPreproc.AddScrHashToRequestedList([]byte("txHash")) + txshardInfo := txShardInfo{0, 0} + scr := smartContractResult.SmartContractResult{ + Nonce: 1, + Data: []byte("tx"), + } + + scrPreproc.scrForBlock.txHashAndInfo["txHash"] = &txInfo{&scr, &txshardInfo} + + err := scrPreproc.ProcessBlockTransactions(&block.Header{MiniBlockHeaders: []block.MiniBlockHeader{{TxCount: 1, Hash: miniblockHash}}}, body, haveTimeTrue) + assert.Equal(t, missingNodeErr, err) +} + func 
TestScrsPreprocessor_ProcessBlockTransactionsShouldErrMaxGasLimitPerBlockInSelfShardIsReached(t *testing.T) { t.Parallel() diff --git a/process/block/preprocess/transactions.go b/process/block/preprocess/transactions.go index 344d430a179..14a3a8fa8d7 100644 --- a/process/block/preprocess/transactions.go +++ b/process/block/preprocess/transactions.go @@ -17,7 +17,6 @@ import ( "github.com/multiversx/mx-chain-core-go/marshal" "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/dataRetriever" - chainErr "github.com/multiversx/mx-chain-go/errors" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/sharding" "github.com/multiversx/mx-chain-go/state" @@ -575,7 +574,10 @@ func (txs *transactions) processTxsToMe( txs.gasHandler.SetGasProvided(gasProvidedByTxInSelfShard, txHash) } - txs.saveAccountBalanceForAddress(tx.GetRcvAddr()) + err = txs.saveAccountBalanceForAddress(tx.GetRcvAddr()) + if err != nil { + return err + } if scheduledMode { txs.scheduledTxsExecutionHandler.AddScheduledTx(txHash, tx) @@ -711,7 +713,7 @@ func (txs *transactions) createAndProcessScheduledMiniBlocksFromMeAsValidator( txs.sortTransactionsBySenderAndNonce(scheduledTxsFromMe, randomness) - scheduledMiniBlocks := txs.createScheduledMiniBlocks( + scheduledMiniBlocks, err := txs.createScheduledMiniBlocks( haveTime, haveAdditionalTime, isShardStuck, @@ -719,6 +721,9 @@ func (txs *transactions) createAndProcessScheduledMiniBlocksFromMeAsValidator( scheduledTxsFromMe, mapSCTxs, ) + if err != nil { + return nil, err + } if !haveTime() && !haveAdditionalTime() { return nil, process.ErrTimeIsOut @@ -1120,7 +1125,7 @@ func (txs *transactions) createAndProcessScheduledMiniBlocksFromMeAsProposer( } startTime := time.Now() - scheduledMiniBlocks := txs.createScheduledMiniBlocks( + scheduledMiniBlocks, err := txs.createScheduledMiniBlocks( haveTime, haveAdditionalTime, txs.blockTracker.IsShardStuck, @@ -1132,6 +1137,9 @@ func (txs *transactions) createAndProcessScheduledMiniBlocksFromMeAsProposer( log.Debug("elapsed time to createScheduledMiniBlocks", "time [s]", elapsedTime, ) + if err != nil { + return nil, err + } return scheduledMiniBlocks, nil } @@ -1192,6 +1200,9 @@ func (txs *transactions) createAndProcessMiniBlocksFromMeV1( err = txs.processMiniBlockBuilderTx(mbBuilder, wtx, tx) if err != nil { + if core.IsGetNodeFromDBError(err) { + return nil, nil, err + } continue } @@ -1281,7 +1292,7 @@ func (txs *transactions) handleBadTransaction( ) { log.Trace("bad tx", "error", err.Error(), "hash", wtx.TxHash) errRevert := txs.accounts.RevertToSnapshot(snapshot) - if errRevert != nil && !chainErr.IsClosingError(errRevert) { + if errRevert != nil && !core.IsClosingError(errRevert) { log.Warn("revert to snapshot", "error", err.Error()) } @@ -1544,7 +1555,10 @@ func (txs *transactions) ProcessMiniBlock( } } - txs.saveAccountBalanceForAddress(miniBlockTxs[txIndex].GetRcvAddr()) + err = txs.saveAccountBalanceForAddress(miniBlockTxs[txIndex].GetRcvAddr()) + if err != nil { + break + } if !scheduledMode { err = txs.processInNormalMode( diff --git a/process/block/preprocess/transactionsV2.go b/process/block/preprocess/transactionsV2.go index 31b526971f9..654ff4231a8 100644 --- a/process/block/preprocess/transactionsV2.go +++ b/process/block/preprocess/transactionsV2.go @@ -9,7 +9,6 @@ import ( "github.com/multiversx/mx-chain-core-go/data/block" "github.com/multiversx/mx-chain-core-go/data/transaction" "github.com/multiversx/mx-chain-go/common" - chainErr 
"github.com/multiversx/mx-chain-go/errors" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/storage/txcache" ) @@ -71,6 +70,9 @@ func (txs *transactions) createAndProcessMiniBlocksFromMeV2( receiverShardID, mbInfo) if err != nil { + if core.IsGetNodeFromDBError(err) { + return nil, nil, nil, err + } if shouldAddToRemaining { remainingTxs = append(remainingTxs, sortedTxs[index]) } @@ -186,7 +188,7 @@ func (txs *transactions) processTransaction( log.Trace("bad tx", "error", err.Error(), "hash", txHash) errRevert := txs.accounts.RevertToSnapshot(snapshot) - if errRevert != nil && !chainErr.IsClosingError(errRevert) { + if errRevert != nil && !core.IsClosingError(errRevert) { log.Warn("revert to snapshot", "error", errRevert.Error()) } @@ -270,7 +272,7 @@ func (txs *transactions) createScheduledMiniBlocks( isMaxBlockSizeReached func(int, int) bool, sortedTxs []*txcache.WrappedTransaction, mapSCTxs map[string]struct{}, -) block.MiniBlockSlice { +) (block.MiniBlockSlice, error) { log.Debug("createScheduledMiniBlocks has been started") mbInfo := txs.initCreateScheduledMiniBlocks() @@ -313,6 +315,9 @@ func (txs *transactions) createScheduledMiniBlocks( receiverShardID, mbInfo) if err != nil { + if core.IsGetNodeFromDBError(err) { + return nil, err + } continue } @@ -331,7 +336,7 @@ func (txs *transactions) createScheduledMiniBlocks( log.Debug("createScheduledMiniBlocks has been finished") - return miniBlocks + return miniBlocks, nil } func (txs *transactions) verifyTransaction( diff --git a/process/block/preprocess/transactionsV2_test.go b/process/block/preprocess/transactionsV2_test.go index a4071410965..a2b0326068a 100644 --- a/process/block/preprocess/transactionsV2_test.go +++ b/process/block/preprocess/transactionsV2_test.go @@ -2,6 +2,7 @@ package preprocess import ( "bytes" + "fmt" "math/big" "testing" @@ -562,7 +563,8 @@ func TestTransactions_CreateScheduledMiniBlocksShouldWork(t *testing.T) { tx := &txcache.WrappedTransaction{} sortedTxs = append(sortedTxs, tx) - mbs := preprocessor.createScheduledMiniBlocks(haveTimeMethod, haveAdditionalTimeMethod, isShardStuckMethod, isMaxBlockSizeReachedMethod, sortedTxs, mapSCTxs) + mbs, err := preprocessor.createScheduledMiniBlocks(haveTimeMethod, haveAdditionalTimeMethod, isShardStuckMethod, isMaxBlockSizeReachedMethod, sortedTxs, mapSCTxs) + assert.Nil(t, err) assert.Equal(t, 0, len(mbs)) // should not create scheduled mini blocks when max block size is reached @@ -577,7 +579,8 @@ func TestTransactions_CreateScheduledMiniBlocksShouldWork(t *testing.T) { } sortedTxs = append(sortedTxs, tx) - mbs = preprocessor.createScheduledMiniBlocks(haveTimeMethod, haveAdditionalTimeMethod, isShardStuckMethod, isMaxBlockSizeReachedMethod, sortedTxs, mapSCTxs) + mbs, err = preprocessor.createScheduledMiniBlocks(haveTimeMethod, haveAdditionalTimeMethod, isShardStuckMethod, isMaxBlockSizeReachedMethod, sortedTxs, mapSCTxs) + assert.Nil(t, err) assert.Equal(t, 0, len(mbs)) // should not create scheduled mini blocks when verifyTransaction returns error @@ -593,7 +596,8 @@ func TestTransactions_CreateScheduledMiniBlocksShouldWork(t *testing.T) { } sortedTxs = append(sortedTxs, tx) - mbs = preprocessor.createScheduledMiniBlocks(haveTimeMethod, haveAdditionalTimeMethod, isShardStuckMethod, isMaxBlockSizeReachedMethod, sortedTxs, mapSCTxs) + mbs, err = preprocessor.createScheduledMiniBlocks(haveTimeMethod, haveAdditionalTimeMethod, isShardStuckMethod, isMaxBlockSizeReachedMethod, sortedTxs, mapSCTxs) + assert.Nil(t, err) assert.Equal(t, 0, 
len(mbs)) // should create two scheduled mini blocks @@ -624,7 +628,8 @@ func TestTransactions_CreateScheduledMiniBlocksShouldWork(t *testing.T) { sortedTxs = append(sortedTxs, tx3) mapSCTxs["hash1"] = struct{}{} - mbs = preprocessor.createScheduledMiniBlocks(haveTimeMethod, haveAdditionalTimeMethod, isShardStuckMethod, isMaxBlockSizeReachedMethod, sortedTxs, mapSCTxs) + mbs, err = preprocessor.createScheduledMiniBlocks(haveTimeMethod, haveAdditionalTimeMethod, isShardStuckMethod, isMaxBlockSizeReachedMethod, sortedTxs, mapSCTxs) + assert.Nil(t, err) assert.Equal(t, 2, len(mbs)) } @@ -749,6 +754,56 @@ func TestTransactions_CreateAndProcessMiniBlocksFromMeV2ShouldWork(t *testing.T) assert.Equal(t, 2, len(mapSCTxs)) } +func TestTransactions_CreateAndProcessMiniBlocksFromMeV2MissingTrieNode(t *testing.T) { + t.Parallel() + + missingNodeErr := fmt.Errorf(core.GetNodeFromDBErrorString) + preprocessor := createTransactionPreprocessor() + preprocessor.txProcessor = &testscommon.TxProcessorMock{ + ProcessTransactionCalled: func(transaction *transaction.Transaction) (vmcommon.ReturnCode, error) { + return vmcommon.ExecutionFailed, missingNodeErr + }, + } + + haveTimeMethodReturn := true + isShardStuckMethodReturn := false + isMaxBlockSizeReachedMethodReturn := false + sortedTxs := make([]*txcache.WrappedTransaction, 0) + mapSCTxs := make(map[string]struct{}) + tx1 := &txcache.WrappedTransaction{ + ReceiverShardID: 0, + Tx: &transaction.Transaction{Nonce: 1}, + TxHash: []byte("hash1"), + } + tx2 := &txcache.WrappedTransaction{ + ReceiverShardID: 1, + Tx: &transaction.Transaction{Nonce: 2, RcvAddr: []byte("smart contract address")}, + TxHash: []byte("hash2"), + } + tx3 := &txcache.WrappedTransaction{ + ReceiverShardID: 2, + Tx: &transaction.Transaction{Nonce: 3, RcvAddr: []byte("smart contract address")}, + TxHash: []byte("hash3"), + } + sortedTxs = append(sortedTxs, tx1) + sortedTxs = append(sortedTxs, tx2) + sortedTxs = append(sortedTxs, tx3) + mapSCTxs["hash1"] = struct{}{} + + haveTimeMethod := func() bool { + return haveTimeMethodReturn + } + isShardStuckMethod := func(uint32) bool { + return isShardStuckMethodReturn + } + isMaxBlockSizeReachedMethod := func(int, int) bool { + return isMaxBlockSizeReachedMethodReturn + } + + _, _, _, err := preprocessor.createAndProcessMiniBlocksFromMeV2(haveTimeMethod, isShardStuckMethod, isMaxBlockSizeReachedMethod, sortedTxs) + assert.Equal(t, missingNodeErr, err) +} + func TestTransactions_ProcessTransactionShouldWork(t *testing.T) { t.Parallel() diff --git a/process/block/preprocess/transactions_test.go b/process/block/preprocess/transactions_test.go index 205ba8b58e2..872472cd218 100644 --- a/process/block/preprocess/transactions_test.go +++ b/process/block/preprocess/transactions_test.go @@ -1153,6 +1153,38 @@ func TestTransactionPreprocessor_ProcessTxsToMeShouldUseCorrectSenderAndReceiver assert.Equal(t, uint32(0), receiverShardID) } +func TestTransactionPreprocessor_ProcessTxsToMeMissingTrieNode(t *testing.T) { + t.Parallel() + + missingNodeErr := fmt.Errorf(core.GetNodeFromDBErrorString) + + args := createDefaultTransactionsProcessorArgs() + args.Accounts = &stateMock.AccountsStub{ + GetExistingAccountCalled: func(_ []byte) (vmcommon.AccountHandler, error) { + return nil, missingNodeErr + }, + } + preprocessor, _ := NewTransactionPreprocessor(args) + + tx := transaction.Transaction{SndAddr: []byte("2"), RcvAddr: []byte("0")} + txHash, _ := core.CalculateHash(preprocessor.marshalizer, preprocessor.hasher, tx) + miniBlock := &block.MiniBlock{ + TxHashes: 
[][]byte{txHash}, + SenderShardID: 1, + ReceiverShardID: 0, + Type: block.TxBlock, + } + miniBlockHash, _ := core.CalculateHash(preprocessor.marshalizer, preprocessor.hasher, miniBlock) + body := block.Body{ + MiniBlocks: []*block.MiniBlock{miniBlock}, + } + + preprocessor.AddTxForCurrentBlock(txHash, &tx, 1, 0) + + err := preprocessor.ProcessTxsToMe(&block.Header{MiniBlockHeaders: []block.MiniBlockHeader{{Hash: miniBlockHash, TxCount: 1}}}, &body, haveTimeTrue) + assert.Equal(t, missingNodeErr, err) +} + func TestTransactionsPreprocessor_ProcessMiniBlockShouldWork(t *testing.T) { t.Parallel() diff --git a/process/block/shardblock.go b/process/block/shardblock.go index 14faf2a8507..4bb9ec1b113 100644 --- a/process/block/shardblock.go +++ b/process/block/shardblock.go @@ -51,7 +51,7 @@ type shardProcessor struct { // NewShardProcessor creates a new shardProcessor object func NewShardProcessor(arguments ArgShardProcessor) (*shardProcessor, error) { - err := checkProcessorNilParameters(arguments.ArgBaseProcessor) + err := checkProcessorParameters(arguments.ArgBaseProcessor) if err != nil { return nil, err } @@ -118,6 +118,7 @@ func NewShardProcessor(arguments ArgShardProcessor) (*shardProcessor, error) { processDebugger: processDebugger, outportDataProvider: arguments.OutportDataProvider, processStatusHandler: arguments.CoreComponents.ProcessStatusHandler(), + blockProcessingCutoffHandler: arguments.BlockProcessingCutoffHandler, } sp := shardProcessor{ @@ -346,6 +347,11 @@ func (sp *shardProcessor) ProcessBlock( return err } + err = sp.blockProcessingCutoffHandler.HandleProcessErrorCutoff(header) + if err != nil { + return err + } + return nil } @@ -596,16 +602,25 @@ func (sp *shardProcessor) indexBlockIfNeeded( log.Debug("preparing to index block", "hash", headerHash, "nonce", header.GetNonce(), "round", header.GetRound()) argSaveBlock, err := sp.outportDataProvider.PrepareOutportSaveBlockData(processOutport.ArgPrepareOutportSaveBlockData{ - HeaderHash: headerHash, - Header: header, - Body: body, - PreviousHeader: lastBlockHeader, + HeaderHash: headerHash, + Header: header, + Body: body, + PreviousHeader: lastBlockHeader, + HighestFinalBlockNonce: sp.forkDetector.GetHighestFinalBlockNonce(), + HighestFinalBlockHash: sp.forkDetector.GetHighestFinalBlockHash(), }) if err != nil { - log.Error("shardProcessor.indexBlockIfNeeded cannot prepare argSaveBlock", "error", err.Error()) + log.Error("shardProcessor.indexBlockIfNeeded cannot prepare argSaveBlock", "error", err.Error(), + "hash", headerHash, "nonce", header.GetNonce(), "round", header.GetRound()) return } - sp.outportHandler.SaveBlock(argSaveBlock) + err = sp.outportHandler.SaveBlock(argSaveBlock) + if err != nil { + log.Error("shardProcessor.outportHandler.SaveBlock cannot save block", "error", err, + "hash", headerHash, "nonce", header.GetNonce(), "round", header.GetRound()) + return + } + log.Debug("indexed block", "hash", headerHash, "nonce", header.GetNonce(), "round", header.GetRound()) shardID := sp.shardCoordinator.SelfId() @@ -1077,6 +1092,8 @@ func (sp *shardProcessor) CommitBlock( sp.cleanupPools(headerHandler) + sp.blockProcessingCutoffHandler.HandlePauseCutoff(header) + return nil } diff --git a/process/block/shardblock_test.go b/process/block/shardblock_test.go index 1cd3d4f761e..5d0b6364a3a 100644 --- a/process/block/shardblock_test.go +++ b/process/block/shardblock_test.go @@ -2229,16 +2229,24 @@ func TestShardProcessor_CommitBlockCallsIndexerMethods(t *testing.T) { called := false statusComponents.Outport = 
&outport.OutportStub{ - SaveBlockCalled: func(args *outportcore.ArgsSaveBlockData) { + SaveBlockCalled: func(args *outportcore.OutportBlockWithHeaderAndBody) error { called = true + return nil }, HasDriversCalled: func() bool { return true }, } arguments.OutportDataProvider = &outport.OutportDataProviderStub{ - PrepareOutportSaveBlockDataCalled: func(_ processOutport.ArgPrepareOutportSaveBlockData) (*outportcore.ArgsSaveBlockData, error) { - return &outportcore.ArgsSaveBlockData{}, nil + PrepareOutportSaveBlockDataCalled: func(_ processOutport.ArgPrepareOutportSaveBlockData) (*outportcore.OutportBlockWithHeaderAndBody, error) { + return &outportcore.OutportBlockWithHeaderAndBody{ + HeaderDataWithBody: &outportcore.HeaderDataWithBody{ + Body: &block.Body{}, + Header: &block.HeaderV2{}, + HeaderHash: []byte("hash"), + }, + OutportBlock: &outportcore.OutportBlock{}, + }, nil }} arguments.AccountsDB[state.UserAccountsState] = accounts diff --git a/process/coordinator/process.go b/process/coordinator/process.go index 1830c602c0a..32e75824e5c 100644 --- a/process/coordinator/process.go +++ b/process/coordinator/process.go @@ -719,6 +719,7 @@ func (tc *transactionCoordinator) CreateMbsAndProcessCrossShardTransactionsDstMe "total gas penalized", tc.gasHandler.TotalGasPenalized(), "error", errProc, ) + continue } diff --git a/process/errors.go b/process/errors.go index b00039aaa91..6303006239a 100644 --- a/process/errors.go +++ b/process/errors.go @@ -1131,6 +1131,9 @@ var ErrNilProcessedMiniBlocksTracker = errors.New("nil processed mini blocks tra // ErrNilReceiptsRepository signals that a nil receipts repository has been provided var ErrNilReceiptsRepository = errors.New("nil receipts repository") +// ErrNilBlockProcessingCutoffHandler signals that a nil block processing cutoff handler has been provided +var ErrNilBlockProcessingCutoffHandler = errors.New("nil block processing cutoff handler") + // ErrNilESDTGlobalSettingsHandler signals that nil global settings handler was provided var ErrNilESDTGlobalSettingsHandler = errors.New("nil esdt global settings handler") diff --git a/process/factory/metachain/vmContainerFactory.go b/process/factory/metachain/vmContainerFactory.go index 665be533e17..0134cda878b 100644 --- a/process/factory/metachain/vmContainerFactory.go +++ b/process/factory/metachain/vmContainerFactory.go @@ -68,7 +68,7 @@ func NewVMContainerFactory(args ArgsNewVMContainerFactory) (*vmContainerFactory, return nil, fmt.Errorf("%w in NewVMContainerFactory", process.ErrNilEconomicsData) } if check.IfNil(args.MessageSignVerifier) { - return nil, fmt.Errorf("%w in NewVMContainerFactory", process.ErrNilKeyGen) + return nil, fmt.Errorf("%w in NewVMContainerFactory", vm.ErrNilMessageSignVerifier) } if check.IfNil(args.NodesConfigProvider) { return nil, fmt.Errorf("%w in NewVMContainerFactory", process.ErrNilNodesConfigProvider) diff --git a/process/factory/metachain/vmContainerFactory_test.go b/process/factory/metachain/vmContainerFactory_test.go index cc98654e8f3..116c23d225f 100644 --- a/process/factory/metachain/vmContainerFactory_test.go +++ b/process/factory/metachain/vmContainerFactory_test.go @@ -47,6 +47,7 @@ func createVmContainerMockArgument(gasSchedule core.GasScheduleNotifier) ArgsNew MinQuorum: 0.5, MinPassThreshold: 0.5, MinVetoThreshold: 0.5, + LostProposalFee: "1", }, }, StakingSystemSCConfig: config.StakingSystemSCConfig{ @@ -92,7 +93,7 @@ func TestNewVMContainerFactory_NilMessageSignVerifier(t *testing.T) { vmf, err := NewVMContainerFactory(argsNewVmContainerFactory) 
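An aside on the preprocessor hunks above before returning to the vmContainerFactory test: the common thread of the basePreProcess, reward transaction, smart contract result and transaction changes is that missing-trie-node errors are no longer swallowed. A tiny demo of the detection helper those changes and their new tests rely on (editor's illustration, not part of the diff):

package main

import (
	"errors"
	"fmt"

	"github.com/multiversx/mx-chain-core-go/core"
)

func main() {
	// the new tests build the sentinel exactly like this
	missingNodeErr := fmt.Errorf(core.GetNodeFromDBErrorString)

	fmt.Println(core.IsGetNodeFromDBError(missingNodeErr))               // true  -> propagate, the block fails fast so the missing node can be fetched
	fmt.Println(core.IsGetNodeFromDBError(errors.New("random failure"))) // false -> keep the previous fallback behaviour (e.g. zero balance)
}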
assert.True(t, check.IfNil(vmf)) - assert.True(t, errors.Is(err, process.ErrNilKeyGen)) + assert.True(t, errors.Is(err, vm.ErrNilMessageSignVerifier)) } func TestNewVMContainerFactory_NilNodesConfigProvider(t *testing.T) { @@ -313,8 +314,9 @@ func TestVmContainerFactory_Create(t *testing.T) { MinQuorum: 0.5, MinPassThreshold: 0.5, MinVetoThreshold: 0.5, + LostProposalFee: "1", }, - ChangeConfigAddress: "3132333435363738393031323334353637383930313233343536373839303234", + OwnerAddress: "3132333435363738393031323334353637383930313233343536373839303234", }, StakingSystemSCConfig: config.StakingSystemSCConfig{ GenesisNodePrice: "1000", diff --git a/process/interface.go b/process/interface.go index 8adfa1c3bfe..ad78dd65586 100644 --- a/process/interface.go +++ b/process/interface.go @@ -347,7 +347,7 @@ type Bootstrapper interface { Close() error AddSyncStateListener(func(isSyncing bool)) GetNodeState() common.NodeState - StartSyncingBlocks() + StartSyncingBlocks() error IsInterfaceNil() bool } @@ -1194,7 +1194,7 @@ type InterceptedChunksProcessor interface { // AccountsDBSyncer defines the methods for the accounts db syncer type AccountsDBSyncer interface { - SyncAccounts(rootHash []byte) error + SyncAccounts(rootHash []byte, storageMarker common.StorageMarker) error IsInterfaceNil() bool } diff --git a/process/mock/accountsDBSyncerStub.go b/process/mock/accountsDBSyncerStub.go index 9ff9abb9017..39477bdc70a 100644 --- a/process/mock/accountsDBSyncerStub.go +++ b/process/mock/accountsDBSyncerStub.go @@ -7,7 +7,7 @@ import ( // AccountsDBSyncerStub - type AccountsDBSyncerStub struct { GetSyncedTriesCalled func() map[string]common.Trie - SyncAccountsCalled func(rootHash []byte) error + SyncAccountsCalled func(rootHash []byte, storageMarker common.StorageMarker) error } // GetSyncedTries - @@ -19,9 +19,9 @@ func (a *AccountsDBSyncerStub) GetSyncedTries() map[string]common.Trie { } // SyncAccounts - -func (a *AccountsDBSyncerStub) SyncAccounts(rootHash []byte) error { +func (a *AccountsDBSyncerStub) SyncAccounts(rootHash []byte, storageMarker common.StorageMarker) error { if a.SyncAccountsCalled != nil { - return a.SyncAccountsCalled(rootHash) + return a.SyncAccountsCalled(rootHash, storageMarker) } return nil } diff --git a/process/mock/bootstrapperStub.go b/process/mock/bootstrapperStub.go index 171062a29f1..bd4a1b98bf2 100644 --- a/process/mock/bootstrapperStub.go +++ b/process/mock/bootstrapperStub.go @@ -11,7 +11,7 @@ type BootstrapperStub struct { CreateAndCommitEmptyBlockCalled func(uint32) (data.BodyHandler, data.HeaderHandler, error) AddSyncStateListenerCalled func(func(bool)) GetNodeStateCalled func() common.NodeState - StartSyncingBlocksCalled func() + StartSyncingBlocksCalled func() error } // CreateAndCommitEmptyBlock - @@ -40,8 +40,12 @@ func (boot *BootstrapperStub) GetNodeState() common.NodeState { } // StartSyncingBlocks - -func (boot *BootstrapperStub) StartSyncingBlocks() { - boot.StartSyncingBlocksCalled() +func (boot *BootstrapperStub) StartSyncingBlocks() error { + if boot.StartSyncingBlocksCalled != nil { + return boot.StartSyncingBlocksCalled() + } + + return nil } // Close - diff --git a/process/rewardTransaction/process.go b/process/rewardTransaction/process.go index 13d5b323512..e641ef5d0cd 100644 --- a/process/rewardTransaction/process.go +++ b/process/rewardTransaction/process.go @@ -96,7 +96,10 @@ func (rtp *rewardTxProcessor) ProcessRewardTransaction(rTx *rewardTx.RewardTx) e return err } - rtp.saveAccumulatedRewards(rTx, accHandler) + err = 
rtp.saveAccumulatedRewards(rTx, accHandler) + if err != nil { + return err + } return rtp.accounts.SaveAccount(accHandler) } @@ -104,9 +107,9 @@ func (rtp *rewardTxProcessor) ProcessRewardTransaction(rTx *rewardTx.RewardTx) e func (rtp *rewardTxProcessor) saveAccumulatedRewards( rtx *rewardTx.RewardTx, userAccount state.UserAccountHandler, -) { +) error { if !core.IsSmartContractAddress(rtx.RcvAddr) { - return + return nil } existingReward := big.NewInt(0) @@ -116,8 +119,14 @@ func (rtp *rewardTxProcessor) saveAccumulatedRewards( existingReward.SetBytes(val) } + if core.IsGetNodeFromDBError(err) { + return err + } + existingReward.Add(existingReward, rtx.Value) _ = userAccount.SaveKeyValue([]byte(fullRewardKey), existingReward.Bytes()) + + return nil } // IsInterfaceNil returns true if there is no value under the interface diff --git a/process/rewardTransaction/process_test.go b/process/rewardTransaction/process_test.go index 0bd15167995..97112e792b3 100644 --- a/process/rewardTransaction/process_test.go +++ b/process/rewardTransaction/process_test.go @@ -2,6 +2,7 @@ package rewardTransaction_test import ( "errors" + "fmt" "math/big" "testing" @@ -12,6 +13,7 @@ import ( "github.com/multiversx/mx-chain-go/process/rewardTransaction" "github.com/multiversx/mx-chain-go/state" stateMock "github.com/multiversx/mx-chain-go/testscommon/state" + "github.com/multiversx/mx-chain-go/testscommon/trie" vmcommon "github.com/multiversx/mx-chain-vm-common-go" "github.com/stretchr/testify/assert" ) @@ -212,6 +214,41 @@ func TestRewardTxProcessor_ProcessRewardTransactionShouldWork(t *testing.T) { assert.True(t, saveAccountWasCalled) } +func TestRewardTxProcessor_ProcessRewardTransactionMissingTrieNode(t *testing.T) { + t.Parallel() + + missingNodeErr := fmt.Errorf(core.GetNodeFromDBErrorString) + accountsDb := &stateMock.AccountsStub{ + LoadAccountCalled: func(address []byte) (vmcommon.AccountHandler, error) { + acc, _ := state.NewUserAccount(address) + acc.SetDataTrie(&trie.TrieStub{ + GetCalled: func(key []byte) ([]byte, uint32, error) { + return nil, 0, missingNodeErr + }, + }, + ) + + return acc, nil + }, + } + + rtp, _ := rewardTransaction.NewRewardTxProcessor( + accountsDb, + createMockPubkeyConverter(), + mock.NewMultiShardsCoordinatorMock(3), + ) + + rwdTx := rewardTx.RewardTx{ + Round: 0, + Epoch: 0, + Value: big.NewInt(100), + RcvAddr: []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3, 4, 5, 6}, + } + + err := rtp.ProcessRewardTransaction(&rwdTx) + assert.Equal(t, missingNodeErr, err) +} + func TestRewardTxProcessor_ProcessRewardTransactionToASmartContractShouldWork(t *testing.T) { t.Parallel() diff --git a/process/smartContract/builtInFunctions/factory.go b/process/smartContract/builtInFunctions/factory.go index b13c0fa0b12..8a2a4967296 100644 --- a/process/smartContract/builtInFunctions/factory.go +++ b/process/smartContract/builtInFunctions/factory.go @@ -20,6 +20,7 @@ var log = logger.GetOrCreate("process/smartcontract/builtInFunctions") type ArgsCreateBuiltInFunctionContainer struct { GasSchedule core.GasScheduleNotifier MapDNSAddresses map[string]struct{} + MapDNSV2Addresses map[string]struct{} EnableUserNameChange bool Marshalizer marshal.Marshalizer Accounts state.AccountsAdapter @@ -42,7 +43,7 @@ func CreateBuiltInFunctionsFactory(args ArgsCreateBuiltInFunctionContainer) (vmc if check.IfNil(args.Accounts) { return nil, process.ErrNilAccountsAdapter } - if args.MapDNSAddresses == nil { + if args.MapDNSAddresses == nil || args.MapDNSV2Addresses == nil { return nil, process.ErrNilDnsAddresses } if 
check.IfNil(args.ShardCoordinator) { @@ -78,6 +79,7 @@ func CreateBuiltInFunctionsFactory(args ArgsCreateBuiltInFunctionContainer) (vmc modifiedArgs := vmcommonBuiltInFunctions.ArgsCreateBuiltInFunctionContainer{ GasMap: args.GasSchedule.LatestGasSchedule(), MapDNSAddresses: args.MapDNSAddresses, + MapDNSV2Addresses: args.MapDNSV2Addresses, EnableUserNameChange: args.EnableUserNameChange, Marshalizer: args.Marshalizer, Accounts: vmcommonAccounts, diff --git a/process/smartContract/builtInFunctions/factory_test.go b/process/smartContract/builtInFunctions/factory_test.go index 9b4d5b9b9f9..04c180235b5 100644 --- a/process/smartContract/builtInFunctions/factory_test.go +++ b/process/smartContract/builtInFunctions/factory_test.go @@ -27,6 +27,7 @@ func createMockArguments() ArgsCreateBuiltInFunctionContainer { args := ArgsCreateBuiltInFunctionContainer{ GasSchedule: gasScheduleNotifier, MapDNSAddresses: make(map[string]struct{}), + MapDNSV2Addresses: make(map[string]struct{}), EnableUserNameChange: false, Marshalizer: &mock.MarshalizerMock{}, Accounts: &stateMock.AccountsStub{}, @@ -164,7 +165,7 @@ func TestCreateBuiltInFunctionContainer(t *testing.T) { args := createMockArguments() builtInFuncFactory, err := CreateBuiltInFunctionsFactory(args) assert.Nil(t, err) - assert.Equal(t, 34, len(builtInFuncFactory.BuiltInFunctionContainer().Keys())) + assert.Equal(t, 35, len(builtInFuncFactory.BuiltInFunctionContainer().Keys())) err = builtInFuncFactory.SetPayableHandler(&testscommon.BlockChainHookStub{}) assert.Nil(t, err) diff --git a/process/smartContract/hooks/blockChainHook.go b/process/smartContract/hooks/blockChainHook.go index 6e88bcf17da..cfdf70e1178 100644 --- a/process/smartContract/hooks/blockChainHook.go +++ b/process/smartContract/hooks/blockChainHook.go @@ -41,26 +41,27 @@ const executeDurationAlarmThreshold = time.Duration(50) * time.Millisecond // ArgBlockChainHook represents the arguments structure for the blockchain hook type ArgBlockChainHook struct { - Accounts state.AccountsAdapter - PubkeyConv core.PubkeyConverter - StorageService dataRetriever.StorageService - DataPool dataRetriever.PoolsHolder - BlockChain data.ChainHandler - ShardCoordinator sharding.Coordinator - Marshalizer marshal.Marshalizer - Uint64Converter typeConverters.Uint64ByteSliceConverter - BuiltInFunctions vmcommon.BuiltInFunctionContainer - NFTStorageHandler vmcommon.SimpleESDTNFTStorageHandler - GlobalSettingsHandler vmcommon.ESDTGlobalSettingsHandler - CompiledSCPool storage.Cacher - ConfigSCStorage config.StorageConfig - EnableEpochs config.EnableEpochs - EpochNotifier vmcommon.EpochNotifier - EnableEpochsHandler common.EnableEpochsHandler - WorkingDir string - NilCompiledSCStore bool - GasSchedule core.GasScheduleNotifier - Counter BlockChainHookCounter + Accounts state.AccountsAdapter + PubkeyConv core.PubkeyConverter + StorageService dataRetriever.StorageService + DataPool dataRetriever.PoolsHolder + BlockChain data.ChainHandler + ShardCoordinator sharding.Coordinator + Marshalizer marshal.Marshalizer + Uint64Converter typeConverters.Uint64ByteSliceConverter + BuiltInFunctions vmcommon.BuiltInFunctionContainer + NFTStorageHandler vmcommon.SimpleESDTNFTStorageHandler + GlobalSettingsHandler vmcommon.ESDTGlobalSettingsHandler + CompiledSCPool storage.Cacher + ConfigSCStorage config.StorageConfig + EnableEpochs config.EnableEpochs + EpochNotifier vmcommon.EpochNotifier + EnableEpochsHandler common.EnableEpochsHandler + WorkingDir string + NilCompiledSCStore bool + GasSchedule core.GasScheduleNotifier + 
Counter BlockChainHookCounter + MissingTrieNodesNotifier common.MissingTrieNodesNotifier } // BlockChainHookImpl is a wrapper over AccountsAdapter that satisfy vmcommon.BlockchainHook interface @@ -89,8 +90,9 @@ type BlockChainHookImpl struct { mapActivationEpochs map[uint32]struct{} - mutGasLock sync.RWMutex - gasSchedule core.GasScheduleNotifier + mutGasLock sync.RWMutex + gasSchedule core.GasScheduleNotifier + missingTrieNodesNotifier common.MissingTrieNodesNotifier } // NewBlockChainHookImpl creates a new BlockChainHookImpl instance @@ -103,23 +105,24 @@ func NewBlockChainHookImpl( } blockChainHookImpl := &BlockChainHookImpl{ - accounts: args.Accounts, - pubkeyConv: args.PubkeyConv, - storageService: args.StorageService, - blockChain: args.BlockChain, - shardCoordinator: args.ShardCoordinator, - marshalizer: args.Marshalizer, - uint64Converter: args.Uint64Converter, - builtInFunctions: args.BuiltInFunctions, - compiledScPool: args.CompiledSCPool, - configSCStorage: args.ConfigSCStorage, - workingDir: args.WorkingDir, - nilCompiledSCStore: args.NilCompiledSCStore, - nftStorageHandler: args.NFTStorageHandler, - globalSettingsHandler: args.GlobalSettingsHandler, - enableEpochsHandler: args.EnableEpochsHandler, - gasSchedule: args.GasSchedule, - counter: args.Counter, + accounts: args.Accounts, + pubkeyConv: args.PubkeyConv, + storageService: args.StorageService, + blockChain: args.BlockChain, + shardCoordinator: args.ShardCoordinator, + marshalizer: args.Marshalizer, + uint64Converter: args.Uint64Converter, + builtInFunctions: args.BuiltInFunctions, + compiledScPool: args.CompiledSCPool, + configSCStorage: args.ConfigSCStorage, + workingDir: args.WorkingDir, + nilCompiledSCStore: args.NilCompiledSCStore, + nftStorageHandler: args.NFTStorageHandler, + globalSettingsHandler: args.GlobalSettingsHandler, + enableEpochsHandler: args.EnableEpochsHandler, + gasSchedule: args.GasSchedule, + counter: args.Counter, + missingTrieNodesNotifier: args.MissingTrieNodesNotifier, } err = blockChainHookImpl.makeCompiledSCStorage() @@ -198,7 +201,9 @@ func checkForNil(args ArgBlockChainHook) error { if check.IfNil(args.Counter) { return ErrNilBlockchainHookCounter } - + if check.IfNil(args.MissingTrieNodesNotifier) { + return ErrNilMissingTrieNodesNotifier + } return nil } @@ -264,6 +269,8 @@ func (bh *BlockChainHookImpl) GetStorageData(accountAddress []byte, index []byte if err != nil { messages = append(messages, "error") messages = append(messages, err) + + bh.syncIfMissingDataTrieNode(err) } log.Trace("GetStorageData ", messages...) 
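The hunk that follows adds syncIfMissingDataTrieNode. A compact sketch of the error round trip it relies on, built the same way as in the GetStorageData tests further below (editor's illustration, not part of the diff):

package main

import (
	"errors"
	"fmt"

	"github.com/multiversx/mx-chain-core-go/core"
	"github.com/multiversx/mx-chain-core-go/core/check"
)

func main() {
	missingKey := []byte("missingDataTrieKey")

	// the data trie layer wraps the missing hash into a GetNodeFromDB error
	trieErr := core.NewGetNodeFromDBErrWithKey(missingKey, errors.New(core.GetNodeFromDBErrorString), "")
	wrapped := fmt.Errorf("error: %w", trieErr)

	// the hook unwraps it to recover the key it hands to AsyncNotifyMissingTrieNode
	getNodeErr := core.UnwrapGetNodeFromDBErr(wrapped)
	if check.IfNil(getNodeErr) {
		return // not a missing-trie-node error, nothing to notify
	}
	fmt.Printf("missing node key: %s\n", getNodeErr.GetKey())
}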
@@ -272,6 +279,19 @@ func (bh *BlockChainHookImpl) GetStorageData(accountAddress []byte, index []byte return value, trieDepth, nil } +func (bh *BlockChainHookImpl) syncIfMissingDataTrieNode(err error) { + if !core.IsGetNodeFromDBError(err) { + return + } + + getNodeErr := core.UnwrapGetNodeFromDBErr(err) + if check.IfNil(getNodeErr) { + return + } + + bh.missingTrieNodesNotifier.AsyncNotifyMissingTrieNode(getNodeErr.GetKey()) +} + func (bh *BlockChainHookImpl) processMaxReadsCounters() error { if !bh.enableEpochsHandler.IsMaxBlockchainHookCountersFlagEnabled() { return nil diff --git a/process/smartContract/hooks/blockChainHook_test.go b/process/smartContract/hooks/blockChainHook_test.go index a8ead4366ea..75d2b9e37c3 100644 --- a/process/smartContract/hooks/blockChainHook_test.go +++ b/process/smartContract/hooks/blockChainHook_test.go @@ -63,8 +63,9 @@ func createMockBlockChainHookArgs() hooks.ArgBlockChainHook { EnableEpochs: config.EnableEpochs{ DoNotReturnOldBlockInBlockchainHookEnableEpoch: math.MaxUint32, }, - GasSchedule: testscommon.NewGasScheduleNotifierMock(make(map[string]map[string]uint64)), - Counter: &testscommon.BlockChainHookCounterStub{}, + GasSchedule: testscommon.NewGasScheduleNotifierMock(make(map[string]map[string]uint64)), + Counter: &testscommon.BlockChainHookCounterStub{}, + MissingTrieNodesNotifier: &testscommon.MissingTrieNodesNotifierStub{}, } return arguments } @@ -214,6 +215,14 @@ func TestNewBlockChainHookImpl(t *testing.T) { }, expectedErr: storage.ErrCacheSizeIsLowerThanBatchSize, }, + { + args: func() hooks.ArgBlockChainHook { + args := createMockBlockChainHookArgs() + args.MissingTrieNodesNotifier = nil + return args + }, + expectedErr: hooks.ErrNilMissingTrieNodesNotifier, + }, { args: func() hooks.ArgBlockChainHook { return createMockBlockChainHookArgs() @@ -522,6 +531,95 @@ func TestBlockChainHookImpl_GetStorageData(t *testing.T) { assert.Equal(t, variableValue, value) assert.False(t, counterProcessedCalled) }) + t.Run("data trie node not found should call missingTrieNodesNotifier", func(t *testing.T) { + t.Parallel() + + missingDataTrieKey := []byte("missingDataTrieKey") + notifyMissingTrieNodeCalled := false + accnt := stateMock.NewAccountWrapMock(nil) + accnt.AccountDataHandlerCalled = func() (handler vmcommon.AccountDataHandler) { + return &trie.DataTrieTrackerStub{ + RetrieveValueCalled: func(key []byte) ([]byte, uint32, error) { + trieErr := core.NewGetNodeFromDBErrWithKey(key, errors.New(core.GetNodeFromDBErrorString), "") + return nil, 0, fmt.Errorf("error: %w", trieErr) + }, + } + } + + args := createMockBlockChainHookArgs() + args.Accounts = &stateMock.AccountsStub{ + GetExistingAccountCalled: func(address []byte) (handler vmcommon.AccountHandler, e error) { + return accnt, nil + }, + } + args.MissingTrieNodesNotifier = &testscommon.MissingTrieNodesNotifierStub{ + AsyncNotifyMissingTrieNodeCalled: func(hash []byte) { + assert.Equal(t, missingDataTrieKey, hash) + notifyMissingTrieNodeCalled = true + }, + } + bh, _ := hooks.NewBlockChainHookImpl(args) + + _, _, _ = bh.GetStorageData([]byte("address"), missingDataTrieKey) + assert.True(t, notifyMissingTrieNodeCalled) + }) + t.Run("random retrieve err should not call missingTrieNodesNotifier", func(t *testing.T) { + t.Parallel() + + missingDataTrieKey := []byte("missingDataTrieKey") + accnt := stateMock.NewAccountWrapMock(nil) + accnt.AccountDataHandlerCalled = func() (handler vmcommon.AccountDataHandler) { + return &trie.DataTrieTrackerStub{ + RetrieveValueCalled: func(key []byte) ([]byte, 
uint32, error) { + return nil, 0, errors.New("random error") + }, + } + } + + args := createMockBlockChainHookArgs() + args.Accounts = &stateMock.AccountsStub{ + GetExistingAccountCalled: func(address []byte) (handler vmcommon.AccountHandler, e error) { + return accnt, nil + }, + } + args.MissingTrieNodesNotifier = &testscommon.MissingTrieNodesNotifierStub{ + AsyncNotifyMissingTrieNodeCalled: func(hash []byte) { + assert.Fail(t, "should not have been called") + }, + } + bh, _ := hooks.NewBlockChainHookImpl(args) + + _, _, _ = bh.GetStorageData([]byte("address"), missingDataTrieKey) + }) + t.Run("unwrapped err is not of wanted type, should not call missingTrieNodesNotifier", func(t *testing.T) { + t.Parallel() + + missingDataTrieKey := []byte("missingDataTrieKey") + accnt := stateMock.NewAccountWrapMock(nil) + accnt.AccountDataHandlerCalled = func() (handler vmcommon.AccountDataHandler) { + return &trie.DataTrieTrackerStub{ + RetrieveValueCalled: func(key []byte) ([]byte, uint32, error) { + baseErr := errors.New(core.GetNodeFromDBErrorString) + return nil, 0, fmt.Errorf("error: %w", baseErr) + }, + } + } + + args := createMockBlockChainHookArgs() + args.Accounts = &stateMock.AccountsStub{ + GetExistingAccountCalled: func(address []byte) (handler vmcommon.AccountHandler, e error) { + return accnt, nil + }, + } + args.MissingTrieNodesNotifier = &testscommon.MissingTrieNodesNotifierStub{ + AsyncNotifyMissingTrieNodeCalled: func(hash []byte) { + assert.Fail(t, "should not have been called") + }, + } + bh, _ := hooks.NewBlockChainHookImpl(args) + + _, _, _ = bh.GetStorageData([]byte("address"), missingDataTrieKey) + }) } func TestBlockChainHookImpl_NewAddressLengthNoGood(t *testing.T) { diff --git a/process/smartContract/hooks/errors.go b/process/smartContract/hooks/errors.go index 402e2c5311e..43894f455e0 100644 --- a/process/smartContract/hooks/errors.go +++ b/process/smartContract/hooks/errors.go @@ -16,3 +16,6 @@ var ErrVMTypeLengthIsNotCorrect = errors.New("vm type length is not correct") // ErrNilBlockchainHookCounter signals that a nil blockchain hook counter was provided var ErrNilBlockchainHookCounter = errors.New("nil blockchain hook counter") + +// ErrNilMissingTrieNodesNotifier signals that a nil missing trie nodes notifier was provided +var ErrNilMissingTrieNodesNotifier = errors.New("nil missing trie nodes notifier") diff --git a/process/smartContract/process.go b/process/smartContract/process.go index 990095a617a..620dafae8b2 100644 --- a/process/smartContract/process.go +++ b/process/smartContract/process.go @@ -19,7 +19,6 @@ import ( "github.com/multiversx/mx-chain-core-go/hashing" "github.com/multiversx/mx-chain-core-go/marshal" "github.com/multiversx/mx-chain-go/common" - "github.com/multiversx/mx-chain-go/errors" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/sharding" "github.com/multiversx/mx-chain-go/state" @@ -339,6 +338,9 @@ func (sc *scProcessor) doExecuteSmartContractTransaction( var results []data.TransactionHandler results, err = sc.processVMOutput(vmOutput, txHash, tx, vmInput.CallType, vmInput.GasProvided) if err != nil { + if core.IsGetNodeFromDBError(err) { + return vmcommon.ExecutionFailed, err + } log.Trace("process vm output returned with problem ", "err", err.Error()) return vmcommon.ExecutionFailed, sc.ProcessIfError(acntSnd, txHash, tx, err.Error(), []byte(vmOutput.ReturnMessage), snapshot, vmInput.GasLocked) } @@ -378,6 +380,9 @@ func (sc *scProcessor) executeSmartContractCall( vmOutput, err = 
vmExec.RunSmartContractCall(vmInput) sc.wasmVMChangeLocker.RUnlock() if err != nil { + if core.IsGetNodeFromDBError(err) { + return nil, err + } log.Debug("run smart contract call error", "error", err.Error()) return userErrorVmOutput, sc.ProcessIfError(acntSnd, txHash, tx, err.Error(), []byte(""), snapshot, vmInput.GasLocked) } @@ -978,6 +983,9 @@ func (sc *scProcessor) doExecuteBuiltInFunction( tmpCreatedAsyncCallback := false tmpCreatedAsyncCallback, newSCRTxs, err = sc.processSCOutputAccounts(newVMOutput, vmInput.CallType, outPutAccounts, tx, txHash) if err != nil { + if core.IsGetNodeFromDBError(err) { + return vmcommon.ExecutionFailed, err + } return vmcommon.ExecutionFailed, sc.ProcessIfError(acntSnd, txHash, tx, err.Error(), []byte(err.Error()), snapshot, vmInput.GasLocked) } createdAsyncCallback = createdAsyncCallback || tmpCreatedAsyncCallback @@ -1077,6 +1085,10 @@ func (sc *scProcessor) resolveBuiltInFunctions( GasRemaining: 0, } + if core.IsGetNodeFromDBError(err) { + return nil, err + } + return vmOutput, nil } @@ -1345,7 +1357,8 @@ func (sc *scProcessor) ProcessIfError( return sc.processIfErrorWithAddedLogs(acntSnd, txHash, tx, returnCode, returnMessage, snapshot, gasLocked, nil, nil) } -func (sc *scProcessor) processIfErrorWithAddedLogs(acntSnd state.UserAccountHandler, +func (sc *scProcessor) processIfErrorWithAddedLogs( + acntSnd state.UserAccountHandler, txHash []byte, tx data.TransactionHandler, returnCode string, @@ -1362,7 +1375,7 @@ func (sc *scProcessor) processIfErrorWithAddedLogs(acntSnd state.UserAccountHand err := sc.accounts.RevertToSnapshot(snapshot) if err != nil { - if !errors.IsClosingError(err) { + if !core.IsClosingError(err) { log.Warn("revert to snapshot", "error", err.Error()) } @@ -1716,6 +1729,9 @@ func (sc *scProcessor) doDeploySmartContract( sc.wasmVMChangeLocker.RUnlock() if err != nil { log.Debug("VM error", "error", err.Error()) + if core.IsGetNodeFromDBError(err) { + return vmcommon.ExecutionFailed, err + } return vmcommon.UserError, sc.ProcessIfError(acntSnd, txHash, tx, err.Error(), []byte(""), snapshot, vmInput.GasLocked) } @@ -1738,6 +1754,9 @@ func (sc *scProcessor) doDeploySmartContract( results, err := sc.processVMOutput(vmOutput, txHash, tx, vmInput.CallType, vmInput.GasProvided) if err != nil { log.Trace("Processing error", "error", err.Error()) + if core.IsGetNodeFromDBError(err) { + return vmcommon.ExecutionFailed, err + } return vmcommon.ExecutionFailed, sc.ProcessIfError(acntSnd, txHash, tx, err.Error(), []byte(vmOutput.ReturnMessage), snapshot, vmInput.GasLocked) } diff --git a/process/sync/argBootstrapper.go b/process/sync/argBootstrapper.go index 9441c595e7b..ec3f64a58d8 100644 --- a/process/sync/argBootstrapper.go +++ b/process/sync/argBootstrapper.go @@ -47,6 +47,7 @@ type ArgBaseBootstrapper struct { IsInImportMode bool ScheduledTxsExecutionHandler process.ScheduledTxsExecutionHandler ProcessWaitTime time.Duration + RepopulateTokensSupplies bool } // ArgShardBootstrapper holds all dependencies required by the bootstrap data factory in order to create diff --git a/process/sync/baseSync.go b/process/sync/baseSync.go index 2c011e50c10..aa43d8cecc1 100644 --- a/process/sync/baseSync.go +++ b/process/sync/baseSync.go @@ -13,6 +13,7 @@ import ( "github.com/multiversx/mx-chain-core-go/core/closing" "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-core-go/data/block" + outportcore "github.com/multiversx/mx-chain-core-go/data/outport" "github.com/multiversx/mx-chain-core-go/data/typeConverters" 
"github.com/multiversx/mx-chain-core-go/hashing" "github.com/multiversx/mx-chain-core-go/marshal" @@ -23,9 +24,11 @@ import ( "github.com/multiversx/mx-chain-go/outport" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/process/sync/storageBootstrap/metricsLoader" + "github.com/multiversx/mx-chain-go/process/sync/trieIterators" "github.com/multiversx/mx-chain-go/sharding" "github.com/multiversx/mx-chain-go/state" "github.com/multiversx/mx-chain-go/storage" + "github.com/multiversx/mx-chain-go/trie/storageMarker" logger "github.com/multiversx/mx-chain-logger-go" ) @@ -123,6 +126,8 @@ type baseBootstrap struct { isInImportMode bool scheduledTxsExecutionHandler process.ScheduledTxsExecutionHandler processWaitTime time.Duration + + repopulateTokensSupplies bool } // setRequestedHeaderNonce method sets the header nonce requested by the sync mechanism @@ -693,14 +698,9 @@ func (boot *baseBootstrap) handleTrieSyncError(err error, ctx context.Context) { } } -func (boot *baseBootstrap) syncUserAccountsState() error { - rootHash, err := boot.accounts.RootHash() - if err != nil { - return err - } - +func (boot *baseBootstrap) syncUserAccountsState(key []byte) error { log.Warn("base sync: started syncUserAccountsState") - return boot.accountsDBSyncer.SyncAccounts(rootHash) + return boot.accountsDBSyncer.SyncAccounts(key, storageMarker.NewDisabledStorageMarker()) } func (boot *baseBootstrap) cleanNoncesSyncedWithErrorsBehindFinal() { @@ -816,7 +816,14 @@ func (boot *baseBootstrap) rollBack(revertUsingForkNonce bool) error { boot.scheduledTxsExecutionHandler.SetScheduledInfo(scheduledInfo) } - boot.outportHandler.RevertIndexedBlock(currHeader, currBody) + err = boot.outportHandler.RevertIndexedBlock(&outportcore.HeaderDataWithBody{ + Body: currBody, + HeaderHash: currHeaderHash, + Header: currHeader, + }) + if err != nil { + log.Warn("baseBootstrap.outportHandler.RevertIndexedBlock cannot revert indexed block", "error", err) + } shouldAddHeaderToBlackList := revertUsingForkNonce && boot.blockBootstrapper.isForkTriggeredByMeta() if shouldAddHeaderToBlackList { @@ -1191,6 +1198,42 @@ func (boot *baseBootstrap) GetNodeState() common.NodeState { return common.NsNotSynchronized } +func (boot *baseBootstrap) handleAccountsTrieIteration() error { + if boot.repopulateTokensSupplies { + return boot.handleTokensSuppliesRepopulation() + } + + // add more flags and trie iterators here + return nil +} + +func (boot *baseBootstrap) handleTokensSuppliesRepopulation() error { + argsTrieAccountsIteratorProc := trieIterators.ArgsTrieAccountsIterator{ + Marshaller: boot.marshalizer, + Accounts: boot.accounts, + } + trieAccountsIteratorProc, err := trieIterators.NewTrieAccountsIterator(argsTrieAccountsIteratorProc) + if err != nil { + return err + } + + argsTokensSuppliesProc := trieIterators.ArgsTokensSuppliesProcessor{ + StorageService: boot.store, + Marshaller: boot.marshalizer, + } + tokensSuppliesProc, err := trieIterators.NewTokensSuppliesProcessor(argsTokensSuppliesProc) + if err != nil { + return err + } + + err = trieAccountsIteratorProc.Process(tokensSuppliesProc.HandleTrieAccountIteration) + if err != nil { + return err + } + + return tokensSuppliesProc.SaveSupplies() +} + // Close will close the endless running go routine func (boot *baseBootstrap) Close() error { if boot.cancelFunc != nil { diff --git a/process/sync/disabled/disabledBootstrapper.go b/process/sync/disabled/disabledBootstrapper.go index 5d0a9a02086..ce39c262612 100644 --- 
a/process/sync/disabled/disabledBootstrapper.go +++ b/process/sync/disabled/disabledBootstrapper.go @@ -22,7 +22,8 @@ func (d *disabledBootstrapper) GetNodeState() common.NodeState { } // StartSyncingBlocks won't do anything as this is a disabled component -func (d *disabledBootstrapper) StartSyncingBlocks() { +func (d *disabledBootstrapper) StartSyncingBlocks() error { + return nil } // Close will return a nil error as this is a disabled component diff --git a/process/sync/export_test.go b/process/sync/export_test.go index e65a1f4098f..719e7599f9f 100644 --- a/process/sync/export_test.go +++ b/process/sync/export_test.go @@ -175,6 +175,11 @@ func (boot *MetaBootstrap) GetNotarizedInfo( } } +// SyncAccountsDBs - +func (boot *MetaBootstrap) SyncAccountsDBs(key []byte, id string) error { + return boot.syncAccountsDBs(key, id) +} + // ProcessReceivedHeader - func (boot *baseBootstrap) ProcessReceivedHeader(headerHandler data.HeaderHandler, headerHash []byte) { boot.processReceivedHeader(headerHandler, headerHash) diff --git a/process/sync/metablock.go b/process/sync/metablock.go index c4c10196dcf..1b3c69c7386 100644 --- a/process/sync/metablock.go +++ b/process/sync/metablock.go @@ -2,6 +2,7 @@ package sync import ( "context" + "fmt" "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" @@ -11,6 +12,7 @@ import ( "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/state" "github.com/multiversx/mx-chain-go/storage" + "github.com/multiversx/mx-chain-go/trie/storageMarker" ) // MetaBootstrap implements the bootstrap mechanism @@ -135,7 +137,7 @@ func (boot *MetaBootstrap) getBlockBody(headerHandler data.HeaderHandler) (data. } // StartSyncingBlocks method will start syncing blocks as a go routine -func (boot *MetaBootstrap) StartSyncingBlocks() { +func (boot *MetaBootstrap) StartSyncingBlocks() error { // when a node starts it first tries to bootstrap from storage, if there already exist a database saved errNotCritical := boot.storageBootstrapper.LoadFromStorage() if errNotCritical != nil { @@ -147,6 +149,8 @@ func (boot *MetaBootstrap) StartSyncingBlocks() { var ctx context.Context ctx, boot.cancelFunc = context.WithCancel(context.Background()) go boot.syncBlocks(ctx) + + return nil } func (boot *MetaBootstrap) setLastEpochStartRound() { @@ -178,38 +182,34 @@ func (boot *MetaBootstrap) setLastEpochStartRound() { // in the blockchain, and all this mechanism will be reiterated for the next block. 
func (boot *MetaBootstrap) SyncBlock(ctx context.Context) error { err := boot.syncBlock() - if isErrGetNodeFromDB(err) { - errSync := boot.syncAccountsDBs() + if core.IsGetNodeFromDBError(err) { + getNodeErr := core.UnwrapGetNodeFromDBErr(err) + if getNodeErr == nil { + return err + } + + errSync := boot.syncAccountsDBs(getNodeErr.GetKey(), getNodeErr.GetIdentifier()) boot.handleTrieSyncError(errSync, ctx) } return err } -func (boot *MetaBootstrap) syncAccountsDBs() error { - var err error - - err = boot.syncValidatorAccountsState() - if err != nil { - return err - } - - err = boot.syncUserAccountsState() - if err != nil { - return err +func (boot *MetaBootstrap) syncAccountsDBs(key []byte, id string) error { + // TODO: refactor this in order to avoid treatment based on identifier + switch id { + case dataRetriever.UserAccountsUnit.String(): + return boot.syncUserAccountsState(key) + case dataRetriever.PeerAccountsUnit.String(): + return boot.syncValidatorAccountsState(key) + default: + return fmt.Errorf("invalid trie identifier, id: %s", id) } - - return nil } -func (boot *MetaBootstrap) syncValidatorAccountsState() error { - rootHash, err := boot.validatorAccountsDB.RootHash() - if err != nil { - return err - } - +func (boot *MetaBootstrap) syncValidatorAccountsState(key []byte) error { log.Warn("base sync: started syncValidatorAccountsState") - return boot.validatorStatisticsDBSyncer.SyncAccounts(rootHash) + return boot.validatorStatisticsDBSyncer.SyncAccounts(key, storageMarker.NewDisabledStorageMarker()) } // Close closes the synchronization loop diff --git a/process/sync/metablock_test.go b/process/sync/metablock_test.go index 3f8ddd83267..fff94e55389 100644 --- a/process/sync/metablock_test.go +++ b/process/sync/metablock_test.go @@ -58,6 +58,8 @@ func createMetaStore() dataRetriever.StorageService { store.AddStorer(dataRetriever.MetaBlockUnit, generateTestUnit()) store.AddStorer(dataRetriever.ShardHdrNonceHashDataUnit, generateTestUnit()) store.AddStorer(dataRetriever.MetaHdrNonceHashDataUnit, generateTestUnit()) + store.AddStorer(dataRetriever.UserAccountsUnit, generateTestUnit()) + store.AddStorer(dataRetriever.PeerAccountsUnit, generateTestUnit()) return store } @@ -89,6 +91,7 @@ func CreateMetaBootstrapMockArguments() sync.ArgMetaBootstrapper { HistoryRepo: &dblookupext.HistoryRepositoryStub{}, ScheduledTxsExecutionHandler: &testscommon.ScheduledTxsExecutionStub{}, ProcessWaitTime: testProcessWaitTime, + RepopulateTokensSupplies: false, } argsMetaBootstrapper := sync.ArgMetaBootstrapper{ @@ -522,7 +525,7 @@ func TestMetaBootstrap_ShouldNotNeedToSync(t *testing.T) { bs, _ := sync.NewMetaBootstrap(args) - bs.StartSyncingBlocks() + _ = bs.StartSyncingBlocks() time.Sleep(200 * time.Millisecond) _ = bs.Close() } @@ -598,7 +601,7 @@ func TestMetaBootstrap_SyncShouldSyncOneBlock(t *testing.T) { ) bs, _ := sync.NewMetaBootstrap(args) - bs.StartSyncingBlocks() + _ = bs.StartSyncingBlocks() time.Sleep(200 * time.Millisecond) @@ -1126,7 +1129,8 @@ func TestMetaBootstrap_ReceivedHeadersFoundInPoolShouldAddToForkDetector(t *test args.ShardCoordinator = shardCoordinator args.RoundHandler = initRoundHandler() - bs, _ := sync.NewMetaBootstrap(args) + bs, err := sync.NewMetaBootstrap(args) + require.Nil(t, err) bs.ReceivedHeaders(addedHdr, addedHash) time.Sleep(500 * time.Millisecond) @@ -1177,7 +1181,8 @@ func TestMetaBootstrap_ReceivedHeadersNotFoundInPoolShouldNotAddToForkDetector(t args.ChainHandler, _ = blockchain.NewBlockChain(&statusHandlerMock.AppStatusHandlerStub{}) args.RoundHandler = 
initRoundHandler() - bs, _ := sync.NewMetaBootstrap(args) + bs, err := sync.NewMetaBootstrap(args) + require.Nil(t, err) bs.ReceivedHeaders(addedHdr, addedHash) time.Sleep(500 * time.Millisecond) @@ -1621,7 +1626,7 @@ func TestMetaBootstrap_SyncBlockErrGetNodeDBShouldSyncAccounts(t *testing.T) { } args.ChainHandler = blkc - errGetNodeFromDB := errors.New(common.GetNodeFromDBErrorString) + errGetNodeFromDB := core.NewGetNodeFromDBErrWithKey([]byte("key"), errors.New("get error"), dataRetriever.UserAccountsUnit.String()) blockProcessor := createMetaBlockProcessor(args.ChainHandler) blockProcessor.ProcessBlockCalled = func(header data.HeaderHandler, body data.BodyHandler, haveTime func() time.Duration) error { return errGetNodeFromDB @@ -1677,16 +1682,11 @@ func TestMetaBootstrap_SyncBlockErrGetNodeDBShouldSyncAccounts(t *testing.T) { ) accountsSyncCalled := false args.AccountsDBSyncer = &mock.AccountsDBSyncerStub{ - SyncAccountsCalled: func(rootHash []byte) error { + SyncAccountsCalled: func(rootHash []byte, _ common.StorageMarker) error { accountsSyncCalled = true return nil - }} - validatorSyncCalled := false - args.ValidatorStatisticsDBSyncer = &mock.AccountsDBSyncerStub{ - SyncAccountsCalled: func(rootHash []byte) error { - validatorSyncCalled = true - return nil - }} + }, + } args.Accounts = &stateMock.AccountsStub{RootHashCalled: func() ([]byte, error) { return []byte("roothash"), nil }} @@ -1694,10 +1694,119 @@ func TestMetaBootstrap_SyncBlockErrGetNodeDBShouldSyncAccounts(t *testing.T) { return []byte("roothash"), nil }} + args.Store = &storageStubs.ChainStorerStub{ + GetStorerCalled: func(unitType dataRetriever.UnitType) (storage.Storer, error) { + var dbIdentifier string + switch unitType { + case dataRetriever.UserAccountsUnit: + dbIdentifier = "userAccountsUnit" + case dataRetriever.PeerAccountsUnit: + dbIdentifier = "peerAccountsUnit" + default: + dbIdentifier = "" + } + + return &storageStubs.StorerStub{ + GetCalled: func(key []byte) ([]byte, error) { + return nil, process.ErrMissingHeader + }, + RemoveCalled: func(key []byte) error { + return nil + }, + GetIdentifierCalled: func() string { + return dbIdentifier + }, + }, nil + }, + } + bs, _ := sync.NewMetaBootstrap(args) err := bs.SyncBlock(context.Background()) assert.Equal(t, errGetNodeFromDB, err) assert.True(t, accountsSyncCalled) - assert.True(t, validatorSyncCalled) +} + +func TestMetaBootstrap_SyncAccountsDBs(t *testing.T) { + t.Parallel() + + t.Run("sync user accounts state", func(t *testing.T) { + t.Parallel() + + args := CreateMetaBootstrapMockArguments() + accountsSyncCalled := false + args.AccountsDBSyncer = &mock.AccountsDBSyncerStub{ + SyncAccountsCalled: func(rootHash []byte, _ common.StorageMarker) error { + accountsSyncCalled = true + return nil + }, + } + + dbIdentifier := dataRetriever.UserAccountsUnit.String() + args.Store = &storageStubs.ChainStorerStub{ + GetStorerCalled: func(unitType dataRetriever.UnitType) (storage.Storer, error) { + if unitType != dataRetriever.UserAccountsUnit { + return &storageStubs.StorerStub{}, nil + } + + return &storageStubs.StorerStub{ + GetCalled: func(key []byte) ([]byte, error) { + return nil, process.ErrMissingHeader + }, + RemoveCalled: func(key []byte) error { + return nil + }, + GetIdentifierCalled: func() string { + return dbIdentifier + }, + }, nil + }, + } + + bs, _ := sync.NewMetaBootstrap(args) + + err := bs.SyncAccountsDBs([]byte("key"), dbIdentifier) + require.Nil(t, err) + require.True(t, accountsSyncCalled) + }) + + t.Run("sync validator accounts state", 
func(t *testing.T) { + t.Parallel() + + args := CreateMetaBootstrapMockArguments() + accountsSyncCalled := false + args.ValidatorStatisticsDBSyncer = &mock.AccountsDBSyncerStub{ + SyncAccountsCalled: func(rootHash []byte, _ common.StorageMarker) error { + accountsSyncCalled = true + return nil + }, + } + + dbIdentifier := dataRetriever.PeerAccountsUnit.String() + args.Store = &storageStubs.ChainStorerStub{ + GetStorerCalled: func(unitType dataRetriever.UnitType) (storage.Storer, error) { + if unitType != dataRetriever.PeerAccountsUnit { + return &storageStubs.StorerStub{}, nil + } + + return &storageStubs.StorerStub{ + GetCalled: func(key []byte) ([]byte, error) { + return nil, process.ErrMissingHeader + }, + RemoveCalled: func(key []byte) error { + return nil + }, + GetIdentifierCalled: func() string { + return dbIdentifier + }, + }, nil + }, + } + + bs, _ := sync.NewMetaBootstrap(args) + + err := bs.SyncAccountsDBs([]byte("key"), dbIdentifier) + require.Nil(t, err) + require.True(t, accountsSyncCalled) + }) } diff --git a/process/sync/shardblock.go b/process/sync/shardblock.go index 10965aec981..8cca3954ef0 100644 --- a/process/sync/shardblock.go +++ b/process/sync/shardblock.go @@ -2,16 +2,14 @@ package sync import ( "context" + "fmt" "math" - "strings" "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-core-go/data/block" - "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/dataRetriever" - "github.com/multiversx/mx-chain-go/errors" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/storage" ) @@ -67,6 +65,7 @@ func NewShardBootstrap(arguments ArgShardBootstrapper) (*ShardBootstrap, error) historyRepo: arguments.HistoryRepo, scheduledTxsExecutionHandler: arguments.ScheduledTxsExecutionHandler, processWaitTime: arguments.ProcessWaitTime, + repopulateTokensSupplies: arguments.RepopulateTokensSupplies, } if base.isInImportMode { @@ -124,7 +123,7 @@ func (boot *ShardBootstrap) getBlockBody(headerHandler data.HeaderHandler) (data } // StartSyncingBlocks method will start syncing blocks as a go routine -func (boot *ShardBootstrap) StartSyncingBlocks() { +func (boot *ShardBootstrap) StartSyncingBlocks() error { errNotCritical := boot.storageBootstrapper.LoadFromStorage() if errNotCritical != nil { log.Debug("boot.syncFromStorer", @@ -134,7 +133,15 @@ func (boot *ShardBootstrap) StartSyncingBlocks() { var ctx context.Context ctx, boot.cancelFunc = context.WithCancel(context.Background()) + + err := boot.handleAccountsTrieIteration() + if err != nil { + return fmt.Errorf("%w while handling accounts trie iteration", err) + } + go boot.syncBlocks(ctx) + + return nil } // SyncBlock method actually does the synchronization. It requests the next block header from the pool @@ -145,34 +152,19 @@ func (boot *ShardBootstrap) StartSyncingBlocks() { // in the blockchain, and all this mechanism will be reiterated for the next block. 
func (boot *ShardBootstrap) SyncBlock(ctx context.Context) error { err := boot.syncBlock() - if isErrGetNodeFromDB(err) { - errSync := boot.syncUserAccountsState() + if core.IsGetNodeFromDBError(err) { + getNodeErr := core.UnwrapGetNodeFromDBErr(err) + if getNodeErr == nil { + return err + } + + errSync := boot.syncUserAccountsState(getNodeErr.GetKey()) boot.handleTrieSyncError(errSync, ctx) } return err } -func isErrGetNodeFromDB(err error) bool { - if err == nil { - return false - } - - if strings.Contains(err.Error(), storage.ErrDBIsClosed.Error()) { - return false - } - - if strings.Contains(err.Error(), errors.ErrContextClosing.Error()) { - return false - } - - if strings.Contains(err.Error(), common.GetNodeFromDBErrorString) { - return true - } - - return false -} - // Close closes the synchronization loop func (boot *ShardBootstrap) Close() error { if check.IfNil(boot.baseBootstrap) { diff --git a/process/sync/shardblock_test.go b/process/sync/shardblock_test.go index 90acef3f45d..8abfd29e6bc 100644 --- a/process/sync/shardblock_test.go +++ b/process/sync/shardblock_test.go @@ -12,6 +12,7 @@ import ( "testing" "time" + "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-core-go/data/block" @@ -106,6 +107,8 @@ func createFullStore() dataRetriever.StorageService { store.AddStorer(dataRetriever.ShardHdrNonceHashDataUnit, generateTestUnit()) store.AddStorer(dataRetriever.ReceiptsUnit, generateTestUnit()) store.AddStorer(dataRetriever.ScheduledSCRsUnit, generateTestUnit()) + store.AddStorer(dataRetriever.UserAccountsUnit, generateTestUnit()) + store.AddStorer(dataRetriever.PeerAccountsUnit, generateTestUnit()) return store } @@ -215,6 +218,7 @@ func CreateShardBootstrapMockArguments() sync.ArgShardBootstrapper { HistoryRepo: &dblookupext.HistoryRepositoryStub{}, ScheduledTxsExecutionHandler: &testscommon.ScheduledTxsExecutionStub{}, ProcessWaitTime: testProcessWaitTime, + RepopulateTokensSupplies: false, } argsShardBootstrapper := sync.ArgShardBootstrapper{ @@ -653,7 +657,7 @@ func TestBootstrap_ShouldNotNeedToSync(t *testing.T) { bs, _ := sync.NewShardBootstrap(args) - bs.StartSyncingBlocks() + _ = bs.StartSyncingBlocks() time.Sleep(200 * time.Millisecond) _ = bs.Close() } @@ -748,7 +752,7 @@ func TestBootstrap_SyncShouldSyncOneBlock(t *testing.T) { ) bs, _ := sync.NewShardBootstrap(args) - bs.StartSyncingBlocks() + _ = bs.StartSyncingBlocks() time.Sleep(200 * time.Millisecond) @@ -1738,7 +1742,8 @@ func TestBootstrap_GetTxBodyHavingHashNotFoundInCacherOrStorageShouldRetEmptySli args.Store = createFullStore() args.Store.AddStorer(dataRetriever.TransactionUnit, txBlockUnit) - bs, _ := sync.NewShardBootstrap(args) + bs, err := sync.NewShardBootstrap(args) + require.Nil(t, err) gotMbsAndHashes, _ := bs.GetMiniBlocks(requestedHash) assert.Equal(t, 0, len(gotMbsAndHashes)) @@ -2061,7 +2066,7 @@ func TestShardBootstrap_SyncBlockGetNodeDBErrorShouldSync(t *testing.T) { } args.ChainHandler = blkc - errGetNodeFromDB := errors.New(common.GetNodeFromDBErrorString) + errGetNodeFromDB := core.NewGetNodeFromDBErrWithKey([]byte("key"), errors.New("get error"), "") blockProcessor := createBlockProcessor(args.ChainHandler) blockProcessor.ProcessBlockCalled = func(header data.HeaderHandler, body data.BodyHandler, haveTime func() time.Duration) error { return errGetNodeFromDB @@ -2131,7 +2136,7 @@ func TestShardBootstrap_SyncBlockGetNodeDBErrorShouldSync(t *testing.T) { syncCalled := 
false args.AccountsDBSyncer = &mock.AccountsDBSyncerStub{ - SyncAccountsCalled: func(rootHash []byte) error { + SyncAccountsCalled: func(rootHash []byte, _ common.StorageMarker) error { syncCalled = true return nil }} diff --git a/process/sync/trieIterators/errors.go b/process/sync/trieIterators/errors.go new file mode 100644 index 00000000000..5fc876f1de3 --- /dev/null +++ b/process/sync/trieIterators/errors.go @@ -0,0 +1,11 @@ +package trieIterators + +import "errors" + +var errNilAccountsAdapter = errors.New("nil accounts adapter") + +var errNilStorageService = errors.New("nil storage service") + +var errNilMarshaller = errors.New("nil marshaller") + +var errNilUserAccount = errors.New("nil user account") diff --git a/process/sync/trieIterators/tokensSuppliesProcessor.go b/process/sync/trieIterators/tokensSuppliesProcessor.go new file mode 100644 index 00000000000..632115eb214 --- /dev/null +++ b/process/sync/trieIterators/tokensSuppliesProcessor.go @@ -0,0 +1,160 @@ +package trieIterators + +import ( + "bytes" + "context" + "encoding/hex" + "fmt" + "math/big" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-core-go/data/esdt" + "github.com/multiversx/mx-chain-core-go/marshal" + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/common/errChan" + "github.com/multiversx/mx-chain-go/dataRetriever" + "github.com/multiversx/mx-chain-go/dblookupext/esdtSupply" + "github.com/multiversx/mx-chain-go/state" + "github.com/multiversx/mx-chain-go/trie/keyBuilder" +) + +type tokensSuppliesProcessor struct { + storageService dataRetriever.StorageService + marshaller marshal.Marshalizer + tokensSupplies map[string]*big.Int +} + +// ArgsTokensSuppliesProcessor is the arguments struct for NewTokensSuppliesProcessor +type ArgsTokensSuppliesProcessor struct { + StorageService dataRetriever.StorageService + Marshaller marshal.Marshalizer +} + +// NewTokensSuppliesProcessor returns a new instance of tokensSuppliesProcessor +func NewTokensSuppliesProcessor(args ArgsTokensSuppliesProcessor) (*tokensSuppliesProcessor, error) { + if check.IfNil(args.StorageService) { + return nil, errNilStorageService + } + if check.IfNil(args.Marshaller) { + return nil, errNilMarshaller + } + + return &tokensSuppliesProcessor{ + storageService: args.StorageService, + marshaller: args.Marshaller, + tokensSupplies: make(map[string]*big.Int), + }, nil +} + +// HandleTrieAccountIteration is the handler for the trie account iteration +// note that this function is not concurrent safe +func (t *tokensSuppliesProcessor) HandleTrieAccountIteration(userAccount state.UserAccountHandler) error { + if check.IfNil(userAccount) { + return errNilUserAccount + } + if bytes.Equal(core.SystemAccountAddress, userAccount.AddressBytes()) { + log.Debug("repopulate tokens supplies: skipping system account address") + return nil + } + + rh := userAccount.GetRootHash() + isValidRootHashToIterateFor := len(rh) > 0 && !bytes.Equal(rh, make([]byte, len(rh))) + if !isValidRootHashToIterateFor { + return nil + } + + dataTrie := &common.TrieIteratorChannels{ + LeavesChan: make(chan core.KeyValueHolder, common.TrieLeavesChannelDefaultCapacity), + ErrChan: errChan.NewErrChanWrapper(), + } + + errDataTrieGet := userAccount.DataTrie().GetAllLeavesOnChannel(dataTrie, context.Background(), rh, keyBuilder.NewKeyBuilder()) + if errDataTrieGet != nil { + return fmt.Errorf("%w while getting all leaves for root hash %s", errDataTrieGet, 
hex.EncodeToString(rh)) + } + + log.Trace("extractTokensSupplies - parsing account", "address", userAccount.AddressBytes()) + esdtPrefix := []byte(core.ProtectedKeyPrefix + core.ESDTKeyIdentifier) + for userLeaf := range dataTrie.LeavesChan { + if !bytes.HasPrefix(userLeaf.Key(), esdtPrefix) { + continue + } + + tokenKey := userLeaf.Key() + lenESDTPrefix := len(esdtPrefix) + suffix := append(userLeaf.Key(), userAccount.AddressBytes()...) + value, errVal := userLeaf.ValueWithoutSuffix(suffix) + if errVal != nil { + return fmt.Errorf("%w while parsing the token with key %s", errVal, hex.EncodeToString(tokenKey)) + } + var esToken esdt.ESDigitalToken + err := t.marshaller.Unmarshal(&esToken, value) + if err != nil { + return fmt.Errorf("%w while unmarshaling the token with key %s", err, hex.EncodeToString(tokenKey)) + } + + tokenName := string(tokenKey)[lenESDTPrefix:] + tokenID, nonce := common.ExtractTokenIDAndNonceFromTokenStorageKey([]byte(tokenName)) + t.addToBalance(tokenID, nonce, esToken.Value) + } + + err := dataTrie.ErrChan.ReadFromChanNonBlocking() + if err != nil { + return fmt.Errorf("%w while parsing errors from the trie iteration", err) + } + + return nil +} + +func (t *tokensSuppliesProcessor) addToBalance(tokenID []byte, nonce uint64, value *big.Int) { + tokenIDStr := string(tokenID) + if nonce > 0 { + t.putInSuppliesMap(string(tokenID), value) // put for collection as well + nonceStr := hex.EncodeToString(big.NewInt(int64(nonce)).Bytes()) + tokenIDStr += fmt.Sprintf("-%s", nonceStr) + } + + t.putInSuppliesMap(tokenIDStr, value) +} + +func (t *tokensSuppliesProcessor) putInSuppliesMap(id string, value *big.Int) { + currentValue, found := t.tokensSupplies[id] + if !found { + t.tokensSupplies[id] = value + return + } + + currentValue = big.NewInt(0).Add(currentValue, value) + t.tokensSupplies[id] = currentValue +} + +// SaveSupplies will store the recomputed tokens supplies into the database +// note that this function is not concurrent safe +func (t *tokensSuppliesProcessor) SaveSupplies() error { + suppliesStorer, err := t.storageService.GetStorer(dataRetriever.ESDTSuppliesUnit) + if err != nil { + return err + } + + for tokenName, supply := range t.tokensSupplies { + log.Trace("repopulate tokens supplies", "token", tokenName, "supply", supply.String()) + supplyObj := &esdtSupply.SupplyESDT{ + Supply: supply, + RecomputedSupply: true, + } + supplyObjBytes, err := t.marshaller.Marshal(supplyObj) + if err != nil { + return err + } + + err = suppliesStorer.Put([]byte(tokenName), supplyObjBytes) + if err != nil { + return fmt.Errorf("%w while saving recomputed supply of the token %s", err, tokenName) + } + } + + log.Debug("finished the repopulation of the tokens supplies", "num tokens", len(t.tokensSupplies)) + + return nil +} diff --git a/process/sync/trieIterators/tokensSuppliesProcessor_test.go b/process/sync/trieIterators/tokensSuppliesProcessor_test.go new file mode 100644 index 00000000000..21eaf09f919 --- /dev/null +++ b/process/sync/trieIterators/tokensSuppliesProcessor_test.go @@ -0,0 +1,315 @@ +package trieIterators + +import ( + "bytes" + "context" + "errors" + "math/big" + "testing" + + "github.com/multiversx/mx-chain-core-go/core/keyValStorage" + "github.com/multiversx/mx-chain-core-go/data/esdt" + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/dataRetriever" + coreEsdt "github.com/multiversx/mx-chain-go/dblookupext/esdtSupply" + "github.com/multiversx/mx-chain-go/state" + chainStorage "github.com/multiversx/mx-chain-go/storage" + 
"github.com/multiversx/mx-chain-go/testscommon" + "github.com/multiversx/mx-chain-go/testscommon/genericMocks" + stateMock "github.com/multiversx/mx-chain-go/testscommon/state" + "github.com/multiversx/mx-chain-go/testscommon/storage" + "github.com/multiversx/mx-chain-go/testscommon/trie" + vmcommon "github.com/multiversx/mx-chain-vm-common-go" + "github.com/stretchr/testify/require" +) + +func getTokensSuppliesProcessorArgs() ArgsTokensSuppliesProcessor { + return ArgsTokensSuppliesProcessor{ + StorageService: &genericMocks.ChainStorerMock{}, + Marshaller: &testscommon.MarshalizerMock{}, + } +} + +func TestNewTokensSuppliesProcessor(t *testing.T) { + t.Parallel() + + t.Run("nil storage service", func(t *testing.T) { + t.Parallel() + + args := getTokensSuppliesProcessorArgs() + args.StorageService = nil + + tsp, err := NewTokensSuppliesProcessor(args) + require.Nil(t, tsp) + require.Equal(t, errNilStorageService, err) + }) + + t.Run("nil marshaller", func(t *testing.T) { + t.Parallel() + + args := getTokensSuppliesProcessorArgs() + args.Marshaller = nil + + tsp, err := NewTokensSuppliesProcessor(args) + require.Nil(t, tsp) + require.Equal(t, errNilMarshaller, err) + }) + + t.Run("should work", func(t *testing.T) { + t.Parallel() + + args := getTokensSuppliesProcessorArgs() + + tsp, err := NewTokensSuppliesProcessor(args) + require.NotNil(t, tsp) + require.NoError(t, err) + }) +} + +func TestTokensSuppliesProcessor_HandleTrieAccountIteration(t *testing.T) { + t.Parallel() + + t.Run("nil user account", func(t *testing.T) { + t.Parallel() + + tsp, _ := NewTokensSuppliesProcessor(getTokensSuppliesProcessorArgs()) + err := tsp.HandleTrieAccountIteration(nil) + require.Equal(t, errNilUserAccount, err) + }) + + t.Run("should skip system account", func(t *testing.T) { + t.Parallel() + + tsp, _ := NewTokensSuppliesProcessor(getTokensSuppliesProcessorArgs()) + + userAcc := stateMock.NewAccountWrapMock(vmcommon.SystemAccountAddress) + err := tsp.HandleTrieAccountIteration(userAcc) + require.NoError(t, err) + }) + + t.Run("empty root hash of account", func(t *testing.T) { + t.Parallel() + + tsp, _ := NewTokensSuppliesProcessor(getTokensSuppliesProcessorArgs()) + + userAcc := stateMock.NewAccountWrapMock([]byte("addr")) + err := tsp.HandleTrieAccountIteration(userAcc) + require.NoError(t, err) + }) + + t.Run("root hash of account is zero only", func(t *testing.T) { + t.Parallel() + + tsp, _ := NewTokensSuppliesProcessor(getTokensSuppliesProcessorArgs()) + + userAcc := stateMock.NewAccountWrapMock([]byte("addr")) + userAcc.SetRootHash(bytes.Repeat([]byte{0}, 32)) + err := tsp.HandleTrieAccountIteration(userAcc) + require.NoError(t, err) + }) + + t.Run("cannot get all leaves on channel", func(t *testing.T) { + t.Parallel() + + args := getTokensSuppliesProcessorArgs() + tsp, _ := NewTokensSuppliesProcessor(args) + + expectedErr := errors.New("error") + userAcc, _ := state.NewUserAccount([]byte("addr")) + userAcc.SetRootHash([]byte("rootHash")) + userAcc.SetDataTrie(&trie.TrieStub{ + GetAllLeavesOnChannelCalled: func(leavesChannels *common.TrieIteratorChannels, ctx context.Context, rootHash []byte, keyBuilder common.KeyBuilder) error { + return expectedErr + }, + }) + + err := tsp.HandleTrieAccountIteration(userAcc) + require.ErrorIs(t, err, expectedErr) + require.Empty(t, tsp.tokensSupplies) + }) + + t.Run("should ignore non-token keys", func(t *testing.T) { + t.Parallel() + + args := getTokensSuppliesProcessorArgs() + tsp, _ := NewTokensSuppliesProcessor(args) + + userAcc, _ := 
state.NewUserAccount([]byte("addr")) + userAcc.SetRootHash([]byte("rootHash")) + userAcc.SetDataTrie(&trie.TrieStub{ + GetAllLeavesOnChannelCalled: func(leavesChannels *common.TrieIteratorChannels, ctx context.Context, rootHash []byte, keyBuilder common.KeyBuilder) error { + leavesChannels.LeavesChan <- keyValStorage.NewKeyValStorage([]byte("not a token key"), []byte("not a token value")) + + close(leavesChannels.LeavesChan) + return nil + }, + }) + + err := tsp.HandleTrieAccountIteration(userAcc) + require.NoError(t, err) + require.Empty(t, tsp.tokensSupplies) + }) + + t.Run("should return error if trie value cannot be extracted", func(t *testing.T) { + t.Parallel() + + args := getTokensSuppliesProcessorArgs() + tsp, _ := NewTokensSuppliesProcessor(args) + + userAcc, _ := state.NewUserAccount([]byte("addr")) + userAcc.SetRootHash([]byte("rootHash")) + userAcc.SetDataTrie(&trie.TrieStub{ + GetAllLeavesOnChannelCalled: func(leavesChannels *common.TrieIteratorChannels, ctx context.Context, rootHash []byte, keyBuilder common.KeyBuilder) error { + esToken := &esdt.ESDigitalToken{ + Value: big.NewInt(37), + } + esBytes, _ := args.Marshaller.Marshal(esToken) + tknKey := []byte("ELRONDesdtTKN-00aacc") + leavesChannels.LeavesChan <- keyValStorage.NewKeyValStorage(tknKey, esBytes) + + close(leavesChannels.LeavesChan) + return nil + }, + }) + + err := tsp.HandleTrieAccountIteration(userAcc) + require.Error(t, err) + require.Contains(t, err.Error(), "suffix is not present or the position is incorrect") + require.Empty(t, tsp.tokensSupplies) + }) + + t.Run("should not save tokens from the system account", func(t *testing.T) { + t.Parallel() + + args := getTokensSuppliesProcessorArgs() + tsp, _ := NewTokensSuppliesProcessor(args) + + userAcc, _ := state.NewUserAccount(vmcommon.SystemAccountAddress) + userAcc.SetRootHash([]byte("rootHash")) + userAcc.SetDataTrie(&trie.TrieStub{ + GetAllLeavesOnChannelCalled: func(leavesChannels *common.TrieIteratorChannels, ctx context.Context, rootHash []byte, keyBuilder common.KeyBuilder) error { + esToken := &esdt.ESDigitalToken{ + Value: big.NewInt(37), + } + esBytes, _ := args.Marshaller.Marshal(esToken) + tknKey := []byte("ELRONDesdtTKN-00aacc") + value := append(esBytes, tknKey...) + value = append(value, []byte("addr")...) + leavesChannels.LeavesChan <- keyValStorage.NewKeyValStorage(tknKey, value) + + close(leavesChannels.LeavesChan) + return nil + }, + }) + + err := tsp.HandleTrieAccountIteration(userAcc) + require.NoError(t, err) + require.Empty(t, tsp.tokensSupplies) + }) + + t.Run("should work", func(t *testing.T) { + t.Parallel() + + args := getTokensSuppliesProcessorArgs() + tsp, _ := NewTokensSuppliesProcessor(args) + + userAcc, _ := state.NewUserAccount([]byte("addr")) + userAcc.SetRootHash([]byte("rootHash")) + userAcc.SetDataTrie(&trie.TrieStub{ + GetAllLeavesOnChannelCalled: func(leavesChannels *common.TrieIteratorChannels, ctx context.Context, rootHash []byte, keyBuilder common.KeyBuilder) error { + esToken := &esdt.ESDigitalToken{ + Value: big.NewInt(37), + } + esBytes, _ := args.Marshaller.Marshal(esToken) + tknKey := []byte("ELRONDesdtTKN-00aacc") + value := append(esBytes, tknKey...) + value = append(value, []byte("addr")...) + leavesChannels.LeavesChan <- keyValStorage.NewKeyValStorage(tknKey, value) + + sft := &esdt.ESDigitalToken{ + Value: big.NewInt(1), + } + sftBytes, _ := args.Marshaller.Marshal(sft) + sftKey := []byte("ELRONDesdtSFT-00aabb") + sftKey = append(sftKey, big.NewInt(37).Bytes()...) + value = append(sftBytes, sftKey...) 
+ value = append(value, []byte("addr")...) + leavesChannels.LeavesChan <- keyValStorage.NewKeyValStorage(sftKey, value) + + close(leavesChannels.LeavesChan) + return nil + }, + }) + + err := tsp.HandleTrieAccountIteration(userAcc) + require.NoError(t, err) + + err = tsp.HandleTrieAccountIteration(userAcc) + require.NoError(t, err) + + expectedSupplies := map[string]*big.Int{ + "SFT-00aabb-25": big.NewInt(2), + "SFT-00aabb": big.NewInt(2), + "TKN-00aacc": big.NewInt(74), + } + require.Equal(t, expectedSupplies, tsp.tokensSupplies) + }) +} + +func TestTokensSuppliesProcessor_SaveSupplies(t *testing.T) { + t.Parallel() + + t.Run("cannot find esdt supplies storer", func(t *testing.T) { + t.Parallel() + + errStorerNotFound := errors.New("storer not found") + args := getTokensSuppliesProcessorArgs() + args.StorageService = &storage.ChainStorerStub{ + GetStorerCalled: func(unitType dataRetriever.UnitType) (chainStorage.Storer, error) { + return nil, errStorerNotFound + }, + } + tsp, _ := NewTokensSuppliesProcessor(args) + err := tsp.SaveSupplies() + require.Equal(t, errStorerNotFound, err) + }) + + t.Run("should work", func(t *testing.T) { + t.Parallel() + + savedItems := make(map[string][]byte) + args := getTokensSuppliesProcessorArgs() + args.StorageService = &storage.ChainStorerStub{ + GetStorerCalled: func(unitType dataRetriever.UnitType) (chainStorage.Storer, error) { + return &storage.StorerStub{ + PutCalled: func(key, data []byte) error { + savedItems[string(key)] = data + return nil + }, + }, nil + }, + } + tsp, _ := NewTokensSuppliesProcessor(args) + + supplies := map[string]*big.Int{ + "SFT-00aabb-37": big.NewInt(2), + "SFT-00aabb": big.NewInt(2), + "TKN-00aacc": big.NewInt(74), + } + tsp.tokensSupplies = supplies + + err := tsp.SaveSupplies() + require.NoError(t, err) + + checkStoredSupply := func(t *testing.T, key string, storedValue []byte, expectedSupply *big.Int) { + supply := coreEsdt.SupplyESDT{} + _ = args.Marshaller.Unmarshal(&supply, storedValue) + require.Equal(t, expectedSupply, supply.Supply) + } + + require.Len(t, savedItems, 3) + for key, value := range savedItems { + checkStoredSupply(t, key, value, supplies[key]) + } + }) +} diff --git a/process/sync/trieIterators/trieAccountsIterator.go b/process/sync/trieIterators/trieAccountsIterator.go new file mode 100644 index 00000000000..e936d723e3e --- /dev/null +++ b/process/sync/trieIterators/trieAccountsIterator.go @@ -0,0 +1,110 @@ +package trieIterators + +import ( + "context" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-core-go/marshal" + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/common/errChan" + "github.com/multiversx/mx-chain-go/state" + logger "github.com/multiversx/mx-chain-logger-go" +) + +var log = logger.GetOrCreate("trieIterators") + +// TrieAccountIteratorHandler represents a type that maps a handler for the trie's accounts iterator +type TrieAccountIteratorHandler func(account state.UserAccountHandler) error + +type trieAccountsIterator struct { + marshaller marshal.Marshalizer + accounts state.AccountsAdapter +} + +// ArgsTrieAccountsIterator holds the arguments needed to create a new trie Accounts iterator +type ArgsTrieAccountsIterator struct { + Marshaller marshal.Marshalizer + Accounts state.AccountsAdapter +} + +// NewTrieAccountsIterator returns a new instance of trieAccountsIterator +func NewTrieAccountsIterator(args ArgsTrieAccountsIterator) (*trieAccountsIterator, error) { 
+ if check.IfNil(args.Marshaller) { + return nil, errNilMarshaller + } + if check.IfNil(args.Accounts) { + return nil, errNilAccountsAdapter + } + + return &trieAccountsIterator{ + marshaller: args.Marshaller, + accounts: args.Accounts, + }, nil +} + +// Process will iterate over the entire trie and iterate over the Accounts while calling the received handlers +func (t *trieAccountsIterator) Process(handlers ...TrieAccountIteratorHandler) error { + if len(handlers) == 0 { + return nil + } + + rootHash, err := t.accounts.RootHash() + if err != nil { + return err + } + + iteratorChannels := &common.TrieIteratorChannels{ + LeavesChan: make(chan core.KeyValueHolder, common.TrieLeavesChannelDefaultCapacity), + ErrChan: errChan.NewErrChanWrapper(), + } + err = t.accounts.GetAllLeaves(iteratorChannels, context.Background(), rootHash) + if err != nil { + return err + } + + return t.iterateOverHandlers(iteratorChannels, handlers) +} + +func (t *trieAccountsIterator) iterateOverHandlers(iteratorChannels *common.TrieIteratorChannels, handlers []TrieAccountIteratorHandler) error { + log.Debug("starting the trie's accounts iteration with calling the handlers") + for leaf := range iteratorChannels.LeavesChan { + userAddress, isAccount := t.getAddress(leaf) + if !isAccount { + continue + } + + acc, err := t.accounts.GetExistingAccount(userAddress) + if err != nil { + return err + } + + userAccount, ok := acc.(state.UserAccountHandler) + if !ok { + continue + } + + for _, handler := range handlers { + err = handler(userAccount) + if err != nil { + return err + } + } + } + + return nil +} + +func (t *trieAccountsIterator) getAddress(kv core.KeyValueHolder) ([]byte, bool) { + userAccount := &state.UserAccountData{} + errUnmarshal := t.marshaller.Unmarshal(userAccount, kv.Value()) + if errUnmarshal != nil { + // probably a code node + return nil, false + } + if len(userAccount.RootHash) == 0 { + return nil, false + } + + return kv.Key(), true +} diff --git a/process/sync/trieIterators/trieAccountsIterator_test.go b/process/sync/trieIterators/trieAccountsIterator_test.go new file mode 100644 index 00000000000..8eb00d7a7f3 --- /dev/null +++ b/process/sync/trieIterators/trieAccountsIterator_test.go @@ -0,0 +1,319 @@ +package trieIterators + +import ( + "context" + "errors" + "testing" + + "github.com/multiversx/mx-chain-core-go/core/keyValStorage" + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/state" + "github.com/multiversx/mx-chain-go/testscommon" + stateMock "github.com/multiversx/mx-chain-go/testscommon/state" + vmcommon "github.com/multiversx/mx-chain-vm-common-go" + "github.com/stretchr/testify/require" +) + +func getTrieAccountsIteratorArgs() ArgsTrieAccountsIterator { + return ArgsTrieAccountsIterator{ + Marshaller: &testscommon.MarshalizerMock{}, + Accounts: &stateMock.AccountsStub{}, + } +} + +func dummyIterator(_ state.UserAccountHandler) error { + return nil +} + +func TestNewTrieAccountsIterator(t *testing.T) { + t.Parallel() + + t.Run("nil marshaller", func(t *testing.T) { + t.Parallel() + + args := getTrieAccountsIteratorArgs() + args.Marshaller = nil + + tai, err := NewTrieAccountsIterator(args) + require.Nil(t, tai) + require.Equal(t, errNilMarshaller, err) + }) + + t.Run("nil accounts", func(t *testing.T) { + t.Parallel() + + args := getTrieAccountsIteratorArgs() + args.Accounts = nil + + tai, err := NewTrieAccountsIterator(args) + require.Nil(t, tai) + require.Equal(t, errNilAccountsAdapter, err) + }) + + t.Run("should work", func(t *testing.T) { + 
t.Parallel() + + args := getTrieAccountsIteratorArgs() + tai, err := NewTrieAccountsIterator(args) + require.NotNil(t, tai) + require.NoError(t, err) + }) +} + +func TestTrieAccountsIterator_Process(t *testing.T) { + t.Parallel() + + var expectedErr = errors.New("expected error") + + t.Run("skip processing if no handler", func(t *testing.T) { + t.Parallel() + + args := getTrieAccountsIteratorArgs() + args.Accounts = &stateMock.AccountsStub{ + RootHashCalled: func() ([]byte, error) { + return nil, errors.New("error that should not be returned") + }, + } + tai, _ := NewTrieAccountsIterator(args) + err := tai.Process() + require.NoError(t, err) + }) + + t.Run("cannot get root hash", func(t *testing.T) { + t.Parallel() + + args := getTrieAccountsIteratorArgs() + args.Accounts = &stateMock.AccountsStub{ + RootHashCalled: func() ([]byte, error) { + return nil, expectedErr + }, + } + tai, _ := NewTrieAccountsIterator(args) + + err := tai.Process(dummyIterator) + require.Equal(t, expectedErr, err) + }) + + t.Run("cannot get all leaves", func(t *testing.T) { + t.Parallel() + + args := getTrieAccountsIteratorArgs() + args.Accounts = &stateMock.AccountsStub{ + RootHashCalled: func() ([]byte, error) { + return []byte("rootHash"), nil + }, + GetAllLeavesCalled: func(_ *common.TrieIteratorChannels, _ context.Context, _ []byte) error { + return expectedErr + }, + } + tai, _ := NewTrieAccountsIterator(args) + + err := tai.Process(dummyIterator) + require.Equal(t, expectedErr, err) + }) + + t.Run("cannot get existing account", func(t *testing.T) { + t.Parallel() + + args := getTrieAccountsIteratorArgs() + args.Accounts = &stateMock.AccountsStub{ + RootHashCalled: func() ([]byte, error) { + return []byte("rootHash"), nil + }, + GetAllLeavesCalled: func(iter *common.TrieIteratorChannels, _ context.Context, _ []byte) error { + userAcc := &stateMock.AccountWrapMock{ + RootHash: []byte("rootHash"), + } + userAccBytes, _ := args.Marshaller.Marshal(userAcc) + iter.LeavesChan <- keyValStorage.NewKeyValStorage([]byte("addr"), userAccBytes) + close(iter.LeavesChan) + return nil + }, + GetExistingAccountCalled: func(addressContainer []byte) (vmcommon.AccountHandler, error) { + return nil, expectedErr + }, + } + tai, _ := NewTrieAccountsIterator(args) + + err := tai.Process(dummyIterator) + require.Equal(t, expectedErr, err) + }) + + t.Run("should ignore non-accounts leaves", func(t *testing.T) { + t.Parallel() + + args := getTrieAccountsIteratorArgs() + args.Accounts = &stateMock.AccountsStub{ + RootHashCalled: func() ([]byte, error) { + return []byte("rootHash"), nil + }, + GetAllLeavesCalled: func(iter *common.TrieIteratorChannels, _ context.Context, _ []byte) error { + userAcc := &stateMock.AccountWrapMock{ + RootHash: []byte("rootHash"), + } + userAccBytes, _ := args.Marshaller.Marshal(userAcc) + iter.LeavesChan <- keyValStorage.NewKeyValStorage([]byte("addr"), userAccBytes) + iter.LeavesChan <- keyValStorage.NewKeyValStorage([]byte("non-addr"), []byte("not an account")) + close(iter.LeavesChan) + return nil + }, + GetExistingAccountCalled: func(addressContainer []byte) (vmcommon.AccountHandler, error) { + return &stateMock.AccountWrapMock{}, nil + }, + } + tai, _ := NewTrieAccountsIterator(args) + + err := tai.Process(dummyIterator) + require.NoError(t, err) + }) + + t.Run("should ignore user account without root hash", func(t *testing.T) { + t.Parallel() + + args := getTrieAccountsIteratorArgs() + args.Accounts = &stateMock.AccountsStub{ + RootHashCalled: func() ([]byte, error) { + return []byte("rootHash"), 
nil + }, + GetAllLeavesCalled: func(iter *common.TrieIteratorChannels, _ context.Context, _ []byte) error { + userAcc := &stateMock.AccountWrapMock{ + RootHash: nil, + } + userAccBytes, _ := args.Marshaller.Marshal(userAcc) + iter.LeavesChan <- keyValStorage.NewKeyValStorage([]byte("addr"), userAccBytes) + close(iter.LeavesChan) + return nil + }, + GetExistingAccountCalled: func(addressContainer []byte) (vmcommon.AccountHandler, error) { + return &stateMock.AccountWrapMock{}, nil + }, + } + tai, _ := NewTrieAccountsIterator(args) + + err := tai.Process(dummyIterator) + require.NoError(t, err) + }) + + t.Run("should ignore accounts that cannot be casted", func(t *testing.T) { + t.Parallel() + + args := getTrieAccountsIteratorArgs() + args.Accounts = &stateMock.AccountsStub{ + RootHashCalled: func() ([]byte, error) { + return []byte("rootHash"), nil + }, + GetAllLeavesCalled: func(iter *common.TrieIteratorChannels, _ context.Context, _ []byte) error { + userAcc := state.NewEmptyUserAccount() + userAcc.SetRootHash([]byte("root")) + userAccBytes, _ := args.Marshaller.Marshal(userAcc) + iter.LeavesChan <- keyValStorage.NewKeyValStorage([]byte("addr"), userAccBytes) + close(iter.LeavesChan) + return nil + }, + GetExistingAccountCalled: func(addressContainer []byte) (vmcommon.AccountHandler, error) { + return state.NewEmptyPeerAccount(), nil + }, + } + tai, _ := NewTrieAccountsIterator(args) + + err := tai.Process(dummyIterator) + require.NoError(t, err) + }) + + t.Run("should work with dummy handler", func(t *testing.T) { + t.Parallel() + + args := getTrieAccountsIteratorArgs() + args.Accounts = &stateMock.AccountsStub{ + RootHashCalled: func() ([]byte, error) { + return []byte("rootHash"), nil + }, + GetAllLeavesCalled: func(iter *common.TrieIteratorChannels, _ context.Context, _ []byte) error { + userAcc := &stateMock.AccountWrapMock{ + RootHash: []byte("rootHash"), + } + userAccBytes, _ := args.Marshaller.Marshal(userAcc) + iter.LeavesChan <- keyValStorage.NewKeyValStorage([]byte("addr"), userAccBytes) + close(iter.LeavesChan) + return nil + }, + GetExistingAccountCalled: func(addressContainer []byte) (vmcommon.AccountHandler, error) { + return &stateMock.AccountWrapMock{}, nil + }, + } + tai, _ := NewTrieAccountsIterator(args) + + err := tai.Process(dummyIterator) + require.NoError(t, err) + }) + + t.Run("one handler returns error, should error", func(t *testing.T) { + t.Parallel() + + handler1 := func(account state.UserAccountHandler) error { + return nil + } + handler2 := func(account state.UserAccountHandler) error { + return expectedErr + } + args := getTrieAccountsIteratorArgs() + args.Accounts = &stateMock.AccountsStub{ + RootHashCalled: func() ([]byte, error) { + return []byte("rootHash"), nil + }, + GetAllLeavesCalled: func(iter *common.TrieIteratorChannels, _ context.Context, _ []byte) error { + userAcc := &stateMock.AccountWrapMock{ + RootHash: []byte("rootHash"), + } + userAccBytes, _ := args.Marshaller.Marshal(userAcc) + iter.LeavesChan <- keyValStorage.NewKeyValStorage([]byte("addr"), userAccBytes) + close(iter.LeavesChan) + return nil + }, + GetExistingAccountCalled: func(addressContainer []byte) (vmcommon.AccountHandler, error) { + return &stateMock.AccountWrapMock{RootHash: []byte("rootHash")}, nil + }, + } + tai, _ := NewTrieAccountsIterator(args) + + err := tai.Process(handler1, handler2) + require.Equal(t, expectedErr, err) + }) + + t.Run("should work with handlers", func(t *testing.T) { + t.Parallel() + + handlersReceived := make(map[int]struct{}) + handler1 := 
func(account state.UserAccountHandler) error { + handlersReceived[0] = struct{}{} + return nil + } + handler2 := func(account state.UserAccountHandler) error { + handlersReceived[1] = struct{}{} + return nil + } + args := getTrieAccountsIteratorArgs() + args.Accounts = &stateMock.AccountsStub{ + RootHashCalled: func() ([]byte, error) { + return []byte("rootHash"), nil + }, + GetAllLeavesCalled: func(iter *common.TrieIteratorChannels, _ context.Context, _ []byte) error { + userAcc := &stateMock.AccountWrapMock{ + RootHash: []byte("rootHash"), + } + userAccBytes, _ := args.Marshaller.Marshal(userAcc) + iter.LeavesChan <- keyValStorage.NewKeyValStorage([]byte("addr"), userAccBytes) + close(iter.LeavesChan) + return nil + }, + GetExistingAccountCalled: func(addressContainer []byte) (vmcommon.AccountHandler, error) { + return &stateMock.AccountWrapMock{RootHash: []byte("rootHash")}, nil + }, + } + tai, _ := NewTrieAccountsIterator(args) + + err := tai.Process(handler1, handler2) + require.NoError(t, err) + require.Len(t, handlersReceived, 2) + }) +} diff --git a/process/track/baseBlockTrack.go b/process/track/baseBlockTrack.go index f4a264e5086..22eb1c86cc1 100644 --- a/process/track/baseBlockTrack.go +++ b/process/track/baseBlockTrack.go @@ -787,6 +787,9 @@ func checkTrackerNilParameters(arguments ArgBaseTracker) error { if check.IfNil(arguments.FeeHandler) { return process.ErrNilEconomicsData } + if check.IfNil(arguments.WhitelistHandler) { + return process.ErrNilWhiteListHandler + } return nil } diff --git a/process/track/baseBlockTrack_test.go b/process/track/baseBlockTrack_test.go index 7f633452179..8c919cd9ee7 100644 --- a/process/track/baseBlockTrack_test.go +++ b/process/track/baseBlockTrack_test.go @@ -307,6 +307,24 @@ func TestNewBlockTrack_ShouldErrNotarizedHeadersSliceIsNil(t *testing.T) { assert.True(t, check.IfNil(mbt)) } +func TestNewBlockTrack_ShouldErrNilWhitelistHandler(t *testing.T) { + t.Parallel() + + shardArguments := CreateShardTrackerMockArguments() + shardArguments.WhitelistHandler = nil + sbt, err := track.NewShardBlockTrack(shardArguments) + + assert.Equal(t, process.ErrNilWhiteListHandler, err) + assert.Nil(t, sbt) + + metaArguments := CreateMetaTrackerMockArguments() + metaArguments.WhitelistHandler = nil + mbt, err := track.NewMetaBlockTrack(metaArguments) + + assert.Equal(t, process.ErrNilWhiteListHandler, err) + assert.True(t, check.IfNil(mbt)) +} + func TestNewBlockTrack_ShouldWork(t *testing.T) { t.Parallel() diff --git a/process/transaction/shardProcess.go b/process/transaction/shardProcess.go index 08640eead16..d9709cd2bf5 100644 --- a/process/transaction/shardProcess.go +++ b/process/transaction/shardProcess.go @@ -227,6 +227,10 @@ func (txProc *txProcessor) executeAfterFailedMoveBalanceTransaction( tx *transaction.Transaction, txError error, ) error { + if core.IsGetNodeFromDBError(txError) { + return txError + } + acntSnd, err := txProc.getAccountFromAddress(tx.SndAddr) if err != nil { return err diff --git a/scripts/testnet/include/config.sh b/scripts/testnet/include/config.sh index 425339f99c6..9d8c9490a86 100644 --- a/scripts/testnet/include/config.sh +++ b/scripts/testnet/include/config.sh @@ -112,6 +112,13 @@ updateNodeConfig() { updateTOMLValue config_observer.toml "ChainID" "\"local-testnet"\" fi + if [ $ROUNDS_PER_EPOCH -ne 0 ]; then + sed -i "s,RoundsPerEpoch.*$,RoundsPerEpoch = $ROUNDS_PER_EPOCH," config_observer.toml + sed -i "s,MinRoundsBetweenEpochs.*$,MinRoundsBetweenEpochs = $ROUNDS_PER_EPOCH," config_observer.toml + sed -i 
"s,RoundsPerEpoch.*$,RoundsPerEpoch = $ROUNDS_PER_EPOCH," config_validator.toml + sed -i "s,MinRoundsBetweenEpochs.*$,MinRoundsBetweenEpochs = $ROUNDS_PER_EPOCH," config_validator.toml + fi + cp nodesSetup_edit.json nodesSetup.json rm nodesSetup_edit.json diff --git a/scripts/testnet/variables.sh b/scripts/testnet/variables.sh index 14eff94e7e9..135a29b8478 100644 --- a/scripts/testnet/variables.sh +++ b/scripts/testnet/variables.sh @@ -68,6 +68,9 @@ export MULTI_KEY_NODES=0 # ALWAYS_NEW_CHAINID will generate a fresh new chain ID each time start.sh/config.sh is called export ALWAYS_NEW_CHAINID=1 +# ROUNDS_PER_EPOCH represents the number of rounds per epoch. If set to 0, it won't override the node's config +export ROUNDS_PER_EPOCH=0 + # HYSTERESIS defines the hysteresis value for number of nodes in shard export HYSTERESIS=0.0 diff --git a/sharding/mock/enableEpochsHandlerMock.go b/sharding/mock/enableEpochsHandlerMock.go index cbe6fb10014..dca1d41a6c7 100644 --- a/sharding/mock/enableEpochsHandlerMock.go +++ b/sharding/mock/enableEpochsHandlerMock.go @@ -581,6 +581,16 @@ func (mock *EnableEpochsHandlerMock) IsMultiClaimOnDelegationEnabled() bool { return false } +// IsChangeUsernameEnabled - +func (mock *EnableEpochsHandlerMock) IsChangeUsernameEnabled() bool { + return false +} + +// IsConsistentTokensValuesLengthCheckEnabled - +func (mock *EnableEpochsHandlerMock) IsConsistentTokensValuesLengthCheckEnabled() bool { + return false +} + // IsInterfaceNil returns true if there is no value under the interface func (mock *EnableEpochsHandlerMock) IsInterfaceNil() bool { return mock == nil diff --git a/sharding/nodesCoordinator/indexHashedNodesCoordinator.go b/sharding/nodesCoordinator/indexHashedNodesCoordinator.go index fbdfe04914b..6840eec6cd1 100644 --- a/sharding/nodesCoordinator/indexHashedNodesCoordinator.go +++ b/sharding/nodesCoordinator/indexHashedNodesCoordinator.go @@ -17,7 +17,6 @@ import ( "github.com/multiversx/mx-chain-core-go/marshal" "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/epochStart" - "github.com/multiversx/mx-chain-go/errors" "github.com/multiversx/mx-chain-go/state" "github.com/multiversx/mx-chain-go/storage" logger "github.com/multiversx/mx-chain-logger-go" @@ -810,7 +809,7 @@ func (ihnc *indexHashedNodesCoordinator) handleErrorLog(err error, message strin } logLevel := logger.LogError - if errors.IsClosingError(err) { + if core.IsClosingError(err) { logLevel = logger.LogDebug } diff --git a/state/accountsDB.go b/state/accountsDB.go index f357ae9f42c..5daea6408ab 100644 --- a/state/accountsDB.go +++ b/state/accountsDB.go @@ -18,9 +18,9 @@ import ( "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/common/errChan" "github.com/multiversx/mx-chain-go/common/holders" - "github.com/multiversx/mx-chain-go/errors" "github.com/multiversx/mx-chain-go/trie/keyBuilder" "github.com/multiversx/mx-chain-go/trie/statistics" + "github.com/multiversx/mx-chain-go/trie/storageMarker" logger "github.com/multiversx/mx-chain-logger-go" vmcommon "github.com/multiversx/mx-chain-vm-common-go" ) @@ -220,7 +220,7 @@ func handleLoggingWhenError(message string, err error, extraArguments ...interfa if err == nil { return } - if errors.IsClosingError(err) { + if core.IsClosingError(err) { args := []interface{}{"reason", err} log.Debug(message, append(args, extraArguments...)...) 
return @@ -1303,7 +1303,7 @@ func (adb *AccountsDB) syncMissingNodes(missingNodesChan chan []byte, errChan co } for missingNode := range missingNodesChan { - err := syncer.SyncAccounts(missingNode) + err := syncer.SyncAccounts(missingNode, storageMarker.NewDisabledStorageMarker()) if err != nil { log.Error("could not sync missing node", "missing node hash", missingNode, diff --git a/state/accountsDB_test.go b/state/accountsDB_test.go index a70f2a1fff3..ae56e67ccfd 100644 --- a/state/accountsDB_test.go +++ b/state/accountsDB_test.go @@ -31,6 +31,8 @@ import ( "github.com/multiversx/mx-chain-go/testscommon/hashingMocks" stateMock "github.com/multiversx/mx-chain-go/testscommon/state" "github.com/multiversx/mx-chain-go/testscommon/statusHandler" + "github.com/multiversx/mx-chain-go/testscommon/storage" + "github.com/multiversx/mx-chain-go/testscommon/storageManager" trieMock "github.com/multiversx/mx-chain-go/testscommon/trie" "github.com/multiversx/mx-chain-go/trie" "github.com/multiversx/mx-chain-go/trie/hashesHolder" @@ -45,7 +47,7 @@ func createMockAccountsDBArgs() state.ArgsAccountsDB { return state.ArgsAccountsDB{ Trie: &trieMock.TrieStub{ GetStorageManagerCalled: func() common.StorageManager { - return &testscommon.StorageManagerStub{} + return &storageManager.StorageManagerStub{} }, }, Hasher: &hashingMocks.HasherMock{}, @@ -90,7 +92,7 @@ func getDefaultTrieAndAccountsDb() (common.Trie, *state.AccountsDB) { return tr, adb } -func getDefaultTrieAndAccountsDbWithCustomDB(db common.DBWriteCacher) (common.Trie, *state.AccountsDB) { +func getDefaultTrieAndAccountsDbWithCustomDB(db common.BaseStorer) (common.Trie, *state.AccountsDB) { checkpointHashesHolder := hashesHolder.NewCheckpointHashesHolder(10000000, testscommon.HashSize) adb, tr, _ := getDefaultStateComponents(checkpointHashesHolder, db) return tr, adb @@ -98,7 +100,7 @@ func getDefaultTrieAndAccountsDbWithCustomDB(db common.DBWriteCacher) (common.Tr func getDefaultStateComponents( hashesHolder trie.CheckpointHashesHolder, - db common.DBWriteCacher, + db common.BaseStorer, ) (*state.AccountsDB, common.Trie, common.StorageManager) { generalCfg := config.TrieStorageManagerConfig{ PruningBufferLen: 1000, @@ -108,15 +110,9 @@ func getDefaultStateComponents( marshaller := &testscommon.MarshalizerMock{} hasher := &hashingMocks.HasherMock{} - args := trie.NewTrieStorageManagerArgs{ - MainStorer: db, - CheckpointsStorer: testscommon.NewSnapshotPruningStorerMock(), - Marshalizer: marshaller, - Hasher: hasher, - GeneralConfig: generalCfg, - CheckpointHashesHolder: hashesHolder, - IdleProvider: &testscommon.ProcessStatusHandlerStub{}, - } + args := storage.GetStorageManagerArgs() + args.MainStorer = db + args.CheckpointHashesHolder = hashesHolder trieStorage, _ := trie.NewTrieStorageManager(args) tr, _ := trie.NewTrie(trieStorage, marshaller, hasher, 5) ewlArgs := evictionWaitingList.MemoryEvictionWaitingListArgs{ @@ -233,7 +229,7 @@ func TestAccountsDB_SaveAccountNilAccountShouldErr(t *testing.T) { adb := generateAccountDBFromTrie(&trieMock.TrieStub{ GetStorageManagerCalled: func() common.StorageManager { - return &testscommon.StorageManagerStub{} + return &storageManager.StorageManagerStub{} }, }) @@ -250,7 +246,7 @@ func TestAccountsDB_SaveAccountErrWhenGettingOldAccountShouldErr(t *testing.T) { return nil, 0, expectedErr }, GetStorageManagerCalled: func() common.StorageManager { - return &testscommon.StorageManagerStub{} + return &storageManager.StorageManagerStub{} }, }) @@ -269,7 +265,7 @@ func TestAccountsDB_SaveAccountNilOldAccount(t 
*testing.T) { return nil }, GetStorageManagerCalled: func() common.StorageManager { - return &testscommon.StorageManagerStub{} + return &storageManager.StorageManagerStub{} }, }) @@ -293,7 +289,7 @@ func TestAccountsDB_SaveAccountExistingOldAccount(t *testing.T) { return nil }, GetStorageManagerCalled: func() common.StorageManager { - return &testscommon.StorageManagerStub{} + return &storageManager.StorageManagerStub{} }, }) @@ -330,7 +326,7 @@ func TestAccountsDB_SaveAccountSavesCodeAndDataTrieForUserAccount(t *testing.T) return trieStub, nil }, GetStorageManagerCalled: func() common.StorageManager { - return &testscommon.StorageManagerStub{} + return &storageManager.StorageManagerStub{} }, }) @@ -352,7 +348,7 @@ func TestAccountsDB_SaveAccountMalfunctionMarshallerShouldErr(t *testing.T) { account := generateAccount() mockTrie := &trieMock.TrieStub{ GetStorageManagerCalled: func() common.StorageManager { - return &testscommon.StorageManagerStub{} + return &storageManager.StorageManagerStub{} }, } marshaller := &testscommon.MarshalizerMock{} @@ -381,7 +377,7 @@ func TestAccountsDB_SaveAccountWithSomeValuesShouldWork(t *testing.T) { return nil }, GetStorageManagerCalled: func() common.StorageManager { - return &testscommon.StorageManagerStub{} + return &storageManager.StorageManagerStub{} }, } _, account, adb := generateAddressAccountAccountsDB(ts) @@ -408,7 +404,7 @@ func TestAccountsDB_RemoveAccountShouldWork(t *testing.T) { return nil }, GetStorageManagerCalled: func() common.StorageManager { - return &testscommon.StorageManagerStub{} + return &storageManager.StorageManagerStub{} }, } @@ -428,7 +424,7 @@ func TestAccountsDB_LoadAccountMalfunctionTrieShouldErr(t *testing.T) { trieStub := &trieMock.TrieStub{ GetStorageManagerCalled: func() common.StorageManager { - return &testscommon.StorageManagerStub{} + return &storageManager.StorageManagerStub{} }, } adr := make([]byte, 32) @@ -449,7 +445,7 @@ func TestAccountsDB_LoadAccountNotFoundShouldCreateEmpty(t *testing.T) { return nil }, GetStorageManagerCalled: func() common.StorageManager { - return &testscommon.StorageManagerStub{} + return &storageManager.StorageManagerStub{} }, } @@ -490,7 +486,7 @@ func TestAccountsDB_LoadAccountExistingShouldLoadDataTrie(t *testing.T) { return dataTrie, nil }, GetStorageManagerCalled: func() common.StorageManager { - return &testscommon.StorageManagerStub{} + return &storageManager.StorageManagerStub{} }, } @@ -509,7 +505,7 @@ func TestAccountsDB_GetExistingAccountMalfunctionTrieShouldErr(t *testing.T) { trieStub := &trieMock.TrieStub{ GetStorageManagerCalled: func() common.StorageManager { - return &testscommon.StorageManagerStub{} + return &storageManager.StorageManagerStub{} }, } adr := make([]byte, 32) @@ -527,7 +523,7 @@ func TestAccountsDB_GetExistingAccountNotFoundShouldRetNil(t *testing.T) { return nil, 0, nil }, GetStorageManagerCalled: func() common.StorageManager { - return &testscommon.StorageManagerStub{} + return &storageManager.StorageManagerStub{} }, } @@ -568,7 +564,7 @@ func TestAccountsDB_GetExistingAccountFoundShouldRetAccount(t *testing.T) { return dataTrie, nil }, GetStorageManagerCalled: func() common.StorageManager { - return &testscommon.StorageManagerStub{} + return &storageManager.StorageManagerStub{} }, } @@ -587,7 +583,7 @@ func TestAccountsDB_GetAccountAccountNotFound(t *testing.T) { tr := &trieMock.TrieStub{ GetStorageManagerCalled: func() common.StorageManager { - return &testscommon.StorageManagerStub{} + return &storageManager.StorageManagerStub{} }, } adr, _, _ := 
generateAddressAccountAccountsDB(tr) @@ -627,7 +623,7 @@ func TestAccountsDB_LoadCodeWrongHashLengthShouldErr(t *testing.T) { tr := &trieMock.TrieStub{ GetStorageManagerCalled: func() common.StorageManager { - return &testscommon.StorageManagerStub{} + return &storageManager.StorageManagerStub{} }, } _, account, adb := generateAddressAccountAccountsDB(tr) @@ -645,7 +641,7 @@ func TestAccountsDB_LoadCodeMalfunctionTrieShouldErr(t *testing.T) { account := generateAccount() mockTrie := &trieMock.TrieStub{ GetStorageManagerCalled: func() common.StorageManager { - return &testscommon.StorageManagerStub{} + return &storageManager.StorageManagerStub{} }, } adb := generateAccountDBFromTrie(mockTrie) @@ -662,7 +658,7 @@ func TestAccountsDB_LoadCodeOkValsShouldWork(t *testing.T) { tr := &trieMock.TrieStub{ GetStorageManagerCalled: func() common.StorageManager { - return &testscommon.StorageManagerStub{} + return &storageManager.StorageManagerStub{} }, } adr, account, _ := generateAddressAccountAccountsDB(tr) @@ -675,7 +671,7 @@ func TestAccountsDB_LoadCodeOkValsShouldWork(t *testing.T) { return serializedCodeEntry, 0, err }, GetStorageManagerCalled: func() common.StorageManager { - return &testscommon.StorageManagerStub{} + return &storageManager.StorageManagerStub{} }, } @@ -700,7 +696,7 @@ func TestAccountsDB_LoadDataNilRootShouldRetNil(t *testing.T) { tr := &trieMock.TrieStub{ GetStorageManagerCalled: func() common.StorageManager { - return &testscommon.StorageManagerStub{} + return &storageManager.StorageManagerStub{} }, } _, account, adb := generateAddressAccountAccountsDB(tr) @@ -716,7 +712,7 @@ func TestAccountsDB_LoadDataBadLengthShouldErr(t *testing.T) { _, account, adb := generateAddressAccountAccountsDB(&trieMock.TrieStub{ GetStorageManagerCalled: func() common.StorageManager { - return &testscommon.StorageManagerStub{} + return &storageManager.StorageManagerStub{} }, }) @@ -735,7 +731,7 @@ func TestAccountsDB_LoadDataMalfunctionTrieShouldErr(t *testing.T) { mockTrie := &trieMock.TrieStub{ GetStorageManagerCalled: func() common.StorageManager { - return &testscommon.StorageManagerStub{} + return &storageManager.StorageManagerStub{} }, } adb := generateAccountDBFromTrie(mockTrie) @@ -750,7 +746,7 @@ func TestAccountsDB_LoadDataNotFoundRootShouldReturnErr(t *testing.T) { _, account, adb := generateAddressAccountAccountsDB(&trieMock.TrieStub{ GetStorageManagerCalled: func() common.StorageManager { - return &testscommon.StorageManagerStub{} + return &storageManager.StorageManagerStub{} }, }) @@ -795,7 +791,7 @@ func TestAccountsDB_LoadDataWithSomeValuesShouldWork(t *testing.T) { return dataTrie, nil }, GetStorageManagerCalled: func() common.StorageManager { - return &testscommon.StorageManagerStub{} + return &storageManager.StorageManagerStub{} }, } adb := generateAccountDBFromTrie(mockTrie) @@ -851,7 +847,7 @@ func TestAccountsDB_CommitShouldCallCommitFromTrie(t *testing.T) { }, nil }, GetStorageManagerCalled: func() common.StorageManager { - return &testscommon.StorageManagerStub{} + return &storageManager.StorageManagerStub{} }, } @@ -877,7 +873,7 @@ func TestAccountsDB_RecreateTrieMalfunctionTrieShouldErr(t *testing.T) { errExpected := errors.New("failure") trieStub := &trieMock.TrieStub{ GetStorageManagerCalled: func() common.StorageManager { - return &testscommon.StorageManagerStub{} + return &storageManager.StorageManagerStub{} }, } trieStub.RecreateFromEpochCalled = func(_ common.RootHashHolder) (tree common.Trie, e error) { @@ -899,7 +895,7 @@ func 
TestAccountsDB_RecreateTrieOutputsNilTrieShouldErr(t *testing.T) { trieStub := trieMock.TrieStub{ GetStorageManagerCalled: func() common.StorageManager { - return &testscommon.StorageManagerStub{} + return &storageManager.StorageManagerStub{} }, } trieStub.RecreateFromEpochCalled = func(_ common.RootHashHolder) (tree common.Trie, e error) { @@ -922,7 +918,7 @@ func TestAccountsDB_RecreateTrieOkValsShouldWork(t *testing.T) { trieStub := trieMock.TrieStub{ GetStorageManagerCalled: func() common.StorageManager { - return &testscommon.StorageManagerStub{} + return &storageManager.StorageManagerStub{} }, RecreateFromEpochCalled: func(_ common.RootHashHolder) (common.Trie, error) { wasCalled = true @@ -944,7 +940,7 @@ func TestAccountsDB_SnapshotState(t *testing.T) { snapshotMut := sync.Mutex{} trieStub := &trieMock.TrieStub{ GetStorageManagerCalled: func() common.StorageManager { - return &testscommon.StorageManagerStub{ + return &storageManager.StorageManagerStub{ TakeSnapshotCalled: func(_ string, _ []byte, _ []byte, _ *common.TrieIteratorChannels, _ chan []byte, _ common.SnapshotStatisticsHandler, _ uint32) { snapshotMut.Lock() takeSnapshotWasCalled = true @@ -970,7 +966,7 @@ func TestAccountsDB_SnapshotStateOnAClosedStorageManagerShouldNotMarkActiveDB(t activeDBWasPut := false trieStub := &trieMock.TrieStub{ GetStorageManagerCalled: func() common.StorageManager { - return &testscommon.StorageManagerStub{ + return &storageManager.StorageManagerStub{ ShouldTakeSnapshotCalled: func() bool { return true }, @@ -1023,7 +1019,7 @@ func TestAccountsDB_SnapshotStateWithErrorsShouldNotMarkActiveDB(t *testing.T) { expectedErr := errors.New("expected error") trieStub := &trieMock.TrieStub{ GetStorageManagerCalled: func() common.StorageManager { - return &testscommon.StorageManagerStub{ + return &storageManager.StorageManagerStub{ ShouldTakeSnapshotCalled: func() bool { return true }, @@ -1074,7 +1070,7 @@ func TestAccountsDB_SnapshotStateGetLatestStorageEpochErrDoesNotSnapshot(t *test takeSnapshotCalled := false trieStub := &trieMock.TrieStub{ GetStorageManagerCalled: func() common.StorageManager { - return &testscommon.StorageManagerStub{ + return &storageManager.StorageManagerStub{ GetLatestStorageEpochCalled: func() (uint32, error) { return 0, fmt.Errorf("new error") }, @@ -1101,7 +1097,7 @@ func TestAccountsDB_SnapshotStateSnapshotSameRootHash(t *testing.T) { takeSnapshotCalled := 0 trieStub := &trieMock.TrieStub{ GetStorageManagerCalled: func() common.StorageManager { - return &testscommon.StorageManagerStub{ + return &storageManager.StorageManagerStub{ GetLatestStorageEpochCalled: func() (uint32, error) { return latestEpoch, nil }, @@ -1184,7 +1180,7 @@ func TestAccountsDB_SnapshotStateSkipSnapshotIfSnapshotInProgress(t *testing.T) takeSnapshotCalled := 0 trieStub := &trieMock.TrieStub{ GetStorageManagerCalled: func() common.StorageManager { - return &testscommon.StorageManagerStub{ + return &storageManager.StorageManagerStub{ GetLatestStorageEpochCalled: func() (uint32, error) { return latestEpoch, nil }, @@ -1248,7 +1244,7 @@ func TestAccountsDB_SetStateCheckpoint(t *testing.T) { snapshotMut := sync.Mutex{} trieStub := &trieMock.TrieStub{ GetStorageManagerCalled: func() common.StorageManager { - return &testscommon.StorageManagerStub{ + return &storageManager.StorageManagerStub{ SetCheckpointCalled: func(_ []byte, _ []byte, _ *common.TrieIteratorChannels, _ chan []byte, _ common.SnapshotStatisticsHandler) { snapshotMut.Lock() setCheckPointWasCalled = true @@ -1271,7 +1267,7 @@ func 
TestAccountsDB_IsPruningEnabled(t *testing.T) { trieStub := &trieMock.TrieStub{ GetStorageManagerCalled: func() common.StorageManager { - return &testscommon.StorageManagerStub{ + return &storageManager.StorageManagerStub{ IsPruningEnabledCalled: func() bool { return true }, @@ -1289,7 +1285,7 @@ func TestAccountsDB_RevertToSnapshotOutOfBounds(t *testing.T) { trieStub := &trieMock.TrieStub{ GetStorageManagerCalled: func() common.StorageManager { - return &testscommon.StorageManagerStub{} + return &storageManager.StorageManagerStub{} }, } adb := generateAccountDBFromTrie(trieStub) @@ -1411,7 +1407,7 @@ func TestAccountsDB_RootHash(t *testing.T) { return rootHash, nil }, GetStorageManagerCalled: func() common.StorageManager { - return &testscommon.StorageManagerStub{} + return &storageManager.StorageManagerStub{} }, } adb := generateAccountDBFromTrie(trieStub) @@ -1434,7 +1430,7 @@ func TestAccountsDB_GetAllLeaves(t *testing.T) { return nil }, GetStorageManagerCalled: func() common.StorageManager { - return &testscommon.StorageManagerStub{} + return &storageManager.StorageManagerStub{} }, } @@ -1735,15 +1731,7 @@ func TestAccountsDB_MainTrieAutomaticallyMarksCodeUpdatesForEviction(t *testing. marshaller := &testscommon.MarshalizerMock{} hasher := &hashingMocks.HasherMock{} ewl := stateMock.NewEvictionWaitingListMock(100) - args := trie.NewTrieStorageManagerArgs{ - MainStorer: testscommon.CreateMemUnit(), - CheckpointsStorer: testscommon.CreateMemUnit(), - Marshalizer: marshaller, - Hasher: hasher, - GeneralConfig: config.TrieStorageManagerConfig{SnapshotsGoroutineNum: 1}, - CheckpointHashesHolder: &trieMock.CheckpointHashesHolderStub{}, - IdleProvider: &testscommon.ProcessStatusHandlerStub{}, - } + args := storage.GetStorageManagerArgs() storageManager, _ := trie.NewTrieStorageManager(args) maxTrieLevelInMemory := uint(5) tr, _ := trie.NewTrie(storageManager, marshaller, hasher, maxTrieLevelInMemory) @@ -1820,15 +1808,7 @@ func TestAccountsDB_RemoveAccountMarksObsoleteHashesForEviction(t *testing.T) { hasher := &hashingMocks.HasherMock{} ewl := stateMock.NewEvictionWaitingListMock(100) - args := trie.NewTrieStorageManagerArgs{ - MainStorer: testscommon.CreateMemUnit(), - CheckpointsStorer: testscommon.CreateMemUnit(), - Marshalizer: marshaller, - Hasher: hasher, - GeneralConfig: config.TrieStorageManagerConfig{SnapshotsGoroutineNum: 1}, - CheckpointHashesHolder: &trieMock.CheckpointHashesHolderStub{}, - IdleProvider: &testscommon.ProcessStatusHandlerStub{}, - } + args := storage.GetStorageManagerArgs() storageManager, _ := trie.NewTrieStorageManager(args) tr, _ := trie.NewTrie(storageManager, marshaller, hasher, maxTrieLevelInMemory) spm, _ := storagePruningManager.NewStoragePruningManager(ewl, 5) @@ -2020,6 +2000,7 @@ func mergeMaps(map1 common.ModifiedHashes, map2 common.ModifiedHashes) { func TestAccountsDB_CommitSetsStateCheckpointIfCheckpointHashesHolderIsFull(t *testing.T) { t.Parallel() + mutex := &sync.Mutex{} newHashes := make(common.ModifiedHashes) numRemoveCalls := 0 checkpointHashesHolder := &trieMock.CheckpointHashesHolderStub{ @@ -2027,9 +2008,11 @@ func TestAccountsDB_CommitSetsStateCheckpointIfCheckpointHashesHolderIsFull(t *t return true }, RemoveCalled: func(hash []byte) { + mutex.Lock() _, ok := newHashes[string(hash)] assert.True(t, ok) numRemoveCalls++ + mutex.Unlock() }, } @@ -2248,15 +2231,7 @@ func TestAccountsDB_GetCode(t *testing.T) { marshaller := &testscommon.MarshalizerMock{} hasher := &hashingMocks.HasherMock{} - args := trie.NewTrieStorageManagerArgs{ - 
MainStorer: testscommon.CreateMemUnit(), - CheckpointsStorer: testscommon.CreateMemUnit(), - Marshalizer: marshaller, - Hasher: hasher, - GeneralConfig: config.TrieStorageManagerConfig{SnapshotsGoroutineNum: 1}, - CheckpointHashesHolder: &trieMock.CheckpointHashesHolderStub{}, - IdleProvider: &testscommon.ProcessStatusHandlerStub{}, - } + args := storage.GetStorageManagerArgs() storageManager, _ := trie.NewTrieStorageManager(args) tr, _ := trie.NewTrie(storageManager, marshaller, hasher, maxTrieLevelInMemory) spm := disabled.NewDisabledStoragePruningManager() @@ -2404,7 +2379,7 @@ func TestAccountsDB_Close(t *testing.T) { return nil }, GetStorageManagerCalled: func() common.StorageManager { - return &testscommon.StorageManagerStub{} + return &storageManager.StorageManagerStub{} }, } marshaller := &testscommon.MarshalizerMock{} @@ -2475,7 +2450,7 @@ func TestAccountsDB_GetAccountFromBytesShouldLoadDataTrie(t *testing.T) { return dataTrie, nil }, GetStorageManagerCalled: func() common.StorageManager { - return &testscommon.StorageManagerStub{} + return &storageManager.StorageManagerStub{} }, } @@ -2499,7 +2474,7 @@ func TestAccountsDB_SetSyncerAndStartSnapshotIfNeeded(t *testing.T) { return rootHash, nil }, GetStorageManagerCalled: func() common.StorageManager { - return &testscommon.StorageManagerStub{ + return &storageManager.StorageManagerStub{ ShouldTakeSnapshotCalled: func() bool { return true }, @@ -2532,7 +2507,7 @@ func TestAccountsDB_SetSyncerAndStartSnapshotIfNeeded(t *testing.T) { return rootHash, nil }, GetStorageManagerCalled: func() common.StorageManager { - return &testscommon.StorageManagerStub{ + return &storageManager.StorageManagerStub{ ShouldTakeSnapshotCalled: func() bool { return true }, @@ -2561,7 +2536,7 @@ func TestAccountsDB_SetSyncerAndStartSnapshotIfNeeded(t *testing.T) { return rootHash, nil }, GetStorageManagerCalled: func() common.StorageManager { - return &testscommon.StorageManagerStub{ + return &storageManager.StorageManagerStub{ ShouldTakeSnapshotCalled: func() bool { return true }, @@ -2604,7 +2579,7 @@ func TestAccountsDB_NewAccountsDbStartsSnapshotAfterRestart(t *testing.T) { return rootHash, nil }, GetStorageManagerCalled: func() common.StorageManager { - return &testscommon.StorageManagerStub{ + return &storageManager.StorageManagerStub{ GetCalled: func(key []byte) ([]byte, error) { if bytes.Equal(key, []byte(common.ActiveDBKey)) { return nil, fmt.Errorf("key not found") @@ -2638,15 +2613,7 @@ func BenchmarkAccountsDb_GetCodeEntry(b *testing.B) { marshaller := &testscommon.MarshalizerMock{} hasher := &hashingMocks.HasherMock{} - args := trie.NewTrieStorageManagerArgs{ - MainStorer: testscommon.CreateMemUnit(), - CheckpointsStorer: testscommon.CreateMemUnit(), - Marshalizer: marshaller, - Hasher: hasher, - GeneralConfig: config.TrieStorageManagerConfig{SnapshotsGoroutineNum: 1}, - CheckpointHashesHolder: &trieMock.CheckpointHashesHolderStub{}, - IdleProvider: &testscommon.ProcessStatusHandlerStub{}, - } + args := storage.GetStorageManagerArgs() storageManager, _ := trie.NewTrieStorageManager(args) tr, _ := trie.NewTrie(storageManager, marshaller, hasher, maxTrieLevelInMemory) spm := disabled.NewDisabledStoragePruningManager() @@ -2839,7 +2806,7 @@ func TestAccountsDB_SyncMissingSnapshotNodes(t *testing.T) { trieHashes, _ = tr.GetAllHashes() syncer := &mock.AccountsDBSyncerStub{ - SyncAccountsCalled: func(rootHash []byte) error { + SyncAccountsCalled: func(rootHash []byte, _ common.StorageMarker) error { isSyncError = true return errors.New("sync error") 
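Much of the test-file churn above replaces hand-built trie.NewTrieStorageManagerArgs literals with a shared defaults helper, storage.GetStorageManagerArgs(), whose fields are then overridden per test. A small sketch of that defaults-plus-override pattern, using hypothetical types rather than the real trie package:

package main

import "fmt"

// trieStorageManagerArgs is a hypothetical stand-in for trie.NewTrieStorageManagerArgs.
type trieStorageManagerArgs struct {
	MainStorer             string
	CheckpointsStorer      string
	SnapshotsGoroutineNum  int
	CheckpointHashesHolder string
}

// getStorageManagerArgs mirrors the role of storage.GetStorageManagerArgs():
// one place owns sensible defaults so individual tests only override the
// fields they actually care about.
func getStorageManagerArgs() trieStorageManagerArgs {
	return trieStorageManagerArgs{
		MainStorer:             "memory unit",
		CheckpointsStorer:      "memory unit",
		SnapshotsGoroutineNum:  1,
		CheckpointHashesHolder: "stub holder",
	}
}

func main() {
	args := getStorageManagerArgs()
	args.MainStorer = "custom db"                      // per-test override
	args.CheckpointHashesHolder = "real hashes holder" // per-test override
	fmt.Printf("%+v\n", args)
}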
}, diff --git a/state/factory/accountsAdapterAPICreator_test.go b/state/factory/accountsAdapterAPICreator_test.go index dd88f13dd4e..b0151b907c6 100644 --- a/state/factory/accountsAdapterAPICreator_test.go +++ b/state/factory/accountsAdapterAPICreator_test.go @@ -10,6 +10,7 @@ import ( "github.com/multiversx/mx-chain-go/testscommon" mockState "github.com/multiversx/mx-chain-go/testscommon/state" "github.com/multiversx/mx-chain-go/testscommon/statusHandler" + "github.com/multiversx/mx-chain-go/testscommon/storageManager" mockTrie "github.com/multiversx/mx-chain-go/testscommon/trie" "github.com/stretchr/testify/assert" ) @@ -18,7 +19,7 @@ func createMockAccountsArgs() state.ArgsAccountsDB { return state.ArgsAccountsDB{ Trie: &mockTrie.TrieStub{ GetStorageManagerCalled: func() common.StorageManager { - return &testscommon.StorageManagerStub{} + return &storageManager.StorageManagerStub{} }, }, Hasher: &testscommon.HasherStub{}, diff --git a/state/interface.go b/state/interface.go index 22655227f4a..8071418796c 100644 --- a/state/interface.go +++ b/state/interface.go @@ -132,7 +132,7 @@ type AccountsAdapter interface { // AccountsDBSyncer defines the methods for the accounts db syncer type AccountsDBSyncer interface { - SyncAccounts(rootHash []byte) error + SyncAccounts(rootHash []byte, storageMarker common.StorageMarker) error IsInterfaceNil() bool } diff --git a/state/peerAccountsDB_test.go b/state/peerAccountsDB_test.go index a3524d5aab8..06e5f777179 100644 --- a/state/peerAccountsDB_test.go +++ b/state/peerAccountsDB_test.go @@ -12,7 +12,7 @@ import ( "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/process/mock" "github.com/multiversx/mx-chain-go/state" - "github.com/multiversx/mx-chain-go/testscommon" + "github.com/multiversx/mx-chain-go/testscommon/storageManager" trieMock "github.com/multiversx/mx-chain-go/testscommon/trie" "github.com/stretchr/testify/assert" ) @@ -98,7 +98,7 @@ func TestNewPeerAccountsDB_SnapshotState(t *testing.T) { args := createMockAccountsDBArgs() args.Trie = &trieMock.TrieStub{ GetStorageManagerCalled: func() common.StorageManager { - return &testscommon.StorageManagerStub{ + return &storageManager.StorageManagerStub{ TakeSnapshotCalled: func(_ string, _ []byte, _ []byte, _ *common.TrieIteratorChannels, _ chan []byte, _ common.SnapshotStatisticsHandler, _ uint32) { snapshotCalled = true }, @@ -121,7 +121,7 @@ func TestNewPeerAccountsDB_SnapshotStateGetLatestStorageEpochErrDoesNotSnapshot( args := createMockAccountsDBArgs() args.Trie = &trieMock.TrieStub{ GetStorageManagerCalled: func() common.StorageManager { - return &testscommon.StorageManagerStub{ + return &storageManager.StorageManagerStub{ GetLatestStorageEpochCalled: func() (uint32, error) { return 0, fmt.Errorf("new error") }, @@ -146,7 +146,7 @@ func TestNewPeerAccountsDB_SetStateCheckpoint(t *testing.T) { args := createMockAccountsDBArgs() args.Trie = &trieMock.TrieStub{ GetStorageManagerCalled: func() common.StorageManager { - return &testscommon.StorageManagerStub{ + return &storageManager.StorageManagerStub{ SetCheckpointCalled: func(_ []byte, _ []byte, _ *common.TrieIteratorChannels, _ chan []byte, _ common.SnapshotStatisticsHandler) { checkpointCalled = true }, @@ -169,7 +169,7 @@ func TestNewPeerAccountsDB_RecreateAllTries(t *testing.T) { args := createMockAccountsDBArgs() args.Trie = &trieMock.TrieStub{ GetStorageManagerCalled: func() common.StorageManager { - return &testscommon.StorageManagerStub{} + return &storageManager.StorageManagerStub{} }, RecreateCalled: 
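The AccountsDBSyncer interface above now takes the storage marker explicitly, so the caller (for example syncMissingNodes, which passes storageMarker.NewDisabledStorageMarker()) decides whether the trie storer gets marked as synced and active. A rough stand-alone sketch of that contract, with simplified stand-ins for common.StorageMarker and the disabled marker:

package main

import (
	"errors"
	"fmt"
)

// storageMarker is a simplified stand-in for common.StorageMarker; the real
// interface marks a trie storage manager as synced and active.
type storageMarker interface {
	MarkStorerAsSyncedAndActive()
}

// disabledStorageMarker plays the role of storageMarker.NewDisabledStorageMarker():
// callers that must not flip the active-DB flag pass a no-op marker.
type disabledStorageMarker struct{}

func (d disabledStorageMarker) MarkStorerAsSyncedAndActive() {}

type accountsSyncer struct{}

// SyncAccounts follows the updated two-argument contract: a nil marker is
// rejected up front instead of being silently replaced by an internal one.
func (s *accountsSyncer) SyncAccounts(rootHash []byte, marker storageMarker) error {
	if marker == nil {
		return errors.New("nil storage marker")
	}
	// ... sync the main trie and data tries rooted at rootHash ...
	marker.MarkStorerAsSyncedAndActive()
	return nil
}

func main() {
	s := &accountsSyncer{}
	fmt.Println(s.SyncAccounts([]byte("rootHash"), disabledStorageMarker{})) // <nil>
	fmt.Println(s.SyncAccounts([]byte("rootHash"), nil))                     // nil storage marker
}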
func(_ []byte) (common.Trie, error) { recreateCalled = true @@ -198,7 +198,7 @@ func TestPeerAccountsDB_SetSyncerAndStartSnapshotIfNeeded(t *testing.T) { return rootHash, nil }, GetStorageManagerCalled: func() common.StorageManager { - return &testscommon.StorageManagerStub{ + return &storageManager.StorageManagerStub{ GetCalled: func(key []byte) ([]byte, error) { if bytes.Equal(key, []byte(common.ActiveDBKey)) { return nil, fmt.Errorf("key not found") @@ -253,7 +253,7 @@ func TestPeerAccountsDB_MarkSnapshotDone(t *testing.T) { args := createMockAccountsDBArgs() args.Trie = &trieMock.TrieStub{ GetStorageManagerCalled: func() common.StorageManager { - return &testscommon.StorageManagerStub{ + return &storageManager.StorageManagerStub{ PutInEpochCalled: func(bytes []byte, bytes2 []byte, u uint32) error { assert.Fail(t, "should have not called put in epoch") return nil @@ -283,7 +283,7 @@ func TestPeerAccountsDB_MarkSnapshotDone(t *testing.T) { args := createMockAccountsDBArgs() args.Trie = &trieMock.TrieStub{ GetStorageManagerCalled: func() common.StorageManager { - return &testscommon.StorageManagerStub{ + return &storageManager.StorageManagerStub{ PutInEpochWithoutCacheCalled: func(key []byte, value []byte, epoch uint32) error { assert.Equal(t, common.ActiveDBKey, string(key)) assert.Equal(t, common.ActiveDBVal, string(value)) @@ -306,7 +306,7 @@ func TestPeerAccountsDB_MarkSnapshotDone(t *testing.T) { args := createMockAccountsDBArgs() args.Trie = &trieMock.TrieStub{ GetStorageManagerCalled: func() common.StorageManager { - return &testscommon.StorageManagerStub{ + return &storageManager.StorageManagerStub{ PutInEpochWithoutCacheCalled: func(key []byte, value []byte, epoch uint32) error { assert.Equal(t, common.ActiveDBKey, string(key)) assert.Equal(t, common.ActiveDBVal, string(value)) @@ -337,7 +337,7 @@ func TestPeerAccountsDB_SetSyncerAndStartSnapshotIfNeededMarksActiveDB(t *testin return rootHash, nil }, GetStorageManagerCalled: func() common.StorageManager { - return &testscommon.StorageManagerStub{ + return &storageManager.StorageManagerStub{ ShouldTakeSnapshotCalled: func() bool { return true }, @@ -372,7 +372,7 @@ func TestPeerAccountsDB_SetSyncerAndStartSnapshotIfNeededMarksActiveDB(t *testin return rootHash, nil }, GetStorageManagerCalled: func() common.StorageManager { - return &testscommon.StorageManagerStub{ + return &storageManager.StorageManagerStub{ ShouldTakeSnapshotCalled: func() bool { return true }, @@ -403,7 +403,7 @@ func TestPeerAccountsDB_SetSyncerAndStartSnapshotIfNeededMarksActiveDB(t *testin return rootHash, nil }, GetStorageManagerCalled: func() common.StorageManager { - return &testscommon.StorageManagerStub{ + return &storageManager.StorageManagerStub{ ShouldTakeSnapshotCalled: func() bool { return true }, @@ -443,7 +443,7 @@ func TestPeerAccountsDB_SnapshotStateOnAClosedStorageManagerShouldNotMarkActiveD activeDBWasPut := false trieStub := &trieMock.TrieStub{ GetStorageManagerCalled: func() common.StorageManager { - return &testscommon.StorageManagerStub{ + return &storageManager.StorageManagerStub{ ShouldTakeSnapshotCalled: func() bool { return true }, diff --git a/state/storagePruningManager/storagePruningManager.go b/state/storagePruningManager/storagePruningManager.go index 2dcf7bad076..757d04cc9ed 100644 --- a/state/storagePruningManager/storagePruningManager.go +++ b/state/storagePruningManager/storagePruningManager.go @@ -8,7 +8,6 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" 
"github.com/multiversx/mx-chain-go/common" - "github.com/multiversx/mx-chain-go/errors" "github.com/multiversx/mx-chain-go/state" "github.com/multiversx/mx-chain-go/state/storagePruningManager/pruningBuffer" logger "github.com/multiversx/mx-chain-logger-go" @@ -176,7 +175,7 @@ func (spm *storagePruningManager) prune(rootHash []byte, tsm common.StorageManag err := spm.removeFromDb(rootHash, tsm, handler) if err != nil { - if errors.IsClosingError(err) { + if core.IsClosingError(err) { log.Debug("did not remove hash", "rootHash", rootHash, "error", err) return } diff --git a/state/storagePruningManager/storagePruningManager_test.go b/state/storagePruningManager/storagePruningManager_test.go index 28b2473ff2c..1a1a8ace76e 100644 --- a/state/storagePruningManager/storagePruningManager_test.go +++ b/state/storagePruningManager/storagePruningManager_test.go @@ -11,6 +11,7 @@ import ( "github.com/multiversx/mx-chain-go/testscommon" "github.com/multiversx/mx-chain-go/testscommon/hashingMocks" "github.com/multiversx/mx-chain-go/testscommon/statusHandler" + "github.com/multiversx/mx-chain-go/testscommon/storage" "github.com/multiversx/mx-chain-go/trie" "github.com/multiversx/mx-chain-go/trie/hashesHolder" "github.com/stretchr/testify/assert" @@ -24,15 +25,8 @@ func getDefaultTrieAndAccountsDbAndStoragePruningManager() (common.Trie, *state. } marshaller := &testscommon.MarshalizerMock{} hasher := &hashingMocks.HasherMock{} - args := trie.NewTrieStorageManagerArgs{ - MainStorer: testscommon.CreateMemUnit(), - CheckpointsStorer: testscommon.CreateMemUnit(), - Marshalizer: marshaller, - Hasher: hasher, - GeneralConfig: generalCfg, - CheckpointHashesHolder: hashesHolder.NewCheckpointHashesHolder(10000000, testscommon.HashSize), - IdleProvider: &testscommon.ProcessStatusHandlerStub{}, - } + args := storage.GetStorageManagerArgs() + args.CheckpointHashesHolder = hashesHolder.NewCheckpointHashesHolder(10000000, testscommon.HashSize) trieStorage, _ := trie.NewTrieStorageManager(args) tr, _ := trie.NewTrie(trieStorage, marshaller, hasher, 5) ewlArgs := evictionWaitingList.MemoryEvictionWaitingListArgs{ diff --git a/state/syncer/baseAccountsSyncer.go b/state/syncer/baseAccountsSyncer.go index af0ef1fb456..f31575562bb 100644 --- a/state/syncer/baseAccountsSyncer.go +++ b/state/syncer/baseAccountsSyncer.go @@ -31,7 +31,6 @@ type baseAccountsSyncer struct { name string maxHardCapForMissingNodes int checkNodesOnDisk bool - storageMarker trie.StorageMarker userAccountsSyncStatisticsHandler common.SizeSyncStatisticsHandler appStatusHandler core.AppStatusHandler @@ -47,7 +46,6 @@ type ArgsNewBaseAccountsSyncer struct { Hasher hashing.Hasher Marshalizer marshal.Marshalizer TrieStorageManager common.StorageManager - StorageMarker trie.StorageMarker RequestHandler trie.RequestHandler Timeout time.Duration Cacher storage.Cacher diff --git a/state/syncer/baseAccoutnsSyncer_test.go b/state/syncer/baseAccoutnsSyncer_test.go index 12ba52df5fe..de71219d74b 100644 --- a/state/syncer/baseAccoutnsSyncer_test.go +++ b/state/syncer/baseAccoutnsSyncer_test.go @@ -9,7 +9,7 @@ import ( "github.com/multiversx/mx-chain-go/testscommon" "github.com/multiversx/mx-chain-go/testscommon/hashingMocks" "github.com/multiversx/mx-chain-go/testscommon/statusHandler" - "github.com/multiversx/mx-chain-go/trie/storageMarker" + "github.com/multiversx/mx-chain-go/testscommon/storageManager" "github.com/stretchr/testify/require" ) @@ -17,8 +17,7 @@ func getDefaultBaseAccSyncerArgs() syncer.ArgsNewBaseAccountsSyncer { return 
syncer.ArgsNewBaseAccountsSyncer{ Hasher: &hashingMocks.HasherMock{}, Marshalizer: testscommon.MarshalizerMock{}, - TrieStorageManager: &testscommon.StorageManagerStub{}, - StorageMarker: storageMarker.NewDisabledStorageMarker(), + TrieStorageManager: &storageManager.StorageManagerStub{}, RequestHandler: &testscommon.RequestHandlerStub{}, Timeout: time.Second, Cacher: testscommon.NewCacherMock(), diff --git a/state/syncer/errors.go b/state/syncer/errors.go index c42c5fd08f2..5a12356ecf8 100644 --- a/state/syncer/errors.go +++ b/state/syncer/errors.go @@ -4,3 +4,6 @@ import "errors" // ErrNilPubkeyConverter signals that a nil public key converter was provided var ErrNilPubkeyConverter = errors.New("nil pubkey converter") + +// ErrNilStorageMarker signals that a nil storage marker was provided +var ErrNilStorageMarker = errors.New("nil storage marker") diff --git a/state/syncer/export_test.go b/state/syncer/export_test.go index e8fade258ac..cfd917aba66 100644 --- a/state/syncer/export_test.go +++ b/state/syncer/export_test.go @@ -24,3 +24,8 @@ func (u *userAccountsSyncer) SyncAccountDataTries( ) error { return u.syncAccountDataTries(leavesChannels, ctx) } + +// GetNumHandlers - +func (mtnn *missingTrieNodesNotifier) GetNumHandlers() int { + return len(mtnn.handlers) +} diff --git a/state/syncer/missingTrieNodesNotifier.go b/state/syncer/missingTrieNodesNotifier.go new file mode 100644 index 00000000000..545fab32609 --- /dev/null +++ b/state/syncer/missingTrieNodesNotifier.go @@ -0,0 +1,54 @@ +package syncer + +import ( + "sync" + + "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-go/common" +) + +type missingTrieNodesNotifier struct { + handlers []common.StateSyncNotifierSubscriber + mutex sync.RWMutex +} + +// NewMissingTrieNodesNotifier creates a new missing trie nodes notifier +func NewMissingTrieNodesNotifier() *missingTrieNodesNotifier { + return &missingTrieNodesNotifier{ + handlers: make([]common.StateSyncNotifierSubscriber, 0), + mutex: sync.RWMutex{}, + } +} + +// RegisterHandler registers a new handler for the missing trie nodes notifier +func (mtnn *missingTrieNodesNotifier) RegisterHandler(handler common.StateSyncNotifierSubscriber) error { + if check.IfNil(handler) { + return common.ErrNilStateSyncNotifierSubscriber + } + + mtnn.mutex.Lock() + mtnn.handlers = append(mtnn.handlers, handler) + mtnn.mutex.Unlock() + + return nil +} + +// AsyncNotifyMissingTrieNode asynchronously notifies all the registered handlers that a trie node is missing +func (mtnn *missingTrieNodesNotifier) AsyncNotifyMissingTrieNode(hash []byte) { + if common.IsEmptyTrie(hash) { + log.Warn("missingTrieNodesNotifier: empty trie hash") + return + } + + mtnn.mutex.RLock() + defer mtnn.mutex.RUnlock() + + for _, handler := range mtnn.handlers { + go handler.MissingDataTrieNodeFound(hash) + } +} + +// IsInterfaceNil returns true if there is no value under the interface +func (mtnn *missingTrieNodesNotifier) IsInterfaceNil() bool { + return mtnn == nil +} diff --git a/state/syncer/missingTrieNodesNotifier_test.go b/state/syncer/missingTrieNodesNotifier_test.go new file mode 100644 index 00000000000..64682333ff4 --- /dev/null +++ b/state/syncer/missingTrieNodesNotifier_test.go @@ -0,0 +1,63 @@ +package syncer + +import ( + "sync" + "testing" + + "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/testscommon" + "github.com/stretchr/testify/assert" +) + +func TestNewMissingTrieNodesNotifier(t 
*testing.T) { + t.Parallel() + + assert.False(t, check.IfNil(NewMissingTrieNodesNotifier())) +} + +func TestMissingTrieNodesNotifier_RegisterHandler(t *testing.T) { + t.Parallel() + + notifier := NewMissingTrieNodesNotifier() + + err := notifier.RegisterHandler(nil) + assert.Equal(t, common.ErrNilStateSyncNotifierSubscriber, err) + assert.Equal(t, 0, notifier.GetNumHandlers()) + + err = notifier.RegisterHandler(&testscommon.StateSyncNotifierSubscriberStub{}) + assert.Nil(t, err) + + assert.Equal(t, 1, notifier.GetNumHandlers()) +} + +func TestMissingTrieNodesNotifier_AsyncNotifyMissingTrieNode(t *testing.T) { + t.Parallel() + + numMissingDataTrieNodeFoundCalled := 0 + notifier := NewMissingTrieNodesNotifier() + notifier.AsyncNotifyMissingTrieNode([]byte("hash1")) + + wg := sync.WaitGroup{} + wg.Add(2) + mutex := sync.Mutex{} + + err := notifier.RegisterHandler(&testscommon.StateSyncNotifierSubscriberStub{ + MissingDataTrieNodeFoundCalled: func(_ []byte) { + mutex.Lock() + numMissingDataTrieNodeFoundCalled++ + wg.Done() + mutex.Unlock() + }, + }) + assert.Nil(t, err) + + notifier.AsyncNotifyMissingTrieNode(nil) + notifier.AsyncNotifyMissingTrieNode([]byte("hash2")) + notifier.AsyncNotifyMissingTrieNode([]byte("hash3")) + + wg.Wait() + + assert.Equal(t, 1, notifier.GetNumHandlers()) + assert.Equal(t, 2, numMissingDataTrieNodeFoundCalled) +} diff --git a/state/syncer/userAccountsSyncer.go b/state/syncer/userAccountsSyncer.go index fec67fe8fc6..f503849f943 100644 --- a/state/syncer/userAccountsSyncer.go +++ b/state/syncer/userAccountsSyncer.go @@ -13,14 +13,13 @@ import ( "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/common/errChan" - "github.com/multiversx/mx-chain-go/epochStart" "github.com/multiversx/mx-chain-go/process/factory" "github.com/multiversx/mx-chain-go/state" "github.com/multiversx/mx-chain-go/trie" logger "github.com/multiversx/mx-chain-logger-go" ) -var _ epochStart.AccountsDBSyncer = (*userAccountsSyncer)(nil) +var _ state.AccountsDBSyncer = (*userAccountsSyncer)(nil) var log = logger.GetOrCreate("syncer") @@ -87,7 +86,6 @@ func NewUserAccountsSyncer(args ArgsNewUserAccountsSyncer) (*userAccountsSyncer, maxHardCapForMissingNodes: args.MaxHardCapForMissingNodes, trieSyncerVersion: args.TrieSyncerVersion, checkNodesOnDisk: args.CheckNodesOnDisk, - storageMarker: args.StorageMarker, userAccountsSyncStatisticsHandler: args.UserAccountsSyncStatisticsHandler, appStatusHandler: args.AppStatusHandler, } @@ -103,7 +101,11 @@ func NewUserAccountsSyncer(args ArgsNewUserAccountsSyncer) (*userAccountsSyncer, } // SyncAccounts will launch the syncing method to gather all the data needed for userAccounts - it is a blocking method -func (u *userAccountsSyncer) SyncAccounts(rootHash []byte) error { +func (u *userAccountsSyncer) SyncAccounts(rootHash []byte, storageMarker common.StorageMarker) error { + if check.IfNil(storageMarker) { + return ErrNilStorageMarker + } + u.mutex.Lock() defer u.mutex.Unlock() @@ -148,7 +150,7 @@ func (u *userAccountsSyncer) SyncAccounts(rootHash []byte) error { return err } - u.storageMarker.MarkStorerAsSyncedAndActive(u.trieStorageManager) + storageMarker.MarkStorerAsSyncedAndActive(u.trieStorageManager) log.Debug("main trie and data tries synced", "main trie root hash", rootHash, "num data tries", len(u.dataTries)) @@ -166,6 +168,21 @@ func (u *userAccountsSyncer) syncDataTrie(rootHash []byte, address []byte, ctx c u.dataTries[string(rootHash)] = struct{}{} u.syncerMutex.Unlock() + 
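The new missingTrieNodesNotifier above is a small fan-out: subscribers register once and are then notified asynchronously (one goroutine per handler) whenever a missing data-trie node hash is found, with empty trie hashes filtered out. A self-contained sketch of that subscribe/notify shape, using a local subscriber type instead of common.StateSyncNotifierSubscriber:

package main

import (
	"fmt"
	"sync"
)

// subscriber is a simplified stand-in for common.StateSyncNotifierSubscriber.
type subscriber interface {
	MissingDataTrieNodeFound(hash []byte)
}

type notifier struct {
	mu       sync.RWMutex
	handlers []subscriber
}

func (n *notifier) RegisterHandler(h subscriber) {
	n.mu.Lock()
	n.handlers = append(n.handlers, h)
	n.mu.Unlock()
}

// AsyncNotifyMissingTrieNode fans the hash out to every handler on its own goroutine.
func (n *notifier) AsyncNotifyMissingTrieNode(hash []byte) {
	if len(hash) == 0 {
		return // the real notifier also skips empty trie hashes
	}
	n.mu.RLock()
	defer n.mu.RUnlock()
	for _, h := range n.handlers {
		go h.MissingDataTrieNodeFound(hash)
	}
}

type printingSubscriber struct{ wg *sync.WaitGroup }

func (p *printingSubscriber) MissingDataTrieNodeFound(hash []byte) {
	fmt.Printf("missing node reported: %s\n", hash)
	p.wg.Done()
}

func main() {
	wg := &sync.WaitGroup{}
	wg.Add(2)

	n := &notifier{}
	n.RegisterHandler(&printingSubscriber{wg: wg})

	n.AsyncNotifyMissingTrieNode(nil) // ignored
	n.AsyncNotifyMissingTrieNode([]byte("hash1"))
	n.AsyncNotifyMissingTrieNode([]byte("hash2"))

	wg.Wait()
}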
trieSyncer, err := u.createAndStartSyncer(ctx, rootHash, u.checkNodesOnDisk) + if err != nil { + return err + } + + u.updateDataTrieStatistics(trieSyncer, address) + + return nil +} + +func (u *userAccountsSyncer) createAndStartSyncer( + ctx context.Context, + hash []byte, + checkNodesOnDisk bool, +) (trie.TrieSyncer, error) { arg := trie.ArgTrieSyncer{ RequestHandler: u.requestHandler, InterceptedNodes: u.cacher, @@ -177,23 +194,20 @@ func (u *userAccountsSyncer) syncDataTrie(rootHash []byte, address []byte, ctx c TrieSyncStatistics: u.userAccountsSyncStatisticsHandler, TimeoutHandler: u.timeoutHandler, MaxHardCapForMissingNodes: u.maxHardCapForMissingNodes, - CheckNodesOnDisk: u.checkNodesOnDisk, + CheckNodesOnDisk: checkNodesOnDisk, LeavesChan: nil, // not used for data tries } trieSyncer, err := trie.CreateTrieSyncer(arg, u.trieSyncerVersion) if err != nil { - - return err + return nil, err } - err = trieSyncer.StartSyncing(rootHash, ctx) + err = trieSyncer.StartSyncing(hash, ctx) if err != nil { - return err + return nil, err } - u.updateDataTrieStatistics(trieSyncer, address) - - return nil + return trieSyncer, nil } func (u *userAccountsSyncer) updateDataTrieStatistics(trieSyncer trie.TrieSyncer, address []byte) { @@ -321,6 +335,30 @@ func (u *userAccountsSyncer) resetTimeoutHandlerWatchdog() { u.timeoutHandler.ResetWatchdog() } +// MissingDataTrieNodeFound is called whenever a missing data trie node is found. +// This will trigger the sync process for the whole sub trie, starting from the given hash. +func (u *userAccountsSyncer) MissingDataTrieNodeFound(hash []byte) { + defer u.printDataTrieStatistics() + + u.timeoutHandler.ResetWatchdog() + + ctx, cancel := context.WithCancel(context.Background()) + defer func() { + u.cacher.Clear() + cancel() + }() + + trieSyncer, err := u.createAndStartSyncer(ctx, hash, true) + if err != nil { + log.Error("cannot sync trie", "err", err, "hash", hash) + return + } + + u.updateDataTrieStatistics(trieSyncer, hash) + + log.Debug("finished sync data trie", "hash", hash) +} + // IsInterfaceNil returns true if there is no value under the interface func (u *userAccountsSyncer) IsInterfaceNil() bool { return u == nil diff --git a/state/syncer/userAccountsSyncer_test.go b/state/syncer/userAccountsSyncer_test.go index 51184d76d91..8f1ca462be3 100644 --- a/state/syncer/userAccountsSyncer_test.go +++ b/state/syncer/userAccountsSyncer_test.go @@ -17,9 +17,11 @@ import ( "github.com/multiversx/mx-chain-go/state" "github.com/multiversx/mx-chain-go/state/syncer" "github.com/multiversx/mx-chain-go/testscommon" + "github.com/multiversx/mx-chain-go/testscommon/storageManager" "github.com/multiversx/mx-chain-go/trie" "github.com/multiversx/mx-chain-go/trie/hashesHolder" "github.com/multiversx/mx-chain-go/trie/keyBuilder" + "github.com/multiversx/mx-chain-go/trie/storageMarker" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -96,7 +98,7 @@ func getSerializedTrieNode( hasher hashing.Hasher, ) []byte { var serializedLeafNode []byte - tsm := &testscommon.StorageManagerStub{ + tsm := &storageManager.StorageManagerStub{ PutCalled: func(key []byte, val []byte) error { serializedLeafNode = val return nil @@ -113,29 +115,45 @@ func getSerializedTrieNode( func TestUserAccountsSyncer_SyncAccounts(t *testing.T) { t.Parallel() - args := getDefaultUserAccountsSyncerArgs() - args.Timeout = 5 * time.Second + t.Run("nil storage marker", func(t *testing.T) { + t.Parallel() - key := []byte("rootHash") - serializedLeafNode := getSerializedTrieNode(key, 
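MissingDataTrieNodeFound above makes the user accounts syncer itself a subscriber of that notifier: when a missing data-trie node hash is reported, it resets the timeout watchdog, starts a fresh trie syncer for the sub-trie rooted at that hash, and clears its cache when done. A rough sketch of that reaction, with a hypothetical syncSubTrie helper standing in for createAndStartSyncer:

package main

import (
	"context"
	"fmt"
)

// dataTrieSyncer reacts to missing-node notifications by syncing the whole
// sub-trie rooted at the reported hash; syncSubTrie is a hypothetical helper
// playing the role of createAndStartSyncer in the real code.
type dataTrieSyncer struct{}

func (d *dataTrieSyncer) syncSubTrie(ctx context.Context, hash []byte) error {
	// request the node for hash, then its children, until the sub-trie is complete
	fmt.Printf("syncing sub-trie rooted at %s\n", hash)
	return nil
}

// MissingDataTrieNodeFound is the subscriber callback wired into the
// missing trie nodes notifier.
func (d *dataTrieSyncer) MissingDataTrieNodeFound(hash []byte) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	if err := d.syncSubTrie(ctx, hash); err != nil {
		fmt.Printf("cannot sync trie: %v (hash %s)\n", err, hash)
		return
	}
	fmt.Printf("finished sync data trie %s\n", hash)
}

func main() {
	(&dataTrieSyncer{}).MissingDataTrieNodeFound([]byte("missing-node-hash"))
}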
args.Marshalizer, args.Hasher) - itn, err := trie.NewInterceptedTrieNode(serializedLeafNode, args.Hasher) - require.Nil(t, err) + args := getDefaultUserAccountsSyncerArgs() + s, err := syncer.NewUserAccountsSyncer(args) + assert.Nil(t, err) + assert.NotNil(t, s) - args.TrieStorageManager = &testscommon.StorageManagerStub{ - GetCalled: func(b []byte) ([]byte, error) { - return serializedLeafNode, nil - }, - } + err = s.SyncAccounts([]byte("rootHash"), nil) + assert.Equal(t, syncer.ErrNilStorageMarker, err) + }) - cacher := testscommon.NewCacherMock() - cacher.Put(key, itn, 0) - args.Cacher = cacher + t.Run("should work", func(t *testing.T) { + t.Parallel() - s, err := syncer.NewUserAccountsSyncer(args) - require.Nil(t, err) + args := getDefaultUserAccountsSyncerArgs() + args.Timeout = 5 * time.Second - err = s.SyncAccounts(key) - require.Nil(t, err) + key := []byte("rootHash") + serializedLeafNode := getSerializedTrieNode(key, args.Marshalizer, args.Hasher) + itn, err := trie.NewInterceptedTrieNode(serializedLeafNode, args.Hasher) + require.Nil(t, err) + + args.TrieStorageManager = &storageManager.StorageManagerStub{ + GetCalled: func(b []byte) ([]byte, error) { + return serializedLeafNode, nil + }, + } + + cacher := testscommon.NewCacherMock() + cacher.Put(key, itn, 0) + args.Cacher = cacher + + s, err := syncer.NewUserAccountsSyncer(args) + require.Nil(t, err) + + err = s.SyncAccounts(key, storageMarker.NewDisabledStorageMarker()) + require.Nil(t, err) + }) } func getDefaultTrieParameters() (common.StorageManager, marshal.Marshalizer, hashing.Hasher, uint) { @@ -156,6 +174,7 @@ func getDefaultTrieParameters() (common.StorageManager, marshal.Marshalizer, has GeneralConfig: generalCfg, CheckpointHashesHolder: hashesHolder.NewCheckpointHashesHolder(10000000, testscommon.HashSize), IdleProvider: &testscommon.ProcessStatusHandlerStub{}, + Identifier: "identifier", } trieStorageManager, _ := trie.NewTrieStorageManager(args) @@ -195,7 +214,7 @@ func TestUserAccountsSyncer_SyncAccountDataTries(t *testing.T) { itn, err := trie.NewInterceptedTrieNode(serializedLeafNode, args.Hasher) require.Nil(t, err) - args.TrieStorageManager = &testscommon.StorageManagerStub{ + args.TrieStorageManager = &storageManager.StorageManagerStub{ GetCalled: func(b []byte) ([]byte, error) { return serializedLeafNode, nil }, @@ -257,7 +276,7 @@ func TestUserAccountsSyncer_SyncAccountDataTries(t *testing.T) { itn, err := trie.NewInterceptedTrieNode(serializedLeafNode, args.Hasher) require.Nil(t, err) - args.TrieStorageManager = &testscommon.StorageManagerStub{ + args.TrieStorageManager = &storageManager.StorageManagerStub{ GetCalled: func(b []byte) ([]byte, error) { return serializedLeafNode, nil }, @@ -301,6 +320,66 @@ func TestUserAccountsSyncer_SyncAccountDataTries(t *testing.T) { }) } +func TestUserAccountsSyncer_MissingDataTrieNodeFound(t *testing.T) { + t.Parallel() + + numNodesSynced := 0 + numProcessedCalled := 0 + setNumMissingCalled := 0 + args := syncer.ArgsNewUserAccountsSyncer{ + ArgsNewBaseAccountsSyncer: getDefaultBaseAccSyncerArgs(), + ShardId: 0, + Throttler: &mock.ThrottlerStub{}, + AddressPubKeyConverter: &testscommon.PubkeyConverterStub{}, + } + args.TrieStorageManager = &storageManager.StorageManagerStub{ + PutInEpochCalled: func(_ []byte, _ []byte, _ uint32) error { + numNodesSynced++ + return nil + }, + } + args.UserAccountsSyncStatisticsHandler = &testscommon.SizeSyncStatisticsHandlerStub{ + AddNumProcessedCalled: func(value int) { + numProcessedCalled++ + }, + SetNumMissingCalled: func(rootHash 
[]byte, value int) { + setNumMissingCalled++ + assert.Equal(t, 0, value) + }, + } + + var serializedLeafNode []byte + tsm := &storageManager.StorageManagerStub{ + PutCalled: func(key []byte, val []byte) error { + serializedLeafNode = val + return nil + }, + } + + tr, _ := trie.NewTrie(tsm, args.Marshalizer, args.Hasher, 5) + key := []byte("key") + value := []byte("value") + _ = tr.Update(key, value) + rootHash, _ := tr.RootHash() + _ = tr.Commit() + + args.Cacher = &testscommon.CacherStub{ + GetCalled: func(key []byte) (value interface{}, ok bool) { + interceptedNode, _ := trie.NewInterceptedTrieNode(serializedLeafNode, args.Hasher) + return interceptedNode, true + }, + } + + syncer, _ := syncer.NewUserAccountsSyncer(args) + // test that timeout watchdog is reset + time.Sleep(args.Timeout * 2) + syncer.MissingDataTrieNodeFound(rootHash) + + assert.Equal(t, 1, numNodesSynced) + assert.Equal(t, 1, numProcessedCalled) + assert.Equal(t, 1, setNumMissingCalled) +} + func TestUserAccountsSyncer_IsInterfaceNil(t *testing.T) { t.Parallel() diff --git a/state/syncer/validatorAccountsSyncer.go b/state/syncer/validatorAccountsSyncer.go index 856d3ddc2cc..db70df18930 100644 --- a/state/syncer/validatorAccountsSyncer.go +++ b/state/syncer/validatorAccountsSyncer.go @@ -4,13 +4,14 @@ import ( "context" "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-go/common" - "github.com/multiversx/mx-chain-go/epochStart" "github.com/multiversx/mx-chain-go/process/factory" + "github.com/multiversx/mx-chain-go/state" "github.com/multiversx/mx-chain-go/trie/statistics" ) -var _ epochStart.AccountsDBSyncer = (*validatorAccountsSyncer)(nil) +var _ state.AccountsDBSyncer = (*validatorAccountsSyncer)(nil) type validatorAccountsSyncer struct { *baseAccountsSyncer @@ -47,7 +48,6 @@ func NewValidatorAccountsSyncer(args ArgsNewValidatorAccountsSyncer) (*validator maxHardCapForMissingNodes: args.MaxHardCapForMissingNodes, trieSyncerVersion: args.TrieSyncerVersion, checkNodesOnDisk: args.CheckNodesOnDisk, - storageMarker: args.StorageMarker, userAccountsSyncStatisticsHandler: statistics.NewTrieSyncStatistics(), appStatusHandler: args.AppStatusHandler, } @@ -60,7 +60,11 @@ func NewValidatorAccountsSyncer(args ArgsNewValidatorAccountsSyncer) (*validator } // SyncAccounts will launch the syncing method to gather all the data needed for validatorAccounts - it is a blocking method -func (v *validatorAccountsSyncer) SyncAccounts(rootHash []byte) error { +func (v *validatorAccountsSyncer) SyncAccounts(rootHash []byte, storageMarker common.StorageMarker) error { + if check.IfNil(storageMarker) { + return ErrNilStorageMarker + } + v.mutex.Lock() defer v.mutex.Unlock() @@ -84,7 +88,7 @@ func (v *validatorAccountsSyncer) SyncAccounts(rootHash []byte) error { return err } - v.storageMarker.MarkStorerAsSyncedAndActive(v.trieStorageManager) + storageMarker.MarkStorerAsSyncedAndActive(v.trieStorageManager) return nil } diff --git a/state/syncer/validatorAccountsSyncer_test.go b/state/syncer/validatorAccountsSyncer_test.go index cdb33719c4d..b4a025883f1 100644 --- a/state/syncer/validatorAccountsSyncer_test.go +++ b/state/syncer/validatorAccountsSyncer_test.go @@ -8,7 +8,9 @@ import ( "github.com/multiversx/mx-chain-go/state" "github.com/multiversx/mx-chain-go/state/syncer" "github.com/multiversx/mx-chain-go/testscommon" + "github.com/multiversx/mx-chain-go/testscommon/storageManager" "github.com/multiversx/mx-chain-go/trie" + 
"github.com/multiversx/mx-chain-go/trie/storageMarker" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -57,30 +59,50 @@ func TestNewValidatorAccountsSyncer(t *testing.T) { func TestValidatorAccountsSyncer_SyncAccounts(t *testing.T) { t.Parallel() - args := syncer.ArgsNewValidatorAccountsSyncer{ - ArgsNewBaseAccountsSyncer: getDefaultBaseAccSyncerArgs(), - } - key := []byte("rootHash") - serializedLeafNode := getSerializedTrieNode(key, args.Marshalizer, args.Hasher) - itn, err := trie.NewInterceptedTrieNode(serializedLeafNode, args.Hasher) - require.Nil(t, err) - args.TrieStorageManager = &testscommon.StorageManagerStub{ - GetCalled: func(b []byte) ([]byte, error) { - return serializedLeafNode, nil - }, - } + t.Run("nil storage marker", func(t *testing.T) { + t.Parallel() - cacher := testscommon.NewCacherMock() - cacher.Put(key, itn, 0) - args.Cacher = cacher + args := syncer.ArgsNewValidatorAccountsSyncer{ + ArgsNewBaseAccountsSyncer: getDefaultBaseAccSyncerArgs(), + } - v, err := syncer.NewValidatorAccountsSyncer(args) - require.Nil(t, err) + v, err := syncer.NewValidatorAccountsSyncer(args) + require.Nil(t, err) + require.NotNil(t, v) - err = v.SyncAccounts(key) - require.Nil(t, err) + err = v.SyncAccounts(key, nil) + require.Equal(t, syncer.ErrNilStorageMarker, err) + }) + + t.Run("should work", func(t *testing.T) { + t.Parallel() + + args := syncer.ArgsNewValidatorAccountsSyncer{ + ArgsNewBaseAccountsSyncer: getDefaultBaseAccSyncerArgs(), + } + + serializedLeafNode := getSerializedTrieNode(key, args.Marshalizer, args.Hasher) + itn, err := trie.NewInterceptedTrieNode(serializedLeafNode, args.Hasher) + require.Nil(t, err) + + args.TrieStorageManager = &storageManager.StorageManagerStub{ + GetCalled: func(b []byte) ([]byte, error) { + return serializedLeafNode, nil + }, + } + + cacher := testscommon.NewCacherMock() + cacher.Put(key, itn, 0) + args.Cacher = cacher + + v, err := syncer.NewValidatorAccountsSyncer(args) + require.Nil(t, err) + + err = v.SyncAccounts(key, storageMarker.NewDisabledStorageMarker()) + require.Nil(t, err) + }) } func TestValidatorAccountsSyncer_IsInterfaceNil(t *testing.T) { diff --git a/storage/factory/storageServiceFactory.go b/storage/factory/storageServiceFactory.go index df34577ceb1..d1030f1a479 100644 --- a/storage/factory/storageServiceFactory.go +++ b/storage/factory/storageServiceFactory.go @@ -3,6 +3,7 @@ package factory import ( "fmt" "path/filepath" + "time" "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" @@ -51,6 +52,7 @@ type StorageServiceFactory struct { storageType StorageServiceType nodeProcessingMode common.NodeProcessingMode snapshotsEnabled bool + repopulateTokensSupplies bool } // StorageServiceFactoryArgs holds the arguments needed for creating a new storage service factory @@ -67,6 +69,7 @@ type StorageServiceFactoryArgs struct { CreateTrieEpochRootHashStorer bool NodeProcessingMode common.NodeProcessingMode SnapshotsEnabled bool + RepopulateTokensSupplies bool } // NewStorageServiceFactory will return a new instance of StorageServiceFactory @@ -101,6 +104,7 @@ func NewStorageServiceFactory(args StorageServiceFactoryArgs) (*StorageServiceFa storageType: args.StorageType, nodeProcessingMode: args.NodeProcessingMode, snapshotsEnabled: args.SnapshotsEnabled, + repopulateTokensSupplies: args.RepopulateTokensSupplies, }, nil } @@ -291,12 +295,12 @@ func (psf *StorageServiceFactory) CreateForShard() (dataRetriever.StorageService hdrNonceHashDataUnit := 
dataRetriever.ShardHdrNonceHashDataUnit + dataRetriever.UnitType(psf.shardCoordinator.SelfId()) store.AddStorer(hdrNonceHashDataUnit, shardHdrHashNonceUnit) - err = psf.setupDbLookupExtensions(store) + err = psf.setUpDbLookupExtensions(store) if err != nil { return nil, err } - err = psf.setupLogsAndEventsStorer(store) + err = psf.setUpLogsAndEventsStorer(store) if err != nil { return nil, err } @@ -351,12 +355,12 @@ func (psf *StorageServiceFactory) CreateForMeta() (dataRetriever.StorageService, store.AddStorer(hdrNonceHashDataUnit, shardHdrHashNonceUnits[i]) } - err = psf.setupDbLookupExtensions(store) + err = psf.setUpDbLookupExtensions(store) if err != nil { return nil, err } - err = psf.setupLogsAndEventsStorer(store) + err = psf.setUpLogsAndEventsStorer(store) if err != nil { return nil, err } @@ -392,7 +396,7 @@ func (psf *StorageServiceFactory) createTrieUnit( return psf.createTriePruningPersister(pruningStorageArgs) } -func (psf *StorageServiceFactory) setupLogsAndEventsStorer(chainStorer *dataRetriever.ChainStorer) error { +func (psf *StorageServiceFactory) setUpLogsAndEventsStorer(chainStorer *dataRetriever.ChainStorer) error { var txLogsUnit storage.Storer txLogsUnit = storageDisabled.NewStorer() @@ -414,7 +418,7 @@ func (psf *StorageServiceFactory) setupLogsAndEventsStorer(chainStorer *dataRetr return nil } -func (psf *StorageServiceFactory) setupDbLookupExtensions(chainStorer *dataRetriever.ChainStorer) error { +func (psf *StorageServiceFactory) setUpDbLookupExtensions(chainStorer *dataRetriever.ChainStorer) error { if !psf.generalConfig.DbLookupExtensions.Enabled { return nil } @@ -477,20 +481,41 @@ func (psf *StorageServiceFactory) setupDbLookupExtensions(chainStorer *dataRetri chainStorer.AddStorer(dataRetriever.EpochByHashUnit, epochByHashUnit) - esdtSuppliesConfig := psf.generalConfig.DbLookupExtensions.ESDTSuppliesStorageConfig - esdtSuppliesDbConfig := GetDBFromConfig(esdtSuppliesConfig.DB) - esdtSuppliesDbConfig.FilePath = psf.pathManager.PathForStatic(shardID, esdtSuppliesConfig.DB.FilePath) - esdtSuppliesCacherConfig := GetCacherFromConfig(esdtSuppliesConfig.Cache) - esdtSuppliesUnit, err := storageunit.NewStorageUnitFromConf(esdtSuppliesCacherConfig, esdtSuppliesDbConfig) + return psf.setUpEsdtSuppliesStorer(chainStorer, shardID) +} + +func (psf *StorageServiceFactory) setUpEsdtSuppliesStorer(chainStorer *dataRetriever.ChainStorer, shardIDStr string) error { + esdtSuppliesUnit, err := psf.createEsdtSuppliesUnit(shardIDStr) if err != nil { return fmt.Errorf("%w for DbLookupExtensions.ESDTSuppliesStorageConfig", err) } - chainStorer.AddStorer(dataRetriever.ESDTSuppliesUnit, esdtSuppliesUnit) + if psf.repopulateTokensSupplies { + // if the flag is set, then we need to clear the storer at this point. 
The easiest way is to destroy it and then create it again + err = esdtSuppliesUnit.DestroyUnit() + if err != nil { + return err + } + + time.Sleep(time.Second) // making sure the unit was properly closed and destroyed + esdtSuppliesUnit, err = psf.createEsdtSuppliesUnit(shardIDStr) + if err != nil { + return err + } + } + chainStorer.AddStorer(dataRetriever.ESDTSuppliesUnit, esdtSuppliesUnit) return nil } +func (psf *StorageServiceFactory) createEsdtSuppliesUnit(shardIDStr string) (storage.Storer, error) { + esdtSuppliesConfig := psf.generalConfig.DbLookupExtensions.ESDTSuppliesStorageConfig + esdtSuppliesDbConfig := GetDBFromConfig(esdtSuppliesConfig.DB) + esdtSuppliesDbConfig.FilePath = psf.pathManager.PathForStatic(shardIDStr, esdtSuppliesConfig.DB.FilePath) + esdtSuppliesCacherConfig := GetCacherFromConfig(esdtSuppliesConfig.Cache) + return storageunit.NewStorageUnitFromConf(esdtSuppliesCacherConfig, esdtSuppliesDbConfig) +} + func (psf *StorageServiceFactory) createPruningStorerArgs( storageConfig config.StorageConfig, customDatabaseRemover storage.CustomDatabaseRemoverHandler, diff --git a/testscommon/alteredAccountsProviderStub.go b/testscommon/alteredAccountsProviderStub.go index 5772bdac68b..86e8947ab5e 100644 --- a/testscommon/alteredAccountsProviderStub.go +++ b/testscommon/alteredAccountsProviderStub.go @@ -1,17 +1,18 @@ package testscommon import ( + "github.com/multiversx/mx-chain-core-go/data/alteredAccount" "github.com/multiversx/mx-chain-core-go/data/outport" "github.com/multiversx/mx-chain-go/outport/process/alteredaccounts/shared" ) // AlteredAccountsProviderStub - type AlteredAccountsProviderStub struct { - ExtractAlteredAccountsFromPoolCalled func(txPool *outport.Pool, options shared.AlteredAccountsOptions) (map[string]*outport.AlteredAccount, error) + ExtractAlteredAccountsFromPoolCalled func(txPool *outport.TransactionPool, options shared.AlteredAccountsOptions) (map[string]*alteredAccount.AlteredAccount, error) } // ExtractAlteredAccountsFromPool - -func (a *AlteredAccountsProviderStub) ExtractAlteredAccountsFromPool(txPool *outport.Pool, options shared.AlteredAccountsOptions) (map[string]*outport.AlteredAccount, error) { +func (a *AlteredAccountsProviderStub) ExtractAlteredAccountsFromPool(txPool *outport.TransactionPool, options shared.AlteredAccountsOptions) (map[string]*alteredAccount.AlteredAccount, error) { if a.ExtractAlteredAccountsFromPoolCalled != nil { return a.ExtractAlteredAccountsFromPoolCalled(txPool, options) } diff --git a/testscommon/blockProcessingCutoffStub.go b/testscommon/blockProcessingCutoffStub.go new file mode 100644 index 00000000000..4082d484871 --- /dev/null +++ b/testscommon/blockProcessingCutoffStub.go @@ -0,0 +1,32 @@ +package testscommon + +import ( + "github.com/multiversx/mx-chain-core-go/data" +) + +// BlockProcessingCutoffStub - +type BlockProcessingCutoffStub struct { + HandleProcessErrorCutoffCalled func(header data.HeaderHandler) error + HandlePauseCutoffCalled func(header data.HeaderHandler) +} + +// HandleProcessErrorCutoff - +func (b *BlockProcessingCutoffStub) HandleProcessErrorCutoff(header data.HeaderHandler) error { + if b.HandleProcessErrorCutoffCalled != nil { + return b.HandleProcessErrorCutoffCalled(header) + } + + return nil +} + +// HandlePauseCutoff - +func (b *BlockProcessingCutoffStub) HandlePauseCutoff(header data.HeaderHandler) { + if b.HandlePauseCutoffCalled != nil { + b.HandlePauseCutoffCalled(header) + } +} + +// IsInterfaceNil - +func (b *BlockProcessingCutoffStub) IsInterfaceNil() bool { + return b == 
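When RepopulateTokensSupplies is enabled, the storage factory above clears the ESDT supplies storer by destroying the freshly created unit and building it again from the same config. A small stand-alone sketch of that clear-by-recreate idea, using a hypothetical in-memory storer instead of the real storage unit:

package main

import "fmt"

// memStorer is a hypothetical stand-in for the ESDT supplies storage unit.
type memStorer struct{ data map[string][]byte }

func newMemStorer() *memStorer { return &memStorer{data: map[string][]byte{}} }

func (m *memStorer) Put(key, val []byte) { m.data[string(key)] = val }

// DestroyUnit drops everything the storer holds, mirroring the DestroyUnit
// call made before the unit is created again.
func (m *memStorer) DestroyUnit() { m.data = nil }

func main() {
	repopulateTokensSupplies := true

	storer := newMemStorer()
	storer.Put([]byte("TOKEN-abcdef"), []byte("old supply"))

	if repopulateTokensSupplies {
		// start from scratch: destroy the unit, then recreate it from the same config
		storer.DestroyUnit()
		storer = newMemStorer()
	}

	fmt.Printf("entries after reset: %d\n", len(storer.data)) // 0
}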
nil +} diff --git a/testscommon/components/components.go b/testscommon/components/components.go index a2344fce33a..e713d3ed758 100644 --- a/testscommon/components/components.go +++ b/testscommon/components/components.go @@ -12,6 +12,7 @@ import ( commonFactory "github.com/multiversx/mx-chain-go/common/factory" "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/consensus/spos" + "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/epochStart/bootstrap/disabled" "github.com/multiversx/mx-chain-go/factory" bootstrapComp "github.com/multiversx/mx-chain-go/factory/bootstrap" @@ -35,12 +36,10 @@ import ( "github.com/multiversx/mx-chain-go/state" "github.com/multiversx/mx-chain-go/testscommon" "github.com/multiversx/mx-chain-go/testscommon/dblookupext" - "github.com/multiversx/mx-chain-go/testscommon/hashingMocks" "github.com/multiversx/mx-chain-go/testscommon/shardingMocks" statusHandlerMock "github.com/multiversx/mx-chain-go/testscommon/statusHandler" + "github.com/multiversx/mx-chain-go/testscommon/storage" "github.com/multiversx/mx-chain-go/trie" - trieFactory "github.com/multiversx/mx-chain-go/trie/factory" - "github.com/multiversx/mx-chain-go/trie/hashesHolder" logger "github.com/multiversx/mx-chain-logger-go" wasmConfig "github.com/multiversx/mx-chain-vm-v1_4-go/config" "github.com/stretchr/testify/require" @@ -161,6 +160,7 @@ func GetConsensusArgs(shardCoordinator sharding.Coordinator) consensusComp.Conse return consensusComp.ConsensusComponentsFactoryArgs{ Config: testscommon.GetGeneralConfig(), + FlagsConfig: config.ContextFlagsConfig{}, BootstrapRoundIndex: 0, CoreComponents: coreComponents, NetworkComponents: networkComponents, @@ -218,7 +218,7 @@ func GetDataArgs(coreComponents factory.CoreComponentsHolder, shardCoordinator s CurrentEpoch: 0, CreateTrieEpochRootHashStorer: false, NodeProcessingMode: common.Normal, - SnapshotsEnabled: false, + FlagsConfigs: config.ContextFlagsConfig{}, } } @@ -318,34 +318,22 @@ func GetNetworkFactoryArgs() networkComp.NetworkComponentsFactoryArgs { } } -func getNewTrieStorageManagerArgs() trie.NewTrieStorageManagerArgs { - return trie.NewTrieStorageManagerArgs{ - MainStorer: testscommon.CreateMemUnit(), - CheckpointsStorer: testscommon.CreateMemUnit(), - Marshalizer: &mock.MarshalizerMock{}, - Hasher: &hashingMocks.HasherMock{}, - GeneralConfig: config.TrieStorageManagerConfig{SnapshotsGoroutineNum: 1}, - CheckpointHashesHolder: hashesHolder.NewCheckpointHashesHolder(10, 32), - IdleProvider: &testscommon.ProcessStatusHandlerStub{}, - } -} - // GetStateFactoryArgs - func GetStateFactoryArgs(coreComponents factory.CoreComponentsHolder) stateComp.StateComponentsFactoryArgs { - tsm, _ := trie.NewTrieStorageManager(getNewTrieStorageManagerArgs()) + tsm, _ := trie.NewTrieStorageManager(storage.GetStorageManagerArgs()) storageManagerUser, _ := trie.NewTrieStorageManagerWithoutPruning(tsm) - tsm, _ = trie.NewTrieStorageManager(getNewTrieStorageManagerArgs()) + tsm, _ = trie.NewTrieStorageManager(storage.GetStorageManagerArgs()) storageManagerPeer, _ := trie.NewTrieStorageManagerWithoutPruning(tsm) trieStorageManagers := make(map[string]common.StorageManager) - trieStorageManagers[trieFactory.UserAccountTrie] = storageManagerUser - trieStorageManagers[trieFactory.PeerAccountTrie] = storageManagerPeer + trieStorageManagers[dataRetriever.UserAccountsUnit.String()] = storageManagerUser + trieStorageManagers[dataRetriever.PeerAccountsUnit.String()] = storageManagerPeer triesHolder := 
state.NewDataTriesHolder() trieUsers, _ := trie.NewTrie(storageManagerUser, coreComponents.InternalMarshalizer(), coreComponents.Hasher(), 5) triePeers, _ := trie.NewTrie(storageManagerPeer, coreComponents.InternalMarshalizer(), coreComponents.Hasher(), 5) - triesHolder.Put([]byte(trieFactory.UserAccountTrie), trieUsers) - triesHolder.Put([]byte(trieFactory.PeerAccountTrie), triePeers) + triesHolder.Put([]byte(dataRetriever.UserAccountsUnit.String()), trieUsers) + triesHolder.Put([]byte(dataRetriever.PeerAccountsUnit.String()), triePeers) stateComponentsFactoryArgs := stateComp.StateComponentsFactoryArgs{ Config: GetGeneralConfig(), @@ -495,10 +483,10 @@ func GetProcessArgs( return initialAccounts }, - GenerateInitialTransactionsCalled: func(shardCoordinator sharding.Coordinator, initialIndexingData map[uint32]*genesis.IndexingData) ([]*block.MiniBlock, map[uint32]*outport.Pool, error) { - txsPool := make(map[uint32]*outport.Pool) + GenerateInitialTransactionsCalled: func(shardCoordinator sharding.Coordinator, initialIndexingData map[uint32]*genesis.IndexingData) ([]*block.MiniBlock, map[uint32]*outport.TransactionPool, error) { + txsPool := make(map[uint32]*outport.TransactionPool) for i := uint32(0); i < shardCoordinator.NumberOfShards(); i++ { - txsPool[i] = &outport.Pool{} + txsPool[i] = &outport.TransactionPool{} } return make([]*block.MiniBlock, 4), txsPool, nil @@ -538,8 +526,9 @@ func GetProcessArgs( MinQuorum: 0.5, MinPassThreshold: 0.5, MinVetoThreshold: 0.5, + LostProposalFee: "1", }, - ChangeConfigAddress: "erd1vxy22x0fj4zv6hktmydg8vpfh6euv02cz4yg0aaws6rrad5a5awqgqky80", + OwnerAddress: "erd1vxy22x0fj4zv6hktmydg8vpfh6euv02cz4yg0aaws6rrad5a5awqgqky80", }, StakingSystemSCConfig: config.StakingSystemSCConfig{ GenesisNodePrice: "2500000000000000000000", @@ -564,9 +553,10 @@ func GetProcessArgs( MaxServiceFee: 100, }, }, - Version: "v1.0.0", - HistoryRepo: &dblookupext.HistoryRepositoryStub{}, - SnapshotsEnabled: false, + HistoryRepo: &dblookupext.HistoryRepositoryStub{}, + FlagsConfig: config.ContextFlagsConfig{ + Version: "v1.0.0", + }, } } @@ -591,6 +581,11 @@ func GetStatusComponents( Password: elasticPassword, EnabledIndexes: []string{"transactions", "blocks"}, }, + EventNotifierConnector: config.EventNotifierConfig{ + Enabled: false, + ProxyUrl: "https://localhost:5000", + MarshallerType: "json", + }, }, EconomicsConfig: config.EconomicsConfig{}, ShardCoordinator: shardCoordinator, @@ -647,8 +642,17 @@ func GetStatusComponentsFactoryArgsAndProcessComponents(shardCoordinator shardin Password: elasticPassword, EnabledIndexes: []string{"transactions", "blocks"}, }, - WebSocketConnector: config.WebSocketDriverConfig{ - MarshallerType: "json", + EventNotifierConnector: config.EventNotifierConfig{ + Enabled: false, + ProxyUrl: "http://localhost:5000", + RequestTimeoutSec: 30, + MarshallerType: "json", + }, + HostDriverConfig: config.HostDriverConfig{ + MarshallerType: "json", + Mode: "client", + URL: "localhost:12345", + RetryDurationInSec: 1, }, }, EconomicsConfig: config.EconomicsConfig{}, diff --git a/testscommon/components/configs.go b/testscommon/components/configs.go index 6d44e383818..55e806aab45 100644 --- a/testscommon/components/configs.go +++ b/testscommon/components/configs.go @@ -142,6 +142,11 @@ func GetGeneralConfig() config.Config { "erd1fpkcgel4gcmh8zqqdt043yfcn5tyx8373kg6q2qmkxzu4dqamc0swts65c", //shard 1 "erd1najnxxweyw6plhg8efql330nttrj6l5cf87wqsuym85s9ha0hmdqnqgenp", //shard 2 }, + DNSV2Addresses: []string{ + 
"erd1he8wwxn4az3j82p7wwqsdk794dm7hcrwny6f8dfegkfla34udx7qrf7xje", //shard 0 + "erd1fpkcgel4gcmh8zqqdt043yfcn5tyx8373kg6q2qmkxzu4dqamc0swts65c", //shard 1 + "erd1najnxxweyw6plhg8efql330nttrj6l5cf87wqsuym85s9ha0hmdqnqgenp", //shard 2 + }, MaxNumAddressesInTransferRole: 100, }, EpochStartConfig: GetEpochStartConfig(), diff --git a/testscommon/components/default.go b/testscommon/components/default.go index 6cb28f54616..d90406199db 100644 --- a/testscommon/components/default.go +++ b/testscommon/components/default.go @@ -5,6 +5,7 @@ import ( crypto "github.com/multiversx/mx-chain-crypto-go" "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/factory/mock" "github.com/multiversx/mx-chain-go/sharding" "github.com/multiversx/mx-chain-go/testscommon" @@ -13,13 +14,14 @@ import ( dataRetrieverTests "github.com/multiversx/mx-chain-go/testscommon/dataRetriever" "github.com/multiversx/mx-chain-go/testscommon/economicsmocks" epochNotifierMock "github.com/multiversx/mx-chain-go/testscommon/epochNotifier" + "github.com/multiversx/mx-chain-go/testscommon/factory" "github.com/multiversx/mx-chain-go/testscommon/nodeTypeProviderMock" "github.com/multiversx/mx-chain-go/testscommon/p2pmocks" "github.com/multiversx/mx-chain-go/testscommon/shardingMocks" stateMock "github.com/multiversx/mx-chain-go/testscommon/state" "github.com/multiversx/mx-chain-go/testscommon/storage" + "github.com/multiversx/mx-chain-go/testscommon/storageManager" trieMock "github.com/multiversx/mx-chain-go/testscommon/trie" - trieFactory "github.com/multiversx/mx-chain-go/trie/factory" ) // GetDefaultCoreComponents - @@ -86,16 +88,17 @@ func GetDefaultNetworkComponents() *mock.NetworkComponentsMock { } // GetDefaultStateComponents - -func GetDefaultStateComponents() *testscommon.StateComponentsMock { - return &testscommon.StateComponentsMock{ +func GetDefaultStateComponents() *factory.StateComponentsMock { + return &factory.StateComponentsMock{ PeersAcc: &stateMock.AccountsStub{}, Accounts: &stateMock.AccountsStub{}, Tries: &trieMock.TriesHolderStub{}, StorageManagers: map[string]common.StorageManager{ - "0": &testscommon.StorageManagerStub{}, - trieFactory.UserAccountTrie: &testscommon.StorageManagerStub{}, - trieFactory.PeerAccountTrie: &testscommon.StorageManagerStub{}, + "0": &storageManager.StorageManagerStub{}, + dataRetriever.UserAccountsUnit.String(): &storageManager.StorageManagerStub{}, + dataRetriever.PeerAccountsUnit.String(): &storageManager.StorageManagerStub{}, }, + MissingNodesNotifier: &testscommon.MissingTrieNodesNotifierStub{}, } } diff --git a/testscommon/enableEpochsHandlerStub.go b/testscommon/enableEpochsHandlerStub.go index d076a3676d3..4fb26b750f8 100644 --- a/testscommon/enableEpochsHandlerStub.go +++ b/testscommon/enableEpochsHandlerStub.go @@ -120,6 +120,8 @@ type EnableEpochsHandlerStub struct { IsSetGuardianEnabledField bool IsKeepExecOrderOnCreatedSCRsEnabledField bool IsMultiClaimOnDelegationEnabledField bool + IsChangeUsernameEnabledField bool + IsConsistentTokensValuesLengthCheckEnabledField bool } // ResetPenalizedTooMuchGasFlag - @@ -1041,6 +1043,22 @@ func (stub *EnableEpochsHandlerStub) IsMultiClaimOnDelegationEnabled() bool { return stub.IsMultiClaimOnDelegationEnabledField } +// IsChangeUsernameEnabled - +func (stub *EnableEpochsHandlerStub) IsChangeUsernameEnabled() bool { + stub.RLock() + defer stub.RUnlock() + + return stub.IsChangeUsernameEnabledField +} + +// IsConsistentTokensValuesLengthCheckEnabled - +func (stub 
*EnableEpochsHandlerStub) IsConsistentTokensValuesLengthCheckEnabled() bool { + stub.RLock() + defer stub.RUnlock() + + return stub.IsConsistentTokensValuesLengthCheckEnabledField +} + // IsInterfaceNil - func (stub *EnableEpochsHandlerStub) IsInterfaceNil() bool { return stub == nil diff --git a/testscommon/stateComponentsMock.go b/testscommon/factory/stateComponentsMock.go similarity index 50% rename from testscommon/stateComponentsMock.go rename to testscommon/factory/stateComponentsMock.go index 15b11bb4ad0..5aa541dffa0 100644 --- a/testscommon/stateComponentsMock.go +++ b/testscommon/factory/stateComponentsMock.go @@ -1,18 +1,34 @@ -package testscommon +package factory import ( "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/factory" "github.com/multiversx/mx-chain-go/state" ) // StateComponentsMock - type StateComponentsMock struct { - PeersAcc state.AccountsAdapter - Accounts state.AccountsAdapter - AccountsAPI state.AccountsAdapter - AccountsRepo state.AccountsRepository - Tries common.TriesHolder - StorageManagers map[string]common.StorageManager + PeersAcc state.AccountsAdapter + Accounts state.AccountsAdapter + AccountsAPI state.AccountsAdapter + AccountsAdapterAPICalled func() state.AccountsAdapter + AccountsRepo state.AccountsRepository + Tries common.TriesHolder + StorageManagers map[string]common.StorageManager + MissingNodesNotifier common.MissingTrieNodesNotifier +} + +// NewStateComponentsMockFromRealComponent - +func NewStateComponentsMockFromRealComponent(stateComponents factory.StateComponentsHolder) *StateComponentsMock { + return &StateComponentsMock{ + PeersAcc: stateComponents.PeerAccounts(), + Accounts: stateComponents.AccountsAdapter(), + AccountsAPI: stateComponents.AccountsAdapterAPI(), + AccountsRepo: stateComponents.AccountsRepository(), + Tries: stateComponents.TriesContainer(), + StorageManagers: stateComponents.TrieStorageManagers(), + MissingNodesNotifier: stateComponents.MissingTrieNodesNotifier(), + } } // Create - @@ -42,6 +58,9 @@ func (scm *StateComponentsMock) AccountsAdapter() state.AccountsAdapter { // AccountsAdapterAPI - func (scm *StateComponentsMock) AccountsAdapterAPI() state.AccountsAdapter { + if scm.AccountsAdapterAPICalled != nil { + return scm.AccountsAdapterAPICalled() + } return scm.AccountsAPI } @@ -65,6 +84,11 @@ func (scm *StateComponentsMock) String() string { return "StateComponentsMock" } +// MissingTrieNodesNotifier - +func (scm *StateComponentsMock) MissingTrieNodesNotifier() common.MissingTrieNodesNotifier { + return scm.MissingNodesNotifier +} + // IsInterfaceNil - func (scm *StateComponentsMock) IsInterfaceNil() bool { return scm == nil diff --git a/testscommon/generalConfig.go b/testscommon/generalConfig.go index 5c246138c50..1a653313e0e 100644 --- a/testscommon/generalConfig.go +++ b/testscommon/generalConfig.go @@ -430,6 +430,11 @@ func GetGeneralConfig() config.Config { "erd1najnxxweyw6plhg8efql330nttrj6l5cf87wqsuym85s9ha0hmdqnqgenp", //shard 2 }, MaxNumAddressesInTransferRole: 100, + DNSV2Addresses: []string{ + "erd1he8wwxn4az3j82p7wwqsdk794dm7hcrwny6f8dfegkfla34udx7qrf7xje", //shard 0 + "erd1fpkcgel4gcmh8zqqdt043yfcn5tyx8373kg6q2qmkxzu4dqamc0swts65c", //shard 1 + "erd1najnxxweyw6plhg8efql330nttrj6l5cf87wqsuym85s9ha0hmdqnqgenp", //shard 2 + }, }, } } diff --git a/testscommon/genesisMocks/accountsParserStub.go b/testscommon/genesisMocks/accountsParserStub.go index 412fa70d817..04182b04ff6 100644 --- a/testscommon/genesisMocks/accountsParserStub.go +++ 
b/testscommon/genesisMocks/accountsParserStub.go @@ -16,7 +16,7 @@ type AccountsParserStub struct { InitialAccountsCalled func() []genesis.InitialAccountHandler GetTotalStakedForDelegationAddressCalled func(delegationAddress string) *big.Int GetInitialAccountsForDelegatedCalled func(addressBytes []byte) []genesis.InitialAccountHandler - GenerateInitialTransactionsCalled func(shardCoordinator sharding.Coordinator, initialIndexingData map[uint32]*genesis.IndexingData) ([]*block.MiniBlock, map[uint32]*outport.Pool, error) + GenerateInitialTransactionsCalled func(shardCoordinator sharding.Coordinator, initialIndexingData map[uint32]*genesis.IndexingData) ([]*block.MiniBlock, map[uint32]*outport.TransactionPool, error) GenesisMintingAddressCalled func() string } @@ -75,12 +75,12 @@ func (aps *AccountsParserStub) InitialAccounts() []genesis.InitialAccountHandler } // GenerateInitialTransactions - -func (aps *AccountsParserStub) GenerateInitialTransactions(shardCoordinator sharding.Coordinator, initialIndexingData map[uint32]*genesis.IndexingData) ([]*block.MiniBlock, map[uint32]*outport.Pool, error) { +func (aps *AccountsParserStub) GenerateInitialTransactions(shardCoordinator sharding.Coordinator, initialIndexingData map[uint32]*genesis.IndexingData) ([]*block.MiniBlock, map[uint32]*outport.TransactionPool, error) { if aps.GenerateInitialTransactionsCalled != nil { return aps.GenerateInitialTransactionsCalled(shardCoordinator, initialIndexingData) } - return make([]*block.MiniBlock, 0), make(map[uint32]*outport.Pool), nil + return make([]*block.MiniBlock, 0), make(map[uint32]*outport.TransactionPool), nil } // IsInterfaceNil - diff --git a/testscommon/integrationtests/factory.go b/testscommon/integrationtests/factory.go index 97ab0e858a0..3a1302d43b5 100644 --- a/testscommon/integrationtests/factory.go +++ b/testscommon/integrationtests/factory.go @@ -14,8 +14,8 @@ import ( "github.com/multiversx/mx-chain-go/storage/storageunit" "github.com/multiversx/mx-chain-go/testscommon" "github.com/multiversx/mx-chain-go/testscommon/statusHandler" + testcommonStorage "github.com/multiversx/mx-chain-go/testscommon/storage" "github.com/multiversx/mx-chain-go/trie" - "github.com/multiversx/mx-chain-go/trie/hashesHolder" ) // TestMarshalizer - @@ -84,20 +84,12 @@ func CreateAccountsDB(db storage.Storer) *state.AccountsDB { HashesSize: 10000, } ewl, _ := evictionWaitingList.NewMemoryEvictionWaitingList(ewlArgs) - generalCfg := config.TrieStorageManagerConfig{ - PruningBufferLen: 1000, - SnapshotsBufferLen: 10, - SnapshotsGoroutineNum: 1, - } - args := trie.NewTrieStorageManagerArgs{ - MainStorer: db, - CheckpointsStorer: CreateMemUnit(), - Marshalizer: TestMarshalizer, - Hasher: TestHasher, - GeneralConfig: generalCfg, - CheckpointHashesHolder: hashesHolder.NewCheckpointHashesHolder(10000000, uint64(TestHasher.Size())), - IdleProvider: &testscommon.ProcessStatusHandlerStub{}, - } + + args := testcommonStorage.GetStorageManagerArgs() + args.MainStorer = db + args.Marshalizer = TestMarshalizer + args.Hasher = TestHasher + trieStorage, _ := trie.NewTrieStorageManager(args) tr, _ := trie.NewTrie(trieStorage, TestMarshalizer, TestHasher, MaxTrieLevelInMemory) diff --git a/testscommon/mainFactoryMocks/bootstrapComponentsStub.go b/testscommon/mainFactoryMocks/bootstrapComponentsStub.go index f76a59c7150..8c9d56dca7b 100644 --- a/testscommon/mainFactoryMocks/bootstrapComponentsStub.go +++ b/testscommon/mainFactoryMocks/bootstrapComponentsStub.go @@ -14,6 +14,7 @@ type BootstrapComponentsStub struct { BootstrapParams 
factory.BootstrapParamsHolder NodeRole core.NodeType ShCoordinator sharding.Coordinator + ShardCoordinatorCalled func() sharding.Coordinator HdrVersionHandler nodeFactory.HeaderVersionHandler VersionedHdrFactory nodeFactory.VersionedHeaderFactory HdrIntegrityVerifier nodeFactory.HeaderIntegrityVerifierHandler @@ -52,6 +53,9 @@ func (bcs *BootstrapComponentsStub) NodeType() core.NodeType { // ShardCoordinator - func (bcs *BootstrapComponentsStub) ShardCoordinator() sharding.Coordinator { + if bcs.ShardCoordinatorCalled != nil { + return bcs.ShardCoordinatorCalled() + } return bcs.ShCoordinator } diff --git a/testscommon/memDbMock.go b/testscommon/memDbMock.go index 365ffffba8b..7caa6ad947f 100644 --- a/testscommon/memDbMock.go +++ b/testscommon/memDbMock.go @@ -10,10 +10,11 @@ import ( // MemDbMock represents the memory database storage. It holds a map of key value pairs // and a mutex to handle concurrent accesses to the map type MemDbMock struct { - db map[string][]byte - mutx sync.RWMutex - PutCalled func(key, val []byte) error - GetCalled func(key []byte) ([]byte, error) + db map[string][]byte + mutx sync.RWMutex + PutCalled func(key, val []byte) error + GetCalled func(key []byte) ([]byte, error) + GetIdentifierCalled func() string } // NewMemDbMock creates a new memorydb object @@ -117,6 +118,15 @@ func (s *MemDbMock) RangeKeys(handler func(key []byte, value []byte) bool) { } } +// GetIdentifier returns the identifier of the storage medium +func (s *MemDbMock) GetIdentifier() string { + if s.GetIdentifierCalled != nil { + return s.GetIdentifierCalled() + } + + return "" +} + // IsInterfaceNil returns true if there is no value under the interface func (s *MemDbMock) IsInterfaceNil() bool { return s == nil diff --git a/testscommon/missingTrieNodesNotifierStub.go b/testscommon/missingTrieNodesNotifierStub.go new file mode 100644 index 00000000000..484cd48d797 --- /dev/null +++ b/testscommon/missingTrieNodesNotifierStub.go @@ -0,0 +1,30 @@ +package testscommon + +import "github.com/multiversx/mx-chain-go/common" + +// MissingTrieNodesNotifierStub - +type MissingTrieNodesNotifierStub struct { + RegisterHandlerCalled func(handler common.StateSyncNotifierSubscriber) error + AsyncNotifyMissingTrieNodeCalled func(hash []byte) +} + +// RegisterHandler - +func (mtnns *MissingTrieNodesNotifierStub) RegisterHandler(handler common.StateSyncNotifierSubscriber) error { + if mtnns.RegisterHandlerCalled != nil { + return mtnns.RegisterHandlerCalled(handler) + } + + return nil +} + +// AsyncNotifyMissingTrieNode - +func (mtnns *MissingTrieNodesNotifierStub) AsyncNotifyMissingTrieNode(hash []byte) { + if mtnns.AsyncNotifyMissingTrieNodeCalled != nil { + mtnns.AsyncNotifyMissingTrieNodeCalled(hash) + } +} + +// IsInterfaceNil returns true if there is no value under the interface +func (mtnns *MissingTrieNodesNotifierStub) IsInterfaceNil() bool { + return mtnns == nil +} diff --git a/testscommon/outport/blockContainerStub.go b/testscommon/outport/blockContainerStub.go new file mode 100644 index 00000000000..cec28498b2f --- /dev/null +++ b/testscommon/outport/blockContainerStub.go @@ -0,0 +1,20 @@ +package outport + +import ( + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/data/block" +) + +// BlockContainerStub - +type BlockContainerStub struct { + GetCalled func(headerType core.HeaderType) (block.EmptyBlockCreator, error) +} + +// Get - +func (bcs *BlockContainerStub) Get(headerType core.HeaderType) (block.EmptyBlockCreator, error) { + if bcs.GetCalled != nil { + 
return bcs.GetCalled(headerType) + } + + return nil, nil +} diff --git a/testscommon/outport/outportDataProviderStub.go b/testscommon/outport/outportDataProviderStub.go index b8cae58fd6e..9bf8e95c7c6 100644 --- a/testscommon/outport/outportDataProviderStub.go +++ b/testscommon/outport/outportDataProviderStub.go @@ -9,13 +9,13 @@ import ( type OutportDataProviderStub struct { PrepareOutportSaveBlockDataCalled func( arg process.ArgPrepareOutportSaveBlockData, - ) (*outportcore.ArgsSaveBlockData, error) + ) (*outportcore.OutportBlockWithHeaderAndBody, error) } // PrepareOutportSaveBlockData - func (a *OutportDataProviderStub) PrepareOutportSaveBlockData( arg process.ArgPrepareOutportSaveBlockData, -) (*outportcore.ArgsSaveBlockData, error) { +) (*outportcore.OutportBlockWithHeaderAndBody, error) { if a.PrepareOutportSaveBlockDataCalled != nil { return a.PrepareOutportSaveBlockDataCalled(arg) } diff --git a/testscommon/outport/outportStub.go b/testscommon/outport/outportStub.go index faa200ef05f..e9cd2649d3e 100644 --- a/testscommon/outport/outportStub.go +++ b/testscommon/outport/outportStub.go @@ -1,37 +1,38 @@ package outport import ( - "github.com/multiversx/mx-chain-core-go/data" outportcore "github.com/multiversx/mx-chain-core-go/data/outport" "github.com/multiversx/mx-chain-go/outport" ) // OutportStub is a mock implementation fot the OutportHandler interface type OutportStub struct { - SaveBlockCalled func(args *outportcore.ArgsSaveBlockData) - SaveValidatorsRatingCalled func(index string, validatorsInfo []*outportcore.ValidatorRatingInfo) - SaveValidatorsPubKeysCalled func(shardPubKeys map[uint32][][]byte, epoch uint32) + SaveBlockCalled func(args *outportcore.OutportBlockWithHeaderAndBody) error + SaveValidatorsRatingCalled func(validatorsRating *outportcore.ValidatorsRating) + SaveValidatorsPubKeysCalled func(validatorsPubKeys *outportcore.ValidatorsPubKeys) HasDriversCalled func() bool } // SaveBlock - -func (as *OutportStub) SaveBlock(args *outportcore.ArgsSaveBlockData) { +func (as *OutportStub) SaveBlock(args *outportcore.OutportBlockWithHeaderAndBody) error { if as.SaveBlockCalled != nil { - as.SaveBlockCalled(args) + return as.SaveBlockCalled(args) } + + return nil } // SaveValidatorsRating - -func (as *OutportStub) SaveValidatorsRating(index string, validatorsInfo []*outportcore.ValidatorRatingInfo) { +func (as *OutportStub) SaveValidatorsRating(validatorsRating *outportcore.ValidatorsRating) { if as.SaveValidatorsRatingCalled != nil { - as.SaveValidatorsRatingCalled(index, validatorsInfo) + as.SaveValidatorsRatingCalled(validatorsRating) } } // SaveValidatorsPubKeys - -func (as *OutportStub) SaveValidatorsPubKeys(shardPubKeys map[uint32][][]byte, epoch uint32) { +func (as *OutportStub) SaveValidatorsPubKeys(validatorsPubKeys *outportcore.ValidatorsPubKeys) { if as.SaveValidatorsPubKeysCalled != nil { - as.SaveValidatorsPubKeysCalled(shardPubKeys, epoch) + as.SaveValidatorsPubKeysCalled(validatorsPubKeys) } } @@ -49,12 +50,12 @@ func (as *OutportStub) HasDrivers() bool { } // RevertIndexedBlock - -func (as *OutportStub) RevertIndexedBlock(_ data.HeaderHandler, _ data.BodyHandler) { - +func (as *OutportStub) RevertIndexedBlock(_ *outportcore.HeaderDataWithBody) error { + return nil } // SaveAccounts - -func (as *OutportStub) SaveAccounts(_ uint64, _ map[string]*outportcore.AlteredAccount, _ uint32) { +func (as *OutportStub) SaveAccounts(_ *outportcore.Accounts) { } @@ -64,7 +65,7 @@ func (as *OutportStub) Close() error { } // SaveRoundsInfo - -func (as *OutportStub) 
SaveRoundsInfo(_ []*outportcore.RoundInfo) { +func (as *OutportStub) SaveRoundsInfo(_ *outportcore.RoundsInfo) { } @@ -74,5 +75,5 @@ func (as *OutportStub) SubscribeDriver(_ outport.Driver) error { } // FinalizedBlock - -func (as *OutportStub) FinalizedBlock(_ []byte) { +func (as *OutportStub) FinalizedBlock(_ *outportcore.FinalizedBlock) { } diff --git a/testscommon/outport/senderHostStub.go b/testscommon/outport/senderHostStub.go new file mode 100644 index 00000000000..ee506100fd4 --- /dev/null +++ b/testscommon/outport/senderHostStub.go @@ -0,0 +1,28 @@ +package outport + +// SenderHostStub - +type SenderHostStub struct { + SendCalled func(payload []byte, topic string) error + CloseCalled func() error +} + +// Send - +func (s *SenderHostStub) Send(payload []byte, topic string) error { + if s.SendCalled != nil { + return s.SendCalled(payload, topic) + } + return nil +} + +// Close - +func (s *SenderHostStub) Close() error { + if s.CloseCalled != nil { + return s.CloseCalled() + } + return nil +} + +// IsInterfaceNil - +func (s *SenderHostStub) IsInterfaceNil() bool { + return s == nil +} diff --git a/testscommon/realConfigsHandling.go b/testscommon/realConfigsHandling.go new file mode 100644 index 00000000000..2041d9f7375 --- /dev/null +++ b/testscommon/realConfigsHandling.go @@ -0,0 +1,105 @@ +package testscommon + +import ( + "io/ioutil" + "os/exec" + "path" + "strings" + "testing" + + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/config" + "github.com/stretchr/testify/require" +) + +// CreateTestConfigs will try to copy the whole configs directory to a temp directory and return the configs after load +// The copying of the configs is required because minor adjustments of their contents are required for the tests to pass +func CreateTestConfigs(tb testing.TB, originalConfigsPath string) *config.Configs { + tempDir := tb.TempDir() + + newConfigsPath := path.Join(tempDir, "config") + + // TODO refactor this cp to work on all OSes + cmd := exec.Command("cp", "-r", originalConfigsPath, newConfigsPath) + err := cmd.Run() + require.Nil(tb, err) + + newGenesisSmartContractsFilename := path.Join(newConfigsPath, "genesisSmartContracts.json") + correctTestPathInGenesisSmartContracts(tb, tempDir, newGenesisSmartContractsFilename) + + apiConfig, err := common.LoadApiConfig(path.Join(newConfigsPath, "api.toml")) + require.Nil(tb, err) + + generalConfig, err := common.LoadMainConfig(path.Join(newConfigsPath, "config.toml")) + require.Nil(tb, err) + + ratingsConfig, err := common.LoadRatingsConfig(path.Join(newConfigsPath, "ratings.toml")) + require.Nil(tb, err) + + economicsConfig, err := common.LoadEconomicsConfig(path.Join(newConfigsPath, "economics.toml")) + require.Nil(tb, err) + + prefsConfig, err := common.LoadPreferencesConfig(path.Join(newConfigsPath, "prefs.toml")) + require.Nil(tb, err) + + p2pConfig, err := common.LoadP2PConfig(path.Join(newConfigsPath, "p2p.toml")) + require.Nil(tb, err) + + externalConfig, err := common.LoadExternalConfig(path.Join(newConfigsPath, "external.toml")) + require.Nil(tb, err) + + systemSCConfig, err := common.LoadSystemSmartContractsConfig(path.Join(newConfigsPath, "systemSmartContractsConfig.toml")) + require.Nil(tb, err) + + epochConfig, err := common.LoadEpochConfig(path.Join(newConfigsPath, "enableEpochs.toml")) + require.Nil(tb, err) + + roundConfig, err := common.LoadRoundConfig(path.Join(newConfigsPath, "enableRounds.toml")) + require.Nil(tb, err) + + // make the node pass the network wait constraints + 
p2pConfig.Node.MinNumPeersToWaitForOnBootstrap = 0 + p2pConfig.Node.ThresholdMinConnectedPeers = 0 + + return &config.Configs{ + GeneralConfig: generalConfig, + ApiRoutesConfig: apiConfig, + EconomicsConfig: economicsConfig, + SystemSCConfig: systemSCConfig, + RatingsConfig: ratingsConfig, + PreferencesConfig: prefsConfig, + ExternalConfig: externalConfig, + P2pConfig: p2pConfig, + FlagsConfig: &config.ContextFlagsConfig{ + WorkingDir: tempDir, + NoKeyProvided: true, + Version: "test version", + DbDir: path.Join(tempDir, "db"), + }, + ImportDbConfig: &config.ImportDbConfig{}, + ConfigurationPathsHolder: &config.ConfigurationPathsHolder{ + GasScheduleDirectoryName: path.Join(newConfigsPath, "gasSchedules"), + Nodes: path.Join(newConfigsPath, "nodesSetup.json"), + Genesis: path.Join(newConfigsPath, "genesis.json"), + SmartContracts: newGenesisSmartContractsFilename, + ValidatorKey: "validatorKey.pem", + }, + EpochConfig: epochConfig, + RoundConfig: roundConfig, + } +} + +func correctTestPathInGenesisSmartContracts(tb testing.TB, tempDir string, newGenesisSmartContractsFilename string) { + input, err := ioutil.ReadFile(newGenesisSmartContractsFilename) + require.Nil(tb, err) + + lines := strings.Split(string(input), "\n") + for i, line := range lines { + if strings.Contains(line, "./config") { + lines[i] = strings.Replace(line, "./config", path.Join(tempDir, "config"), 1) + } + } + output := strings.Join(lines, "\n") + err = ioutil.WriteFile(newGenesisSmartContractsFilename, []byte(output), 0644) + require.Nil(tb, err) +} diff --git a/testscommon/stateSyncNotifierSubscriberStub.go b/testscommon/stateSyncNotifierSubscriberStub.go new file mode 100644 index 00000000000..d6dfc3df276 --- /dev/null +++ b/testscommon/stateSyncNotifierSubscriberStub.go @@ -0,0 +1,18 @@ +package testscommon + +// StateSyncNotifierSubscriberStub - +type StateSyncNotifierSubscriberStub struct { + MissingDataTrieNodeFoundCalled func(hash []byte) +} + +// MissingDataTrieNodeFound - +func (ssns *StateSyncNotifierSubscriberStub) MissingDataTrieNodeFound(hash []byte) { + if ssns.MissingDataTrieNodeFoundCalled != nil { + ssns.MissingDataTrieNodeFoundCalled(hash) + } +} + +// IsInterfaceNil - +func (ssns *StateSyncNotifierSubscriberStub) IsInterfaceNil() bool { + return ssns == nil +} diff --git a/testscommon/storage/storageManagerArgs.go b/testscommon/storage/storageManagerArgs.go index b69d19c6b99..a69e795a9d2 100644 --- a/testscommon/storage/storageManagerArgs.go +++ b/testscommon/storage/storageManagerArgs.go @@ -2,32 +2,37 @@ package storage import ( "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/genesis/mock" "github.com/multiversx/mx-chain-go/testscommon" - "github.com/multiversx/mx-chain-go/testscommon/genericMocks" "github.com/multiversx/mx-chain-go/testscommon/hashingMocks" trieMock "github.com/multiversx/mx-chain-go/testscommon/trie" "github.com/multiversx/mx-chain-go/trie" ) -// GetStorageManagerArgsAndOptions returns mock args for trie storage manager creation -func GetStorageManagerArgsAndOptions() (trie.NewTrieStorageManagerArgs, trie.StorageManagerOptions) { - storageManagerArgs := trie.NewTrieStorageManagerArgs{ - MainStorer: genericMocks.NewStorerMock(), - CheckpointsStorer: genericMocks.NewStorerMock(), +// GetStorageManagerArgs returns mock args for trie storage manager creation +func GetStorageManagerArgs() trie.NewTrieStorageManagerArgs { + return trie.NewTrieStorageManagerArgs{ + MainStorer: 
testscommon.NewSnapshotPruningStorerMock(), + CheckpointsStorer: testscommon.NewSnapshotPruningStorerMock(), Marshalizer: &mock.MarshalizerMock{}, Hasher: &hashingMocks.HasherMock{}, GeneralConfig: config.TrieStorageManagerConfig{ + PruningBufferLen: 1000, + SnapshotsBufferLen: 10, SnapshotsGoroutineNum: 2, }, CheckpointHashesHolder: &trieMock.CheckpointHashesHolderStub{}, IdleProvider: &testscommon.ProcessStatusHandlerStub{}, + Identifier: dataRetriever.UserAccountsUnit.String(), } - options := trie.StorageManagerOptions{ +} + +// GetStorageManagerOptions returns default options for trie storage manager creation +func GetStorageManagerOptions() trie.StorageManagerOptions { + return trie.StorageManagerOptions{ PruningEnabled: true, SnapshotsEnabled: true, CheckpointsEnabled: true, } - - return storageManagerArgs, options } diff --git a/testscommon/storage/storerStub.go b/testscommon/storage/storerStub.go index 9a675057927..f5fa6fa97d6 100644 --- a/testscommon/storage/storerStub.go +++ b/testscommon/storage/storerStub.go @@ -19,6 +19,7 @@ type StorerStub struct { GetBulkFromEpochCalled func(keys [][]byte, epoch uint32) ([]storage.KeyValuePair, error) GetOldestEpochCalled func() (uint32, error) RangeKeysCalled func(handler func(key []byte, val []byte) bool) + GetIdentifierCalled func() string CloseCalled func() error } @@ -124,6 +125,14 @@ func (ss *StorerStub) RangeKeys(handler func(key []byte, val []byte) bool) { } } +// GetIdentifier - +func (ss *StorerStub) GetIdentifier() string { + if ss.GetIdentifierCalled != nil { + return ss.GetIdentifierCalled() + } + return "" +} + // Close - func (ss *StorerStub) Close() error { if ss.CloseCalled != nil { diff --git a/testscommon/storageManagerStub.go b/testscommon/storageManager/storageManagerStub.go similarity index 93% rename from testscommon/storageManagerStub.go rename to testscommon/storageManager/storageManagerStub.go index b7673a4b4cd..303be7c06ed 100644 --- a/testscommon/storageManagerStub.go +++ b/testscommon/storageManager/storageManagerStub.go @@ -1,4 +1,4 @@ -package testscommon +package storageManager import ( "github.com/multiversx/mx-chain-go/common" @@ -13,7 +13,7 @@ type StorageManagerStub struct { GetFromCurrentEpochCalled func([]byte) ([]byte, error) TakeSnapshotCalled func(string, []byte, []byte, *common.TrieIteratorChannels, chan []byte, common.SnapshotStatisticsHandler, uint32) SetCheckpointCalled func([]byte, []byte, *common.TrieIteratorChannels, chan []byte, common.SnapshotStatisticsHandler) - GetDbThatContainsHashCalled func([]byte) common.DBWriteCacher + GetDbThatContainsHashCalled func([]byte) common.BaseStorer IsPruningEnabledCalled func() bool IsPruningBlockedCalled func() bool EnterPruningBufferingModeCalled func() @@ -28,6 +28,8 @@ type StorageManagerStub struct { IsClosedCalled func() bool RemoveFromCheckpointHashesHolderCalled func([]byte) GetBaseTrieStorageManagerCalled func() common.StorageManager + GetIdentifierCalled func() string + CloseCalled func() error } // Put - @@ -186,6 +188,9 @@ func (sms *StorageManagerStub) GetLatestStorageEpoch() (uint32, error) { // Close - func (sms *StorageManagerStub) Close() error { + if sms.CloseCalled != nil { + return sms.CloseCalled() + } return nil } @@ -214,6 +219,15 @@ func (sms *StorageManagerStub) GetBaseTrieStorageManager() common.StorageManager return nil } +// GetIdentifier - +func (sms *StorageManagerStub) GetIdentifier() string { + if sms.GetIdentifierCalled != nil { + return sms.GetIdentifierCalled() + } + + return "" +} + // IsInterfaceNil - func (sms 
*StorageManagerStub) IsInterfaceNil() bool { return sms == nil diff --git a/testscommon/trie/snapshotPruningStorerStub.go b/testscommon/trie/snapshotPruningStorerStub.go index 59c113eae9b..e1a03119734 100644 --- a/testscommon/trie/snapshotPruningStorerStub.go +++ b/testscommon/trie/snapshotPruningStorerStub.go @@ -16,6 +16,7 @@ type SnapshotPruningStorerStub struct { PutInEpochWithoutCacheCalled func(key []byte, data []byte, epoch uint32) error GetLatestStorageEpochCalled func() (uint32, error) RemoveFromCurrentEpochCalled func(key []byte) error + CloseCalled func() error } // GetFromOldEpochsWithoutAddingToCache - @@ -88,3 +89,11 @@ func (spss *SnapshotPruningStorerStub) RemoveFromCurrentEpoch(key []byte) error } return spss.Remove(key) } + +// Close - +func (spss *SnapshotPruningStorerStub) Close() error { + if spss.CloseCalled != nil { + return spss.CloseCalled() + } + return nil +} diff --git a/trie/baseIterator.go b/trie/baseIterator.go index 9f1d6d1f52b..8ff558790d8 100644 --- a/trie/baseIterator.go +++ b/trie/baseIterator.go @@ -8,7 +8,7 @@ import ( type baseIterator struct { currentNode node nextNodes []node - db common.DBWriteCacher + db common.TrieStorageInteractor } // newBaseIterator creates a new instance of trie iterator diff --git a/trie/branchNode.go b/trie/branchNode.go index 3e6f26768b5..66fa48e8d9e 100644 --- a/trie/branchNode.go +++ b/trie/branchNode.go @@ -13,7 +13,6 @@ import ( "github.com/multiversx/mx-chain-core-go/hashing" "github.com/multiversx/mx-chain-core-go/marshal" "github.com/multiversx/mx-chain-go/common" - "github.com/multiversx/mx-chain-go/errors" ) var _ = node(&branchNode{}) @@ -248,7 +247,7 @@ func (bn *branchNode) hashNode() ([]byte, error) { return encodeNodeAndGetHash(bn) } -func (bn *branchNode) commitDirty(level byte, maxTrieLevelInMemory uint, originDb common.DBWriteCacher, targetDb common.DBWriteCacher) error { +func (bn *branchNode) commitDirty(level byte, maxTrieLevelInMemory uint, originDb common.TrieStorageInteractor, targetDb common.BaseStorer) error { level++ err := bn.isEmptyOrNil() if err != nil { @@ -289,8 +288,8 @@ func (bn *branchNode) commitDirty(level byte, maxTrieLevelInMemory uint, originD } func (bn *branchNode) commitCheckpoint( - originDb common.DBWriteCacher, - targetDb common.DBWriteCacher, + originDb common.TrieStorageInteractor, + targetDb common.BaseStorer, checkpointHashes CheckpointHashesHolder, leavesChan chan core.KeyValueHolder, ctx context.Context, @@ -299,7 +298,7 @@ func (bn *branchNode) commitCheckpoint( depthLevel int, ) error { if shouldStopIfContextDoneBlockingIfBusy(ctx, idleProvider) { - return errors.ErrContextClosing + return core.ErrContextClosing } err := bn.isEmptyOrNil() @@ -338,7 +337,7 @@ func (bn *branchNode) commitCheckpoint( } func (bn *branchNode) commitSnapshot( - db common.DBWriteCacher, + db common.TrieStorageInteractor, leavesChan chan core.KeyValueHolder, missingNodesChan chan []byte, ctx context.Context, @@ -347,7 +346,7 @@ func (bn *branchNode) commitSnapshot( depthLevel int, ) error { if shouldStopIfContextDoneBlockingIfBusy(ctx, idleProvider) { - return errors.ErrContextClosing + return core.ErrContextClosing } err := bn.isEmptyOrNil() @@ -358,7 +357,7 @@ func (bn *branchNode) commitSnapshot( for i := range bn.children { err = resolveIfCollapsed(bn, byte(i), db) if err != nil { - if strings.Contains(err.Error(), common.GetNodeFromDBErrorString) { + if strings.Contains(err.Error(), core.GetNodeFromDBErrorString) { treatCommitSnapshotError(err, bn.EncodedChildren[i], missingNodesChan) 
continue } @@ -378,7 +377,7 @@ func (bn *branchNode) commitSnapshot( return bn.saveToStorage(db, stats, depthLevel) } -func (bn *branchNode) saveToStorage(targetDb common.DBWriteCacher, stats common.TrieStatisticsHandler, depthLevel int) error { +func (bn *branchNode) saveToStorage(targetDb common.BaseStorer, stats common.TrieStatisticsHandler, depthLevel int) error { nodeSize, err := encodeNodeAndCommitToDB(bn, targetDb) if err != nil { return err @@ -409,7 +408,7 @@ func (bn *branchNode) getEncodedNode() ([]byte, error) { return marshaledNode, nil } -func (bn *branchNode) resolveCollapsed(pos byte, db common.DBWriteCacher) error { +func (bn *branchNode) resolveCollapsed(pos byte, db common.TrieStorageInteractor) error { err := bn.isEmptyOrNil() if err != nil { return fmt.Errorf("resolveCollapsed error %w", err) @@ -442,7 +441,7 @@ func (bn *branchNode) isPosCollapsed(pos int) bool { return bn.children[pos] == nil && len(bn.EncodedChildren[pos]) != 0 } -func (bn *branchNode) tryGet(key []byte, currentDepth uint32, db common.DBWriteCacher) (value []byte, maxDepth uint32, err error) { +func (bn *branchNode) tryGet(key []byte, currentDepth uint32, db common.TrieStorageInteractor) (value []byte, maxDepth uint32, err error) { err = bn.isEmptyOrNil() if err != nil { return nil, currentDepth, fmt.Errorf("tryGet error %w", err) @@ -466,7 +465,7 @@ func (bn *branchNode) tryGet(key []byte, currentDepth uint32, db common.DBWriteC return bn.children[childPos].tryGet(key, currentDepth+1, db) } -func (bn *branchNode) getNext(key []byte, db common.DBWriteCacher) (node, []byte, error) { +func (bn *branchNode) getNext(key []byte, db common.TrieStorageInteractor) (node, []byte, error) { err := bn.isEmptyOrNil() if err != nil { return nil, nil, fmt.Errorf("getNext error %w", err) @@ -490,7 +489,7 @@ func (bn *branchNode) getNext(key []byte, db common.DBWriteCacher) (node, []byte return bn.children[childPos], key, nil } -func (bn *branchNode) insert(n *leafNode, db common.DBWriteCacher) (node, [][]byte, error) { +func (bn *branchNode) insert(n *leafNode, db common.TrieStorageInteractor) (node, [][]byte, error) { emptyHashes := make([][]byte, 0) err := bn.isEmptyOrNil() if err != nil { @@ -531,7 +530,7 @@ func (bn *branchNode) insertOnNilChild(n *leafNode, childPos byte) (node, [][]by return bn, modifiedHashes, nil } -func (bn *branchNode) insertOnExistingChild(n *leafNode, childPos byte, db common.DBWriteCacher) (node, [][]byte, error) { +func (bn *branchNode) insertOnExistingChild(n *leafNode, childPos byte, db common.TrieStorageInteractor) (node, [][]byte, error) { newNode, modifiedHashes, err := bn.children[childPos].insert(n, db) if check.IfNil(newNode) || err != nil { return nil, [][]byte{}, err @@ -554,7 +553,7 @@ func (bn *branchNode) modifyNodeAfterInsert(modifiedHashes [][]byte, childPos by return modifiedHashes } -func (bn *branchNode) delete(key []byte, db common.DBWriteCacher) (bool, node, [][]byte, error) { +func (bn *branchNode) delete(key []byte, db common.TrieStorageInteractor) (bool, node, [][]byte, error) { emptyHashes := make([][]byte, 0) err := bn.isEmptyOrNil() if err != nil { @@ -659,7 +658,7 @@ func (bn *branchNode) isEmptyOrNil() error { return ErrEmptyBranchNode } -func (bn *branchNode) print(writer io.Writer, index int, db common.DBWriteCacher) { +func (bn *branchNode) print(writer io.Writer, index int, db common.TrieStorageInteractor) { if bn == nil { return } @@ -712,7 +711,7 @@ func (bn *branchNode) getDirtyHashes(hashes common.ModifiedHashes) error { return nil } -func (bn 
*branchNode) getChildren(db common.DBWriteCacher) ([]node, error) { +func (bn *branchNode) getChildren(db common.TrieStorageInteractor) ([]node, error) { err := bn.isEmptyOrNil() if err != nil { return nil, fmt.Errorf("getChildren error %w", err) @@ -782,7 +781,7 @@ func (bn *branchNode) loadChildren(getNode func([]byte) (node, error)) ([][]byte func (bn *branchNode) getAllLeavesOnChannel( leavesChannel chan core.KeyValueHolder, keyBuilder common.KeyBuilder, - db common.DBWriteCacher, + db common.TrieStorageInteractor, marshalizer marshal.Marshalizer, chanClose chan struct{}, ctx context.Context, @@ -824,7 +823,7 @@ func (bn *branchNode) getAllLeavesOnChannel( return nil } -func (bn *branchNode) getAllHashes(db common.DBWriteCacher) ([][]byte, error) { +func (bn *branchNode) getAllHashes(db common.TrieStorageInteractor) ([][]byte, error) { err := bn.isEmptyOrNil() if err != nil { return nil, fmt.Errorf("getAllHashes error: %w", err) @@ -885,7 +884,7 @@ func (bn *branchNode) getValue() []byte { return []byte{} } -func (bn *branchNode) collectStats(ts common.TrieStatisticsHandler, depthLevel int, db common.DBWriteCacher) error { +func (bn *branchNode) collectStats(ts common.TrieStatisticsHandler, depthLevel int, db common.TrieStorageInteractor) error { err := bn.isEmptyOrNil() if err != nil { return fmt.Errorf("collectStats error %w", err) diff --git a/trie/branchNode_test.go b/trie/branchNode_test.go index a121e8b21aa..e3f1118c61a 100644 --- a/trie/branchNode_test.go +++ b/trie/branchNode_test.go @@ -7,16 +7,13 @@ import ( "fmt" "testing" - "github.com/multiversx/mx-chain-core-go/data/mock" + "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/hashing" "github.com/multiversx/mx-chain-core-go/marshal" "github.com/multiversx/mx-chain-go/common" - "github.com/multiversx/mx-chain-go/config" - chainErrors "github.com/multiversx/mx-chain-go/errors" "github.com/multiversx/mx-chain-go/storage/cache" "github.com/multiversx/mx-chain-go/testscommon" "github.com/multiversx/mx-chain-go/testscommon/hashingMocks" - "github.com/multiversx/mx-chain-go/trie/hashesHolder" "github.com/multiversx/mx-chain-go/trie/statistics" "github.com/stretchr/testify/assert" ) @@ -47,30 +44,12 @@ func getBnAndCollapsedBn(marshalizer marshal.Marshalizer, hasher hashing.Hasher) } func newEmptyTrie() (*patriciaMerkleTrie, *trieStorageManager) { - marsh, hsh := getTestMarshalizerAndHasher() - - // TODO change this initialization of the persister (and everywhere in this package) - // by using a persister factory - generalCfg := config.TrieStorageManagerConfig{ - PruningBufferLen: 1000, - SnapshotsBufferLen: 10, - SnapshotsGoroutineNum: 1, - } - - args := NewTrieStorageManagerArgs{ - MainStorer: createMemUnit(), - CheckpointsStorer: createMemUnit(), - Marshalizer: marsh, - Hasher: hsh, - GeneralConfig: generalCfg, - CheckpointHashesHolder: hashesHolder.NewCheckpointHashesHolder(10000000, uint64(hsh.Size())), - IdleProvider: &testscommon.ProcessStatusHandlerStub{}, - } + args := GetDefaultTrieStorageManagerParameters() trieStorage, _ := NewTrieStorageManager(args) tr := &patriciaMerkleTrie{ trieStorage: trieStorage, - marshalizer: marsh, - hasher: hsh, + marshalizer: args.Marshalizer, + hasher: args.Hasher, oldHashes: make([][]byte, 0), oldRoot: make([]byte, 0), maxTrieLevelInMemory: 5, @@ -185,26 +164,9 @@ func TestBranchNode_setRootHash(t *testing.T) { t.Parallel() marsh, hsh := getTestMarshalizerAndHasher() - args := NewTrieStorageManagerArgs{ - MainStorer: createMemUnit(), - CheckpointsStorer: 
createMemUnit(), - Marshalizer: marsh, - Hasher: hsh, - GeneralConfig: config.TrieStorageManagerConfig{SnapshotsGoroutineNum: 1}, - CheckpointHashesHolder: hashesHolder.NewCheckpointHashesHolder(10, uint64(hsh.Size())), - IdleProvider: &testscommon.ProcessStatusHandlerStub{}, - } - trieStorage1, _ := NewTrieStorageManager(args) - args = NewTrieStorageManagerArgs{ - MainStorer: createMemUnit(), - CheckpointsStorer: createMemUnit(), - Marshalizer: marsh, - Hasher: hsh, - GeneralConfig: config.TrieStorageManagerConfig{SnapshotsGoroutineNum: 1}, - CheckpointHashesHolder: hashesHolder.NewCheckpointHashesHolder(10, uint64(hsh.Size())), - IdleProvider: &testscommon.ProcessStatusHandlerStub{}, - } - trieStorage2, _ := NewTrieStorageManager(args) + + trieStorage1, _ := NewTrieStorageManager(GetDefaultTrieStorageManagerParameters()) + trieStorage2, _ := NewTrieStorageManager(GetDefaultTrieStorageManagerParameters()) maxTrieLevelInMemory := uint(5) tr1, _ := NewTrie(trieStorage1, marsh, hsh, maxTrieLevelInMemory) @@ -1352,20 +1314,20 @@ func TestBranchNode_commitContextDone(t *testing.T) { cancel() err := bn.commitCheckpoint(db, db, nil, nil, ctx, statistics.NewTrieStatistics(), &testscommon.ProcessStatusHandlerStub{}, 0) - assert.Equal(t, chainErrors.ErrContextClosing, err) + assert.Equal(t, core.ErrContextClosing, err) err = bn.commitSnapshot(db, nil, nil, ctx, statistics.NewTrieStatistics(), &testscommon.ProcessStatusHandlerStub{}, 0) - assert.Equal(t, chainErrors.ErrContextClosing, err) + assert.Equal(t, core.ErrContextClosing, err) } func TestBranchNode_commitSnapshotDbIsClosing(t *testing.T) { t.Parallel() - db := &mock.StorerStub{ - GetCalled: func(key []byte) ([]byte, error) { - return nil, chainErrors.ErrContextClosing - }, + db := testscommon.NewMemDbMock() + db.GetCalled = func(key []byte) ([]byte, error) { + return nil, core.ErrContextClosing } + _, collapsedBn := getBnAndCollapsedBn(getTestMarshalizerAndHasher()) missingNodesChan := make(chan []byte, 10) err := collapsedBn.commitSnapshot(db, nil, missingNodesChan, context.Background(), statistics.NewTrieStatistics(), &testscommon.ProcessStatusHandlerStub{}, 0) diff --git a/trie/depthFirstSync.go b/trie/depthFirstSync.go index 2af9bbb5e72..b2ef76ac35a 100644 --- a/trie/depthFirstSync.go +++ b/trie/depthFirstSync.go @@ -9,7 +9,6 @@ import ( "github.com/multiversx/mx-chain-core-go/hashing" "github.com/multiversx/mx-chain-core-go/marshal" "github.com/multiversx/mx-chain-go/common" - "github.com/multiversx/mx-chain-go/errors" "github.com/multiversx/mx-chain-go/storage" ) @@ -22,7 +21,7 @@ type depthFirstTrieSyncer struct { waitTimeBetweenChecks time.Duration marshaller marshal.Marshalizer hasher hashing.Hasher - db common.DBWriteCacher + db common.TrieStorageInteractor requestHandler RequestHandler interceptedNodesCacher storage.Cacher mutOperation sync.RWMutex @@ -106,7 +105,7 @@ func (d *depthFirstTrieSyncer) StartSyncing(rootHash []byte, ctx context.Context case <-time.After(d.waitTimeBetweenChecks): continue case <-ctx.Done(): - return errors.ErrContextClosing + return core.ErrContextClosing } } } diff --git a/trie/depthFirstSync_test.go b/trie/depthFirstSync_test.go index 4fc6d9194aa..456c1b1f3e8 100644 --- a/trie/depthFirstSync_test.go +++ b/trie/depthFirstSync_test.go @@ -11,7 +11,6 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-go/common" - "github.com/multiversx/mx-chain-go/errors" "github.com/multiversx/mx-chain-go/storage" 
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -81,7 +80,7 @@ func TestDepthFirstTrieSyncer_StartSyncingCanTimeout(t *testing.T) { defer cancelFunc() err := d.StartSyncing(roothash, ctx) - require.Equal(t, errors.ErrContextClosing, err) + require.Equal(t, core.ErrContextClosing, err) } func TestDepthFirstTrieSyncer_StartSyncingTimeoutNoNodesReceived(t *testing.T) { diff --git a/trie/dfsIterator_test.go b/trie/dfsIterator_test.go index 476889b6a2c..5e9f653db9b 100644 --- a/trie/dfsIterator_test.go +++ b/trie/dfsIterator_test.go @@ -7,6 +7,27 @@ import ( "github.com/stretchr/testify/assert" ) +func TestNewDFSIterator(t *testing.T) { + t.Parallel() + + t.Run("nil trie should error", func(t *testing.T) { + t.Parallel() + + it, err := trie.NewDFSIterator(nil) + assert.Equal(t, trie.ErrNilTrie, err) + assert.Nil(t, it) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + tr := initTrie() + + it, err := trie.NewDFSIterator(tr) + assert.Nil(t, err) + assert.NotNil(t, it) + }) +} + func TestDFSIterator_Next(t *testing.T) { t.Parallel() diff --git a/trie/doubleListSync.go b/trie/doubleListSync.go index cfd7120e7f8..edf7ed76d23 100644 --- a/trie/doubleListSync.go +++ b/trie/doubleListSync.go @@ -9,7 +9,6 @@ import ( "github.com/multiversx/mx-chain-core-go/hashing" "github.com/multiversx/mx-chain-core-go/marshal" "github.com/multiversx/mx-chain-go/common" - "github.com/multiversx/mx-chain-go/errors" "github.com/multiversx/mx-chain-go/storage" ) @@ -32,7 +31,7 @@ type doubleListTrieSyncer struct { waitTimeBetweenChecks time.Duration marshalizer marshal.Marshalizer hasher hashing.Hasher - db common.DBWriteCacher + db common.TrieStorageInteractor requestHandler RequestHandler interceptedNodesCacher storage.Cacher mutOperation sync.RWMutex @@ -123,7 +122,7 @@ func (d *doubleListTrieSyncer) StartSyncing(rootHash []byte, ctx context.Context case <-time.After(d.waitTimeBetweenChecks): continue case <-ctx.Done(): - return errors.ErrContextClosing + return core.ErrContextClosing } } } diff --git a/trie/doubleListSync_test.go b/trie/doubleListSync_test.go index a519db35d2e..c0a453242b9 100644 --- a/trie/doubleListSync_test.go +++ b/trie/doubleListSync_test.go @@ -11,14 +11,11 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-go/common" - "github.com/multiversx/mx-chain-go/config" - "github.com/multiversx/mx-chain-go/errors" "github.com/multiversx/mx-chain-go/storage" "github.com/multiversx/mx-chain-go/storage/database" "github.com/multiversx/mx-chain-go/storage/storageunit" "github.com/multiversx/mx-chain-go/testscommon" "github.com/multiversx/mx-chain-go/testscommon/hashingMocks" - "github.com/multiversx/mx-chain-go/trie/hashesHolder" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -39,20 +36,8 @@ func createMemUnit() storage.Storer { // CreateTrieStorageManager creates the trie storage manager for the tests func createTrieStorageManager(store storage.Storer) (common.StorageManager, storage.Storer) { - generalCfg := config.TrieStorageManagerConfig{ - PruningBufferLen: 1000, - SnapshotsBufferLen: 10, - SnapshotsGoroutineNum: 1, - } - args := NewTrieStorageManagerArgs{ - MainStorer: store, - CheckpointsStorer: store, - Marshalizer: marshalizer, - Hasher: hasherMock, - GeneralConfig: generalCfg, - CheckpointHashesHolder: hashesHolder.NewCheckpointHashesHolder(10000000, uint64(hasherMock.Size())), - IdleProvider: 
&testscommon.ProcessStatusHandlerStub{}, - } + args := GetDefaultTrieStorageManagerParameters() + args.MainStorer = store tsm, _ := NewTrieStorageManager(args) return tsm, store @@ -186,7 +171,7 @@ func TestDoubleListTrieSyncer_StartSyncingCanTimeout(t *testing.T) { defer cancelFunc() err := d.StartSyncing(roothash, ctx) - require.Equal(t, errors.ErrContextClosing, err) + require.Equal(t, core.ErrContextClosing, err) } func TestDoubleListTrieSyncer_StartSyncingTimeoutNoNodesReceived(t *testing.T) { diff --git a/trie/errors.go b/trie/errors.go index a225a84c00c..dc229f1c1b0 100644 --- a/trie/errors.go +++ b/trie/errors.go @@ -114,3 +114,6 @@ var ErrNilTrieIteratorLeavesChannel = errors.New("nil trie iterator leaves chann // ErrNilTrieIteratorErrChannel signals that a nil trie iterator error channel has been provided var ErrNilTrieIteratorErrChannel = errors.New("nil trie iterator error channel") + +// ErrInvalidIdentifier signals that an invalid identifier was provided +var ErrInvalidIdentifier = errors.New("invalid identifier") diff --git a/trie/export_test.go b/trie/export_test.go index 8485227f3d4..c227b8bf81b 100644 --- a/trie/export_test.go +++ b/trie/export_test.go @@ -3,8 +3,13 @@ package trie import ( "time" + "github.com/multiversx/mx-chain-core-go/marshal" "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/testscommon" + "github.com/multiversx/mx-chain-go/testscommon/storageManager" + "github.com/multiversx/mx-chain-go/trie/hashesHolder" ) func (ts *trieSyncer) trieNodeIntercepted(hash []byte, val interface{}) { @@ -82,7 +87,7 @@ func WriteInChanNonBlocking(errChan chan error, err error) { } type StorageManagerExtensionStub struct { - *testscommon.StorageManagerStub + *storageManager.StorageManagerStub } // IsBaseTrieStorageManager - @@ -101,3 +106,23 @@ func IsTrieStorageManagerInEpoch(tsm common.StorageManager) bool { func NewBaseIterator(trie common.Trie) (*baseIterator, error) { return newBaseIterator(trie) } + +// GetDefaultTrieStorageManagerParameters - +func GetDefaultTrieStorageManagerParameters() NewTrieStorageManagerArgs { + generalCfg := config.TrieStorageManagerConfig{ + PruningBufferLen: 1000, + SnapshotsBufferLen: 10, + SnapshotsGoroutineNum: 1, + } + + return NewTrieStorageManagerArgs{ + MainStorer: testscommon.NewSnapshotPruningStorerMock(), + CheckpointsStorer: testscommon.NewSnapshotPruningStorerMock(), + Marshalizer: &marshal.GogoProtoMarshalizer{}, + Hasher: &testscommon.KeccakMock{}, + GeneralConfig: generalCfg, + CheckpointHashesHolder: hashesHolder.NewCheckpointHashesHolder(10000000, testscommon.HashSize), + IdleProvider: &testscommon.ProcessStatusHandlerStub{}, + Identifier: dataRetriever.UserAccountsUnit.String(), + } +} diff --git a/trie/extensionNode.go b/trie/extensionNode.go index 8130a761233..04871193be8 100644 --- a/trie/extensionNode.go +++ b/trie/extensionNode.go @@ -14,7 +14,6 @@ import ( "github.com/multiversx/mx-chain-core-go/hashing" "github.com/multiversx/mx-chain-core-go/marshal" "github.com/multiversx/mx-chain-go/common" - "github.com/multiversx/mx-chain-go/errors" ) var _ = node(&extensionNode{}) @@ -163,7 +162,7 @@ func (en *extensionNode) hashNode() ([]byte, error) { return encodeNodeAndGetHash(en) } -func (en *extensionNode) commitDirty(level byte, maxTrieLevelInMemory uint, originDb common.DBWriteCacher, targetDb common.DBWriteCacher) error { +func (en *extensionNode) commitDirty(level byte, maxTrieLevelInMemory 
uint, originDb common.TrieStorageInteractor, targetDb common.BaseStorer) error { level++ err := en.isEmptyOrNil() if err != nil { @@ -201,8 +200,8 @@ func (en *extensionNode) commitDirty(level byte, maxTrieLevelInMemory uint, orig } func (en *extensionNode) commitCheckpoint( - originDb common.DBWriteCacher, - targetDb common.DBWriteCacher, + originDb common.TrieStorageInteractor, + targetDb common.BaseStorer, checkpointHashes CheckpointHashesHolder, leavesChan chan core.KeyValueHolder, ctx context.Context, @@ -211,7 +210,7 @@ func (en *extensionNode) commitCheckpoint( depthLevel int, ) error { if shouldStopIfContextDoneBlockingIfBusy(ctx, idleProvider) { - return errors.ErrContextClosing + return core.ErrContextClosing } err := en.isEmptyOrNil() @@ -244,7 +243,7 @@ func (en *extensionNode) commitCheckpoint( } func (en *extensionNode) commitSnapshot( - db common.DBWriteCacher, + db common.TrieStorageInteractor, leavesChan chan core.KeyValueHolder, missingNodesChan chan []byte, ctx context.Context, @@ -253,7 +252,7 @@ func (en *extensionNode) commitSnapshot( depthLevel int, ) error { if shouldStopIfContextDoneBlockingIfBusy(ctx, idleProvider) { - return errors.ErrContextClosing + return core.ErrContextClosing } err := en.isEmptyOrNil() @@ -264,7 +263,7 @@ func (en *extensionNode) commitSnapshot( err = resolveIfCollapsed(en, 0, db) isMissingNodeErr := false if err != nil { - isMissingNodeErr = strings.Contains(err.Error(), common.GetNodeFromDBErrorString) + isMissingNodeErr = strings.Contains(err.Error(), core.GetNodeFromDBErrorString) if !isMissingNodeErr { return err } @@ -282,7 +281,7 @@ func (en *extensionNode) commitSnapshot( return en.saveToStorage(db, stats, depthLevel) } -func (en *extensionNode) saveToStorage(targetDb common.DBWriteCacher, stats common.TrieStatisticsHandler, depthLevel int) error { +func (en *extensionNode) saveToStorage(targetDb common.BaseStorer, stats common.TrieStatisticsHandler, depthLevel int) error { nodeSize, err := encodeNodeAndCommitToDB(en, targetDb) if err != nil { return err @@ -307,7 +306,7 @@ func (en *extensionNode) getEncodedNode() ([]byte, error) { return marshaledNode, nil } -func (en *extensionNode) resolveCollapsed(_ byte, db common.DBWriteCacher) error { +func (en *extensionNode) resolveCollapsed(_ byte, db common.TrieStorageInteractor) error { err := en.isEmptyOrNil() if err != nil { return fmt.Errorf("resolveCollapsed error %w", err) @@ -329,7 +328,7 @@ func (en *extensionNode) isPosCollapsed(_ int) bool { return en.isCollapsed() } -func (en *extensionNode) tryGet(key []byte, currentDepth uint32, db common.DBWriteCacher) (value []byte, maxDepth uint32, err error) { +func (en *extensionNode) tryGet(key []byte, currentDepth uint32, db common.TrieStorageInteractor) (value []byte, maxDepth uint32, err error) { err = en.isEmptyOrNil() if err != nil { return nil, currentDepth, fmt.Errorf("tryGet error %w", err) @@ -351,7 +350,7 @@ func (en *extensionNode) tryGet(key []byte, currentDepth uint32, db common.DBWri return en.child.tryGet(key, currentDepth+1, db) } -func (en *extensionNode) getNext(key []byte, db common.DBWriteCacher) (node, []byte, error) { +func (en *extensionNode) getNext(key []byte, db common.TrieStorageInteractor) (node, []byte, error) { err := en.isEmptyOrNil() if err != nil { return nil, nil, fmt.Errorf("getNext error %w", err) @@ -373,7 +372,7 @@ func (en *extensionNode) getNext(key []byte, db common.DBWriteCacher) (node, []b return en.child, key, nil } -func (en *extensionNode) insert(n *leafNode, db common.DBWriteCacher) (node, 
[][]byte, error) { +func (en *extensionNode) insert(n *leafNode, db common.TrieStorageInteractor) (node, [][]byte, error) { emptyHashes := make([][]byte, 0) err := en.isEmptyOrNil() if err != nil { @@ -396,7 +395,7 @@ func (en *extensionNode) insert(n *leafNode, db common.DBWriteCacher) (node, [][ return en.insertInNewBn(n, keyMatchLen) } -func (en *extensionNode) insertInSameEn(n *leafNode, keyMatchLen int, db common.DBWriteCacher) (node, [][]byte, error) { +func (en *extensionNode) insertInSameEn(n *leafNode, keyMatchLen int, db common.TrieStorageInteractor) (node, [][]byte, error) { n.Key = n.Key[keyMatchLen:] newNode, oldHashes, err := en.child.insert(n, db) if check.IfNil(newNode) || err != nil { @@ -457,7 +456,7 @@ func (en *extensionNode) insertInNewBn(n *leafNode, keyMatchLen int) (node, [][] return newEn, oldHash, nil } -func (en *extensionNode) delete(key []byte, db common.DBWriteCacher) (bool, node, [][]byte, error) { +func (en *extensionNode) delete(key []byte, db common.TrieStorageInteractor) (bool, node, [][]byte, error) { emptyHashes := make([][]byte, 0) err := en.isEmptyOrNil() if err != nil { @@ -536,7 +535,7 @@ func (en *extensionNode) isEmptyOrNil() error { return nil } -func (en *extensionNode) print(writer io.Writer, index int, db common.DBWriteCacher) { +func (en *extensionNode) print(writer io.Writer, index int, db common.TrieStorageInteractor) { if en == nil { return } @@ -583,7 +582,7 @@ func (en *extensionNode) getDirtyHashes(hashes common.ModifiedHashes) error { return nil } -func (en *extensionNode) getChildren(db common.DBWriteCacher) ([]node, error) { +func (en *extensionNode) getChildren(db common.TrieStorageInteractor) ([]node, error) { err := en.isEmptyOrNil() if err != nil { return nil, fmt.Errorf("getChildren error %w", err) @@ -640,7 +639,7 @@ func (en *extensionNode) loadChildren(getNode func([]byte) (node, error)) ([][]b func (en *extensionNode) getAllLeavesOnChannel( leavesChannel chan core.KeyValueHolder, keyBuilder common.KeyBuilder, - db common.DBWriteCacher, + db common.TrieStorageInteractor, marshalizer marshal.Marshalizer, chanClose chan struct{}, ctx context.Context, @@ -675,7 +674,7 @@ func (en *extensionNode) getAllLeavesOnChannel( return nil } -func (en *extensionNode) getAllHashes(db common.DBWriteCacher) ([][]byte, error) { +func (en *extensionNode) getAllHashes(db common.TrieStorageInteractor) ([][]byte, error) { err := en.isEmptyOrNil() if err != nil { return nil, fmt.Errorf("getAllHashes error: %w", err) @@ -723,7 +722,7 @@ func (en *extensionNode) getValue() []byte { return []byte{} } -func (en *extensionNode) collectStats(ts common.TrieStatisticsHandler, depthLevel int, db common.DBWriteCacher) error { +func (en *extensionNode) collectStats(ts common.TrieStatisticsHandler, depthLevel int, db common.TrieStorageInteractor) error { err := en.isEmptyOrNil() if err != nil { return fmt.Errorf("collectStats error %w", err) diff --git a/trie/extensionNode_test.go b/trie/extensionNode_test.go index cc8dd806d2c..f24f8edbf14 100644 --- a/trie/extensionNode_test.go +++ b/trie/extensionNode_test.go @@ -6,9 +6,8 @@ import ( "errors" "testing" - "github.com/multiversx/mx-chain-core-go/data/mock" + "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-go/common" - chainErrors "github.com/multiversx/mx-chain-go/errors" "github.com/multiversx/mx-chain-go/storage/cache" "github.com/multiversx/mx-chain-go/testscommon" "github.com/multiversx/mx-chain-go/testscommon/hashingMocks" @@ -1021,10 +1020,10 @@ func 
TestExtensionNode_commitContextDone(t *testing.T) { cancel() err := en.commitCheckpoint(db, db, nil, nil, ctx, statistics.NewTrieStatistics(), &testscommon.ProcessStatusHandlerStub{}, 0) - assert.Equal(t, chainErrors.ErrContextClosing, err) + assert.Equal(t, core.ErrContextClosing, err) err = en.commitSnapshot(db, nil, nil, ctx, statistics.NewTrieStatistics(), &testscommon.ProcessStatusHandlerStub{}, 0) - assert.Equal(t, chainErrors.ErrContextClosing, err) + assert.Equal(t, core.ErrContextClosing, err) } func TestExtensionNode_getValueReturnsEmptyByteSlice(t *testing.T) { @@ -1037,11 +1036,11 @@ func TestExtensionNode_getValueReturnsEmptyByteSlice(t *testing.T) { func TestExtensionNode_commitSnapshotDbIsClosing(t *testing.T) { t.Parallel() - db := &mock.StorerStub{ - GetCalled: func(key []byte) ([]byte, error) { - return nil, chainErrors.ErrContextClosing - }, + db := testscommon.NewMemDbMock() + db.GetCalled = func(key []byte) ([]byte, error) { + return nil, core.ErrContextClosing } + _, collapsedEn := getEnAndCollapsedEn() missingNodesChan := make(chan []byte, 10) err := collapsedEn.commitSnapshot(db, nil, missingNodesChan, context.Background(), statistics.NewTrieStatistics(), &testscommon.ProcessStatusHandlerStub{}, 0) diff --git a/trie/factory/trieCreator.go b/trie/factory/trieCreator.go index 8d90ba47844..2958e9edccf 100644 --- a/trie/factory/trieCreator.go +++ b/trie/factory/trieCreator.go @@ -23,6 +23,7 @@ type TrieCreateArgs struct { SnapshotsEnabled bool MaxTrieLevelInMem uint IdleProvider trie.IdleNodeProvider + Identifier string } type trieCreator struct { @@ -64,6 +65,7 @@ func (tc *trieCreator) Create(args TrieCreateArgs) (common.StorageManager, commo GeneralConfig: tc.trieStorageManagerConfig, CheckpointHashesHolder: tc.getCheckpointHashesHolder(args.CheckpointsEnabled), IdleProvider: args.IdleProvider, + Identifier: args.Identifier, } options := trie.StorageManagerOptions{ @@ -140,6 +142,7 @@ func CreateTriesComponentsForShardId( MaxTrieLevelInMem: generalConfig.StateTriesConfig.MaxStateTrieLevelInMemory, SnapshotsEnabled: snapshotsEnabled, IdleProvider: coreComponentsHolder.ProcessStatusHandler(), + Identifier: dataRetriever.UserAccountsUnit.String(), } userStorageManager, userAccountTrie, err := trFactory.Create(args) if err != nil { @@ -149,8 +152,8 @@ func CreateTriesComponentsForShardId( trieContainer := state.NewDataTriesHolder() trieStorageManagers := make(map[string]common.StorageManager) - trieContainer.Put([]byte(UserAccountTrie), userAccountTrie) - trieStorageManagers[UserAccountTrie] = userStorageManager + trieContainer.Put([]byte(dataRetriever.UserAccountsUnit.String()), userAccountTrie) + trieStorageManagers[dataRetriever.UserAccountsUnit.String()] = userStorageManager mainStorer, err = storageService.GetStorer(dataRetriever.PeerAccountsUnit) if err != nil { @@ -170,14 +173,15 @@ func CreateTriesComponentsForShardId( MaxTrieLevelInMem: generalConfig.StateTriesConfig.MaxPeerTrieLevelInMemory, SnapshotsEnabled: snapshotsEnabled, IdleProvider: coreComponentsHolder.ProcessStatusHandler(), + Identifier: dataRetriever.PeerAccountsUnit.String(), } peerStorageManager, peerAccountsTrie, err := trFactory.Create(args) if err != nil { return nil, nil, err } - trieContainer.Put([]byte(PeerAccountTrie), peerAccountsTrie) - trieStorageManagers[PeerAccountTrie] = peerStorageManager + trieContainer.Put([]byte(dataRetriever.PeerAccountsUnit.String()), peerAccountsTrie) + trieStorageManagers[dataRetriever.PeerAccountsUnit.String()] = peerStorageManager return trieContainer, 
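With the trieCreator.go changes above (and the UserAccountTrie/PeerAccountTrie constants removed further down in trieFactoryArgs.go), tries and their storage managers are keyed by the storage-unit names, and each TrieCreateArgs carries that name as Identifier. A minimal sketch of the new keys follows; the nil map values are placeholders for what trFactory.Create returns in the node.

```go
package main

import (
	"fmt"

	"github.com/multiversx/mx-chain-go/common"
	"github.com/multiversx/mx-chain-go/dataRetriever"
)

func main() {
	// The keys formerly written as factory.UserAccountTrie / factory.PeerAccountTrie
	// are now the storage-unit names themselves.
	userKey := dataRetriever.UserAccountsUnit.String()
	peerKey := dataRetriever.PeerAccountsUnit.String()

	// Placeholder map mirroring trieStorageManagers from CreateTriesComponentsForShardId.
	trieStorageManagers := map[string]common.StorageManager{
		userKey: nil,
		peerKey: nil,
	}

	for key := range trieStorageManagers {
		fmt.Println("trie storage manager registered under:", key)
	}
}
```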
trieStorageManagers, nil } diff --git a/trie/factory/trieCreator_test.go b/trie/factory/trieCreator_test.go index 375969eb070..55bba27cea4 100644 --- a/trie/factory/trieCreator_test.go +++ b/trie/factory/trieCreator_test.go @@ -37,6 +37,7 @@ func getCreateArgs() factory.TrieCreateArgs { SnapshotsEnabled: true, MaxTrieLevelInMem: 5, IdleProvider: &testscommon.ProcessStatusHandlerStub{}, + Identifier: dataRetriever.UserAccountsUnit.String(), } } @@ -165,13 +166,49 @@ func TestTrieCreator_CreateWithNilCheckpointsStorerShouldErr(t *testing.T) { require.True(t, strings.Contains(err.Error(), trie.ErrNilStorer.Error())) } -func TestTrieCreator_CreateTriesComponentsForShardIdMissingStorer(t *testing.T) { +func TestTrieCreator_CreateWithInvalidMaxTrieLevelInMemShouldErr(t *testing.T) { + t.Parallel() + + args := getArgs() + tf, _ := factory.NewTrieFactory(args) + + createArgs := getCreateArgs() + createArgs.MaxTrieLevelInMem = 0 + _, tr, err := tf.Create(createArgs) + require.Nil(t, tr) + require.NotNil(t, err) + require.Contains(t, err.Error(), trie.ErrInvalidLevelValue.Error()) +} + +func TestTrieCreator_CreateTriesComponentsForShardId(t *testing.T) { t.Parallel() t.Run("missing UserAccountsUnit", testWithMissingStorer(dataRetriever.UserAccountsUnit)) t.Run("missing UserAccountsCheckpointsUnit", testWithMissingStorer(dataRetriever.UserAccountsCheckpointsUnit)) t.Run("missing PeerAccountsUnit", testWithMissingStorer(dataRetriever.PeerAccountsUnit)) t.Run("missing PeerAccountsCheckpointsUnit", testWithMissingStorer(dataRetriever.PeerAccountsCheckpointsUnit)) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + holder, storageManager, err := factory.CreateTriesComponentsForShardId( + false, + testscommon.GetGeneralConfig(), + &mock.CoreComponentsStub{ + InternalMarshalizerField: &testscommon.MarshalizerMock{}, + HasherField: &hashingMocks.HasherMock{}, + PathHandlerField: &testscommon.PathManagerStub{}, + ProcessStatusHandlerInternal: &testscommon.ProcessStatusHandlerStub{}, + }, + &storageStubs.ChainStorerStub{ + GetStorerCalled: func(unitType dataRetriever.UnitType) (storage.Storer, error) { + return &storageStubs.StorerStub{}, nil + }, + }, + ) + require.NotNil(t, holder) + require.NotNil(t, storageManager) + require.Nil(t, err) + }) } func testWithMissingStorer(missingUnit dataRetriever.UnitType) func(t *testing.T) { diff --git a/trie/factory/trieFactoryArgs.go b/trie/factory/trieFactoryArgs.go index 58f16c1e023..72ce26c4e4f 100644 --- a/trie/factory/trieFactoryArgs.go +++ b/trie/factory/trieFactoryArgs.go @@ -7,12 +7,6 @@ import ( "github.com/multiversx/mx-chain-go/storage" ) -// UserAccountTrie represents the use account identifier -const UserAccountTrie = "userAccount" - -// PeerAccountTrie represents the peer account identifier -const PeerAccountTrie = "peerAccount" - // TrieFactoryArgs holds the arguments for creating a trie factory type TrieFactoryArgs struct { Marshalizer marshal.Marshalizer diff --git a/trie/interface.go b/trie/interface.go index 4c6ffb45572..50c17b33a1f 100644 --- a/trie/interface.go +++ b/trie/interface.go @@ -24,36 +24,36 @@ type node interface { isPosCollapsed(pos int) bool isDirty() bool getEncodedNode() ([]byte, error) - resolveCollapsed(pos byte, db common.DBWriteCacher) error + resolveCollapsed(pos byte, db common.TrieStorageInteractor) error hashNode() ([]byte, error) hashChildren() error - tryGet(key []byte, depth uint32, db common.DBWriteCacher) ([]byte, uint32, error) - getNext(key []byte, db common.DBWriteCacher) (node, []byte, error) - insert(n 
*leafNode, db common.DBWriteCacher) (node, [][]byte, error) - delete(key []byte, db common.DBWriteCacher) (bool, node, [][]byte, error) + tryGet(key []byte, depth uint32, db common.TrieStorageInteractor) ([]byte, uint32, error) + getNext(key []byte, db common.TrieStorageInteractor) (node, []byte, error) + insert(n *leafNode, db common.TrieStorageInteractor) (node, [][]byte, error) + delete(key []byte, db common.TrieStorageInteractor) (bool, node, [][]byte, error) reduceNode(pos int) (node, bool, error) isEmptyOrNil() error - print(writer io.Writer, index int, db common.DBWriteCacher) + print(writer io.Writer, index int, db common.TrieStorageInteractor) getDirtyHashes(common.ModifiedHashes) error - getChildren(db common.DBWriteCacher) ([]node, error) + getChildren(db common.TrieStorageInteractor) ([]node, error) isValid() bool setDirty(bool) loadChildren(func([]byte) (node, error)) ([][]byte, []node, error) - getAllLeavesOnChannel(chan core.KeyValueHolder, common.KeyBuilder, common.DBWriteCacher, marshal.Marshalizer, chan struct{}, context.Context) error - getAllHashes(db common.DBWriteCacher) ([][]byte, error) + getAllLeavesOnChannel(chan core.KeyValueHolder, common.KeyBuilder, common.TrieStorageInteractor, marshal.Marshalizer, chan struct{}, context.Context) error + getAllHashes(db common.TrieStorageInteractor) ([][]byte, error) getNextHashAndKey([]byte) (bool, []byte, []byte) getValue() []byte - commitDirty(level byte, maxTrieLevelInMemory uint, originDb common.DBWriteCacher, targetDb common.DBWriteCacher) error - commitCheckpoint(originDb common.DBWriteCacher, targetDb common.DBWriteCacher, checkpointHashes CheckpointHashesHolder, leavesChan chan core.KeyValueHolder, ctx context.Context, stats common.TrieStatisticsHandler, idleProvider IdleNodeProvider, depthLevel int) error - commitSnapshot(originDb common.DBWriteCacher, leavesChan chan core.KeyValueHolder, missingNodesChan chan []byte, ctx context.Context, stats common.TrieStatisticsHandler, idleProvider IdleNodeProvider, depthLevel int) error + commitDirty(level byte, maxTrieLevelInMemory uint, originDb common.TrieStorageInteractor, targetDb common.BaseStorer) error + commitCheckpoint(originDb common.TrieStorageInteractor, targetDb common.BaseStorer, checkpointHashes CheckpointHashesHolder, leavesChan chan core.KeyValueHolder, ctx context.Context, stats common.TrieStatisticsHandler, idleProvider IdleNodeProvider, depthLevel int) error + commitSnapshot(originDb common.TrieStorageInteractor, leavesChan chan core.KeyValueHolder, missingNodesChan chan []byte, ctx context.Context, stats common.TrieStatisticsHandler, idleProvider IdleNodeProvider, depthLevel int) error getMarshalizer() marshal.Marshalizer setMarshalizer(marshal.Marshalizer) getHasher() hashing.Hasher setHasher(hashing.Hasher) sizeInBytes() int - collectStats(handler common.TrieStatisticsHandler, depthLevel int, db common.DBWriteCacher) error + collectStats(handler common.TrieStatisticsHandler, depthLevel int, db common.TrieStorageInteractor) error IsInterfaceNil() bool } @@ -63,8 +63,8 @@ type dbWithGetFromEpoch interface { } type snapshotNode interface { - commitCheckpoint(originDb common.DBWriteCacher, targetDb common.DBWriteCacher, checkpointHashes CheckpointHashesHolder, leavesChan chan core.KeyValueHolder, ctx context.Context, stats common.TrieStatisticsHandler, idleProvider IdleNodeProvider, depthLevel int) error - commitSnapshot(originDb common.DBWriteCacher, leavesChan chan core.KeyValueHolder, missingNodesChan chan []byte, ctx context.Context, stats 
common.TrieStatisticsHandler, idleProvider IdleNodeProvider, depthLevel int) error + commitCheckpoint(originDb common.TrieStorageInteractor, targetDb common.BaseStorer, checkpointHashes CheckpointHashesHolder, leavesChan chan core.KeyValueHolder, ctx context.Context, stats common.TrieStatisticsHandler, idleProvider IdleNodeProvider, depthLevel int) error + commitSnapshot(originDb common.TrieStorageInteractor, leavesChan chan core.KeyValueHolder, missingNodesChan chan []byte, ctx context.Context, stats common.TrieStatisticsHandler, idleProvider IdleNodeProvider, depthLevel int) error } // RequestHandler defines the methods through which request to data can be made @@ -96,7 +96,7 @@ type epochStorer interface { } type snapshotPruningStorer interface { - common.DBWriteCacher + common.BaseStorer GetFromOldEpochsWithoutAddingToCache(key []byte) ([]byte, core.OptionalUint32, error) GetFromLastEpoch(key []byte) ([]byte, error) PutInEpoch(key []byte, data []byte, epoch uint32) error @@ -122,8 +122,3 @@ type IdleNodeProvider interface { type storageManagerExtension interface { RemoveFromCheckpointHashesHolder(hash []byte) } - -// StorageMarker is used to mark the given storer as synced and active -type StorageMarker interface { - MarkStorerAsSyncedAndActive(storer common.StorageManager) -} diff --git a/trie/keyBuilder/disabledKeyBuilder_test.go b/trie/keyBuilder/disabledKeyBuilder_test.go new file mode 100644 index 00000000000..cdd63acfa1f --- /dev/null +++ b/trie/keyBuilder/disabledKeyBuilder_test.go @@ -0,0 +1,31 @@ +package keyBuilder + +import ( + "bytes" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestDisabledKeyBuilder(t *testing.T) { + t.Parallel() + + defer func() { + r := recover() + if r != nil { + require.Fail(t, "should have not panicked") + } + }() + + builder := NewDisabledKeyBuilder() + require.NotNil(t, builder) + + builder.BuildKey([]byte("key")) + + key, err := builder.GetKey() + require.Nil(t, err) + require.True(t, bytes.Equal(key, []byte{})) + + clonedBuilder := builder.Clone() + require.Equal(t, &disabledKeyBuilder{}, clonedBuilder) +} diff --git a/trie/leafNode.go b/trie/leafNode.go index cb4c4bfdc76..e20a38d4afd 100644 --- a/trie/leafNode.go +++ b/trie/leafNode.go @@ -14,7 +14,6 @@ import ( "github.com/multiversx/mx-chain-core-go/hashing" "github.com/multiversx/mx-chain-core-go/marshal" "github.com/multiversx/mx-chain-go/common" - "github.com/multiversx/mx-chain-go/errors" ) var _ = node(&leafNode{}) @@ -112,7 +111,7 @@ func (ln *leafNode) hashNode() ([]byte, error) { return encodeNodeAndGetHash(ln) } -func (ln *leafNode) commitDirty(_ byte, _ uint, _ common.DBWriteCacher, targetDb common.DBWriteCacher) error { +func (ln *leafNode) commitDirty(_ byte, _ uint, _ common.TrieStorageInteractor, targetDb common.BaseStorer) error { err := ln.isEmptyOrNil() if err != nil { return fmt.Errorf("commit error %w", err) @@ -129,8 +128,8 @@ func (ln *leafNode) commitDirty(_ byte, _ uint, _ common.DBWriteCacher, targetDb } func (ln *leafNode) commitCheckpoint( - _ common.DBWriteCacher, - targetDb common.DBWriteCacher, + _ common.TrieStorageInteractor, + targetDb common.BaseStorer, checkpointHashes CheckpointHashesHolder, leavesChan chan core.KeyValueHolder, ctx context.Context, @@ -139,7 +138,7 @@ func (ln *leafNode) commitCheckpoint( depthLevel int, ) error { if shouldStopIfContextDoneBlockingIfBusy(ctx, idleProvider) { - return errors.ErrContextClosing + return core.ErrContextClosing } err := ln.isEmptyOrNil() @@ -175,7 +174,7 @@ func (ln *leafNode) commitCheckpoint( 
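The trie/interface.go hunks above split the former common.DBWriteCacher role in two. The definitions below are not the ones from the common package; they are an assumed minimal shape inferred from how this patch uses the types: BaseStorer as a plain commit/checkpoint target, TrieStorageInteractor as the node-resolution side that additionally exposes GetIdentifier.

```go
// Package sketch illustrates the assumed split of the former DBWriteCacher role.
package sketch

// baseStorer: the surface needed by commit targets and checkpoint storers
// (positions previously typed as common.DBWriteCacher). The method set is an
// assumption, not copied from mx-chain-go/common.
type baseStorer interface {
	Get(key []byte) ([]byte, error)
	Put(key, val []byte) error
	Close() error
	IsInterfaceNil() bool
}

// trieStorageInteractor: the storer handed to node-resolution code; the only
// addition the patch visibly relies on is GetIdentifier, used to tag
// get-node-from-DB errors with the originating storer.
type trieStorageInteractor interface {
	baseStorer
	GetIdentifier() string
}
```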
} func (ln *leafNode) commitSnapshot( - db common.DBWriteCacher, + db common.TrieStorageInteractor, leavesChan chan core.KeyValueHolder, _ chan []byte, ctx context.Context, @@ -184,7 +183,7 @@ func (ln *leafNode) commitSnapshot( depthLevel int, ) error { if shouldStopIfContextDoneBlockingIfBusy(ctx, idleProvider) { - return errors.ErrContextClosing + return core.ErrContextClosing } err := ln.isEmptyOrNil() @@ -236,7 +235,7 @@ func (ln *leafNode) getEncodedNode() ([]byte, error) { return marshaledNode, nil } -func (ln *leafNode) resolveCollapsed(_ byte, _ common.DBWriteCacher) error { +func (ln *leafNode) resolveCollapsed(_ byte, _ common.TrieStorageInteractor) error { return nil } @@ -248,7 +247,7 @@ func (ln *leafNode) isPosCollapsed(_ int) bool { return false } -func (ln *leafNode) tryGet(key []byte, currentDepth uint32, _ common.DBWriteCacher) (value []byte, maxDepth uint32, err error) { +func (ln *leafNode) tryGet(key []byte, currentDepth uint32, _ common.TrieStorageInteractor) (value []byte, maxDepth uint32, err error) { err = ln.isEmptyOrNil() if err != nil { return nil, currentDepth, fmt.Errorf("tryGet error %w", err) @@ -260,7 +259,7 @@ func (ln *leafNode) tryGet(key []byte, currentDepth uint32, _ common.DBWriteCach return nil, currentDepth, nil } -func (ln *leafNode) getNext(key []byte, _ common.DBWriteCacher) (node, []byte, error) { +func (ln *leafNode) getNext(key []byte, _ common.TrieStorageInteractor) (node, []byte, error) { err := ln.isEmptyOrNil() if err != nil { return nil, nil, fmt.Errorf("getNext error %w", err) @@ -270,7 +269,7 @@ func (ln *leafNode) getNext(key []byte, _ common.DBWriteCacher) (node, []byte, e } return nil, nil, ErrNodeNotFound } -func (ln *leafNode) insert(n *leafNode, _ common.DBWriteCacher) (node, [][]byte, error) { +func (ln *leafNode) insert(n *leafNode, _ common.TrieStorageInteractor) (node, [][]byte, error) { err := ln.isEmptyOrNil() if err != nil { return nil, [][]byte{}, fmt.Errorf("insert error %w", err) @@ -344,7 +343,7 @@ func (ln *leafNode) insertInNewBn(n *leafNode, keyMatchLen int) (node, error) { return bn, nil } -func (ln *leafNode) delete(key []byte, _ common.DBWriteCacher) (bool, node, [][]byte, error) { +func (ln *leafNode) delete(key []byte, _ common.TrieStorageInteractor) (bool, node, [][]byte, error) { if bytes.Equal(key, ln.Key) { oldHash := make([][]byte, 0) if !ln.dirty { @@ -377,7 +376,7 @@ func (ln *leafNode) isEmptyOrNil() error { return nil } -func (ln *leafNode) print(writer io.Writer, _ int, _ common.DBWriteCacher) { +func (ln *leafNode) print(writer io.Writer, _ int, _ common.TrieStorageInteractor) { if ln == nil { return } @@ -409,7 +408,7 @@ func (ln *leafNode) getDirtyHashes(hashes common.ModifiedHashes) error { return nil } -func (ln *leafNode) getChildren(_ common.DBWriteCacher) ([]node, error) { +func (ln *leafNode) getChildren(_ common.TrieStorageInteractor) ([]node, error) { return nil, nil } @@ -428,7 +427,7 @@ func (ln *leafNode) loadChildren(_ func([]byte) (node, error)) ([][]byte, []node func (ln *leafNode) getAllLeavesOnChannel( leavesChannel chan core.KeyValueHolder, keyBuilder common.KeyBuilder, - _ common.DBWriteCacher, + _ common.TrieStorageInteractor, _ marshal.Marshalizer, chanClose chan struct{}, ctx context.Context, @@ -459,7 +458,7 @@ func (ln *leafNode) getAllLeavesOnChannel( } } -func (ln *leafNode) getAllHashes(_ common.DBWriteCacher) ([][]byte, error) { +func (ln *leafNode) getAllHashes(_ common.TrieStorageInteractor) ([][]byte, error) { err := ln.isEmptyOrNil() if err != nil { return nil, 
fmt.Errorf("getAllHashes error: %w", err) @@ -495,7 +494,7 @@ func (ln *leafNode) getValue() []byte { return ln.Value } -func (ln *leafNode) collectStats(ts common.TrieStatisticsHandler, depthLevel int, _ common.DBWriteCacher) error { +func (ln *leafNode) collectStats(ts common.TrieStatisticsHandler, depthLevel int, _ common.TrieStorageInteractor) error { err := ln.isEmptyOrNil() if err != nil { return fmt.Errorf("collectStats error %w", err) diff --git a/trie/leafNode_test.go b/trie/leafNode_test.go index b534e700d44..bf9cab8209b 100644 --- a/trie/leafNode_test.go +++ b/trie/leafNode_test.go @@ -8,7 +8,6 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/hashing" "github.com/multiversx/mx-chain-core-go/marshal" - chainErrors "github.com/multiversx/mx-chain-go/errors" "github.com/multiversx/mx-chain-go/storage/cache" "github.com/multiversx/mx-chain-go/testscommon" "github.com/multiversx/mx-chain-go/testscommon/hashingMocks" @@ -733,10 +732,10 @@ func TestLeafNode_commitContextDone(t *testing.T) { cancel() err := ln.commitCheckpoint(db, db, nil, nil, ctx, statistics.NewTrieStatistics(), &testscommon.ProcessStatusHandlerStub{}, 0) - assert.Equal(t, chainErrors.ErrContextClosing, err) + assert.Equal(t, core.ErrContextClosing, err) err = ln.commitSnapshot(db, nil, nil, ctx, statistics.NewTrieStatistics(), &testscommon.ProcessStatusHandlerStub{}, 0) - assert.Equal(t, chainErrors.ErrContextClosing, err) + assert.Equal(t, core.ErrContextClosing, err) } func TestLeafNode_getValue(t *testing.T) { diff --git a/trie/node.go b/trie/node.go index c69d0a9d484..617aab8b528 100644 --- a/trie/node.go +++ b/trie/node.go @@ -3,15 +3,13 @@ package trie import ( "context" - "encoding/hex" - "fmt" "runtime/debug" "time" + "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/hashing" "github.com/multiversx/mx-chain-core-go/marshal" "github.com/multiversx/mx-chain-go/common" - "github.com/multiversx/mx-chain-go/errors" "github.com/multiversx/mx-chain-go/trie/keyBuilder" logger "github.com/multiversx/mx-chain-logger-go" ) @@ -76,7 +74,7 @@ func encodeNodeAndGetHash(n node) ([]byte, error) { } // encodeNodeAndCommitToDB will encode and save provided node. 
It returns the node's value in bytes -func encodeNodeAndCommitToDB(n node, db common.DBWriteCacher) (int, error) { +func encodeNodeAndCommitToDB(n node, db common.BaseStorer) (int, error) { key, err := computeAndSetNodeHash(n) if err != nil { return 0, err @@ -118,12 +116,12 @@ func computeAndSetNodeHash(n node) ([]byte, error) { return key, nil } -func getNodeFromDBAndDecode(n []byte, db common.DBWriteCacher, marshalizer marshal.Marshalizer, hasher hashing.Hasher) (node, error) { +func getNodeFromDBAndDecode(n []byte, db common.TrieStorageInteractor, marshalizer marshal.Marshalizer, hasher hashing.Hasher) (node, error) { encChild, err := db.Get(n) if err != nil { treatLogError(log, err, n) - return nil, fmt.Errorf(common.GetNodeFromDBErrorString+" %w for key %v", err, hex.EncodeToString(n)) + return nil, core.NewGetNodeFromDBErrWithKey(n, err, db.GetIdentifier()) } return decodeNode(encChild, marshalizer, hasher) @@ -134,10 +132,10 @@ func treatLogError(logInstance logger.Logger, err error, key []byte) { return } - logInstance.Trace(common.GetNodeFromDBErrorString, "error", err, "key", key, "stack trace", string(debug.Stack())) + logInstance.Trace(core.GetNodeFromDBErrorString, "error", err, "key", key, "stack trace", string(debug.Stack())) } -func resolveIfCollapsed(n node, pos byte, db common.DBWriteCacher) error { +func resolveIfCollapsed(n node, pos byte, db common.TrieStorageInteractor) error { err := n.isEmptyOrNil() if err != nil { return err @@ -273,7 +271,7 @@ func shouldStopIfContextDoneBlockingIfBusy(ctx context.Context, idleProvider Idl } func treatCommitSnapshotError(err error, hash []byte, missingNodesChan chan []byte) { - if errors.IsClosingError(err) { + if core.IsClosingError(err) { log.Debug("context closing", "hash", hash) return } diff --git a/trie/node_test.go b/trie/node_test.go index f6bfcf165ce..0b6e850ee63 100644 --- a/trie/node_test.go +++ b/trie/node_test.go @@ -667,7 +667,7 @@ func TestTreatLogError(t *testing.T) { }, TraceCalled: func(message string, args ...interface{}) { wasCalled = true - require.Equal(t, common.GetNodeFromDBErrorString, message) + require.Equal(t, core.GetNodeFromDBErrorString, message) require.Equal(t, 6, len(args)) expectedFirst5Args := []interface{}{"error", err, "key", key, "stack trace"} require.Equal(t, expectedFirst5Args, args[:5]) diff --git a/trie/patriciaMerkleTrie.go b/trie/patriciaMerkleTrie.go index 544be32091d..fd1e41aca66 100644 --- a/trie/patriciaMerkleTrie.go +++ b/trie/patriciaMerkleTrie.go @@ -7,12 +7,12 @@ import ( "fmt" "sync" + "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-core-go/hashing" "github.com/multiversx/mx-chain-core-go/marshal" "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/dataRetriever" - "github.com/multiversx/mx-chain-go/errors" "github.com/multiversx/mx-chain-go/trie/statistics" logger "github.com/multiversx/mx-chain-logger-go" ) @@ -276,14 +276,9 @@ func (tr *patriciaMerkleTrie) recreate(root []byte, tsm common.StorageManager) ( ) } - _, err := tsm.Get(root) - if err != nil { - return nil, err - } - newTr, _, err := tr.recreateFromDb(root, tsm) if err != nil { - if errors.IsClosingError(err) { + if core.IsClosingError(err) { log.Debug("could not recreate", "rootHash", root, "error", err) return nil, err } diff --git a/trie/patriciaMerkleTrie_test.go b/trie/patriciaMerkleTrie_test.go index ae93bf933e4..384d6891b8f 100644 --- a/trie/patriciaMerkleTrie_test.go +++ 
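In trie/node.go above, a failed storage read is now wrapped with core.NewGetNodeFromDBErrWithKey, so the error carries the node hash and the identifier of the storer it came from (via the new GetIdentifier method) instead of a formatted string. Below is a minimal sketch of building such an error; key, cause, and identifier are invented values.

```go
package main

import (
	"errors"
	"fmt"

	"github.com/multiversx/mx-chain-core-go/core"
)

func main() {
	// Hypothetical values: a node hash that could not be read from the
	// user-accounts storer.
	key := []byte{0x0a, 0x0b}
	cause := errors.New("key not found")

	// Replaces the previous fmt.Errorf(common.GetNodeFromDBErrorString+" %w for key %v", ...).
	err := core.NewGetNodeFromDBErrWithKey(key, cause, "UserAccountsUnit")

	fmt.Println(err.Error())
}
```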
b/trie/patriciaMerkleTrie_test.go @@ -18,12 +18,9 @@ import ( "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/common/errChan" "github.com/multiversx/mx-chain-go/common/holders" - "github.com/multiversx/mx-chain-go/config" - "github.com/multiversx/mx-chain-go/testscommon" "github.com/multiversx/mx-chain-go/testscommon/storage" trieMock "github.com/multiversx/mx-chain-go/testscommon/trie" "github.com/multiversx/mx-chain-go/trie" - "github.com/multiversx/mx-chain-go/trie/hashesHolder" "github.com/multiversx/mx-chain-go/trie/keyBuilder" "github.com/multiversx/mx-chain-go/trie/mock" "github.com/stretchr/testify/assert" @@ -38,29 +35,8 @@ func emptyTrie() common.Trie { return tr } -func getDefaultTrieStorageManagerParameters() trie.NewTrieStorageManagerArgs { - marshalizer := &testscommon.ProtobufMarshalizerMock{} - hasher := &testscommon.KeccakMock{} - - generalCfg := config.TrieStorageManagerConfig{ - PruningBufferLen: 1000, - SnapshotsBufferLen: 10, - SnapshotsGoroutineNum: 1, - } - - return trie.NewTrieStorageManagerArgs{ - MainStorer: testscommon.NewSnapshotPruningStorerMock(), - CheckpointsStorer: testscommon.NewSnapshotPruningStorerMock(), - Marshalizer: marshalizer, - Hasher: hasher, - GeneralConfig: generalCfg, - CheckpointHashesHolder: hashesHolder.NewCheckpointHashesHolder(10000000, testscommon.HashSize), - IdleProvider: &testscommon.ProcessStatusHandlerStub{}, - } -} - func getDefaultTrieParameters() (common.StorageManager, marshal.Marshalizer, hashing.Hasher, uint) { - args := getDefaultTrieStorageManagerParameters() + args := trie.GetDefaultTrieStorageManagerParameters() trieStorageManager, _ := trie.NewTrieStorageManager(args) maxTrieLevelInMemory := uint(1) @@ -1050,7 +1026,7 @@ func TestPatriciaMerkleTrie_ConcurrentOperations(t *testing.T) { func TestPatriciaMerkleTrie_GetSerializedNodesClose(t *testing.T) { t.Parallel() - args := getDefaultTrieStorageManagerParameters() + args := trie.GetDefaultTrieStorageManagerParameters() args.MainStorer = &storage.StorerStub{ GetCalled: func(key []byte) ([]byte, error) { // gets take a long time diff --git a/trie/snapshotTrieStorageManager.go b/trie/snapshotTrieStorageManager.go index 5fe208be6df..133cb9080e4 100644 --- a/trie/snapshotTrieStorageManager.go +++ b/trie/snapshotTrieStorageManager.go @@ -6,7 +6,6 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-go/common" - "github.com/multiversx/mx-chain-go/errors" ) type snapshotTrieStorageManager struct { @@ -35,13 +34,13 @@ func (stsm *snapshotTrieStorageManager) Get(key []byte) ([]byte, error) { if stsm.closed { log.Debug("snapshotTrieStorageManager get context closing", "key", key) - return nil, errors.ErrContextClosing + return nil, core.ErrContextClosing } // test point get during snapshot val, epoch, err := stsm.mainSnapshotStorer.GetFromOldEpochsWithoutAddingToCache(key) - if errors.IsClosingError(err) { + if core.IsClosingError(err) { return nil, err } if len(val) != 0 { @@ -86,7 +85,7 @@ func (stsm *snapshotTrieStorageManager) Put(key, data []byte) error { if stsm.closed { log.Debug("snapshotTrieStorageManager put context closing", "key", key, "data", data) - return errors.ErrContextClosing + return core.ErrContextClosing } log.Trace("put hash in snapshot storer", "hash", key, "epoch", stsm.epoch) @@ -100,7 +99,7 @@ func (stsm *snapshotTrieStorageManager) GetFromLastEpoch(key []byte) ([]byte, er if stsm.closed { log.Debug("snapshotTrieStorageManager getFromLastEpoch context closing", "key", key) - return nil, 
errors.ErrContextClosing + return nil, core.ErrContextClosing } return stsm.mainSnapshotStorer.GetFromLastEpoch(key) diff --git a/trie/snapshotTrieStorageManager_test.go b/trie/snapshotTrieStorageManager_test.go index 9db4a24a9e3..a0c401a6eb8 100644 --- a/trie/snapshotTrieStorageManager_test.go +++ b/trie/snapshotTrieStorageManager_test.go @@ -1,12 +1,14 @@ package trie import ( + "errors" "strings" "testing" "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/storage" "github.com/multiversx/mx-chain-go/testscommon/trie" "github.com/stretchr/testify/assert" ) @@ -14,7 +16,9 @@ import ( func TestNewSnapshotTrieStorageManagerInvalidStorerType(t *testing.T) { t.Parallel() - _, trieStorage := newEmptyTrie() + args := GetDefaultTrieStorageManagerParameters() + args.MainStorer = createMemUnit() + trieStorage, _ := NewTrieStorageManager(args) stsm, err := newSnapshotTrieStorageManager(trieStorage, 0) assert.True(t, check.IfNil(stsm)) @@ -31,55 +35,117 @@ func TestNewSnapshotTrieStorageManager(t *testing.T) { assert.False(t, check.IfNil(stsm)) } -func TestNewSnapshotTrieStorageManager_GetFromOldEpochsWithoutCache(t *testing.T) { +func TestSnapshotTrieStorageManager_Get(t *testing.T) { t.Parallel() - _, trieStorage := newEmptyTrie() - getFromOldEpochsWithoutCacheCalled := false - trieStorage.mainStorer = &trie.SnapshotPruningStorerStub{ - GetFromOldEpochsWithoutAddingToCacheCalled: func(_ []byte) ([]byte, core.OptionalUint32, error) { - getFromOldEpochsWithoutCacheCalled = true - return nil, core.OptionalUint32{}, nil - }, - } - stsm, _ := newSnapshotTrieStorageManager(trieStorage, 0) - - _, _ = stsm.Get([]byte("key")) - assert.True(t, getFromOldEpochsWithoutCacheCalled) + t.Run("closed storage manager should error", func(t *testing.T) { + t.Parallel() + + _, trieStorage := newEmptyTrie() + trieStorage.mainStorer = &trie.SnapshotPruningStorerStub{} + stsm, _ := newSnapshotTrieStorageManager(trieStorage, 0) + _ = stsm.Close() + + val, err := stsm.Get([]byte("key")) + assert.Equal(t, core.ErrContextClosing, err) + assert.Nil(t, val) + }) + t.Run("GetFromOldEpochsWithoutAddingToCache returns db closed should error", func(t *testing.T) { + t.Parallel() + + _, trieStorage := newEmptyTrie() + trieStorage.mainStorer = &trie.SnapshotPruningStorerStub{ + GetFromOldEpochsWithoutAddingToCacheCalled: func(_ []byte) ([]byte, core.OptionalUint32, error) { + return nil, core.OptionalUint32{}, storage.ErrDBIsClosed + }, + } + stsm, _ := newSnapshotTrieStorageManager(trieStorage, 0) + + val, err := stsm.Get([]byte("key")) + assert.Equal(t, storage.ErrDBIsClosed, err) + assert.Nil(t, val) + }) + t.Run("should work from old epochs without cache", func(t *testing.T) { + t.Parallel() + + _, trieStorage := newEmptyTrie() + getFromOldEpochsWithoutCacheCalled := false + trieStorage.mainStorer = &trie.SnapshotPruningStorerStub{ + GetFromOldEpochsWithoutAddingToCacheCalled: func(_ []byte) ([]byte, core.OptionalUint32, error) { + getFromOldEpochsWithoutCacheCalled = true + return nil, core.OptionalUint32{}, nil + }, + } + stsm, _ := newSnapshotTrieStorageManager(trieStorage, 0) + + _, _ = stsm.Get([]byte("key")) + assert.True(t, getFromOldEpochsWithoutCacheCalled) + }) } -func TestNewSnapshotTrieStorageManager_PutWithoutCache(t *testing.T) { +func TestSnapshotTrieStorageManager_Put(t *testing.T) { t.Parallel() - _, trieStorage := newEmptyTrie() - putWithoutCacheCalled := false - trieStorage.mainStorer 
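snapshotTrieStorageManager above (and trieStorageManager later in the patch) now fails fast with core.ErrContextClosing once the manager is closed, and passes through any closing error reported by the underlying storer via core.IsClosingError. A compact sketch of that guard follows; the closed flag and the getter callback are stand-ins, only the two core helpers come from the patch.

```go
package main

import (
	"errors"
	"fmt"

	"github.com/multiversx/mx-chain-core-go/core"
)

// guardedGet sketches the pattern: a closed manager never touches the storer,
// and a closing error coming from the storer is returned unchanged.
func guardedGet(closed bool, get func(key []byte) ([]byte, error), key []byte) ([]byte, error) {
	if closed {
		return nil, core.ErrContextClosing
	}

	val, err := get(key)
	if core.IsClosingError(err) {
		return nil, err
	}

	return val, err
}

func main() {
	fromStorer := func(_ []byte) ([]byte, error) { return []byte("value"), nil }

	_, err := guardedGet(true, fromStorer, []byte("key"))
	fmt.Println(errors.Is(err, core.ErrContextClosing)) // true

	val, err := guardedGet(false, fromStorer, []byte("key"))
	fmt.Println(string(val), err) // value <nil>
}
```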
= &trie.SnapshotPruningStorerStub{ - PutInEpochWithoutCacheCalled: func(_ []byte, _ []byte, _ uint32) error { - putWithoutCacheCalled = true - return nil - }, - } - stsm, _ := newSnapshotTrieStorageManager(trieStorage, 0) - - _ = stsm.Put([]byte("key"), []byte("data")) - assert.True(t, putWithoutCacheCalled) + t.Run("closed storage manager should error", func(t *testing.T) { + t.Parallel() + + _, trieStorage := newEmptyTrie() + trieStorage.mainStorer = &trie.SnapshotPruningStorerStub{} + stsm, _ := newSnapshotTrieStorageManager(trieStorage, 0) + _ = stsm.Close() + + err := stsm.Put([]byte("key"), []byte("data")) + assert.Equal(t, core.ErrContextClosing, err) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + _, trieStorage := newEmptyTrie() + putWithoutCacheCalled := false + trieStorage.mainStorer = &trie.SnapshotPruningStorerStub{ + PutInEpochWithoutCacheCalled: func(_ []byte, _ []byte, _ uint32) error { + putWithoutCacheCalled = true + return nil + }, + } + stsm, _ := newSnapshotTrieStorageManager(trieStorage, 0) + + _ = stsm.Put([]byte("key"), []byte("data")) + assert.True(t, putWithoutCacheCalled) + }) } -func TestNewSnapshotTrieStorageManager_GetFromLastEpoch(t *testing.T) { +func TestSnapshotTrieStorageManager_GetFromLastEpoch(t *testing.T) { t.Parallel() - _, trieStorage := newEmptyTrie() - getFromLastEpochCalled := false - trieStorage.mainStorer = &trie.SnapshotPruningStorerStub{ - GetFromLastEpochCalled: func(_ []byte) ([]byte, error) { - getFromLastEpochCalled = true - return nil, nil - }, - } - stsm, _ := newSnapshotTrieStorageManager(trieStorage, 0) - - _, _ = stsm.GetFromLastEpoch([]byte("key")) - assert.True(t, getFromLastEpochCalled) + t.Run("closed storage manager should error", func(t *testing.T) { + t.Parallel() + + _, trieStorage := newEmptyTrie() + trieStorage.mainStorer = &trie.SnapshotPruningStorerStub{} + stsm, _ := newSnapshotTrieStorageManager(trieStorage, 0) + _ = stsm.Close() + + val, err := stsm.GetFromLastEpoch([]byte("key")) + assert.Equal(t, core.ErrContextClosing, err) + assert.Nil(t, val) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + _, trieStorage := newEmptyTrie() + getFromLastEpochCalled := false + trieStorage.mainStorer = &trie.SnapshotPruningStorerStub{ + GetFromLastEpochCalled: func(_ []byte) ([]byte, error) { + getFromLastEpochCalled = true + return nil, nil + }, + } + stsm, _ := newSnapshotTrieStorageManager(trieStorage, 0) + + _, _ = stsm.GetFromLastEpoch([]byte("key")) + assert.True(t, getFromLastEpochCalled) + }) } func TestSnapshotTrieStorageManager_AlsoAddInPreviousEpoch(t *testing.T) { @@ -200,7 +266,7 @@ func TestSnapshotTrieStorageManager_AlsoAddInPreviousEpoch(t *testing.T) { }, PutInEpochCalled: func(_ []byte, _ []byte, _ uint32) error { putInEpochCalled = true - return nil + return errors.New("error for coverage only") }, } stsm, _ := newSnapshotTrieStorageManager(trieStorage, 5) diff --git a/trie/statistics/trieStatisticsCollector_test.go b/trie/statistics/trieStatisticsCollector_test.go index c2d6a9ab75d..e63af29fd9f 100644 --- a/trie/statistics/trieStatisticsCollector_test.go +++ b/trie/statistics/trieStatisticsCollector_test.go @@ -8,11 +8,13 @@ import ( "github.com/stretchr/testify/assert" ) -func TestSnapshotStatistics_AddTrieStats(t *testing.T) { +func TestSnapshotStatistics_Add(t *testing.T) { t.Parallel() tsc := NewTrieStatisticsCollector() + tsc.Add(nil) // coverage, early exit + numInserts := 100 for i := 0; i < numInserts; i++ { tsc.Add(getTrieStatsDTO(rand.Intn(numInserts), 
uint64(rand.Intn(numInserts)))) @@ -43,6 +45,8 @@ func TestSnapshotStatistics_AddTrieStats(t *testing.T) { assert.Equal(t, numTriesToPrint, len(tsc.triesBySize)) assert.Equal(t, numTriesToPrint, len(tsc.triesByDepth)) assert.Equal(t, uint64(i+1), tsc.GetNumNodes()) + + tsc.Print() // coverage } } diff --git a/trie/statistics/trieStatistics_test.go b/trie/statistics/trieStatistics_test.go index 31773b33fa7..d0870ced3dd 100644 --- a/trie/statistics/trieStatistics_test.go +++ b/trie/statistics/trieStatistics_test.go @@ -1,9 +1,13 @@ package statistics import ( + "encoding/hex" + "fmt" "testing" + "github.com/multiversx/mx-chain-core-go/core" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestTrieStatistics_AddBranchNode(t *testing.T) { @@ -107,3 +111,39 @@ func TestTrieStatistics_GetTrieStats(t *testing.T) { assert.Equal(t, uint64(numExtensions), stats.NumExtensionNodes) assert.Equal(t, uint64(numLeaves), stats.NumLeafNodes) } + +func TestTrieStatsDTO_ToString(t *testing.T) { + t.Parallel() + + tsd := TrieStatsDTO{ + Address: "address", + RootHash: []byte("root hash"), + TotalNodesSize: 1, + TotalNumNodes: 1, + MaxTrieDepth: 1, + BranchNodesSize: 1, + NumBranchNodes: 1, + ExtensionNodesSize: 1, + NumExtensionNodes: 1, + LeafNodesSize: 1, + NumLeafNodes: 1, + } + + expectedLines := []string{ + fmt.Sprintf("address %v,", tsd.Address), + fmt.Sprintf("rootHash %v,", hex.EncodeToString(tsd.RootHash)), + fmt.Sprintf("total trie size = %v,", core.ConvertBytes(tsd.TotalNodesSize)), + fmt.Sprintf("num trie nodes = %v,", tsd.TotalNumNodes), + fmt.Sprintf("max trie depth = %v,", tsd.MaxTrieDepth), + fmt.Sprintf("branch nodes size %v,", core.ConvertBytes(tsd.BranchNodesSize)), + fmt.Sprintf("extension nodes size %v,", core.ConvertBytes(tsd.ExtensionNodesSize)), + fmt.Sprintf("leaf nodes size %v,", core.ConvertBytes(tsd.LeafNodesSize)), + fmt.Sprintf("num branches %v,", tsd.NumBranchNodes), + fmt.Sprintf("num extensions %v,", tsd.NumExtensionNodes), + fmt.Sprintf("num leaves %v", tsd.NumLeafNodes), + } + stringDTO := tsd.ToString() + for i, line := range stringDTO { + require.Equal(t, expectedLines[i], line) + } +} diff --git a/trie/storageMarker/disabledStorageMarker.go b/trie/storageMarker/disabledStorageMarker.go index 5070910b61f..62257e62f60 100644 --- a/trie/storageMarker/disabledStorageMarker.go +++ b/trie/storageMarker/disabledStorageMarker.go @@ -13,3 +13,8 @@ func NewDisabledStorageMarker() *disabledStorageMarker { // MarkStorerAsSyncedAndActive does nothing for this implementation func (dsm *disabledStorageMarker) MarkStorerAsSyncedAndActive(_ common.StorageManager) { } + +// IsInterfaceNil returns true if there is no value under the interface +func (dsm *disabledStorageMarker) IsInterfaceNil() bool { + return dsm == nil +} diff --git a/trie/storageMarker/trieStorageMarker.go b/trie/storageMarker/trieStorageMarker.go index 59ec321d0df..ca5ce3287d5 100644 --- a/trie/storageMarker/trieStorageMarker.go +++ b/trie/storageMarker/trieStorageMarker.go @@ -39,3 +39,8 @@ func (sm *trieStorageMarker) MarkStorerAsSyncedAndActive(storer common.StorageMa } log.Debug("set activeDB in epoch", "epoch", lastEpoch) } + +// IsInterfaceNil returns true if there is no value under the interface +func (sm *trieStorageMarker) IsInterfaceNil() bool { + return sm == nil +} diff --git a/trie/storageMarker/trieStorageMarker_test.go b/trie/storageMarker/trieStorageMarker_test.go index ae6699801cb..af42a3c439a 100644 --- a/trie/storageMarker/trieStorageMarker_test.go +++ 
b/trie/storageMarker/trieStorageMarker_test.go @@ -1,23 +1,54 @@ package storageMarker import ( + "errors" "testing" "github.com/multiversx/mx-chain-go/common" - "github.com/multiversx/mx-chain-go/testscommon" + "github.com/multiversx/mx-chain-go/testscommon/storageManager" "github.com/stretchr/testify/assert" ) func TestTrieStorageMarker_MarkStorerAsSyncedAndActive(t *testing.T) { t.Parallel() + t.Run("all operations error should work", func(t *testing.T) { + t.Parallel() + + sm := NewTrieStorageMarker() + assert.NotNil(t, sm) + + expectedErr := errors.New("expected err") + + getLatestStorageEpochCalled := false + putCalled := false + putInEpochWithoutCacheCalled := false + storer := &storageManager.StorageManagerStub{ + GetLatestStorageEpochCalled: func() (uint32, error) { + getLatestStorageEpochCalled = true + return 0, expectedErr + }, + PutCalled: func(key []byte, val []byte) error { + putCalled = true + return expectedErr + }, + PutInEpochWithoutCacheCalled: func(key []byte, val []byte, epoch uint32) error { + putInEpochWithoutCacheCalled = true + return expectedErr + }, + } + sm.MarkStorerAsSyncedAndActive(storer) + assert.True(t, getLatestStorageEpochCalled) + assert.True(t, putCalled) + assert.True(t, putInEpochWithoutCacheCalled) + }) t.Run("mark storer as synced and active epoch 5", func(t *testing.T) { sm := NewTrieStorageMarker() assert.NotNil(t, sm) trieSyncedKeyPut := false activeDbKeyPut := false - storer := &testscommon.StorageManagerStub{ + storer := &storageManager.StorageManagerStub{ GetLatestStorageEpochCalled: func() (uint32, error) { return 5, nil }, @@ -45,7 +76,7 @@ func TestTrieStorageMarker_MarkStorerAsSyncedAndActive(t *testing.T) { trieSyncedKeyPut := false activeDbKeyPut := false - storer := &testscommon.StorageManagerStub{ + storer := &storageManager.StorageManagerStub{ GetLatestStorageEpochCalled: func() (uint32, error) { return 0, nil }, diff --git a/trie/sync.go b/trie/sync.go index 5acd55c6b44..ce48f8c8e6b 100644 --- a/trie/sync.go +++ b/trie/sync.go @@ -14,7 +14,6 @@ import ( "github.com/multiversx/mx-chain-core-go/hashing" "github.com/multiversx/mx-chain-core-go/marshal" "github.com/multiversx/mx-chain-go/common" - "github.com/multiversx/mx-chain-go/errors" "github.com/multiversx/mx-chain-go/storage" ) @@ -34,7 +33,7 @@ type trieSyncer struct { waitTimeBetweenRequests time.Duration marshalizer marshal.Marshalizer hasher hashing.Hasher - db common.DBWriteCacher + db common.TrieStorageInteractor requestHandler RequestHandler interceptedNodesCacher storage.Cacher mutOperation sync.RWMutex @@ -164,7 +163,7 @@ func (ts *trieSyncer) StartSyncing(rootHash []byte, ctx context.Context) error { case <-time.After(ts.waitTimeBetweenRequests): continue case <-ctx.Done(): - return errors.ErrContextClosing + return core.ErrContextClosing } } } @@ -311,7 +310,7 @@ func (ts *trieSyncer) getNode(hash []byte) (node, error) { func getNodeFromCacheOrStorage( hash []byte, interceptedNodesCacher storage.Cacher, - db common.DBWriteCacher, + db common.TrieStorageInteractor, marshalizer marshal.Marshalizer, hasher hashing.Hasher, ) (node, error) { diff --git a/trie/syncTrieStorageManager_test.go b/trie/syncTrieStorageManager_test.go index afebd9fd918..2290c4bf08c 100644 --- a/trie/syncTrieStorageManager_test.go +++ b/trie/syncTrieStorageManager_test.go @@ -1,6 +1,7 @@ package trie import ( + "errors" "strings" "testing" @@ -19,7 +20,9 @@ func TestNewSyncTrieStorageManagerNilTsm(t *testing.T) { func TestNewSyncTrieStorageManagerInvalidStorerType(t *testing.T) { t.Parallel() - _, 
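Both storage markers above gain IsInterfaceNil, so they can be validated with the usual check.IfNil idiom; a tiny usage sketch:

```go
package main

import (
	"fmt"

	"github.com/multiversx/mx-chain-core-go/core/check"
	"github.com/multiversx/mx-chain-go/trie/storageMarker"
)

func main() {
	// Both constructors exist in the package; with IsInterfaceNil implemented,
	// the markers can be guarded like any other injected component.
	fmt.Println(check.IfNil(storageMarker.NewDisabledStorageMarker())) // false
	fmt.Println(check.IfNil(storageMarker.NewTrieStorageMarker()))     // false
}
```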
trieStorage := newEmptyTrie() + args := GetDefaultTrieStorageManagerParameters() + args.MainStorer = createMemUnit() + trieStorage, _ := NewTrieStorageManager(args) stsm, err := NewSyncTrieStorageManager(trieStorage) assert.Nil(t, stsm) @@ -57,6 +60,22 @@ func TestNewSyncTrieStorageManager_PutInFirstEpoch(t *testing.T) { assert.Equal(t, 1, putInEpochCalled) } +func TestNewSyncTrieStorageManager_PutInEpochError(t *testing.T) { + t.Parallel() + + expectedErr := errors.New("expected error") + _, trieStorage := newEmptyTrie() + trieStorage.mainStorer = &trie.SnapshotPruningStorerStub{ + PutInEpochCalled: func(_ []byte, _ []byte, _ uint32) error { + return expectedErr + }, + } + stsm, _ := NewSyncTrieStorageManager(trieStorage) + + err := stsm.Put([]byte("key"), []byte("val")) + assert.Equal(t, expectedErr, err) +} + func TestNewSyncTrieStorageManager_PutInEpoch(t *testing.T) { t.Parallel() diff --git a/trie/sync_test.go b/trie/sync_test.go index 3b783f90c11..fcbf0ec04f7 100644 --- a/trie/sync_test.go +++ b/trie/sync_test.go @@ -10,6 +10,7 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/testscommon" "github.com/multiversx/mx-chain-go/testscommon/hashingMocks" trieMock "github.com/multiversx/mx-chain-go/testscommon/trie" @@ -259,3 +260,50 @@ func TestTrieSync_FoundInStorageShouldNotRequest(t *testing.T) { assert.Equal(t, numLeaves, numLeavesOnChan) } + +func TestTrieSync_StartSyncing(t *testing.T) { + t.Parallel() + + t.Run("nil hash should return nil", func(t *testing.T) { + t.Parallel() + + timeout := time.Second * 2 + arg := createMockArgument(timeout) + ts, _ := NewTrieSyncer(arg) + + err := ts.StartSyncing(nil, context.Background()) + assert.NoError(t, err) + }) + t.Run("empty trie hash should return nil", func(t *testing.T) { + t.Parallel() + + timeout := time.Second * 2 + arg := createMockArgument(timeout) + ts, _ := NewTrieSyncer(arg) + + err := ts.StartSyncing(common.EmptyTrieHash, context.Background()) + assert.NoError(t, err) + }) + t.Run("nil context should error", func(t *testing.T) { + t.Parallel() + + timeout := time.Second * 2 + arg := createMockArgument(timeout) + ts, _ := NewTrieSyncer(arg) + + err := ts.StartSyncing([]byte("roothash"), nil) + assert.Equal(t, ErrNilContext, err) + }) + t.Run("closed context should error", func(t *testing.T) { + t.Parallel() + + timeout := time.Second * 2 + arg := createMockArgument(timeout) + ts, _ := NewTrieSyncer(arg) + + ctx, cancel := context.WithCancel(context.Background()) + cancel() + err := ts.StartSyncing([]byte("roothash"), ctx) + assert.Equal(t, core.ErrContextClosing, err) + }) +} diff --git a/trie/trieNodesHandler_test.go b/trie/trieNodesHandler_test.go index 86b4d6431ab..146200ac3ee 100644 --- a/trie/trieNodesHandler_test.go +++ b/trie/trieNodesHandler_test.go @@ -63,6 +63,20 @@ func TestTrieNodesHandler_jobDone(t *testing.T) { assert.True(t, handler.jobDone()) } +func TestTrieNodesHandler_noMissingHashes(t *testing.T) { + t.Parallel() + + roothash := "roothash" + handler := newTrieNodesHandler() + assert.True(t, handler.noMissingHashes()) + + handler.addInitialRootHash(roothash) + assert.False(t, handler.noMissingHashes()) + + handler.processMissingHashWasFound(&leafNode{}, roothash) + assert.True(t, handler.noMissingHashes()) +} + func TestTrieNodesHandler_replaceParentWithChildren(t *testing.T) { t.Parallel() diff --git 
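As the new sync tests above assert, trieSyncer.StartSyncing now reports the shared core.ErrContextClosing when its context is cancelled. The sketch below reproduces only that wait-or-abort step; the function name and the wait duration are illustrative.

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/multiversx/mx-chain-core-go/core"
)

// waitOrAbort mirrors the retry loop's exit condition: keep waiting between
// node requests, but bail out with core.ErrContextClosing once the context is done.
func waitOrAbort(ctx context.Context, wait time.Duration) error {
	select {
	case <-time.After(wait):
		return nil
	case <-ctx.Done():
		return core.ErrContextClosing
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	cancel()

	fmt.Println(waitOrAbort(ctx, time.Second)) // returns the closing error immediately
}
```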
a/trie/trieStorageManager.go b/trie/trieStorageManager.go index 99fa6895bb7..0fee55ade72 100644 --- a/trie/trieStorageManager.go +++ b/trie/trieStorageManager.go @@ -16,23 +16,23 @@ import ( "github.com/multiversx/mx-chain-core-go/marshal" "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/config" - "github.com/multiversx/mx-chain-go/errors" "github.com/multiversx/mx-chain-go/trie/statistics" ) // trieStorageManager manages all the storage operations of the trie (commit, snapshot, checkpoint, pruning) type trieStorageManager struct { - mainStorer common.DBWriteCacher + mainStorer common.BaseStorer + checkpointsStorer common.BaseStorer pruningBlockingOps uint32 snapshotReq chan *snapshotsQueueEntry checkpointReq chan *snapshotsQueueEntry - checkpointsStorer common.DBWriteCacher checkpointHashesHolder CheckpointHashesHolder storageOperationMutex sync.RWMutex cancelFunc context.CancelFunc closer core.SafeCloser closed bool idleProvider IdleNodeProvider + identifier string } type snapshotsQueueEntry struct { @@ -47,13 +47,14 @@ type snapshotsQueueEntry struct { // NewTrieStorageManagerArgs holds the arguments needed for creating a new trieStorageManager type NewTrieStorageManagerArgs struct { - MainStorer common.DBWriteCacher - CheckpointsStorer common.DBWriteCacher + MainStorer common.BaseStorer + CheckpointsStorer common.BaseStorer Marshalizer marshal.Marshalizer Hasher hashing.Hasher GeneralConfig config.TrieStorageManagerConfig CheckpointHashesHolder CheckpointHashesHolder IdleProvider IdleNodeProvider + Identifier string } // NewTrieStorageManager creates a new instance of trieStorageManager @@ -76,6 +77,9 @@ func NewTrieStorageManager(args NewTrieStorageManagerArgs) (*trieStorageManager, if check.IfNil(args.IdleProvider) { return nil, ErrNilIdleNodeProvider } + if len(args.Identifier) == 0 { + return nil, ErrInvalidIdentifier + } ctx, cancelFunc := context.WithCancel(context.Background()) @@ -89,6 +93,7 @@ func NewTrieStorageManager(args NewTrieStorageManagerArgs) (*trieStorageManager, checkpointHashesHolder: args.CheckpointHashesHolder, closer: closing.NewSafeChanCloser(), idleProvider: args.IdleProvider, + identifier: args.Identifier, } goRoutinesThrottler, err := throttler.NewNumGoRoutinesThrottler(int32(args.GeneralConfig.SnapshotsGoroutineNum)) if err != nil { @@ -176,11 +181,11 @@ func (tsm *trieStorageManager) Get(key []byte) ([]byte, error) { if tsm.closed { log.Trace("trieStorageManager get context closing", "key", key) - return nil, errors.ErrContextClosing + return nil, core.ErrContextClosing } val, err := tsm.mainStorer.Get(key) - if errors.IsClosingError(err) { + if core.IsClosingError(err) { return nil, err } if len(val) != 0 { @@ -197,7 +202,7 @@ func (tsm *trieStorageManager) GetFromCurrentEpoch(key []byte) ([]byte, error) { if tsm.closed { log.Trace("trieStorageManager get context closing", "key", key) tsm.storageOperationMutex.Unlock() - return nil, errors.ErrContextClosing + return nil, core.ErrContextClosing } storer, ok := tsm.mainStorer.(snapshotPruningStorer) @@ -214,7 +219,7 @@ func (tsm *trieStorageManager) GetFromCurrentEpoch(key []byte) ([]byte, error) { func (tsm *trieStorageManager) getFromOtherStorers(key []byte) ([]byte, error) { val, err := tsm.checkpointsStorer.Get(key) - if errors.IsClosingError(err) { + if core.IsClosingError(err) { return nil, err } if len(val) != 0 { @@ -232,7 +237,7 @@ func (tsm *trieStorageManager) Put(key []byte, val []byte) error { if tsm.closed { log.Trace("trieStorageManager put context closing", 
"key", key, "value", val) - return errors.ErrContextClosing + return core.ErrContextClosing } return tsm.mainStorer.Put(key, val) @@ -246,7 +251,7 @@ func (tsm *trieStorageManager) PutInEpoch(key []byte, val []byte, epoch uint32) if tsm.closed { log.Trace("trieStorageManager putInEpoch context closing", "key", key, "value", val, "epoch", epoch) - return errors.ErrContextClosing + return core.ErrContextClosing } storer, ok := tsm.mainStorer.(snapshotPruningStorer) @@ -265,7 +270,7 @@ func (tsm *trieStorageManager) PutInEpochWithoutCache(key []byte, val []byte, ep if tsm.closed { log.Trace("trieStorageManager putInEpochWithoutCache context closing", "key", key, "value", val, "epoch", epoch) - return errors.ErrContextClosing + return core.ErrContextClosing } storer, ok := tsm.mainStorer.(snapshotPruningStorer) @@ -503,7 +508,7 @@ func (tsm *trieStorageManager) takeCheckpoint(checkpointEntry *snapshotsQueueEnt } func treatSnapshotError(err error, message string, rootHash []byte, mainTrieRootHash []byte) { - if errors.IsClosingError(err) { + if core.IsClosingError(err) { log.Debug("context closing", "message", message, "rootHash", rootHash, "mainTrieRootHash", mainTrieRootHash) return } @@ -512,7 +517,7 @@ func treatSnapshotError(err error, message string, rootHash []byte, mainTrieRoot } func newSnapshotNode( - db common.DBWriteCacher, + db common.TrieStorageInteractor, msh marshal.Marshalizer, hsh hashing.Hasher, rootHash []byte, @@ -520,7 +525,7 @@ func newSnapshotNode( ) (snapshotNode, error) { newRoot, err := getNodeFromDBAndDecode(rootHash, db, msh, hsh) if err != nil { - if strings.Contains(err.Error(), common.GetNodeFromDBErrorString) { + if strings.Contains(err.Error(), core.GetNodeFromDBErrorString) { treatCommitSnapshotError(err, rootHash, missingNodesCh) } return nil, err @@ -674,6 +679,11 @@ func (tsm *trieStorageManager) GetBaseTrieStorageManager() common.StorageManager return tsm } +// GetIdentifier returns the identifier of the main storer +func (tsm *trieStorageManager) GetIdentifier() string { + return tsm.identifier +} + // IsInterfaceNil returns true if there is no value under the interface func (tsm *trieStorageManager) IsInterfaceNil() bool { return tsm == nil diff --git a/trie/trieStorageManagerFactory_test.go b/trie/trieStorageManagerFactory_test.go index 8045a06d707..fcf2150b645 100644 --- a/trie/trieStorageManagerFactory_test.go +++ b/trie/trieStorageManagerFactory_test.go @@ -7,7 +7,7 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/common/errChan" - "github.com/multiversx/mx-chain-go/testscommon" + "github.com/multiversx/mx-chain-go/testscommon/storageManager" trieMock "github.com/multiversx/mx-chain-go/testscommon/trie" "github.com/multiversx/mx-chain-go/trie" "github.com/stretchr/testify/assert" @@ -26,7 +26,7 @@ func TestTrieFactory_CreateWithoutPruning(t *testing.T) { options := getTrieStorageManagerOptions() options.PruningEnabled = false - tsm, err := trie.CreateTrieStorageManager(getNewTrieStorageManagerArgs(), options) + tsm, err := trie.CreateTrieStorageManager(trie.GetDefaultTrieStorageManagerParameters(), options) assert.Nil(t, err) assert.Equal(t, "*trie.trieStorageManagerWithoutPruning", fmt.Sprintf("%T", tsm)) } @@ -36,7 +36,7 @@ func TestTrieFactory_CreateWithoutSnapshot(t *testing.T) { options := getTrieStorageManagerOptions() options.SnapshotsEnabled = false - tsm, err := trie.CreateTrieStorageManager(getNewTrieStorageManagerArgs(), options) + tsm, err := 
trie.CreateTrieStorageManager(trie.GetDefaultTrieStorageManagerParameters(), options) assert.Nil(t, err) assert.Equal(t, "*trie.trieStorageManagerWithoutSnapshot", fmt.Sprintf("%T", tsm)) } @@ -46,7 +46,7 @@ func TestTrieFactory_CreateWithoutCheckpoints(t *testing.T) { options := getTrieStorageManagerOptions() options.CheckpointsEnabled = false - tsm, err := trie.CreateTrieStorageManager(getNewTrieStorageManagerArgs(), options) + tsm, err := trie.CreateTrieStorageManager(trie.GetDefaultTrieStorageManagerParameters(), options) assert.Nil(t, err) assert.Equal(t, "*trie.trieStorageManagerWithoutCheckpoints", fmt.Sprintf("%T", tsm)) } @@ -54,7 +54,7 @@ func TestTrieFactory_CreateWithoutCheckpoints(t *testing.T) { func TestTrieFactory_CreateNormal(t *testing.T) { t.Parallel() - tsm, err := trie.CreateTrieStorageManager(getNewTrieStorageManagerArgs(), getTrieStorageManagerOptions()) + tsm, err := trie.CreateTrieStorageManager(trie.GetDefaultTrieStorageManagerParameters(), getTrieStorageManagerOptions()) assert.Nil(t, err) assert.Equal(t, "*trie.trieStorageManager", fmt.Sprintf("%T", tsm)) } @@ -67,7 +67,7 @@ func TestTrieStorageManager_SerialFuncShadowingCallsExpectedImpl(t *testing.T) { getCalled := false returnedVal := []byte("existingVal") putCalled := 0 - tsm = &testscommon.StorageManagerStub{ + tsm = &storageManager.StorageManagerStub{ GetCalled: func(_ []byte) ([]byte, error) { getCalled = true return returnedVal, nil @@ -98,7 +98,7 @@ func TestTrieStorageManager_SerialFuncShadowingCallsExpectedImpl(t *testing.T) { return true }, GetBaseTrieStorageManagerCalled: func() common.StorageManager { - tsm, _ = trie.NewTrieStorageManager(getNewTrieStorageManagerArgs()) + tsm, _ = trie.NewTrieStorageManager(trie.GetDefaultTrieStorageManagerParameters()) return tsm }, } diff --git a/trie/trieStorageManagerInEpoch.go b/trie/trieStorageManagerInEpoch.go index fee9a9dad31..5726290b3fd 100644 --- a/trie/trieStorageManagerInEpoch.go +++ b/trie/trieStorageManagerInEpoch.go @@ -3,9 +3,9 @@ package trie import ( "fmt" + "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-go/common" - "github.com/multiversx/mx-chain-go/errors" ) // numEpochsToVerify needs to be at least 2 due to a snapshotting edge-case. 
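NewTrieStorageManagerArgs gains the mandatory Identifier field (an empty value is rejected with ErrInvalidIdentifier) and the manager exposes it through GetIdentifier. The sketch below mirrors the default-parameters helper this patch removes from patriciaMerkleTrie_test.go, with the new field filled in; all mocks are the ones that helper used.

```go
package main

import (
	"fmt"

	"github.com/multiversx/mx-chain-go/config"
	"github.com/multiversx/mx-chain-go/dataRetriever"
	"github.com/multiversx/mx-chain-go/testscommon"
	"github.com/multiversx/mx-chain-go/trie"
	"github.com/multiversx/mx-chain-go/trie/hashesHolder"
)

func main() {
	args := trie.NewTrieStorageManagerArgs{
		MainStorer:        testscommon.NewSnapshotPruningStorerMock(),
		CheckpointsStorer: testscommon.NewSnapshotPruningStorerMock(),
		Marshalizer:       &testscommon.ProtobufMarshalizerMock{},
		Hasher:            &testscommon.KeccakMock{},
		GeneralConfig: config.TrieStorageManagerConfig{
			PruningBufferLen:      1000,
			SnapshotsBufferLen:    10,
			SnapshotsGoroutineNum: 1,
		},
		CheckpointHashesHolder: hashesHolder.NewCheckpointHashesHolder(10000000, testscommon.HashSize),
		IdleProvider:           &testscommon.ProcessStatusHandlerStub{},
		// New in this patch: leaving Identifier empty makes NewTrieStorageManager
		// return ErrInvalidIdentifier.
		Identifier: dataRetriever.UserAccountsUnit.String(),
	}

	tsm, err := trie.NewTrieStorageManager(args)
	if err != nil {
		panic(err)
	}

	// The identifier travels with the storage manager and ends up in the
	// enriched get-node-from-DB errors.
	fmt.Println(tsm.GetIdentifier())
}
```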
@@ -49,7 +49,7 @@ func (tsmie *trieStorageManagerInEpoch) Get(key []byte) ([]byte, error) { if tsmie.closed { log.Debug("trieStorageManagerInEpoch get context closing", "key", key) - return nil, errors.ErrContextClosing + return nil, core.ErrContextClosing } for i := uint32(0); i < numEpochsToVerify; i++ { @@ -73,7 +73,7 @@ func treatGetFromEpochError(err error, epoch uint32) { return } - if errors.IsClosingError(err) { + if core.IsClosingError(err) { log.Debug("trieStorageManagerInEpoch closing err", "error", err.Error(), "epoch", epoch) return } diff --git a/trie/trieStorageManagerInEpoch_test.go b/trie/trieStorageManagerInEpoch_test.go index 9ba92d45549..29722e645c4 100644 --- a/trie/trieStorageManagerInEpoch_test.go +++ b/trie/trieStorageManagerInEpoch_test.go @@ -1,31 +1,34 @@ package trie import ( + "errors" "strings" "testing" - "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-go/storage" "github.com/multiversx/mx-chain-go/storage/database" - "github.com/multiversx/mx-chain-go/testscommon" + "github.com/multiversx/mx-chain-go/testscommon/storageManager" "github.com/multiversx/mx-chain-go/testscommon/trie" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestNewTrieStorageManagerInEpochNilStorageManager(t *testing.T) { t.Parallel() tsmie, err := newTrieStorageManagerInEpoch(nil, 0) - assert.True(t, check.IfNil(tsmie)) + assert.Nil(t, tsmie) assert.Equal(t, ErrNilTrieStorage, err) } func TestNewTrieStorageManagerInEpochInvalidStorageManagerType(t *testing.T) { t.Parallel() - trieStorage := &testscommon.StorageManagerStub{} + trieStorage := &storageManager.StorageManagerStub{} tsmie, err := newTrieStorageManagerInEpoch(trieStorage, 0) - assert.True(t, check.IfNil(tsmie)) + assert.Nil(t, tsmie) assert.NotNil(t, err) assert.True(t, strings.Contains(err.Error(), "invalid storage manager, type is")) } @@ -37,7 +40,7 @@ func TestNewTrieStorageManagerInEpochInvalidStorerType(t *testing.T) { trieStorage.mainStorer = database.NewMemDB() tsmie, err := newTrieStorageManagerInEpoch(trieStorage, 0) - assert.True(t, check.IfNil(tsmie)) + assert.Nil(t, tsmie) assert.NotNil(t, err) assert.True(t, strings.Contains(err.Error(), "invalid storer, type is")) } @@ -48,13 +51,41 @@ func TestNewTrieStorageManagerInEpoch(t *testing.T) { _, trieStorage := newEmptyTrie() tsmie, err := newTrieStorageManagerInEpoch(trieStorage, 0) - assert.False(t, check.IfNil(tsmie)) + assert.NotNil(t, tsmie) assert.Nil(t, err) } +func TestTrieStorageManagerInEpoch_IsInterfaceNil(t *testing.T) { + t.Parallel() + + var tsmie *trieStorageManagerInEpoch + assert.True(t, tsmie.IsInterfaceNil()) + + _, trieStorage := newEmptyTrie() + tsmie, _ = newTrieStorageManagerInEpoch(trieStorage, 0) + assert.False(t, tsmie.IsInterfaceNil()) +} + func TestTrieStorageManagerInEpoch_GetFromEpoch(t *testing.T) { t.Parallel() + t.Run("closed storage manager should error", func(t *testing.T) { + t.Parallel() + + _, trieStorage := newEmptyTrie() + trieStorage.mainStorer = &trie.SnapshotPruningStorerStub{ + GetFromEpochCalled: func(_ []byte, _ uint32) ([]byte, error) { + require.Fail(t, "should have not been called") + return nil, nil + }, + } + tsmie, _ := newTrieStorageManagerInEpoch(trieStorage, 0) + _ = tsmie.Close() + + _, err := tsmie.Get([]byte("key")) + require.Equal(t, core.ErrContextClosing, err) + }) + t.Run("epoch 0 does not panic", func(t *testing.T) { t.Parallel() @@ -72,6 +103,42 @@ func 
TestTrieStorageManagerInEpoch_GetFromEpoch(t *testing.T) { assert.True(t, getFromEpochCalled) }) + t.Run("closing error should work", func(t *testing.T) { + t.Parallel() + + _, trieStorage := newEmptyTrie() + getFromEpochCalled := false + trieStorage.mainStorer = &trie.SnapshotPruningStorerStub{ + GetFromEpochCalled: func(_ []byte, _ uint32) ([]byte, error) { + getFromEpochCalled = true + return nil, storage.ErrDBIsClosed + }, + } + tsmie, _ := newTrieStorageManagerInEpoch(trieStorage, 0) + + _, err := tsmie.Get([]byte("key")) + assert.Equal(t, ErrKeyNotFound, err) + assert.True(t, getFromEpochCalled) + }) + + t.Run("other error should work", func(t *testing.T) { + t.Parallel() + + _, trieStorage := newEmptyTrie() + getFromEpochCalled := false + trieStorage.mainStorer = &trie.SnapshotPruningStorerStub{ + GetFromEpochCalled: func(_ []byte, _ uint32) ([]byte, error) { + getFromEpochCalled = true + return nil, errors.New("not closing error") + }, + } + tsmie, _ := newTrieStorageManagerInEpoch(trieStorage, 0) + + _, err := tsmie.Get([]byte("key")) + assert.Equal(t, ErrKeyNotFound, err) + assert.True(t, getFromEpochCalled) + }) + t.Run("getFromEpoch searches more storers", func(t *testing.T) { t.Parallel() diff --git a/trie/trieStorageManagerWithoutCheckpoints_test.go b/trie/trieStorageManagerWithoutCheckpoints_test.go index 891a14a392e..251d64f38ed 100644 --- a/trie/trieStorageManagerWithoutCheckpoints_test.go +++ b/trie/trieStorageManagerWithoutCheckpoints_test.go @@ -9,21 +9,33 @@ import ( trieMock "github.com/multiversx/mx-chain-go/testscommon/trie" "github.com/multiversx/mx-chain-go/trie" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) -func TestNewTrieStorageManagerWithoutCheckpointsOkVals(t *testing.T) { +func TestNewTrieStorageManagerWithoutCheckpoints(t *testing.T) { t.Parallel() - tsm, _ := trie.NewTrieStorageManager(getNewTrieStorageManagerArgs()) - ts, err := trie.NewTrieStorageManagerWithoutCheckpoints(tsm) - assert.Nil(t, err) - assert.NotNil(t, ts) + t.Run("nil storage manager should error", func(t *testing.T) { + t.Parallel() + + ts, err := trie.NewTrieStorageManagerWithoutCheckpoints(nil) + require.Equal(t, trie.ErrNilTrieStorage, err) + require.Nil(t, ts) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + tsm, _ := trie.NewTrieStorageManager(trie.GetDefaultTrieStorageManagerParameters()) + ts, err := trie.NewTrieStorageManagerWithoutCheckpoints(tsm) + assert.Nil(t, err) + assert.NotNil(t, ts) + }) } func TestTrieStorageManagerWithoutCheckpoints_SetCheckpoint(t *testing.T) { t.Parallel() - tsm, _ := trie.NewTrieStorageManager(getNewTrieStorageManagerArgs()) + tsm, _ := trie.NewTrieStorageManager(trie.GetDefaultTrieStorageManagerParameters()) ts, _ := trie.NewTrieStorageManagerWithoutCheckpoints(tsm) iteratorChannels := &common.TrieIteratorChannels{ @@ -50,7 +62,7 @@ func TestTrieStorageManagerWithoutCheckpoints_SetCheckpoint(t *testing.T) { func TestTrieStorageManagerWithoutCheckpoints_AddDirtyCheckpointHashes(t *testing.T) { t.Parallel() - tsm, _ := trie.NewTrieStorageManager(getNewTrieStorageManagerArgs()) + tsm, _ := trie.NewTrieStorageManager(trie.GetDefaultTrieStorageManagerParameters()) ts, _ := trie.NewTrieStorageManagerWithoutCheckpoints(tsm) assert.False(t, ts.AddDirtyCheckpointHashes([]byte("rootHash"), nil)) diff --git a/trie/trieStorageManagerWithoutPruning_test.go b/trie/trieStorageManagerWithoutPruning_test.go index 4dc35a38613..4c05108991a 100644 --- a/trie/trieStorageManagerWithoutPruning_test.go +++ 
b/trie/trieStorageManagerWithoutPruning_test.go @@ -4,7 +4,7 @@ import ( "testing" "github.com/multiversx/mx-chain-go/common" - "github.com/multiversx/mx-chain-go/testscommon" + "github.com/multiversx/mx-chain-go/testscommon/storageManager" "github.com/multiversx/mx-chain-go/trie" "github.com/stretchr/testify/assert" ) @@ -20,7 +20,7 @@ func TestNewTrieStorageManagerWithoutPruningWithNilStorage(t *testing.T) { func TestNewTrieStorageManagerWithoutPruning(t *testing.T) { t.Parallel() - tsm, _ := trie.NewTrieStorageManager(getNewTrieStorageManagerArgs()) + tsm, _ := trie.NewTrieStorageManager(trie.GetDefaultTrieStorageManagerParameters()) ts, err := trie.NewTrieStorageManagerWithoutPruning(tsm) assert.Nil(t, err) assert.NotNil(t, ts) @@ -29,7 +29,7 @@ func TestNewTrieStorageManagerWithoutPruning(t *testing.T) { func TestTrieStorageManagerWithoutPruning_IsPruningEnabled(t *testing.T) { t.Parallel() - tsm, _ := trie.NewTrieStorageManager(getNewTrieStorageManagerArgs()) + tsm, _ := trie.NewTrieStorageManager(trie.GetDefaultTrieStorageManagerParameters()) ts, _ := trie.NewTrieStorageManagerWithoutPruning(tsm) assert.False(t, ts.IsPruningEnabled()) } @@ -39,7 +39,7 @@ func TestTrieStorageManagerWithoutPruning_Remove(t *testing.T) { removeFromCheckpointHashesHolderCalled := false tsm := &trie.StorageManagerExtensionStub{ - StorageManagerStub: &testscommon.StorageManagerStub{ + StorageManagerStub: &storageManager.StorageManagerStub{ RemoveFromCheckpointHashesHolderCalled: func(hash []byte) { removeFromCheckpointHashesHolderCalled = true }, diff --git a/trie/trieStorageManagerWithoutSnapshot_test.go b/trie/trieStorageManagerWithoutSnapshot_test.go index 309e328433f..d3c4073fab7 100644 --- a/trie/trieStorageManagerWithoutSnapshot_test.go +++ b/trie/trieStorageManagerWithoutSnapshot_test.go @@ -15,16 +15,27 @@ import ( func TestNewTrieStorageManagerWithoutSnapshot(t *testing.T) { t.Parallel() - tsm, _ := trie.NewTrieStorageManager(getNewTrieStorageManagerArgs()) - ts, err := trie.NewTrieStorageManagerWithoutSnapshot(tsm) - assert.Nil(t, err) - assert.NotNil(t, ts) + t.Run("nil trie storage manager should error", func(t *testing.T) { + t.Parallel() + + ts, err := trie.NewTrieStorageManagerWithoutSnapshot(nil) + assert.Equal(t, trie.ErrNilTrieStorage, err) + assert.Nil(t, ts) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + tsm, _ := trie.NewTrieStorageManager(trie.GetDefaultTrieStorageManagerParameters()) + ts, err := trie.NewTrieStorageManagerWithoutSnapshot(tsm) + assert.Nil(t, err) + assert.NotNil(t, ts) + }) } func TestTrieStorageManagerWithoutSnapshot_GetFromCurrentEpoch(t *testing.T) { t.Parallel() - args := getNewTrieStorageManagerArgs() + args := trie.GetDefaultTrieStorageManagerParameters() tsm, _ := trie.NewTrieStorageManager(args) ts, _ := trie.NewTrieStorageManagerWithoutSnapshot(tsm) @@ -40,7 +51,7 @@ func TestTrieStorageManagerWithoutSnapshot_GetFromCurrentEpoch(t *testing.T) { func TestTrieStorageManagerWithoutSnapshot_PutInEpoch(t *testing.T) { t.Parallel() - args := getNewTrieStorageManagerArgs() + args := trie.GetDefaultTrieStorageManagerParameters() tsm, _ := trie.NewTrieStorageManager(args) ts, _ := trie.NewTrieStorageManagerWithoutSnapshot(tsm) @@ -57,7 +68,7 @@ func TestTrieStorageManagerWithoutSnapshot_PutInEpoch(t *testing.T) { func TestTrieStorageManagerWithoutSnapshot_PutInEpochWithoutCache(t *testing.T) { t.Parallel() - args := getNewTrieStorageManagerArgs() + args := trie.GetDefaultTrieStorageManagerParameters() tsm, _ := trie.NewTrieStorageManager(args) 
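// Editor's sketch (not part of the patch): the WithoutPruning, WithoutSnapshot
// and WithoutCheckpoints variants tested above follow a decorator pattern:
// they reject a nil inner storage manager at construction, embed it, and
// override only the behaviour they disable, delegating everything else. The
// names baseStorageManager, withoutPruning and errNilInner are assumptions for
// this illustration only.
package sketch

import "errors"

var errNilInner = errors.New("nil trie storage manager")

type baseStorageManager interface {
	IsPruningEnabled() bool
	TakeSnapshot(rootHash []byte)
}

type withoutPruning struct {
	baseStorageManager
}

func newWithoutPruning(inner baseStorageManager) (*withoutPruning, error) {
	if inner == nil {
		return nil, errNilInner
	}
	return &withoutPruning{baseStorageManager: inner}, nil
}

// IsPruningEnabled overrides only the disabled behaviour; all other calls are
// forwarded to the embedded base manager.
func (w *withoutPruning) IsPruningEnabled() bool {
	return false
}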
ts, _ := trie.NewTrieStorageManagerWithoutSnapshot(tsm) @@ -74,7 +85,7 @@ func TestTrieStorageManagerWithoutSnapshot_PutInEpochWithoutCache(t *testing.T) func TestTrieStorageManagerWithoutSnapshot_TakeSnapshot(t *testing.T) { t.Parallel() - args := getNewTrieStorageManagerArgs() + args := trie.GetDefaultTrieStorageManagerParameters() tsm, _ := trie.NewTrieStorageManager(args) ts, _ := trie.NewTrieStorageManagerWithoutSnapshot(tsm) @@ -94,7 +105,7 @@ func TestTrieStorageManagerWithoutSnapshot_TakeSnapshot(t *testing.T) { func TestTrieStorageManagerWithoutSnapshot_GetLatestStorageEpoch(t *testing.T) { t.Parallel() - args := getNewTrieStorageManagerArgs() + args := trie.GetDefaultTrieStorageManagerParameters() tsm, _ := trie.NewTrieStorageManager(args) ts, _ := trie.NewTrieStorageManagerWithoutSnapshot(tsm) @@ -106,7 +117,7 @@ func TestTrieStorageManagerWithoutSnapshot_GetLatestStorageEpoch(t *testing.T) { func TestTrieStorageManagerWithoutSnapshot_SetEpochForPutOperationDoesNotPanic(t *testing.T) { t.Parallel() - args := getNewTrieStorageManagerArgs() + args := trie.GetDefaultTrieStorageManagerParameters() tsm, _ := trie.NewTrieStorageManager(args) ts, _ := trie.NewTrieStorageManagerWithoutSnapshot(tsm) @@ -116,7 +127,7 @@ func TestTrieStorageManagerWithoutSnapshot_SetEpochForPutOperationDoesNotPanic(t func TestTrieStorageManagerWithoutSnapshot_ShouldTakeSnapshot(t *testing.T) { t.Parallel() - args := getNewTrieStorageManagerArgs() + args := trie.GetDefaultTrieStorageManagerParameters() tsm, _ := trie.NewTrieStorageManager(args) ts, _ := trie.NewTrieStorageManagerWithoutSnapshot(tsm) @@ -130,7 +141,7 @@ func TestTrieStorageManagerWithoutSnapshot_IsInterfaceNil(t *testing.T) { var ts common.StorageManager assert.True(t, check.IfNil(ts)) - args := getNewTrieStorageManagerArgs() + args := trie.GetDefaultTrieStorageManagerParameters() tsm, _ := trie.NewTrieStorageManager(args) ts, _ = trie.NewTrieStorageManagerWithoutSnapshot(tsm) assert.False(t, check.IfNil(ts)) diff --git a/trie/trieStorageManager_test.go b/trie/trieStorageManager_test.go index a0b5a88ce63..f23810d0eb1 100644 --- a/trie/trieStorageManager_test.go +++ b/trie/trieStorageManager_test.go @@ -10,33 +10,22 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/common/errChan" - "github.com/multiversx/mx-chain-go/config" - "github.com/multiversx/mx-chain-go/errors" + storageMx "github.com/multiversx/mx-chain-go/storage" "github.com/multiversx/mx-chain-go/testscommon" - "github.com/multiversx/mx-chain-go/testscommon/hashingMocks" + "github.com/multiversx/mx-chain-go/testscommon/storage" + "github.com/multiversx/mx-chain-go/testscommon/storageManager" trieMock "github.com/multiversx/mx-chain-go/testscommon/trie" "github.com/multiversx/mx-chain-go/trie" - "github.com/multiversx/mx-chain-go/trie/hashesHolder" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) -const ( - hashSize = 32 +var ( + providedKey = []byte("key") + providedVal = []byte("value") + expectedErr = errorsGo.New("expected error") ) -func getNewTrieStorageManagerArgs() trie.NewTrieStorageManagerArgs { - return trie.NewTrieStorageManagerArgs{ - MainStorer: testscommon.CreateMemUnit(), - CheckpointsStorer: testscommon.CreateMemUnit(), - Marshalizer: &testscommon.MarshalizerMock{}, - Hasher: &hashingMocks.HasherMock{}, - GeneralConfig: config.TrieStorageManagerConfig{SnapshotsGoroutineNum: 1}, - CheckpointHashesHolder: hashesHolder.NewCheckpointHashesHolder(10, 
hashSize), - IdleProvider: &testscommon.ProcessStatusHandlerStub{}, - } -} - // errChanWithLen extends the BufferedErrChan interface with a Len method type errChanWithLen interface { common.BufferedErrChan @@ -46,10 +35,28 @@ type errChanWithLen interface { func TestNewTrieStorageManager(t *testing.T) { t.Parallel() + t.Run("nil main storer", func(t *testing.T) { + t.Parallel() + + args := trie.GetDefaultTrieStorageManagerParameters() + args.MainStorer = nil + ts, err := trie.NewTrieStorageManager(args) + assert.Nil(t, ts) + assert.True(t, strings.Contains(err.Error(), trie.ErrNilStorer.Error())) + }) + t.Run("nil checkpoints storer", func(t *testing.T) { + t.Parallel() + + args := trie.GetDefaultTrieStorageManagerParameters() + args.CheckpointsStorer = nil + ts, err := trie.NewTrieStorageManager(args) + assert.Nil(t, ts) + assert.True(t, strings.Contains(err.Error(), trie.ErrNilStorer.Error())) + }) t.Run("nil marshaller", func(t *testing.T) { t.Parallel() - args := getNewTrieStorageManagerArgs() + args := trie.GetDefaultTrieStorageManagerParameters() args.Marshalizer = nil ts, err := trie.NewTrieStorageManager(args) assert.Nil(t, ts) @@ -58,7 +65,7 @@ func TestNewTrieStorageManager(t *testing.T) { t.Run("nil hasher", func(t *testing.T) { t.Parallel() - args := getNewTrieStorageManagerArgs() + args := trie.GetDefaultTrieStorageManagerParameters() args.Hasher = nil ts, err := trie.NewTrieStorageManager(args) assert.Nil(t, ts) @@ -67,16 +74,43 @@ func TestNewTrieStorageManager(t *testing.T) { t.Run("nil checkpoint hashes holder", func(t *testing.T) { t.Parallel() - args := getNewTrieStorageManagerArgs() + args := trie.GetDefaultTrieStorageManagerParameters() args.CheckpointHashesHolder = nil ts, err := trie.NewTrieStorageManager(args) assert.Nil(t, ts) assert.Equal(t, trie.ErrNilCheckpointHashesHolder, err) }) + t.Run("nil idle provider", func(t *testing.T) { + t.Parallel() + + args := trie.GetDefaultTrieStorageManagerParameters() + args.IdleProvider = nil + ts, err := trie.NewTrieStorageManager(args) + assert.Nil(t, ts) + assert.Equal(t, trie.ErrNilIdleNodeProvider, err) + }) + t.Run("invalid config should error", func(t *testing.T) { + t.Parallel() + + args := trie.GetDefaultTrieStorageManagerParameters() + args.GeneralConfig.SnapshotsGoroutineNum = 0 + ts, err := trie.NewTrieStorageManager(args) + assert.Nil(t, ts) + assert.Error(t, err) + }) + t.Run("invalid identifier", func(t *testing.T) { + t.Parallel() + + args := trie.GetDefaultTrieStorageManagerParameters() + args.Identifier = "" + ts, err := trie.NewTrieStorageManager(args) + assert.Nil(t, ts) + assert.Equal(t, trie.ErrInvalidIdentifier, err) + }) t.Run("should work", func(t *testing.T) { t.Parallel() - args := getNewTrieStorageManagerArgs() + args := trie.GetDefaultTrieStorageManagerParameters() ts, err := trie.NewTrieStorageManager(args) assert.Nil(t, err) assert.NotNil(t, ts) @@ -115,7 +149,7 @@ func TestTrieCheckpoint(t *testing.T) { func TestTrieStorageManager_SetCheckpointNilErrorChan(t *testing.T) { t.Parallel() - args := getNewTrieStorageManagerArgs() + args := trie.GetDefaultTrieStorageManagerParameters() ts, _ := trie.NewTrieStorageManager(args) rootHash := []byte("rootHash") @@ -134,7 +168,7 @@ func TestTrieStorageManager_SetCheckpointNilErrorChan(t *testing.T) { func TestTrieStorageManager_SetCheckpointClosedDb(t *testing.T) { t.Parallel() - args := getNewTrieStorageManagerArgs() + args := trie.GetDefaultTrieStorageManagerParameters() ts, _ := trie.NewTrieStorageManager(args) _ = ts.Close() @@ -155,7 +189,7 @@ func 
TestTrieStorageManager_SetCheckpointClosedDb(t *testing.T) { func TestTrieStorageManager_SetCheckpointEmptyTrieRootHash(t *testing.T) { t.Parallel() - args := getNewTrieStorageManagerArgs() + args := trie.GetDefaultTrieStorageManagerParameters() ts, _ := trie.NewTrieStorageManager(args) rootHash := make([]byte, 32) @@ -200,7 +234,7 @@ func TestTrieCheckpoint_DoesNotSaveToCheckpointStorageIfNotDirty(t *testing.T) { func TestTrieStorageManager_IsPruningEnabled(t *testing.T) { t.Parallel() - args := getNewTrieStorageManagerArgs() + args := trie.GetDefaultTrieStorageManagerParameters() ts, _ := trie.NewTrieStorageManager(args) assert.True(t, ts.IsPruningEnabled()) @@ -209,8 +243,9 @@ func TestTrieStorageManager_IsPruningEnabled(t *testing.T) { func TestTrieStorageManager_IsPruningBlocked(t *testing.T) { t.Parallel() - args := getNewTrieStorageManagerArgs() + args := trie.GetDefaultTrieStorageManagerParameters() ts, _ := trie.NewTrieStorageManager(args) + ts.ExitPruningBufferingMode() // early exit assert.False(t, ts.IsPruningBlocked()) @@ -224,58 +259,120 @@ func TestTrieStorageManager_IsPruningBlocked(t *testing.T) { func TestTrieStorageManager_Remove(t *testing.T) { t.Parallel() - args := getNewTrieStorageManagerArgs() - args.MainStorer = testscommon.NewSnapshotPruningStorerMock() - args.CheckpointsStorer = testscommon.NewSnapshotPruningStorerMock() + t.Run("main storer not snapshotPruningStorer should call remove", func(t *testing.T) { + t.Parallel() + + wasCalled := false + args := trie.GetDefaultTrieStorageManagerParameters() + args.MainStorer = &storage.StorerStub{ + RemoveCalled: func(key []byte) error { + wasCalled = true + return nil + }, + } + ts, _ := trie.NewTrieStorageManager(args) + + err := ts.Remove(providedKey) + assert.Nil(t, err) + assert.True(t, wasCalled) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + args := trie.GetDefaultTrieStorageManagerParameters() + args.MainStorer = testscommon.NewSnapshotPruningStorerMock() + args.CheckpointsStorer = testscommon.NewSnapshotPruningStorerMock() + ts, _ := trie.NewTrieStorageManager(args) + + _ = args.MainStorer.Put(providedKey, providedVal) + hashes := make(common.ModifiedHashes) + hashes[string(providedVal)] = struct{}{} + hashes[string(providedKey)] = struct{}{} + _ = args.CheckpointHashesHolder.Put(providedKey, hashes) + + val, err := args.MainStorer.Get(providedKey) + assert.Nil(t, err) + assert.NotNil(t, val) + ok := args.CheckpointHashesHolder.ShouldCommit(providedKey) + assert.True(t, ok) + + err = ts.Remove(providedKey) + assert.Nil(t, err) + + val, err = args.MainStorer.Get(providedKey) + assert.Nil(t, val) + assert.NotNil(t, err) + ok = args.CheckpointHashesHolder.ShouldCommit(providedKey) + assert.False(t, ok) + }) +} + +func TestTrieStorageManager_RemoveFromCheckpointHashesHolder(t *testing.T) { + t.Parallel() + + wasCalled := false + args := trie.GetDefaultTrieStorageManagerParameters() + args.CheckpointHashesHolder = &trieMock.CheckpointHashesHolderStub{ + RemoveCalled: func(bytes []byte) { + wasCalled = true + }, + } ts, _ := trie.NewTrieStorageManager(args) - key := []byte("key") - value := []byte("value") + ts.RemoveFromCheckpointHashesHolder(providedKey) + assert.True(t, wasCalled) +} - _ = args.MainStorer.Put(key, value) - hashes := make(common.ModifiedHashes) - hashes[string(value)] = struct{}{} - hashes[string(key)] = struct{}{} - _ = args.CheckpointHashesHolder.Put(key, hashes) +func TestTrieStorageManager_SetEpochForPutOperation(t *testing.T) { + t.Parallel() - val, err := 
args.MainStorer.Get(key) - assert.Nil(t, err) - assert.NotNil(t, val) - ok := args.CheckpointHashesHolder.ShouldCommit(key) - assert.True(t, ok) + t.Run("main storer not epochStorer should early exit", func(t *testing.T) { + t.Parallel() - err = ts.Remove(key) - assert.Nil(t, err) + args := trie.GetDefaultTrieStorageManagerParameters() + args.MainStorer = &storage.StorerStub{} + ts, _ := trie.NewTrieStorageManager(args) - val, err = args.MainStorer.Get(key) - assert.Nil(t, val) - assert.NotNil(t, err) - ok = args.CheckpointHashesHolder.ShouldCommit(key) - assert.False(t, ok) + ts.SetEpochForPutOperation(0) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + providedEpoch := uint32(100) + wasCalled := false + args := trie.GetDefaultTrieStorageManagerParameters() + args.MainStorer = &storageManager.StorageManagerStub{ + SetEpochForPutOperationCalled: func(u uint32) { + assert.Equal(t, providedEpoch, u) + wasCalled = true + }, + } + ts, _ := trie.NewTrieStorageManager(args) + + ts.SetEpochForPutOperation(providedEpoch) + assert.True(t, wasCalled) + }) } func TestTrieStorageManager_PutInEpochClosedDb(t *testing.T) { t.Parallel() - args := getNewTrieStorageManagerArgs() + args := trie.GetDefaultTrieStorageManagerParameters() ts, _ := trie.NewTrieStorageManager(args) _ = ts.Close() - key := []byte("key") - value := []byte("value") - err := ts.PutInEpoch(key, value, 0) - assert.Equal(t, errors.ErrContextClosing, err) + err := ts.PutInEpoch(providedKey, providedVal, 0) + assert.Equal(t, core.ErrContextClosing, err) } func TestTrieStorageManager_PutInEpochInvalidStorer(t *testing.T) { t.Parallel() - args := getNewTrieStorageManagerArgs() + args := trie.GetDefaultTrieStorageManagerParameters() + args.MainStorer = testscommon.CreateMemUnit() ts, _ := trie.NewTrieStorageManager(args) - key := []byte("key") - value := []byte("value") - err := ts.PutInEpoch(key, value, 0) + err := ts.PutInEpoch(providedKey, providedVal, 0) assert.True(t, strings.Contains(err.Error(), "invalid storer type")) } @@ -283,7 +380,7 @@ func TestTrieStorageManager_PutInEpoch(t *testing.T) { t.Parallel() putInEpochCalled := false - args := getNewTrieStorageManagerArgs() + args := trie.GetDefaultTrieStorageManagerParameters() args.MainStorer = &trieMock.SnapshotPruningStorerStub{ MemDbMock: testscommon.NewMemDbMock(), PutInEpochCalled: func(key []byte, data []byte, epoch uint32) error { @@ -293,9 +390,7 @@ func TestTrieStorageManager_PutInEpoch(t *testing.T) { } ts, _ := trie.NewTrieStorageManager(args) - key := []byte("key") - value := []byte("value") - err := ts.PutInEpoch(key, value, 0) + err := ts.PutInEpoch(providedKey, providedVal, 0) assert.Nil(t, err) assert.True(t, putInEpochCalled) } @@ -303,7 +398,8 @@ func TestTrieStorageManager_PutInEpoch(t *testing.T) { func TestTrieStorageManager_GetLatestStorageEpochInvalidStorer(t *testing.T) { t.Parallel() - args := getNewTrieStorageManagerArgs() + args := trie.GetDefaultTrieStorageManagerParameters() + args.MainStorer = testscommon.CreateMemUnit() ts, _ := trie.NewTrieStorageManager(args) val, err := ts.GetLatestStorageEpoch() @@ -315,7 +411,7 @@ func TestTrieStorageManager_GetLatestStorageEpoch(t *testing.T) { t.Parallel() getLatestSorageCalled := false - args := getNewTrieStorageManagerArgs() + args := trie.GetDefaultTrieStorageManagerParameters() args.MainStorer = &trieMock.SnapshotPruningStorerStub{ MemDbMock: testscommon.NewMemDbMock(), GetLatestStorageEpochCalled: func() (uint32, error) { @@ -334,7 +430,7 @@ func 
TestTrieStorageManager_GetLatestStorageEpoch(t *testing.T) { func TestTrieStorageManager_TakeSnapshotNilErrorChan(t *testing.T) { t.Parallel() - args := getNewTrieStorageManagerArgs() + args := trie.GetDefaultTrieStorageManagerParameters() ts, _ := trie.NewTrieStorageManager(args) rootHash := []byte("rootHash") @@ -353,7 +449,7 @@ func TestTrieStorageManager_TakeSnapshotNilErrorChan(t *testing.T) { func TestTrieStorageManager_TakeSnapshotClosedDb(t *testing.T) { t.Parallel() - args := getNewTrieStorageManagerArgs() + args := trie.GetDefaultTrieStorageManagerParameters() ts, _ := trie.NewTrieStorageManager(args) _ = ts.Close() @@ -374,7 +470,7 @@ func TestTrieStorageManager_TakeSnapshotClosedDb(t *testing.T) { func TestTrieStorageManager_TakeSnapshotEmptyTrieRootHash(t *testing.T) { t.Parallel() - args := getNewTrieStorageManagerArgs() + args := trie.GetDefaultTrieStorageManagerParameters() ts, _ := trie.NewTrieStorageManager(args) rootHash := make([]byte, 32) @@ -394,7 +490,7 @@ func TestTrieStorageManager_TakeSnapshotEmptyTrieRootHash(t *testing.T) { func TestTrieStorageManager_TakeSnapshotWithGetNodeFromDBError(t *testing.T) { t.Parallel() - args := getNewTrieStorageManagerArgs() + args := trie.GetDefaultTrieStorageManagerParameters() args.MainStorer = testscommon.NewSnapshotPruningStorerMock() ts, _ := trie.NewTrieStorageManager(args) @@ -412,35 +508,295 @@ func TestTrieStorageManager_TakeSnapshotWithGetNodeFromDBError(t *testing.T) { assert.True(t, ok) assert.Equal(t, 1, ch.Len()) errRecovered := iteratorChannels.ErrChan.ReadFromChanNonBlocking() - assert.True(t, strings.Contains(errRecovered.Error(), common.GetNodeFromDBErrorString)) + assert.True(t, strings.Contains(errRecovered.Error(), core.GetNodeFromDBErrorString)) } -func TestTrieStorageManager_ShouldTakeSnapshotInvalidStorer(t *testing.T) { +func TestTrieStorageManager_ShouldTakeSnapshot(t *testing.T) { t.Parallel() - args := getNewTrieStorageManagerArgs() - ts, _ := trie.NewTrieStorageManager(args) + t.Run("invalid storer should return false", func(t *testing.T) { + t.Parallel() + + args := trie.GetDefaultTrieStorageManagerParameters() + ts, _ := trie.NewTrieStorageManager(args) + + assert.False(t, ts.ShouldTakeSnapshot()) + }) + t.Run("trie synced should return false", func(t *testing.T) { + t.Parallel() - assert.False(t, ts.ShouldTakeSnapshot()) + args := trie.GetDefaultTrieStorageManagerParameters() + args.MainStorer = &trieMock.SnapshotPruningStorerStub{ + GetFromCurrentEpochCalled: func(key []byte) ([]byte, error) { + return []byte(common.TrieSyncedVal), nil + }, + MemDbMock: testscommon.NewMemDbMock(), + } + ts, _ := trie.NewTrieStorageManager(args) + + assert.False(t, ts.ShouldTakeSnapshot()) + }) + t.Run("GetFromOldEpochsWithoutAddingToCacheCalled error should return false", func(t *testing.T) { + t.Parallel() + + args := trie.GetDefaultTrieStorageManagerParameters() + args.MainStorer = &trieMock.SnapshotPruningStorerStub{ + GetFromCurrentEpochCalled: func(key []byte) ([]byte, error) { + return nil, expectedErr // isTrieSynced returns false + }, + GetFromOldEpochsWithoutAddingToCacheCalled: func(key []byte) ([]byte, core.OptionalUint32, error) { + return nil, core.OptionalUint32{}, storageMx.ErrDBIsClosed + }, + MemDbMock: testscommon.NewMemDbMock(), + } + ts, _ := trie.NewTrieStorageManager(args) + + assert.False(t, ts.ShouldTakeSnapshot()) + }) + t.Run("GetFromOldEpochsWithoutAddingToCacheCalled returns non ActiveDBVal should return false", func(t *testing.T) { + t.Parallel() + + args := 
trie.GetDefaultTrieStorageManagerParameters() + args.MainStorer = &trieMock.SnapshotPruningStorerStub{ + GetFromCurrentEpochCalled: func(key []byte) ([]byte, error) { + return []byte("response"), nil + }, + GetFromOldEpochsWithoutAddingToCacheCalled: func(key []byte) ([]byte, core.OptionalUint32, error) { + return []byte("response"), core.OptionalUint32{}, nil + }, + MemDbMock: testscommon.NewMemDbMock(), + } + ts, _ := trie.NewTrieStorageManager(args) + + assert.False(t, ts.ShouldTakeSnapshot()) + }) + t.Run("GetFromOldEpochsWithoutAddingToCacheCalled returns ActiveDBVal should return true", func(t *testing.T) { + t.Parallel() + + args := trie.GetDefaultTrieStorageManagerParameters() + args.MainStorer = &trieMock.SnapshotPruningStorerStub{ + GetFromCurrentEpochCalled: func(key []byte) ([]byte, error) { + return nil, expectedErr // isTrieSynced returns false + }, + GetFromOldEpochsWithoutAddingToCacheCalled: func(key []byte) ([]byte, core.OptionalUint32, error) { + return []byte(common.ActiveDBVal), core.OptionalUint32{}, nil + }, + MemDbMock: testscommon.NewMemDbMock(), + } + ts, _ := trie.NewTrieStorageManager(args) + + assert.True(t, ts.ShouldTakeSnapshot()) + }) +} + +func TestTrieStorageManager_Get(t *testing.T) { + t.Parallel() + + t.Run("closed storage manager should error", func(t *testing.T) { + t.Parallel() + + ts, _ := trie.NewTrieStorageManager(trie.GetDefaultTrieStorageManagerParameters()) + _ = ts.Close() + + val, err := ts.Get(providedKey) + assert.Equal(t, core.ErrContextClosing, err) + assert.Nil(t, val) + }) + t.Run("main storer closing should error", func(t *testing.T) { + t.Parallel() + + args := trie.GetDefaultTrieStorageManagerParameters() + args.MainStorer = &storage.StorerStub{ + GetCalled: func(key []byte) ([]byte, error) { + return nil, storageMx.ErrDBIsClosed + }, + } + ts, _ := trie.NewTrieStorageManager(args) + + val, err := ts.Get(providedKey) + assert.Equal(t, storageMx.ErrDBIsClosed, err) + assert.Nil(t, val) + }) + t.Run("checkpoints storer closing should error", func(t *testing.T) { + t.Parallel() + + args := trie.GetDefaultTrieStorageManagerParameters() + args.CheckpointsStorer = &storage.StorerStub{ + GetCalled: func(key []byte) ([]byte, error) { + return nil, storageMx.ErrDBIsClosed + }, + } + ts, _ := trie.NewTrieStorageManager(args) + + val, err := ts.Get(providedKey) + assert.Equal(t, storageMx.ErrDBIsClosed, err) + assert.Nil(t, val) + }) + t.Run("should return from main storer", func(t *testing.T) { + t.Parallel() + + args := trie.GetDefaultTrieStorageManagerParameters() + _ = args.MainStorer.Put(providedKey, providedVal) + ts, _ := trie.NewTrieStorageManager(args) + + val, err := ts.Get(providedKey) + assert.Nil(t, err) + assert.Equal(t, providedVal, val) + }) + t.Run("should return from checkpoints storer", func(t *testing.T) { + t.Parallel() + + args := trie.GetDefaultTrieStorageManagerParameters() + _ = args.CheckpointsStorer.Put(providedKey, providedVal) + ts, _ := trie.NewTrieStorageManager(args) + + val, err := ts.Get(providedKey) + assert.Nil(t, err) + assert.Equal(t, providedVal, val) + }) } func TestNewSnapshotTrieStorageManager_GetFromCurrentEpoch(t *testing.T) { t.Parallel() - getFromCurrentEpochCalled := false - args := getNewTrieStorageManagerArgs() - args.MainStorer = &trieMock.SnapshotPruningStorerStub{ - MemDbMock: testscommon.NewMemDbMock(), - GetFromCurrentEpochCalled: func(_ []byte) ([]byte, error) { - getFromCurrentEpochCalled = true - return nil, nil - }, - } - ts, _ := trie.NewTrieStorageManager(args) + t.Run("closed storage 
manager should error", func(t *testing.T) { + t.Parallel() - _, err := ts.GetFromCurrentEpoch([]byte("key")) - assert.Nil(t, err) - assert.True(t, getFromCurrentEpochCalled) + ts, _ := trie.NewTrieStorageManager(trie.GetDefaultTrieStorageManagerParameters()) + _ = ts.Close() + + val, err := ts.GetFromCurrentEpoch(providedKey) + assert.Equal(t, core.ErrContextClosing, err) + assert.Nil(t, val) + }) + t.Run("main storer not snapshotPruningStorer should error", func(t *testing.T) { + t.Parallel() + + args := trie.GetDefaultTrieStorageManagerParameters() + args.MainStorer = &storage.StorerStub{} + ts, _ := trie.NewTrieStorageManager(args) + + val, err := ts.GetFromCurrentEpoch(providedKey) + assert.True(t, strings.Contains(err.Error(), "invalid storer")) + assert.Nil(t, val) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + getFromCurrentEpochCalled := false + args := trie.GetDefaultTrieStorageManagerParameters() + args.MainStorer = &trieMock.SnapshotPruningStorerStub{ + MemDbMock: testscommon.NewMemDbMock(), + GetFromCurrentEpochCalled: func(_ []byte) ([]byte, error) { + getFromCurrentEpochCalled = true + return nil, nil + }, + } + ts, _ := trie.NewTrieStorageManager(args) + + _, err := ts.GetFromCurrentEpoch(providedKey) + assert.Nil(t, err) + assert.True(t, getFromCurrentEpochCalled) + }) +} + +func TestTrieStorageManager_Put(t *testing.T) { + t.Parallel() + + t.Run("closed storage manager should error", func(t *testing.T) { + t.Parallel() + + ts, _ := trie.NewTrieStorageManager(trie.GetDefaultTrieStorageManagerParameters()) + _ = ts.Close() + + err := ts.Put(providedKey, providedVal) + assert.Equal(t, core.ErrContextClosing, err) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + ts, _ := trie.NewTrieStorageManager(trie.GetDefaultTrieStorageManagerParameters()) + + _ = ts.Put(providedKey, providedVal) + val, err := ts.Get(providedKey) + assert.Nil(t, err) + assert.Equal(t, providedVal, val) + }) +} + +func TestTrieStorageManager_PutInEpochWithoutCache(t *testing.T) { + t.Parallel() + + t.Run("closed storage manager should error", func(t *testing.T) { + t.Parallel() + + ts, _ := trie.NewTrieStorageManager(trie.GetDefaultTrieStorageManagerParameters()) + _ = ts.Close() + + err := ts.PutInEpochWithoutCache(providedKey, providedVal, 0) + assert.Equal(t, core.ErrContextClosing, err) + }) + t.Run("main storer not snapshotPruningStorer should error", func(t *testing.T) { + t.Parallel() + + args := trie.GetDefaultTrieStorageManagerParameters() + args.MainStorer = &storage.StorerStub{} + ts, _ := trie.NewTrieStorageManager(args) + + err := ts.PutInEpochWithoutCache(providedKey, providedVal, 0) + assert.True(t, strings.Contains(err.Error(), "invalid storer")) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + args := trie.GetDefaultTrieStorageManagerParameters() + args.MainStorer = testscommon.NewSnapshotPruningStorerMock() + ts, _ := trie.NewTrieStorageManager(args) + + err := ts.PutInEpochWithoutCache(providedKey, providedVal, 0) + assert.Nil(t, err) + }) +} + +func TestTrieStorageManager_Close(t *testing.T) { + t.Parallel() + + t.Run("error on main storer close", func(t *testing.T) { + t.Parallel() + + args := trie.GetDefaultTrieStorageManagerParameters() + args.MainStorer = &storage.StorerStub{ + CloseCalled: func() error { + return expectedErr + }, + } + ts, _ := trie.NewTrieStorageManager(args) + + err := ts.Close() + assert.True(t, errorsGo.Is(err, expectedErr)) + }) + t.Run("error on checkpoints storer close", func(t *testing.T) { + 
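// Editor's sketch (not part of the patch): the Close subtests rely on the
// repository-wide stub convention, a struct of exported *Called function
// fields whose methods forward to those fields when set, and they match the
// propagated error with errors.Is. The storerStub and closeBoth names below
// are assumptions made for this illustration only.
package sketch

import "fmt"

type storerStub struct {
	CloseCalled func() error
}

// Close forwards to the injected behaviour, defaulting to success.
func (s *storerStub) Close() error {
	if s.CloseCalled != nil {
		return s.CloseCalled()
	}
	return nil
}

// closeBoth mirrors the behaviour under test: both storers are closed and the
// first failure is wrapped, so a caller can still assert errors.Is(err, expectedErr).
func closeBoth(main, checkpoints *storerStub) error {
	var firstErr error
	for _, s := range []*storerStub{main, checkpoints} {
		if err := s.Close(); err != nil && firstErr == nil {
			firstErr = fmt.Errorf("trie storage manager close: %w", err)
		}
	}
	return firstErr
}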
t.Parallel() + + args := trie.GetDefaultTrieStorageManagerParameters() + args.CheckpointsStorer = &storage.StorerStub{ + CloseCalled: func() error { + return expectedErr + }, + } + ts, _ := trie.NewTrieStorageManager(args) + + err := ts.Close() + assert.True(t, errorsGo.Is(err, expectedErr)) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + ts, _ := trie.NewTrieStorageManager(trie.GetDefaultTrieStorageManagerParameters()) + + err := ts.Close() + assert.NoError(t, err) + }) } func TestWriteInChanNonBlocking(t *testing.T) { @@ -541,3 +897,15 @@ func TestWriteInChanNonBlocking(t *testing.T) { assert.Equal(t, err2, recovered) }) } + +func TestTrieStorageManager_GetIdentifier(t *testing.T) { + t.Parallel() + + expectedId := "testId" + args := trie.GetDefaultTrieStorageManagerParameters() + args.Identifier = expectedId + ts, _ := trie.NewTrieStorageManager(args) + + id := ts.GetIdentifier() + assert.Equal(t, expectedId, id) +} diff --git a/update/container/accountDBSyncers_test.go b/update/container/accountDBSyncers_test.go index 0a63210e6c9..181ddbb0f3f 100644 --- a/update/container/accountDBSyncers_test.go +++ b/update/container/accountDBSyncers_test.go @@ -5,11 +5,17 @@ import ( "testing" "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/update" "github.com/multiversx/mx-chain-go/update/mock" "github.com/stretchr/testify/require" ) +var ( + testKey = "key" + testAccountsDBSyncersVal = &mock.AccountsDBSyncerStub{} +) + func TestNewAccountsDBSyncersContainer(t *testing.T) { t.Parallel() @@ -17,82 +23,156 @@ func TestNewAccountsDBSyncersContainer(t *testing.T) { require.False(t, check.IfNil(adsc)) } -func TestAccountDBSyncers_AddGetShouldWork(t *testing.T) { +func TestAccountDBSyncers_Get(t *testing.T) { t.Parallel() - adsc := NewAccountsDBSyncersContainer() - testKey := "key" - testVal := &mock.AccountsDBSyncerStub{} - err := adsc.Add(testKey, testVal) - require.NoError(t, err) + t.Run("missing key should error", func(t *testing.T) { + t.Parallel() + + adsc := NewAccountsDBSyncersContainer() + val, err := adsc.Get(testKey) + require.Equal(t, update.ErrInvalidContainerKey, err) + require.Nil(t, val) + }) + t.Run("invalid data should error", func(t *testing.T) { + t.Parallel() + + adsc := NewAccountsDBSyncersContainer() + + _ = adsc.AddInterface(testKey, "not an account db syncer") + val, err := adsc.Get(testKey) + require.Equal(t, update.ErrWrongTypeInContainer, err) + require.Nil(t, val) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + adsc := NewAccountsDBSyncersContainer() + + err := adsc.Add(testKey, testAccountsDBSyncersVal) + require.Nil(t, err) + val, err := adsc.Get(testKey) + require.NoError(t, err) + require.Equal(t, testAccountsDBSyncersVal, val) + }) +} - res, err := adsc.Get(testKey) - require.NoError(t, err) - require.Equal(t, testVal, res) +func TestAccountDBSyncers_Add(t *testing.T) { + t.Parallel() + + t.Run("nil value should error", func(t *testing.T) { + t.Parallel() + + adsc := NewAccountsDBSyncersContainer() + err := adsc.Add(testKey, nil) + require.Equal(t, update.ErrNilContainerElement, err) + }) + t.Run("duplicated key should error", func(t *testing.T) { + t.Parallel() + + adsc := NewAccountsDBSyncersContainer() + err := adsc.Add(testKey, testAccountsDBSyncersVal) + require.NoError(t, err) + + err = adsc.Add(testKey, testAccountsDBSyncersVal) + require.Equal(t, update.ErrContainerKeyAlreadyExists, err) + }) + t.Run("should work", func(t *testing.T) { + 
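// Editor's sketch (not part of the patch): the accountDBSyncers and
// trieSyncers container tests below pin down shared semantics: Get fails for a
// missing key or a stored value of the wrong type, Add rejects nil elements
// and duplicate keys, and AddMultiple requires keys and values of equal
// length. A minimal container with the same rules is sketched here; the error
// variables and the syncer interface are assumptions, not the update package's
// exported identifiers.
package sketch

import "errors"

var (
	errInvalidKey  = errors.New("invalid container key")
	errWrongType   = errors.New("wrong type in container")
	errNilElement  = errors.New("nil container element")
	errKeyExists   = errors.New("key already exists in container")
	errLenMismatch = errors.New("keys and values length mismatch")
)

// syncer stands in for update.AccountsDBSyncer / update.TrieSyncer.
type syncer interface {
	IsInterfaceNil() bool
}

type container struct {
	objects map[string]interface{}
}

func newContainer() *container {
	return &container{objects: map[string]interface{}{}}
}

func (c *container) Get(key string) (syncer, error) {
	raw, ok := c.objects[key]
	if !ok {
		return nil, errInvalidKey
	}
	s, ok := raw.(syncer)
	if !ok {
		return nil, errWrongType
	}
	return s, nil
}

func (c *container) Add(key string, val syncer) error {
	if val == nil || val.IsInterfaceNil() {
		return errNilElement
	}
	if _, exists := c.objects[key]; exists {
		return errKeyExists
	}
	c.objects[key] = val
	return nil
}

func (c *container) AddMultiple(keys []string, vals []syncer) error {
	if len(keys) != len(vals) {
		return errLenMismatch
	}
	for i, key := range keys {
		if err := c.Add(key, vals[i]); err != nil {
			return err
		}
	}
	return nil
}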
t.Parallel() + + adsc := NewAccountsDBSyncersContainer() + err := adsc.Add(testKey, testAccountsDBSyncersVal) + require.NoError(t, err) + }) } -func TestAccountDBSyncers_AddMultipleShouldWork(t *testing.T) { +func TestAccountDBSyncers_AddMultiple(t *testing.T) { t.Parallel() - adsc := NewAccountsDBSyncersContainer() testKey0 := "key0" - testVal0 := &mock.AccountsDBSyncerStub{} + testAccountsDBSyncersVal0 := &mock.AccountsDBSyncerStub{} testKey1 := "key1" - testVal1 := &mock.AccountsDBSyncerStub{} - - err := adsc.AddMultiple([]string{testKey0, testKey1}, []update.AccountsDBSyncer{testVal0, testVal1}) - require.NoError(t, err) - - res0, err := adsc.Get(testKey0) - require.NoError(t, err) - require.Equal(t, testVal0, res0) - - res1, err := adsc.Get(testKey1) - require.NoError(t, err) - require.Equal(t, testVal1, res1) - - require.Equal(t, 2, adsc.Len()) + testAccountsDBSyncersVal1 := &mock.AccountsDBSyncerStub{} + + t.Run("different lengths should error", func(t *testing.T) { + t.Parallel() + + adsc := NewAccountsDBSyncersContainer() + err := adsc.AddMultiple([]string{testKey0}, nil) + require.Equal(t, update.ErrLenMismatch, err) + }) + t.Run("duplicated keys should error on Add", func(t *testing.T) { + t.Parallel() + + adsc := NewAccountsDBSyncersContainer() + err := adsc.AddMultiple([]string{testKey0, testKey1, testKey1}, []update.AccountsDBSyncer{testAccountsDBSyncersVal0, testAccountsDBSyncersVal1, testAccountsDBSyncersVal1}) + require.Equal(t, update.ErrContainerKeyAlreadyExists, err) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + adsc := NewAccountsDBSyncersContainer() + err := adsc.AddMultiple([]string{testKey0, testKey1}, []update.AccountsDBSyncer{testAccountsDBSyncersVal0, testAccountsDBSyncersVal1}) + require.NoError(t, err) + + res0, err := adsc.Get(testKey0) + require.NoError(t, err) + require.Equal(t, testAccountsDBSyncersVal0, res0) + + res1, err := adsc.Get(testKey1) + require.NoError(t, err) + require.Equal(t, testAccountsDBSyncersVal1, res1) + + require.Equal(t, 2, adsc.Len()) + }) } -func TestAccountDBSyncers_ReplaceShouldWork(t *testing.T) { +func TestAccountDBSyncers_Replace(t *testing.T) { t.Parallel() - adsc := NewAccountsDBSyncersContainer() - testKey := "key" - testVal := &mock.AccountsDBSyncerStub{} - err := adsc.Add(testKey, testVal) - require.NoError(t, err) - - res, err := adsc.Get(testKey) - require.NoError(t, err) - require.Equal(t, testVal, res) - - // update - newTestVal := &mock.AccountsDBSyncerStub{ - SyncAccountsCalled: func(_ []byte) error { - return errors.New("local error") - }, - } - err = adsc.Replace(testKey, newTestVal) - require.NoError(t, err) - - res, err = adsc.Get(testKey) - require.NoError(t, err) - require.Equal(t, newTestVal, res) + t.Run("nil val should error", func(t *testing.T) { + t.Parallel() + + adsc := NewAccountsDBSyncersContainer() + err := adsc.Replace(testKey, nil) + require.Equal(t, update.ErrNilContainerElement, err) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + adsc := NewAccountsDBSyncersContainer() + err := adsc.Add(testKey, testAccountsDBSyncersVal) + require.NoError(t, err) + + res, err := adsc.Get(testKey) + require.NoError(t, err) + require.Equal(t, testAccountsDBSyncersVal, res) + + // update + newtestAccountsDBSyncersVal := &mock.AccountsDBSyncerStub{ + SyncAccountsCalled: func(_ []byte, _ common.StorageMarker) error { + return errors.New("local error") + }, + } + err = adsc.Replace(testKey, newtestAccountsDBSyncersVal) + require.NoError(t, err) + + res, err = adsc.Get(testKey) + 
require.NoError(t, err) + require.Equal(t, newtestAccountsDBSyncersVal, res) + }) } -func TestAccountDBSyncers_DeleteShouldWork(t *testing.T) { +func TestAccountDBSyncers_RemoveShouldWork(t *testing.T) { t.Parallel() adsc := NewAccountsDBSyncersContainer() - testKey := "key" - testVal := &mock.AccountsDBSyncerStub{} - err := adsc.Add(testKey, testVal) + err := adsc.Add(testKey, testAccountsDBSyncersVal) require.NoError(t, err) res, err := adsc.Get(testKey) require.NoError(t, err) - require.Equal(t, testVal, res) + require.Equal(t, testAccountsDBSyncersVal, res) adsc.Remove(testKey) diff --git a/update/container/export_test.go b/update/container/export_test.go new file mode 100644 index 00000000000..2611cf2805f --- /dev/null +++ b/update/container/export_test.go @@ -0,0 +1,15 @@ +package containers + +// AddInterface - +func (a *accountDBSyncers) AddInterface(key string, val interface{}) error { + a.objects.Insert(key, val) + + return nil +} + +// AddInterface - +func (t *trieSyncers) AddInterface(key string, val interface{}) error { + t.objects.Insert(key, val) + + return nil +} diff --git a/update/container/trieSyncers_test.go b/update/container/trieSyncers_test.go index d3362e6592d..1778f7f931a 100644 --- a/update/container/trieSyncers_test.go +++ b/update/container/trieSyncers_test.go @@ -11,6 +11,8 @@ import ( "github.com/stretchr/testify/require" ) +var testTrieSyncersVal = &mock.TrieSyncersStub{} + func TestNewTrieSyncersContainer(t *testing.T) { t.Parallel() @@ -18,82 +20,157 @@ func TestNewTrieSyncersContainer(t *testing.T) { require.False(t, check.IfNil(tsc)) } -func TestTrieSyncers_AddGetShouldWork(t *testing.T) { +func TestTrieSyncers_Get(t *testing.T) { t.Parallel() - tsc := NewTrieSyncersContainer() - testKey := "key" - testVal := &mock.TrieSyncersStub{} - err := tsc.Add(testKey, testVal) - require.NoError(t, err) + t.Run("missing key should error", func(t *testing.T) { + t.Parallel() + + tsc := NewTrieSyncersContainer() + val, err := tsc.Get(testKey) + require.Equal(t, update.ErrInvalidContainerKey, err) + require.Nil(t, val) + }) + t.Run("invalid data should error", func(t *testing.T) { + t.Parallel() + + tsc := NewTrieSyncersContainer() + + _ = tsc.AddInterface(testKey, "not an account db syncer") + val, err := tsc.Get(testKey) + require.Equal(t, update.ErrWrongTypeInContainer, err) + require.Nil(t, val) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + tsc := NewTrieSyncersContainer() + err := tsc.Add(testKey, testTrieSyncersVal) + require.NoError(t, err) + + res, err := tsc.Get(testKey) + require.NoError(t, err) + require.Equal(t, testTrieSyncersVal, res) + }) +} - res, err := tsc.Get(testKey) - require.NoError(t, err) - require.Equal(t, testVal, res) +func TestTrieSyncers_Add(t *testing.T) { + t.Parallel() + + t.Run("nil value should error", func(t *testing.T) { + t.Parallel() + + tsc := NewTrieSyncersContainer() + err := tsc.Add(testKey, nil) + require.Equal(t, update.ErrNilContainerElement, err) + }) + t.Run("duplicated key should error", func(t *testing.T) { + t.Parallel() + + tsc := NewTrieSyncersContainer() + err := tsc.Add(testKey, testTrieSyncersVal) + require.NoError(t, err) + + err = tsc.Add(testKey, testTrieSyncersVal) + require.Equal(t, update.ErrContainerKeyAlreadyExists, err) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + tsc := NewTrieSyncersContainer() + err := tsc.Add(testKey, testTrieSyncersVal) + require.NoError(t, err) + }) } -func TestTrieSyncers_AddMultipleShouldWork(t *testing.T) { +func 
TestTrieSyncers_AddMultiple(t *testing.T) { t.Parallel() - tsc := NewTrieSyncersContainer() testKey0 := "key0" - testVal0 := &mock.TrieSyncersStub{} + testTrieSyncersVal0 := &mock.TrieSyncersStub{} testKey1 := "key1" - testVal1 := &mock.TrieSyncersStub{} + testTrieSyncersVal1 := &mock.TrieSyncersStub{} - err := tsc.AddMultiple([]string{testKey0, testKey1}, []update.TrieSyncer{testVal0, testVal1}) - require.NoError(t, err) + t.Run("different lengths should error", func(t *testing.T) { + t.Parallel() - res0, err := tsc.Get(testKey0) - require.NoError(t, err) - require.Equal(t, testVal0, res0) + tsc := NewTrieSyncersContainer() + err := tsc.AddMultiple([]string{testKey0}, nil) + require.Equal(t, update.ErrLenMismatch, err) + }) + t.Run("duplicated keys should error on Add", func(t *testing.T) { + t.Parallel() - res1, err := tsc.Get(testKey1) - require.NoError(t, err) - require.Equal(t, testVal1, res1) + tsc := NewTrieSyncersContainer() + err := tsc.AddMultiple([]string{testKey0, testKey1, testKey1}, []update.TrieSyncer{testTrieSyncersVal0, testTrieSyncersVal1, testTrieSyncersVal1}) + require.Equal(t, update.ErrContainerKeyAlreadyExists, err) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() - require.Equal(t, 2, tsc.Len()) -} + tsc := NewTrieSyncersContainer() -func TestTrieSyncers_ReplaceShouldWork(t *testing.T) { - t.Parallel() + err := tsc.AddMultiple([]string{testKey0, testKey1}, []update.TrieSyncer{testTrieSyncersVal0, testTrieSyncersVal1}) + require.NoError(t, err) - tsc := NewTrieSyncersContainer() - testKey := "key" - testVal := &mock.TrieSyncersStub{} - err := tsc.Add(testKey, testVal) - require.NoError(t, err) + res0, err := tsc.Get(testKey0) + require.NoError(t, err) + require.Equal(t, testTrieSyncersVal0, res0) - res, err := tsc.Get(testKey) - require.NoError(t, err) - require.Equal(t, testVal, res) - - // update - newTestVal := &mock.TrieSyncersStub{ - StartSyncingCalled: func(_ []byte, _ context.Context) error { - return errors.New("local err") - }, - } - err = tsc.Replace(testKey, newTestVal) - require.NoError(t, err) + res1, err := tsc.Get(testKey1) + require.NoError(t, err) + require.Equal(t, testTrieSyncersVal1, res1) - res, err = tsc.Get(testKey) - require.NoError(t, err) - require.Equal(t, newTestVal, res) + require.Equal(t, 2, tsc.Len()) + }) +} + +func TestTrieSyncers_Replace(t *testing.T) { + t.Parallel() + + t.Run("nil val should error", func(t *testing.T) { + t.Parallel() + + tsc := NewTrieSyncersContainer() + err := tsc.Replace(testKey, nil) + require.Equal(t, update.ErrNilContainerElement, err) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + tsc := NewTrieSyncersContainer() + err := tsc.Add(testKey, testTrieSyncersVal) + require.NoError(t, err) + + res, err := tsc.Get(testKey) + require.NoError(t, err) + require.Equal(t, testTrieSyncersVal, res) + + // update + newtestTrieSyncersVal := &mock.TrieSyncersStub{ + StartSyncingCalled: func(_ []byte, _ context.Context) error { + return errors.New("local err") + }, + } + err = tsc.Replace(testKey, newtestTrieSyncersVal) + require.NoError(t, err) + + res, err = tsc.Get(testKey) + require.NoError(t, err) + require.Equal(t, newtestTrieSyncersVal, res) + }) } -func TestTrieSyncers_DeleteShouldWork(t *testing.T) { +func TestTrieSyncers_RemoveShouldWork(t *testing.T) { t.Parallel() tsc := NewTrieSyncersContainer() - testKey := "key" - testVal := &mock.TrieSyncersStub{} - err := tsc.Add(testKey, testVal) + err := tsc.Add(testKey, testTrieSyncersVal) require.NoError(t, err) res, err := 
tsc.Get(testKey) require.NoError(t, err) - require.Equal(t, testVal, res) + require.Equal(t, testTrieSyncersVal, res) tsc.Remove(testKey) diff --git a/update/factory/accountDBSyncerContainerFactory.go b/update/factory/accountDBSyncerContainerFactory.go index 41fe4050c7a..ec1929c754f 100644 --- a/update/factory/accountDBSyncerContainerFactory.go +++ b/update/factory/accountDBSyncerContainerFactory.go @@ -15,7 +15,6 @@ import ( "github.com/multiversx/mx-chain-go/storage" "github.com/multiversx/mx-chain-go/trie" "github.com/multiversx/mx-chain-go/trie/statistics" - "github.com/multiversx/mx-chain-go/trie/storageMarker" "github.com/multiversx/mx-chain-go/update" containers "github.com/multiversx/mx-chain-go/update/container" "github.com/multiversx/mx-chain-go/update/genesis" @@ -150,7 +149,6 @@ func (a *accountDBSyncersContainerFactory) createUserAccountsSyncer(shardId uint MaxHardCapForMissingNodes: a.maxHardCapForMissingNodes, TrieSyncerVersion: a.trieSyncerVersion, CheckNodesOnDisk: a.checkNodesOnDisk, - StorageMarker: storageMarker.NewTrieStorageMarker(), UserAccountsSyncStatisticsHandler: statistics.NewTrieSyncStatistics(), AppStatusHandler: disabled.NewAppStatusHandler(), }, @@ -180,7 +178,6 @@ func (a *accountDBSyncersContainerFactory) createValidatorAccountsSyncer(shardId MaxHardCapForMissingNodes: a.maxHardCapForMissingNodes, TrieSyncerVersion: a.trieSyncerVersion, CheckNodesOnDisk: a.checkNodesOnDisk, - StorageMarker: storageMarker.NewTrieStorageMarker(), UserAccountsSyncStatisticsHandler: statistics.NewTrieSyncStatistics(), AppStatusHandler: disabled.NewAppStatusHandler(), }, diff --git a/update/factory/dataTrieFactory.go b/update/factory/dataTrieFactory.go index 215c90fbf9a..db3fa4ea71b 100644 --- a/update/factory/dataTrieFactory.go +++ b/update/factory/dataTrieFactory.go @@ -10,6 +10,7 @@ import ( "github.com/multiversx/mx-chain-go/common" commonDisabled "github.com/multiversx/mx-chain-go/common/disabled" "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/sharding" "github.com/multiversx/mx-chain-go/state" "github.com/multiversx/mx-chain-go/storage/database" @@ -73,6 +74,7 @@ func NewDataTrieFactory(args ArgsNewDataTrieFactory) (*dataTrieFactory, error) { }, CheckpointHashesHolder: disabled.NewDisabledCheckpointHashesHolder(), IdleProvider: commonDisabled.NewProcessStatusHandler(), + Identifier: dataRetriever.UserAccountsUnit.String(), } options := trie.StorageManagerOptions{ PruningEnabled: false, diff --git a/update/genesis/import.go b/update/genesis/import.go index 5d6b3e7ea90..e740564c424 100644 --- a/update/genesis/import.go +++ b/update/genesis/import.go @@ -16,11 +16,11 @@ import ( "github.com/multiversx/mx-chain-go/common" commonDisabled "github.com/multiversx/mx-chain-go/common/disabled" "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/state" "github.com/multiversx/mx-chain-go/state/factory" "github.com/multiversx/mx-chain-go/state/storagePruningManager/disabled" "github.com/multiversx/mx-chain-go/trie" - triesFactory "github.com/multiversx/mx-chain-go/trie/factory" "github.com/multiversx/mx-chain-go/update" ) @@ -292,9 +292,9 @@ func (si *stateImport) getTrie(shardID uint32, accType Type) (common.Trie, error return trieForShard, nil } - trieStorageManager := si.trieStorageManagers[triesFactory.UserAccountTrie] + trieStorageManager := si.trieStorageManagers[dataRetriever.UserAccountsUnit.String()] if accType == 
ValidatorAccount { - trieStorageManager = si.trieStorageManagers[triesFactory.PeerAccountTrie] + trieStorageManager = si.trieStorageManagers[dataRetriever.PeerAccountsUnit.String()] } trieForShard, err := trie.NewTrie(trieStorageManager, si.marshalizer, si.hasher, maxTrieLevelInMemory) @@ -329,7 +329,7 @@ func (si *stateImport) importDataTrie(identifier string, shID uint32, keys [][]b return fmt.Errorf("%w wanted a roothash", update.ErrWrongTypeAssertion) } - dataTrie, err := trie.NewTrie(si.trieStorageManagers[triesFactory.UserAccountTrie], si.marshalizer, si.hasher, maxTrieLevelInMemory) + dataTrie, err := trie.NewTrie(si.trieStorageManagers[dataRetriever.UserAccountsUnit.String()], si.marshalizer, si.hasher, maxTrieLevelInMemory) if err != nil { return err } diff --git a/update/genesis/import_test.go b/update/genesis/import_test.go index c670865c2fa..30dd5f95492 100644 --- a/update/genesis/import_test.go +++ b/update/genesis/import_test.go @@ -10,9 +10,10 @@ import ( "github.com/multiversx/mx-chain-core-go/data/block" "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/testscommon" "github.com/multiversx/mx-chain-go/testscommon/hashingMocks" - "github.com/multiversx/mx-chain-go/trie/factory" + "github.com/multiversx/mx-chain-go/testscommon/storageManager" "github.com/multiversx/mx-chain-go/update" "github.com/multiversx/mx-chain-go/update/mock" "github.com/stretchr/testify/assert" @@ -23,7 +24,7 @@ import ( func TestNewStateImport(t *testing.T) { trieStorageManagers := make(map[string]common.StorageManager) - trieStorageManagers[factory.UserAccountTrie] = &testscommon.StorageManagerStub{} + trieStorageManagers[dataRetriever.UserAccountsUnit.String()] = &storageManager.StorageManagerStub{} tests := []struct { name string args ArgsNewStateImport @@ -86,8 +87,8 @@ func TestImportAll(t *testing.T) { t.Parallel() trieStorageManagers := make(map[string]common.StorageManager) - trieStorageManagers[factory.UserAccountTrie] = &testscommon.StorageManagerStub{} - trieStorageManagers[factory.PeerAccountTrie] = &testscommon.StorageManagerStub{} + trieStorageManagers[dataRetriever.UserAccountsUnit.String()] = &storageManager.StorageManagerStub{} + trieStorageManagers[dataRetriever.PeerAccountsUnit.String()] = &storageManager.StorageManagerStub{} args := ArgsNewStateImport{ HardforkStorer: &mock.HardforkStorerStub{}, @@ -110,7 +111,7 @@ func TestStateImport_ImportUnFinishedMetaBlocksShouldWork(t *testing.T) { t.Parallel() trieStorageManagers := make(map[string]common.StorageManager) - trieStorageManagers[factory.UserAccountTrie] = &testscommon.StorageManagerStub{} + trieStorageManagers[dataRetriever.UserAccountsUnit.String()] = &storageManager.StorageManagerStub{} hasher := &hashingMocks.HasherMock{} marshahlizer := &mock.MarshalizerMock{} diff --git a/update/interface.go b/update/interface.go index bdd993e0392..6487b71438e 100644 --- a/update/interface.go +++ b/update/interface.go @@ -175,7 +175,7 @@ type WhiteListHandler interface { // AccountsDBSyncer defines the methods for the accounts db syncer type AccountsDBSyncer interface { GetSyncedTries() map[string]common.Trie - SyncAccounts(rootHash []byte) error + SyncAccounts(rootHash []byte, storageMarker common.StorageMarker) error IsInterfaceNil() bool } diff --git a/update/mock/accountsDBSyncerStub.go b/update/mock/accountsDBSyncerStub.go index 9ff9abb9017..39477bdc70a 100644 --- a/update/mock/accountsDBSyncerStub.go +++ 
b/update/mock/accountsDBSyncerStub.go @@ -7,7 +7,7 @@ import ( // AccountsDBSyncerStub - type AccountsDBSyncerStub struct { GetSyncedTriesCalled func() map[string]common.Trie - SyncAccountsCalled func(rootHash []byte) error + SyncAccountsCalled func(rootHash []byte, storageMarker common.StorageMarker) error } // GetSyncedTries - @@ -19,9 +19,9 @@ func (a *AccountsDBSyncerStub) GetSyncedTries() map[string]common.Trie { } // SyncAccounts - -func (a *AccountsDBSyncerStub) SyncAccounts(rootHash []byte) error { +func (a *AccountsDBSyncerStub) SyncAccounts(rootHash []byte, storageMarker common.StorageMarker) error { if a.SyncAccountsCalled != nil { - return a.SyncAccountsCalled(rootHash) + return a.SyncAccountsCalled(rootHash, storageMarker) } return nil } diff --git a/update/sync/coordinator_test.go b/update/sync/coordinator_test.go index de7b26d5032..b56b2d8f99a 100644 --- a/update/sync/coordinator_test.go +++ b/update/sync/coordinator_test.go @@ -109,7 +109,7 @@ func createSyncTrieState(retErr bool) update.EpochStartTriesSyncHandler { AccountsDBsSyncers: &mock.AccountsDBSyncersStub{ GetCalled: func(key string) (syncer update.AccountsDBSyncer, err error) { return &mock.AccountsDBSyncerStub{ - SyncAccountsCalled: func(rootHash []byte) error { + SyncAccountsCalled: func(rootHash []byte, _ common.StorageMarker) error { if retErr { return errors.New("err") } diff --git a/update/sync/syncAccountsDBs.go b/update/sync/syncAccountsDBs.go index 38c8a2fcf72..803460bd914 100644 --- a/update/sync/syncAccountsDBs.go +++ b/update/sync/syncAccountsDBs.go @@ -10,6 +10,7 @@ import ( "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/state" + "github.com/multiversx/mx-chain-go/trie/storageMarker" "github.com/multiversx/mx-chain-go/update" "github.com/multiversx/mx-chain-go/update/genesis" ) @@ -149,7 +150,7 @@ func (st *syncAccountsDBs) syncAccountsOfType(accountType genesis.Type, trieID s return err } - err = accountsDBSyncer.SyncAccounts(rootHash) + err = accountsDBSyncer.SyncAccounts(rootHash, storageMarker.NewDisabledStorageMarker()) if err != nil { // TODO: critical error - should not happen - maybe recreate trie syncer here return err diff --git a/vm/factory/systemSCFactory.go b/vm/factory/systemSCFactory.go index d5272aed81a..0cccff2ce4b 100644 --- a/vm/factory/systemSCFactory.go +++ b/vm/factory/systemSCFactory.go @@ -219,9 +219,9 @@ func (scf *systemSCFactory) createESDTContract() (vm.SystemSmartContract, error) } func (scf *systemSCFactory) createGovernanceContract() (vm.SystemSmartContract, error) { - configChangeAddress, err := scf.addressPubKeyConverter.Decode(scf.systemSCConfig.GovernanceSystemSCConfig.ChangeConfigAddress) + ownerAddress, err := scf.addressPubKeyConverter.Decode(scf.systemSCConfig.GovernanceSystemSCConfig.OwnerAddress) if err != nil { - return nil, fmt.Errorf("%w for GovernanceSystemSCConfig.ChangeConfigAddress in systemSCFactory", vm.ErrInvalidAddress) + return nil, fmt.Errorf("%w for GovernanceSystemSCConfig.OwnerAddress in systemSCFactory", vm.ErrInvalidAddress) } argsGovernance := systemSmartContracts.ArgsNewGovernanceContract{ @@ -235,7 +235,7 @@ func (scf *systemSCFactory) createGovernanceContract() (vm.SystemSmartContract, ValidatorSCAddress: vm.ValidatorSCAddress, EnableEpochsHandler: scf.enableEpochsHandler, UnBondPeriodInEpochs: scf.systemSCConfig.StakingSystemSCConfig.UnBondPeriodInEpochs, - ConfigChangeAddress: configChangeAddress, + OwnerAddress: ownerAddress, } governance, err := 
systemSmartContracts.NewGovernanceContract(argsGovernance) return governance, err diff --git a/vm/factory/systemSCFactory_test.go b/vm/factory/systemSCFactory_test.go index 2d52a260a18..5ea4e2b777e 100644 --- a/vm/factory/systemSCFactory_test.go +++ b/vm/factory/systemSCFactory_test.go @@ -48,8 +48,9 @@ func createMockNewSystemScFactoryArgs() ArgsNewSystemSCFactory { MinQuorum: 0.5, MinPassThreshold: 0.5, MinVetoThreshold: 0.5, + LostProposalFee: "1", }, - ChangeConfigAddress: "3132333435363738393031323334353637383930313233343536373839303234", + OwnerAddress: "3132333435363738393031323334353637383930313233343536373839303234", }, StakingSystemSCConfig: config.StakingSystemSCConfig{ GenesisNodePrice: "1000", diff --git a/vm/systemSmartContracts/eei.go b/vm/systemSmartContracts/eei.go index 32da253277f..d3ca57029e3 100644 --- a/vm/systemSmartContracts/eei.go +++ b/vm/systemSmartContracts/eei.go @@ -161,8 +161,7 @@ func (host *vmContext) SetStorage(key []byte, value []byte) { // GetBalance returns the balance of the given address func (host *vmContext) GetBalance(addr []byte) *big.Int { - strAdr := string(addr) - outAcc, exists := host.outputAccounts[strAdr] + outAcc, exists := host.outputAccounts[string(addr)] if exists { actualBalance := big.NewInt(0).Add(outAcc.Balance, outAcc.BalanceDelta) return actualBalance diff --git a/vm/systemSmartContracts/governance.go b/vm/systemSmartContracts/governance.go index 2e97f2ccb97..74763ffed1d 100644 --- a/vm/systemSmartContracts/governance.go +++ b/vm/systemSmartContracts/governance.go @@ -18,6 +18,7 @@ import ( ) const governanceConfigKey = "governanceConfig" +const accumulatedFeeKey = "accumulatedFee" const noncePrefix = "n_" const proposalPrefix = "p_" const yesString = "yes" @@ -37,7 +38,7 @@ type ArgsNewGovernanceContract struct { GovernanceSCAddress []byte DelegationMgrSCAddress []byte ValidatorSCAddress []byte - ConfigChangeAddress []byte + OwnerAddress []byte UnBondPeriodInEpochs uint32 EnableEpochsHandler common.EnableEpochsHandler } @@ -50,7 +51,6 @@ type governanceContract struct { governanceSCAddress []byte delegationMgrSCAddress []byte validatorSCAddress []byte - changeConfigAddress []byte marshalizer marshal.Marshalizer hasher hashing.Hasher governanceConfig config.GovernanceSystemSCConfig @@ -88,7 +88,7 @@ func NewGovernanceContract(args ArgsNewGovernanceContract) (*governanceContract, if len(args.GovernanceSCAddress) < 1 { return nil, fmt.Errorf("%w for governance sc address", vm.ErrInvalidAddress) } - if len(args.ConfigChangeAddress) < 1 { + if len(args.OwnerAddress) < 1 { return nil, fmt.Errorf("%w for change config address", vm.ErrInvalidAddress) } @@ -96,7 +96,7 @@ func NewGovernanceContract(args ArgsNewGovernanceContract) (*governanceContract, eei: args.Eei, gasCost: args.GasCost, baseProposalCost: baseProposalCost, - ownerAddress: nil, + ownerAddress: args.OwnerAddress, governanceSCAddress: args.GovernanceSCAddress, delegationMgrSCAddress: args.DelegationMgrSCAddress, validatorSCAddress: args.ValidatorSCAddress, @@ -105,7 +105,6 @@ func NewGovernanceContract(args ArgsNewGovernanceContract) (*governanceContract, governanceConfig: args.GovernanceConfig, enableEpochsHandler: args.EnableEpochsHandler, unBondPeriodInEpochs: args.UnBondPeriodInEpochs, - changeConfigAddress: args.ConfigChangeAddress, } return g, nil @@ -156,6 +155,8 @@ func (g *governanceContract) Execute(args *vmcommon.ContractCallInput) vmcommon. 
return g.viewDelegatedVoteInfo(args) case "viewProposal": return g.viewProposal(args) + case "claimAccumulatedFees": + return g.claimAccumulatedFees(args) } g.eei.AddReturnMessage("invalid method to call") @@ -175,8 +176,6 @@ func (g *governanceContract) init(args *vmcommon.ContractCallInput) vmcommon.Ret g.eei.SetStorage([]byte(governanceConfigKey), marshaledData) g.eei.SetStorage([]byte(ownerKey), args.CallerAddr) - g.ownerAddress = make([]byte, 0, len(args.CallerAddr)) - g.ownerAddress = append(g.ownerAddress, args.CallerAddr...) return vmcommon.Ok } @@ -198,19 +197,18 @@ func (g *governanceContract) initV2(args *vmcommon.ContractCallInput) vmcommon.R } g.eei.SetStorage([]byte(ownerKey), args.CallerAddr) - g.ownerAddress = make([]byte, 0, len(args.CallerAddr)) - g.ownerAddress = append(g.ownerAddress, args.CallerAddr...) return vmcommon.Ok } // changeConfig allows the owner to change the configuration for requesting proposals // args.Arguments[0] - proposalFee - as string -// args.Arguments[1] - minQuorum - 0-10000 - represents percentage -// args.Arguments[2] - minVeto - 0-10000 - represents percentage -// args.Arguments[3] - minPass - 0-10000 - represents percentage +// args.Arguments[1] - lostProposalFee - as string +// args.Arguments[2] - minQuorum - 0-10000 - represents percentage +// args.Arguments[3] - minVeto - 0-10000 - represents percentage +// args.Arguments[4] - minPass - 0-10000 - represents percentage func (g *governanceContract) changeConfig(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { - if !bytes.Equal(g.changeConfigAddress, args.CallerAddr) { + if !bytes.Equal(g.ownerAddress, args.CallerAddr) { g.eei.AddReturnMessage("changeConfig can be called only by owner") return vmcommon.UserError } @@ -218,8 +216,8 @@ func (g *governanceContract) changeConfig(args *vmcommon.ContractCallInput) vmco g.eei.AddReturnMessage("changeConfig can be called only without callValue") return vmcommon.UserError } - if len(args.Arguments) != 4 { - g.eei.AddReturnMessage("changeConfig needs 4 arguments") + if len(args.Arguments) != 5 { + g.eei.AddReturnMessage("changeConfig needs 5 arguments") return vmcommon.UserError } @@ -228,17 +226,28 @@ func (g *governanceContract) changeConfig(args *vmcommon.ContractCallInput) vmco g.eei.AddReturnMessage("changeConfig first argument is incorrectly formatted") return vmcommon.UserError } - minQuorum, err := convertDecimalToPercentage(args.Arguments[1]) + lostProposalFee, okConvert := big.NewInt(0).SetString(string(args.Arguments[1]), conversionBase) + if !okConvert || lostProposalFee.Cmp(zero) <= 0 { + g.eei.AddReturnMessage("changeConfig second argument is incorrectly formatted") + return vmcommon.UserError + } + if proposalFee.Cmp(lostProposalFee) < 0 { + errLocal := fmt.Errorf("%w proposal fee is smaller than lost proposal fee ", vm.ErrIncorrectConfig) + g.eei.AddReturnMessage(errLocal.Error()) + return vmcommon.UserError + } + + minQuorum, err := convertDecimalToPercentage(args.Arguments[2]) if err != nil { g.eei.AddReturnMessage(err.Error() + " minQuorum") return vmcommon.UserError } - minVeto, err := convertDecimalToPercentage(args.Arguments[2]) + minVeto, err := convertDecimalToPercentage(args.Arguments[3]) if err != nil { g.eei.AddReturnMessage(err.Error() + " minVeto") return vmcommon.UserError } - minPass, err := convertDecimalToPercentage(args.Arguments[3]) + minPass, err := convertDecimalToPercentage(args.Arguments[4]) if err != nil { g.eei.AddReturnMessage(err.Error() + " minPass") return vmcommon.UserError @@ -254,6 +263,7 @@ func (g
*governanceContract) changeConfig(args *vmcommon.ContractCallInput) vmco scConfig.MinVetoThreshold = minVeto scConfig.MinPassThreshold = minPass scConfig.ProposalFee = proposalFee + scConfig.LostProposalFee = lostProposalFee g.baseProposalCost.Set(proposalFee) err = g.saveConfig(scConfig) @@ -605,7 +615,13 @@ func (g *governanceContract) closeProposal(args *vmcommon.ContractCallInput) vmc } generalProposal.Closed = true - err = g.computeEndResults(generalProposal) + baseConfig, err := g.getConfig() + if err != nil { + g.eei.AddReturnMessage(err.Error()) + return vmcommon.UserError + } + + generalProposal.Passed = g.computeEndResults(generalProposal, baseConfig) if err != nil { g.eei.AddReturnMessage("computeEndResults error " + err.Error()) return vmcommon.UserError @@ -617,7 +633,13 @@ func (g *governanceContract) closeProposal(args *vmcommon.ContractCallInput) vmc return vmcommon.UserError } - err = g.eei.Transfer(args.CallerAddr, args.RecipientAddr, generalProposal.ProposalCost, nil, 0) + tokensToReturn := big.NewInt(0).Set(generalProposal.ProposalCost) + if !generalProposal.Passed { + tokensToReturn.Sub(tokensToReturn, baseConfig.LostProposalFee) + g.addToAccumulatedFees(baseConfig.LostProposalFee) + } + + err = g.eei.Transfer(args.CallerAddr, args.RecipientAddr, tokensToReturn, nil, 0) if err != nil { g.eei.AddReturnMessage(err.Error()) return vmcommon.UserError @@ -633,6 +655,52 @@ func (g *governanceContract) closeProposal(args *vmcommon.ContractCallInput) vmc return vmcommon.Ok } +func (g *governanceContract) getAccumulatedFees() *big.Int { + currentData := g.eei.GetStorage([]byte(accumulatedFeeKey)) + return big.NewInt(0).SetBytes(currentData) +} + +func (g *governanceContract) setAccumulatedFees(value *big.Int) { + g.eei.SetStorage([]byte(accumulatedFeeKey), value.Bytes()) +} + +func (g *governanceContract) addToAccumulatedFees(value *big.Int) { + currentValue := g.getAccumulatedFees() + currentValue.Add(currentValue, value) + g.setAccumulatedFees(currentValue) +} + +func (g *governanceContract) claimAccumulatedFees(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { + if args.CallValue.Cmp(zero) != 0 { + g.eei.AddReturnMessage("callValue expected to be 0") + return vmcommon.UserError + } + if len(args.Arguments) != 0 { + g.eei.AddReturnMessage("invalid number of arguments, expected 0") + return vmcommon.UserError + } + if !bytes.Equal(args.CallerAddr, g.ownerAddress) { + g.eei.AddReturnMessage("can be called only by owner") + return vmcommon.UserError + } + err := g.eei.UseGas(g.gasCost.MetaChainSystemSCsCost.CloseProposal) + if err != nil { + g.eei.AddReturnMessage("not enough gas") + return vmcommon.OutOfGas + } + + accumulatedFees := g.getAccumulatedFees() + g.setAccumulatedFees(big.NewInt(0)) + + err = g.eei.Transfer(args.CallerAddr, args.RecipientAddr, accumulatedFees, nil, 0) + if err != nil { + g.eei.AddReturnMessage(err.Error()) + return vmcommon.UserError + } + + return vmcommon.Ok +} + // viewVotingPower returns the total voting power func (g *governanceContract) viewVotingPower(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { err := g.checkViewFuncArguments(args, 1) @@ -838,12 +906,7 @@ func (g *governanceContract) getTotalStakeInSystem() *big.Int { } // computeEndResults computes if a proposal has passed or not based on votes accumulated -func (g *governanceContract) computeEndResults(proposal *GeneralProposal) error { - baseConfig, err := g.getConfig() - if err != nil { - return err - } - +func (g *governanceContract) computeEndResults(proposal 
*GeneralProposal, baseConfig *GovernanceConfigV2) bool { totalVotes := big.NewInt(0).Add(proposal.Yes, proposal.No) totalVotes.Add(totalVotes, proposal.Veto) totalVotes.Add(totalVotes, proposal.Abstain) @@ -853,27 +916,23 @@ func (g *governanceContract) computeEndResults(proposal *GeneralProposal) error if totalVotes.Cmp(minQuorumOutOfStake) == -1 { g.eei.Finish([]byte("Proposal did not reach minQuorum")) - proposal.Passed = false - return nil + return false } minVetoOfTotalVotes := core.GetIntTrimmedPercentageOfValue(totalVotes, float64(baseConfig.MinVetoThreshold)) if proposal.Veto.Cmp(minVetoOfTotalVotes) >= 0 { - proposal.Passed = false g.eei.Finish([]byte("Proposal vetoed")) - return nil + return false } minPassOfTotalVotes := core.GetIntTrimmedPercentageOfValue(totalVotes, float64(baseConfig.MinPassThreshold)) if proposal.Yes.Cmp(minPassOfTotalVotes) >= 0 && proposal.Yes.Cmp(proposal.No) > 0 { g.eei.Finish([]byte("Proposal passed")) - proposal.Passed = true - return nil + return true } g.eei.Finish([]byte("Proposal rejected")) - proposal.Passed = false - return nil + return false } func (g *governanceContract) getActiveFundForDelegator(delegationAddress []byte, address []byte) (*big.Int, error) { @@ -1117,11 +1176,21 @@ func (g *governanceContract) convertV2Config(config config.GovernanceSystemSCCon return nil, vm.ErrIncorrectConfig } + lostProposalFee, success := big.NewInt(0).SetString(config.Active.LostProposalFee, conversionBase) + if !success { + return nil, vm.ErrIncorrectConfig + } + + if proposalFee.Cmp(lostProposalFee) < 0 { + return nil, fmt.Errorf("%w proposal fee is smaller than lost proposal fee ", vm.ErrIncorrectConfig) + } + return &GovernanceConfigV2{ MinQuorum: float32(config.Active.MinQuorum), MinPassThreshold: float32(config.Active.MinPassThreshold), MinVetoThreshold: float32(config.Active.MinVetoThreshold), ProposalFee: proposalFee, + LostProposalFee: lostProposalFee, }, nil } diff --git a/vm/systemSmartContracts/governance.pb.go b/vm/systemSmartContracts/governance.pb.go index 2f7e55df5c3..49e8acfc63e 100644 --- a/vm/systemSmartContracts/governance.pb.go +++ b/vm/systemSmartContracts/governance.pb.go @@ -268,7 +268,8 @@ type GovernanceConfigV2 struct { MinPassThreshold float32 `protobuf:"fixed32,2,opt,name=MinPassThreshold,proto3" json:"MinPassThreshold"` MinVetoThreshold float32 `protobuf:"fixed32,3,opt,name=MinVetoThreshold,proto3" json:"MinVetoThreshold"` ProposalFee *math_big.Int `protobuf:"bytes,4,opt,name=ProposalFee,proto3,casttypewith=math/big.Int;github.com/multiversx/mx-chain-core-go/data.BigIntCaster" json:"ProposalFee"` - LastProposalNonce uint64 `protobuf:"varint,5,opt,name=LastProposalNonce,proto3" json:"LastProposalNonce"` + LostProposalFee *math_big.Int `protobuf:"bytes,5,opt,name=LostProposalFee,proto3,casttypewith=math/big.Int;github.com/multiversx/mx-chain-core-go/data.BigIntCaster" json:"LostProposalFee"` + LastProposalNonce uint64 `protobuf:"varint,6,opt,name=LastProposalNonce,proto3" json:"LastProposalNonce"` } func (m *GovernanceConfigV2) Reset() { *m = GovernanceConfigV2{} } @@ -327,6 +328,13 @@ func (m *GovernanceConfigV2) GetProposalFee() *math_big.Int { return nil } +func (m *GovernanceConfigV2) GetLostProposalFee() *math_big.Int { + if m != nil { + return m.LostProposalFee + } + return nil +} + func (m *GovernanceConfigV2) GetLastProposalNonce() uint64 { if m != nil { return m.LastProposalNonce @@ -456,63 +464,64 @@ func init() { func init() { proto.RegisterFile("governance.proto", fileDescriptor_e18a03da5266c714) } var 
fileDescriptor_e18a03da5266c714 = []byte{ - // 883 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x56, 0x4d, 0x8f, 0xe3, 0x34, - 0x18, 0x6e, 0x9a, 0x76, 0x3e, 0x3c, 0x9d, 0x25, 0x6b, 0x16, 0x29, 0xe2, 0x10, 0x8f, 0x7a, 0xaa, - 0x40, 0xd3, 0x4a, 0x80, 0xb4, 0x12, 0x5c, 0xd8, 0x74, 0x87, 0x65, 0xa4, 0xdd, 0xee, 0x6c, 0x66, - 0x28, 0x2c, 0x17, 0xe4, 0x26, 0x9e, 0x34, 0xa2, 0xb1, 0x2b, 0xdb, 0xdd, 0x0f, 0x24, 0x24, 0x4e, - 0x9c, 0xf9, 0x19, 0x88, 0x5f, 0xc2, 0x71, 0x8e, 0x73, 0x0a, 0x4c, 0xe7, 0x02, 0x11, 0x87, 0x95, - 0xf8, 0x03, 0xc8, 0x4e, 0x9b, 0x8f, 0xe9, 0x69, 0x44, 0xb4, 0x27, 0xdb, 0x8f, 0xed, 0xe7, 0xc9, - 0xfb, 0xd8, 0xaf, 0xdf, 0x00, 0x2b, 0x64, 0x2f, 0x08, 0xa7, 0x98, 0xfa, 0xa4, 0x3f, 0xe7, 0x4c, - 0x32, 0xd8, 0xd6, 0xcd, 0xfb, 0x87, 0x61, 0x24, 0xa7, 0x8b, 0x49, 0xdf, 0x67, 0xf1, 0x20, 0x64, - 0x21, 0x1b, 0x68, 0x78, 0xb2, 0x38, 0xd7, 0x23, 0x3d, 0xd0, 0xbd, 0x6c, 0x57, 0xf7, 0x9f, 0x6d, - 0xf0, 0xce, 0x23, 0x42, 0x09, 0xc7, 0xb3, 0x13, 0xce, 0xe6, 0x4c, 0xe0, 0x19, 0x44, 0xa0, 0x3d, - 0x62, 0xd4, 0x27, 0xb6, 0x71, 0x60, 0xf4, 0x5a, 0xee, 0x6e, 0x9a, 0xa0, 0x0c, 0xf0, 0xb2, 0x06, - 0xf6, 0x01, 0x18, 0xb2, 0x38, 0x8e, 0xe4, 0x97, 0x58, 0x4c, 0xed, 0xe6, 0x81, 0xd1, 0xeb, 0xb8, - 0x77, 0xd2, 0x04, 0x95, 0x50, 0xaf, 0xd4, 0x87, 0x9f, 0x82, 0x3b, 0xa7, 0x12, 0x73, 0x39, 0x66, - 0x92, 0x1c, 0xcd, 0x99, 0x3f, 0xb5, 0x4d, 0xcd, 0x0c, 0xd3, 0x04, 0x15, 0x33, 0x99, 0xc4, 0x8d, - 0x95, 0xf0, 0x13, 0xd0, 0x39, 0xa2, 0x41, 0xb1, 0xb3, 0xa5, 0x77, 0x5a, 0x69, 0x82, 0xd6, 0x78, - 0xb6, 0xaf, 0xb2, 0x0a, 0x4e, 0x80, 0xf9, 0x9c, 0x08, 0xbb, 0xad, 0x3f, 0xed, 0x24, 0x4d, 0x90, - 0x1a, 0xfe, 0xf6, 0x07, 0x3a, 0x8a, 0xb1, 0x9c, 0x0e, 0x26, 0x51, 0xd8, 0x3f, 0xa6, 0xf2, 0xb3, - 0x92, 0x55, 0xf1, 0x62, 0x26, 0xa3, 0x17, 0x84, 0x8b, 0x57, 0x83, 0xf8, 0xd5, 0xa1, 0x3f, 0xc5, - 0x11, 0x3d, 0xf4, 0x19, 0x27, 0x87, 0x21, 0x1b, 0x04, 0x58, 0xe2, 0xbe, 0x1b, 0x85, 0xc7, 0x54, - 0x0e, 0xb1, 0x90, 0x84, 0x7b, 0x8a, 0x0d, 0x7e, 0x07, 0x9a, 0x23, 0x66, 0x6f, 0x69, 0x89, 0xa7, - 0x69, 0x82, 0x9a, 0x23, 0x56, 0x9f, 0x42, 0x73, 0xc4, 0x20, 0x01, 0xad, 0x31, 0x91, 0xcc, 0xde, - 0xd6, 0x12, 0xcf, 0xd2, 0x04, 0xe9, 0x71, 0x7d, 0x22, 0x9a, 0x0e, 0x52, 0xb0, 0xfd, 0x60, 0x22, - 0x24, 0x8e, 0xa8, 0xbd, 0xa3, 0x95, 0xce, 0xd2, 0x04, 0xad, 0xa1, 0xfa, 0xc4, 0xd6, 0x8c, 0xf0, - 0x07, 0xb0, 0xf7, 0x6c, 0xc1, 0xf8, 0x22, 0x3e, 0x95, 0xf8, 0x7b, 0x62, 0xef, 0x6a, 0xcd, 0x6f, - 0xd2, 0x04, 0x95, 0xe1, 0xfa, 0x74, 0xcb, 0xac, 0xb0, 0x0b, 0xb6, 0x4e, 0xb0, 0x10, 0x24, 0xb0, - 0xc1, 0x81, 0xd1, 0xdb, 0x71, 0x41, 0x9a, 0xa0, 0x15, 0xe2, 0xad, 0x5a, 0xb5, 0x66, 0x38, 0x63, - 0x6a, 0xcd, 0x5e, 0xb1, 0x26, 0x43, 0xbc, 0x55, 0x0b, 0xef, 0x83, 0xfd, 0x63, 0x21, 0x16, 0x84, - 0x3f, 0x08, 0x02, 0x4e, 0x84, 0xb0, 0x3b, 0x3a, 0x8a, 0xbb, 0x69, 0x82, 0xaa, 0x13, 0x5e, 0x75, - 0x08, 0x7f, 0x04, 0x9d, 0x75, 0x9e, 0x0d, 0x99, 0x90, 0xf6, 0xbe, 0xde, 0xf7, 0x5c, 0x5d, 0xe7, - 0x32, 0x5e, 0x5f, 0xf8, 0x15, 0xda, 0xee, 0xdf, 0x4d, 0x60, 0x3d, 0xca, 0x5f, 0x8e, 0x21, 0xa3, - 0xe7, 0x51, 0x08, 0x7b, 0x60, 0x67, 0xb4, 0x88, 0x47, 0x2c, 0x20, 0x42, 0xa7, 0xbc, 0xe9, 0x76, - 0xd2, 0x04, 0xe5, 0x98, 0x97, 0xf7, 0xe0, 0x87, 0x60, 0xf7, 0x49, 0x44, 0x33, 0x43, 0x75, 0xde, - 0xb7, 0xdd, 0xfd, 0x34, 0x41, 0x05, 0xe8, 0x15, 0x5d, 0xf8, 0x39, 0xb0, 0x9e, 0x44, 0x54, 0x99, - 0x7a, 0x36, 0xe5, 0x44, 0x4c, 0xd9, 0x2c, 0xd0, 0x79, 0xdf, 0x76, 0xef, 0xa5, 0x09, 0xda, 0x98, - 0xf3, 0x36, 0x90, 0x15, 0x83, 0xba, 0xa4, 0x05, 0x43, 0xab, 0xc2, 0x50, 0x99, 0xf3, 0x36, 0x10, - 0x75, 0xd7, 0xd6, 0xf1, 0x7f, 
0x41, 0xc8, 0xea, 0x3d, 0xd0, 0x77, 0xad, 0x04, 0xd7, 0x78, 0xd7, - 0x4a, 0xac, 0xdd, 0x9f, 0x4d, 0x00, 0x6f, 0x7a, 0x3d, 0xfe, 0xa8, 0xea, 0xa1, 0xb2, 0xbb, 0x79, - 0x4b, 0x0f, 0x9b, 0x7a, 0xcf, 0xff, 0xf1, 0xd0, 0xac, 0x30, 0xdc, 0xd2, 0xc3, 0xd6, 0x5b, 0xf4, - 0x10, 0x0e, 0xc1, 0xdd, 0xc7, 0x58, 0xc8, 0x35, 0x94, 0x95, 0xa5, 0xb6, 0x2e, 0x01, 0xef, 0xa5, - 0x09, 0xda, 0x9c, 0xf4, 0x36, 0xa1, 0xae, 0x0f, 0xac, 0xa7, 0x34, 0x64, 0x11, 0x0d, 0x55, 0x81, - 0x08, 0x1e, 0x47, 0x42, 0xaa, 0x24, 0x7f, 0x18, 0x71, 0xe2, 0x4b, 0xdb, 0x38, 0x30, 0x7b, 0xad, - 0x2c, 0xc9, 0x33, 0xc4, 0x5b, 0xb5, 0xea, 0xa4, 0x1e, 0x92, 0x19, 0x09, 0xb1, 0x24, 0xca, 0x75, - 0xb5, 0x4c, 0x9f, 0x54, 0x0e, 0x7a, 0x45, 0xb7, 0xfb, 0xaf, 0x09, 0xde, 0xcd, 0x47, 0xa7, 0x43, - 0xa5, 0x74, 0x4c, 0xcf, 0x19, 0x7c, 0x09, 0xc0, 0x19, 0x93, 0x78, 0x76, 0xc2, 0x5e, 0x12, 0xae, - 0xcf, 0xbb, 0xe3, 0x7e, 0xad, 0x6a, 0x65, 0x81, 0xd6, 0xe7, 0x5d, 0x89, 0x14, 0x4a, 0xb0, 0xfb, - 0x95, 0x20, 0x41, 0xa6, 0x9b, 0xd5, 0xe8, 0xb1, 0xfa, 0xfa, 0x1c, 0xac, 0x4f, 0xb6, 0xe0, 0xcc, - 0xc3, 0xcd, 0xde, 0x76, 0xf3, 0x46, 0xb8, 0x35, 0x3f, 0xed, 0x25, 0xd2, 0x75, 0xb8, 0x99, 0x6e, - 0xab, 0x1a, 0x6e, 0xcd, 0xb2, 0x05, 0xe7, 0x07, 0xf7, 0xc1, 0xbe, 0x3a, 0xe9, 0x31, 0x9e, 0x2d, - 0xc8, 0xd9, 0xeb, 0x39, 0x81, 0xdb, 0xfa, 0xc7, 0xc3, 0x6a, 0xc0, 0x2d, 0xf5, 0x77, 0x60, 0x19, - 0x70, 0x27, 0x2b, 0xe2, 0x56, 0x13, 0xee, 0xe5, 0x75, 0xd6, 0x32, 0xdd, 0xd1, 0xc5, 0x95, 0xd3, - 0xb8, 0xbc, 0x72, 0x1a, 0x6f, 0xae, 0x1c, 0xe3, 0xa7, 0xa5, 0x63, 0xfc, 0xba, 0x74, 0x8c, 0xdf, - 0x97, 0x8e, 0x71, 0xb1, 0x74, 0x8c, 0xcb, 0xa5, 0x63, 0xfc, 0xb9, 0x74, 0x8c, 0xbf, 0x96, 0x4e, - 0xe3, 0xcd, 0xd2, 0x31, 0x7e, 0xb9, 0x76, 0x1a, 0x17, 0xd7, 0x4e, 0xe3, 0xf2, 0xda, 0x69, 0x7c, - 0x7b, 0x4f, 0xbc, 0x16, 0x92, 0xc4, 0xa7, 0x31, 0xe6, 0x72, 0xc8, 0xa8, 0xe4, 0xd8, 0x97, 0x62, - 0xb2, 0xa5, 0x7f, 0xe7, 0x3e, 0xfe, 0x2f, 0x00, 0x00, 0xff, 0xff, 0x52, 0x6e, 0xfa, 0xb5, 0x18, - 0x0a, 0x00, 0x00, + // 902 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x56, 0x4f, 0x6f, 0x23, 0x35, + 0x14, 0xcf, 0xe4, 0x5f, 0x5b, 0x37, 0xdd, 0x9d, 0x35, 0x8b, 0x34, 0xe2, 0x30, 0xae, 0x72, 0x8a, + 0x40, 0x4d, 0x24, 0x40, 0x5a, 0x09, 0x2e, 0xec, 0x64, 0xcb, 0x52, 0xa9, 0x9b, 0xed, 0x4e, 0x4b, + 0x60, 0x11, 0x12, 0x72, 0x66, 0xdc, 0xc9, 0x88, 0x8c, 0x5d, 0x8d, 0x9d, 0xfd, 0x83, 0x84, 0xc4, + 0x89, 0x2b, 0x7c, 0x0c, 0xc4, 0x27, 0xe1, 0xd8, 0x63, 0x4f, 0x03, 0x4d, 0x2f, 0x60, 0x71, 0x58, + 0x89, 0x2f, 0x80, 0xec, 0x49, 0xe6, 0x4f, 0x72, 0xaa, 0x18, 0x71, 0xb2, 0xdf, 0xef, 0xd9, 0xbf, + 0xdf, 0xbc, 0x37, 0xcf, 0x7e, 0x06, 0x66, 0xc0, 0x5e, 0x90, 0x98, 0x62, 0xea, 0x91, 0xfe, 0x45, + 0xcc, 0x04, 0x83, 0x2d, 0x3d, 0xbc, 0x73, 0x10, 0x84, 0x62, 0x3a, 0x9f, 0xf4, 0x3d, 0x16, 0x0d, + 0x02, 0x16, 0xb0, 0x81, 0x86, 0x27, 0xf3, 0x73, 0x6d, 0x69, 0x43, 0xcf, 0xd2, 0x5d, 0xdd, 0xbf, + 0xb7, 0xc0, 0xdd, 0xc7, 0x84, 0x92, 0x18, 0xcf, 0x4e, 0x62, 0x76, 0xc1, 0x38, 0x9e, 0x41, 0x04, + 0x5a, 0x23, 0x46, 0x3d, 0x62, 0x19, 0xfb, 0x46, 0xaf, 0xe9, 0xec, 0xc8, 0x04, 0xa5, 0x80, 0x9b, + 0x0e, 0xb0, 0x0f, 0xc0, 0x90, 0x45, 0x51, 0x28, 0x3e, 0xc3, 0x7c, 0x6a, 0xd5, 0xf7, 0x8d, 0x5e, + 0xc7, 0xb9, 0x23, 0x13, 0x54, 0x40, 0xdd, 0xc2, 0x1c, 0x7e, 0x04, 0xee, 0x9c, 0x0a, 0x1c, 0x8b, + 0x31, 0x13, 0xe4, 0xf0, 0x82, 0x79, 0x53, 0xab, 0xa1, 0x99, 0xa1, 0x4c, 0xd0, 0x9a, 0xc7, 0x5d, + 0xb3, 0xe1, 0x87, 0xa0, 0x73, 0x48, 0xfd, 0x7c, 0x67, 0x53, 0xef, 0x34, 0x65, 0x82, 0x4a, 0xb8, + 0x5b, 0xb2, 0xe0, 0x04, 0x34, 0x9e, 0x13, 0x6e, 0xb5, 0xf4, 0xa7, 0x9d, 0xc8, 0x04, 
0x29, 0xf3, + 0xd7, 0xdf, 0xd1, 0x61, 0x84, 0xc5, 0x74, 0x30, 0x09, 0x83, 0xfe, 0x11, 0x15, 0x1f, 0x17, 0x52, + 0x15, 0xcd, 0x67, 0x22, 0x7c, 0x41, 0x62, 0xfe, 0x6a, 0x10, 0xbd, 0x3a, 0xf0, 0xa6, 0x38, 0xa4, + 0x07, 0x1e, 0x8b, 0xc9, 0x41, 0xc0, 0x06, 0x3e, 0x16, 0xb8, 0xef, 0x84, 0xc1, 0x11, 0x15, 0x43, + 0xcc, 0x05, 0x89, 0x5d, 0xc5, 0x06, 0xbf, 0x01, 0xf5, 0x11, 0xb3, 0xda, 0x5a, 0xe2, 0xa9, 0x4c, + 0x50, 0x7d, 0xc4, 0xaa, 0x53, 0xa8, 0x8f, 0x18, 0x24, 0xa0, 0x39, 0x26, 0x82, 0x59, 0x5b, 0x5a, + 0xe2, 0x99, 0x4c, 0x90, 0xb6, 0xab, 0x13, 0xd1, 0x74, 0x90, 0x82, 0xad, 0x87, 0x13, 0x2e, 0x70, + 0x48, 0xad, 0x6d, 0xad, 0x74, 0x26, 0x13, 0xb4, 0x82, 0xaa, 0x13, 0x5b, 0x31, 0xc2, 0xef, 0xc0, + 0xee, 0xb3, 0x39, 0x8b, 0xe7, 0xd1, 0xa9, 0xc0, 0xdf, 0x12, 0x6b, 0x47, 0x6b, 0x7e, 0x29, 0x13, + 0x54, 0x84, 0xab, 0xd3, 0x2d, 0xb2, 0xc2, 0x2e, 0x68, 0x9f, 0x60, 0xce, 0x89, 0x6f, 0x81, 0x7d, + 0xa3, 0xb7, 0xed, 0x00, 0x99, 0xa0, 0x25, 0xe2, 0x2e, 0x47, 0xb5, 0x66, 0x38, 0x63, 0x6a, 0xcd, + 0x6e, 0xbe, 0x26, 0x45, 0xdc, 0xe5, 0x08, 0x1f, 0x80, 0xbd, 0x23, 0xce, 0xe7, 0x24, 0x7e, 0xe8, + 0xfb, 0x31, 0xe1, 0xdc, 0xea, 0xe8, 0x28, 0xee, 0xc9, 0x04, 0x95, 0x1d, 0x6e, 0xd9, 0x84, 0xdf, + 0x83, 0xce, 0xea, 0x9c, 0x0d, 0x19, 0x17, 0xd6, 0x9e, 0xde, 0xf7, 0x5c, 0x95, 0x73, 0x11, 0xaf, + 0x2e, 0xfc, 0x12, 0x6d, 0xf7, 0xaf, 0x3a, 0x30, 0x1f, 0x67, 0x37, 0xc7, 0x90, 0xd1, 0xf3, 0x30, + 0x80, 0x3d, 0xb0, 0x3d, 0x9a, 0x47, 0x23, 0xe6, 0x13, 0xae, 0x8f, 0x7c, 0xc3, 0xe9, 0xc8, 0x04, + 0x65, 0x98, 0x9b, 0xcd, 0xe0, 0x7b, 0x60, 0xe7, 0x49, 0x48, 0xd3, 0x84, 0xea, 0x73, 0xdf, 0x72, + 0xf6, 0x64, 0x82, 0x72, 0xd0, 0xcd, 0xa7, 0xf0, 0x13, 0x60, 0x3e, 0x09, 0xa9, 0x4a, 0xea, 0xd9, + 0x34, 0x26, 0x7c, 0xca, 0x66, 0xbe, 0x3e, 0xf7, 0x2d, 0xe7, 0xbe, 0x4c, 0xd0, 0x86, 0xcf, 0xdd, + 0x40, 0x96, 0x0c, 0xaa, 0x48, 0x73, 0x86, 0x66, 0x89, 0xa1, 0xe4, 0x73, 0x37, 0x10, 0x55, 0x6b, + 0xab, 0xf8, 0x3f, 0x25, 0x64, 0x79, 0x1f, 0xe8, 0x5a, 0x2b, 0xc0, 0x15, 0xd6, 0x5a, 0x81, 0xb5, + 0xfb, 0x53, 0x13, 0xc0, 0xf5, 0x5c, 0x8f, 0xdf, 0x2f, 0xe7, 0x50, 0xa5, 0xbb, 0x7e, 0xcb, 0x1c, + 0xd6, 0xf5, 0x9e, 0xff, 0x92, 0xc3, 0x46, 0x89, 0xe1, 0x96, 0x39, 0x6c, 0xfe, 0x8f, 0x39, 0x84, + 0x3f, 0x1a, 0xe0, 0xee, 0x31, 0xe3, 0x62, 0xf3, 0x27, 0x7e, 0x2d, 0x13, 0xb4, 0xee, 0xaa, 0xee, + 0x23, 0xd6, 0x99, 0xe1, 0x10, 0xdc, 0x3b, 0xc6, 0x39, 0x94, 0xf6, 0xc7, 0xb6, 0xee, 0x45, 0x6f, + 0xcb, 0x04, 0x6d, 0x3a, 0xdd, 0x4d, 0xa8, 0xeb, 0x01, 0xf3, 0x29, 0x0d, 0x58, 0x48, 0x03, 0xd5, + 0xa9, 0xfc, 0xe3, 0x90, 0x0b, 0x75, 0xdb, 0x3c, 0x0a, 0x63, 0xe2, 0x09, 0xcb, 0xd8, 0x6f, 0xf4, + 0x9a, 0xe9, 0x6d, 0x93, 0x22, 0xee, 0x72, 0x54, 0x25, 0xf3, 0x88, 0xcc, 0x48, 0x80, 0x05, 0x51, + 0xbf, 0x5f, 0x2d, 0xd3, 0x25, 0x93, 0x81, 0x6e, 0x3e, 0xed, 0xfe, 0xd3, 0x00, 0x6f, 0x65, 0xd6, + 0xe9, 0x50, 0x29, 0x1d, 0xd1, 0x73, 0x06, 0x5f, 0x02, 0x70, 0xc6, 0x04, 0x9e, 0x9d, 0xb0, 0x97, + 0x24, 0xd6, 0x85, 0xd7, 0x71, 0xbe, 0x50, 0x4d, 0x3b, 0x47, 0xab, 0xcb, 0x5f, 0x81, 0x14, 0x0a, + 0xb0, 0xf3, 0x39, 0x27, 0x7e, 0xaa, 0x9b, 0x3e, 0x16, 0xc6, 0xea, 0xeb, 0x33, 0xb0, 0x3a, 0xd9, + 0x9c, 0x33, 0x0b, 0x37, 0x6d, 0x32, 0x8d, 0xb5, 0x70, 0x2b, 0xee, 0x31, 0x05, 0xd2, 0x55, 0xb8, + 0xa9, 0x6e, 0xb3, 0x1c, 0x6e, 0xc5, 0xb2, 0x39, 0xe7, 0xbb, 0x0f, 0xc0, 0x9e, 0xfa, 0xd3, 0x63, + 0x3c, 0x9b, 0x93, 0xb3, 0xd7, 0x17, 0x04, 0x6e, 0xe9, 0x17, 0x90, 0x59, 0x83, 0x6d, 0xf5, 0x4c, + 0x31, 0x0d, 0xb8, 0x9d, 0xbe, 0x26, 0xcc, 0x3a, 0xdc, 0xcd, 0x1a, 0xbe, 0xd9, 0x70, 0x46, 0x97, + 0xd7, 0x76, 0xed, 0xea, 0xda, 0xae, 0xbd, 0xb9, 0xb6, 0x8d, 0x1f, 0x16, 0xb6, 0xf1, 0xcb, 0xc2, + 0x36, 0x7e, 
0x5b, 0xd8, 0xc6, 0xe5, 0xc2, 0x36, 0xae, 0x16, 0xb6, 0xf1, 0xc7, 0xc2, 0x36, 0xfe, + 0x5c, 0xd8, 0xb5, 0x37, 0x0b, 0xdb, 0xf8, 0xf9, 0xc6, 0xae, 0x5d, 0xde, 0xd8, 0xb5, 0xab, 0x1b, + 0xbb, 0xf6, 0xd5, 0x7d, 0xfe, 0x9a, 0x0b, 0x12, 0x9d, 0x46, 0x38, 0x16, 0x43, 0x46, 0x45, 0x8c, + 0x3d, 0xc1, 0x27, 0x6d, 0xfd, 0xae, 0xfc, 0xe0, 0xdf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x67, 0xcd, + 0x78, 0x44, 0xa1, 0x0a, 0x00, 0x00, } func (x VoteValueType) String() string { @@ -673,6 +682,12 @@ func (this *GovernanceConfigV2) Equal(that interface{}) bool { return false } } + { + __caster := &github_com_multiversx_mx_chain_core_go_data.BigIntCaster{} + if !__caster.Equal(this.LostProposalFee, that1.LostProposalFee) { + return false + } + } if this.LastProposalNonce != that1.LastProposalNonce { return false } @@ -800,12 +815,13 @@ func (this *GovernanceConfigV2) GoString() string { if this == nil { return "nil" } - s := make([]string, 0, 9) + s := make([]string, 0, 10) s = append(s, "&systemSmartContracts.GovernanceConfigV2{") s = append(s, "MinQuorum: "+fmt.Sprintf("%#v", this.MinQuorum)+",\n") s = append(s, "MinPassThreshold: "+fmt.Sprintf("%#v", this.MinPassThreshold)+",\n") s = append(s, "MinVetoThreshold: "+fmt.Sprintf("%#v", this.MinVetoThreshold)+",\n") s = append(s, "ProposalFee: "+fmt.Sprintf("%#v", this.ProposalFee)+",\n") + s = append(s, "LostProposalFee: "+fmt.Sprintf("%#v", this.LostProposalFee)+",\n") s = append(s, "LastProposalNonce: "+fmt.Sprintf("%#v", this.LastProposalNonce)+",\n") s = append(s, "}") return strings.Join(s, "") @@ -1057,8 +1073,19 @@ func (m *GovernanceConfigV2) MarshalToSizedBuffer(dAtA []byte) (int, error) { if m.LastProposalNonce != 0 { i = encodeVarintGovernance(dAtA, i, uint64(m.LastProposalNonce)) i-- - dAtA[i] = 0x28 + dAtA[i] = 0x30 + } + { + __caster := &github_com_multiversx_mx_chain_core_go_data.BigIntCaster{} + size := __caster.Size(m.LostProposalFee) + i -= size + if _, err := __caster.MarshalTo(m.LostProposalFee, dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintGovernance(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x2a { __caster := &github_com_multiversx_mx_chain_core_go_data.BigIntCaster{} size := __caster.Size(m.ProposalFee) @@ -1336,6 +1363,11 @@ func (m *GovernanceConfigV2) Size() (n int) { l = __caster.Size(m.ProposalFee) n += 1 + l + sovGovernance(uint64(l)) } + { + __caster := &github_com_multiversx_mx_chain_core_go_data.BigIntCaster{} + l = __caster.Size(m.LostProposalFee) + n += 1 + l + sovGovernance(uint64(l)) + } if m.LastProposalNonce != 0 { n += 1 + sovGovernance(uint64(m.LastProposalNonce)) } @@ -1445,6 +1477,7 @@ func (this *GovernanceConfigV2) String() string { `MinPassThreshold:` + fmt.Sprintf("%v", this.MinPassThreshold) + `,`, `MinVetoThreshold:` + fmt.Sprintf("%v", this.MinVetoThreshold) + `,`, `ProposalFee:` + fmt.Sprintf("%v", this.ProposalFee) + `,`, + `LostProposalFee:` + fmt.Sprintf("%v", this.LostProposalFee) + `,`, `LastProposalNonce:` + fmt.Sprintf("%v", this.LastProposalNonce) + `,`, `}`, }, "") @@ -2196,6 +2229,44 @@ func (m *GovernanceConfigV2) Unmarshal(dAtA []byte) error { } iNdEx = postIndex case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LostProposalFee", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGovernance + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthGovernance 
+ } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthGovernance + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + { + __caster := &github_com_multiversx_mx_chain_core_go_data.BigIntCaster{} + if tmp, err := __caster.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } else { + m.LostProposalFee = tmp + } + } + iNdEx = postIndex + case 6: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field LastProposalNonce", wireType) } diff --git a/vm/systemSmartContracts/governance.proto b/vm/systemSmartContracts/governance.proto index a60fee5b126..019a0755eae 100644 --- a/vm/systemSmartContracts/governance.proto +++ b/vm/systemSmartContracts/governance.proto @@ -43,7 +43,8 @@ message GovernanceConfigV2 { float MinPassThreshold = 2 [(gogoproto.jsontag) = "MinPassThreshold"]; float MinVetoThreshold = 3 [(gogoproto.jsontag) = "MinVetoThreshold"]; bytes ProposalFee = 4 [(gogoproto.jsontag) = "ProposalFee", (gogoproto.casttypewith) = "math/big.Int;github.com/multiversx/mx-chain-core-go/data.BigIntCaster"]; - uint64 LastProposalNonce = 5 [(gogoproto.jsontag) = "LastProposalNonce"]; + bytes LostProposalFee = 5 [(gogoproto.jsontag) = "LostProposalFee", (gogoproto.casttypewith) = "math/big.Int;github.com/multiversx/mx-chain-core-go/data.BigIntCaster"]; + uint64 LastProposalNonce = 6 [(gogoproto.jsontag) = "LastProposalNonce"]; } message OngoingVotedList { diff --git a/vm/systemSmartContracts/governance_test.go b/vm/systemSmartContracts/governance_test.go index 3faf8489503..82143331b06 100644 --- a/vm/systemSmartContracts/governance_test.go +++ b/vm/systemSmartContracts/governance_test.go @@ -46,15 +46,16 @@ func createArgsWithEEI(eei vm.SystemEI) ArgsNewGovernanceContract { MinQuorum: 0.5, MinPassThreshold: 0.5, MinVetoThreshold: 0.5, + LostProposalFee: "1", }, - ChangeConfigAddress: "erd1vxy22x0fj4zv6hktmydg8vpfh6euv02cz4yg0aaws6rrad5a5awqgqky80", + OwnerAddress: "erd1vxy22x0fj4zv6hktmydg8vpfh6euv02cz4yg0aaws6rrad5a5awqgqky80", }, Marshalizer: &mock.MarshalizerMock{}, Hasher: &hashingMocks.HasherMock{}, GovernanceSCAddress: vm.GovernanceSCAddress, DelegationMgrSCAddress: vm.DelegationManagerSCAddress, ValidatorSCAddress: vm.ValidatorSCAddress, - ConfigChangeAddress: bytes.Repeat([]byte{1}, 32), + OwnerAddress: bytes.Repeat([]byte{1}, 32), UnBondPeriodInEpochs: 10, EnableEpochsHandler: &testscommon.EnableEpochsHandlerStub{ IsGovernanceFlagEnabledField: true, @@ -256,7 +257,6 @@ func TestGovernanceContract_ExecuteInit(t *testing.T) { retCode := gsc.Execute(callInput) require.Equal(t, vmcommon.Ok, retCode) - require.Equal(t, gsc.ownerAddress, callerAddr) } func TestGovernanceContract_ExecuteInitV2InvalidCaller(t *testing.T) { @@ -311,7 +311,6 @@ func TestGovernanceContract_ExecuteInitV2(t *testing.T) { retCode = gsc.Execute(callInput) require.Equal(t, vmcommon.Ok, retCode) - require.Equal(t, gsc.ownerAddress, vm.GovernanceSCAddress) } func TestGovernanceContract_ChangeConfig(t *testing.T) { @@ -328,7 +327,10 @@ func TestGovernanceContract_ChangeConfig(t *testing.T) { }, GetStorageCalled: func(key []byte) []byte { if bytes.Equal(key, []byte(governanceConfigKey)) { - configBytes, _ := args.Marshalizer.Marshal(&GovernanceConfigV2{}) + configBytes, _ := args.Marshalizer.Marshal(&GovernanceConfigV2{ + ProposalFee: big.NewInt(10), + LostProposalFee: big.NewInt(1), + }) return configBytes } @@ -339,6 +341,7 @@ func TestGovernanceContract_ChangeConfig(t *testing.T) { gsc, _ := NewGovernanceContract(args) callInputArgs := [][]byte{ + []byte("1"), 
[]byte("1"), []byte("10"), []byte("10"), @@ -346,7 +349,7 @@ func TestGovernanceContract_ChangeConfig(t *testing.T) { } initInput := createVMInput(zero, "initV2", vm.GovernanceSCAddress, vm.GovernanceSCAddress, nil) _ = gsc.Execute(initInput) - callInput := createVMInput(zero, "changeConfig", args.ConfigChangeAddress, vm.GovernanceSCAddress, callInputArgs) + callInput := createVMInput(zero, "changeConfig", args.OwnerAddress, vm.GovernanceSCAddress, callInputArgs) retCode := gsc.Execute(callInput) require.Equal(t, vmcommon.Ok, retCode) @@ -390,7 +393,7 @@ func TestGovernanceContract_ChangeConfigWrongCallValue(t *testing.T) { initInput := createVMInput(zero, "initV2", vm.GovernanceSCAddress, vm.GovernanceSCAddress, nil) _ = gsc.Execute(initInput) - callInput := createVMInput(big.NewInt(10), "changeConfig", args.ConfigChangeAddress, vm.GovernanceSCAddress, nil) + callInput := createVMInput(big.NewInt(10), "changeConfig", args.OwnerAddress, vm.GovernanceSCAddress, nil) retCode := gsc.Execute(callInput) require.Equal(t, vmcommon.UserError, retCode) @@ -401,7 +404,7 @@ func TestGovernanceContract_ChangeConfigWrongArgumentsLength(t *testing.T) { t.Parallel() retMessage := "" - errSubstr := "changeConfig needs 4 arguments" + errSubstr := "changeConfig needs 5 arguments" args := createMockGovernanceArgs() args.Eei = &mock.SystemEIStub{ AddReturnMessageCalled: func(msg string) { @@ -413,7 +416,7 @@ func TestGovernanceContract_ChangeConfigWrongArgumentsLength(t *testing.T) { initInput := createVMInput(zero, "initV2", vm.GovernanceSCAddress, vm.GovernanceSCAddress, nil) _ = gsc.Execute(initInput) - callInput := createVMInput(zero, "changeConfig", args.ConfigChangeAddress, vm.GovernanceSCAddress, nil) + callInput := createVMInput(zero, "changeConfig", args.OwnerAddress, vm.GovernanceSCAddress, nil) retCode := gsc.Execute(callInput) require.Equal(t, vmcommon.UserError, retCode) @@ -438,25 +441,55 @@ func TestGovernanceContract_ChangeConfigInvalidParams(t *testing.T) { _ = gsc.Execute(initInput) callInputArgs := [][]byte{ + []byte("invalid"), []byte("invalid"), []byte("10"), []byte("10"), []byte("5"), } - callInput := createVMInput(zero, "changeConfig", args.ConfigChangeAddress, vm.GovernanceSCAddress, callInputArgs) + callInput := createVMInput(zero, "changeConfig", args.OwnerAddress, vm.GovernanceSCAddress, callInputArgs) retCode := gsc.Execute(callInput) require.Equal(t, vmcommon.UserError, retCode) require.Contains(t, retMessage, errSubstr) + errSubstr = "changeConfig second argument is incorrectly formatted" + callInputArgs = [][]byte{ + []byte("1"), + []byte("invalid"), + []byte("10"), + []byte("10"), + []byte("5"), + } + callInput = createVMInput(zero, "changeConfig", args.OwnerAddress, vm.GovernanceSCAddress, callInputArgs) + retCode = gsc.Execute(callInput) + + require.Equal(t, vmcommon.UserError, retCode) + require.Contains(t, retMessage, errSubstr) + + errSubstr = vm.ErrIncorrectConfig.Error() + " proposal fee is smaller than lost proposal fee " + callInputArgs = [][]byte{ + []byte("1"), + []byte("10"), + []byte("10"), + []byte("10"), + []byte("5"), + } + callInput = createVMInput(zero, "changeConfig", args.OwnerAddress, vm.GovernanceSCAddress, callInputArgs) + retCode = gsc.Execute(callInput) + + require.Equal(t, vmcommon.UserError, retCode) + require.Contains(t, retMessage, errSubstr) + errSubstr = "config incorrect minQuorum" callInputArgs = [][]byte{ + []byte("1"), []byte("1"), []byte("invalid"), []byte("10"), []byte("5"), } - callInput = createVMInput(zero, "changeConfig", 
args.ConfigChangeAddress, vm.GovernanceSCAddress, callInputArgs) + callInput = createVMInput(zero, "changeConfig", args.OwnerAddress, vm.GovernanceSCAddress, callInputArgs) retCode = gsc.Execute(callInput) require.Equal(t, vmcommon.UserError, retCode) @@ -464,12 +497,13 @@ func TestGovernanceContract_ChangeConfigInvalidParams(t *testing.T) { errSubstr = "config incorrect minVeto" callInputArgs = [][]byte{ + []byte("1"), []byte("1"), []byte("10"), []byte("invalid"), []byte("5"), } - callInput = createVMInput(zero, "changeConfig", args.ConfigChangeAddress, vm.GovernanceSCAddress, callInputArgs) + callInput = createVMInput(zero, "changeConfig", args.OwnerAddress, vm.GovernanceSCAddress, callInputArgs) retCode = gsc.Execute(callInput) require.Equal(t, vmcommon.UserError, retCode) @@ -477,12 +511,13 @@ func TestGovernanceContract_ChangeConfigInvalidParams(t *testing.T) { errSubstr = "config incorrect minPass" callInputArgs = [][]byte{ + []byte("1"), []byte("1"), []byte("10"), []byte("10"), []byte("invalid"), } - callInput = createVMInput(zero, "changeConfig", args.ConfigChangeAddress, vm.GovernanceSCAddress, callInputArgs) + callInput = createVMInput(zero, "changeConfig", args.OwnerAddress, vm.GovernanceSCAddress, callInputArgs) retCode = gsc.Execute(callInput) require.Equal(t, vmcommon.UserError, retCode) @@ -514,12 +549,13 @@ func TestGovernanceContract_ChangeConfigGetConfigErr(t *testing.T) { _ = gsc.Execute(initInput) callInputArgs := [][]byte{ + []byte("1"), []byte("1"), []byte("10"), []byte("10"), []byte("10"), } - callInput := createVMInput(zero, "changeConfig", args.ConfigChangeAddress, vm.GovernanceSCAddress, callInputArgs) + callInput := createVMInput(zero, "changeConfig", args.OwnerAddress, vm.GovernanceSCAddress, callInputArgs) retCode := gsc.Execute(callInput) require.Equal(t, vmcommon.UserError, retCode) @@ -597,6 +633,7 @@ func TestGovernanceContract_ProposalAlreadyExists(t *testing.T) { gsc.eei.SetStorage([]byte(proposalPrefix+string(proposalIdentifier)), []byte("1")) callInput := createVMInput(big.NewInt(500), "proposal", vm.GovernanceSCAddress, []byte("addr1"), callInputArgs) + retCode := gsc.Execute(callInput) require.Equal(t, vmcommon.UserError, retCode) require.Equal(t, eei.GetReturnMessage(), "proposal already exists") @@ -904,6 +941,8 @@ func TestGovernanceContract_CloseProposal(t *testing.T) { MinQuorum: 0.1, MinVetoThreshold: 0.1, MinPassThreshold: 0.1, + ProposalFee: big.NewInt(10), + LostProposalFee: big.NewInt(1), }) return configBytes } @@ -912,6 +951,7 @@ func TestGovernanceContract_CloseProposal(t *testing.T) { } if bytes.Equal(key, append([]byte(proposalPrefix), proposalIdentifier...)) { proposalBytes, _ := args.Marshalizer.Marshal(&GeneralProposal{ + ProposalCost: big.NewInt(10), Yes: big.NewInt(10), No: big.NewInt(10), Veto: big.NewInt(10), @@ -1194,7 +1234,7 @@ func TestGovernanceContract_CloseProposalComputeResultsErr(t *testing.T) { t.Parallel() retMessage := "" - errSubstr := "computeEndResults error" + errSubstr := "element was not found" callerAddress := []byte("address") proposalIdentifier := bytes.Repeat([]byte("a"), commitHashLength) args := createMockGovernanceArgs() @@ -1208,6 +1248,7 @@ func TestGovernanceContract_CloseProposalComputeResultsErr(t *testing.T) { } if bytes.Equal(key, append([]byte(proposalPrefix), proposalIdentifier...)) { proposalBytes, _ := args.Marshalizer.Marshal(&GeneralProposal{ + ProposalCost: big.NewInt(10), Yes: big.NewInt(10), No: big.NewInt(10), Veto: big.NewInt(10), @@ -1366,6 +1407,7 @@ func 
TestGovernanceContract_ViewConfig(t *testing.T) { mockEEI.GetStorageCalled = func(key []byte) []byte { proposalBytes, _ := args.Marshalizer.Marshal(&GovernanceConfigV2{ ProposalFee: big.NewInt(10), + LostProposalFee: big.NewInt(1), LastProposalNonce: 10, MinQuorum: 0.4, MinPassThreshold: 0.4, @@ -1676,17 +1718,20 @@ func TestGovernanceContract_addNewVote(t *testing.T) { func TestComputeEndResults(t *testing.T) { t.Parallel() + baseConfig := &GovernanceConfigV2{ + MinQuorum: 0.4, + MinPassThreshold: 0.5, + MinVetoThreshold: 0.3, + ProposalFee: big.NewInt(10), + LostProposalFee: big.NewInt(1), + } + retMessage := "" args := createMockGovernanceArgs() args.Eei = &mock.SystemEIStub{ GetStorageCalled: func(key []byte) []byte { if bytes.Equal(key, []byte(governanceConfigKey)) { - configBytes, _ := args.Marshalizer.Marshal(&GovernanceConfigV2{ - MinQuorum: 0.4, - MinPassThreshold: 0.5, - MinVetoThreshold: 0.3, - ProposalFee: big.NewInt(10), - }) + configBytes, _ := args.Marshalizer.Marshal(baseConfig) return configBytes } @@ -1707,8 +1752,8 @@ func TestComputeEndResults(t *testing.T) { Veto: big.NewInt(0), Abstain: big.NewInt(10), } - err := gsc.computeEndResults(didNotPassQuorum) - require.Nil(t, err) + passed := gsc.computeEndResults(didNotPassQuorum, baseConfig) + require.False(t, passed) require.Equal(t, "Proposal did not reach minQuorum", retMessage) require.False(t, didNotPassQuorum.Passed) @@ -1718,8 +1763,8 @@ func TestComputeEndResults(t *testing.T) { Veto: big.NewInt(0), Abstain: big.NewInt(10), } - err = gsc.computeEndResults(didNotPassVotes) - require.Nil(t, err) + passed = gsc.computeEndResults(didNotPassVotes, baseConfig) + require.False(t, passed) require.Equal(t, "Proposal rejected", retMessage) require.False(t, didNotPassVotes.Passed) @@ -1729,8 +1774,8 @@ func TestComputeEndResults(t *testing.T) { Veto: big.NewInt(0), Abstain: big.NewInt(10), } - err = gsc.computeEndResults(didNotPassVotes2) - require.Nil(t, err) + passed = gsc.computeEndResults(didNotPassVotes2, baseConfig) + require.False(t, passed) require.Equal(t, "Proposal rejected", retMessage) require.False(t, didNotPassVotes2.Passed) @@ -1740,8 +1785,8 @@ func TestComputeEndResults(t *testing.T) { Veto: big.NewInt(70), Abstain: big.NewInt(10), } - err = gsc.computeEndResults(didNotPassVeto) - require.Nil(t, err) + passed = gsc.computeEndResults(didNotPassVeto, baseConfig) + require.False(t, passed) require.Equal(t, "Proposal vetoed", retMessage) require.False(t, didNotPassVeto.Passed) @@ -1751,10 +1796,9 @@ func TestComputeEndResults(t *testing.T) { Veto: big.NewInt(10), Abstain: big.NewInt(10), } - err = gsc.computeEndResults(pass) - require.Nil(t, err) + passed = gsc.computeEndResults(pass, baseConfig) + require.True(t, passed) require.Equal(t, "Proposal passed", retMessage) - require.True(t, pass.Passed) } func TestGovernanceContract_ProposeVoteClose(t *testing.T) { @@ -1763,7 +1807,7 @@ func TestGovernanceContract_ProposeVoteClose(t *testing.T) { callerAddress := bytes.Repeat([]byte{2}, 32) proposalIdentifier := bytes.Repeat([]byte("a"), commitHashLength) - gsc, blockchainHook, _ := createGovernanceBlockChainHookStubContextHandler() + gsc, blockchainHook, eei := createGovernanceBlockChainHookStubContextHandler() callInputArgs := [][]byte{ proposalIdentifier, @@ -1787,4 +1831,82 @@ func TestGovernanceContract_ProposeVoteClose(t *testing.T) { callInput = createVMInput(big.NewInt(0), "closeProposal", callerAddress, vm.GovernanceSCAddress, [][]byte{big.NewInt(1).Bytes()}) retCode = gsc.Execute(callInput) require.Equal(t, 
vmcommon.Ok, retCode) + + proposal, _ := gsc.getProposalFromNonce(big.NewInt(1)) + require.True(t, proposal.Closed) + require.True(t, proposal.Passed) + require.Equal(t, big.NewInt(500), eei.GetTotalSentToUser(callInput.CallerAddr)) +} + +func TestGovernanceContract_ProposeClosePayFee(t *testing.T) { + t.Parallel() + + callerAddress := bytes.Repeat([]byte{2}, 32) + proposalIdentifier := bytes.Repeat([]byte("a"), commitHashLength) + + gsc, blockchainHook, eei := createGovernanceBlockChainHookStubContextHandler() + + callInputArgs := [][]byte{ + proposalIdentifier, + big.NewInt(50).Bytes(), + big.NewInt(55).Bytes(), + } + callInput := createVMInput(big.NewInt(500), "proposal", callerAddress, vm.GovernanceSCAddress, callInputArgs) + retCode := gsc.Execute(callInput) + require.Equal(t, vmcommon.Ok, retCode) + + currentEpoch := uint32(52) + blockchainHook.CurrentEpochCalled = func() uint32 { + return currentEpoch + } + + currentEpoch = 56 + callInput = createVMInput(big.NewInt(0), "closeProposal", callerAddress, vm.GovernanceSCAddress, [][]byte{big.NewInt(1).Bytes()}) + retCode = gsc.Execute(callInput) + require.Equal(t, vmcommon.Ok, retCode) + + proposal, _ := gsc.getProposalFromNonce(big.NewInt(1)) + require.True(t, proposal.Closed) + require.False(t, proposal.Passed) + require.Equal(t, big.NewInt(499), eei.GetTotalSentToUser(callInput.CallerAddr)) +} + +func TestGovernanceContract_ClaimAccumulatedFees(t *testing.T) { + t.Parallel() + + gsc, _, eei := createGovernanceBlockChainHookStubContextHandler() + callInput := createVMInput(big.NewInt(500), "claimAccumulatedFees", []byte("addr1"), vm.GovernanceSCAddress, [][]byte{{1}}) + + retCode := gsc.Execute(callInput) + require.Equal(t, vmcommon.UserError, retCode) + require.Equal(t, eei.GetReturnMessage(), "callValue expected to be 0") + + callInput.CallValue = big.NewInt(0) + retCode = gsc.Execute(callInput) + require.Equal(t, vmcommon.UserError, retCode) + require.True(t, strings.Contains(eei.GetReturnMessage(), "invalid number of arguments, expected 0")) + + callInput.Arguments = [][]byte{} + retCode = gsc.Execute(callInput) + require.Equal(t, vmcommon.UserError, retCode) + require.True(t, strings.Contains(eei.GetReturnMessage(), "can be called only by owner")) + + gsc.gasCost.MetaChainSystemSCsCost.CloseProposal = 100 + callInput.CallerAddr = gsc.ownerAddress + retCode = gsc.Execute(callInput) + require.Equal(t, vmcommon.OutOfGas, retCode) + require.True(t, strings.Contains(eei.GetReturnMessage(), "not enough gas")) + + gsc.gasCost.MetaChainSystemSCsCost.CloseProposal = 0 + retCode = gsc.Execute(callInput) + require.Equal(t, vmcommon.Ok, retCode) + require.Equal(t, big.NewInt(0), eei.GetTotalSentToUser(callInput.CallerAddr)) + + gsc.addToAccumulatedFees(big.NewInt(100)) + + retCode = gsc.Execute(callInput) + require.Equal(t, vmcommon.Ok, retCode) + require.Equal(t, big.NewInt(100), eei.GetTotalSentToUser(callInput.CallerAddr)) + + require.Equal(t, big.NewInt(0), gsc.getAccumulatedFees()) }
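Note (hedged summary, not part of the diff above): with this change, closing a proposal that did not pass refunds the proposer ProposalCost minus LostProposalFee and parks the withheld LostProposalFee under the accumulatedFee storage key, which the owner can later collect through claimAccumulatedFees; a proposal that passes is still refunded in full. A minimal, self-contained sketch of that arithmetic, using the illustrative values from TestGovernanceContract_ProposeClosePayFee (proposal fee 500, lost proposal fee 1):

package main

import (
	"fmt"
	"math/big"
)

func main() {
	// illustrative values mirroring TestGovernanceContract_ProposeClosePayFee
	proposalCost := big.NewInt(500)
	lostProposalFee := big.NewInt(1)

	// failed proposal: the proposer is refunded cost - lostProposalFee,
	// and lostProposalFee is added to the contract's accumulated fees
	tokensToReturn := big.NewInt(0).Sub(proposalCost, lostProposalFee)
	accumulatedFees := big.NewInt(0).Set(lostProposalFee)

	fmt.Println(tokensToReturn, accumulatedFees) // 499 1
}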
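Similarly hedged: after this change changeConfig expects five string-encoded arguments instead of four, with lostProposalFee inserted as the second one, and it rejects configurations where the lost fee exceeds the proposal fee. The sketch below only illustrates the argument layout and the invariant; the concrete values are illustrative, it assumes base-10 string encoding (conversionBase), and it does not call the contract itself:

package main

import (
	"fmt"
	"math/big"
)

func main() {
	// new 5-argument layout for changeConfig (illustrative values)
	changeConfigArgs := [][]byte{
		[]byte("500"), // args.Arguments[0] - proposalFee, as string
		[]byte("10"),  // args.Arguments[1] - lostProposalFee, as string (new)
		[]byte("10"),  // args.Arguments[2] - minQuorum, 0-10000
		[]byte("10"),  // args.Arguments[3] - minVeto, 0-10000
		[]byte("5"),   // args.Arguments[4] - minPass, 0-10000
	}

	proposalFee, _ := big.NewInt(0).SetString(string(changeConfigArgs[0]), 10)
	lostProposalFee, _ := big.NewInt(0).SetString(string(changeConfigArgs[1]), 10)

	// invariant enforced by the contract: lostProposalFee must not exceed proposalFee
	fmt.Println(proposalFee.Cmp(lostProposalFee) >= 0) // true
}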