diff --git a/.github/workflows/create_release.yml b/.github/workflows/create_release.yml
index eb617a59d09..64d69891305 100644
--- a/.github/workflows/create_release.yml
+++ b/.github/workflows/create_release.yml
@@ -7,6 +7,10 @@ on:
       - main
   workflow_dispatch:
 
+permissions:
+  contents: write
+  pull-requests: write
+
 jobs:
   build:
     strategy:
diff --git a/api/logs/logSender_test.go b/api/logs/logSender_test.go
index 630dbf1a9db..4f3eef61522 100644
--- a/api/logs/logSender_test.go
+++ b/api/logs/logSender_test.go
@@ -41,7 +41,7 @@ func createMockLogSender() (*logs.LogSender, *mock.WsConnStub, io.Writer) {
 	return lsender, conn, ls.Writer()
 }
 
-//------- NewLogSender
+// ------- NewLogSender
 
 func TestNewLogSender_NilMarshalizerShouldErr(t *testing.T) {
 	t.Parallel()
@@ -82,7 +82,7 @@ func TestNewLogSender_ShouldWork(t *testing.T) {
 	removeWriterFromLogSubsystem(ls.Writer())
 }
 
-//------- StartSendingBlocking
+// ------- StartSendingBlocking
 
 func TestLogSender_StartSendingBlockingConnReadMessageErrShouldCloseConn(t *testing.T) {
 	t.Parallel()
@@ -144,7 +144,7 @@ func TestLogSender_StartSendingBlockingSendsMessage(t *testing.T) {
 	})
 
 	go func() {
-		//watchdog function
+		// watchdog function
 		time.Sleep(time.Millisecond * 10)
 
 		_ = ls.Writer().Close()
@@ -169,7 +169,7 @@ func TestLogSender_StartSendingBlockingSendsMessageAndStopsWhenReadClose(t *test
 	})
 
 	go func() {
-		//watchdog function
+		// watchdog function
 		time.Sleep(time.Millisecond * 10)
 
 		conn.SetReadMessageHandler(func() (messageType int, p []byte, err error) {
diff --git a/api/mock/loggerStub.go b/api/mock/loggerStub.go
index a607ada6f1a..738af664fbb 100644
--- a/api/mock/loggerStub.go
+++ b/api/mock/loggerStub.go
@@ -4,63 +4,64 @@ import logger "github.com/ElrondNetwork/elrond-go-logger"
 
 // LoggerStub -
 type LoggerStub struct {
-	LogLineCalled  func(level string, message string, args ...interface{})
+	LogCalled      func(level logger.LogLevel, message string, args ...interface{})
+	LogLineCalled  func(line *logger.LogLine)
 	SetLevelCalled func(logLevel logger.LogLevel)
 }
 
+// Log -
+func (l *LoggerStub) Log(logLevel logger.LogLevel, message string, args ...interface{}) {
+	if l.LogCalled != nil {
+		l.LogCalled(logLevel, message, args...)
+	}
+}
+
+// LogLine -
+func (l *LoggerStub) LogLine(line *logger.LogLine) {
+	if l.LogLineCalled != nil {
+		l.LogLineCalled(line)
+	}
+}
+
 // Trace -
 func (l *LoggerStub) Trace(message string, args ...interface{}) {
-	if l.LogLineCalled != nil {
-		l.LogLineCalled("TRACE", message, args...)
+	if l.LogCalled != nil {
+		l.LogCalled(logger.LogTrace, message, args...)
 	}
 }
 
 // Debug -
 func (l *LoggerStub) Debug(message string, args ...interface{}) {
-	if l.LogLineCalled != nil {
-		l.LogLineCalled("DEBUG", message, args...)
+	if l.LogCalled != nil {
+		l.LogCalled(logger.LogDebug, message, args...)
 	}
 }
 
 // Info -
 func (l *LoggerStub) Info(message string, args ...interface{}) {
-	if l.LogLineCalled != nil {
-		l.LogLineCalled("INFO", message, args...)
+	if l.LogCalled != nil {
+		l.LogCalled(logger.LogInfo, message, args...)
 	}
 }
 
 // Warn -
 func (l *LoggerStub) Warn(message string, args ...interface{}) {
-	if l.LogLineCalled != nil {
-		l.LogLineCalled("WARN", message, args...)
+	if l.LogCalled != nil {
+		l.LogCalled(logger.LogWarning, message, args...)
 	}
 }
 
 // Error -
 func (l *LoggerStub) Error(message string, args ...interface{}) {
-	if l.LogLineCalled != nil {
-		l.LogLineCalled("ERROR", message, args...)
+	if l.LogCalled != nil {
+		l.LogCalled(logger.LogError, message, args...)
} } // LogIfError - func (l *LoggerStub) LogIfError(err error, args ...interface{}) { - if l.LogLineCalled != nil && err != nil { - l.LogLineCalled("ERROR", err.Error(), args...) - } -} - -// LogLine - -func (l *LoggerStub) LogLine(line *logger.LogLine) { - if l.LogLineCalled != nil { - l.LogLineCalled("Log", "line", line) - } -} - -// Log - -func (l *LoggerStub) Log(logLevel logger.LogLevel, message string, args ...interface{}) { - if l.LogLineCalled != nil { - l.LogLineCalled(string(logLevel), message, args...) + if l.LogCalled != nil && err != nil { + l.LogCalled(logger.LogError, err.Error(), args...) } } diff --git a/cmd/node/config/config.toml b/cmd/node/config/config.toml index 3ebdb6af19f..340dda6a217 100644 --- a/cmd/node/config/config.toml +++ b/cmd/node/config/config.toml @@ -11,7 +11,11 @@ # available in local disk StartInEpochEnabled = true - # ChainID identifies the blockChain + # ChainID represents the chain identifier + # The currently supported constants are: + # "1" for Mainnet + # "D" for Devnet + # "T" for Testnet ChainID = "undefined" # MinTransactionVersion represents the minimum transaction version accepted @@ -474,6 +478,10 @@ Capacity = 10000 Type = "LRU" +[PeersRatingConfig] + TopRatedCacheCapacity = 5000 + BadRatedCacheCapacity = 5000 + [TrieSyncStorage] Capacity = 300000 SizeInBytes = 104857600 #100MB @@ -694,6 +702,11 @@ { StartEpoch = 1, Version = "v1.4" }, ] + [VirtualMachine.GasConfig] + # MaxGasPerVmQuery defines the maximum amount of gas to be allocated for VM Queries coming from API + # If set to 0, then MaxUInt64 will be used + MaxGasPerVmQuery = 1500000000 #1.5b + [Hardfork] EnableTrigger = true EnableTriggerFromP2P = true @@ -885,5 +898,31 @@ [Resolvers] NumCrossShardPeers = 2 - NumIntraShardPeers = 1 + NumTotalPeers = 3 # NumCrossShardPeers + num intra shard NumFullHistoryPeers = 3 + +[HeartbeatV2] + PeerAuthenticationTimeBetweenSendsInSec = 7200 # 2h + PeerAuthenticationTimeBetweenSendsWhenErrorInSec = 60 # 1min + PeerAuthenticationThresholdBetweenSends = 0.1 # 10% + HeartbeatTimeBetweenSendsInSec = 60 # 1min + HeartbeatTimeBetweenSendsWhenErrorInSec = 60 # 1min + HeartbeatThresholdBetweenSends = 0.1 # 10% + MaxNumOfPeerAuthenticationInResponse = 10 + HeartbeatExpiryTimespanInSec = 3600 # 1h + MinPeersThreshold = 0.8 # 80% + DelayBetweenRequestsInSec = 10 # 10sec + MaxTimeoutInSec = 7200 # 2h + DelayBetweenConnectionNotificationsInSec = 60 # 1min + MaxMissingKeysInRequest = 1000 + MaxDurationPeerUnresponsiveInSec = 900 # 15min + HideInactiveValidatorIntervalInSec = 3600 # 1h + HardforkTimeBetweenSendsInSec = 10 # 10sec + [HeartbeatV2.PeerAuthenticationPool] + DefaultSpanInSec = 3600 # 1h + CacheExpiryInSec = 3600 # 1h + [HeartbeatV2.HeartbeatPool] + Name = "HeartbeatPool" + Capacity = 1000 + Type = "SizeLRU" + SizeInBytes = 314572800 #300MB diff --git a/cmd/node/config/enableEpochs.toml b/cmd/node/config/enableEpochs.toml index 68e4df0d279..20654e40ff2 100644 --- a/cmd/node/config/enableEpochs.toml +++ b/cmd/node/config/enableEpochs.toml @@ -55,6 +55,7 @@ # StakingV2EnableEpoch represents the epoch when staking v2 is enabled StakingV2EnableEpoch = 1 + # DoubleKeyProtectionEnableEpoch represents the epoch when the double key protection will be enabled DoubleKeyProtectionEnableEpoch = 1 # ESDTEnableEpoch represents the epoch when ESDT is enabled @@ -135,7 +136,7 @@ # CorrectFirstQueuedEpoch represents the epoch when the backward compatibility for setting the first queued node is enabled CorrectFirstQueuedEpoch = 1 - # 
DeleteDelegatorAfterClaimRewardsEnableEpoch represents the epoch when the delegators data is deleted for delegators that have to claim rewards after they widrawal all funds + # DeleteDelegatorAfterClaimRewardsEnableEpoch represents the epoch when the delegators data is deleted for delegators that have to claim rewards after they withdraw all funds DeleteDelegatorAfterClaimRewardsEnableEpoch = 1 # FixOOGReturnCodeEnableEpoch represents the epoch when the backward compatibility returning out of gas error is enabled @@ -187,6 +188,9 @@ # SCRSizeInvariantOnBuiltInResultEnableEpoch represents the epoch when scr size invariant on built in result is enabled SCRSizeInvariantOnBuiltInResultEnableEpoch = 1 + # CheckCorrectTokenIDForTransferRoleEnableEpoch represents the epoch when the correct token ID check is applied for transfer role verification + CheckCorrectTokenIDForTransferRoleEnableEpoch = 2 + # FailExecutionOnEveryAPIErrorEnableEpoch represent the epoch when new protection in VM is enabled to fail all wrong API calls FailExecutionOnEveryAPIErrorEnableEpoch = 3 @@ -214,7 +218,14 @@ { EpochEnable = 6, MaxNumNodes = 48, NodesToShufflePerShard = 2 }, ] + # HeartbeatDisableEpoch represents the epoch when heartbeat v1 messages stop being sent and processed + HeartbeatDisableEpoch = 2 + + # MiniBlockPartialExecutionEnableEpoch represents the epoch when mini block partial execution will be enabled + MiniBlockPartialExecutionEnableEpoch = 3 + [GasSchedule] + # GasScheduleByEpochs holds the configuration for the gas schedule that will be applied from specific epochs GasScheduleByEpochs = [ { StartEpoch = 0, FileName = "gasScheduleV1.toml" }, { StartEpoch = 1, FileName = "gasScheduleV6.toml" }, diff --git a/cmd/node/config/external.toml b/cmd/node/config/external.toml index a7b4253660d..7902306a5dd 100644 --- a/cmd/node/config/external.toml +++ b/cmd/node/config/external.toml @@ -6,14 +6,14 @@ #Strongly suggested to activate this on a regular observer node. Enabled = false IndexerCacheSize = 0 + BulkRequestMaxSizeInBytes = 4194304 # 4MB URL = "http://localhost:9200" UseKibana = false Username = "" Password = "" # EnabledIndexes represents a slice of indexes that will be enabled for indexing. 
Full list is: - # ["tps", "rating", "transactions", "blocks", "validators", "miniblocks", "rounds", "accounts", "accountshistory", "receipts", "scresults", "accountsesdt", "accountsesdthistory", "epochinfo", "scdeploys", "tokens", "tags", "logs", "delegators"] - EnabledIndexes = ["tps", "rating", "transactions", "blocks", "validators", "miniblocks", "rounds", "accounts", "accountshistory", "receipts", "scresults", "accountsesdt", "accountsesdthistory", "epochinfo", "scdeploys", "tokens", "tags", "logs", "delegators"] - + # ["rating", "transactions", "blocks", "validators", "miniblocks", "rounds", "accounts", "accountshistory", "receipts", "scresults", "accountsesdt", "accountsesdthistory", "epochinfo", "scdeploys", "tokens", "tags", "logs", "delegators", "operations"] + EnabledIndexes = ["rating", "transactions", "blocks", "validators", "miniblocks", "rounds", "accounts", "accountshistory", "receipts", "scresults", "accountsesdt", "accountsesdthistory", "epochinfo", "scdeploys", "tokens", "tags", "logs", "delegators", "operations"] # EventNotifierConnector defines settings needed to configure and launch the event notifier component [EventNotifierConnector] # Enabled will turn on or off the event notifier connector diff --git a/cmd/node/config/prefs.toml b/cmd/node/config/prefs.toml index 75d14e14176..f7d0628b1ab 100644 --- a/cmd/node/config/prefs.toml +++ b/cmd/node/config/prefs.toml @@ -18,11 +18,11 @@ # It is highly recommended to enable this flag on an observer (not on a validator node) FullArchive = false - # PreferredConnections holds an array containing the public keys of the nodes to connect with (in top of other connections) + # PreferredConnections holds an array containing valid ips or peer ids from nodes to connect with (in top of other connections) # Example: # PreferredConnections = [ - # "eb2a13ec773924df2c7d1e92ff1c08d1c3b14218dc6a780b269ef12b9c098971f71851c212103720d40f92380c306a0c1a5e606f043f034188c3fcb95170112158730e2c53cd6c79331ce73df921675d71488f6287aa1ddca297756a98239584", - # "eb2a13ec773924df2c7d1e92ff1c08d1c3b14218dc6a780b269ef12b9c098971f71851c212103720d40f92380c306a0c1a5e606f043f034188c3fcb95170112158730e2c53cd6c79331ce73df921675d71488f6287aa1ddca297756a98239584" + # "127.0.0.10", + # "16Uiu2HAm6yvbp1oZ6zjnWsn9FdRqBSaQkbhELyaThuq48ybdorrr" # ] PreferredConnections = [] diff --git a/cmd/seednode/main.go b/cmd/seednode/main.go index 266afd8d4d5..3b8451e11ac 100644 --- a/cmd/seednode/main.go +++ b/cmd/seednode/main.go @@ -248,6 +248,7 @@ func createNode(p2pConfig config.P2PConfig, marshalizer marshal.Marshalizer) (p2 SyncTimer: &libp2p.LocalSyncTimer{}, PreferredPeersHolder: disabled.NewPreferredPeersHolder(), NodeOperationMode: p2p.NormalOperation, + PeersRatingHandler: disabled.NewDisabledPeersRatingHandler(), } return libp2p.NewNetworkMessenger(arg) diff --git a/cmd/termui/view/termuic/termuiRenders/widgetsRender.go b/cmd/termui/view/termuic/termuiRenders/widgetsRender.go index 0bcc478948e..b025f038e3e 100644 --- a/cmd/termui/view/termuic/termuiRenders/widgetsRender.go +++ b/cmd/termui/view/termuic/termuiRenders/widgetsRender.go @@ -37,7 +37,7 @@ type WidgetsRender struct { presenter view.Presenter } -//NewWidgetsRender method will create new WidgetsRender that display termui console +// NewWidgetsRender method will create new WidgetsRender that display termui console func NewWidgetsRender(presenter view.Presenter, grid *DrawableContainer) (*WidgetsRender, error) { if presenter == nil || presenter.IsInterfaceNil() { return nil, view.ErrNilPresenterInterface @@ 
-106,7 +106,7 @@ func (wr *WidgetsRender) setGrid() {
 	wr.container.SetBottom(gridBottom)
 }
 
-//RefreshData method is used to prepare data that are displayed on container
+// RefreshData method is used to prepare the data that is displayed on the container
 func (wr *WidgetsRender) RefreshData(numMillisecondsRefreshTime int) {
 	wr.prepareInstanceInfo()
 	wr.prepareChainInfo(numMillisecondsRefreshTime)
@@ -116,7 +116,7 @@ func (wr *WidgetsRender) RefreshData(numMillisecondsRefreshTime int) {
 }
 
 func (wr *WidgetsRender) prepareInstanceInfo() {
-	//8 rows and one column
+	// 8 rows and one column
 	numRows := 8
 	rows := make([][]string, numRows)
@@ -138,7 +138,7 @@ func (wr *WidgetsRender) prepareInstanceInfo() {
 		fmt.Sprintf("Node name: %s (Shard %s - %s)",
 			nodeName,
 			shardIdStr,
-			strings.Title(nodeTypeAndListDisplay),
+			nodeTypeAndListDisplay,
 		),
 	}
@@ -174,7 +174,7 @@ func (wr *WidgetsRender) prepareInstanceInfo() {
 }
 
 func (wr *WidgetsRender) prepareChainInfo(numMillisecondsRefreshTime int) {
-	//10 rows and one column
+	// 10 rows and one column
 	numRows := 10
 	rows := make([][]string, numRows)
@@ -266,7 +266,7 @@ func computeRedundancyStr(redundancyLevel int64, redundancyIsMainActive string)
 }
 
 func (wr *WidgetsRender) prepareBlockInfo() {
-	//7 rows and one column
+	// 8 rows and one column
 	numRows := 8
 	rows := make([][]string, numRows)
diff --git a/common/constants.go b/common/constants.go
index de3c756973c..d14b8833c8b 100644
--- a/common/constants.go
+++ b/common/constants.go
@@ -71,6 +71,15 @@ const GenesisTxSignatureString = "GENESISGENESISGENESISGENESISGENESISGENESISGENE
 // HeartbeatTopic is the topic used for heartbeat signaling
 const HeartbeatTopic = "heartbeat"
 
+// HeartbeatV2Topic is the topic used for heartbeatV2 signaling
+const HeartbeatV2Topic = "heartbeatV2"
+
+// PeerAuthenticationTopic is the topic used for peer authentication signaling
+const PeerAuthenticationTopic = "peerAuthentication"
+
+// ConnectionTopic represents the topic used when sending the new connection message data
+const ConnectionTopic = "connection"
+
 // PathShardPlaceholder represents the placeholder for the shard ID in paths
 const PathShardPlaceholder = "[S]"
 
@@ -493,6 +502,9 @@ const (
 	// MetricWaitingListFixEnableEpoch represents the epoch when the waiting list fix is enabled
 	MetricWaitingListFixEnableEpoch = "erd_waiting_list_fix_enable_epoch"
 
+	// MetricHeartbeatDisableEpoch represents the epoch when heartbeat v1 messages stop being sent and processed
+	MetricHeartbeatDisableEpoch = "erd_heartbeat_disable_epoch"
+
 	// MetricMaxNodesChangeEnableEpoch holds configuration for changing the maximum number of nodes and the enabling epoch
 	MetricMaxNodesChangeEnableEpoch = "erd_max_nodes_change_enable_epoch"
 
@@ -827,3 +839,6 @@ const (
 	// ApiOutputFormatProto outport format returns the bytes of the proto object
 	ApiOutputFormatProto ApiOutputFormat = 1
 )
+
+// MaxIndexOfTxInMiniBlock defines the maximum index of a tx inside one mini block
+const MaxIndexOfTxInMiniBlock = int32(29999)
diff --git a/config/config.go b/config/config.go
index a14dba12dac..ec0dcf7d53a 100644
--- a/config/config.go
+++ b/config/config.go
@@ -103,6 +103,34 @@ type SoftwareVersionConfig struct {
 	PollingIntervalInMinutes int
 }
 
+// HeartbeatV2Config will hold the configuration for heartbeat v2
+type HeartbeatV2Config struct {
+	PeerAuthenticationTimeBetweenSendsInSec          int64
+	PeerAuthenticationTimeBetweenSendsWhenErrorInSec int64
+	PeerAuthenticationThresholdBetweenSends          float64
+	HeartbeatTimeBetweenSendsInSec                   int64
+
HeartbeatTimeBetweenSendsWhenErrorInSec int64 + HeartbeatThresholdBetweenSends float64 + MaxNumOfPeerAuthenticationInResponse int + HeartbeatExpiryTimespanInSec int64 + MinPeersThreshold float32 + DelayBetweenRequestsInSec int64 + MaxTimeoutInSec int64 + DelayBetweenConnectionNotificationsInSec int64 + MaxMissingKeysInRequest uint32 + MaxDurationPeerUnresponsiveInSec int64 + HideInactiveValidatorIntervalInSec int64 + PeerAuthenticationPool PeerAuthenticationPoolConfig + HeartbeatPool CacheConfig + HardforkTimeBetweenSendsInSec int64 +} + +// PeerAuthenticationPoolConfig will hold the configuration for peer authentication pool +type PeerAuthenticationPoolConfig struct { + DefaultSpanInSec int + CacheExpiryInSec int +} + // Config will hold the entire application configuration parameters type Config struct { MiniBlocksStorage StorageConfig @@ -162,6 +190,7 @@ type Config struct { Antiflood AntifloodConfig ResourceStats ResourceStatsConfig Heartbeat HeartbeatConfig + HeartbeatV2 HeartbeatV2Config ValidatorStatistics ValidatorStatisticsConfig GeneralSettings GeneralSettingsConfig Consensus ConsensusConfig @@ -184,6 +213,14 @@ type Config struct { TrieSync TrieSyncConfig Resolvers ResolverConfig VMOutputCacher CacheConfig + + PeersRatingConfig PeersRatingConfig +} + +// PeersRatingConfig will hold settings related to peers rating +type PeersRatingConfig struct { + TopRatedCacheCapacity int + BadRatedCacheCapacity int } // LogsConfig will hold settings related to the logging sub-system @@ -352,6 +389,7 @@ type IncreaseFactorConfig struct { type VirtualMachineServicesConfig struct { Execution VirtualMachineConfig Querying QueryVirtualMachineConfig + GasConfig VirtualMachineGasConfig } // VirtualMachineConfig holds configuration for a Virtual Machine service @@ -373,6 +411,11 @@ type QueryVirtualMachineConfig struct { NumConcurrentVMs int } +// VirtualMachineGasConfig holds the configuration for the virtual machine(s) gas operations +type VirtualMachineGasConfig struct { + MaxGasPerVmQuery uint64 +} + // HardforkConfig holds the configuration for the hardfork trigger type HardforkConfig struct { ExportStateStorageConfig StorageConfig @@ -543,6 +586,6 @@ type TrieSyncConfig struct { // ResolverConfig represents the config options to be used when setting up the resolver instances type ResolverConfig struct { NumCrossShardPeers uint32 - NumIntraShardPeers uint32 + NumTotalPeers uint32 NumFullHistoryPeers uint32 } diff --git a/config/epochConfig.go b/config/epochConfig.go index e46870a8d85..2d7848b4fda 100644 --- a/config/epochConfig.go +++ b/config/epochConfig.go @@ -75,7 +75,10 @@ type EnableEpochs struct { DoNotReturnOldBlockInBlockchainHookEnableEpoch uint32 AddFailedRelayedTxToInvalidMBsDisableEpoch uint32 SCRSizeInvariantOnBuiltInResultEnableEpoch uint32 + CheckCorrectTokenIDForTransferRoleEnableEpoch uint32 FailExecutionOnEveryAPIErrorEnableEpoch uint32 + HeartbeatDisableEpoch uint32 + MiniBlockPartialExecutionEnableEpoch uint32 StakeLimitsEnableEpoch uint32 StakingV4InitEnableEpoch uint32 StakingV4EnableEpoch uint32 diff --git a/config/externalConfig.go b/config/externalConfig.go index c1b4ca7857b..d4a869bdf4c 100644 --- a/config/externalConfig.go +++ b/config/externalConfig.go @@ -9,13 +9,14 @@ type ExternalConfig struct { // ElasticSearchConfig will hold the configuration for the elastic search type ElasticSearchConfig struct { - Enabled bool - IndexerCacheSize int - URL string - UseKibana bool - Username string - Password string - EnabledIndexes []string + Enabled bool + IndexerCacheSize 
int + BulkRequestMaxSizeInBytes int + URL string + UseKibana bool + Username string + Password string + EnabledIndexes []string } // EventNotifierConfig will hold the configuration for the events notifier driver diff --git a/config/tomlConfig_test.go b/config/tomlConfig_test.go index d7ea51d7dcd..94cc98e912a 100644 --- a/config/tomlConfig_test.go +++ b/config/tomlConfig_test.go @@ -106,6 +106,9 @@ func TestTomlParser(t *testing.T) { NumConcurrentVMs: 16, VirtualMachineConfig: VirtualMachineConfig{ArwenVersions: arwenVersions}, }, + GasConfig: VirtualMachineGasConfig{ + MaxGasPerVmQuery: 1_500_000_000, + }, }, Debug: DebugConfig{ InterceptorResolver: InterceptorResolverDebugConfig{ @@ -192,6 +195,9 @@ func TestTomlParser(t *testing.T) { { StartEpoch = 88, Version = "v1.2" }, ] + [VirtualMachine.GasConfig] + MaxGasPerVmQuery = 1500000000 + [Debug] [Debug.InterceptorResolver] Enabled = true @@ -648,6 +654,9 @@ func TestEnableEpochConfig(t *testing.T) { # FailExecutionOnEveryAPIErrorEnableEpoch represent the epoch when new protection in VM is enabled to fail all wrong API calls FailExecutionOnEveryAPIErrorEnableEpoch = 53 + # HeartbeatDisableEpoch represents the epoch when heartbeat v1 messages stop being sent and processed + HeartbeatDisableEpoch = 54 + # MaxNodesChangeEnableEpoch holds configuration for changing the maximum number of nodes and the enabling epoch MaxNodesChangeEnableEpoch = [ { EpochEnable = 44, MaxNumNodes = 2169, NodesToShufflePerShard = 80 }, @@ -728,6 +737,7 @@ func TestEnableEpochConfig(t *testing.T) { TransformToMultiShardCreateEnableEpoch: 51, ESDTRegisterAndSetAllRolesEnableEpoch: 52, FailExecutionOnEveryAPIErrorEnableEpoch: 53, + HeartbeatDisableEpoch: 54, }, GasSchedule: GasScheduleConfig{ GasScheduleByEpochs: []GasScheduleByEpochs{ diff --git a/consensus/interface.go b/consensus/interface.go index f27c5031bf7..97767339fdc 100644 --- a/consensus/interface.go +++ b/consensus/interface.go @@ -80,7 +80,7 @@ type P2PMessenger interface { // The interface assures that the collected data will be used by the p2p network sharding components type NetworkShardingCollector interface { UpdatePeerIDInfo(pid core.PeerID, pk []byte, shardID uint32) - UpdatePeerIdSubType(pid core.PeerID, peerSubType core.P2PPeerSubType) + PutPeerIdSubType(pid core.PeerID, peerSubType core.P2PPeerSubType) GetPeerInfo(pid core.PeerID) core.P2PPeerInfo IsInterfaceNil() bool } diff --git a/consensus/mock/blockProcessorMock.go b/consensus/mock/blockProcessorMock.go index 9255844e8f9..f8a5e947870 100644 --- a/consensus/mock/blockProcessorMock.go +++ b/consensus/mock/blockProcessorMock.go @@ -5,7 +5,6 @@ import ( "time" "github.com/ElrondNetwork/elrond-go-core/data" - "github.com/ElrondNetwork/elrond-go/process/block/processedMb" ) // BlockProcessorMock mocks the implementation for a blockProcessor @@ -33,10 +32,6 @@ type BlockProcessorMock struct { func (bpm *BlockProcessorMock) SetNumProcessedObj(_ uint64) { } -// ApplyProcessedMiniBlocks - -func (bpm *BlockProcessorMock) ApplyProcessedMiniBlocks(_ *processedMb.ProcessedMiniBlockTracker) { -} - // RestoreLastNotarizedHrdsToGenesis - func (bpm *BlockProcessorMock) RestoreLastNotarizedHrdsToGenesis() { } diff --git a/consensus/mock/consensusDataContainerMock.go b/consensus/mock/consensusDataContainerMock.go index d1ffa540c7c..adbeaaf2c86 100644 --- a/consensus/mock/consensusDataContainerMock.go +++ b/consensus/mock/consensusDataContainerMock.go @@ -220,7 +220,7 @@ func (ccm *ConsensusCoreMock) NodeRedundancyHandler() consensus.NodeRedundancyHa } // 
ScheduledProcessor - -func (ccm *ConsensusCoreMock) ScheduledProcessor() consensus.ScheduledProcessor{ +func (ccm *ConsensusCoreMock) ScheduledProcessor() consensus.ScheduledProcessor { return ccm.scheduledProcessor } diff --git a/consensus/spos/bls/subroundBlock_test.go b/consensus/spos/bls/subroundBlock_test.go index 8241c6bc3be..21130cb280b 100644 --- a/consensus/spos/bls/subroundBlock_test.go +++ b/consensus/spos/bls/subroundBlock_test.go @@ -998,9 +998,6 @@ func TestSubroundBlock_CallFuncRemainingTimeWithStructShouldWork(t *testing.T) { time.Sleep(200 * time.Millisecond) assert.True(t, remainingTimeInCurrentRound() < 0) - - roundStartTime = roundStartTime.Add(500 * time.Millisecond) - assert.True(t, remainingTimeInCurrentRound() < 0) } func TestSubroundBlock_CallFuncRemainingTimeWithStructShouldNotWork(t *testing.T) { diff --git a/consensus/spos/scheduledProcessor_test.go b/consensus/spos/scheduledProcessor_test.go index 87889c1f7a5..50bfc68ed0a 100644 --- a/consensus/spos/scheduledProcessor_test.go +++ b/consensus/spos/scheduledProcessor_test.go @@ -148,10 +148,10 @@ func TestScheduledProcessorWrapper_IsProcessedInProgressStartingInFuture(t *test sp.setStatus(inProgress) startTime := time.Now() - sp.startTime = startTime.Add(10 * time.Millisecond) + sp.startTime = startTime.Add(500 * time.Millisecond) require.False(t, sp.IsProcessedOKWithTimeout()) endTime := time.Now() - require.Less(t, endTime.Sub(startTime), time.Millisecond) + require.Less(t, endTime.Sub(startTime), time.Millisecond*100) } func TestScheduledProcessorWrapper_IsProcessedInProgressEarlyCompletion(t *testing.T) { diff --git a/consensus/spos/sposFactory/sposFactory_test.go b/consensus/spos/sposFactory/sposFactory_test.go index 0c11515386c..7590aa56591 100644 --- a/consensus/spos/sposFactory/sposFactory_test.go +++ b/consensus/spos/sposFactory/sposFactory_test.go @@ -10,8 +10,8 @@ import ( "github.com/ElrondNetwork/elrond-go/consensus/spos" "github.com/ElrondNetwork/elrond-go/consensus/spos/sposFactory" "github.com/ElrondNetwork/elrond-go/testscommon" - statusHandlerMock "github.com/ElrondNetwork/elrond-go/testscommon/statusHandler" "github.com/ElrondNetwork/elrond-go/testscommon/hashingMocks" + statusHandlerMock "github.com/ElrondNetwork/elrond-go/testscommon/statusHandler" "github.com/stretchr/testify/assert" ) diff --git a/consensus/spos/worker.go b/consensus/spos/worker.go index 9b35a3aa837..3c3cf391822 100644 --- a/consensus/spos/worker.go +++ b/consensus/spos/worker.go @@ -51,8 +51,6 @@ type Worker struct { headerIntegrityVerifier process.HeaderIntegrityVerifier appStatusHandler core.AppStatusHandler - networkShardingCollector consensus.NetworkShardingCollector - receivedMessages map[consensus.MessageType][]*consensus.Message receivedMessagesCalls map[consensus.MessageType]func(ctx context.Context, msg *consensus.Message) bool @@ -79,30 +77,29 @@ type Worker struct { // WorkerArgs holds the consensus worker arguments type WorkerArgs struct { - ConsensusService ConsensusService - BlockChain data.ChainHandler - BlockProcessor process.BlockProcessor - ScheduledProcessor consensus.ScheduledProcessor - Bootstrapper process.Bootstrapper - BroadcastMessenger consensus.BroadcastMessenger - ConsensusState *ConsensusState - ForkDetector process.ForkDetector - Marshalizer marshal.Marshalizer - Hasher hashing.Hasher - RoundHandler consensus.RoundHandler - ShardCoordinator sharding.Coordinator - PeerSignatureHandler crypto.PeerSignatureHandler - SyncTimer ntp.SyncTimer - HeaderSigVerifier HeaderSigVerifier - 
HeaderIntegrityVerifier process.HeaderIntegrityVerifier - ChainID []byte - NetworkShardingCollector consensus.NetworkShardingCollector - AntifloodHandler consensus.P2PAntifloodHandler - PoolAdder PoolAdder - SignatureSize int - PublicKeySize int - AppStatusHandler core.AppStatusHandler - NodeRedundancyHandler consensus.NodeRedundancyHandler + ConsensusService ConsensusService + BlockChain data.ChainHandler + BlockProcessor process.BlockProcessor + ScheduledProcessor consensus.ScheduledProcessor + Bootstrapper process.Bootstrapper + BroadcastMessenger consensus.BroadcastMessenger + ConsensusState *ConsensusState + ForkDetector process.ForkDetector + Marshalizer marshal.Marshalizer + Hasher hashing.Hasher + RoundHandler consensus.RoundHandler + ShardCoordinator sharding.Coordinator + PeerSignatureHandler crypto.PeerSignatureHandler + SyncTimer ntp.SyncTimer + HeaderSigVerifier HeaderSigVerifier + HeaderIntegrityVerifier process.HeaderIntegrityVerifier + ChainID []byte + AntifloodHandler consensus.P2PAntifloodHandler + PoolAdder PoolAdder + SignatureSize int + PublicKeySize int + AppStatusHandler core.AppStatusHandler + NodeRedundancyHandler consensus.NodeRedundancyHandler } // NewWorker creates a new Worker object @@ -128,28 +125,27 @@ func NewWorker(args *WorkerArgs) (*Worker, error) { } wrk := Worker{ - consensusService: args.ConsensusService, - blockChain: args.BlockChain, - blockProcessor: args.BlockProcessor, - scheduledProcessor: args.ScheduledProcessor, - bootstrapper: args.Bootstrapper, - broadcastMessenger: args.BroadcastMessenger, - consensusState: args.ConsensusState, - forkDetector: args.ForkDetector, - marshalizer: args.Marshalizer, - hasher: args.Hasher, - roundHandler: args.RoundHandler, - shardCoordinator: args.ShardCoordinator, - peerSignatureHandler: args.PeerSignatureHandler, - syncTimer: args.SyncTimer, - headerSigVerifier: args.HeaderSigVerifier, - headerIntegrityVerifier: args.HeaderIntegrityVerifier, - appStatusHandler: args.AppStatusHandler, - networkShardingCollector: args.NetworkShardingCollector, - antifloodHandler: args.AntifloodHandler, - poolAdder: args.PoolAdder, - nodeRedundancyHandler: args.NodeRedundancyHandler, - closer: closing.NewSafeChanCloser(), + consensusService: args.ConsensusService, + blockChain: args.BlockChain, + blockProcessor: args.BlockProcessor, + scheduledProcessor: args.ScheduledProcessor, + bootstrapper: args.Bootstrapper, + broadcastMessenger: args.BroadcastMessenger, + consensusState: args.ConsensusState, + forkDetector: args.ForkDetector, + marshalizer: args.Marshalizer, + hasher: args.Hasher, + roundHandler: args.RoundHandler, + shardCoordinator: args.ShardCoordinator, + peerSignatureHandler: args.PeerSignatureHandler, + syncTimer: args.SyncTimer, + headerSigVerifier: args.HeaderSigVerifier, + headerIntegrityVerifier: args.HeaderIntegrityVerifier, + appStatusHandler: args.AppStatusHandler, + antifloodHandler: args.AntifloodHandler, + poolAdder: args.PoolAdder, + nodeRedundancyHandler: args.NodeRedundancyHandler, + closer: closing.NewSafeChanCloser(), } wrk.consensusMessageValidator = consensusMessageValidatorObj @@ -232,9 +228,6 @@ func checkNewWorkerParams(args *WorkerArgs) error { if len(args.ChainID) == 0 { return ErrInvalidChainID } - if check.IfNil(args.NetworkShardingCollector) { - return ErrNilNetworkShardingCollector - } if check.IfNil(args.AntifloodHandler) { return ErrNilAntifloodHandler } @@ -381,8 +374,6 @@ func (wrk *Worker) ProcessReceivedMessage(message p2p.MessageP2P, fromConnectedP return err } - 
wrk.networkShardingCollector.UpdatePeerIDInfo(message.Peer(), cnsMsg.PubKey, wrk.shardCoordinator.SelfId()) - isMessageWithBlockBody := wrk.consensusService.IsMessageWithBlockBody(msgType) isMessageWithBlockHeader := wrk.consensusService.IsMessageWithBlockHeader(msgType) isMessageWithBlockBodyAndHeader := wrk.consensusService.IsMessageWithBlockBodyAndHeader(msgType) diff --git a/consensus/spos/worker_test.go b/consensus/spos/worker_test.go index da03c37a6cc..3d0a8653442 100644 --- a/consensus/spos/worker_test.go +++ b/consensus/spos/worker_test.go @@ -22,7 +22,6 @@ import ( "github.com/ElrondNetwork/elrond-go/process" "github.com/ElrondNetwork/elrond-go/testscommon" "github.com/ElrondNetwork/elrond-go/testscommon/hashingMocks" - "github.com/ElrondNetwork/elrond-go/testscommon/p2pmocks" statusHandlerMock "github.com/ElrondNetwork/elrond-go/testscommon/statusHandler" "github.com/stretchr/testify/assert" ) @@ -86,30 +85,29 @@ func createDefaultWorkerArgs(appStatusHandler core.AppStatusHandler) *spos.Worke peerSigHandler := &mock.PeerSignatureHandler{Signer: singleSignerMock, KeyGen: keyGeneratorMock} workerArgs := &spos.WorkerArgs{ - ConsensusService: blsService, - BlockChain: blockchainMock, - BlockProcessor: blockProcessor, - ScheduledProcessor: scheduledProcessor, - Bootstrapper: bootstrapperMock, - BroadcastMessenger: broadcastMessengerMock, - ConsensusState: consensusState, - ForkDetector: forkDetectorMock, - Marshalizer: marshalizerMock, - Hasher: hasher, - RoundHandler: roundHandlerMock, - ShardCoordinator: shardCoordinatorMock, - PeerSignatureHandler: peerSigHandler, - SyncTimer: syncTimerMock, - HeaderSigVerifier: &mock.HeaderSigVerifierStub{}, - HeaderIntegrityVerifier: &mock.HeaderIntegrityVerifierStub{}, - ChainID: chainID, - NetworkShardingCollector: &p2pmocks.NetworkShardingCollectorStub{}, - AntifloodHandler: createMockP2PAntifloodHandler(), - PoolAdder: poolAdder, - SignatureSize: SignatureSize, - PublicKeySize: PublicKeySize, - AppStatusHandler: appStatusHandler, - NodeRedundancyHandler: &mock.NodeRedundancyHandlerStub{}, + ConsensusService: blsService, + BlockChain: blockchainMock, + BlockProcessor: blockProcessor, + ScheduledProcessor: scheduledProcessor, + Bootstrapper: bootstrapperMock, + BroadcastMessenger: broadcastMessengerMock, + ConsensusState: consensusState, + ForkDetector: forkDetectorMock, + Marshalizer: marshalizerMock, + Hasher: hasher, + RoundHandler: roundHandlerMock, + ShardCoordinator: shardCoordinatorMock, + PeerSignatureHandler: peerSigHandler, + SyncTimer: syncTimerMock, + HeaderSigVerifier: &mock.HeaderSigVerifierStub{}, + HeaderIntegrityVerifier: &mock.HeaderIntegrityVerifierStub{}, + ChainID: chainID, + AntifloodHandler: createMockP2PAntifloodHandler(), + PoolAdder: poolAdder, + SignatureSize: SignatureSize, + PublicKeySize: PublicKeySize, + AppStatusHandler: appStatusHandler, + NodeRedundancyHandler: &mock.NodeRedundancyHandlerStub{}, } return workerArgs @@ -320,17 +318,6 @@ func TestWorker_NewWorkerEmptyChainIDShouldFail(t *testing.T) { assert.Equal(t, spos.ErrInvalidChainID, err) } -func TestWorker_NewWorkerNilNetworkShardingCollectorShouldFail(t *testing.T) { - t.Parallel() - - workerArgs := createDefaultWorkerArgs(&statusHandlerMock.AppStatusHandlerStub{}) - workerArgs.NetworkShardingCollector = nil - wrk, err := spos.NewWorker(workerArgs) - - assert.Nil(t, wrk) - assert.Equal(t, spos.ErrNilNetworkShardingCollector, err) -} - func TestWorker_NewWorkerNilAntifloodHandlerShouldFail(t *testing.T) { t.Parallel() diff --git 
a/dataRetriever/dataPool/dataPool.go b/dataRetriever/dataPool/dataPool.go index baf78ae7156..92eeeb291ff 100644 --- a/dataRetriever/dataPool/dataPool.go +++ b/dataRetriever/dataPool/dataPool.go @@ -2,12 +2,15 @@ package dataPool import ( "github.com/ElrondNetwork/elrond-go-core/core/check" + logger "github.com/ElrondNetwork/elrond-go-logger" "github.com/ElrondNetwork/elrond-go/dataRetriever" "github.com/ElrondNetwork/elrond-go/storage" ) var _ dataRetriever.PoolsHolder = (*dataPool)(nil) +var log = logger.GetOrCreate("dataRetriever/dataPool") + type dataPool struct { transactions dataRetriever.ShardedDataCacherNotifier unsignedTransactions dataRetriever.ShardedDataCacherNotifier @@ -19,6 +22,8 @@ type dataPool struct { trieNodesChunks storage.Cacher currBlockTxs dataRetriever.TransactionCacher smartContracts storage.Cacher + peerAuthentications storage.Cacher + heartbeats storage.Cacher } // DataPoolArgs represents the data pool's constructor structure @@ -33,6 +38,8 @@ type DataPoolArgs struct { TrieNodesChunks storage.Cacher CurrentBlockTransactions dataRetriever.TransactionCacher SmartContracts storage.Cacher + PeerAuthentications storage.Cacher + Heartbeats storage.Cacher } // NewDataPool creates a data pools holder object @@ -67,6 +74,12 @@ func NewDataPool(args DataPoolArgs) (*dataPool, error) { if check.IfNil(args.SmartContracts) { return nil, dataRetriever.ErrNilSmartContractsPool } + if check.IfNil(args.PeerAuthentications) { + return nil, dataRetriever.ErrNilPeerAuthenticationPool + } + if check.IfNil(args.Heartbeats) { + return nil, dataRetriever.ErrNilHeartbeatPool + } return &dataPool{ transactions: args.Transactions, @@ -79,6 +92,8 @@ func NewDataPool(args DataPoolArgs) (*dataPool, error) { trieNodesChunks: args.TrieNodesChunks, currBlockTxs: args.CurrentBlockTransactions, smartContracts: args.SmartContracts, + peerAuthentications: args.PeerAuthentications, + heartbeats: args.Heartbeats, }, nil } @@ -132,6 +147,40 @@ func (dp *dataPool) SmartContracts() storage.Cacher { return dp.smartContracts } +// PeerAuthentications returns the holder for peer authentications +func (dp *dataPool) PeerAuthentications() storage.Cacher { + return dp.peerAuthentications +} + +// Heartbeats returns the holder for heartbeats +func (dp *dataPool) Heartbeats() storage.Cacher { + return dp.heartbeats +} + +// Close closes all the components +func (dp *dataPool) Close() error { + var lastError error + if !check.IfNil(dp.trieNodes) { + log.Debug("closing trie nodes data pool....") + err := dp.trieNodes.Close() + if err != nil { + log.Error("failed to close trie nodes data pool", "error", err.Error()) + lastError = err + } + } + + if !check.IfNil(dp.peerAuthentications) { + log.Debug("closing peer authentications data pool....") + err := dp.peerAuthentications.Close() + if err != nil { + log.Error("failed to close peer authentications data pool", "error", err.Error()) + lastError = err + } + } + + return lastError +} + // IsInterfaceNil returns true if there is no value under the interface func (dp *dataPool) IsInterfaceNil() bool { return dp == nil diff --git a/dataRetriever/dataPool/dataPool_test.go b/dataRetriever/dataPool/dataPool_test.go index bd0552b7fb1..d64648f28b0 100644 --- a/dataRetriever/dataPool/dataPool_test.go +++ b/dataRetriever/dataPool/dataPool_test.go @@ -1,6 +1,7 @@ package dataPool_test import ( + "errors" "testing" "github.com/ElrondNetwork/elrond-go/dataRetriever" @@ -25,6 +26,8 @@ func createMockDataPoolArgs() dataPool.DataPoolArgs { TrieNodesChunks: 
testscommon.NewCacherStub(), CurrentBlockTransactions: &mock.TxForCurrentBlockStub{}, SmartContracts: testscommon.NewCacherStub(), + PeerAuthentications: testscommon.NewCacherStub(), + Heartbeats: testscommon.NewCacherStub(), } } @@ -116,6 +119,28 @@ func TestNewDataPool_NilSmartContractsShouldErr(t *testing.T) { assert.Nil(t, tdp) } +func TestNewDataPool_NilPeerAuthenticationsShouldErr(t *testing.T) { + t.Parallel() + + args := createMockDataPoolArgs() + args.PeerAuthentications = nil + tdp, err := dataPool.NewDataPool(args) + + assert.Equal(t, dataRetriever.ErrNilPeerAuthenticationPool, err) + assert.Nil(t, tdp) +} + +func TestNewDataPool_NilHeartbeatsShouldErr(t *testing.T) { + t.Parallel() + + args := createMockDataPoolArgs() + args.Heartbeats = nil + tdp, err := dataPool.NewDataPool(args) + + assert.Equal(t, dataRetriever.ErrNilHeartbeatPool, err) + assert.Nil(t, tdp) +} + func TestNewDataPool_NilPeerBlocksShouldErr(t *testing.T) { t.Parallel() @@ -138,19 +163,9 @@ func TestNewDataPool_NilCurrBlockShouldErr(t *testing.T) { } func TestNewDataPool_OkValsShouldWork(t *testing.T) { - args := dataPool.DataPoolArgs{ - Transactions: testscommon.NewShardedDataStub(), - UnsignedTransactions: testscommon.NewShardedDataStub(), - RewardTransactions: testscommon.NewShardedDataStub(), - Headers: &mock.HeadersCacherStub{}, - MiniBlocks: testscommon.NewCacherStub(), - PeerChangesBlocks: testscommon.NewCacherStub(), - TrieNodes: testscommon.NewCacherStub(), - TrieNodesChunks: testscommon.NewCacherStub(), - CurrentBlockTransactions: &mock.TxForCurrentBlockStub{}, - SmartContracts: testscommon.NewCacherStub(), - } + t.Parallel() + args := createMockDataPoolArgs() tdp, err := dataPool.NewDataPool(args) assert.Nil(t, err) @@ -166,4 +181,90 @@ func TestNewDataPool_OkValsShouldWork(t *testing.T) { assert.True(t, args.TrieNodes == tdp.TrieNodes()) assert.True(t, args.TrieNodesChunks == tdp.TrieNodesChunks()) assert.True(t, args.SmartContracts == tdp.SmartContracts()) + assert.True(t, args.PeerAuthentications == tdp.PeerAuthentications()) + assert.True(t, args.Heartbeats == tdp.Heartbeats()) +} + +func TestNewDataPool_Close(t *testing.T) { + t.Parallel() + + expectedErr := errors.New("expected error") + t.Run("trie nodes close returns error", func(t *testing.T) { + t.Parallel() + + args := createMockDataPoolArgs() + args.TrieNodes = &testscommon.CacherStub{ + CloseCalled: func() error { + return expectedErr + }, + } + tdp, _ := dataPool.NewDataPool(args) + assert.NotNil(t, tdp) + err := tdp.Close() + assert.Equal(t, expectedErr, err) + }) + t.Run("peer authentications close returns error", func(t *testing.T) { + t.Parallel() + + args := createMockDataPoolArgs() + args.PeerAuthentications = &testscommon.CacherStub{ + CloseCalled: func() error { + return expectedErr + }, + } + tdp, _ := dataPool.NewDataPool(args) + assert.NotNil(t, tdp) + err := tdp.Close() + assert.Equal(t, expectedErr, err) + }) + t.Run("both fail", func(t *testing.T) { + t.Parallel() + + tnExpectedErr := errors.New("tn expected error") + paExpectedErr := errors.New("pa expected error") + args := createMockDataPoolArgs() + tnCalled, paCalled := false, false + args.TrieNodes = &testscommon.CacherStub{ + CloseCalled: func() error { + tnCalled = true + return tnExpectedErr + }, + } + args.PeerAuthentications = &testscommon.CacherStub{ + CloseCalled: func() error { + paCalled = true + return paExpectedErr + }, + } + tdp, _ := dataPool.NewDataPool(args) + assert.NotNil(t, tdp) + err := tdp.Close() + assert.Equal(t, paExpectedErr, err) + 
assert.True(t, tnCalled) + assert.True(t, paCalled) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + args := createMockDataPoolArgs() + tnCalled, paCalled := false, false + args.TrieNodes = &testscommon.CacherStub{ + CloseCalled: func() error { + tnCalled = true + return nil + }, + } + args.PeerAuthentications = &testscommon.CacherStub{ + CloseCalled: func() error { + paCalled = true + return nil + }, + } + tdp, _ := dataPool.NewDataPool(args) + assert.NotNil(t, tdp) + err := tdp.Close() + assert.Nil(t, err) + assert.True(t, tnCalled) + assert.True(t, paCalled) + }) } diff --git a/dataRetriever/errors.go b/dataRetriever/errors.go index c5a810d3dca..91fc7a678ac 100644 --- a/dataRetriever/errors.go +++ b/dataRetriever/errors.go @@ -140,6 +140,9 @@ var ErrInvalidMaxTxRequest = errors.New("max tx request number is invalid") // ErrNilPeerListCreator signals that a nil peer list creator implementation has been provided var ErrNilPeerListCreator = errors.New("nil peer list creator provided") +// ErrNilPeersRatingHandler signals that a nil peers rating handler implementation has been provided +var ErrNilPeersRatingHandler = errors.New("nil peers rating handler") + // ErrNilTrieDataGetter signals that a nil trie data getter has been provided var ErrNilTrieDataGetter = errors.New("nil trie data getter provided") @@ -220,3 +223,24 @@ var ErrNilPathManager = errors.New("nil path manager") // ErrNilEpochNotifier signals that the provided EpochNotifier is nil var ErrNilEpochNotifier = errors.New("nil EpochNotifier") + +// ErrNilPeerAuthenticationPool signals that a nil peer authentication pool has been provided +var ErrNilPeerAuthenticationPool = errors.New("nil peer authentication pool") + +// ErrNilHeartbeatPool signals that a nil heartbeat pool has been provided +var ErrNilHeartbeatPool = errors.New("nil heartbeat pool") + +// ErrPeerAuthNotFound signals that no peer authentication found +var ErrPeerAuthNotFound = errors.New("peer authentication not found") + +// ErrNilNodesCoordinator signals a nil nodes coordinator has been provided +var ErrNilNodesCoordinator = errors.New("nil nodes coordinator") + +// InvalidChunkIndex signals that an invalid chunk was provided +var InvalidChunkIndex = errors.New("invalid chunk index") + +// ErrInvalidNumOfPeerAuthentication signals that an invalid number of peer authentication was provided +var ErrInvalidNumOfPeerAuthentication = errors.New("invalid num of peer authentication") + +// ErrNilPeerShardMapper signals that a nil peer shard mapper has been provided +var ErrNilPeerShardMapper = errors.New("nil peer shard mapper") diff --git a/dataRetriever/factory/dataPoolFactory.go b/dataRetriever/factory/dataPoolFactory.go index c9cb7ed29bc..2f1f71fe915 100644 --- a/dataRetriever/factory/dataPoolFactory.go +++ b/dataRetriever/factory/dataPoolFactory.go @@ -3,6 +3,7 @@ package factory import ( "fmt" "io/ioutil" + "time" "github.com/ElrondNetwork/elrond-go-core/core" "github.com/ElrondNetwork/elrond-go-core/core/check" @@ -20,7 +21,7 @@ import ( "github.com/ElrondNetwork/elrond-go/storage/disabled" "github.com/ElrondNetwork/elrond-go/storage/factory" "github.com/ElrondNetwork/elrond-go/storage/lrucache/capacity" - + "github.com/ElrondNetwork/elrond-go/storage/mapTimeCache" "github.com/ElrondNetwork/elrond-go/storage/storageCacherAdapter" "github.com/ElrondNetwork/elrond-go/storage/storageUnit" trieFactory "github.com/ElrondNetwork/elrond-go/trie/factory" @@ -124,6 +125,20 @@ func NewDataPoolFromConfig(args ArgsDataPool) (dataRetriever.PoolsHolder, 
error) return nil, fmt.Errorf("%w while creating the cache for the smartcontract results", err) } + peerAuthPool, err := mapTimeCache.NewMapTimeCache(mapTimeCache.ArgMapTimeCacher{ + DefaultSpan: time.Duration(mainConfig.HeartbeatV2.PeerAuthenticationPool.DefaultSpanInSec) * time.Second, + CacheExpiry: time.Duration(mainConfig.HeartbeatV2.PeerAuthenticationPool.CacheExpiryInSec) * time.Second, + }) + if err != nil { + return nil, fmt.Errorf("%w while creating the cache for the peer authentication messages", err) + } + + cacherCfg = factory.GetCacherFromConfig(mainConfig.HeartbeatV2.HeartbeatPool) + heartbeatPool, err := storageUnit.NewCache(cacherCfg) + if err != nil { + return nil, fmt.Errorf("%w while creating the cache for the heartbeat messages", err) + } + currBlockTxs := dataPool.NewCurrentBlockPool() dataPoolArgs := dataPool.DataPoolArgs{ Transactions: txPool, @@ -136,6 +151,8 @@ func NewDataPoolFromConfig(args ArgsDataPool) (dataRetriever.PoolsHolder, error) TrieNodesChunks: trieNodesChunks, CurrentBlockTransactions: currBlockTxs, SmartContracts: smartContracts, + PeerAuthentications: peerAuthPool, + Heartbeats: heartbeatPool, } return dataPool.NewDataPool(dataPoolArgs) } diff --git a/dataRetriever/factory/dataPoolFactory_test.go b/dataRetriever/factory/dataPoolFactory_test.go index 0acef55e241..5b5fb174144 100644 --- a/dataRetriever/factory/dataPoolFactory_test.go +++ b/dataRetriever/factory/dataPoolFactory_test.go @@ -128,6 +128,22 @@ func TestNewDataPoolFromConfig_BadConfigShouldErr(t *testing.T) { fmt.Println(err) require.True(t, errors.Is(err, storage.ErrNotSupportedCacheType)) require.True(t, strings.Contains(err.Error(), "the cache for the smartcontract results")) + + args = getGoodArgs() + args.Config.HeartbeatV2.PeerAuthenticationPool.CacheExpiryInSec = 0 + holder, err = NewDataPoolFromConfig(args) + require.Nil(t, holder) + fmt.Println(err) + require.True(t, errors.Is(err, storage.ErrInvalidCacheExpiry)) + require.True(t, strings.Contains(err.Error(), "the cache for the peer authentication messages")) + + args = getGoodArgs() + args.Config.HeartbeatV2.HeartbeatPool.Type = "invalid cache type" + holder, err = NewDataPoolFromConfig(args) + require.Nil(t, holder) + fmt.Println(err) + require.True(t, errors.Is(err, storage.ErrNotSupportedCacheType)) + require.True(t, strings.Contains(err.Error(), "the cache for the heartbeat messages")) } func getGoodArgs() ArgsDataPool { diff --git a/dataRetriever/factory/resolverscontainer/args.go b/dataRetriever/factory/resolverscontainer/args.go index 69f33258025..15708542fd3 100644 --- a/dataRetriever/factory/resolverscontainer/args.go +++ b/dataRetriever/factory/resolverscontainer/args.go @@ -7,6 +7,7 @@ import ( "github.com/ElrondNetwork/elrond-go/config" "github.com/ElrondNetwork/elrond-go/dataRetriever" "github.com/ElrondNetwork/elrond-go/p2p" + "github.com/ElrondNetwork/elrond-go/process" "github.com/ElrondNetwork/elrond-go/sharding" ) @@ -26,6 +27,10 @@ type FactoryArgs struct { OutputAntifloodHandler dataRetriever.P2PAntifloodHandler CurrentNetworkEpochProvider dataRetriever.CurrentNetworkEpochProviderHandler PreferredPeersHolder p2p.PreferredPeersHolderHandler + PeersRatingHandler dataRetriever.PeersRatingHandler SizeCheckDelta uint32 IsFullHistoryNode bool + NodesCoordinator dataRetriever.NodesCoordinator + MaxNumOfPeerAuthenticationInResponse int + PeerShardMapper process.PeerShardMapper } diff --git a/dataRetriever/factory/resolverscontainer/baseResolversContainerFactory.go 
b/dataRetriever/factory/resolverscontainer/baseResolversContainerFactory.go index cb3a150b65b..e8ecfeb6843 100644 --- a/dataRetriever/factory/resolverscontainer/baseResolversContainerFactory.go +++ b/dataRetriever/factory/resolverscontainer/baseResolversContainerFactory.go @@ -12,6 +12,7 @@ import ( "github.com/ElrondNetwork/elrond-go/dataRetriever" "github.com/ElrondNetwork/elrond-go/dataRetriever/resolvers" "github.com/ElrondNetwork/elrond-go/dataRetriever/resolvers/topicResolverSender" + "github.com/ElrondNetwork/elrond-go/process" "github.com/ElrondNetwork/elrond-go/process/factory" "github.com/ElrondNetwork/elrond-go/sharding" ) @@ -19,29 +20,36 @@ import ( // EmptyExcludePeersOnTopic is an empty topic const EmptyExcludePeersOnTopic = "" +const minNumOfPeerAuthentication = 5 + var log = logger.GetOrCreate("dataRetriever/factory/resolverscontainer") type baseResolversContainerFactory struct { - container dataRetriever.ResolversContainer - shardCoordinator sharding.Coordinator - messenger dataRetriever.TopicMessageHandler - store dataRetriever.StorageService - marshalizer marshal.Marshalizer - dataPools dataRetriever.PoolsHolder - uint64ByteSliceConverter typeConverters.Uint64ByteSliceConverter - intRandomizer dataRetriever.IntRandomizer - dataPacker dataRetriever.DataPacker - triesContainer common.TriesHolder - inputAntifloodHandler dataRetriever.P2PAntifloodHandler - outputAntifloodHandler dataRetriever.P2PAntifloodHandler - throttler dataRetriever.ResolverThrottler - intraShardTopic string - isFullHistoryNode bool - currentNetworkEpochProvider dataRetriever.CurrentNetworkEpochProviderHandler - preferredPeersHolder dataRetriever.PreferredPeersHolderHandler - numCrossShardPeers int - numIntraShardPeers int - numFullHistoryPeers int + container dataRetriever.ResolversContainer + shardCoordinator sharding.Coordinator + messenger dataRetriever.TopicMessageHandler + store dataRetriever.StorageService + marshalizer marshal.Marshalizer + dataPools dataRetriever.PoolsHolder + uint64ByteSliceConverter typeConverters.Uint64ByteSliceConverter + intRandomizer dataRetriever.IntRandomizer + dataPacker dataRetriever.DataPacker + triesContainer common.TriesHolder + inputAntifloodHandler dataRetriever.P2PAntifloodHandler + outputAntifloodHandler dataRetriever.P2PAntifloodHandler + throttler dataRetriever.ResolverThrottler + intraShardTopic string + isFullHistoryNode bool + currentNetworkEpochProvider dataRetriever.CurrentNetworkEpochProviderHandler + preferredPeersHolder dataRetriever.PreferredPeersHolderHandler + peersRatingHandler dataRetriever.PeersRatingHandler + numCrossShardPeers int + numIntraShardPeers int + numTotalPeers int + numFullHistoryPeers int + nodesCoordinator dataRetriever.NodesCoordinator + maxNumOfPeerAuthenticationInResponse int + peerShardMapper process.PeerShardMapper } func (brcf *baseResolversContainerFactory) checkParams() error { @@ -84,15 +92,28 @@ func (brcf *baseResolversContainerFactory) checkParams() error { if check.IfNil(brcf.preferredPeersHolder) { return dataRetriever.ErrNilPreferredPeersHolder } + if check.IfNil(brcf.peersRatingHandler) { + return dataRetriever.ErrNilPeersRatingHandler + } if brcf.numCrossShardPeers <= 0 { return fmt.Errorf("%w for numCrossShardPeers", dataRetriever.ErrInvalidValue) } - if brcf.numIntraShardPeers <= 0 { - return fmt.Errorf("%w for numIntraShardPeers", dataRetriever.ErrInvalidValue) + if brcf.numTotalPeers <= brcf.numCrossShardPeers { + return fmt.Errorf("%w for numTotalPeers", dataRetriever.ErrInvalidValue) } if 
brcf.numFullHistoryPeers <= 0 { return fmt.Errorf("%w for numFullHistoryPeers", dataRetriever.ErrInvalidValue) } + if check.IfNil(brcf.nodesCoordinator) { + return dataRetriever.ErrNilNodesCoordinator + } + if brcf.maxNumOfPeerAuthenticationInResponse < minNumOfPeerAuthentication { + return fmt.Errorf("%w for maxNumOfPeerAuthenticationInResponse, expected %d, received %d", + dataRetriever.ErrInvalidValue, minNumOfPeerAuthentication, brcf.maxNumOfPeerAuthenticationInResponse) + } + if check.IfNil(brcf.peerShardMapper) { + return dataRetriever.ErrNilPeerShardMapper + } return nil } @@ -113,7 +134,7 @@ func (brcf *baseResolversContainerFactory) generateTxResolvers( identifierTx := topic + shardC.CommunicationIdentifier(idx) excludePeersFromTopic := topic + shardC.CommunicationIdentifier(shardC.SelfId()) - resolver, err := brcf.createTxResolver(identifierTx, excludePeersFromTopic, unit, dataPool, idx) + resolver, err := brcf.createTxResolver(identifierTx, excludePeersFromTopic, unit, dataPool, idx, brcf.numCrossShardPeers, brcf.numIntraShardPeers) if err != nil { return err } @@ -125,7 +146,7 @@ func (brcf *baseResolversContainerFactory) generateTxResolvers( identifierTx := topic + shardC.CommunicationIdentifier(core.MetachainShardId) excludePeersFromTopic := topic + shardC.CommunicationIdentifier(shardC.SelfId()) - resolver, err := brcf.createTxResolver(identifierTx, excludePeersFromTopic, unit, dataPool, core.MetachainShardId) + resolver, err := brcf.createTxResolver(identifierTx, excludePeersFromTopic, unit, dataPool, core.MetachainShardId, brcf.numCrossShardPeers, brcf.numIntraShardPeers) if err != nil { return err } @@ -142,23 +163,27 @@ func (brcf *baseResolversContainerFactory) createTxResolver( unit dataRetriever.UnitType, dataPool dataRetriever.ShardedDataCacherNotifier, targetShardID uint32, + numCrossShardPeers int, + numIntraShardPeers int, ) (dataRetriever.Resolver, error) { txStorer := brcf.store.GetStorer(unit) - resolverSender, err := brcf.createOneResolverSender(topic, excludedTopic, targetShardID) + resolverSender, err := brcf.createOneResolverSenderWithSpecifiedNumRequests(topic, excludedTopic, targetShardID, numCrossShardPeers, numIntraShardPeers) if err != nil { return nil, err } arg := resolvers.ArgTxResolver{ - SenderResolver: resolverSender, + ArgBaseResolver: resolvers.ArgBaseResolver{ + SenderResolver: resolverSender, + Marshaller: brcf.marshalizer, + AntifloodHandler: brcf.inputAntifloodHandler, + Throttler: brcf.throttler, + }, TxPool: dataPool, TxStorage: txStorer, - Marshalizer: brcf.marshalizer, DataPacker: brcf.dataPacker, - AntifloodHandler: brcf.inputAntifloodHandler, - Throttler: brcf.throttler, IsFullHistoryNode: brcf.isFullHistoryNode, } resolver, err := resolvers.NewTxResolver(arg) @@ -184,7 +209,7 @@ func (brcf *baseResolversContainerFactory) generateMiniBlocksResolvers() error { identifierMiniBlocks := factory.MiniBlocksTopic + shardC.CommunicationIdentifier(idx) excludePeersFromTopic := factory.MiniBlocksTopic + shardC.CommunicationIdentifier(shardC.SelfId()) - resolver, err := brcf.createMiniBlocksResolver(identifierMiniBlocks, excludePeersFromTopic, idx) + resolver, err := brcf.createMiniBlocksResolver(identifierMiniBlocks, excludePeersFromTopic, idx, brcf.numCrossShardPeers, brcf.numIntraShardPeers) if err != nil { return err } @@ -196,7 +221,7 @@ func (brcf *baseResolversContainerFactory) generateMiniBlocksResolvers() error { identifierMiniBlocks := factory.MiniBlocksTopic + shardC.CommunicationIdentifier(core.MetachainShardId) excludePeersFromTopic 
:= factory.MiniBlocksTopic + shardC.CommunicationIdentifier(shardC.SelfId()) - resolver, err := brcf.createMiniBlocksResolver(identifierMiniBlocks, excludePeersFromTopic, core.MetachainShardId) + resolver, err := brcf.createMiniBlocksResolver(identifierMiniBlocks, excludePeersFromTopic, core.MetachainShardId, brcf.numCrossShardPeers, brcf.numIntraShardPeers) if err != nil { return err } @@ -205,7 +230,7 @@ func (brcf *baseResolversContainerFactory) generateMiniBlocksResolvers() error { keys[noOfShards] = identifierMiniBlocks identifierAllShardMiniBlocks := factory.MiniBlocksTopic + shardC.CommunicationIdentifier(core.AllShardId) - allShardMiniblocksResolver, err := brcf.createMiniBlocksResolver(identifierAllShardMiniBlocks, EmptyExcludePeersOnTopic, brcf.shardCoordinator.SelfId()) + allShardMiniblocksResolver, err := brcf.createMiniBlocksResolver(identifierAllShardMiniBlocks, EmptyExcludePeersOnTopic, brcf.shardCoordinator.SelfId(), brcf.numCrossShardPeers, brcf.numIntraShardPeers) if err != nil { return err } @@ -220,21 +245,25 @@ func (brcf *baseResolversContainerFactory) createMiniBlocksResolver( topic string, excludedTopic string, targetShardID uint32, + numCrossShardPeers int, + numIntraShardPeers int, ) (dataRetriever.Resolver, error) { miniBlocksStorer := brcf.store.GetStorer(dataRetriever.MiniBlockUnit) - resolverSender, err := brcf.createOneResolverSender(topic, excludedTopic, targetShardID) + resolverSender, err := brcf.createOneResolverSenderWithSpecifiedNumRequests(topic, excludedTopic, targetShardID, numCrossShardPeers, numIntraShardPeers) if err != nil { return nil, err } arg := resolvers.ArgMiniblockResolver{ - SenderResolver: resolverSender, + ArgBaseResolver: resolvers.ArgBaseResolver{ + SenderResolver: resolverSender, + Marshaller: brcf.marshalizer, + AntifloodHandler: brcf.inputAntifloodHandler, + Throttler: brcf.throttler, + }, MiniBlockPool: brcf.dataPools.MiniBlocks(), MiniBlockStorage: miniBlocksStorer, - Marshalizer: brcf.marshalizer, - AntifloodHandler: brcf.inputAntifloodHandler, - Throttler: brcf.throttler, DataPacker: brcf.dataPacker, IsFullHistoryNode: brcf.isFullHistoryNode, } @@ -251,34 +280,51 @@ func (brcf *baseResolversContainerFactory) createMiniBlocksResolver( return txBlkResolver, nil } -func (brcf *baseResolversContainerFactory) createOneResolverSender( - topic string, - excludedTopic string, - targetShardId uint32, -) (dataRetriever.TopicResolverSender, error) { - return brcf.createOneResolverSenderWithSpecifiedNumRequests( - topic, - excludedTopic, - targetShardId, - brcf.numCrossShardPeers, - brcf.numIntraShardPeers, - brcf.numFullHistoryPeers, - brcf.currentNetworkEpochProvider, - ) +func (brcf *baseResolversContainerFactory) generatePeerAuthenticationResolver() error { + identifierPeerAuth := common.PeerAuthenticationTopic + shardC := brcf.shardCoordinator + resolverSender, err := brcf.createOneResolverSenderWithSpecifiedNumRequests(identifierPeerAuth, EmptyExcludePeersOnTopic, shardC.SelfId(), brcf.numCrossShardPeers, brcf.numIntraShardPeers) + if err != nil { + return err + } + + arg := resolvers.ArgPeerAuthenticationResolver{ + ArgBaseResolver: resolvers.ArgBaseResolver{ + SenderResolver: resolverSender, + Marshaller: brcf.marshalizer, + AntifloodHandler: brcf.inputAntifloodHandler, + Throttler: brcf.throttler, + }, + PeerAuthenticationPool: brcf.dataPools.PeerAuthentications(), + NodesCoordinator: brcf.nodesCoordinator, + MaxNumOfPeerAuthenticationInResponse: brcf.maxNumOfPeerAuthenticationInResponse, + PeerShardMapper: brcf.peerShardMapper, + 
DataPacker: brcf.dataPacker, + } + peerAuthResolver, err := resolvers.NewPeerAuthenticationResolver(arg) + if err != nil { + return err + } + + err = brcf.messenger.RegisterMessageProcessor(peerAuthResolver.RequestTopic(), common.DefaultResolversIdentifier, peerAuthResolver) + if err != nil { + return err + } + + return brcf.container.Add(identifierPeerAuth, peerAuthResolver) } func (brcf *baseResolversContainerFactory) createOneResolverSenderWithSpecifiedNumRequests( topic string, excludedTopic string, targetShardId uint32, - numCrossShard int, - numIntraShard int, - numFullHistory int, - currentNetworkEpochProvider dataRetriever.CurrentNetworkEpochProviderHandler, + numCrossShardPeers int, + numIntraShardPeers int, ) (dataRetriever.TopicResolverSender, error) { log.Trace("baseResolversContainerFactory.createOneResolverSenderWithSpecifiedNumRequests", - "topic", topic, "intraShardTopic", brcf.intraShardTopic, "excludedTopic", excludedTopic) + "topic", topic, "intraShardTopic", brcf.intraShardTopic, "excludedTopic", excludedTopic, + "numCrossShardPeers", numCrossShardPeers, "numIntraShardPeers", numIntraShardPeers) peerListCreator, err := topicResolverSender.NewDiffPeerListCreator(brcf.messenger, topic, brcf.intraShardTopic, excludedTopic) if err != nil { @@ -293,12 +339,13 @@ func (brcf *baseResolversContainerFactory) createOneResolverSenderWithSpecifiedN Randomizer: brcf.intRandomizer, TargetShardId: targetShardId, OutputAntiflooder: brcf.outputAntifloodHandler, - NumCrossShardPeers: numCrossShard, - NumIntraShardPeers: numIntraShard, - NumFullHistoryPeers: numFullHistory, - CurrentNetworkEpochProvider: currentNetworkEpochProvider, + NumCrossShardPeers: numCrossShardPeers, + NumIntraShardPeers: numIntraShardPeers, + NumFullHistoryPeers: brcf.numFullHistoryPeers, + CurrentNetworkEpochProvider: brcf.currentNetworkEpochProvider, PreferredPeersHolder: brcf.preferredPeersHolder, SelfShardIdProvider: brcf.shardCoordinator, + PeersRatingHandler: brcf.peersRatingHandler, } // TODO instantiate topic sender resolver with the shard IDs for which this resolver is supposed to serve the data // this will improve the serving of transactions as the searching will be done only on 2 sharded data units @@ -313,20 +360,16 @@ func (brcf *baseResolversContainerFactory) createOneResolverSenderWithSpecifiedN func (brcf *baseResolversContainerFactory) createTrieNodesResolver( topic string, trieId string, - numCrossShard int, - numIntraShard int, - numFullHistory int, + numCrossShardPeers int, + numIntraShardPeers int, targetShardID uint32, - currentNetworkEpochProviderHandler dataRetriever.CurrentNetworkEpochProviderHandler, ) (dataRetriever.Resolver, error) { resolverSender, err := brcf.createOneResolverSenderWithSpecifiedNumRequests( topic, EmptyExcludePeersOnTopic, targetShardID, - numCrossShard, - numIntraShard, - numFullHistory, - currentNetworkEpochProviderHandler, + numCrossShardPeers, + numIntraShardPeers, ) if err != nil { return nil, err @@ -334,11 +377,13 @@ func (brcf *baseResolversContainerFactory) createTrieNodesResolver( trie := brcf.triesContainer.Get([]byte(trieId)) argTrie := resolvers.ArgTrieNodeResolver{ - SenderResolver: resolverSender, - TrieDataGetter: trie, - Marshalizer: brcf.marshalizer, - AntifloodHandler: brcf.inputAntifloodHandler, - Throttler: brcf.throttler, + ArgBaseResolver: resolvers.ArgBaseResolver{ + SenderResolver: resolverSender, + Marshaller: brcf.marshalizer, + AntifloodHandler: brcf.inputAntifloodHandler, + Throttler: brcf.throttler, + }, + TrieDataGetter: trie, } 
resolver, err := resolvers.NewTrieNodeResolver(argTrie) if err != nil { diff --git a/dataRetriever/factory/resolverscontainer/export_test.go b/dataRetriever/factory/resolverscontainer/export_test.go index 20fcdc7eddb..76c6e940c7b 100644 --- a/dataRetriever/factory/resolverscontainer/export_test.go +++ b/dataRetriever/factory/resolverscontainer/export_test.go @@ -5,9 +5,9 @@ func (brcf *baseResolversContainerFactory) NumCrossShardPeers() int { return brcf.numCrossShardPeers } -// NumIntraShardPeers - -func (brcf *baseResolversContainerFactory) NumIntraShardPeers() int { - return brcf.numIntraShardPeers +// NumTotalPeers - +func (brcf *baseResolversContainerFactory) NumTotalPeers() int { + return brcf.numTotalPeers } // NumFullHistoryPeers - diff --git a/dataRetriever/factory/resolverscontainer/metaResolversContainerFactory.go b/dataRetriever/factory/resolverscontainer/metaResolversContainerFactory.go index 1020e30c5e4..6e56fd55a2c 100644 --- a/dataRetriever/factory/resolverscontainer/metaResolversContainerFactory.go +++ b/dataRetriever/factory/resolverscontainer/metaResolversContainerFactory.go @@ -34,27 +34,33 @@ func NewMetaResolversContainerFactory( return nil, err } + numIntraShardPeers := args.ResolverConfig.NumTotalPeers - args.ResolverConfig.NumCrossShardPeers container := containers.NewResolversContainer() base := &baseResolversContainerFactory{ - container: container, - shardCoordinator: args.ShardCoordinator, - messenger: args.Messenger, - store: args.Store, - marshalizer: args.Marshalizer, - dataPools: args.DataPools, - uint64ByteSliceConverter: args.Uint64ByteSliceConverter, - intRandomizer: &random.ConcurrentSafeIntRandomizer{}, - dataPacker: args.DataPacker, - triesContainer: args.TriesContainer, - inputAntifloodHandler: args.InputAntifloodHandler, - outputAntifloodHandler: args.OutputAntifloodHandler, - throttler: thr, - isFullHistoryNode: args.IsFullHistoryNode, - currentNetworkEpochProvider: args.CurrentNetworkEpochProvider, - preferredPeersHolder: args.PreferredPeersHolder, - numCrossShardPeers: int(args.ResolverConfig.NumCrossShardPeers), - numIntraShardPeers: int(args.ResolverConfig.NumIntraShardPeers), - numFullHistoryPeers: int(args.ResolverConfig.NumFullHistoryPeers), + container: container, + shardCoordinator: args.ShardCoordinator, + messenger: args.Messenger, + store: args.Store, + marshalizer: args.Marshalizer, + dataPools: args.DataPools, + uint64ByteSliceConverter: args.Uint64ByteSliceConverter, + intRandomizer: &random.ConcurrentSafeIntRandomizer{}, + dataPacker: args.DataPacker, + triesContainer: args.TriesContainer, + inputAntifloodHandler: args.InputAntifloodHandler, + outputAntifloodHandler: args.OutputAntifloodHandler, + throttler: thr, + isFullHistoryNode: args.IsFullHistoryNode, + currentNetworkEpochProvider: args.CurrentNetworkEpochProvider, + preferredPeersHolder: args.PreferredPeersHolder, + peersRatingHandler: args.PeersRatingHandler, + numCrossShardPeers: int(args.ResolverConfig.NumCrossShardPeers), + numIntraShardPeers: int(numIntraShardPeers), + numTotalPeers: int(args.ResolverConfig.NumTotalPeers), + numFullHistoryPeers: int(args.ResolverConfig.NumFullHistoryPeers), + nodesCoordinator: args.NodesCoordinator, + maxNumOfPeerAuthenticationInResponse: args.MaxNumOfPeerAuthenticationInResponse, + peerShardMapper: args.PeerShardMapper, } err = base.checkParams() @@ -119,6 +125,11 @@ func (mrcf *metaResolversContainerFactory) Create() (dataRetriever.ResolversCont return nil, err } + err = mrcf.generatePeerAuthenticationResolver() + if err != nil { + 
return nil, err + } + return mrcf.container, nil } @@ -139,10 +150,8 @@ func (mrcf *metaResolversContainerFactory) AddShardTrieNodeResolvers(container d identifierTrieNodes, triesFactory.UserAccountTrie, mrcf.numCrossShardPeers, - mrcf.numIntraShardPeers, - mrcf.numFullHistoryPeers, + mrcf.numTotalPeers-mrcf.numCrossShardPeers, idx, - mrcf.currentNetworkEpochProvider, ) if err != nil { return err @@ -163,12 +172,12 @@ func (mrcf *metaResolversContainerFactory) generateShardHeaderResolvers() error keys := make([]string, noOfShards) resolversSlice := make([]dataRetriever.Resolver, noOfShards) - //wire up to topics: shardBlocks_0_META, shardBlocks_1_META ... + // wire up to topics: shardBlocks_0_META, shardBlocks_1_META ... for idx := uint32(0); idx < noOfShards; idx++ { identifierHeader := factory.ShardBlocksTopic + shardC.CommunicationIdentifier(idx) excludePeersFromTopic := EmptyExcludePeersOnTopic - resolver, err := mrcf.createShardHeaderResolver(identifierHeader, excludePeersFromTopic, idx) + resolver, err := mrcf.createShardHeaderResolver(identifierHeader, excludePeersFromTopic, idx, mrcf.numCrossShardPeers, mrcf.numIntraShardPeers) if err != nil { return err } @@ -184,10 +193,12 @@ func (mrcf *metaResolversContainerFactory) createShardHeaderResolver( topic string, excludedTopic string, shardID uint32, + numCrossShardPeers int, + numIntraShardPeers int, ) (dataRetriever.Resolver, error) { hdrStorer := mrcf.store.GetStorer(dataRetriever.BlockHeaderUnit) - resolverSender, err := mrcf.createOneResolverSender(topic, excludedTopic, shardID) + resolverSender, err := mrcf.createOneResolverSenderWithSpecifiedNumRequests(topic, excludedTopic, shardID, numCrossShardPeers, numIntraShardPeers) if err != nil { return nil, err } @@ -196,15 +207,17 @@ func (mrcf *metaResolversContainerFactory) createShardHeaderResolver( hdrNonceHashDataUnit := dataRetriever.ShardHdrNonceHashDataUnit + dataRetriever.UnitType(shardID) hdrNonceStore := mrcf.store.GetStorer(hdrNonceHashDataUnit) arg := resolvers.ArgHeaderResolver{ - SenderResolver: resolverSender, + ArgBaseResolver: resolvers.ArgBaseResolver{ + SenderResolver: resolverSender, + Marshaller: mrcf.marshalizer, + AntifloodHandler: mrcf.inputAntifloodHandler, + Throttler: mrcf.throttler, + }, Headers: mrcf.dataPools.Headers(), HdrStorage: hdrStorer, HeadersNoncesStorage: hdrNonceStore, - Marshalizer: mrcf.marshalizer, NonceConverter: mrcf.uint64ByteSliceConverter, ShardCoordinator: mrcf.shardCoordinator, - AntifloodHandler: mrcf.inputAntifloodHandler, - Throttler: mrcf.throttler, IsFullHistoryNode: mrcf.isFullHistoryNode, } resolver, err := resolvers.NewHeaderResolver(arg) @@ -224,7 +237,7 @@ func (mrcf *metaResolversContainerFactory) createShardHeaderResolver( func (mrcf *metaResolversContainerFactory) generateMetaChainHeaderResolvers() error { identifierHeader := factory.MetachainBlocksTopic - resolver, err := mrcf.createMetaChainHeaderResolver(identifierHeader, core.MetachainShardId) + resolver, err := mrcf.createMetaChainHeaderResolver(identifierHeader, core.MetachainShardId, mrcf.numCrossShardPeers, mrcf.numIntraShardPeers) if err != nil { return err } @@ -235,25 +248,29 @@ func (mrcf *metaResolversContainerFactory) generateMetaChainHeaderResolvers() er func (mrcf *metaResolversContainerFactory) createMetaChainHeaderResolver( identifier string, shardId uint32, + numCrossShardPeers int, + numIntraShardPeers int, ) (dataRetriever.Resolver, error) { hdrStorer := mrcf.store.GetStorer(dataRetriever.MetaBlockUnit) - resolverSender, err := 
mrcf.createOneResolverSender(identifier, EmptyExcludePeersOnTopic, shardId) + resolverSender, err := mrcf.createOneResolverSenderWithSpecifiedNumRequests(identifier, EmptyExcludePeersOnTopic, shardId, numCrossShardPeers, numIntraShardPeers) if err != nil { return nil, err } hdrNonceStore := mrcf.store.GetStorer(dataRetriever.MetaHdrNonceHashDataUnit) arg := resolvers.ArgHeaderResolver{ - SenderResolver: resolverSender, + ArgBaseResolver: resolvers.ArgBaseResolver{ + SenderResolver: resolverSender, + Marshaller: mrcf.marshalizer, + AntifloodHandler: mrcf.inputAntifloodHandler, + Throttler: mrcf.throttler, + }, Headers: mrcf.dataPools.Headers(), HdrStorage: hdrStorer, HeadersNoncesStorage: hdrNonceStore, - Marshalizer: mrcf.marshalizer, NonceConverter: mrcf.uint64ByteSliceConverter, ShardCoordinator: mrcf.shardCoordinator, - AntifloodHandler: mrcf.inputAntifloodHandler, - Throttler: mrcf.throttler, IsFullHistoryNode: mrcf.isFullHistoryNode, } resolver, err := resolvers.NewHeaderResolver(arg) @@ -278,10 +295,8 @@ func (mrcf *metaResolversContainerFactory) generateTrieNodesResolvers() error { identifierTrieNodes, triesFactory.UserAccountTrie, 0, - mrcf.numIntraShardPeers+mrcf.numCrossShardPeers, - mrcf.numFullHistoryPeers, + mrcf.numTotalPeers, core.MetachainShardId, - mrcf.currentNetworkEpochProvider, ) if err != nil { return err @@ -295,10 +310,8 @@ func (mrcf *metaResolversContainerFactory) generateTrieNodesResolvers() error { identifierTrieNodes, triesFactory.PeerAccountTrie, 0, - mrcf.numIntraShardPeers+mrcf.numCrossShardPeers, - mrcf.numFullHistoryPeers, + mrcf.numTotalPeers, core.MetachainShardId, - mrcf.currentNetworkEpochProvider, ) if err != nil { return err @@ -322,12 +335,12 @@ func (mrcf *metaResolversContainerFactory) generateRewardsResolvers( keys := make([]string, noOfShards) resolverSlice := make([]dataRetriever.Resolver, noOfShards) - //wire up to topics: shardBlocks_0_META, shardBlocks_1_META ... + // wire up to topics: shardBlocks_0_META, shardBlocks_1_META ... 
for idx := uint32(0); idx < noOfShards; idx++ { identifierTx := topic + shardC.CommunicationIdentifier(idx) excludePeersFromTopic := EmptyExcludePeersOnTopic - resolver, err := mrcf.createTxResolver(identifierTx, excludePeersFromTopic, unit, dataPool, idx) + resolver, err := mrcf.createTxResolver(identifierTx, excludePeersFromTopic, unit, dataPool, idx, mrcf.numCrossShardPeers, mrcf.numIntraShardPeers) if err != nil { return err } diff --git a/dataRetriever/factory/resolverscontainer/metaResolversContainerFactory_test.go b/dataRetriever/factory/resolverscontainer/metaResolversContainerFactory_test.go index a9e5333fb2f..bb82c021392 100644 --- a/dataRetriever/factory/resolverscontainer/metaResolversContainerFactory_test.go +++ b/dataRetriever/factory/resolverscontainer/metaResolversContainerFactory_test.go @@ -1,6 +1,7 @@ package resolverscontainer_test import ( + "errors" "strings" "testing" @@ -17,6 +18,7 @@ import ( "github.com/ElrondNetwork/elrond-go/testscommon" dataRetrieverMock "github.com/ElrondNetwork/elrond-go/testscommon/dataRetriever" "github.com/ElrondNetwork/elrond-go/testscommon/p2pmocks" + "github.com/ElrondNetwork/elrond-go/testscommon/shardingMocks" storageStubs "github.com/ElrondNetwork/elrond-go/testscommon/storage" trieMock "github.com/ElrondNetwork/elrond-go/testscommon/trie" triesFactory "github.com/ElrondNetwork/elrond-go/trie/factory" @@ -168,6 +170,17 @@ func TestNewMetaResolversContainerFactory_NilPreferredPeersHolderShouldErr(t *te assert.Equal(t, dataRetriever.ErrNilPreferredPeersHolder, err) } +func TestNewMetaResolversContainerFactory_NilPeersRatingHandlerShouldErr(t *testing.T) { + t.Parallel() + + args := getArgumentsMeta() + args.PeersRatingHandler = nil + rcf, err := resolverscontainer.NewMetaResolversContainerFactory(args) + + assert.Nil(t, rcf) + assert.Equal(t, dataRetriever.ErrNilPeersRatingHandler, err) +} + func TestNewMetaResolversContainerFactory_NilUint64SliceConverterShouldErr(t *testing.T) { t.Parallel() @@ -201,6 +214,115 @@ func TestNewMetaResolversContainerFactory_NilTrieDataGetterShouldErr(t *testing. 
assert.Equal(t, dataRetriever.ErrNilTrieDataGetter, err) } +func TestNewMetaResolversContainerFactory_NilInputAntifloodHandlerShouldErr(t *testing.T) { + t.Parallel() + + args := getArgumentsMeta() + args.InputAntifloodHandler = nil + rcf, err := resolverscontainer.NewMetaResolversContainerFactory(args) + + assert.Nil(t, rcf) + assert.True(t, errors.Is(err, dataRetriever.ErrNilAntifloodHandler)) +} + +func TestNewMetaResolversContainerFactory_NilOutputAntifloodHandlerShouldErr(t *testing.T) { + t.Parallel() + + args := getArgumentsMeta() + args.OutputAntifloodHandler = nil + rcf, err := resolverscontainer.NewMetaResolversContainerFactory(args) + + assert.Nil(t, rcf) + assert.True(t, errors.Is(err, dataRetriever.ErrNilAntifloodHandler)) +} + +func TestNewMetaResolversContainerFactory_NilCurrentNetworkEpochProviderShouldErr(t *testing.T) { + t.Parallel() + + args := getArgumentsMeta() + args.CurrentNetworkEpochProvider = nil + rcf, err := resolverscontainer.NewMetaResolversContainerFactory(args) + + assert.Nil(t, rcf) + assert.Equal(t, dataRetriever.ErrNilCurrentNetworkEpochProvider, err) +} + +func TestNewMetaResolversContainerFactory_InvalidNumCrossShardPeersShouldErr(t *testing.T) { + t.Parallel() + + args := getArgumentsMeta() + args.ResolverConfig.NumCrossShardPeers = 0 + rcf, err := resolverscontainer.NewMetaResolversContainerFactory(args) + + assert.Nil(t, rcf) + assert.True(t, errors.Is(err, dataRetriever.ErrInvalidValue)) +} + +func TestNewMetaResolversContainerFactory_InvalidNumTotalPeersShouldErr(t *testing.T) { + t.Parallel() + + t.Run("NumTotalPeers is lower than NumCrossShardPeers", func(t *testing.T) { + args := getArgumentsMeta() + args.ResolverConfig.NumTotalPeers = 0 + rcf, err := resolverscontainer.NewMetaResolversContainerFactory(args) + + assert.Nil(t, rcf) + assert.True(t, errors.Is(err, dataRetriever.ErrInvalidValue)) + }) + t.Run("NumTotalPeers is equal to NumCrossShardPeers", func(t *testing.T) { + args := getArgumentsMeta() + args.ResolverConfig.NumTotalPeers = args.ResolverConfig.NumCrossShardPeers + rcf, err := resolverscontainer.NewMetaResolversContainerFactory(args) + + assert.Nil(t, rcf) + assert.True(t, errors.Is(err, dataRetriever.ErrInvalidValue)) + }) +} + +func TestNewMetaResolversContainerFactory_InvalidNumFullHistoryPeersShouldErr(t *testing.T) { + t.Parallel() + + args := getArgumentsMeta() + args.ResolverConfig.NumFullHistoryPeers = 0 + rcf, err := resolverscontainer.NewMetaResolversContainerFactory(args) + + assert.Nil(t, rcf) + assert.True(t, errors.Is(err, dataRetriever.ErrInvalidValue)) +} + +func TestNewMetaResolversContainerFactory_NilPeerShardMapperShouldErr(t *testing.T) { + t.Parallel() + + args := getArgumentsMeta() + args.PeerShardMapper = nil + rcf, err := resolverscontainer.NewMetaResolversContainerFactory(args) + + assert.Nil(t, rcf) + assert.Equal(t, dataRetriever.ErrNilPeerShardMapper, err) +} + +func TestNewMetaResolversContainerFactory_NilNodesCoordinatorShouldErr(t *testing.T) { + t.Parallel() + + args := getArgumentsMeta() + args.NodesCoordinator = nil + rcf, err := resolverscontainer.NewMetaResolversContainerFactory(args) + + assert.Nil(t, rcf) + assert.Equal(t, dataRetriever.ErrNilNodesCoordinator, err) +} + +func TestNewMetaResolversContainerFactory_InvalidMaxNumOfPeerAuthenticationInResponseShouldErr(t *testing.T) { + t.Parallel() + + args := getArgumentsMeta() + args.MaxNumOfPeerAuthenticationInResponse = 0 + rcf, err := resolverscontainer.NewMetaResolversContainerFactory(args) + + assert.Nil(t, rcf) + assert.True(t, 
strings.Contains(err.Error(), dataRetriever.ErrInvalidValue.Error())) +} + func TestNewMetaResolversContainerFactory_ShouldWork(t *testing.T) { t.Parallel() @@ -209,7 +331,7 @@ func TestNewMetaResolversContainerFactory_ShouldWork(t *testing.T) { assert.Nil(t, err) assert.False(t, check.IfNil(rcf)) - assert.Equal(t, int(args.ResolverConfig.NumIntraShardPeers), rcf.NumIntraShardPeers()) + assert.Equal(t, int(args.ResolverConfig.NumTotalPeers), rcf.NumTotalPeers()) assert.Equal(t, int(args.ResolverConfig.NumCrossShardPeers), rcf.NumCrossShardPeers()) assert.Equal(t, int(args.ResolverConfig.NumFullHistoryPeers), rcf.NumFullHistoryPeers()) } @@ -261,8 +383,9 @@ func TestMetaResolversContainerFactory_With4ShardsShouldWork(t *testing.T) { numResolversRewards := noOfShards numResolversTxs := noOfShards + 1 numResolversTrieNodes := 2 + numResolversPeerAuth := 1 totalResolvers := numResolversShardHeadersForMetachain + numResolverMetablocks + numResolversMiniBlocks + - numResolversUnsigned + numResolversTxs + numResolversTrieNodes + numResolversRewards + numResolversUnsigned + numResolversTxs + numResolversTrieNodes + numResolversRewards + numResolversPeerAuth assert.Equal(t, totalResolvers, container.Len()) @@ -289,8 +412,12 @@ func getArgumentsMeta() resolverscontainer.FactoryArgs { PreferredPeersHolder: &p2pmocks.PeersHolderStub{}, ResolverConfig: config.ResolverConfig{ NumCrossShardPeers: 1, - NumIntraShardPeers: 2, + NumTotalPeers: 3, NumFullHistoryPeers: 3, }, + PeersRatingHandler: &p2pmocks.PeersRatingHandlerStub{}, + NodesCoordinator: &shardingMocks.NodesCoordinatorStub{}, + MaxNumOfPeerAuthenticationInResponse: 5, + PeerShardMapper: &p2pmocks.NetworkShardingCollectorStub{}, } } diff --git a/dataRetriever/factory/resolverscontainer/shardResolversContainerFactory.go b/dataRetriever/factory/resolverscontainer/shardResolversContainerFactory.go index 4fdac5984e2..444c4332f22 100644 --- a/dataRetriever/factory/resolverscontainer/shardResolversContainerFactory.go +++ b/dataRetriever/factory/resolverscontainer/shardResolversContainerFactory.go @@ -32,27 +32,33 @@ func NewShardResolversContainerFactory( return nil, err } + numIntraShardPeers := args.ResolverConfig.NumTotalPeers - args.ResolverConfig.NumCrossShardPeers container := containers.NewResolversContainer() base := &baseResolversContainerFactory{ - container: container, - shardCoordinator: args.ShardCoordinator, - messenger: args.Messenger, - store: args.Store, - marshalizer: args.Marshalizer, - dataPools: args.DataPools, - uint64ByteSliceConverter: args.Uint64ByteSliceConverter, - intRandomizer: &random.ConcurrentSafeIntRandomizer{}, - dataPacker: args.DataPacker, - triesContainer: args.TriesContainer, - inputAntifloodHandler: args.InputAntifloodHandler, - outputAntifloodHandler: args.OutputAntifloodHandler, - throttler: thr, - isFullHistoryNode: args.IsFullHistoryNode, - currentNetworkEpochProvider: args.CurrentNetworkEpochProvider, - preferredPeersHolder: args.PreferredPeersHolder, - numCrossShardPeers: int(args.ResolverConfig.NumCrossShardPeers), - numIntraShardPeers: int(args.ResolverConfig.NumIntraShardPeers), - numFullHistoryPeers: int(args.ResolverConfig.NumFullHistoryPeers), + container: container, + shardCoordinator: args.ShardCoordinator, + messenger: args.Messenger, + store: args.Store, + marshalizer: args.Marshalizer, + dataPools: args.DataPools, + uint64ByteSliceConverter: args.Uint64ByteSliceConverter, + intRandomizer: &random.ConcurrentSafeIntRandomizer{}, + dataPacker: args.DataPacker, + triesContainer: args.TriesContainer, + 
inputAntifloodHandler: args.InputAntifloodHandler, + outputAntifloodHandler: args.OutputAntifloodHandler, + throttler: thr, + isFullHistoryNode: args.IsFullHistoryNode, + currentNetworkEpochProvider: args.CurrentNetworkEpochProvider, + preferredPeersHolder: args.PreferredPeersHolder, + peersRatingHandler: args.PeersRatingHandler, + numCrossShardPeers: int(args.ResolverConfig.NumCrossShardPeers), + numIntraShardPeers: int(numIntraShardPeers), + numTotalPeers: int(args.ResolverConfig.NumTotalPeers), + numFullHistoryPeers: int(args.ResolverConfig.NumFullHistoryPeers), + nodesCoordinator: args.NodesCoordinator, + maxNumOfPeerAuthenticationInResponse: args.MaxNumOfPeerAuthenticationInResponse, + peerShardMapper: args.PeerShardMapper, } err = base.checkParams() @@ -117,6 +123,11 @@ func (srcf *shardResolversContainerFactory) Create() (dataRetriever.ResolversCon return nil, err } + err = srcf.generatePeerAuthenticationResolver() + if err != nil { + return nil, err + } + return srcf.container, nil } @@ -125,11 +136,11 @@ func (srcf *shardResolversContainerFactory) Create() (dataRetriever.ResolversCon func (srcf *shardResolversContainerFactory) generateHeaderResolvers() error { shardC := srcf.shardCoordinator - //only one shard header topic, for example: shardBlocks_0_META + // only one shard header topic, for example: shardBlocks_0_META identifierHdr := factory.ShardBlocksTopic + shardC.CommunicationIdentifier(core.MetachainShardId) hdrStorer := srcf.store.GetStorer(dataRetriever.BlockHeaderUnit) - resolverSender, err := srcf.createOneResolverSender(identifierHdr, EmptyExcludePeersOnTopic, shardC.SelfId()) + resolverSender, err := srcf.createOneResolverSenderWithSpecifiedNumRequests(identifierHdr, EmptyExcludePeersOnTopic, shardC.SelfId(), srcf.numCrossShardPeers, srcf.numIntraShardPeers) if err != nil { return err } @@ -137,15 +148,17 @@ func (srcf *shardResolversContainerFactory) generateHeaderResolvers() error { hdrNonceHashDataUnit := dataRetriever.ShardHdrNonceHashDataUnit + dataRetriever.UnitType(shardC.SelfId()) hdrNonceStore := srcf.store.GetStorer(hdrNonceHashDataUnit) arg := resolvers.ArgHeaderResolver{ - SenderResolver: resolverSender, + ArgBaseResolver: resolvers.ArgBaseResolver{ + SenderResolver: resolverSender, + Marshaller: srcf.marshalizer, + AntifloodHandler: srcf.inputAntifloodHandler, + Throttler: srcf.throttler, + }, Headers: srcf.dataPools.Headers(), HdrStorage: hdrStorer, HeadersNoncesStorage: hdrNonceStore, - Marshalizer: srcf.marshalizer, NonceConverter: srcf.uint64ByteSliceConverter, ShardCoordinator: srcf.shardCoordinator, - AntifloodHandler: srcf.inputAntifloodHandler, - Throttler: srcf.throttler, IsFullHistoryNode: srcf.isFullHistoryNode, } resolver, err := resolvers.NewHeaderResolver(arg) @@ -164,27 +177,29 @@ func (srcf *shardResolversContainerFactory) generateHeaderResolvers() error { //------- MetaBlockHeaderResolvers func (srcf *shardResolversContainerFactory) generateMetablockHeaderResolvers() error { - //only one metachain header block topic - //this is: metachainBlocks + // only one metachain header block topic + // this is: metachainBlocks identifierHdr := factory.MetachainBlocksTopic hdrStorer := srcf.store.GetStorer(dataRetriever.MetaBlockUnit) - resolverSender, err := srcf.createOneResolverSender(identifierHdr, EmptyExcludePeersOnTopic, core.MetachainShardId) + resolverSender, err := srcf.createOneResolverSenderWithSpecifiedNumRequests(identifierHdr, EmptyExcludePeersOnTopic, core.MetachainShardId, srcf.numCrossShardPeers, srcf.numIntraShardPeers) if err != 
nil { return err } hdrNonceStore := srcf.store.GetStorer(dataRetriever.MetaHdrNonceHashDataUnit) arg := resolvers.ArgHeaderResolver{ - SenderResolver: resolverSender, + ArgBaseResolver: resolvers.ArgBaseResolver{ + SenderResolver: resolverSender, + Marshaller: srcf.marshalizer, + AntifloodHandler: srcf.inputAntifloodHandler, + Throttler: srcf.throttler, + }, Headers: srcf.dataPools.Headers(), HdrStorage: hdrStorer, HeadersNoncesStorage: hdrNonceStore, - Marshalizer: srcf.marshalizer, NonceConverter: srcf.uint64ByteSliceConverter, ShardCoordinator: srcf.shardCoordinator, - AntifloodHandler: srcf.inputAntifloodHandler, - Throttler: srcf.throttler, IsFullHistoryNode: srcf.isFullHistoryNode, } resolver, err := resolvers.NewHeaderResolver(arg) @@ -211,10 +226,8 @@ func (srcf *shardResolversContainerFactory) generateTrieNodesResolvers() error { identifierTrieNodes, triesFactory.UserAccountTrie, 0, - srcf.numIntraShardPeers+srcf.numCrossShardPeers, - srcf.numFullHistoryPeers, + srcf.numTotalPeers, core.MetachainShardId, - srcf.currentNetworkEpochProvider, ) if err != nil { return err @@ -239,7 +252,7 @@ func (srcf *shardResolversContainerFactory) generateRewardResolver( identifierTx := topic + shardC.CommunicationIdentifier(core.MetachainShardId) excludedPeersOnTopic := factory.TransactionTopic + shardC.CommunicationIdentifier(shardC.SelfId()) - resolver, err := srcf.createTxResolver(identifierTx, excludedPeersOnTopic, unit, dataPool, core.MetachainShardId) + resolver, err := srcf.createTxResolver(identifierTx, excludedPeersOnTopic, unit, dataPool, core.MetachainShardId, srcf.numCrossShardPeers, srcf.numIntraShardPeers) if err != nil { return err } diff --git a/dataRetriever/factory/resolverscontainer/shardResolversContainerFactory_test.go b/dataRetriever/factory/resolverscontainer/shardResolversContainerFactory_test.go index a3694c1fc68..b205f1b2a0d 100644 --- a/dataRetriever/factory/resolverscontainer/shardResolversContainerFactory_test.go +++ b/dataRetriever/factory/resolverscontainer/shardResolversContainerFactory_test.go @@ -17,6 +17,7 @@ import ( "github.com/ElrondNetwork/elrond-go/testscommon" dataRetrieverMock "github.com/ElrondNetwork/elrond-go/testscommon/dataRetriever" "github.com/ElrondNetwork/elrond-go/testscommon/p2pmocks" + "github.com/ElrondNetwork/elrond-go/testscommon/shardingMocks" storageStubs "github.com/ElrondNetwork/elrond-go/testscommon/storage" trieMock "github.com/ElrondNetwork/elrond-go/testscommon/trie" triesFactory "github.com/ElrondNetwork/elrond-go/trie/factory" @@ -197,26 +198,51 @@ func TestNewShardResolversContainerFactory_NilPreferredPeersHolderShouldErr(t *t assert.Equal(t, dataRetriever.ErrNilPreferredPeersHolder, err) } -func TestNewShardResolversContainerFactory_NilTriesContainerShouldErr(t *testing.T) { +func TestNewShardResolversContainerFactory_NilPeersRatingHandlerShouldErr(t *testing.T) { t.Parallel() args := getArgumentsShard() - args.TriesContainer = nil + args.PeersRatingHandler = nil rcf, err := resolverscontainer.NewShardResolversContainerFactory(args) assert.Nil(t, rcf) - assert.Equal(t, dataRetriever.ErrNilTrieDataGetter, err) + assert.Equal(t, dataRetriever.ErrNilPeersRatingHandler, err) } -func TestNewShardResolversContainerFactory_InvalidNumIntraShardPeersShouldErr(t *testing.T) { +func TestNewShardResolversContainerFactory_NilTriesContainerShouldErr(t *testing.T) { t.Parallel() args := getArgumentsShard() - args.ResolverConfig.NumIntraShardPeers = 0 + args.TriesContainer = nil rcf, err := 
resolverscontainer.NewShardResolversContainerFactory(args) assert.Nil(t, rcf) - assert.True(t, errors.Is(err, dataRetriever.ErrInvalidValue)) + assert.Equal(t, dataRetriever.ErrNilTrieDataGetter, err) +} + +func TestNewShardResolversContainerFactory_InvalidNumTotalPeersShouldErr(t *testing.T) { + t.Parallel() + + t.Run("NumTotalPeers is lower than NumCrossShardPeers", func(t *testing.T) { + t.Parallel() + + args := getArgumentsShard() + args.ResolverConfig.NumTotalPeers = 0 + rcf, err := resolverscontainer.NewShardResolversContainerFactory(args) + + assert.Nil(t, rcf) + assert.True(t, errors.Is(err, dataRetriever.ErrInvalidValue)) + }) + t.Run("NumTotalPeers is equal to NumCrossShardPeers", func(t *testing.T) { + t.Parallel() + + args := getArgumentsShard() + args.ResolverConfig.NumTotalPeers = args.ResolverConfig.NumCrossShardPeers + rcf, err := resolverscontainer.NewShardResolversContainerFactory(args) + + assert.Nil(t, rcf) + assert.True(t, errors.Is(err, dataRetriever.ErrInvalidValue)) + }) } func TestNewShardResolversContainerFactory_InvalidNumCrossShardPeersShouldErr(t *testing.T) { @@ -241,6 +267,72 @@ func TestNewShardResolversContainerFactory_InvalidNumFullHistoryPeersShouldErr(t assert.True(t, errors.Is(err, dataRetriever.ErrInvalidValue)) } +func TestNewShardResolversContainerFactory_NilNodesCoordinatorShouldErr(t *testing.T) { + t.Parallel() + + args := getArgumentsShard() + args.NodesCoordinator = nil + rcf, err := resolverscontainer.NewShardResolversContainerFactory(args) + + assert.Nil(t, rcf) + assert.Equal(t, dataRetriever.ErrNilNodesCoordinator, err) +} + +func TestNewShardResolversContainerFactory_InvalidMaxNumOfPeerAuthenticationInResponseShouldErr(t *testing.T) { + t.Parallel() + + args := getArgumentsShard() + args.MaxNumOfPeerAuthenticationInResponse = 0 + rcf, err := resolverscontainer.NewShardResolversContainerFactory(args) + + assert.Nil(t, rcf) + assert.True(t, strings.Contains(err.Error(), dataRetriever.ErrInvalidValue.Error())) +} + +func TestNewShardResolversContainerFactory_NilInputAntifloodHandlerShouldErr(t *testing.T) { + t.Parallel() + + args := getArgumentsShard() + args.InputAntifloodHandler = nil + rcf, err := resolverscontainer.NewShardResolversContainerFactory(args) + + assert.Nil(t, rcf) + assert.True(t, errors.Is(err, dataRetriever.ErrNilAntifloodHandler)) +} + +func TestNewShardResolversContainerFactory_NilOutputAntifloodHandlerShouldErr(t *testing.T) { + t.Parallel() + + args := getArgumentsShard() + args.OutputAntifloodHandler = nil + rcf, err := resolverscontainer.NewShardResolversContainerFactory(args) + + assert.Nil(t, rcf) + assert.True(t, errors.Is(err, dataRetriever.ErrNilAntifloodHandler)) +} + +func TestNewShardResolversContainerFactory_NilCurrentNetworkEpochProviderShouldErr(t *testing.T) { + t.Parallel() + + args := getArgumentsShard() + args.CurrentNetworkEpochProvider = nil + rcf, err := resolverscontainer.NewShardResolversContainerFactory(args) + + assert.Nil(t, rcf) + assert.Equal(t, dataRetriever.ErrNilCurrentNetworkEpochProvider, err) +} + +func TestNewShardResolversContainerFactory_NilPeerShardMapperShouldErr(t *testing.T) { + t.Parallel() + + args := getArgumentsShard() + args.PeerShardMapper = nil + rcf, err := resolverscontainer.NewShardResolversContainerFactory(args) + + assert.Nil(t, rcf) + assert.Equal(t, dataRetriever.ErrNilPeerShardMapper, err) +} + func TestNewShardResolversContainerFactory_ShouldWork(t *testing.T) { t.Parallel() @@ -250,7 +342,7 @@ func TestNewShardResolversContainerFactory_ShouldWork(t *testing.T) { 
assert.NotNil(t, rcf) assert.Nil(t, err) require.False(t, rcf.IsInterfaceNil()) - assert.Equal(t, int(args.ResolverConfig.NumIntraShardPeers), rcf.NumIntraShardPeers()) + assert.Equal(t, int(args.ResolverConfig.NumTotalPeers), rcf.NumTotalPeers()) assert.Equal(t, int(args.ResolverConfig.NumCrossShardPeers), rcf.NumCrossShardPeers()) assert.Equal(t, int(args.ResolverConfig.NumFullHistoryPeers), rcf.NumFullHistoryPeers()) } @@ -309,6 +401,19 @@ func TestShardResolversContainerFactory_CreateRegisterTrieNodesFailsShouldErr(t assert.Equal(t, errExpected, err) } +func TestShardResolversContainerFactory_CreateRegisterPeerAuthenticationShouldErr(t *testing.T) { + t.Parallel() + + args := getArgumentsShard() + args.Messenger = createStubTopicMessageHandlerForShard("", common.PeerAuthenticationTopic) + rcf, _ := resolverscontainer.NewShardResolversContainerFactory(args) + + container, err := rcf.Create() + + assert.Nil(t, container) + assert.Equal(t, errExpected, err) +} + func TestShardResolversContainerFactory_CreateShouldWork(t *testing.T) { t.Parallel() @@ -343,8 +448,9 @@ func TestShardResolversContainerFactory_With4ShardsShouldWork(t *testing.T) { numResolverMiniBlocks := noOfShards + 2 numResolverMetaBlockHeaders := 1 numResolverTrieNodes := 1 - totalResolvers := numResolverTxs + numResolverHeaders + numResolverMiniBlocks + - numResolverMetaBlockHeaders + numResolverSCRs + numResolverRewardTxs + numResolverTrieNodes + numResolverPeerAuth := 1 + totalResolvers := numResolverTxs + numResolverHeaders + numResolverMiniBlocks + numResolverMetaBlockHeaders + + numResolverSCRs + numResolverRewardTxs + numResolverTrieNodes + numResolverPeerAuth assert.Equal(t, totalResolvers, container.Len()) } @@ -367,8 +473,12 @@ func getArgumentsShard() resolverscontainer.FactoryArgs { PreferredPeersHolder: &p2pmocks.PeersHolderStub{}, ResolverConfig: config.ResolverConfig{ NumCrossShardPeers: 1, - NumIntraShardPeers: 2, + NumTotalPeers: 3, NumFullHistoryPeers: 3, }, + NodesCoordinator: &shardingMocks.NodesCoordinatorStub{}, + MaxNumOfPeerAuthenticationInResponse: 5, + PeerShardMapper: &p2pmocks.NetworkShardingCollectorStub{}, + PeersRatingHandler: &p2pmocks.PeersRatingHandlerStub{}, } } diff --git a/dataRetriever/interface.go b/dataRetriever/interface.go index c1668838bd9..75fb42525e9 100644 --- a/dataRetriever/interface.go +++ b/dataRetriever/interface.go @@ -153,6 +153,13 @@ type MiniBlocksResolver interface { RequestDataFromHashArray(hashes [][]byte, epoch uint32) error } +// PeerAuthenticationResolver defines what a peer authentication resolver should do +type PeerAuthenticationResolver interface { + Resolver + RequestDataFromChunk(chunkIndex uint32, epoch uint32) error + RequestDataFromHashArray(hashes [][]byte, epoch uint32) error +} + // TopicResolverSender defines what sending operations are allowed for a topic resolver type TopicResolverSender interface { SendOnRequestTopic(rd *RequestData, originalHashes [][]byte) error @@ -321,6 +328,9 @@ type PoolsHolder interface { TrieNodesChunks() storage.Cacher SmartContracts() storage.Cacher CurrentBlockTxs() TransactionCacher + PeerAuthentications() storage.Cacher + Heartbeats() storage.Cacher + Close() error IsInterfaceNil() bool } @@ -410,8 +420,23 @@ type PreferredPeersHolderHandler interface { IsInterfaceNil() bool } +// PeersRatingHandler represent an entity able to handle peers ratings +type PeersRatingHandler interface { + AddPeer(pid core.PeerID) + IncreaseRating(pid core.PeerID) + DecreaseRating(pid core.PeerID) + GetTopRatedPeersFromList(peers 
[]core.PeerID, minNumOfPeersExpected int) []core.PeerID + IsInterfaceNil() bool +} + // SelfShardIDProvider defines the behavior of a component able to provide the self shard ID type SelfShardIDProvider interface { SelfId() uint32 IsInterfaceNil() bool } + +// NodesCoordinator provides Validator methods needed for the peer processing +type NodesCoordinator interface { + GetAllEligibleValidatorsPublicKeys(epoch uint32) (map[uint32][][]byte, error) + IsInterfaceNil() bool +} diff --git a/dataRetriever/mock/peerAuthenticationResolverStub.go b/dataRetriever/mock/peerAuthenticationResolverStub.go new file mode 100644 index 00000000000..b50b0de0cf7 --- /dev/null +++ b/dataRetriever/mock/peerAuthenticationResolverStub.go @@ -0,0 +1,93 @@ +package mock + +import ( + "github.com/ElrondNetwork/elrond-go-core/core" + "github.com/ElrondNetwork/elrond-go/dataRetriever" + "github.com/ElrondNetwork/elrond-go/p2p" +) + +// PeerAuthenticationResolverStub - +type PeerAuthenticationResolverStub struct { + RequestDataFromHashCalled func(hash []byte, epoch uint32) error + ProcessReceivedMessageCalled func(message p2p.MessageP2P, fromConnectedPeer core.PeerID) error + SetResolverDebugHandlerCalled func(handler dataRetriever.ResolverDebugHandler) error + SetNumPeersToQueryCalled func(intra int, cross int) + NumPeersToQueryCalled func() (int, int) + CloseCalled func() error + RequestDataFromChunkCalled func(chunkIndex uint32, epoch uint32) error + RequestDataFromHashArrayCalled func(hashes [][]byte, epoch uint32) error +} + +// RequestDataFromHash - +func (pars *PeerAuthenticationResolverStub) RequestDataFromHash(hash []byte, epoch uint32) error { + if pars.RequestDataFromHashCalled != nil { + return pars.RequestDataFromHashCalled(hash, epoch) + } + + return nil +} + +// ProcessReceivedMessage - +func (pars *PeerAuthenticationResolverStub) ProcessReceivedMessage(message p2p.MessageP2P, fromConnectedPeer core.PeerID) error { + if pars.ProcessReceivedMessageCalled != nil { + return pars.ProcessReceivedMessageCalled(message, fromConnectedPeer) + } + + return nil +} + +// SetResolverDebugHandler - +func (pars *PeerAuthenticationResolverStub) SetResolverDebugHandler(handler dataRetriever.ResolverDebugHandler) error { + if pars.SetResolverDebugHandlerCalled != nil { + return pars.SetResolverDebugHandlerCalled(handler) + } + + return nil +} + +// SetNumPeersToQuery - +func (pars *PeerAuthenticationResolverStub) SetNumPeersToQuery(intra int, cross int) { + if pars.SetNumPeersToQueryCalled != nil { + pars.SetNumPeersToQueryCalled(intra, cross) + } +} + +// NumPeersToQuery - +func (pars *PeerAuthenticationResolverStub) NumPeersToQuery() (int, int) { + if pars.NumPeersToQueryCalled != nil { + return pars.NumPeersToQueryCalled() + } + + return 0, 0 +} + +func (pars *PeerAuthenticationResolverStub) Close() error { + if pars.CloseCalled != nil { + return pars.CloseCalled() + } + + return nil +} + +// RequestDataFromChunk - +func (pars *PeerAuthenticationResolverStub) RequestDataFromChunk(chunkIndex uint32, epoch uint32) error { + if pars.RequestDataFromChunkCalled != nil { + return pars.RequestDataFromChunkCalled(chunkIndex, epoch) + } + + return nil +} + +// RequestDataFromHashArray - +func (pars *PeerAuthenticationResolverStub) RequestDataFromHashArray(hashes [][]byte, epoch uint32) error { + if pars.RequestDataFromHashArrayCalled != nil { + return pars.RequestDataFromHashArrayCalled(hashes, epoch) + } + + return nil +} + +// IsInterfaceNil - +func (pars *PeerAuthenticationResolverStub) IsInterfaceNil() bool { + return pars 
== nil +} diff --git a/dataRetriever/requestData.pb.go b/dataRetriever/requestData.pb.go index 17f4090ab46..a9c11d71c32 100644 --- a/dataRetriever/requestData.pb.go +++ b/dataRetriever/requestData.pb.go @@ -41,6 +41,8 @@ const ( NonceType RequestDataType = 3 // EpochType indicates that the request data object is of type epoch EpochType RequestDataType = 4 + // ChunkType indicates that the request data object is of type chunk + ChunkType RequestDataType = 5 ) var RequestDataType_name = map[int32]string{ @@ -49,6 +51,7 @@ var RequestDataType_name = map[int32]string{ 2: "HashArrayType", 3: "NonceType", 4: "EpochType", + 5: "ChunkType", } var RequestDataType_value = map[string]int32{ @@ -57,6 +60,7 @@ var RequestDataType_value = map[string]int32{ "HashArrayType": 2, "NonceType": 3, "EpochType": 4, + "ChunkType": 5, } func (RequestDataType) EnumDescriptor() ([]byte, []int) { @@ -136,29 +140,29 @@ func init() { func init() { proto.RegisterFile("requestData.proto", fileDescriptor_d2e280b7501d5666) } var fileDescriptor_d2e280b7501d5666 = []byte{ - // 337 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x5c, 0x90, 0x41, 0x4e, 0x2a, 0x31, - 0x18, 0x80, 0xe7, 0x87, 0xe1, 0x05, 0x0a, 0x03, 0x8f, 0x2e, 0x5e, 0x26, 0x6f, 0xf1, 0x0f, 0x71, - 0x45, 0x4c, 0x1c, 0x12, 0xf5, 0x02, 0xa2, 0x46, 0xd9, 0xb8, 0x98, 0x18, 0x17, 0xee, 0xca, 0x50, - 0x19, 0x22, 0x4e, 0xc7, 0xa1, 0x43, 0x64, 0xe7, 0x11, 0x3c, 0x86, 0x17, 0xf0, 0x0e, 0x2e, 0x59, - 0xb2, 0x22, 0x52, 0x36, 0x86, 0x15, 0x47, 0x30, 0xed, 0x24, 0x4a, 0x5c, 0xb5, 0xdf, 0xd7, 0xaf, - 0xcd, 0x9f, 0x92, 0x66, 0xca, 0x1f, 0x33, 0x3e, 0x91, 0x67, 0x4c, 0x32, 0x3f, 0x49, 0x85, 0x14, - 0xb4, 0x64, 0x96, 0xff, 0x07, 0xc3, 0x91, 0x8c, 0xb2, 0xbe, 0x1f, 0x8a, 0x87, 0xce, 0x50, 0x0c, - 0x45, 0xc7, 0xe8, 0x7e, 0x76, 0x67, 0xc8, 0x80, 0xd9, 0xe5, 0xb7, 0xf6, 0xde, 0x80, 0x54, 0x83, - 0x9f, 0xb7, 0xe8, 0x31, 0xb1, 0xaf, 0x67, 0x09, 0x77, 0xa1, 0x05, 0xed, 0xfa, 0xe1, 0xbf, 0xbc, - 0xf2, 0x77, 0x0a, 0x7d, 0xda, 0x2d, 0x6f, 0x96, 0x9e, 0x2d, 0x67, 0x09, 0x0f, 0x4c, 0x4d, 0x3d, - 0x52, 0xba, 0x61, 0xe3, 0x8c, 0xbb, 0x85, 0x16, 0xb4, 0x6b, 0xdd, 0xca, 0x66, 0xe9, 0x95, 0xa6, - 0x5a, 0x04, 0xb9, 0xd7, 0xc1, 0x79, 0x22, 0xc2, 0xc8, 0x2d, 0xb6, 0xa0, 0xed, 0xe4, 0x01, 0xd7, - 0x22, 0xc8, 0x3d, 0xf5, 0x09, 0x39, 0x8d, 0xb2, 0xf8, 0xbe, 0x17, 0x0f, 0xf8, 0x93, 0x6b, 0x9b, - 0xaa, 0xbe, 0x59, 0x7a, 0x24, 0xfc, 0xb6, 0xc1, 0x4e, 0xb1, 0xcf, 0x48, 0xe3, 0xd7, 0x50, 0xb4, - 0x41, 0xaa, 0xbd, 0x78, 0xca, 0xc6, 0xa3, 0x81, 0xc6, 0xbf, 0x16, 0xad, 0x91, 0xf2, 0x25, 0x9b, - 0x44, 0x86, 0x80, 0x36, 0x89, 0xa3, 0xe9, 0x24, 0x4d, 0xd9, 0xcc, 0xa8, 0x02, 0x75, 0x48, 0xe5, - 0x4a, 0xc4, 0x21, 0x37, 0x58, 0xd4, 0x68, 0x86, 0x31, 0x68, 0x77, 0x2f, 0xe6, 0x2b, 0xb4, 0x16, - 0x2b, 0xb4, 0xb6, 0x2b, 0x84, 0x67, 0x85, 0xf0, 0xaa, 0x10, 0xde, 0x15, 0xc2, 0x5c, 0x21, 0x2c, - 0x14, 0xc2, 0x87, 0x42, 0xf8, 0x54, 0x68, 0x6d, 0x15, 0xc2, 0xcb, 0x1a, 0xad, 0xf9, 0x1a, 0xad, - 0xc5, 0x1a, 0xad, 0x5b, 0x67, 0xc0, 0x24, 0x0b, 0xb8, 0x4c, 0x47, 0x7c, 0xca, 0xd3, 0xfe, 0x1f, - 0xf3, 0x89, 0x47, 0x5f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x8e, 0xe6, 0x8e, 0x2d, 0xb5, 0x01, 0x00, - 0x00, + // 339 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x5c, 0x90, 0xb1, 0x4e, 0xc2, 0x40, + 0x18, 0x80, 0x7b, 0xd0, 0x1a, 0x38, 0x28, 0xc8, 0x0d, 0xa6, 0x71, 0xf8, 0x4b, 0x9c, 0x88, 0x89, + 0x25, 0x51, 0x5f, 0x40, 0xd4, 0x28, 0x8b, 0x43, 0x63, 0x1c, 0xdc, 0x8e, 0x72, 0x52, 0x22, 0xb6, + 0xb5, 0x5c, 0x89, 0x6c, 0x3e, 0x82, 0x8f, 
0xe1, 0x0b, 0xf8, 0x0e, 0x8e, 0x8c, 0x4c, 0x44, 0x8e, + 0xc5, 0x30, 0xf1, 0x08, 0xe6, 0xfe, 0x26, 0x4a, 0x9c, 0xda, 0xef, 0xbb, 0xef, 0xee, 0xfe, 0x1c, + 0x6d, 0xa4, 0xe2, 0x39, 0x13, 0x63, 0x79, 0xc1, 0x25, 0xf7, 0x92, 0x34, 0x96, 0x31, 0xb3, 0xf0, + 0xb3, 0x7f, 0x34, 0x18, 0xca, 0x30, 0xeb, 0x79, 0x41, 0xfc, 0xd4, 0x1e, 0xc4, 0x83, 0xb8, 0x8d, + 0xba, 0x97, 0x3d, 0x20, 0x21, 0xe0, 0x5f, 0xbe, 0xeb, 0xe0, 0x83, 0xd0, 0x8a, 0xff, 0x77, 0x16, + 0x3b, 0xa5, 0xe6, 0xed, 0x34, 0x11, 0x0e, 0x69, 0x92, 0x56, 0xed, 0x78, 0x2f, 0xaf, 0xbc, 0xad, + 0x42, 0xaf, 0x76, 0x4a, 0xeb, 0x85, 0x6b, 0xca, 0x69, 0x22, 0x7c, 0xac, 0x99, 0x4b, 0xad, 0x3b, + 0x3e, 0xca, 0x84, 0x53, 0x68, 0x92, 0x56, 0xb5, 0x53, 0x5e, 0x2f, 0x5c, 0x6b, 0xa2, 0x85, 0x9f, + 0x7b, 0x1d, 0x5c, 0x26, 0x71, 0x10, 0x3a, 0xc5, 0x26, 0x69, 0xd9, 0x79, 0x20, 0xb4, 0xf0, 0x73, + 0xcf, 0x3c, 0x4a, 0xcf, 0xc3, 0x2c, 0x7a, 0xec, 0x46, 0x7d, 0xf1, 0xe2, 0x98, 0x58, 0xd5, 0xd6, + 0x0b, 0x97, 0x06, 0xbf, 0xd6, 0xdf, 0x2a, 0x0e, 0x13, 0x5a, 0xff, 0x37, 0x14, 0xab, 0xd3, 0x4a, + 0x37, 0x9a, 0xf0, 0xd1, 0xb0, 0xaf, 0x71, 0xd7, 0x60, 0x55, 0x5a, 0xba, 0xe6, 0xe3, 0x10, 0x89, + 0xb0, 0x06, 0xb5, 0x35, 0x9d, 0xa5, 0x29, 0x9f, 0xa2, 0x2a, 0x30, 0x9b, 0x96, 0x6f, 0xe2, 0x28, + 0x10, 0x88, 0x45, 0x8d, 0x38, 0x0c, 0xa2, 0xa9, 0x11, 0x2f, 0x44, 0xb4, 0x3a, 0x57, 0xb3, 0x25, + 0x18, 0xf3, 0x25, 0x18, 0x9b, 0x25, 0x90, 0x57, 0x05, 0xe4, 0x5d, 0x01, 0xf9, 0x54, 0x40, 0x66, + 0x0a, 0xc8, 0x5c, 0x01, 0xf9, 0x52, 0x40, 0xbe, 0x15, 0x18, 0x1b, 0x05, 0xe4, 0x6d, 0x05, 0xc6, + 0x6c, 0x05, 0xc6, 0x7c, 0x05, 0xc6, 0xbd, 0xdd, 0xe7, 0x92, 0xfb, 0x42, 0xa6, 0x43, 0x31, 0x11, + 0x69, 0x6f, 0x07, 0xdf, 0xf4, 0xe4, 0x27, 0x00, 0x00, 0xff, 0xff, 0xd7, 0xda, 0x08, 0x2e, 0xc4, + 0x01, 0x00, 0x00, } func (x RequestDataType) String() string { diff --git a/dataRetriever/requestData.proto b/dataRetriever/requestData.proto index adc2950bd70..0334ad2e59e 100644 --- a/dataRetriever/requestData.proto +++ b/dataRetriever/requestData.proto @@ -19,6 +19,8 @@ enum RequestDataType { NonceType = 3; // EpochType indicates that the request data object is of type epoch EpochType = 4; + // ChunkType indicates that the request data object is of type chunk + ChunkType = 5; } // RequestData holds the requested data diff --git a/dataRetriever/requestHandlers/requestHandler.go b/dataRetriever/requestHandlers/requestHandler.go index c4d3cc85908..2b1055c61f3 100644 --- a/dataRetriever/requestHandlers/requestHandler.go +++ b/dataRetriever/requestHandlers/requestHandler.go @@ -11,6 +11,7 @@ import ( "github.com/ElrondNetwork/elrond-go-core/core/check" "github.com/ElrondNetwork/elrond-go-core/core/partitioning" "github.com/ElrondNetwork/elrond-go-logger" + "github.com/ElrondNetwork/elrond-go/common" "github.com/ElrondNetwork/elrond-go/dataRetriever" "github.com/ElrondNetwork/elrond-go/epochStart" "github.com/ElrondNetwork/elrond-go/process/factory" @@ -732,3 +733,75 @@ func (rrh *resolverRequestHandler) GetNumPeersToQuery(key string) (int, int, err intra, cross := resolver.NumPeersToQuery() return intra, cross, nil } + +// RequestPeerAuthenticationsChunk asks for a chunk of peer authentication messages from connected peers +func (rrh *resolverRequestHandler) RequestPeerAuthenticationsChunk(destShardID uint32, chunkIndex uint32) { + log.Debug("requesting peer authentication messages from network", + "topic", common.PeerAuthenticationTopic, + "shard", destShardID, + "chunk", chunkIndex, + "epoch", rrh.epoch, + ) + + resolver, err := rrh.resolversFinder.MetaChainResolver(common.PeerAuthenticationTopic) + 
if err != nil { + log.Error("RequestPeerAuthenticationsChunk.MetaChainResolver", + "error", err.Error(), + "topic", common.PeerAuthenticationTopic, + "shard", destShardID, + "chunk", chunkIndex, + "epoch", rrh.epoch, + ) + return + } + + peerAuthResolver, ok := resolver.(dataRetriever.PeerAuthenticationResolver) + if !ok { + log.Warn("wrong assertion type when creating peer authentication resolver") + return + } + + err = peerAuthResolver.RequestDataFromChunk(chunkIndex, rrh.epoch) + if err != nil { + log.Debug("RequestPeerAuthenticationsChunk.RequestDataFromChunk", + "error", err.Error(), + "topic", common.PeerAuthenticationTopic, + "shard", destShardID, + "chunk", chunkIndex, + "epoch", rrh.epoch, + ) + } +} + +// RequestPeerAuthenticationsByHashes asks for peer authentication messages from specific peers hashes +func (rrh *resolverRequestHandler) RequestPeerAuthenticationsByHashes(destShardID uint32, hashes [][]byte) { + log.Debug("requesting peer authentication messages from network", + "topic", common.PeerAuthenticationTopic, + "shard", destShardID, + ) + + resolver, err := rrh.resolversFinder.MetaChainResolver(common.PeerAuthenticationTopic) + if err != nil { + log.Error("RequestPeerAuthenticationsByHashes.MetaChainResolver", + "error", err.Error(), + "topic", common.PeerAuthenticationTopic, + "shard", destShardID, + ) + return + } + + peerAuthResolver, ok := resolver.(dataRetriever.PeerAuthenticationResolver) + if !ok { + log.Warn("wrong assertion type when creating peer authentication resolver") + return + } + + err = peerAuthResolver.RequestDataFromHashArray(hashes, rrh.epoch) + if err != nil { + log.Debug("RequestPeerAuthenticationsByHashes.RequestDataFromHashArray", + "error", err.Error(), + "topic", common.PeerAuthenticationTopic, + "shard", destShardID, + ) + } +} diff --git a/dataRetriever/requestHandlers/requestHandler_test.go b/dataRetriever/requestHandlers/requestHandler_test.go index fa1e573006e..fd2164ee1c0 100644 --- a/dataRetriever/requestHandlers/requestHandler_test.go +++ b/dataRetriever/requestHandlers/requestHandler_test.go @@ -6,6 +6,7 @@ import ( "time" "github.com/ElrondNetwork/elrond-go-core/core" + "github.com/ElrondNetwork/elrond-go/common" "github.com/ElrondNetwork/elrond-go/dataRetriever" "github.com/ElrondNetwork/elrond-go/dataRetriever/mock" "github.com/ElrondNetwork/elrond-go/storage/timecache" @@ -15,6 +16,7 @@ import ( ) var timeoutSendRequests = time.Second * 2 +var errExpected = errors.New("expected error") func createResolversFinderStubThatShouldNotBeCalled(tb testing.TB) *mock.ResolversFinderStub { return &mock.ResolversFinderStub{ @@ -109,7 +111,6 @@ func TestResolverRequestHandler_RequestTransactionErrorWhenGettingCrossShardReso } }() - errExpected := errors.New("expected error") rrh, _ := NewResolverRequestHandler( &mock.ResolversFinderStub{ CrossShardResolverCalled: func(baseTopic string, crossShard uint32) (resolver dataRetriever.Resolver, e error) { @@ -243,7 +244,6 @@ func TestResolverRequestHandler_RequestTransactionErrorsOnRequestShouldNotPanic( } }() - errExpected := errors.New("expected error") chTxRequested := make(chan struct{}) txResolver := &mock.HashSliceResolverStub{ RequestDataFromHashArrayCalled: func(hashes [][]byte, epoch uint32) error { @@ -288,7 +288,6 @@ func TestResolverRequestHandler_RequestMiniBlockErrorWhenGettingCrossShardResolv } }() - errExpected := errors.New("expected error") rrh, _ := NewResolverRequestHandler( &mock.ResolversFinderStub{ CrossShardResolverCalled: func(baseTopic string, crossShard uint32) (resolver 
dataRetriever.Resolver, e error) { @@ -315,7 +314,6 @@ func TestResolverRequestHandler_RequestMiniBlockErrorsOnRequestShouldNotPanic(t } }() - errExpected := errors.New("expected error") mbResolver := &mock.ResolverStub{ RequestDataFromHashCalled: func(hash []byte, epoch uint32) error { return errExpected @@ -597,8 +595,6 @@ func TestResolverRequestHandler_RequestShardHeaderByNonceFinderReturnsErrorShoul } }() - errExpected := errors.New("expected error") - rrh, _ := NewResolverRequestHandler( &mock.ResolversFinderStub{ CrossShardResolverCalled: func(baseTopic string, shardID uint32) (resolver dataRetriever.Resolver, e error) { @@ -625,7 +621,6 @@ func TestResolverRequestHandler_RequestShardHeaderByNonceFinderReturnsAWrongReso } }() - errExpected := errors.New("expected error") hdrResolver := &mock.ResolverStub{ RequestDataFromHashCalled: func(hash []byte, epoch uint32) error { return errExpected @@ -658,7 +653,6 @@ func TestResolverRequestHandler_RequestShardHeaderByNonceResolverFailsShouldNotP } }() - errExpected := errors.New("expected error") hdrResolver := &mock.HeaderResolverStub{ RequestDataFromHashCalled: func(hash []byte, epoch uint32) error { return errExpected @@ -772,7 +766,6 @@ func TestResolverRequestHandler_RequestScrErrorWhenGettingCrossShardResolverShou } }() - errExpected := errors.New("expected error") rrh, _ := NewResolverRequestHandler( &mock.ResolversFinderStub{ CrossShardResolverCalled: func(baseTopic string, crossShard uint32) (resolver dataRetriever.Resolver, e error) { @@ -862,7 +855,6 @@ func TestResolverRequestHandler_RequestScrErrorsOnRequestShouldNotPanic(t *testi } }() - errExpected := errors.New("expected error") chTxRequested := make(chan struct{}) txResolver := &mock.HashSliceResolverStub{ RequestDataFromHashArrayCalled: func(hashes [][]byte, epoch uint32) error { @@ -1205,3 +1197,253 @@ func TestResolverRequestHandler_RequestTrieNodeNotAValidResolver(t *testing.T) { rrh.RequestTrieNode([]byte("hash"), "topic", 1) assert.True(t, called) } + +//------- RequestPeerAuthentications + +func TestResolverRequestHandler_RequestPeerAuthenticationsChunk(t *testing.T) { + t.Parallel() + + providedChunkId := uint32(123) + providedShardId := uint32(15) + t.Run("CrossShardResolver returns error", func(t *testing.T) { + t.Parallel() + + wasCalled := false + paResolver := &mock.PeerAuthenticationResolverStub{ + RequestDataFromChunkCalled: func(chunkIndex uint32, epoch uint32) error { + wasCalled = true + return nil + }, + } + rrh, _ := NewResolverRequestHandler( + &mock.ResolversFinderStub{ + MetaChainResolverCalled: func(baseTopic string) (dataRetriever.Resolver, error) { + assert.Equal(t, common.PeerAuthenticationTopic, baseTopic) + return paResolver, errExpected + }, + }, + &mock.RequestedItemsHandlerStub{}, + &mock.WhiteListHandlerStub{}, + 1, + 0, + time.Second, + ) + + rrh.RequestPeerAuthenticationsChunk(providedShardId, providedChunkId) + assert.False(t, wasCalled) + }) + t.Run("cast fails", func(t *testing.T) { + t.Parallel() + + wasCalled := false + mbResolver := &mock.ResolverStub{ + RequestDataFromHashCalled: func(hash []byte, epoch uint32) error { + wasCalled = true + return nil + }, + } + rrh, _ := NewResolverRequestHandler( + &mock.ResolversFinderStub{ + MetaChainResolverCalled: func(baseTopic string) (dataRetriever.Resolver, error) { + assert.Equal(t, common.PeerAuthenticationTopic, baseTopic) + return mbResolver, nil + }, + }, + &mock.RequestedItemsHandlerStub{}, + &mock.WhiteListHandlerStub{}, + 1, + 0, + time.Second, + ) + + 
rrh.RequestPeerAuthenticationsChunk(providedShardId, providedChunkId) + assert.False(t, wasCalled) + }) + t.Run("RequestDataFromChunk returns error", func(t *testing.T) { + t.Parallel() + + wasCalled := false + paResolver := &mock.PeerAuthenticationResolverStub{ + RequestDataFromChunkCalled: func(chunkIndex uint32, epoch uint32) error { + wasCalled = true + assert.Equal(t, providedChunkId, chunkIndex) + return errExpected + }, + } + rrh, _ := NewResolverRequestHandler( + &mock.ResolversFinderStub{ + MetaChainResolverCalled: func(baseTopic string) (dataRetriever.Resolver, error) { + assert.Equal(t, common.PeerAuthenticationTopic, baseTopic) + return paResolver, nil + }, + }, + &mock.RequestedItemsHandlerStub{}, + &mock.WhiteListHandlerStub{}, + 1, + 0, + time.Second, + ) + + rrh.RequestPeerAuthenticationsChunk(providedShardId, providedChunkId) + assert.True(t, wasCalled) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + defer func() { + r := recover() + if r != nil { + assert.Fail(t, "should not panic") + } + }() + + wasCalled := false + paResolver := &mock.PeerAuthenticationResolverStub{ + RequestDataFromChunkCalled: func(chunkIndex uint32, epoch uint32) error { + wasCalled = true + assert.Equal(t, providedChunkId, chunkIndex) + return nil + }, + } + rrh, _ := NewResolverRequestHandler( + &mock.ResolversFinderStub{ + MetaChainResolverCalled: func(baseTopic string) (dataRetriever.Resolver, error) { + assert.Equal(t, common.PeerAuthenticationTopic, baseTopic) + return paResolver, nil + }, + }, + &mock.RequestedItemsHandlerStub{}, + &mock.WhiteListHandlerStub{}, + 1, + 0, + time.Second, + ) + + rrh.RequestPeerAuthenticationsChunk(providedShardId, providedChunkId) + assert.True(t, wasCalled) + }) +} + +func TestResolverRequestHandler_RequestPeerAuthenticationsByHashes(t *testing.T) { + t.Parallel() + + providedHashes := [][]byte{[]byte("h1"), []byte("h2")} + providedShardId := uint32(15) + t.Run("CrossShardResolver returns error", func(t *testing.T) { + t.Parallel() + + wasCalled := false + paResolver := &mock.PeerAuthenticationResolverStub{ + RequestDataFromChunkCalled: func(chunkIndex uint32, epoch uint32) error { + wasCalled = true + return nil + }, + } + rrh, _ := NewResolverRequestHandler( + &mock.ResolversFinderStub{ + MetaChainResolverCalled: func(baseTopic string) (dataRetriever.Resolver, error) { + assert.Equal(t, common.PeerAuthenticationTopic, baseTopic) + return paResolver, errExpected + }, + }, + &mock.RequestedItemsHandlerStub{}, + &mock.WhiteListHandlerStub{}, + 1, + 0, + time.Second, + ) + + rrh.RequestPeerAuthenticationsByHashes(providedShardId, providedHashes) + assert.False(t, wasCalled) + }) + t.Run("cast fails", func(t *testing.T) { + t.Parallel() + + wasCalled := false + mbResolver := &mock.ResolverStub{ + RequestDataFromHashCalled: func(hash []byte, epoch uint32) error { + wasCalled = true + return nil + }, + } + rrh, _ := NewResolverRequestHandler( + &mock.ResolversFinderStub{ + MetaChainResolverCalled: func(baseTopic string) (dataRetriever.Resolver, error) { + assert.Equal(t, common.PeerAuthenticationTopic, baseTopic) + return mbResolver, errExpected + }, + }, + &mock.RequestedItemsHandlerStub{}, + &mock.WhiteListHandlerStub{}, + 1, + 0, + time.Second, + ) + + rrh.RequestPeerAuthenticationsByHashes(providedShardId, providedHashes) + assert.False(t, wasCalled) + }) + t.Run("RequestDataFromHashArray returns error", func(t *testing.T) { + t.Parallel() + + wasCalled := false + paResolver := &mock.PeerAuthenticationResolverStub{ + 
RequestDataFromHashArrayCalled: func(hashes [][]byte, epoch uint32) error { + wasCalled = true + assert.Equal(t, providedHashes, hashes) + return errExpected + }, + } + rrh, _ := NewResolverRequestHandler( + &mock.ResolversFinderStub{ + MetaChainResolverCalled: func(baseTopic string) (dataRetriever.Resolver, error) { + assert.Equal(t, common.PeerAuthenticationTopic, baseTopic) + return paResolver, nil + }, + }, + &mock.RequestedItemsHandlerStub{}, + &mock.WhiteListHandlerStub{}, + 1, + 0, + time.Second, + ) + + rrh.RequestPeerAuthenticationsByHashes(providedShardId, providedHashes) + assert.True(t, wasCalled) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + defer func() { + r := recover() + if r != nil { + assert.Fail(t, "should not panic") + } + }() + + wasCalled := false + paResolver := &mock.PeerAuthenticationResolverStub{ + RequestDataFromHashArrayCalled: func(hashes [][]byte, epoch uint32) error { + wasCalled = true + assert.Equal(t, providedHashes, hashes) + return nil + }, + } + rrh, _ := NewResolverRequestHandler( + &mock.ResolversFinderStub{ + MetaChainResolverCalled: func(baseTopic string) (dataRetriever.Resolver, error) { + assert.Equal(t, common.PeerAuthenticationTopic, baseTopic) + return paResolver, nil + }, + }, + &mock.RequestedItemsHandlerStub{}, + &mock.WhiteListHandlerStub{}, + 1, + 0, + time.Second, + ) + + rrh.RequestPeerAuthenticationsByHashes(providedShardId, providedHashes) + assert.True(t, wasCalled) + }) +} diff --git a/dataRetriever/resolvers/baseResolver.go b/dataRetriever/resolvers/baseResolver.go new file mode 100644 index 00000000000..80ee5379218 --- /dev/null +++ b/dataRetriever/resolvers/baseResolver.go @@ -0,0 +1,55 @@ +package resolvers + +import ( + "github.com/ElrondNetwork/elrond-go-core/core/check" + "github.com/ElrondNetwork/elrond-go-core/marshal" + "github.com/ElrondNetwork/elrond-go/dataRetriever" +) + +// ArgBaseResolver is the argument structure used as base to create a new a resolver instance +type ArgBaseResolver struct { + SenderResolver dataRetriever.TopicResolverSender + Marshaller marshal.Marshalizer + AntifloodHandler dataRetriever.P2PAntifloodHandler + Throttler dataRetriever.ResolverThrottler +} + +type baseResolver struct { + dataRetriever.TopicResolverSender +} + +func checkArgBase(arg ArgBaseResolver) error { + if check.IfNil(arg.SenderResolver) { + return dataRetriever.ErrNilResolverSender + } + if check.IfNil(arg.Marshaller) { + return dataRetriever.ErrNilMarshalizer + } + if check.IfNil(arg.AntifloodHandler) { + return dataRetriever.ErrNilAntifloodHandler + } + if check.IfNil(arg.Throttler) { + return dataRetriever.ErrNilThrottler + } + return nil +} + +// SetNumPeersToQuery will set the number of intra shard and cross shard number of peer to query +func (res *baseResolver) SetNumPeersToQuery(intra int, cross int) { + res.TopicResolverSender.SetNumPeersToQuery(intra, cross) +} + +// NumPeersToQuery will return the number of intra shard and cross shard number of peer to query +func (res *baseResolver) NumPeersToQuery() (int, int) { + return res.TopicResolverSender.NumPeersToQuery() +} + +// SetResolverDebugHandler will set a resolver debug handler +func (res *baseResolver) SetResolverDebugHandler(handler dataRetriever.ResolverDebugHandler) error { + return res.TopicResolverSender.SetResolverDebugHandler(handler) +} + +// Close returns nil +func (res *baseResolver) Close() error { + return nil +} diff --git a/dataRetriever/resolvers/common_test.go b/dataRetriever/resolvers/common_test.go index 
32a976e4b12..b7311e7eee4 100644 --- a/dataRetriever/resolvers/common_test.go +++ b/dataRetriever/resolvers/common_test.go @@ -11,3 +11,14 @@ func createRequestMsg(dataType dataRetriever.RequestDataType, val []byte) p2p.Me buff, _ := marshalizer.Marshal(&dataRetriever.RequestData{Type: dataType, Value: val}) return &mock.P2PMessageMock{DataField: buff} } + +func createRequestMsgWithChunkIndex(dataType dataRetriever.RequestDataType, val []byte, epoch uint32, chunkIndex uint32) p2p.MessageP2P { + marshalizer := &mock.MarshalizerMock{} + buff, _ := marshalizer.Marshal(&dataRetriever.RequestData{ + Type: dataType, + Value: val, + Epoch: epoch, + ChunkIndex: chunkIndex, + }) + return &mock.P2PMessageMock{DataField: buff} +} diff --git a/dataRetriever/resolvers/headerResolver.go b/dataRetriever/resolvers/headerResolver.go index 438f594cf37..eaa93ce3f67 100644 --- a/dataRetriever/resolvers/headerResolver.go +++ b/dataRetriever/resolvers/headerResolver.go @@ -4,7 +4,6 @@ import ( "github.com/ElrondNetwork/elrond-go-core/core" "github.com/ElrondNetwork/elrond-go-core/core/check" "github.com/ElrondNetwork/elrond-go-core/data/typeConverters" - "github.com/ElrondNetwork/elrond-go-core/marshal" "github.com/ElrondNetwork/elrond-go-logger" "github.com/ElrondNetwork/elrond-go/dataRetriever" "github.com/ElrondNetwork/elrond-go/dataRetriever/resolvers/epochproviders/disabled" @@ -19,22 +18,19 @@ var _ dataRetriever.HeaderResolver = (*HeaderResolver)(nil) // ArgHeaderResolver is the argument structure used to create new HeaderResolver instance type ArgHeaderResolver struct { - SenderResolver dataRetriever.TopicResolverSender + ArgBaseResolver Headers dataRetriever.HeadersPool HdrStorage storage.Storer HeadersNoncesStorage storage.Storer - Marshalizer marshal.Marshalizer NonceConverter typeConverters.Uint64ByteSliceConverter ShardCoordinator sharding.Coordinator - AntifloodHandler dataRetriever.P2PAntifloodHandler - Throttler dataRetriever.ResolverThrottler IsFullHistoryNode bool } // HeaderResolver is a wrapper over Resolver that is specialized in resolving headers requests type HeaderResolver struct { + *baseResolver baseStorageResolver - dataRetriever.TopicResolverSender messageProcessor headers dataRetriever.HeadersPool hdrNoncesStorage storage.Storer @@ -45,37 +41,16 @@ type HeaderResolver struct { // NewHeaderResolver creates a new header resolver func NewHeaderResolver(arg ArgHeaderResolver) (*HeaderResolver, error) { - if check.IfNil(arg.SenderResolver) { - return nil, dataRetriever.ErrNilResolverSender - } - if check.IfNil(arg.Headers) { - return nil, dataRetriever.ErrNilHeadersDataPool - } - if check.IfNil(arg.HdrStorage) { - return nil, dataRetriever.ErrNilHeadersStorage - } - if check.IfNil(arg.HeadersNoncesStorage) { - return nil, dataRetriever.ErrNilHeadersNoncesStorage - } - if check.IfNil(arg.Marshalizer) { - return nil, dataRetriever.ErrNilMarshalizer - } - if check.IfNil(arg.NonceConverter) { - return nil, dataRetriever.ErrNilUint64ByteSliceConverter - } - if check.IfNil(arg.ShardCoordinator) { - return nil, dataRetriever.ErrNilShardCoordinator - } - if check.IfNil(arg.AntifloodHandler) { - return nil, dataRetriever.ErrNilAntifloodHandler - } - if check.IfNil(arg.Throttler) { - return nil, dataRetriever.ErrNilThrottler + err := checkArgHeaderResolver(arg) + if err != nil { + return nil, err } epochHandler := disabled.NewEpochHandler() hdrResolver := &HeaderResolver{ - TopicResolverSender: arg.SenderResolver, + baseResolver: &baseResolver{ + TopicResolverSender: arg.SenderResolver, + }, 
headers: arg.Headers, baseStorageResolver: createBaseStorageResolver(arg.HdrStorage, arg.IsFullHistoryNode), hdrNoncesStorage: arg.HeadersNoncesStorage, @@ -83,7 +58,7 @@ func NewHeaderResolver(arg ArgHeaderResolver) (*HeaderResolver, error) { epochHandler: epochHandler, shardCoordinator: arg.ShardCoordinator, messageProcessor: messageProcessor{ - marshalizer: arg.Marshalizer, + marshalizer: arg.Marshaller, antifloodHandler: arg.AntifloodHandler, topic: arg.SenderResolver.RequestTopic(), throttler: arg.Throttler, @@ -93,6 +68,29 @@ func NewHeaderResolver(arg ArgHeaderResolver) (*HeaderResolver, error) { return hdrResolver, nil } +func checkArgHeaderResolver(arg ArgHeaderResolver) error { + err := checkArgBase(arg.ArgBaseResolver) + if err != nil { + return err + } + if check.IfNil(arg.Headers) { + return dataRetriever.ErrNilHeadersDataPool + } + if check.IfNil(arg.HdrStorage) { + return dataRetriever.ErrNilHeadersStorage + } + if check.IfNil(arg.HeadersNoncesStorage) { + return dataRetriever.ErrNilHeadersNoncesStorage + } + if check.IfNil(arg.NonceConverter) { + return dataRetriever.ErrNilUint64ByteSliceConverter + } + if check.IfNil(arg.ShardCoordinator) { + return dataRetriever.ErrNilShardCoordinator + } + return nil +} + // SetEpochHandler sets the epoch handler for this component func (hdrRes *HeaderResolver) SetEpochHandler(epochHandler dataRetriever.EpochHandler) error { if check.IfNil(epochHandler) { @@ -264,26 +262,6 @@ func (hdrRes *HeaderResolver) RequestDataFromEpoch(identifier []byte) error { ) } -// SetNumPeersToQuery will set the number of intra shard and cross shard number of peer to query -func (hdrRes *HeaderResolver) SetNumPeersToQuery(intra int, cross int) { - hdrRes.TopicResolverSender.SetNumPeersToQuery(intra, cross) -} - -// NumPeersToQuery will return the number of intra shard and cross shard number of peer to query -func (hdrRes *HeaderResolver) NumPeersToQuery() (int, int) { - return hdrRes.TopicResolverSender.NumPeersToQuery() -} - -// SetResolverDebugHandler will set a resolver debug handler -func (hdrRes *HeaderResolver) SetResolverDebugHandler(handler dataRetriever.ResolverDebugHandler) error { - return hdrRes.TopicResolverSender.SetResolverDebugHandler(handler) -} - -// Close returns nil -func (hdrRes *HeaderResolver) Close() error { - return nil -} - // IsInterfaceNil returns true if there is no value under the interface func (hdrRes *HeaderResolver) IsInterfaceNil() bool { return hdrRes == nil diff --git a/dataRetriever/resolvers/headerResolver_test.go b/dataRetriever/resolvers/headerResolver_test.go index 3152d6729ff..47503846e44 100644 --- a/dataRetriever/resolvers/headerResolver_test.go +++ b/dataRetriever/resolvers/headerResolver_test.go @@ -17,17 +17,23 @@ import ( "github.com/stretchr/testify/assert" ) +func createMockArgBaseResolver() resolvers.ArgBaseResolver { + return resolvers.ArgBaseResolver{ + SenderResolver: &mock.TopicResolverSenderStub{}, + Marshaller: &mock.MarshalizerMock{}, + AntifloodHandler: &mock.P2PAntifloodHandlerStub{}, + Throttler: &mock.ThrottlerStub{}, + } +} + func createMockArgHeaderResolver() resolvers.ArgHeaderResolver { return resolvers.ArgHeaderResolver{ - SenderResolver: &mock.TopicResolverSenderStub{}, + ArgBaseResolver: createMockArgBaseResolver(), Headers: &mock.HeadersCacherStub{}, HdrStorage: &storageStubs.StorerStub{}, HeadersNoncesStorage: &storageStubs.StorerStub{}, - Marshalizer: &mock.MarshalizerMock{}, NonceConverter: mock.NewNonceHashConverterMock(), ShardCoordinator: mock.NewOneShardCoordinatorMock(), - 
AntifloodHandler: &mock.P2PAntifloodHandlerStub{}, - Throttler: &mock.ThrottlerStub{}, } } @@ -83,7 +89,7 @@ func TestNewHeaderResolver_NilMarshalizerShouldErr(t *testing.T) { t.Parallel() arg := createMockArgHeaderResolver() - arg.Marshalizer = nil + arg.Marshaller = nil hdrRes, err := resolvers.NewHeaderResolver(arg) assert.Equal(t, dataRetriever.ErrNilMarshalizer, err) @@ -312,7 +318,7 @@ func TestHeaderResolver_ProcessReceivedMessageRequestHashTypeFoundInHdrPoolMarsh return nil }, } - arg.Marshalizer = marshalizerStub + arg.Marshaller = marshalizerStub arg.Headers = headers hdrRes, _ := resolvers.NewHeaderResolver(arg) @@ -394,7 +400,7 @@ func TestHeaderResolver_ProcessReceivedMessageRequestNonceShouldCallWithTheCorre } hdrRes, _ := resolvers.NewHeaderResolver(arg) - buff, _ := arg.Marshalizer.Marshal( + buff, _ := arg.Marshaller.Marshal( &dataRetriever.RequestData{ Type: dataRetriever.NonceType, Value: []byte("aaa"), diff --git a/dataRetriever/resolvers/miniblockResolver.go b/dataRetriever/resolvers/miniblockResolver.go index 9235fddd2ea..29e6c1c56da 100644 --- a/dataRetriever/resolvers/miniblockResolver.go +++ b/dataRetriever/resolvers/miniblockResolver.go @@ -6,7 +6,6 @@ import ( "github.com/ElrondNetwork/elrond-go-core/core" "github.com/ElrondNetwork/elrond-go-core/core/check" "github.com/ElrondNetwork/elrond-go-core/data/batch" - "github.com/ElrondNetwork/elrond-go-core/marshal" logger "github.com/ElrondNetwork/elrond-go-logger" "github.com/ElrondNetwork/elrond-go/dataRetriever" "github.com/ElrondNetwork/elrond-go/dataRetriever/requestHandlers" @@ -19,12 +18,9 @@ var _ requestHandlers.HashSliceResolver = (*miniblockResolver)(nil) // ArgMiniblockResolver is the argument structure used to create a new miniblockResolver instance type ArgMiniblockResolver struct { - SenderResolver dataRetriever.TopicResolverSender + ArgBaseResolver MiniBlockPool storage.Cacher MiniBlockStorage storage.Storer - Marshalizer marshal.Marshalizer - AntifloodHandler dataRetriever.P2PAntifloodHandler - Throttler dataRetriever.ResolverThrottler DataPacker dataRetriever.DataPacker IsFullHistoryNode bool } @@ -32,7 +28,7 @@ type ArgMiniblockResolver struct { // miniblockResolver is a wrapper over Resolver that is specialized in resolving miniblocks requests // TODO extract common functionality between this and transactionResolver type miniblockResolver struct { - dataRetriever.TopicResolverSender + *baseResolver messageProcessor baseStorageResolver miniBlockPool storage.Cacher @@ -41,35 +37,20 @@ type miniblockResolver struct { // NewMiniblockResolver creates a miniblock resolver func NewMiniblockResolver(arg ArgMiniblockResolver) (*miniblockResolver, error) { - if check.IfNil(arg.SenderResolver) { - return nil, dataRetriever.ErrNilResolverSender - } - if check.IfNil(arg.MiniBlockPool) { - return nil, dataRetriever.ErrNilMiniblocksPool - } - if check.IfNil(arg.MiniBlockStorage) { - return nil, dataRetriever.ErrNilMiniblocksStorage - } - if check.IfNil(arg.Marshalizer) { - return nil, dataRetriever.ErrNilMarshalizer - } - if check.IfNil(arg.AntifloodHandler) { - return nil, dataRetriever.ErrNilAntifloodHandler - } - if check.IfNil(arg.Throttler) { - return nil, dataRetriever.ErrNilThrottler - } - if check.IfNil(arg.DataPacker) { - return nil, dataRetriever.ErrNilDataPacker + err := checkArgMiniblockResolver(arg) + if err != nil { + return nil, err } mbResolver := &miniblockResolver{ - TopicResolverSender: arg.SenderResolver, + baseResolver: &baseResolver{ + TopicResolverSender: arg.SenderResolver, + }, 
miniBlockPool: arg.MiniBlockPool, baseStorageResolver: createBaseStorageResolver(arg.MiniBlockStorage, arg.IsFullHistoryNode), dataPacker: arg.DataPacker, messageProcessor: messageProcessor{ - marshalizer: arg.Marshalizer, + marshalizer: arg.Marshaller, antifloodHandler: arg.AntifloodHandler, topic: arg.SenderResolver.RequestTopic(), throttler: arg.Throttler, @@ -79,6 +60,23 @@ func NewMiniblockResolver(arg ArgMiniblockResolver) (*miniblockResolver, error) return mbResolver, nil } +func checkArgMiniblockResolver(arg ArgMiniblockResolver) error { + err := checkArgBase(arg.ArgBaseResolver) + if err != nil { + return err + } + if check.IfNil(arg.MiniBlockPool) { + return dataRetriever.ErrNilMiniblocksPool + } + if check.IfNil(arg.MiniBlockStorage) { + return dataRetriever.ErrNilMiniblocksStorage + } + if check.IfNil(arg.DataPacker) { + return dataRetriever.ErrNilDataPacker + } + return nil +} + // ProcessReceivedMessage will be the callback func from the p2p.Messenger and will be called each time a new message was received // (for the topic this validator was registered to, usually a request topic) func (mbRes *miniblockResolver) ProcessReceivedMessage(message p2p.MessageP2P, fromConnectedPeer core.PeerID) error { @@ -227,26 +225,6 @@ func (mbRes *miniblockResolver) RequestDataFromHashArray(hashes [][]byte, epoch ) } -// SetNumPeersToQuery will set the number of intra shard and cross shard number of peer to query -func (mbRes *miniblockResolver) SetNumPeersToQuery(intra int, cross int) { - mbRes.TopicResolverSender.SetNumPeersToQuery(intra, cross) -} - -// NumPeersToQuery will return the number of intra shard and cross shard number of peer to query -func (mbRes *miniblockResolver) NumPeersToQuery() (int, int) { - return mbRes.TopicResolverSender.NumPeersToQuery() -} - -// SetResolverDebugHandler will set a resolver debug handler -func (mbRes *miniblockResolver) SetResolverDebugHandler(handler dataRetriever.ResolverDebugHandler) error { - return mbRes.TopicResolverSender.SetResolverDebugHandler(handler) -} - -// Close returns nil -func (mbRes *miniblockResolver) Close() error { - return nil -} - // IsInterfaceNil returns true if there is no value under the interface func (mbRes *miniblockResolver) IsInterfaceNil() bool { return mbRes == nil diff --git a/dataRetriever/resolvers/miniblockResolver_test.go b/dataRetriever/resolvers/miniblockResolver_test.go index 22155b16577..8599b3c2b39 100644 --- a/dataRetriever/resolvers/miniblockResolver_test.go +++ b/dataRetriever/resolvers/miniblockResolver_test.go @@ -23,12 +23,9 @@ var fromConnectedPeerId = core.PeerID("from connected peer Id") func createMockArgMiniblockResolver() resolvers.ArgMiniblockResolver { return resolvers.ArgMiniblockResolver{ - SenderResolver: &mock.TopicResolverSenderStub{}, + ArgBaseResolver: createMockArgBaseResolver(), MiniBlockPool: testscommon.NewCacherStub(), MiniBlockStorage: &storageStubs.StorerStub{}, - Marshalizer: &mock.MarshalizerMock{}, - AntifloodHandler: &mock.P2PAntifloodHandlerStub{}, - Throttler: &mock.ThrottlerStub{}, DataPacker: &mock.DataPackerStub{}, } } @@ -72,7 +69,7 @@ func TestNewMiniblockResolver_NilBlockMarshalizerShouldErr(t *testing.T) { t.Parallel() arg := createMockArgMiniblockResolver() - arg.Marshalizer = nil + arg.Marshaller = nil mbRes, err := resolvers.NewMiniblockResolver(arg) assert.Equal(t, dataRetriever.ErrNilMarshalizer, err) @@ -115,7 +112,7 @@ func TestMiniblockResolver_RequestDataFromHashArrayMarshalErr(t *testing.T) { t.Parallel() arg := createMockArgMiniblockResolver() - 
arg.Marshalizer.(*mock.MarshalizerMock).Fail = true + arg.Marshaller.(*mock.MarshalizerMock).Fail = true mbRes, err := resolvers.NewMiniblockResolver(arg) assert.Nil(t, err) @@ -277,7 +274,7 @@ func TestMiniblockResolver_ProcessReceivedMessageFoundInPoolMarshalizerFailShoul return buff, nil }, } - arg.Marshalizer = marshalizer + arg.Marshaller = marshalizer mbRes, _ := resolvers.NewMiniblockResolver(arg) err := mbRes.ProcessReceivedMessage( diff --git a/dataRetriever/resolvers/peerAuthenticationResolver.go b/dataRetriever/resolvers/peerAuthenticationResolver.go new file mode 100644 index 00000000000..4c09eeb4fd9 --- /dev/null +++ b/dataRetriever/resolvers/peerAuthenticationResolver.go @@ -0,0 +1,317 @@ +package resolvers + +import ( + "bytes" + "encoding/binary" + "fmt" + "sort" + + "github.com/ElrondNetwork/elrond-go-core/core" + "github.com/ElrondNetwork/elrond-go-core/core/check" + "github.com/ElrondNetwork/elrond-go-core/data/batch" + logger "github.com/ElrondNetwork/elrond-go-logger" + "github.com/ElrondNetwork/elrond-go/dataRetriever" + "github.com/ElrondNetwork/elrond-go/p2p" + "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/storage" +) + +// maxBuffToSendPeerAuthentications represents max buffer size to send in bytes +const maxBuffToSendPeerAuthentications = 1 << 18 // 256KB + +const minNumOfPeerAuthentication = 5 +const bytesInUint32 = 4 + +// ArgPeerAuthenticationResolver is the argument structure used to create a new peer authentication resolver instance +type ArgPeerAuthenticationResolver struct { + ArgBaseResolver + PeerAuthenticationPool storage.Cacher + NodesCoordinator dataRetriever.NodesCoordinator + PeerShardMapper process.PeerShardMapper + DataPacker dataRetriever.DataPacker + MaxNumOfPeerAuthenticationInResponse int +} + +// peerAuthenticationResolver is a wrapper over Resolver that is specialized in resolving peer authentication requests +type peerAuthenticationResolver struct { + *baseResolver + messageProcessor + peerAuthenticationPool storage.Cacher + nodesCoordinator dataRetriever.NodesCoordinator + peerShardMapper process.PeerShardMapper + dataPacker dataRetriever.DataPacker + maxNumOfPeerAuthenticationInResponse int +} + +// NewPeerAuthenticationResolver creates a peer authentication resolver +func NewPeerAuthenticationResolver(arg ArgPeerAuthenticationResolver) (*peerAuthenticationResolver, error) { + err := checkArgPeerAuthenticationResolver(arg) + if err != nil { + return nil, err + } + + return &peerAuthenticationResolver{ + baseResolver: &baseResolver{ + TopicResolverSender: arg.SenderResolver, + }, + messageProcessor: messageProcessor{ + marshalizer: arg.Marshaller, + antifloodHandler: arg.AntifloodHandler, + throttler: arg.Throttler, + topic: arg.SenderResolver.RequestTopic(), + }, + peerAuthenticationPool: arg.PeerAuthenticationPool, + nodesCoordinator: arg.NodesCoordinator, + peerShardMapper: arg.PeerShardMapper, + dataPacker: arg.DataPacker, + maxNumOfPeerAuthenticationInResponse: arg.MaxNumOfPeerAuthenticationInResponse, + }, nil +} + +func checkArgPeerAuthenticationResolver(arg ArgPeerAuthenticationResolver) error { + err := checkArgBase(arg.ArgBaseResolver) + if err != nil { + return err + } + if check.IfNil(arg.PeerAuthenticationPool) { + return dataRetriever.ErrNilPeerAuthenticationPool + } + if check.IfNil(arg.NodesCoordinator) { + return dataRetriever.ErrNilNodesCoordinator + } + if check.IfNil(arg.PeerShardMapper) { + return dataRetriever.ErrNilPeerShardMapper + } + if check.IfNil(arg.DataPacker) { + return 
dataRetriever.ErrNilDataPacker + } + if arg.MaxNumOfPeerAuthenticationInResponse < minNumOfPeerAuthentication { + return dataRetriever.ErrInvalidNumOfPeerAuthentication + } + return nil +} + +// RequestDataFromHash requests peer authentication data from other peers having input a public key hash +func (res *peerAuthenticationResolver) RequestDataFromHash(hash []byte, _ uint32) error { + return res.SendOnRequestTopic( + &dataRetriever.RequestData{ + Type: dataRetriever.HashType, + Value: hash, + }, + [][]byte{hash}, + ) +} + +// RequestDataFromChunk requests peer authentication data from other peers having input a chunk index +func (res *peerAuthenticationResolver) RequestDataFromChunk(chunkIndex uint32, epoch uint32) error { + chunkBuffer := make([]byte, bytesInUint32) + binary.BigEndian.PutUint32(chunkBuffer, chunkIndex) + + b := &batch.Batch{ + Data: make([][]byte, 1), + } + b.Data[0] = chunkBuffer + + dataBuff, err := res.marshalizer.Marshal(b) + if err != nil { + return err + } + + return res.SendOnRequestTopic( + &dataRetriever.RequestData{ + Type: dataRetriever.ChunkType, + ChunkIndex: chunkIndex, + Epoch: epoch, + Value: dataBuff, + }, + [][]byte{chunkBuffer}, + ) +} + +// RequestDataFromHashArray requests peer authentication data from other peers having input multiple public key hashes +func (res *peerAuthenticationResolver) RequestDataFromHashArray(hashes [][]byte, _ uint32) error { + b := &batch.Batch{ + Data: hashes, + } + buffHashes, err := res.marshalizer.Marshal(b) + if err != nil { + return err + } + + return res.SendOnRequestTopic( + &dataRetriever.RequestData{ + Type: dataRetriever.HashArrayType, + Value: buffHashes, + }, + hashes, + ) +} + +// ProcessReceivedMessage represents the callback func from the p2p.Messenger that is called each time a new message is received +// (for the topic this validator was registered to, usually a request topic) +func (res *peerAuthenticationResolver) ProcessReceivedMessage(message p2p.MessageP2P, fromConnectedPeer core.PeerID) error { + err := res.canProcessMessage(message, fromConnectedPeer) + if err != nil { + return err + } + + res.throttler.StartProcessing() + defer res.throttler.EndProcessing() + + rd, err := res.parseReceivedMessage(message, fromConnectedPeer) + if err != nil { + return err + } + + switch rd.Type { + case dataRetriever.ChunkType: + return res.resolveChunkRequest(int(rd.ChunkIndex), rd.Epoch, message.Peer()) + case dataRetriever.HashArrayType: + return res.resolveMultipleHashesRequest(rd.Value, message.Peer()) + default: + err = dataRetriever.ErrRequestTypeNotImplemented + } + if err != nil { + err = fmt.Errorf("%w for value %s", err, logger.DisplayByteSlice(rd.Value)) + } + + return err +} + +// resolveChunkRequest sends the response for a chunk request +func (res *peerAuthenticationResolver) resolveChunkRequest(chunkIndex int, epoch uint32, pid core.PeerID) error { + sortedPKs, err := res.getSortedValidatorsKeys(epoch) + if err != nil { + return err + } + if len(sortedPKs) == 0 { + return nil + } + + maxChunks := res.getMaxChunks(sortedPKs) + pksChunk, err := res.extractChunk(sortedPKs, chunkIndex, res.maxNumOfPeerAuthenticationInResponse, maxChunks) + if err != nil { + return err + } + + peerAuthsForChunk, err := res.fetchPeerAuthenticationSlicesForPublicKeys(pksChunk) + if err != nil { + return fmt.Errorf("resolveChunkRequest error %w from chunk %d", err, chunkIndex) + } + + return res.sendPeerAuthsForHashes(peerAuthsForChunk, pid) +} + +// getSortedValidatorsKeys returns the sorted slice of validators keys from 
all shards +func (res *peerAuthenticationResolver) getSortedValidatorsKeys(epoch uint32) ([][]byte, error) { + validatorsPKsMap, err := res.nodesCoordinator.GetAllEligibleValidatorsPublicKeys(epoch) + if err != nil { + return nil, err + } + + validatorsPKs := make([][]byte, 0) + for _, shardValidators := range validatorsPKsMap { + validatorsPKs = append(validatorsPKs, shardValidators...) + } + + sort.Slice(validatorsPKs, func(i, j int) bool { + return bytes.Compare(validatorsPKs[i], validatorsPKs[j]) < 0 + }) + + return validatorsPKs, nil +} + +// extractChunk returns the chunk from dataBuff at the specified index +func (res *peerAuthenticationResolver) extractChunk(dataBuff [][]byte, chunkIndex int, chunkSize int, maxChunks int) ([][]byte, error) { + chunkIndexOutOfBounds := chunkIndex < 0 || chunkIndex >= maxChunks + if chunkIndexOutOfBounds { + return nil, dataRetriever.InvalidChunkIndex + } + + startingIndex := chunkIndex * chunkSize + endIndex := startingIndex + chunkSize + if endIndex > len(dataBuff) { + endIndex = len(dataBuff) + } + return dataBuff[startingIndex:endIndex], nil +} + +// resolveMultipleHashesRequest sends the response for multiple hashes request +func (res *peerAuthenticationResolver) resolveMultipleHashesRequest(hashesBuff []byte, pid core.PeerID) error { + b := batch.Batch{} + err := res.marshalizer.Unmarshal(&b, hashesBuff) + if err != nil { + return err + } + hashes := b.Data + + peerAuthsForHashes, err := res.fetchPeerAuthenticationSlicesForPublicKeys(hashes) + if err != nil { + return fmt.Errorf("resolveMultipleHashesRequest error %w from buff %s", err, hashesBuff) + } + + return res.sendPeerAuthsForHashes(peerAuthsForHashes, pid) +} + +// sendPeerAuthsForHashes sends multiple peer authentication messages for specific hashes +func (res *peerAuthenticationResolver) sendPeerAuthsForHashes(dataBuff [][]byte, pid core.PeerID) error { + buffsToSend, err := res.dataPacker.PackDataInChunks(dataBuff, maxBuffToSendPeerAuthentications) + if err != nil { + return err + } + + for _, buff := range buffsToSend { + err = res.Send(buff, pid) + if err != nil { + return err + } + } + + return nil +} + +// getMaxChunks returns the max num of chunks from a buffer +func (res *peerAuthenticationResolver) getMaxChunks(dataBuff [][]byte) int { + maxChunks := len(dataBuff) / res.maxNumOfPeerAuthenticationInResponse + if len(dataBuff)%res.maxNumOfPeerAuthenticationInResponse != 0 { + maxChunks++ + } + return maxChunks +} + +// fetchPeerAuthenticationSlicesForPublicKeys fetches all peer authentications for all pks +func (res *peerAuthenticationResolver) fetchPeerAuthenticationSlicesForPublicKeys(pks [][]byte) ([][]byte, error) { + peerAuths := make([][]byte, 0) + for _, pk := range pks { + peerAuthForHash, _ := res.fetchPeerAuthenticationAsByteSlice(pk) + if peerAuthForHash != nil { + peerAuths = append(peerAuths, peerAuthForHash) + } + } + + if len(peerAuths) == 0 { + return nil, dataRetriever.ErrPeerAuthNotFound + } + + return peerAuths, nil +} + +// fetchPeerAuthenticationAsByteSlice returns the value from authentication pool if exists +func (res *peerAuthenticationResolver) fetchPeerAuthenticationAsByteSlice(pk []byte) ([]byte, error) { + pid, ok := res.peerShardMapper.GetLastKnownPeerID(pk) + if !ok { + return nil, dataRetriever.ErrPeerAuthNotFound + } + + value, ok := res.peerAuthenticationPool.Peek(pid.Bytes()) + if ok { + return res.marshalizer.Marshal(value) + } + + return nil, dataRetriever.ErrPeerAuthNotFound +} + +// IsInterfaceNil returns true if there is no value under the 
interface +func (res *peerAuthenticationResolver) IsInterfaceNil() bool { + return res == nil +} diff --git a/dataRetriever/resolvers/peerAuthenticationResolver_test.go b/dataRetriever/resolvers/peerAuthenticationResolver_test.go new file mode 100644 index 00000000000..7d94a40adff --- /dev/null +++ b/dataRetriever/resolvers/peerAuthenticationResolver_test.go @@ -0,0 +1,650 @@ +package resolvers_test + +import ( + "bytes" + "errors" + "fmt" + "sort" + "strings" + "testing" + + "github.com/ElrondNetwork/elrond-go-core/core" + "github.com/ElrondNetwork/elrond-go-core/core/partitioning" + "github.com/ElrondNetwork/elrond-go-core/data/batch" + "github.com/ElrondNetwork/elrond-go-core/marshal" + "github.com/ElrondNetwork/elrond-go/dataRetriever" + "github.com/ElrondNetwork/elrond-go/dataRetriever/mock" + "github.com/ElrondNetwork/elrond-go/dataRetriever/resolvers" + "github.com/ElrondNetwork/elrond-go/p2p" + processMock "github.com/ElrondNetwork/elrond-go/process/mock" + "github.com/ElrondNetwork/elrond-go/testscommon" + "github.com/ElrondNetwork/elrond-go/testscommon/shardingMocks" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +var expectedErr = errors.New("expected error") +var pksMap = map[uint32][][]byte{ + 0: {[]byte("pk00"), []byte("pk01"), []byte("pk02")}, + 1: {[]byte("pk10"), []byte("pk11")}, + 2: {[]byte("pk21"), []byte("pk21"), []byte("pk32"), []byte("pk33")}, +} + +func getKeysSlice() [][]byte { + pks := make([][]byte, 0) + for _, pk := range pksMap { + pks = append(pks, pk...) + } + sort.Slice(pks, func(i, j int) bool { + return bytes.Compare(pks[i], pks[j]) < 0 + }) + return pks +} + +func createMockArgPeerAuthenticationResolver() resolvers.ArgPeerAuthenticationResolver { + return resolvers.ArgPeerAuthenticationResolver{ + ArgBaseResolver: createMockArgBaseResolver(), + PeerAuthenticationPool: testscommon.NewCacherStub(), + NodesCoordinator: &shardingMocks.NodesCoordinatorStub{ + GetAllEligibleValidatorsPublicKeysCalled: func(epoch uint32) (map[uint32][][]byte, error) { + return pksMap, nil + }, + }, + MaxNumOfPeerAuthenticationInResponse: 5, + PeerShardMapper: &processMock.PeerShardMapperStub{ + GetLastKnownPeerIDCalled: func(pk []byte) (*core.PeerID, bool) { + pid := core.PeerID("pid") + return &pid, true + }, + }, + DataPacker: &mock.DataPackerStub{}, + } +} + +func createPublicKeys(prefix string, numOfPks int) [][]byte { + var pkList [][]byte + for i := 0; i < numOfPks; i++ { + pk := []byte(fmt.Sprintf("%s%d", prefix, i)) + pkList = append(pkList, pk) + } + return pkList +} + +func createMockRequestedBuff(numOfPks int) ([]byte, error) { + marshaller := &marshal.GogoProtoMarshalizer{} + return marshaller.Marshal(&batch.Batch{Data: createPublicKeys("pk", numOfPks)}) +} + +func TestNewPeerAuthenticationResolver(t *testing.T) { + t.Parallel() + + t.Run("nil SenderResolver should error", func(t *testing.T) { + t.Parallel() + + arg := createMockArgPeerAuthenticationResolver() + arg.SenderResolver = nil + res, err := resolvers.NewPeerAuthenticationResolver(arg) + assert.Equal(t, dataRetriever.ErrNilResolverSender, err) + assert.Nil(t, res) + }) + t.Run("nil Marshaller should error", func(t *testing.T) { + t.Parallel() + + arg := createMockArgPeerAuthenticationResolver() + arg.Marshaller = nil + res, err := resolvers.NewPeerAuthenticationResolver(arg) + assert.Equal(t, dataRetriever.ErrNilMarshalizer, err) + assert.Nil(t, res) + }) + t.Run("nil AntifloodHandler should error", func(t *testing.T) { + t.Parallel() + + arg := 
createMockArgPeerAuthenticationResolver() + arg.AntifloodHandler = nil + res, err := resolvers.NewPeerAuthenticationResolver(arg) + assert.Equal(t, dataRetriever.ErrNilAntifloodHandler, err) + assert.Nil(t, res) + }) + t.Run("nil Throttler should error", func(t *testing.T) { + t.Parallel() + + arg := createMockArgPeerAuthenticationResolver() + arg.Throttler = nil + res, err := resolvers.NewPeerAuthenticationResolver(arg) + assert.Equal(t, dataRetriever.ErrNilThrottler, err) + assert.Nil(t, res) + }) + t.Run("nil PeerAuthenticationPool should error", func(t *testing.T) { + t.Parallel() + + arg := createMockArgPeerAuthenticationResolver() + arg.PeerAuthenticationPool = nil + res, err := resolvers.NewPeerAuthenticationResolver(arg) + assert.Equal(t, dataRetriever.ErrNilPeerAuthenticationPool, err) + assert.Nil(t, res) + }) + t.Run("nil NodesCoordinator should error", func(t *testing.T) { + t.Parallel() + + arg := createMockArgPeerAuthenticationResolver() + arg.NodesCoordinator = nil + res, err := resolvers.NewPeerAuthenticationResolver(arg) + assert.Equal(t, dataRetriever.ErrNilNodesCoordinator, err) + assert.Nil(t, res) + }) + t.Run("nil DataPacker should error", func(t *testing.T) { + t.Parallel() + + arg := createMockArgPeerAuthenticationResolver() + arg.DataPacker = nil + res, err := resolvers.NewPeerAuthenticationResolver(arg) + assert.Equal(t, dataRetriever.ErrNilDataPacker, err) + assert.Nil(t, res) + }) + t.Run("invalid max num of peer authentication should error", func(t *testing.T) { + t.Parallel() + + arg := createMockArgPeerAuthenticationResolver() + arg.MaxNumOfPeerAuthenticationInResponse = 1 + res, err := resolvers.NewPeerAuthenticationResolver(arg) + assert.Equal(t, dataRetriever.ErrInvalidNumOfPeerAuthentication, err) + assert.Nil(t, res) + }) + t.Run("nil PeerShardMapper should error", func(t *testing.T) { + t.Parallel() + + arg := createMockArgPeerAuthenticationResolver() + arg.PeerShardMapper = nil + res, err := resolvers.NewPeerAuthenticationResolver(arg) + assert.Equal(t, dataRetriever.ErrNilPeerShardMapper, err) + assert.Nil(t, res) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + arg := createMockArgPeerAuthenticationResolver() + res, err := resolvers.NewPeerAuthenticationResolver(arg) + assert.Nil(t, err) + assert.False(t, res.IsInterfaceNil()) + }) +} + +func TestPeerAuthenticationResolver_ProcessReceivedMessage(t *testing.T) { + t.Parallel() + + t.Run("nil message should error", func(t *testing.T) { + t.Parallel() + + arg := createMockArgPeerAuthenticationResolver() + res, err := resolvers.NewPeerAuthenticationResolver(arg) + assert.Nil(t, err) + assert.False(t, res.IsInterfaceNil()) + + err = res.ProcessReceivedMessage(nil, fromConnectedPeer) + assert.Equal(t, dataRetriever.ErrNilMessage, err) + }) + t.Run("canProcessMessage due to antiflood handler error", func(t *testing.T) { + t.Parallel() + + arg := createMockArgPeerAuthenticationResolver() + arg.AntifloodHandler = &mock.P2PAntifloodHandlerStub{ + CanProcessMessageCalled: func(message p2p.MessageP2P, fromConnectedPeer core.PeerID) error { + return expectedErr + }, + } + res, err := resolvers.NewPeerAuthenticationResolver(arg) + assert.Nil(t, err) + assert.False(t, res.IsInterfaceNil()) + + err = res.ProcessReceivedMessage(createRequestMsg(dataRetriever.ChunkType, nil), fromConnectedPeer) + assert.True(t, errors.Is(err, expectedErr)) + assert.False(t, arg.Throttler.(*mock.ThrottlerStub).StartWasCalled) + assert.False(t, arg.Throttler.(*mock.ThrottlerStub).EndWasCalled) + }) + 
t.Run("parseReceivedMessage returns error due to marshaller error", func(t *testing.T) { + t.Parallel() + + arg := createMockArgPeerAuthenticationResolver() + arg.Marshaller = &mock.MarshalizerStub{ + UnmarshalCalled: func(obj interface{}, buff []byte) error { + return expectedErr + }, + } + res, err := resolvers.NewPeerAuthenticationResolver(arg) + assert.Nil(t, err) + assert.False(t, res.IsInterfaceNil()) + + err = res.ProcessReceivedMessage(createRequestMsg(dataRetriever.ChunkType, nil), fromConnectedPeer) + assert.True(t, errors.Is(err, expectedErr)) + }) + t.Run("invalid request type should error", func(t *testing.T) { + t.Parallel() + + numOfPks := 3 + requestedBuff, err := createMockRequestedBuff(numOfPks) + require.Nil(t, err) + + arg := createMockArgPeerAuthenticationResolver() + res, err := resolvers.NewPeerAuthenticationResolver(arg) + assert.Nil(t, err) + assert.False(t, res.IsInterfaceNil()) + + err = res.ProcessReceivedMessage(createRequestMsg(dataRetriever.HashType, requestedBuff), fromConnectedPeer) + assert.True(t, errors.Is(err, dataRetriever.ErrRequestTypeNotImplemented)) + }) + + // =============== ChunkType -> resolveChunkRequest =============== + + t.Run("resolveChunkRequest: GetAllEligibleValidatorsPublicKeys returns error", func(t *testing.T) { + t.Parallel() + + arg := createMockArgPeerAuthenticationResolver() + arg.NodesCoordinator = &shardingMocks.NodesCoordinatorStub{ + GetAllEligibleValidatorsPublicKeysCalled: func(epoch uint32) (map[uint32][][]byte, error) { + return nil, expectedErr + }, + } + res, err := resolvers.NewPeerAuthenticationResolver(arg) + assert.Nil(t, err) + assert.False(t, res.IsInterfaceNil()) + + err = res.ProcessReceivedMessage(createRequestMsg(dataRetriever.ChunkType, []byte("data")), fromConnectedPeer) + assert.Equal(t, expectedErr, err) + }) + t.Run("resolveChunkRequest: GetAllEligibleValidatorsPublicKeys returns empty", func(t *testing.T) { + t.Parallel() + + arg := createMockArgPeerAuthenticationResolver() + arg.NodesCoordinator = &shardingMocks.NodesCoordinatorStub{ + GetAllEligibleValidatorsPublicKeysCalled: func(epoch uint32) (map[uint32][][]byte, error) { + return make(map[uint32][][]byte), nil + }, + } + res, err := resolvers.NewPeerAuthenticationResolver(arg) + assert.Nil(t, err) + assert.False(t, res.IsInterfaceNil()) + + err = res.ProcessReceivedMessage(createRequestMsg(dataRetriever.ChunkType, []byte("data")), fromConnectedPeer) + require.Nil(t, err) + }) + t.Run("resolveChunkRequest: chunk index is out of bounds", func(t *testing.T) { + t.Parallel() + + arg := createMockArgPeerAuthenticationResolver() + res, err := resolvers.NewPeerAuthenticationResolver(arg) + assert.Nil(t, err) + assert.False(t, res.IsInterfaceNil()) + + epoch := uint32(0) + chunkIndex := uint32(10) // out of range + err = res.ProcessReceivedMessage(createRequestMsgWithChunkIndex(dataRetriever.ChunkType, []byte(""), epoch, chunkIndex), fromConnectedPeer) + require.Equal(t, dataRetriever.InvalidChunkIndex, err) + }) + t.Run("resolveChunkRequest: all data not found in cache should error", func(t *testing.T) { + t.Parallel() + + cache := testscommon.NewCacherStub() + cache.PeekCalled = func(key []byte) (value interface{}, ok bool) { + return nil, false + } + + arg := createMockArgPeerAuthenticationResolver() + arg.PeerAuthenticationPool = cache + wasSent := false + arg.SenderResolver = &mock.TopicResolverSenderStub{ + SendCalled: func(buff []byte, peer core.PeerID) error { + wasSent = true + return nil + }, + } + res, err := 
resolvers.NewPeerAuthenticationResolver(arg) + assert.Nil(t, err) + assert.False(t, res.IsInterfaceNil()) + + epoch := uint32(0) + chunkIndex := uint32(0) + err = res.ProcessReceivedMessage(createRequestMsgWithChunkIndex(dataRetriever.ChunkType, []byte(""), epoch, chunkIndex), fromConnectedPeer) + assert.True(t, errors.Is(err, dataRetriever.ErrPeerAuthNotFound)) + expectedSubstrErr := fmt.Sprintf("%s %d", "from chunk", chunkIndex) + assert.True(t, strings.Contains(fmt.Sprintf("%s", err), expectedSubstrErr)) + assert.False(t, wasSent) + }) + t.Run("resolveChunkRequest: some data not found in cache should work", func(t *testing.T) { + t.Parallel() + + expectedNumOfMissing := 3 + cache := testscommon.NewCacherStub() + missingCount := 0 + cache.PeekCalled = func(key []byte) (value interface{}, ok bool) { + if missingCount < expectedNumOfMissing { + missingCount++ + return nil, false + } + return key, true + } + + arg := createMockArgPeerAuthenticationResolver() + arg.PeerAuthenticationPool = cache + messagesSent := 0 + arg.SenderResolver = &mock.TopicResolverSenderStub{ + SendCalled: func(buff []byte, peer core.PeerID) error { + b := &batch.Batch{} + err := arg.Marshaller.Unmarshal(b, buff) + assert.Nil(t, err) + expectedDataLen := arg.MaxNumOfPeerAuthenticationInResponse - expectedNumOfMissing + assert.Equal(t, expectedDataLen, len(b.Data)) + messagesSent++ + return nil + }, + } + arg.DataPacker, _ = partitioning.NewSizeDataPacker(arg.Marshaller) + res, err := resolvers.NewPeerAuthenticationResolver(arg) + assert.Nil(t, err) + assert.False(t, res.IsInterfaceNil()) + + epoch := uint32(0) + chunkIndex := uint32(0) + err = res.ProcessReceivedMessage(createRequestMsgWithChunkIndex(dataRetriever.ChunkType, []byte(""), epoch, chunkIndex), fromConnectedPeer) + assert.Nil(t, err) + assert.Equal(t, 1, messagesSent) + }) + t.Run("resolveChunkRequest: Send returns error", func(t *testing.T) { + t.Parallel() + + cache := testscommon.NewCacherStub() + cache.PeekCalled = func(key []byte) (value interface{}, ok bool) { + return key, true + } + + arg := createMockArgPeerAuthenticationResolver() + arg.PeerAuthenticationPool = cache + arg.SenderResolver = &mock.TopicResolverSenderStub{ + SendCalled: func(buff []byte, peer core.PeerID) error { + return expectedErr + }, + } + res, err := resolvers.NewPeerAuthenticationResolver(arg) + assert.Nil(t, err) + assert.False(t, res.IsInterfaceNil()) + + err = res.ProcessReceivedMessage(createRequestMsg(dataRetriever.ChunkType, []byte("")), fromConnectedPeer) + assert.True(t, errors.Is(err, expectedErr)) + }) + t.Run("resolveChunkRequest: should work", func(t *testing.T) { + t.Parallel() + + cache := testscommon.NewCacherStub() + cache.PeekCalled = func(key []byte) (value interface{}, ok bool) { + return key, true + } + + arg := createMockArgPeerAuthenticationResolver() + arg.PeerAuthenticationPool = cache + messagesSent := 0 + arg.SenderResolver = &mock.TopicResolverSenderStub{ + SendCalled: func(buff []byte, peer core.PeerID) error { + messagesSent++ + return nil + }, + } + arg.DataPacker, _ = partitioning.NewSizeDataPacker(arg.Marshaller) + res, err := resolvers.NewPeerAuthenticationResolver(arg) + assert.Nil(t, err) + assert.False(t, res.IsInterfaceNil()) + + epoch := uint32(0) + chunkIndex := uint32(1) + err = res.ProcessReceivedMessage(createRequestMsgWithChunkIndex(dataRetriever.ChunkType, []byte(""), epoch, chunkIndex), fromConnectedPeer) + assert.Nil(t, err) + assert.Equal(t, 1, messagesSent) + }) + + // =============== HashArrayType -> 
resolveMultipleHashesRequest =============== + + t.Run("resolveMultipleHashesRequest: Unmarshal returns error", func(t *testing.T) { + t.Parallel() + + arg := createMockArgPeerAuthenticationResolver() + res, err := resolvers.NewPeerAuthenticationResolver(arg) + assert.Nil(t, err) + assert.False(t, res.IsInterfaceNil()) + + err = res.ProcessReceivedMessage(createRequestMsg(dataRetriever.HashArrayType, []byte("invalid data")), fromConnectedPeer) + assert.NotNil(t, err) + }) + t.Run("resolveMultipleHashesRequest: all hashes missing from cache should error", func(t *testing.T) { + t.Parallel() + + cache := testscommon.NewCacherStub() + cache.PeekCalled = func(key []byte) (value interface{}, ok bool) { + return nil, false + } + + arg := createMockArgPeerAuthenticationResolver() + arg.PeerAuthenticationPool = cache + wasSent := false + arg.SenderResolver = &mock.TopicResolverSenderStub{ + SendCalled: func(buff []byte, peer core.PeerID) error { + wasSent = true + return nil + }, + } + res, err := resolvers.NewPeerAuthenticationResolver(arg) + assert.Nil(t, err) + assert.False(t, res.IsInterfaceNil()) + + hashes := getKeysSlice() + providedHashes, err := arg.Marshaller.Marshal(batch.Batch{Data: hashes}) + assert.Nil(t, err) + err = res.ProcessReceivedMessage(createRequestMsg(dataRetriever.HashArrayType, providedHashes), fromConnectedPeer) + expectedSubstrErr := fmt.Sprintf("%s %s", "from buff", providedHashes) + assert.True(t, strings.Contains(fmt.Sprintf("%s", err), expectedSubstrErr)) + assert.False(t, wasSent) + }) + t.Run("resolveMultipleHashesRequest: some data missing from cache should work", func(t *testing.T) { + t.Parallel() + + arg := createMockArgPeerAuthenticationResolver() + + pk1 := "pk01" + pk2 := "pk02" + providedKeys := make(map[string][]byte) + providedKeys[pk1] = []byte("") + providedKeys[pk2] = []byte("") + pks := make([][]byte, 0) + pks = append(pks, []byte(pk1)) + pks = append(pks, []byte(pk2)) + + hashes := make([][]byte, 0) + hashes = append(hashes, []byte("pk01")) // exists in cache + hashes = append(hashes, []byte("pk1")) // no entries + providedHashes, err := arg.Marshaller.Marshal(batch.Batch{Data: hashes}) + assert.Nil(t, err) + + cache := testscommon.NewCacherStub() + cache.PeekCalled = func(key []byte) (value interface{}, ok bool) { + val, ok := providedKeys[string(key)] + return val, ok + } + cache.KeysCalled = func() [][]byte { + return pks + } + + arg.PeerAuthenticationPool = cache + wasSent := false + arg.SenderResolver = &mock.TopicResolverSenderStub{ + SendCalled: func(buff []byte, peer core.PeerID) error { + b := &batch.Batch{} + err = arg.Marshaller.Unmarshal(b, buff) + assert.Nil(t, err) + assert.Equal(t, 1, len(b.Data)) // 1 entry for provided hashes + wasSent = true + return nil + }, + } + arg.PeerShardMapper = &processMock.PeerShardMapperStub{ + GetLastKnownPeerIDCalled: func(pk []byte) (*core.PeerID, bool) { + pid := core.PeerID(pk) + return &pid, true + }, + } + arg.DataPacker, _ = partitioning.NewSizeDataPacker(arg.Marshaller) + + res, err := resolvers.NewPeerAuthenticationResolver(arg) + assert.Nil(t, err) + assert.False(t, res.IsInterfaceNil()) + + err = res.ProcessReceivedMessage(createRequestMsg(dataRetriever.HashArrayType, providedHashes), fromConnectedPeer) + assert.Nil(t, err) + assert.True(t, wasSent) + }) + t.Run("resolveMultipleHashesRequest: Send returns error", func(t *testing.T) { + t.Parallel() + + cache := testscommon.NewCacherStub() + cache.PeekCalled = func(key []byte) (value interface{}, ok bool) { + return key, true + } + + arg := 
createMockArgPeerAuthenticationResolver() + arg.PeerAuthenticationPool = cache + arg.SenderResolver = &mock.TopicResolverSenderStub{ + SendCalled: func(buff []byte, peer core.PeerID) error { + return expectedErr + }, + } + res, err := resolvers.NewPeerAuthenticationResolver(arg) + assert.Nil(t, err) + assert.False(t, res.IsInterfaceNil()) + + hashes := getKeysSlice() + providedHashes, err := arg.Marshaller.Marshal(batch.Batch{Data: hashes}) + assert.Nil(t, err) + err = res.ProcessReceivedMessage(createRequestMsg(dataRetriever.HashArrayType, providedHashes), fromConnectedPeer) + assert.True(t, errors.Is(err, expectedErr)) + }) + t.Run("resolveMultipleHashesRequest: send large data buff", func(t *testing.T) { + t.Parallel() + + providedKeys := getKeysSlice() + expectedLen := len(providedKeys) + cache := testscommon.NewCacherStub() + cache.PeekCalled = func(key []byte) (value interface{}, ok bool) { + for _, pk := range providedKeys { + if bytes.Equal(pk, key) { + return pk, true + } + } + return nil, false + } + cache.KeysCalled = func() [][]byte { + return getKeysSlice() + } + + arg := createMockArgPeerAuthenticationResolver() + arg.PeerAuthenticationPool = cache + messagesSent := 0 + hashesReceived := 0 + arg.SenderResolver = &mock.TopicResolverSenderStub{ + SendCalled: func(buff []byte, peer core.PeerID) error { + b := &batch.Batch{} + err := arg.Marshaller.Unmarshal(b, buff) + assert.Nil(t, err) + + hashesReceived += len(b.Data) + messagesSent++ + return nil + }, + } + arg.PeerShardMapper = &processMock.PeerShardMapperStub{ + GetLastKnownPeerIDCalled: func(pk []byte) (*core.PeerID, bool) { + pid := core.PeerID(pk) + return &pid, true + }, + } + // split data into 2 packs + arg.DataPacker = &mock.DataPackerStub{ + PackDataInChunksCalled: func(data [][]byte, limit int) ([][]byte, error) { + middle := len(data) / 2 + b := &batch.Batch{ + Data: data[middle:], + } + buff1, err := arg.Marshaller.Marshal(b) + assert.Nil(t, err) + + b = &batch.Batch{ + Data: data[:middle], + } + buff2, err := arg.Marshaller.Marshal(b) + assert.Nil(t, err) + return [][]byte{buff1, buff2}, nil + }, + } + + res, err := resolvers.NewPeerAuthenticationResolver(arg) + assert.Nil(t, err) + assert.False(t, res.IsInterfaceNil()) + + epoch := uint32(0) + chunkIndex := uint32(0) + providedHashes, err := arg.Marshaller.Marshal(&batch.Batch{Data: providedKeys}) + assert.Nil(t, err) + err = res.ProcessReceivedMessage(createRequestMsgWithChunkIndex(dataRetriever.HashArrayType, providedHashes, epoch, chunkIndex), fromConnectedPeer) + assert.Nil(t, err) + assert.Equal(t, 2, messagesSent) + assert.Equal(t, expectedLen, hashesReceived) + }) +} + +func TestPeerAuthenticationResolver_RequestShouldError(t *testing.T) { + t.Parallel() + + arg := createMockArgPeerAuthenticationResolver() + arg.SenderResolver = &mock.TopicResolverSenderStub{ + SendOnRequestTopicCalled: func(rd *dataRetriever.RequestData, originalHashes [][]byte) error { + return expectedErr + }, + } + res, err := resolvers.NewPeerAuthenticationResolver(arg) + assert.Nil(t, err) + assert.False(t, res.IsInterfaceNil()) + + t.Run("RequestDataFromHash", func(t *testing.T) { + err = res.RequestDataFromHash([]byte(""), 0) + assert.Equal(t, expectedErr, err) + }) + t.Run("RequestDataFromChunk", func(t *testing.T) { + err = res.RequestDataFromChunk(0, 0) + assert.Equal(t, expectedErr, err) + }) + t.Run("RequestDataFromChunk - error on SendOnRequestTopic", func(t *testing.T) { + hashes := make([][]byte, 0) + hashes = append(hashes, []byte("pk")) + err = 
res.RequestDataFromHashArray(hashes, 0) + assert.Equal(t, expectedErr, err) + }) + +} + +func TestPeerAuthenticationResolver_RequestShouldWork(t *testing.T) { + t.Parallel() + + arg := createMockArgPeerAuthenticationResolver() + arg.SenderResolver = &mock.TopicResolverSenderStub{ + SendOnRequestTopicCalled: func(rd *dataRetriever.RequestData, originalHashes [][]byte) error { + return nil + }, + } + res, err := resolvers.NewPeerAuthenticationResolver(arg) + assert.Nil(t, err) + assert.False(t, res.IsInterfaceNil()) + + t.Run("RequestDataFromHash", func(t *testing.T) { + err = res.RequestDataFromHash([]byte(""), 0) + assert.Nil(t, err) + }) + t.Run("RequestDataFromChunk", func(t *testing.T) { + err = res.RequestDataFromChunk(0, 0) + assert.Nil(t, err) + }) +} diff --git a/dataRetriever/resolvers/topicResolverSender/topicResolverSender.go b/dataRetriever/resolvers/topicResolverSender/topicResolverSender.go index 90e647e8f4c..e618b36a469 100644 --- a/dataRetriever/resolvers/topicResolverSender/topicResolverSender.go +++ b/dataRetriever/resolvers/topicResolverSender/topicResolverSender.go @@ -39,6 +39,7 @@ type ArgTopicResolverSender struct { CurrentNetworkEpochProvider dataRetriever.CurrentNetworkEpochProviderHandler PreferredPeersHolder dataRetriever.PreferredPeersHolderHandler SelfShardIdProvider dataRetriever.SelfShardIDProvider + PeersRatingHandler dataRetriever.PeersRatingHandler TargetShardId uint32 } @@ -57,57 +58,23 @@ type topicResolverSender struct { resolverDebugHandler dataRetriever.ResolverDebugHandler currentNetworkEpochProviderHandler dataRetriever.CurrentNetworkEpochProviderHandler preferredPeersHolderHandler dataRetriever.PreferredPeersHolderHandler + peersRatingHandler dataRetriever.PeersRatingHandler selfShardId uint32 targetShardId uint32 } // NewTopicResolverSender returns a new topic resolver instance func NewTopicResolverSender(arg ArgTopicResolverSender) (*topicResolverSender, error) { - if check.IfNil(arg.Messenger) { - return nil, dataRetriever.ErrNilMessenger - } - if check.IfNil(arg.Marshalizer) { - return nil, dataRetriever.ErrNilMarshalizer - } - if check.IfNil(arg.Randomizer) { - return nil, dataRetriever.ErrNilRandomizer - } - if check.IfNil(arg.PeerListCreator) { - return nil, dataRetriever.ErrNilPeerListCreator - } - if check.IfNil(arg.OutputAntiflooder) { - return nil, dataRetriever.ErrNilAntifloodHandler - } - if check.IfNil(arg.CurrentNetworkEpochProvider) { - return nil, dataRetriever.ErrNilCurrentNetworkEpochProvider - } - if check.IfNil(arg.PreferredPeersHolder) { - return nil, dataRetriever.ErrNilPreferredPeersHolder - } - if check.IfNil(arg.SelfShardIdProvider) { - return nil, dataRetriever.ErrNilSelfShardIDProvider - } - if arg.NumIntraShardPeers < 0 { - return nil, fmt.Errorf("%w for NumIntraShardPeers as the value should be greater or equal than 0", - dataRetriever.ErrInvalidValue) - } - if arg.NumCrossShardPeers < 0 { - return nil, fmt.Errorf("%w for NumCrossShardPeers as the value should be greater or equal than 0", - dataRetriever.ErrInvalidValue) - } - if arg.NumFullHistoryPeers < 0 { - return nil, fmt.Errorf("%w for NumFullHistoryPeers as the value should be greater or equal than 0", - dataRetriever.ErrInvalidValue) - } - if arg.NumCrossShardPeers+arg.NumIntraShardPeers < minPeersToQuery { - return nil, fmt.Errorf("%w for NumCrossShardPeers, NumIntraShardPeers as their sum should be greater or equal than %d", - dataRetriever.ErrInvalidValue, minPeersToQuery) + err := checkArgs(arg) + if err != nil { + return nil, err } resolver := 
&topicResolverSender{ messenger: arg.Messenger, topicName: arg.TopicName, peerListCreator: arg.PeerListCreator, + peersRatingHandler: arg.PeersRatingHandler, marshalizer: arg.Marshalizer, randomizer: arg.Randomizer, targetShardId: arg.TargetShardId, @@ -124,6 +91,53 @@ func NewTopicResolverSender(arg ArgTopicResolverSender) (*topicResolverSender, e return resolver, nil } +func checkArgs(args ArgTopicResolverSender) error { + if check.IfNil(args.Messenger) { + return dataRetriever.ErrNilMessenger + } + if check.IfNil(args.Marshalizer) { + return dataRetriever.ErrNilMarshalizer + } + if check.IfNil(args.Randomizer) { + return dataRetriever.ErrNilRandomizer + } + if check.IfNil(args.PeerListCreator) { + return dataRetriever.ErrNilPeerListCreator + } + if check.IfNil(args.OutputAntiflooder) { + return dataRetriever.ErrNilAntifloodHandler + } + if check.IfNil(args.CurrentNetworkEpochProvider) { + return dataRetriever.ErrNilCurrentNetworkEpochProvider + } + if check.IfNil(args.PreferredPeersHolder) { + return dataRetriever.ErrNilPreferredPeersHolder + } + if check.IfNil(args.PeersRatingHandler) { + return dataRetriever.ErrNilPeersRatingHandler + } + if check.IfNil(args.SelfShardIdProvider) { + return dataRetriever.ErrNilSelfShardIDProvider + } + if args.NumIntraShardPeers < 0 { + return fmt.Errorf("%w for NumIntraShardPeers as the value should be greater or equal than 0", + dataRetriever.ErrInvalidValue) + } + if args.NumCrossShardPeers < 0 { + return fmt.Errorf("%w for NumCrossShardPeers as the value should be greater or equal than 0", + dataRetriever.ErrInvalidValue) + } + if args.NumFullHistoryPeers < 0 { + return fmt.Errorf("%w for NumFullHistoryPeers as the value should be greater or equal than 0", + dataRetriever.ErrInvalidValue) + } + if args.NumCrossShardPeers+args.NumIntraShardPeers < minPeersToQuery { + return fmt.Errorf("%w for NumCrossShardPeers, NumIntraShardPeers as their sum should be greater or equal than %d", + dataRetriever.ErrInvalidValue, minPeersToQuery) + } + return nil +} + // SendOnRequestTopic is used to send request data over channels (topics) to other peers // This method only sends the request, the received data should be handled by interceptors func (trs *topicResolverSender) SendOnRequestTopic(rd *dataRetriever.RequestData, originalHashes [][]byte) error { @@ -195,7 +209,9 @@ func (trs *topicResolverSender) sendOnTopic( histogramMap := make(map[string]int) - indexes := createIndexList(len(peerList)) + topRatedPeersList := trs.peersRatingHandler.GetTopRatedPeersFromList(peerList, maxToSend) + + indexes := createIndexList(len(topRatedPeersList)) shuffledIndexes := random.FisherYatesShuffle(indexes, trs.randomizer) logData := make([]interface{}, 0) msgSentCounter := 0 @@ -205,7 +221,7 @@ func (trs *topicResolverSender) sendOnTopic( } for idx := 0; idx < len(shuffledIndexes); idx++ { - peer := getPeerID(shuffledIndexes[idx], peerList, preferredPeer, peerType, topicToSendRequest, histogramMap) + peer := getPeerID(shuffledIndexes[idx], topRatedPeersList, preferredPeer, peerType, topicToSendRequest, histogramMap) err := trs.sendToConnectedPeer(topicToSendRequest, buff, peer) if err != nil { diff --git a/dataRetriever/resolvers/topicResolverSender/topicResolverSender_test.go b/dataRetriever/resolvers/topicResolverSender/topicResolverSender_test.go index 012403586a9..787e5bbcbf4 100644 --- a/dataRetriever/resolvers/topicResolverSender/topicResolverSender_test.go +++ b/dataRetriever/resolvers/topicResolverSender/topicResolverSender_test.go @@ -39,6 +39,7 @@ func 
createMockArgTopicResolverSender() topicResolverSender.ArgTopicResolverSend return map[uint32][]core.PeerID{} }, }, + PeersRatingHandler: &p2pmocks.PeersRatingHandlerStub{}, } } @@ -121,6 +122,17 @@ func TestNewTopicResolverSender_NilPreferredPeersHolderShouldErr(t *testing.T) { assert.Equal(t, dataRetriever.ErrNilPreferredPeersHolder, err) } +func TestNewTopicResolverSender_NilPeersRatingHandlerShouldErr(t *testing.T) { + t.Parallel() + + arg := createMockArgTopicResolverSender() + arg.PeersRatingHandler = nil + trs, err := topicResolverSender.NewTopicResolverSender(arg) + + assert.True(t, check.IfNil(trs)) + assert.Equal(t, dataRetriever.ErrNilPeersRatingHandler, err) +} + func TestNewTopicResolverSender_NilSelfShardIDProviderShouldErr(t *testing.T) { t.Parallel() @@ -372,7 +384,6 @@ func TestTopicResolverSender_SendOnRequestTopicShouldWorkAndSendToPreferredPeers err := trs.SendOnRequestTopic(&dataRetriever.RequestData{}, defaultHashes) assert.Nil(t, err) - assert.Equal(t, 1, countPrefPeersSh0) assert.Equal(t, 1, countPrefPeersSh1) } diff --git a/dataRetriever/resolvers/transactionResolver.go b/dataRetriever/resolvers/transactionResolver.go index 095f02a174e..ba7466ad0c9 100644 --- a/dataRetriever/resolvers/transactionResolver.go +++ b/dataRetriever/resolvers/transactionResolver.go @@ -6,7 +6,6 @@ import ( "github.com/ElrondNetwork/elrond-go-core/core" "github.com/ElrondNetwork/elrond-go-core/core/check" "github.com/ElrondNetwork/elrond-go-core/data/batch" - "github.com/ElrondNetwork/elrond-go-core/marshal" logger "github.com/ElrondNetwork/elrond-go-logger" "github.com/ElrondNetwork/elrond-go/dataRetriever" "github.com/ElrondNetwork/elrond-go/dataRetriever/requestHandlers" @@ -25,19 +24,16 @@ const maxBuffToSendBulkMiniblocks = 1 << 18 // 256KB // ArgTxResolver is the argument structure used to create new TxResolver instance type ArgTxResolver struct { - SenderResolver dataRetriever.TopicResolverSender + ArgBaseResolver TxPool dataRetriever.ShardedDataCacherNotifier TxStorage storage.Storer - Marshalizer marshal.Marshalizer DataPacker dataRetriever.DataPacker - AntifloodHandler dataRetriever.P2PAntifloodHandler - Throttler dataRetriever.ResolverThrottler IsFullHistoryNode bool } // TxResolver is a wrapper over Resolver that is specialized in resolving transaction requests type TxResolver struct { - dataRetriever.TopicResolverSender + *baseResolver messageProcessor baseStorageResolver txPool dataRetriever.ShardedDataCacherNotifier @@ -46,35 +42,20 @@ type TxResolver struct { // NewTxResolver creates a new transaction resolver func NewTxResolver(arg ArgTxResolver) (*TxResolver, error) { - if check.IfNil(arg.SenderResolver) { - return nil, dataRetriever.ErrNilResolverSender - } - if check.IfNil(arg.TxPool) { - return nil, dataRetriever.ErrNilTxDataPool - } - if check.IfNil(arg.TxStorage) { - return nil, dataRetriever.ErrNilTxStorage - } - if check.IfNil(arg.Marshalizer) { - return nil, dataRetriever.ErrNilMarshalizer - } - if check.IfNil(arg.DataPacker) { - return nil, dataRetriever.ErrNilDataPacker - } - if check.IfNil(arg.AntifloodHandler) { - return nil, dataRetriever.ErrNilAntifloodHandler - } - if check.IfNil(arg.Throttler) { - return nil, dataRetriever.ErrNilThrottler + err := checkArgTxResolver(arg) + if err != nil { + return nil, err } txResolver := &TxResolver{ - TopicResolverSender: arg.SenderResolver, + baseResolver: &baseResolver{ + TopicResolverSender: arg.SenderResolver, + }, txPool: arg.TxPool, baseStorageResolver: createBaseStorageResolver(arg.TxStorage, 
arg.IsFullHistoryNode), dataPacker: arg.DataPacker, messageProcessor: messageProcessor{ - marshalizer: arg.Marshalizer, + marshalizer: arg.Marshaller, antifloodHandler: arg.AntifloodHandler, topic: arg.SenderResolver.RequestTopic(), throttler: arg.Throttler, @@ -84,6 +65,23 @@ func NewTxResolver(arg ArgTxResolver) (*TxResolver, error) { return txResolver, nil } +func checkArgTxResolver(arg ArgTxResolver) error { + err := checkArgBase(arg.ArgBaseResolver) + if err != nil { + return err + } + if check.IfNil(arg.TxPool) { + return dataRetriever.ErrNilTxDataPool + } + if check.IfNil(arg.TxStorage) { + return dataRetriever.ErrNilTxStorage + } + if check.IfNil(arg.DataPacker) { + return dataRetriever.ErrNilDataPacker + } + return nil +} + // ProcessReceivedMessage will be the callback func from the p2p.Messenger and will be called each time a new message was received // (for the topic this validator was registered to, usually a request topic) func (txRes *TxResolver) ProcessReceivedMessage(message p2p.MessageP2P, fromConnectedPeer core.PeerID) error { @@ -249,26 +247,6 @@ func (txRes *TxResolver) printHashArray(hashes [][]byte) { } } -// SetNumPeersToQuery will set the number of intra shard and cross shard number of peer to query -func (txRes *TxResolver) SetNumPeersToQuery(intra int, cross int) { - txRes.TopicResolverSender.SetNumPeersToQuery(intra, cross) -} - -// NumPeersToQuery will return the number of intra shard and cross shard number of peer to query -func (txRes *TxResolver) NumPeersToQuery() (int, int) { - return txRes.TopicResolverSender.NumPeersToQuery() -} - -// SetResolverDebugHandler will set a resolver debug handler -func (txRes *TxResolver) SetResolverDebugHandler(handler dataRetriever.ResolverDebugHandler) error { - return txRes.TopicResolverSender.SetResolverDebugHandler(handler) -} - -// Close returns nil -func (txRes *TxResolver) Close() error { - return nil -} - // IsInterfaceNil returns true if there is no value under the interface func (txRes *TxResolver) IsInterfaceNil() bool { return txRes == nil diff --git a/dataRetriever/resolvers/transactionResolver_test.go b/dataRetriever/resolvers/transactionResolver_test.go index be5d7e22d82..0653409b095 100644 --- a/dataRetriever/resolvers/transactionResolver_test.go +++ b/dataRetriever/resolvers/transactionResolver_test.go @@ -23,13 +23,10 @@ var connectedPeerId = core.PeerID("connected peer id") func createMockArgTxResolver() resolvers.ArgTxResolver { return resolvers.ArgTxResolver{ - SenderResolver: &mock.TopicResolverSenderStub{}, - TxPool: testscommon.NewShardedDataStub(), - TxStorage: &storageStubs.StorerStub{}, - Marshalizer: &mock.MarshalizerMock{}, - DataPacker: &mock.DataPackerStub{}, - AntifloodHandler: &mock.P2PAntifloodHandlerStub{}, - Throttler: &mock.ThrottlerStub{}, + ArgBaseResolver: createMockArgBaseResolver(), + TxPool: testscommon.NewShardedDataStub(), + TxStorage: &storageStubs.StorerStub{}, + DataPacker: &mock.DataPackerStub{}, } } @@ -72,7 +69,7 @@ func TestNewTxResolver_NilMarshalizerShouldErr(t *testing.T) { t.Parallel() arg := createMockArgTxResolver() - arg.Marshalizer = nil + arg.Marshaller = nil txRes, err := resolvers.NewTxResolver(arg) assert.Equal(t, dataRetriever.ErrNilMarshalizer, err) @@ -165,7 +162,7 @@ func TestTxResolver_ProcessReceivedMessageWrongTypeShouldErr(t *testing.T) { arg := createMockArgTxResolver() txRes, _ := resolvers.NewTxResolver(arg) - data, _ := arg.Marshalizer.Marshal(&dataRetriever.RequestData{Type: dataRetriever.NonceType, Value: []byte("aaa")}) + data, _ := 
arg.Marshaller.Marshal(&dataRetriever.RequestData{Type: dataRetriever.NonceType, Value: []byte("aaa")}) msg := &mock.P2PMessageMock{DataField: data} @@ -182,7 +179,7 @@ func TestTxResolver_ProcessReceivedMessageNilValueShouldErr(t *testing.T) { arg := createMockArgTxResolver() txRes, _ := resolvers.NewTxResolver(arg) - data, _ := arg.Marshalizer.Marshal(&dataRetriever.RequestData{Type: dataRetriever.HashType, Value: nil}) + data, _ := arg.Marshaller.Marshal(&dataRetriever.RequestData{Type: dataRetriever.HashType, Value: nil}) msg := &mock.P2PMessageMock{DataField: data} @@ -263,7 +260,7 @@ func TestTxResolver_ProcessReceivedMessageFoundInTxPoolMarshalizerFailShouldRetN arg := createMockArgTxResolver() arg.TxPool = txPool - arg.Marshalizer = marshalizerStub + arg.Marshaller = marshalizerStub txRes, _ := resolvers.NewTxResolver(arg) data, _ := marshalizerMock.Marshal(&dataRetriever.RequestData{Type: dataRetriever.HashType, Value: []byte("aaa")}) @@ -531,7 +528,7 @@ func TestTxResolver_RequestDataFromHashArrayShouldWork(t *testing.T) { marshalizer := &marshal.GogoProtoMarshalizer{} arg := createMockArgTxResolver() - arg.Marshalizer = marshalizer + arg.Marshaller = marshalizer arg.SenderResolver = res txRes, _ := resolvers.NewTxResolver(arg) diff --git a/dataRetriever/resolvers/trieNodeResolver.go b/dataRetriever/resolvers/trieNodeResolver.go index 462d315bf81..be78d720390 100644 --- a/dataRetriever/resolvers/trieNodeResolver.go +++ b/dataRetriever/resolvers/trieNodeResolver.go @@ -4,7 +4,6 @@ import ( "github.com/ElrondNetwork/elrond-go-core/core" "github.com/ElrondNetwork/elrond-go-core/core/check" "github.com/ElrondNetwork/elrond-go-core/data/batch" - "github.com/ElrondNetwork/elrond-go-core/marshal" logger "github.com/ElrondNetwork/elrond-go-logger" "github.com/ElrondNetwork/elrond-go/dataRetriever" "github.com/ElrondNetwork/elrond-go/p2p" @@ -15,43 +14,31 @@ var logTrieNodes = logger.GetOrCreate("dataretriever/resolvers/trienoderesolver" // ArgTrieNodeResolver is the argument structure used to create new TrieNodeResolver instance type ArgTrieNodeResolver struct { - SenderResolver dataRetriever.TopicResolverSender - TrieDataGetter dataRetriever.TrieDataGetter - Marshalizer marshal.Marshalizer - AntifloodHandler dataRetriever.P2PAntifloodHandler - Throttler dataRetriever.ResolverThrottler + ArgBaseResolver + TrieDataGetter dataRetriever.TrieDataGetter } // TrieNodeResolver is a wrapper over Resolver that is specialized in resolving trie node requests type TrieNodeResolver struct { - dataRetriever.TopicResolverSender + *baseResolver messageProcessor trieDataGetter dataRetriever.TrieDataGetter } // NewTrieNodeResolver creates a new trie node resolver func NewTrieNodeResolver(arg ArgTrieNodeResolver) (*TrieNodeResolver, error) { - if check.IfNil(arg.SenderResolver) { - return nil, dataRetriever.ErrNilResolverSender - } - if check.IfNil(arg.TrieDataGetter) { - return nil, dataRetriever.ErrNilTrieDataGetter - } - if check.IfNil(arg.Marshalizer) { - return nil, dataRetriever.ErrNilMarshalizer - } - if check.IfNil(arg.AntifloodHandler) { - return nil, dataRetriever.ErrNilAntifloodHandler - } - if check.IfNil(arg.Throttler) { - return nil, dataRetriever.ErrNilThrottler + err := checkArgTrieNodeResolver(arg) + if err != nil { + return nil, err } return &TrieNodeResolver{ - TopicResolverSender: arg.SenderResolver, - trieDataGetter: arg.TrieDataGetter, + baseResolver: &baseResolver{ + TopicResolverSender: arg.SenderResolver, + }, + trieDataGetter: arg.TrieDataGetter, messageProcessor: 
messageProcessor{ - marshalizer: arg.Marshalizer, + marshalizer: arg.Marshaller, antifloodHandler: arg.AntifloodHandler, topic: arg.SenderResolver.RequestTopic(), throttler: arg.Throttler, @@ -59,6 +46,17 @@ func NewTrieNodeResolver(arg ArgTrieNodeResolver) (*TrieNodeResolver, error) { }, nil } +func checkArgTrieNodeResolver(arg ArgTrieNodeResolver) error { + err := checkArgBase(arg.ArgBaseResolver) + if err != nil { + return err + } + if check.IfNil(arg.TrieDataGetter) { + return dataRetriever.ErrNilTrieDataGetter + } + return nil +} + // ProcessReceivedMessage will be the callback func from the p2p.Messenger and will be called each time a new message was received // (for the topic this validator was registered to, usually a request topic) func (tnRes *TrieNodeResolver) ProcessReceivedMessage(message p2p.MessageP2P, fromConnectedPeer core.PeerID) error { @@ -295,26 +293,6 @@ func (tnRes *TrieNodeResolver) RequestDataFromReferenceAndChunk(hash []byte, chu ) } -// SetNumPeersToQuery will set the number of intra shard and cross shard number of peer to query -func (tnRes *TrieNodeResolver) SetNumPeersToQuery(intra int, cross int) { - tnRes.TopicResolverSender.SetNumPeersToQuery(intra, cross) -} - -// NumPeersToQuery will return the number of intra shard and cross shard number of peer to query -func (tnRes *TrieNodeResolver) NumPeersToQuery() (int, int) { - return tnRes.TopicResolverSender.NumPeersToQuery() -} - -// SetResolverDebugHandler will set a resolver debug handler -func (tnRes *TrieNodeResolver) SetResolverDebugHandler(handler dataRetriever.ResolverDebugHandler) error { - return tnRes.TopicResolverSender.SetResolverDebugHandler(handler) -} - -// Close returns nil -func (tnRes *TrieNodeResolver) Close() error { - return nil -} - // IsInterfaceNil returns true if there is no value under the interface func (tnRes *TrieNodeResolver) IsInterfaceNil() bool { return tnRes == nil diff --git a/dataRetriever/resolvers/trieNodeResolver_test.go b/dataRetriever/resolvers/trieNodeResolver_test.go index a7f7408ac4a..277273cfa50 100644 --- a/dataRetriever/resolvers/trieNodeResolver_test.go +++ b/dataRetriever/resolvers/trieNodeResolver_test.go @@ -23,11 +23,8 @@ var fromConnectedPeer = core.PeerID("from connected peer") func createMockArgTrieNodeResolver() resolvers.ArgTrieNodeResolver { return resolvers.ArgTrieNodeResolver{ - SenderResolver: &mock.TopicResolverSenderStub{}, - TrieDataGetter: &trieMock.TrieStub{}, - Marshalizer: &mock.MarshalizerMock{}, - AntifloodHandler: &mock.P2PAntifloodHandlerStub{}, - Throttler: &mock.ThrottlerStub{}, + ArgBaseResolver: createMockArgBaseResolver(), + TrieDataGetter: &trieMock.TrieStub{}, } } @@ -57,7 +54,7 @@ func TestNewTrieNodeResolver_NilMarshalizerShouldErr(t *testing.T) { t.Parallel() arg := createMockArgTrieNodeResolver() - arg.Marshalizer = nil + arg.Marshaller = nil tnRes, err := resolvers.NewTrieNodeResolver(arg) assert.Equal(t, dataRetriever.ErrNilMarshalizer, err) @@ -222,7 +219,7 @@ func TestTrieNodeResolver_ProcessReceivedMessageShouldGetFromTrieAndMarshalizerF } arg := createMockArgTrieNodeResolver() - arg.Marshalizer = marshalizerStub + arg.Marshaller = marshalizerStub tnRes, _ := resolvers.NewTrieNodeResolver(arg) data, _ := marshalizerMock.Marshal(&dataRetriever.RequestData{Type: dataRetriever.HashType, Value: []byte("node1")}) @@ -246,7 +243,7 @@ func TestTrieNodeResolver_ProcessReceivedMessageTrieErrorsShouldErr(t *testing.T } tnRes, _ := resolvers.NewTrieNodeResolver(arg) - data, _ := arg.Marshalizer.Marshal(&dataRetriever.RequestData{Type: 
dataRetriever.HashType, Value: []byte("node1")}) + data, _ := arg.Marshaller.Marshal(&dataRetriever.RequestData{Type: dataRetriever.HashType, Value: []byte("node1")}) msg := &mock.P2PMessageMock{DataField: data} err := tnRes.ProcessReceivedMessage(msg, fromConnectedPeer) @@ -276,9 +273,9 @@ func TestTrieNodeResolver_ProcessReceivedMessageMultipleHashesGetSerializedNodeE b := &batch.Batch{ Data: [][]byte{[]byte("hash1")}, } - buffBatch, _ := arg.Marshalizer.Marshal(b) + buffBatch, _ := arg.Marshaller.Marshal(b) - data, _ := arg.Marshalizer.Marshal( + data, _ := arg.Marshaller.Marshal( &dataRetriever.RequestData{ Type: dataRetriever.HashArrayType, Value: buffBatch, @@ -304,7 +301,7 @@ func TestTrieNodeResolver_ProcessReceivedMessageMultipleHashesGetSerializedNodes arg.SenderResolver = &mock.TopicResolverSenderStub{ SendCalled: func(buff []byte, peer core.PeerID) error { b := &batch.Batch{} - err := arg.Marshalizer.Unmarshal(b, buff) + err := arg.Marshaller.Unmarshal(b, buff) require.Nil(t, err) receivedNodes = b.Data @@ -330,9 +327,9 @@ func TestTrieNodeResolver_ProcessReceivedMessageMultipleHashesGetSerializedNodes b := &batch.Batch{ Data: [][]byte{[]byte("hash1")}, } - buffBatch, _ := arg.Marshalizer.Marshal(b) + buffBatch, _ := arg.Marshaller.Marshal(b) - data, _ := arg.Marshalizer.Marshal( + data, _ := arg.Marshaller.Marshal( &dataRetriever.RequestData{ Type: dataRetriever.HashArrayType, Value: buffBatch, @@ -360,7 +357,7 @@ func TestTrieNodeResolver_ProcessReceivedMessageMultipleHashesNotEnoughSpaceShou arg.SenderResolver = &mock.TopicResolverSenderStub{ SendCalled: func(buff []byte, peer core.PeerID) error { b := &batch.Batch{} - err := arg.Marshalizer.Unmarshal(b, buff) + err := arg.Marshaller.Unmarshal(b, buff) require.Nil(t, err) receivedNodes = b.Data @@ -387,9 +384,9 @@ func TestTrieNodeResolver_ProcessReceivedMessageMultipleHashesNotEnoughSpaceShou b := &batch.Batch{ Data: [][]byte{[]byte("hash1")}, } - buffBatch, _ := arg.Marshalizer.Marshal(b) + buffBatch, _ := arg.Marshaller.Marshal(b) - data, _ := arg.Marshalizer.Marshal( + data, _ := arg.Marshaller.Marshal( &dataRetriever.RequestData{ Type: dataRetriever.HashArrayType, Value: buffBatch, @@ -417,7 +414,7 @@ func TestTrieNodeResolver_ProcessReceivedMessageMultipleHashesShouldWorkWithSubt arg.SenderResolver = &mock.TopicResolverSenderStub{ SendCalled: func(buff []byte, peer core.PeerID) error { b := &batch.Batch{} - err := arg.Marshalizer.Unmarshal(b, buff) + err := arg.Marshaller.Unmarshal(b, buff) require.Nil(t, err) receivedNodes = b.Data @@ -448,9 +445,9 @@ func TestTrieNodeResolver_ProcessReceivedMessageMultipleHashesShouldWorkWithSubt b := &batch.Batch{ Data: [][]byte{[]byte("hash1"), []byte("hash2")}, } - buffBatch, _ := arg.Marshalizer.Marshal(b) + buffBatch, _ := arg.Marshaller.Marshal(b) - data, _ := arg.Marshalizer.Marshal( + data, _ := arg.Marshaller.Marshal( &dataRetriever.RequestData{ Type: dataRetriever.HashArrayType, Value: buffBatch, @@ -487,7 +484,7 @@ func testTrieNodeResolverProcessReceivedMessageLargeTrieNode( arg.SenderResolver = &mock.TopicResolverSenderStub{ SendCalled: func(buff []byte, peer core.PeerID) error { b := &batch.Batch{} - err := arg.Marshalizer.Unmarshal(b, buff) + err := arg.Marshaller.Unmarshal(b, buff) require.Nil(t, err) sendWasCalled = true assert.Equal(t, maxComputedChunks, b.MaxChunks) @@ -515,7 +512,7 @@ func testTrieNodeResolverProcessReceivedMessageLargeTrieNode( } tnRes, _ := resolvers.NewTrieNodeResolver(arg) - data, _ := arg.Marshalizer.Marshal( + data, _ := 
arg.Marshaller.Marshal( &dataRetriever.RequestData{ Type: dataRetriever.HashType, Value: []byte("hash1"), @@ -656,7 +653,7 @@ func TestTrieNodeResolver_RequestDataFromHashArray(t *testing.T) { assert.Equal(t, dataRetriever.HashArrayType, rd.Type) b := &batch.Batch{} - err := arg.Marshalizer.Unmarshal(b, rd.Value) + err := arg.Marshaller.Unmarshal(b, rd.Value) require.Nil(t, err) assert.Equal(t, [][]byte{hash1, hash2}, b.Data) assert.Equal(t, uint32(0), b.ChunkIndex) //mandatory to be 0 diff --git a/dataRetriever/txpool/shardedTxPool.go b/dataRetriever/txpool/shardedTxPool.go index c9aff40f1c9..31e0e32205a 100644 --- a/dataRetriever/txpool/shardedTxPool.go +++ b/dataRetriever/txpool/shardedTxPool.go @@ -328,7 +328,7 @@ func (txPool *shardedTxPool) GetCounts() counting.CountsWithSize { } // Keys returns all the keys contained in shard caches -func(txPool *shardedTxPool) Keys() [][]byte { +func (txPool *shardedTxPool) Keys() [][]byte { txPool.mutexBackingMap.RLock() defer txPool.mutexBackingMap.RUnlock() diff --git a/dblookupext/historyRepository.go b/dblookupext/historyRepository.go index 1b19cee588a..ea15dd48efa 100644 --- a/dblookupext/historyRepository.go +++ b/dblookupext/historyRepository.go @@ -131,7 +131,7 @@ func (hr *historyRepository) RecordBlock(blockHeaderHash []byte, hr.recordBlockMutex.Lock() defer hr.recordBlockMutex.Unlock() - log.Debug("RecordBlock()", "nonce", blockHeader.GetNonce(), "blockHeaderHash", blockHeaderHash, "header type", fmt.Sprintf("%T", blockHeader)) + log.Trace("RecordBlock()", "nonce", blockHeader.GetNonce(), "blockHeaderHash", blockHeaderHash, "header type", fmt.Sprintf("%T", blockHeader)) body, ok := blockBody.(*block.Body) if !ok { @@ -301,7 +301,7 @@ func (hr *historyRepository) OnNotarizedBlocks(shardID uint32, headers []data.He for i, headerHandler := range headers { headerHash := headersHashes[i] - log.Debug("onNotarizedBlocks():", "shardID", shardID, "nonce", headerHandler.GetNonce(), "headerHash", headerHash, "type", fmt.Sprintf("%T", headerHandler)) + log.Trace("onNotarizedBlocks():", "shardID", shardID, "nonce", headerHandler.GetNonce(), "headerHash", headerHash, "type", fmt.Sprintf("%T", headerHandler)) metaBlock, isMetaBlock := headerHandler.(*block.MetaBlock) if isMetaBlock { @@ -347,7 +347,7 @@ func (hr *historyRepository) onNotarizedMiniblock(metaBlockNonce uint64, metaBlo return } - log.Debug("onNotarizedMiniblock()", + log.Trace("onNotarizedMiniblock()", "metaBlockNonce", metaBlockNonce, "metaBlockHash", metaBlockHash, "shardOfContainingBlock", shardOfContainingBlock, @@ -387,7 +387,7 @@ func (hr *historyRepository) consumePendingNotificationsWithLock() { return } - log.Debug("consumePendingNotificationsWithLock() begin", + log.Trace("consumePendingNotificationsWithLock() begin", "len(source)", hr.pendingNotarizedAtSourceNotifications.Len(), "len(destination)", hr.pendingNotarizedAtDestinationNotifications.Len(), "len(both)", hr.pendingNotarizedAtBothNotifications.Len(), @@ -410,7 +410,7 @@ func (hr *historyRepository) consumePendingNotificationsWithLock() { metadata.NotarizedAtDestinationInMetaHash = notification.metaHash }) - log.Debug("consumePendingNotificationsWithLock() end", + log.Trace("consumePendingNotificationsWithLock() end", "len(source)", hr.pendingNotarizedAtSourceNotifications.Len(), "len(destination)", hr.pendingNotarizedAtDestinationNotifications.Len(), "len(both)", hr.pendingNotarizedAtBothNotifications.Len(), diff --git a/epochStart/bootstrap/baseStorageHandler.go b/epochStart/bootstrap/baseStorageHandler.go index 
b94af4042cd..7541bb1facd 100644 --- a/epochStart/bootstrap/baseStorageHandler.go +++ b/epochStart/bootstrap/baseStorageHandler.go @@ -53,6 +53,19 @@ func checkNilArgs(args StorageHandlerArgs) error { return nil } +type miniBlockInfo struct { + miniBlockHashes [][]byte + fullyProcessed []bool + indexOfLastTxProcessed []int32 + pendingMiniBlocksMap map[string]struct{} + pendingMiniBlocksPerShardMap map[uint32][][]byte +} + +type processedIndexes struct { + firstIndex int32 + lastIndex int32 +} + // baseStorageHandler handles the storage functions for saving bootstrap data type baseStorageHandler struct { storageService dataRetriever.StorageService diff --git a/epochStart/bootstrap/disabled/disabledPeerShardMapper.go b/epochStart/bootstrap/disabled/disabledPeerShardMapper.go new file mode 100644 index 00000000000..228c353c656 --- /dev/null +++ b/epochStart/bootstrap/disabled/disabledPeerShardMapper.go @@ -0,0 +1,39 @@ +package disabled + +import "github.com/ElrondNetwork/elrond-go-core/core" + +// peerShardMapper represents the disabled structure of peerShardMapper +type peerShardMapper struct { +} + +// NewPeerShardMapper returns default instance +func NewPeerShardMapper() *peerShardMapper { + return &peerShardMapper{} +} + +// GetLastKnownPeerID returns nothing +func (p *peerShardMapper) GetLastKnownPeerID(_ []byte) (*core.PeerID, bool) { + return nil, false +} + +// UpdatePeerIDPublicKeyPair does nothing +func (p *peerShardMapper) UpdatePeerIDPublicKeyPair(_ core.PeerID, _ []byte) { +} + +// PutPeerIdShardId does nothing +func (p *peerShardMapper) PutPeerIdShardId(_ core.PeerID, _ uint32) { +} + +// PutPeerIdSubType does nothing +func (p *peerShardMapper) PutPeerIdSubType(_ core.PeerID, _ core.P2PPeerSubType) { +} + +// GetPeerInfo returns default instance +func (p *peerShardMapper) GetPeerInfo(_ core.PeerID) core.P2PPeerInfo { + return core.P2PPeerInfo{} +} + +// IsInterfaceNil returns true if there is no value under the interface +func (p *peerShardMapper) IsInterfaceNil() bool { + return p == nil +} diff --git a/epochStart/bootstrap/disabled/disabledPeersRatingHandler.go b/epochStart/bootstrap/disabled/disabledPeersRatingHandler.go new file mode 100644 index 00000000000..7476776ccdd --- /dev/null +++ b/epochStart/bootstrap/disabled/disabledPeersRatingHandler.go @@ -0,0 +1,33 @@ +package disabled + +import "github.com/ElrondNetwork/elrond-go-core/core" + +type disabledPeersRatingHandler struct { +} + +// NewDisabledPeersRatingHandler returns a new instance of disabledPeersRatingHandler +func NewDisabledPeersRatingHandler() *disabledPeersRatingHandler { + return &disabledPeersRatingHandler{} +} + +// AddPeer does nothing as it is disabled +func (dprs *disabledPeersRatingHandler) AddPeer(_ core.PeerID) { +} + +// IncreaseRating does nothing as it is disabled +func (dprs *disabledPeersRatingHandler) IncreaseRating(_ core.PeerID) { +} + +// DecreaseRating does nothing as it is disabled +func (dprs *disabledPeersRatingHandler) DecreaseRating(_ core.PeerID) { +} + +// GetTopRatedPeersFromList returns the provided peers list as it is disabled +func (dprs *disabledPeersRatingHandler) GetTopRatedPeersFromList(peers []core.PeerID, _ int) []core.PeerID { + return peers +} + +// IsInterfaceNil returns true if there is no value under the interface +func (dprs *disabledPeersRatingHandler) IsInterfaceNil() bool { + return dprs == nil +} diff --git a/epochStart/bootstrap/disabled/disabledPreferredPeersHolder.go b/epochStart/bootstrap/disabled/disabledPreferredPeersHolder.go index 
f660895d103..e5669cdec17 100644 --- a/epochStart/bootstrap/disabled/disabledPreferredPeersHolder.go +++ b/epochStart/bootstrap/disabled/disabledPreferredPeersHolder.go @@ -12,11 +12,15 @@ func NewPreferredPeersHolder() *disabledPreferredPeersHolder { return &disabledPreferredPeersHolder{} } -// Put won't do anything -func (d *disabledPreferredPeersHolder) Put(_ []byte, _ core.PeerID, _ uint32) { +// PutConnectionAddress does nothing as it is disabled +func (d *disabledPreferredPeersHolder) PutConnectionAddress(_ core.PeerID, _ string) { } -// Get will return an empty map +// PutShardID does nothing as it is disabled +func (d *disabledPreferredPeersHolder) PutShardID(_ core.PeerID, _ uint32) { +} + +// Get does nothing as it is disabled func (d *disabledPreferredPeersHolder) Get() map[uint32][]core.PeerID { return make(map[uint32][]core.PeerID) } @@ -26,11 +30,11 @@ func (d *disabledPreferredPeersHolder) Contains(_ core.PeerID) bool { return false } -// Remove won't do anything +// Remove does nothing as it is disabled func (d *disabledPreferredPeersHolder) Remove(_ core.PeerID) { } -// Clear won't do anything +// Clear does nothing as it is disabled func (d *disabledPreferredPeersHolder) Clear() { } diff --git a/epochStart/bootstrap/factory/epochStartInterceptorsContainerFactory.go b/epochStart/bootstrap/factory/epochStartInterceptorsContainerFactory.go index 3f17ba1205d..da2a2f6a977 100644 --- a/epochStart/bootstrap/factory/epochStartInterceptorsContainerFactory.go +++ b/epochStart/bootstrap/factory/epochStartInterceptorsContainerFactory.go @@ -11,6 +11,7 @@ import ( "github.com/ElrondNetwork/elrond-go/dataRetriever" "github.com/ElrondNetwork/elrond-go/epochStart" "github.com/ElrondNetwork/elrond-go/epochStart/bootstrap/disabled" + disabledFactory "github.com/ElrondNetwork/elrond-go/factory/disabled" disabledGenesis "github.com/ElrondNetwork/elrond-go/genesis/process/disabled" "github.com/ElrondNetwork/elrond-go/process" "github.com/ElrondNetwork/elrond-go/process/factory/interceptorscontainer" @@ -40,6 +41,7 @@ type ArgsEpochStartInterceptorContainer struct { EnableSignTxWithHashEpoch uint32 EpochNotifier process.EpochNotifier RequestHandler process.RequestHandler + SignaturesHandler process.SignaturesHandler } // NewEpochStartInterceptorsContainer will return a real interceptors container factory, but with many disabled components @@ -71,31 +73,39 @@ func NewEpochStartInterceptorsContainer(args ArgsEpochStartInterceptorContainer) sizeCheckDelta := 0 validityAttester := disabled.NewValidityAttester() epochStartTrigger := disabled.NewEpochStartTrigger() + // TODO: move the peerShardMapper creation before boostrapComponents + peerShardMapper := disabled.NewPeerShardMapper() + hardforkTrigger := disabledFactory.HardforkTrigger() containerFactoryArgs := interceptorscontainer.CommonInterceptorsContainerFactoryArgs{ - CoreComponents: args.CoreComponents, - CryptoComponents: cryptoComponents, - ShardCoordinator: args.ShardCoordinator, - NodesCoordinator: nodesCoordinator, - Messenger: args.Messenger, - Store: storer, - DataPool: args.DataPool, - Accounts: accountsAdapter, - MaxTxNonceDeltaAllowed: common.MaxTxNonceDeltaAllowed, - TxFeeHandler: feeHandler, - BlockBlackList: blackListHandler, - HeaderSigVerifier: headerSigVerifier, - HeaderIntegrityVerifier: args.HeaderIntegrityVerifier, - SizeCheckDelta: uint32(sizeCheckDelta), - ValidityAttester: validityAttester, - EpochStartTrigger: epochStartTrigger, - WhiteListHandler: args.WhiteListHandler, - WhiteListerVerifiedTxs: 
args.WhiteListerVerifiedTxs, - AntifloodHandler: antiFloodHandler, - ArgumentsParser: args.ArgumentsParser, - EnableSignTxWithHashEpoch: args.EnableSignTxWithHashEpoch, - PreferredPeersHolder: disabled.NewPreferredPeersHolder(), - RequestHandler: args.RequestHandler, + CoreComponents: args.CoreComponents, + CryptoComponents: cryptoComponents, + Accounts: accountsAdapter, + ShardCoordinator: args.ShardCoordinator, + NodesCoordinator: nodesCoordinator, + Messenger: args.Messenger, + Store: storer, + DataPool: args.DataPool, + MaxTxNonceDeltaAllowed: common.MaxTxNonceDeltaAllowed, + TxFeeHandler: feeHandler, + BlockBlackList: blackListHandler, + HeaderSigVerifier: headerSigVerifier, + HeaderIntegrityVerifier: args.HeaderIntegrityVerifier, + ValidityAttester: validityAttester, + EpochStartTrigger: epochStartTrigger, + WhiteListHandler: args.WhiteListHandler, + WhiteListerVerifiedTxs: args.WhiteListerVerifiedTxs, + AntifloodHandler: antiFloodHandler, + ArgumentsParser: args.ArgumentsParser, + PreferredPeersHolder: disabled.NewPreferredPeersHolder(), + SizeCheckDelta: uint32(sizeCheckDelta), + EnableSignTxWithHashEpoch: args.EnableSignTxWithHashEpoch, + RequestHandler: args.RequestHandler, + PeerSignatureHandler: cryptoComponents.PeerSignatureHandler(), + SignaturesHandler: args.SignaturesHandler, + HeartbeatExpiryTimespanInSec: args.Config.HeartbeatV2.HeartbeatExpiryTimespanInSec, + PeerShardMapper: peerShardMapper, + HardforkTrigger: hardforkTrigger, } interceptorsContainerFactory, err := interceptorscontainer.NewMetaInterceptorsContainerFactory(containerFactoryArgs) diff --git a/epochStart/bootstrap/interface.go b/epochStart/bootstrap/interface.go index 77adc810bd2..a80666b7305 100644 --- a/epochStart/bootstrap/interface.go +++ b/epochStart/bootstrap/interface.go @@ -38,6 +38,7 @@ type Messenger interface { UnregisterAllMessageProcessors() error UnjoinAllTopics() error ConnectedPeers() []core.PeerID + Verify(payload []byte, pid core.PeerID, signature []byte) error } // RequestHandler defines which methods a request handler should implement diff --git a/epochStart/bootstrap/process.go b/epochStart/bootstrap/process.go index c89f21f5a7d..8af08572965 100644 --- a/epochStart/bootstrap/process.go +++ b/epochStart/bootstrap/process.go @@ -518,7 +518,6 @@ func (e *epochStartBootstrap) prepareComponentsToSyncFromNetwork() error { func (e *epochStartBootstrap) createSyncers() error { var err error - args := factoryInterceptors.ArgsEpochStartInterceptorContainer{ CoreComponents: e.coreComponentsHolder, CryptoComponents: e.cryptoComponentsHolder, @@ -533,6 +532,7 @@ func (e *epochStartBootstrap) createSyncers() error { EnableSignTxWithHashEpoch: e.enableEpochs.TransactionSignedWithTxHashEnableEpoch, EpochNotifier: e.epochNotifier, RequestHandler: e.requestHandler, + SignaturesHandler: e.messenger, } e.interceptorContainer, err = factoryInterceptors.NewEpochStartInterceptorsContainer(args) @@ -1150,6 +1150,10 @@ func (e *epochStartBootstrap) createRequestHandler() error { CurrentNetworkEpochProvider: disabled.NewCurrentNetworkEpochProviderHandler(), PreferredPeersHolder: disabled.NewPreferredPeersHolder(), ResolverConfig: e.generalConfig.Resolvers, + PeersRatingHandler: disabled.NewDisabledPeersRatingHandler(), + NodesCoordinator: disabled.NewNodesCoordinator(), + MaxNumOfPeerAuthenticationInResponse: e.generalConfig.HeartbeatV2.MaxNumOfPeerAuthenticationInResponse, + PeerShardMapper: disabled.NewPeerShardMapper(), } resolverFactory, err := 
resolverscontainer.NewMetaResolversContainerFactory(resolversContainerArgs) if err != nil { @@ -1186,8 +1190,6 @@ func (e *epochStartBootstrap) createRequestHandler() error { func (e *epochStartBootstrap) setEpochStartMetrics() { if !check.IfNil(e.epochStartMeta) { metablockEconomics := e.epochStartMeta.GetEpochStartHandler().GetEconomicsHandler() - e.statusHandler.SetUInt64Value(common.MetricNonceAtEpochStart, e.epochStartMeta.GetNonce()) - e.statusHandler.SetUInt64Value(common.MetricRoundAtEpochStart, e.epochStartMeta.GetRound()) e.statusHandler.SetStringValue(common.MetricTotalSupply, metablockEconomics.GetTotalSupply().String()) e.statusHandler.SetStringValue(common.MetricInflation, metablockEconomics.GetTotalNewlyMinted().String()) e.statusHandler.SetStringValue(common.MetricTotalFees, e.epochStartMeta.GetAccumulatedFees().String()) @@ -1238,13 +1240,12 @@ func (e *epochStartBootstrap) Close() error { e.closeTrieComponents() - if !check.IfNil(e.dataPool) && !check.IfNil(e.dataPool.TrieNodes()) { - log.Debug("closing trie nodes data pool....") - err := e.dataPool.TrieNodes().Close() - log.LogIfError(err) + var err error + if !check.IfNil(e.dataPool) { + err = e.dataPool.Close() } - return nil + return err } // IsInterfaceNil returns true if there is no value under the interface diff --git a/epochStart/bootstrap/process_test.go b/epochStart/bootstrap/process_test.go index f9efb9b0880..ba86865f456 100644 --- a/epochStart/bootstrap/process_test.go +++ b/epochStart/bootstrap/process_test.go @@ -74,13 +74,15 @@ func createComponentsForEpochStart() (*mock.CoreComponentsMock, *mock.CryptoComp TxVersionCheckField: versioning.NewTxVersionChecker(1), NodeTypeProviderField: &nodeTypeProviderMock.NodeTypeProviderStub{}, ProcessStatusHandlerInstance: &testscommon.ProcessStatusHandlerStub{}, + HardforkTriggerPubKeyField: []byte("provided hardfork pub key"), }, &mock.CryptoComponentsMock{ - PubKey: &cryptoMocks.PublicKeyStub{}, - BlockSig: &cryptoMocks.SignerStub{}, - TxSig: &cryptoMocks.SignerStub{}, - BlKeyGen: &cryptoMocks.KeyGenStub{}, - TxKeyGen: &cryptoMocks.KeyGenStub{}, + PubKey: &cryptoMocks.PublicKeyStub{}, + BlockSig: &cryptoMocks.SignerStub{}, + TxSig: &cryptoMocks.SignerStub{}, + BlKeyGen: &cryptoMocks.KeyGenStub{}, + TxKeyGen: &cryptoMocks.KeyGenStub{}, + PeerSignHandler: &cryptoMocks.PeerSignatureHandlerStub{}, } } @@ -116,6 +118,8 @@ func createMockEpochStartBootstrapArgs( AccountsTrieCheckpointsStorage: generalCfg.AccountsTrieCheckpointsStorage, PeerAccountsTrieCheckpointsStorage: generalCfg.PeerAccountsTrieCheckpointsStorage, Heartbeat: generalCfg.Heartbeat, + HeartbeatV2: generalCfg.HeartbeatV2, + Hardfork: generalCfg.Hardfork, EvictionWaitingList: config.EvictionWaitingListConfig{ HashesSize: 100, RootHashesSize: 100, @@ -898,6 +902,12 @@ func TestCreateSyncers(t *testing.T) { TrieNodesCalled: func() storage.Cacher { return testscommon.NewCacherStub() }, + PeerAuthenticationsCalled: func() storage.Cacher { + return testscommon.NewCacherStub() + }, + HeartbeatsCalled: func() storage.Cacher { + return testscommon.NewCacherStub() + }, } epochStartProvider.whiteListHandler = &testscommon.WhiteListHandlerStub{} epochStartProvider.whiteListerVerifiedTxs = &testscommon.WhiteListHandlerStub{} @@ -2247,3 +2257,20 @@ func TestEpochStartBootstrap_ComputeAllPendingMiniblocks(t *testing.T) { assert.Equal(t, pendingMiniblocksHashes[i], allPendingMiniblocksHeaders[i].GetHash()) } } + +func TestEpochStartBootstrap_Close(t *testing.T) { + t.Parallel() + + expectedErr := errors.New("expected 
error") + coreComp, cryptoComp := createComponentsForEpochStart() + args := createMockEpochStartBootstrapArgs(coreComp, cryptoComp) + + epochStartProvider, _ := NewEpochStartBootstrap(args) + epochStartProvider.dataPool = &dataRetrieverMock.PoolsHolderStub{ + CloseCalled: func() error { + return expectedErr + }} + + err := epochStartProvider.Close() + assert.Equal(t, expectedErr, err) +} diff --git a/epochStart/bootstrap/shardStorageHandler.go b/epochStart/bootstrap/shardStorageHandler.go index c740ed70c65..aca185d26ea 100644 --- a/epochStart/bootstrap/shardStorageHandler.go +++ b/epochStart/bootstrap/shardStorageHandler.go @@ -10,6 +10,7 @@ import ( "github.com/ElrondNetwork/elrond-go-core/core/check" "github.com/ElrondNetwork/elrond-go-core/data" "github.com/ElrondNetwork/elrond-go-core/data/block" + logger "github.com/ElrondNetwork/elrond-go-logger" "github.com/ElrondNetwork/elrond-go/common" "github.com/ElrondNetwork/elrond-go/dataRetriever" "github.com/ElrondNetwork/elrond-go/epochStart" @@ -167,14 +168,12 @@ func getEpochStartShardData(metaBlock data.MetaHeaderHandler, shardId uint32) (d return &block.EpochStartShardData{}, epochStart.ErrEpochStartDataForShardNotFound } -func (ssh *shardStorageHandler) getCrossProcessedMbsDestMeByHeader( - shardHeader data.ShardHeaderHandler, -) map[uint32][]data.MiniBlockHeaderHandler { - crossMbsProcessed := make(map[uint32][]data.MiniBlockHeaderHandler) +func (ssh *shardStorageHandler) getCrossProcessedMiniBlockHeadersDestMe(shardHeader data.ShardHeaderHandler) map[string]data.MiniBlockHeaderHandler { + crossMbsProcessed := make(map[string]data.MiniBlockHeaderHandler) processedMiniBlockHeaders := shardHeader.GetMiniBlockHeaderHandlers() ownShardID := shardHeader.GetShardID() - for i, mbHeader := range processedMiniBlockHeaders { + for index, mbHeader := range processedMiniBlockHeaders { if mbHeader.GetReceiverShardID() != ownShardID { continue } @@ -182,16 +181,86 @@ func (ssh *shardStorageHandler) getCrossProcessedMbsDestMeByHeader( continue } - mbs, ok := crossMbsProcessed[mbHeader.GetSenderShardID()] - if !ok { - mbs = make([]data.MiniBlockHeaderHandler, 0) + crossMbsProcessed[string(mbHeader.GetHash())] = processedMiniBlockHeaders[index] + } + + return crossMbsProcessed +} + +func getProcessedMiniBlocksForFinishedMeta( + referencedMetaBlockHashes [][]byte, + headers map[string]data.HeaderHandler, + selfShardID uint32, +) ([]bootstrapStorage.MiniBlocksInMeta, error) { + + processedMiniBlocks := make([]bootstrapStorage.MiniBlocksInMeta, 0) + for i := 0; i < len(referencedMetaBlockHashes)-1; i++ { + neededMeta, err := getNeededMetaBlock(referencedMetaBlockHashes[i], headers) + if err != nil { + return nil, err } - mbs = append(mbs, processedMiniBlockHeaders[i]) - crossMbsProcessed[mbHeader.GetSenderShardID()] = mbs + log.Debug("getProcessedMiniBlocksForFinishedMeta", "meta block hash", referencedMetaBlockHashes[i]) + processedMiniBlocks = getProcessedMiniBlocks(neededMeta, selfShardID, processedMiniBlocks, referencedMetaBlockHashes[i]) } - return crossMbsProcessed + return processedMiniBlocks, nil +} + +func getNeededMetaBlock( + referencedMetaBlockHash []byte, + headers map[string]data.HeaderHandler, +) (*block.MetaBlock, error) { + header, ok := headers[string(referencedMetaBlockHash)] + if !ok { + return nil, fmt.Errorf("%w in getProcessedMiniBlocksForFinishedMeta: hash: %s", + epochStart.ErrMissingHeader, + hex.EncodeToString(referencedMetaBlockHash)) + } + + neededMeta, ok := header.(*block.MetaBlock) + if !ok { + return nil, 
epochStart.ErrWrongTypeAssertion + } + if check.IfNil(neededMeta) { + return nil, epochStart.ErrNilMetaBlock + } + + return neededMeta, nil +} + +func getProcessedMiniBlocks( + metaBlock *block.MetaBlock, + shardID uint32, + processedMiniBlocks []bootstrapStorage.MiniBlocksInMeta, + referencedMetaBlockHash []byte, +) []bootstrapStorage.MiniBlocksInMeta { + + miniBlockHeadersDestMe := getMiniBlockHeadersForDest(metaBlock, shardID) + + requiredLength := len(miniBlockHeadersDestMe) + miniBlockHashes := make([][]byte, 0, requiredLength) + fullyProcessed := make([]bool, 0, requiredLength) + indexOfLastTxProcessed := make([]int32, 0, requiredLength) + + for mbHash, mbHeader := range miniBlockHeadersDestMe { + log.Debug("getProcessedMiniBlocks", "mb hash", mbHash) + + miniBlockHashes = append(miniBlockHashes, []byte(mbHash)) + fullyProcessed = append(fullyProcessed, mbHeader.IsFinal()) + indexOfLastTxProcessed = append(indexOfLastTxProcessed, mbHeader.GetIndexOfLastTxProcessed()) + } + + if len(miniBlockHashes) > 0 { + processedMiniBlocks = append(processedMiniBlocks, bootstrapStorage.MiniBlocksInMeta{ + MetaHash: referencedMetaBlockHash, + MiniBlocksHashes: miniBlockHashes, + FullyProcessed: fullyProcessed, + IndexOfLastTxProcessed: indexOfLastTxProcessed, + }) + } + + return processedMiniBlocks } func (ssh *shardStorageHandler) getProcessedAndPendingMiniBlocksWithScheduled( @@ -207,7 +276,7 @@ func (ssh *shardStorageHandler) getProcessedAndPendingMiniBlocksWithScheduled( } log.Debug("getProcessedAndPendingMiniBlocksWithScheduled: initial processed and pending for scheduled") - printProcessedAndPendingMbs(processedMiniBlocks, pendingMiniBlocks) + displayProcessedAndPendingMiniBlocks(processedMiniBlocks, pendingMiniBlocks) if !withScheduled { return processedMiniBlocks, pendingMiniBlocks, nil @@ -218,25 +287,32 @@ func (ssh *shardStorageHandler) getProcessedAndPendingMiniBlocksWithScheduled( return nil, nil, epochStart.ErrWrongTypeAssertion } + mapHashMiniBlockHeaders := ssh.getCrossProcessedMiniBlockHeadersDestMe(shardHeader) + referencedMetaBlocks := shardHeader.GetMetaBlockHashes() if len(referencedMetaBlocks) == 0 { referencedMetaBlocks = append(referencedMetaBlocks, firstPendingMetaBlockHash) } - mapMbHeaderHandlers := ssh.getCrossProcessedMbsDestMeByHeader(shardHeader) - pendingMiniBlocks = addMbsToPending(pendingMiniBlocks, mapMbHeaderHandlers) - pendingMiniBlockHashes := getPendingMiniBlocksHashes(pendingMiniBlocks) - processedMiniBlocks, err = updateProcessedMiniBlocksForScheduled(referencedMetaBlocks, pendingMiniBlockHashes, headers, ssh.shardCoordinator.SelfId()) + processedMiniBlockForFinishedMeta, err := getProcessedMiniBlocksForFinishedMeta(referencedMetaBlocks, headers, ssh.shardCoordinator.SelfId()) + if err != nil { + return nil, nil, err + } + + processedMiniBlocks = append(processedMiniBlockForFinishedMeta, processedMiniBlocks...) 
+ processedMiniBlocks, err = updateProcessedMiniBlocksForScheduled(processedMiniBlocks, mapHashMiniBlockHeaders) if err != nil { return nil, nil, err } + + pendingMiniBlocks = addMiniBlocksToPending(pendingMiniBlocks, mapHashMiniBlockHeaders) pendingMiniBlocks, err = updatePendingMiniBlocksForScheduled(referencedMetaBlocks, pendingMiniBlocks, headers, ssh.shardCoordinator.SelfId()) if err != nil { return nil, nil, err } log.Debug("getProcessedAndPendingMiniBlocksWithScheduled: updated processed and pending for scheduled") - printProcessedAndPendingMbs(processedMiniBlocks, pendingMiniBlocks) + displayProcessedAndPendingMiniBlocks(processedMiniBlocks, pendingMiniBlocks) return processedMiniBlocks, pendingMiniBlocks, nil } @@ -251,30 +327,57 @@ func getPendingMiniBlocksHashes(pendingMbsInfo []bootstrapStorage.PendingMiniBlo } func updateProcessedMiniBlocksForScheduled( - referencedMetaBlockHashes [][]byte, - pendingMiniBlockHashes [][]byte, - headers map[string]data.HeaderHandler, - selfShardID uint32, + processedMiniBlocks []bootstrapStorage.MiniBlocksInMeta, + mapHashMiniBlockHeaders map[string]data.MiniBlockHeaderHandler, ) ([]bootstrapStorage.MiniBlocksInMeta, error) { - miniBlocksInMetaList := make([]bootstrapStorage.MiniBlocksInMeta, 0) - for _, metaBlockHash := range referencedMetaBlockHashes { - mbsInMeta := bootstrapStorage.MiniBlocksInMeta{ - MetaHash: metaBlockHash, - } - mbHashes, err := getProcessedMiniBlockHashesForMetaBlockHash(selfShardID, metaBlockHash, headers) - if err != nil { - return nil, err + + remainingProcessedMiniBlocks := make([]bootstrapStorage.MiniBlocksInMeta, 0) + + for _, miniBlocksInMeta := range processedMiniBlocks { + remainingProcessedMiniBlocks = getProcessedMiniBlocksForScheduled(miniBlocksInMeta, mapHashMiniBlockHeaders, remainingProcessedMiniBlocks) + } + + return remainingProcessedMiniBlocks, nil +} + +func getProcessedMiniBlocksForScheduled( + miniBlocksInMeta bootstrapStorage.MiniBlocksInMeta, + mapHashMiniBlockHeaders map[string]data.MiniBlockHeaderHandler, + remainingProcessedMiniBlocks []bootstrapStorage.MiniBlocksInMeta, +) []bootstrapStorage.MiniBlocksInMeta { + + miniBlockHashes := make([][]byte, 0) + fullyProcessed := make([]bool, 0) + indexOfLastTxProcessed := make([]int32, 0) + + for index := range miniBlocksInMeta.MiniBlocksHashes { + mbHash := miniBlocksInMeta.MiniBlocksHashes[index] + mbHeader, ok := mapHashMiniBlockHeaders[string(mbHash)] + if !ok { + miniBlockHashes = append(miniBlockHashes, mbHash) + fullyProcessed = append(fullyProcessed, miniBlocksInMeta.FullyProcessed[index]) + indexOfLastTxProcessed = append(indexOfLastTxProcessed, miniBlocksInMeta.IndexOfLastTxProcessed[index]) + continue } - if len(mbHashes) > 0 { - remainingMbHashes := removeHashes(mbHashes, pendingMiniBlockHashes) - if len(remainingMbHashes) > 0 { - mbsInMeta.MiniBlocksHashes = remainingMbHashes - miniBlocksInMetaList = append(miniBlocksInMetaList, mbsInMeta) - } + + indexOfFirstTxProcessed := mbHeader.GetIndexOfFirstTxProcessed() + if indexOfFirstTxProcessed > 0 { + miniBlockHashes = append(miniBlockHashes, mbHash) + fullyProcessed = append(fullyProcessed, false) + indexOfLastTxProcessed = append(indexOfLastTxProcessed, indexOfFirstTxProcessed-1) } } - return miniBlocksInMetaList, nil + if len(miniBlockHashes) > 0 { + remainingProcessedMiniBlocks = append(remainingProcessedMiniBlocks, bootstrapStorage.MiniBlocksInMeta{ + MetaHash: miniBlocksInMeta.MetaHash, + MiniBlocksHashes: miniBlockHashes, + FullyProcessed: fullyProcessed, + IndexOfLastTxProcessed: 
indexOfLastTxProcessed, + }) + } + + return remainingProcessedMiniBlocks } func updatePendingMiniBlocksForScheduled( @@ -286,6 +389,9 @@ func updatePendingMiniBlocksForScheduled( remainingPendingMiniBlocks := make([]bootstrapStorage.PendingMiniBlocksInfo, 0) for index, metaBlockHash := range referencedMetaBlockHashes { if index == 0 { + // There could be situations when even first meta block referenced in one shard block was started + // and finalized here, so the pending mini blocks could be removed at all. Anyway, even if they will remain + // as pending here, this is not critical, as they count only for isShardStuck analysis continue } mbHashes, err := getProcessedMiniBlockHashesForMetaBlockHash(selfShardID, metaBlockHash, headers) @@ -325,7 +431,12 @@ func getProcessedMiniBlockHashesForMetaBlockHash( if !ok { return nil, epochStart.ErrWrongTypeAssertion } - mbHashes := getProcessedMbHashes(neededMeta, selfShardID, noPendingMbs) + mbHeaders := getProcessedMiniBlockHeaders(neededMeta, selfShardID, noPendingMbs) + mbHashes := make([][]byte, 0) + for mbHash := range mbHeaders { + mbHashes = append(mbHashes, []byte(mbHash)) + } + return mbHashes, nil } @@ -350,36 +461,61 @@ func removeHash(hashes [][]byte, hashToRemove []byte) [][]byte { return append(result, hashes...) } -func printProcessedAndPendingMbs(processedMiniBlocks []bootstrapStorage.MiniBlocksInMeta, pendingMiniBlocks []bootstrapStorage.PendingMiniBlocksInfo) { +func displayProcessedAndPendingMiniBlocks(processedMiniBlocks []bootstrapStorage.MiniBlocksInMeta, pendingMiniBlocks []bootstrapStorage.PendingMiniBlocksInfo) { + if log.GetLevel() > logger.LogDebug { + return + } + for _, miniBlocksInMeta := range processedMiniBlocks { - log.Debug("processed meta block", "hash", miniBlocksInMeta.MetaHash) - for _, mbHash := range miniBlocksInMeta.MiniBlocksHashes { - log.Debug("processedMiniBlock", "hash", mbHash) - } + displayProcessedMiniBlocksInMeta(miniBlocksInMeta) } for _, pendingMbsInShard := range pendingMiniBlocks { - log.Debug("shard", "shardID", pendingMbsInShard.ShardID) - for _, mbHash := range pendingMbsInShard.MiniBlocksHashes { - log.Debug("pendingMiniBlock", "hash", mbHash) - } + displayPendingMiniBlocks(pendingMbsInShard) } } -func addMbToPendingList( - mbHandler data.MiniBlockHeaderHandler, +func displayProcessedMiniBlocksInMeta(miniBlocksInMeta bootstrapStorage.MiniBlocksInMeta) { + log.Debug("processed meta block", "hash", miniBlocksInMeta.MetaHash) + + for index, mbHash := range miniBlocksInMeta.MiniBlocksHashes { + fullyProcessed := miniBlocksInMeta.IsFullyProcessed(index) + indexOfLastTxProcessed := miniBlocksInMeta.GetIndexOfLastTxProcessedInMiniBlock(index) + + log.Debug("processedMiniBlock", "hash", mbHash, + "index of last tx processed", indexOfLastTxProcessed, + "fully processed", fullyProcessed) + } +} + +func displayPendingMiniBlocks(pendingMbsInShard bootstrapStorage.PendingMiniBlocksInfo) { + log.Debug("shard", "shardID", pendingMbsInShard.ShardID) + + for _, mbHash := range pendingMbsInShard.MiniBlocksHashes { + log.Debug("pendingMiniBlock", "hash", mbHash) + } +} + +func addMiniBlockToPendingList( + mbHeader data.MiniBlockHeaderHandler, pendingMiniBlocks []bootstrapStorage.PendingMiniBlocksInfo, ) []bootstrapStorage.PendingMiniBlocksInfo { for i := range pendingMiniBlocks { - if pendingMiniBlocks[i].ShardID == mbHandler.GetReceiverShardID() { - pendingMiniBlocks[i].MiniBlocksHashes = append(pendingMiniBlocks[i].MiniBlocksHashes, mbHandler.GetHash()) + if pendingMiniBlocks[i].ShardID != 
mbHeader.GetReceiverShardID() { + continue + } + + if checkIfMiniBlockIsAlreadyAddedAsPending(mbHeader, pendingMiniBlocks[i]) { return pendingMiniBlocks } + + pendingMiniBlocks[i].MiniBlocksHashes = append(pendingMiniBlocks[i].MiniBlocksHashes, mbHeader.GetHash()) + return pendingMiniBlocks } pendingMbInfo := bootstrapStorage.PendingMiniBlocksInfo{ - ShardID: mbHandler.GetReceiverShardID(), - MiniBlocksHashes: [][]byte{mbHandler.GetHash()}, + ShardID: mbHeader.GetReceiverShardID(), + MiniBlocksHashes: [][]byte{mbHeader.GetHash()}, } pendingMiniBlocks = append(pendingMiniBlocks, pendingMbInfo) @@ -387,14 +523,25 @@ func addMbToPendingList( return pendingMiniBlocks } -func addMbsToPending( +func checkIfMiniBlockIsAlreadyAddedAsPending( + mbHeader data.MiniBlockHeaderHandler, + pendingMiniBlocks bootstrapStorage.PendingMiniBlocksInfo, +) bool { + for _, mbHash := range pendingMiniBlocks.MiniBlocksHashes { + if bytes.Equal(mbHash, mbHeader.GetHash()) { + return true + } + } + + return false +} + +func addMiniBlocksToPending( pendingMiniBlocks []bootstrapStorage.PendingMiniBlocksInfo, - mapMbHeaderHandlers map[uint32][]data.MiniBlockHeaderHandler, + mapHashMiniBlockHeaders map[string]data.MiniBlockHeaderHandler, ) []bootstrapStorage.PendingMiniBlocksInfo { - for _, pendingMbs := range mapMbHeaderHandlers { - for _, pendingMb := range pendingMbs { - pendingMiniBlocks = addMbToPendingList(pendingMb, pendingMiniBlocks) - } + for _, miniBlockHeader := range mapHashMiniBlockHeaders { + pendingMiniBlocks = addMiniBlockToPendingList(miniBlockHeader, pendingMiniBlocks) } return pendingMiniBlocks @@ -404,65 +551,136 @@ func (ssh *shardStorageHandler) getProcessedAndPendingMiniBlocks( meta data.MetaHeaderHandler, headers map[string]data.HeaderHandler, ) ([]bootstrapStorage.MiniBlocksInMeta, []bootstrapStorage.PendingMiniBlocksInfo, []byte, error) { - epochShardData, err := getEpochStartShardData(meta, ssh.shardCoordinator.SelfId()) + + epochShardData, neededMeta, err := getEpochShardDataAndNeededMetaBlock(ssh.shardCoordinator.SelfId(), meta, headers) if err != nil { return nil, nil, nil, err } + mbInfo := getMiniBlocksInfo(epochShardData, neededMeta, ssh.shardCoordinator.SelfId()) + processedMiniBlocks, pendingMiniBlocks := createProcessedAndPendingMiniBlocks(mbInfo, epochShardData) + + return processedMiniBlocks, pendingMiniBlocks, epochShardData.GetFirstPendingMetaBlock(), nil +} + +func getEpochShardDataAndNeededMetaBlock( + shardID uint32, + meta data.MetaHeaderHandler, + headers map[string]data.HeaderHandler, +) (data.EpochStartShardDataHandler, *block.MetaBlock, error) { + + epochShardData, err := getEpochStartShardData(meta, shardID) + if err != nil { + return nil, nil, err + } + header, ok := headers[string(epochShardData.GetFirstPendingMetaBlock())] if !ok { - return nil, nil, nil, fmt.Errorf("%w in getProcessedAndPendingMiniBlocks: hash: %s", + return nil, nil, fmt.Errorf("%w in getEpochShardDataAndNeededMetaBlock: hash: %s", epochStart.ErrMissingHeader, hex.EncodeToString(epochShardData.GetFirstPendingMetaBlock())) } + neededMeta, ok := header.(*block.MetaBlock) if !ok { - return nil, nil, nil, epochStart.ErrWrongTypeAssertion + return nil, nil, epochStart.ErrWrongTypeAssertion } if check.IfNil(neededMeta) { - return nil, nil, nil, epochStart.ErrNilMetaBlock + return nil, nil, epochStart.ErrNilMetaBlock } - pendingMBsMap := make(map[string]struct{}) - pendingMBsPerShardMap := make(map[uint32][][]byte) + return epochShardData, neededMeta, nil +} + +func getMiniBlocksInfo(epochShardData 
data.EpochStartShardDataHandler, neededMeta *block.MetaBlock, shardID uint32) *miniBlockInfo { + mbInfo := &miniBlockInfo{ + miniBlockHashes: make([][]byte, 0), + fullyProcessed: make([]bool, 0), + indexOfLastTxProcessed: make([]int32, 0), + pendingMiniBlocksMap: make(map[string]struct{}), + pendingMiniBlocksPerShardMap: make(map[uint32][][]byte), + } + + setMiniBlockInfoWithPendingMiniBlocks(epochShardData, mbInfo) + setMiniBlockInfoWithProcessedMiniBlocks(neededMeta, shardID, mbInfo) + + return mbInfo +} + +func setMiniBlockInfoWithPendingMiniBlocks(epochShardData data.EpochStartShardDataHandler, mbInfo *miniBlockInfo) { for _, mbHeader := range epochShardData.GetPendingMiniBlockHeaderHandlers() { + log.Debug("shardStorageHandler.setMiniBlockInfoWithPendingMiniBlocks", + "mb hash", mbHeader.GetHash(), + "len(reserved)", len(mbHeader.GetReserved()), + "index of first tx processed", mbHeader.GetIndexOfFirstTxProcessed(), + "index of last tx processed", mbHeader.GetIndexOfLastTxProcessed(), + ) + receiverShardID := mbHeader.GetReceiverShardID() - pendingMBsPerShardMap[receiverShardID] = append(pendingMBsPerShardMap[receiverShardID], mbHeader.GetHash()) - pendingMBsMap[string(mbHeader.GetHash())] = struct{}{} + mbInfo.pendingMiniBlocksPerShardMap[receiverShardID] = append(mbInfo.pendingMiniBlocksPerShardMap[receiverShardID], mbHeader.GetHash()) + mbInfo.pendingMiniBlocksMap[string(mbHeader.GetHash())] = struct{}{} + + if mbHeader.GetIndexOfLastTxProcessed() > -1 { + mbInfo.miniBlockHashes = append(mbInfo.miniBlockHashes, mbHeader.GetHash()) + mbInfo.fullyProcessed = append(mbInfo.fullyProcessed, false) + mbInfo.indexOfLastTxProcessed = append(mbInfo.indexOfLastTxProcessed, mbHeader.GetIndexOfLastTxProcessed()) + } } +} + +func setMiniBlockInfoWithProcessedMiniBlocks(neededMeta *block.MetaBlock, shardID uint32, mbInfo *miniBlockInfo) { + miniBlockHeaders := getProcessedMiniBlockHeaders(neededMeta, shardID, mbInfo.pendingMiniBlocksMap) + for mbHash, mbHeader := range miniBlockHeaders { + log.Debug("shardStorageHandler.setMiniBlockInfoWithProcessedMiniBlocks", + "mb hash", mbHeader.GetHash(), + "len(reserved)", len(mbHeader.GetReserved()), + "index of first tx processed", mbHeader.GetIndexOfFirstTxProcessed(), + "index of last tx processed", mbHeader.GetIndexOfLastTxProcessed(), + ) + + mbInfo.miniBlockHashes = append(mbInfo.miniBlockHashes, []byte(mbHash)) + mbInfo.fullyProcessed = append(mbInfo.fullyProcessed, mbHeader.IsFinal()) + mbInfo.indexOfLastTxProcessed = append(mbInfo.indexOfLastTxProcessed, mbHeader.GetIndexOfLastTxProcessed()) + } +} - processedMbHashes := getProcessedMbHashes(neededMeta, ssh.shardCoordinator.SelfId(), pendingMBsMap) +func createProcessedAndPendingMiniBlocks( + mbInfo *miniBlockInfo, + epochShardData data.EpochStartShardDataHandler, +) ([]bootstrapStorage.MiniBlocksInMeta, []bootstrapStorage.PendingMiniBlocksInfo) { processedMiniBlocks := make([]bootstrapStorage.MiniBlocksInMeta, 0) - if len(processedMbHashes) > 0 { + if len(mbInfo.miniBlockHashes) > 0 { processedMiniBlocks = append(processedMiniBlocks, bootstrapStorage.MiniBlocksInMeta{ - MetaHash: epochShardData.GetFirstPendingMetaBlock(), - MiniBlocksHashes: processedMbHashes, + MetaHash: epochShardData.GetFirstPendingMetaBlock(), + MiniBlocksHashes: mbInfo.miniBlockHashes, + FullyProcessed: mbInfo.fullyProcessed, + IndexOfLastTxProcessed: mbInfo.indexOfLastTxProcessed, }) } - sliceToRet := make([]bootstrapStorage.PendingMiniBlocksInfo, 0) - for shardID, hashes := range pendingMBsPerShardMap { - sliceToRet = 
append(sliceToRet, bootstrapStorage.PendingMiniBlocksInfo{ - ShardID: shardID, - MiniBlocksHashes: hashes, + pendingMiniBlocks := make([]bootstrapStorage.PendingMiniBlocksInfo, 0) + for receiverShardID, mbHashes := range mbInfo.pendingMiniBlocksPerShardMap { + pendingMiniBlocks = append(pendingMiniBlocks, bootstrapStorage.PendingMiniBlocksInfo{ + ShardID: receiverShardID, + MiniBlocksHashes: mbHashes, }) } - return processedMiniBlocks, sliceToRet, epochShardData.GetFirstPendingMetaBlock(), nil + return processedMiniBlocks, pendingMiniBlocks } -func getProcessedMbHashes(metaBlock *block.MetaBlock, destShardID uint32, pendingMBsMap map[string]struct{}) [][]byte { - processedMbHashes := make([][]byte, 0) - miniBlocksDstMe := getNewPendingMiniBlocksForDst(metaBlock, destShardID) - for hash, mb := range miniBlocksDstMe { +func getProcessedMiniBlockHeaders(metaBlock *block.MetaBlock, destShardID uint32, pendingMBsMap map[string]struct{}) map[string]block.MiniBlockHeader { + processedMiniBlockHeaders := make(map[string]block.MiniBlockHeader) + miniBlockHeadersDestMe := getMiniBlockHeadersForDest(metaBlock, destShardID) + for hash, mbh := range miniBlockHeadersDestMe { if _, hashExists := pendingMBsMap[hash]; hashExists { continue } - processedMbHashes = append(processedMbHashes, mb.Hash) + processedMiniBlockHeaders[hash] = mbh } - return processedMbHashes + return processedMiniBlockHeaders } func (ssh *shardStorageHandler) saveLastCrossNotarizedHeaders( @@ -615,7 +833,7 @@ func (ssh *shardStorageHandler) saveTriggerRegistry(components *ComponentsNeeded return bootstrapKey, nil } -func getNewPendingMiniBlocksForDst(metaBlock *block.MetaBlock, destId uint32) map[string]block.MiniBlockHeader { +func getMiniBlockHeadersForDest(metaBlock *block.MetaBlock, destId uint32) map[string]block.MiniBlockHeader { hashDst := make(map[string]block.MiniBlockHeader) for i := 0; i < len(metaBlock.ShardInfo); i++ { if metaBlock.ShardInfo[i].ShardID == destId { diff --git a/epochStart/bootstrap/shardStorageHandler_test.go b/epochStart/bootstrap/shardStorageHandler_test.go index 094e6e3dad5..1df9d2df592 100644 --- a/epochStart/bootstrap/shardStorageHandler_test.go +++ b/epochStart/bootstrap/shardStorageHandler_test.go @@ -111,7 +111,7 @@ func TestShardStorageHandler_SaveDataToStorage(t *testing.T) { assert.Nil(t, err) } -func Test_getNewPendingMiniBlocksForDst(t *testing.T) { +func Test_getMiniBlockHeadersForDest(t *testing.T) { t.Parallel() hash1 := []byte("hash1") @@ -135,12 +135,12 @@ func Test_getNewPendingMiniBlocksForDst(t *testing.T) { }, } - shardMbHeaders := getNewPendingMiniBlocksForDst(metablock, 0) + shardMbHeaders := getMiniBlockHeadersForDest(metablock, 0) assert.Equal(t, shardMbHeaders[string(hash1)], shardMiniBlockHeader) assert.NotNil(t, shardMbHeaders[string(hash2)]) } -func TestShardStorageHandler_getCrossProcessedMbsDestMeByHeader(t *testing.T) { +func TestShardStorageHandler_getCrossProcessedMiniBlockHeadersDestMe(t *testing.T) { mb1From1To0 := block.MiniBlockHeader{ Hash: []byte("mb hash1"), SenderShardID: 1, @@ -187,11 +187,13 @@ func TestShardStorageHandler_getCrossProcessedMbsDestMeByHeader(t *testing.T) { MiniBlockHeaders: mbs, } - expectedMbs := map[uint32][]data.MiniBlockHeaderHandler{ - 1: {&mb1From1To0, &mb2From1To0, &mb3From2To0}, + expectedMbs := map[string]data.MiniBlockHeaderHandler{ + string(mb1From1To0.Hash): &mb1From1To0, + string(mb2From1To0.Hash): &mb2From1To0, + string(mb3From2To0.Hash): &mb3From2To0, } - processedMbs := 
shardStorage.getCrossProcessedMbsDestMeByHeader(shardHeader) + processedMbs := shardStorage.getCrossProcessedMiniBlockHeadersDestMe(shardHeader) require.Equal(t, processedMbs, expectedMbs) } @@ -264,7 +266,7 @@ func TestShardStorageHandler_getProcessedAndPendingMiniBlocksWithScheduled(t *te require.Equal(t, scenario.expectedProcessedMbsWithScheduled, processedMiniBlocks) } -func Test_addMbToPendingListNoPreviousEntryForShard(t *testing.T) { +func Test_addMiniBlockToPendingListNoPreviousEntryForShard(t *testing.T) { t.Parallel() mbHash := []byte("hash1") @@ -283,11 +285,11 @@ func Test_addMbToPendingListNoPreviousEntryForShard(t *testing.T) { {ShardID: 0, MiniBlocksHashes: [][]byte{mbHash}}, } - resultingMbsInfo := addMbToPendingList(mbHandler, pendingMbsInfo) + resultingMbsInfo := addMiniBlockToPendingList(mbHandler, pendingMbsInfo) require.Equal(t, expectedPendingMbsInfo, resultingMbsInfo) } -func Test_addMbToPendingListWithPreviousEntryForShard(t *testing.T) { +func Test_addMiniBlockToPendingListWithPreviousEntryForShard(t *testing.T) { t.Parallel() mbHash := []byte("hash1") @@ -308,11 +310,11 @@ func Test_addMbToPendingListWithPreviousEntryForShard(t *testing.T) { {ShardID: 0, MiniBlocksHashes: [][]byte{mbHash2, mbHash}}, } - resultingMbsInfo := addMbToPendingList(mbHandler, pendingMbsInfo) + resultingMbsInfo := addMiniBlockToPendingList(mbHandler, pendingMbsInfo) require.Equal(t, expectedPendingMbsInfo, resultingMbsInfo) } -func Test_addMbsToPending(t *testing.T) { +func Test_addMiniBlocksToPending(t *testing.T) { t.Parallel() mb1Sh1To0Hash := []byte("hash1 1 to 0") @@ -366,10 +368,13 @@ func Test_addMbsToPending(t *testing.T) { mbsToShard1 := []data.MiniBlockHeaderHandler{mb4Header2To1, mb5Header0To1} mbsToMeta := []data.MiniBlockHeaderHandler{mb6Header1ToMeta} - mapMbHeaderHandlers := map[uint32][]data.MiniBlockHeaderHandler{ - 0: mbsToShard0, - 1: mbsToShard1, - core.MetachainShardId: mbsToMeta, + mapMbHeaderHandlers := map[string]data.MiniBlockHeaderHandler{ + string(mbsToShard0[0].GetHash()): mbsToShard0[0], + string(mbsToShard0[1].GetHash()): mbsToShard0[1], + string(mbsToShard0[2].GetHash()): mbsToShard0[2], + string(mbsToShard1[0].GetHash()): mbsToShard1[0], + string(mbsToShard1[1].GetHash()): mbsToShard1[1], + string(mbsToMeta[0].GetHash()): mbsToMeta[0], } expectedPendingMbs := []bootstrapStorage.PendingMiniBlocksInfo{ @@ -378,9 +383,37 @@ func Test_addMbsToPending(t *testing.T) { {ShardID: core.MetachainShardId, MiniBlocksHashes: [][]byte{mb6Sh1ToMetaHash}}, } - pendingMbsInfo := addMbsToPending(pendingMbs, mapMbHeaderHandlers) + pendingMbsInfo := addMiniBlocksToPending(pendingMbs, mapMbHeaderHandlers) - require.Equal(t, expectedPendingMbs, pendingMbsInfo) + mbFound := 0 + for _, pendingMbInfo := range pendingMbsInfo { + for _, mbHash := range pendingMbInfo.MiniBlocksHashes { + mbFound += getExpectedMbHashes(expectedPendingMbs, pendingMbInfo, mbHash) + } + } + + require.Equal(t, 9, mbFound) +} + +func getExpectedMbHashes( + expectedPendingMbs []bootstrapStorage.PendingMiniBlocksInfo, + pendingMbInfo bootstrapStorage.PendingMiniBlocksInfo, + mbHash []byte, +) int { + mbFound := 0 + for _, expectedPendingMb := range expectedPendingMbs { + if expectedPendingMb.ShardID != pendingMbInfo.ShardID { + continue + } + + for _, expectedMbHash := range expectedPendingMb.MiniBlocksHashes { + if bytes.Equal(mbHash, expectedMbHash) { + mbFound++ + } + } + } + + return mbFound } func TestShardStorageHandler_getProcessedAndPendingMiniBlocksErrorGettingEpochStartShardData(t *testing.T) { @@ 
-980,8 +1013,9 @@ func createPendingAndProcessedMiniBlocksScenario() scenarioData { prevShardHeaderHash := "prevShardHeaderHash" shardHeaderHash := "shardHeaderHash" + txCount := uint32(100) crossMbHeaders := []block.MiniBlockHeader{ - {Hash: []byte("mb_1_0_0"), SenderShardID: 1, ReceiverShardID: 0}, + {Hash: []byte("mb_1_0_0"), SenderShardID: 1, ReceiverShardID: 0, TxCount: txCount}, {Hash: []byte("mb_2_0_1"), SenderShardID: 2, ReceiverShardID: 0}, {Hash: []byte("mb_meta_0_2"), SenderShardID: core.MetachainShardId, ReceiverShardID: 0}, {Hash: []byte("mb_2_0_3"), SenderShardID: 2, ReceiverShardID: 0}, @@ -1008,7 +1042,7 @@ func createPendingAndProcessedMiniBlocksScenario() scenarioData { {ShardID: 0, MiniBlocksHashes: [][]byte{crossMbHeaders[1].Hash, crossMbHeaders[2].Hash, crossMbHeaders[3].Hash, crossMbHeaders[4].Hash}}, } expectedProcessedMiniBlocks := []bootstrapStorage.MiniBlocksInMeta{ - {MetaHash: []byte(firstPendingMetaHash), MiniBlocksHashes: [][]byte{crossMbHeaders[0].Hash}}, + {MetaHash: []byte(firstPendingMetaHash), MiniBlocksHashes: [][]byte{crossMbHeaders[0].Hash}, FullyProcessed: []bool{true}, IndexOfLastTxProcessed: []int32{int32(txCount - 1)}}, } expectedPendingMbsWithScheduled := []bootstrapStorage.PendingMiniBlocksInfo{ @@ -1091,61 +1125,95 @@ func Test_updatePendingMiniBlocksForScheduled(t *testing.T) { assert.Equal(t, hash2, remainingPendingMiniBlocks[0].MiniBlocksHashes[0]) } -func Test_updateProcessedMiniBlocksForScheduled(t *testing.T) { +func Test_getProcessedMiniBlocksForFinishedMeta(t *testing.T) { t.Parallel() - hash1 := []byte("hash1") - hash2 := []byte("hash2") - hash3 := []byte("hash3") - hash4 := []byte("hash4") - hashMeta := []byte("metaHash1") - hashPrevMeta := []byte("metaHash2") - shardMiniBlockHeaders := []block.MiniBlockHeader{ - {SenderShardID: 0, ReceiverShardID: 1, Hash: hash3}, - {SenderShardID: 0, ReceiverShardID: 1, Hash: hash4}, - } - shardMiniBlockHeadersPrevMeta := []block.MiniBlockHeader{ - {SenderShardID: 0, ReceiverShardID: 1, Hash: hash1}, - {SenderShardID: 1, ReceiverShardID: 0, Hash: hash2}, - } + metaHash1 := []byte("metaBlock_hash1") + metaHash2 := []byte("metaBlock_hash2") + miniBlockHash := []byte("miniBlock_hash1") + referencedMetaBlockHashes := [][]byte{metaHash1, metaHash2} - metaBlock := &block.MetaBlock{ - ShardInfo: []block.ShardData{ - { - ShardID: 0, - ShardMiniBlockHeaders: shardMiniBlockHeaders, - }, - }, - } + headers := make(map[string]data.HeaderHandler) + + _, err := getProcessedMiniBlocksForFinishedMeta(referencedMetaBlockHashes, headers, 0) + assert.True(t, errors.Is(err, epochStart.ErrMissingHeader)) - prevMetaBlock := &block.MetaBlock{ + headers[string(metaHash1)] = &block.Header{} + + _, err = getProcessedMiniBlocksForFinishedMeta(referencedMetaBlockHashes, headers, 0) + assert.Equal(t, epochStart.ErrWrongTypeAssertion, err) + + headers[string(metaHash1)] = &block.MetaBlock{ ShardInfo: []block.ShardData{ { - ShardID: 0, - ShardMiniBlockHeaders: shardMiniBlockHeadersPrevMeta, + ShardID: 1, + ShardMiniBlockHeaders: []block.MiniBlockHeader{ + { + TxCount: 100, + SenderShardID: 1, + ReceiverShardID: 0, + Hash: miniBlockHash, + }, + }, }, }, } - referencedMetaBlockHashes := [][]byte{hashPrevMeta, hashMeta} - pendingMiniBlocks := [][]byte{hash4} - headers := make(map[string]data.HeaderHandler) - headers[string(hashMeta)] = metaBlock - headers[string(hashPrevMeta)] = prevMetaBlock - expectedProcessedMbs := []bootstrapStorage.MiniBlocksInMeta{ - { - MetaHash: hashPrevMeta, - MiniBlocksHashes: [][]byte{hash1}, - }, + 
miniBlocksInMeta, err := getProcessedMiniBlocksForFinishedMeta(referencedMetaBlockHashes, headers, 0) + assert.Nil(t, err) + + require.Equal(t, 1, len(miniBlocksInMeta)) + assert.Equal(t, metaHash1, miniBlocksInMeta[0].MetaHash) + + require.Equal(t, 1, len(miniBlocksInMeta[0].MiniBlocksHashes)) + assert.Equal(t, miniBlockHash, miniBlocksInMeta[0].MiniBlocksHashes[0]) + + require.Equal(t, 1, len(miniBlocksInMeta[0].IndexOfLastTxProcessed)) + assert.Equal(t, int32(99), miniBlocksInMeta[0].IndexOfLastTxProcessed[0]) + + require.Equal(t, 1, len(miniBlocksInMeta[0].FullyProcessed)) + assert.True(t, miniBlocksInMeta[0].FullyProcessed[0]) +} + +func Test_updateProcessedMiniBlocksForScheduled(t *testing.T) { + t.Parallel() + + mbHash1 := []byte("miniBlock_hash1") + + mbHash2 := []byte("miniBlock_hash2") + mbHeader2 := &block.MiniBlockHeader{} + _ = mbHeader2.SetIndexOfFirstTxProcessed(10) + + metaBlockHash := []byte("metaBlock_hash1") + processedMiniBlocks := []bootstrapStorage.MiniBlocksInMeta{ { - MetaHash: hashMeta, - MiniBlocksHashes: [][]byte{hash3}, + MetaHash: metaBlockHash, + MiniBlocksHashes: [][]byte{mbHash1, mbHash2}, + FullyProcessed: []bool{true, false}, + IndexOfLastTxProcessed: []int32{100, 50}, }, } - updatedProcessed, err := updateProcessedMiniBlocksForScheduled(referencedMetaBlockHashes, pendingMiniBlocks, headers, 1) + mapHashMiniBlockHeaders := make(map[string]data.MiniBlockHeaderHandler) + mapHashMiniBlockHeaders[string(mbHash2)] = mbHeader2 + + miniBlocksInMeta, err := updateProcessedMiniBlocksForScheduled(processedMiniBlocks, mapHashMiniBlockHeaders) assert.Nil(t, err) - require.Equal(t, expectedProcessedMbs, updatedProcessed) + + require.Equal(t, 1, len(miniBlocksInMeta)) + assert.Equal(t, metaBlockHash, miniBlocksInMeta[0].MetaHash) + + require.Equal(t, 2, len(miniBlocksInMeta[0].MiniBlocksHashes)) + assert.Equal(t, mbHash1, miniBlocksInMeta[0].MiniBlocksHashes[0]) + assert.Equal(t, mbHash2, miniBlocksInMeta[0].MiniBlocksHashes[1]) + + require.Equal(t, 2, len(miniBlocksInMeta[0].FullyProcessed)) + assert.True(t, miniBlocksInMeta[0].FullyProcessed[0]) + assert.False(t, miniBlocksInMeta[0].FullyProcessed[1]) + + require.Equal(t, 2, len(miniBlocksInMeta[0].IndexOfLastTxProcessed)) + assert.Equal(t, int32(100), miniBlocksInMeta[0].IndexOfLastTxProcessed[0]) + assert.Equal(t, int32(9), miniBlocksInMeta[0].IndexOfLastTxProcessed[1]) } func Test_getPendingMiniBlocksHashes(t *testing.T) { @@ -1279,3 +1347,86 @@ func Test_removeHashes(t *testing.T) { updatedHashes = removeHashes(hashes, different) require.Equal(t, expectedRemoveDifferent, updatedHashes) } + +func Test_getNeededMetaBlock(t *testing.T) { + t.Parallel() + + neededMetaBlock, err := getNeededMetaBlock(nil, nil) + assert.Nil(t, neededMetaBlock) + assert.True(t, errors.Is(err, epochStart.ErrMissingHeader)) + + wrongHash := []byte("wrongHash") + headers := make(map[string]data.HeaderHandler) + neededMetaBlock, err = getNeededMetaBlock(wrongHash, headers) + assert.Nil(t, neededMetaBlock) + assert.True(t, errors.Is(err, epochStart.ErrMissingHeader)) + + hash := []byte("good hash") + header := &block.Header{} + headers[string(hash)] = header + neededMetaBlock, err = getNeededMetaBlock(hash, headers) + assert.Nil(t, neededMetaBlock) + assert.True(t, errors.Is(err, epochStart.ErrWrongTypeAssertion)) + + metaBlock := &block.MetaBlock{} + headers[string(hash)] = metaBlock + neededMetaBlock, err = getNeededMetaBlock(hash, headers) + assert.Nil(t, err) + assert.Equal(t, metaBlock, neededMetaBlock) +} + +func 
Test_getProcessedMiniBlocks(t *testing.T) { + t.Parallel() + + mbHash1 := []byte("hash1") + mbHash2 := []byte("hash2") + + mbh1 := block.MiniBlockHeader{ + Hash: mbHash1, + SenderShardID: 1, + ReceiverShardID: 0, + TxCount: 5, + } + _ = mbh1.SetIndexOfLastTxProcessed(int32(mbh1.TxCount - 2)) + _ = mbh1.SetConstructionState(int32(block.PartialExecuted)) + + mbh2 := block.MiniBlockHeader{ + Hash: mbHash2, + SenderShardID: 2, + ReceiverShardID: 0, + TxCount: 5, + } + _ = mbh2.SetIndexOfLastTxProcessed(int32(mbh2.TxCount - 1)) + _ = mbh2.SetConstructionState(int32(block.Final)) + + metaBlock := &block.MetaBlock{ + ShardInfo: []block.ShardData{ + { + ShardID: 1, + ShardMiniBlockHeaders: []block.MiniBlockHeader{mbh1}, + }, + { + ShardID: 2, + ShardMiniBlockHeaders: []block.MiniBlockHeader{mbh2}, + }, + }, + } + + processedMiniBlocks := make([]bootstrapStorage.MiniBlocksInMeta, 0) + referencedMetaBlockHash := []byte("hash") + + processedMiniBlocks = getProcessedMiniBlocks(metaBlock, 0, processedMiniBlocks, referencedMetaBlockHash) + + require.Equal(t, 1, len(processedMiniBlocks)) + require.Equal(t, 2, len(processedMiniBlocks[0].MiniBlocksHashes)) + require.Equal(t, 2, len(processedMiniBlocks[0].IndexOfLastTxProcessed)) + require.Equal(t, 2, len(processedMiniBlocks[0].FullyProcessed)) + + require.Equal(t, referencedMetaBlockHash, processedMiniBlocks[0].MetaHash) + assert.Equal(t, int32(mbh1.TxCount-2), processedMiniBlocks[0].IndexOfLastTxProcessed[0]) + assert.Equal(t, int32(mbh1.TxCount-1), processedMiniBlocks[0].IndexOfLastTxProcessed[1]) + assert.False(t, processedMiniBlocks[0].FullyProcessed[0]) + assert.True(t, processedMiniBlocks[0].FullyProcessed[1]) + assert.Equal(t, mbHash1, processedMiniBlocks[0].MiniBlocksHashes[0]) + assert.Equal(t, mbHash2, processedMiniBlocks[0].MiniBlocksHashes[1]) +} diff --git a/epochStart/bootstrap/startInEpochScheduled.go b/epochStart/bootstrap/startInEpochScheduled.go index 590f748d084..d6609f5f786 100644 --- a/epochStart/bootstrap/startInEpochScheduled.go +++ b/epochStart/bootstrap/startInEpochScheduled.go @@ -262,7 +262,7 @@ func (ses *startInEpochWithScheduledDataSyncer) prepareScheduledIntermediateTxs( GasPenalized: additionalData.GetScheduledGasPenalized(), GasRefunded: additionalData.GetScheduledGasRefunded(), } - scheduledMiniBlocks := getScheduledMiniBlocks(header, miniBlocks, scheduledTxHashes) + scheduledMiniBlocks := getScheduledMiniBlocks(header, miniBlocks) scheduledInfo := &process.ScheduledInfo{ RootHash: additionalData.GetScheduledRootHash(), IntermediateTxs: scheduledIntermediateTxsMap, @@ -366,23 +366,22 @@ func getBlockTypeOfTx(txHash []byte, miniBlocks map[string]*block.MiniBlock) blo func getScheduledMiniBlocks( header data.HeaderHandler, miniBlocks map[string]*block.MiniBlock, - scheduledTxHashes map[string]uint32, ) block.MiniBlockSlice { scheduledMiniBlocks := make(block.MiniBlockSlice, 0) mbHeaders := header.GetMiniBlockHeaderHandlers() for _, mbHeader := range mbHeaders { - miniBlock := miniBlocks[string(mbHeader.GetHash())] - if miniBlock == nil || miniBlock.Type == block.InvalidBlock { + if mbHeader.GetProcessingType() != int32(block.Processed) { continue } - if len(miniBlock.TxHashes) > 0 { - _, isScheduledTx := scheduledTxHashes[string(miniBlock.TxHashes[0])] - if isScheduledTx { - scheduledMiniBlocks = append(scheduledMiniBlocks, miniBlock) - } + miniBlock, ok := miniBlocks[string(mbHeader.GetHash())] + if !ok { + log.Warn("getScheduledMiniBlocks: mini block was not found", "mb hash", mbHeader.GetHash()) + continue } + + 
scheduledMiniBlocks = append(scheduledMiniBlocks, miniBlock) } return scheduledMiniBlocks @@ -438,15 +437,69 @@ func (ses *startInEpochWithScheduledDataSyncer) getScheduledTransactionHashes(he return nil, err } - scheduledTxs := make(map[string]uint32) - for _, mb := range miniBlocks { - for _, txHash := range mb.TxHashes { - scheduledTxs[string(txHash)] = mb.GetReceiverShardID() - log.Debug("startInEpochWithScheduledDataSyncer.getScheduledTransactionHashes", "hash", txHash) + scheduledTxsForShard := make(map[string]uint32) + for _, miniBlockHeader := range miniBlockHeaders { + pi, miniBlock, miniBlockHash, shouldSkip := getMiniBlockAndProcessedIndexes(miniBlockHeader, miniBlocks) + if shouldSkip { + continue } + + createScheduledTxsForShardMap(pi, miniBlock, miniBlockHash, scheduledTxsForShard) + } + + return scheduledTxsForShard, nil +} + +func getMiniBlockAndProcessedIndexes( + miniBlockHeader data.MiniBlockHeaderHandler, + miniBlocks map[string]*block.MiniBlock, +) (*processedIndexes, *block.MiniBlock, []byte, bool) { + + pi := &processedIndexes{} + + miniBlockHash := miniBlockHeader.GetHash() + miniBlock, ok := miniBlocks[string(miniBlockHash)] + if !ok { + log.Warn("startInEpochWithScheduledDataSyncer.getMiniBlockAndProcessedIndexes: mini block was not found", "mb hash", miniBlockHash) + return nil, nil, nil, true + } + + pi.firstIndex = miniBlockHeader.GetIndexOfFirstTxProcessed() + pi.lastIndex = miniBlockHeader.GetIndexOfLastTxProcessed() + + if pi.firstIndex > pi.lastIndex { + log.Warn("startInEpochWithScheduledDataSyncer.getMiniBlockAndProcessedIndexes: wrong first/last index", + "mb hash", miniBlockHash, + "index of first tx processed", pi.firstIndex, + "index of last tx processed", pi.lastIndex, + "num txs", len(miniBlock.TxHashes), + ) + return nil, nil, nil, true } - return scheduledTxs, nil + return pi, miniBlock, miniBlockHash, false +} + +func createScheduledTxsForShardMap( + pi *processedIndexes, + miniBlock *block.MiniBlock, + miniBlockHash []byte, + scheduledTxsForShard map[string]uint32, +) { + for index := pi.firstIndex; index <= pi.lastIndex; index++ { + if index >= int32(len(miniBlock.TxHashes)) { + log.Warn("startInEpochWithScheduledDataSyncer.createScheduledTxsForShardMap: index out of bound", + "mb hash", miniBlockHash, + "index", index, + "num txs", len(miniBlock.TxHashes), + ) + break + } + + txHash := miniBlock.TxHashes[index] + scheduledTxsForShard[string(txHash)] = miniBlock.GetReceiverShardID() + log.Debug("startInEpochWithScheduledDataSyncer.createScheduledTxsForShardMap", "hash", txHash) + } } func getNumScheduledIntermediateTxs(mapScheduledIntermediateTxs map[block.Type][]data.TransactionHandler) int { diff --git a/epochStart/bootstrap/startInEpochScheduled_test.go b/epochStart/bootstrap/startInEpochScheduled_test.go index e95bacdc81f..b93f9397d2f 100644 --- a/epochStart/bootstrap/startInEpochScheduled_test.go +++ b/epochStart/bootstrap/startInEpochScheduled_test.go @@ -560,14 +560,17 @@ func TestStartInEpochWithScheduledDataSyncer_getScheduledTransactionHashesWithDe Hash: hashMb1, } _ = mbHeaderScheduled1.SetProcessingType(int32(block.Scheduled)) + _ = mbHeaderScheduled1.SetIndexOfLastTxProcessed(1) mbHeaderScheduled2 := block.MiniBlockHeader{ Hash: hashMb2, } _ = mbHeaderScheduled2.SetProcessingType(int32(block.Scheduled)) + _ = mbHeaderScheduled2.SetIndexOfLastTxProcessed(1) mbHeaderScheduled3 := block.MiniBlockHeader{ Hash: hashMb3, } _ = mbHeaderScheduled3.SetProcessingType(int32(block.Scheduled)) + _ = 
mbHeaderScheduled3.SetIndexOfLastTxProcessed(1) mbHeader := block.MiniBlockHeader{ Hash: hashMb4, } @@ -585,7 +588,7 @@ func TestStartInEpochWithScheduledDataSyncer_getScheduledTransactionHashesWithDe scheduledMiniBlocksSyncer: &epochStartMocks.PendingMiniBlockSyncHandlerStub{ SyncPendingMiniBlocksCalled: func(miniBlockHeaders []data.MiniBlockHeaderHandler, ctx context.Context) error { for i := range miniBlockHeaders { - require.Len(t, miniBlockHeaders[i].GetReserved(), 2) + require.Len(t, miniBlockHeaders[i].GetReserved(), 4) } return nil }, @@ -746,17 +749,15 @@ func TestGetScheduledMiniBlocks(t *testing.T) { }, } - schedulesTxHashes := map[string]uint32{ - txHash1: 1, - txHash2: 2, - } + _ = header.MiniBlockHeaders[0].SetProcessingType(int32(block.Processed)) + _ = header.MiniBlockHeaders[1].SetProcessingType(int32(block.Processed)) expectedMiniBlocks := block.MiniBlockSlice{ mb1, mb2, } - mbs := getScheduledMiniBlocks(header, miniBlocks, schedulesTxHashes) + mbs := getScheduledMiniBlocks(header, miniBlocks) assert.Equal(t, expectedMiniBlocks, mbs) } @@ -850,3 +851,100 @@ func Test_isScheduledIntermediateTx(t *testing.T) { require.False(t, isScheduledIntermediateTx(miniBlocks, scheduledTxHashes, []byte(tx2Hash), tx2, selfShardID)) }) } + +func Test_getMiniBlockAndProcessedIndexes(t *testing.T) { + t.Parallel() + + neededMiniBlockHash := []byte("hash") + miniBlockHeader := &block.MiniBlockHeader{ + Hash: neededMiniBlockHash, + TxCount: 5, + } + + miniBlocks := make(map[string]*block.MiniBlock) + pi, miniBlock, miniBlockHash, shouldSkip := getMiniBlockAndProcessedIndexes(miniBlockHeader, miniBlocks) + assert.Nil(t, pi) + assert.Nil(t, miniBlock) + assert.Nil(t, miniBlockHash) + assert.True(t, shouldSkip) + + neededMiniBlock := &block.MiniBlock{} + miniBlocks[string(neededMiniBlockHash)] = neededMiniBlock + + _ = miniBlockHeader.SetIndexOfFirstTxProcessed(int32(miniBlockHeader.TxCount - 2)) + _ = miniBlockHeader.SetIndexOfLastTxProcessed(int32(miniBlockHeader.TxCount - 3)) + pi, miniBlock, miniBlockHash, shouldSkip = getMiniBlockAndProcessedIndexes(miniBlockHeader, miniBlocks) + assert.Nil(t, pi) + assert.Nil(t, miniBlock) + assert.Nil(t, miniBlockHash) + assert.True(t, shouldSkip) + + _ = miniBlockHeader.SetIndexOfFirstTxProcessed(int32(miniBlockHeader.TxCount - 3)) + _ = miniBlockHeader.SetIndexOfLastTxProcessed(int32(miniBlockHeader.TxCount - 2)) + pi, miniBlock, miniBlockHash, shouldSkip = getMiniBlockAndProcessedIndexes(miniBlockHeader, miniBlocks) + assert.Equal(t, int32(miniBlockHeader.TxCount-3), pi.firstIndex) + assert.Equal(t, int32(miniBlockHeader.TxCount-2), pi.lastIndex) + assert.Equal(t, neededMiniBlock, miniBlock) + assert.Equal(t, neededMiniBlockHash, miniBlockHash) + assert.False(t, shouldSkip) +} + +func Test_createScheduledTxsForShardMap(t *testing.T) { + t.Parallel() + + pi := &processedIndexes{ + firstIndex: 1, + lastIndex: 3, + } + + txHash1 := []byte("txHash1") + txHash2 := []byte("txHash2") + txHash3 := []byte("txHash3") + txHash4 := []byte("txHash4") + txHash5 := []byte("txHash5") + miniBlock := &block.MiniBlock{ + ReceiverShardID: 1, + TxHashes: [][]byte{txHash1, txHash2, txHash3, txHash4, txHash5}, + } + + scheduledTxsForShard := make(map[string]uint32) + miniBlockHash := []byte("mbHash") + + createScheduledTxsForShardMap(pi, &block.MiniBlock{}, miniBlockHash, scheduledTxsForShard) + assert.Equal(t, 0, len(scheduledTxsForShard)) + + createScheduledTxsForShardMap(pi, miniBlock, miniBlockHash, scheduledTxsForShard) + require.Equal(t, 3, len(scheduledTxsForShard)) + 
+ _, ok := scheduledTxsForShard[string(txHash1)] + assert.False(t, ok) + _, ok = scheduledTxsForShard[string(txHash2)] + assert.True(t, ok) + _, ok = scheduledTxsForShard[string(txHash3)] + assert.True(t, ok) + _, ok = scheduledTxsForShard[string(txHash4)] + assert.True(t, ok) + _, ok = scheduledTxsForShard[string(txHash5)] + assert.False(t, ok) +} + +func Test_getNumScheduledIntermediateTxs(t *testing.T) { + t.Parallel() + + mapScheduledIntermediateTxs := make(map[block.Type][]data.TransactionHandler) + mapScheduledIntermediateTxs[0] = []data.TransactionHandler{ + &smartContractResult.SmartContractResult{Nonce: 1}, + &smartContractResult.SmartContractResult{Nonce: 2}, + } + mapScheduledIntermediateTxs[1] = []data.TransactionHandler{ + &smartContractResult.SmartContractResult{Nonce: 1}, + } + mapScheduledIntermediateTxs[2] = []data.TransactionHandler{ + &smartContractResult.SmartContractResult{Nonce: 1}, + &smartContractResult.SmartContractResult{Nonce: 2}, + &smartContractResult.SmartContractResult{Nonce: 3}, + } + + numScheduledIntermediateTxs := getNumScheduledIntermediateTxs(mapScheduledIntermediateTxs) + assert.Equal(t, 6, numScheduledIntermediateTxs) +} diff --git a/epochStart/metachain/baseRewards_test.go b/epochStart/metachain/baseRewards_test.go index 6702bb6c524..bac32be7fb1 100644 --- a/epochStart/metachain/baseRewards_test.go +++ b/epochStart/metachain/baseRewards_test.go @@ -21,10 +21,10 @@ import ( "github.com/ElrondNetwork/elrond-go/state" "github.com/ElrondNetwork/elrond-go/state/factory" dataRetrieverMock "github.com/ElrondNetwork/elrond-go/testscommon/dataRetriever" + "github.com/ElrondNetwork/elrond-go/testscommon/hashingMocks" "github.com/ElrondNetwork/elrond-go/testscommon/shardingMocks" stateMock "github.com/ElrondNetwork/elrond-go/testscommon/state" trieMock "github.com/ElrondNetwork/elrond-go/testscommon/trie" - "github.com/ElrondNetwork/elrond-go/testscommon/hashingMocks" "github.com/ElrondNetwork/elrond-go/trie" vmcommon "github.com/ElrondNetwork/elrond-vm-common" "github.com/stretchr/testify/assert" diff --git a/epochStart/metachain/epochStartData.go b/epochStart/metachain/epochStartData.go index dd10e873751..f2c81ce5f5b 100644 --- a/epochStart/metachain/epochStartData.go +++ b/epochStart/metachain/epochStartData.go @@ -374,16 +374,15 @@ func (e *epochStartData) computeStillPending( miniBlockHeaders map[string]block.MiniBlockHeader, ) []block.MiniBlockHeader { - pendingMiniBlocks := make([]block.MiniBlockHeader, 0) + initIndexesOfProcessedTxs(miniBlockHeaders, shardID) for _, shardHdr := range shardHdrs { - for _, mbHeader := range shardHdr.GetMiniBlockHeaderHandlers() { - delete(miniBlockHeaders, string(mbHeader.GetHash())) - } + computeStillPendingInShardHeader(shardHdr, miniBlockHeaders, shardID) } + pendingMiniBlocks := make([]block.MiniBlockHeader, 0) for _, mbHeader := range miniBlockHeaders { - log.Debug("pending miniblock for shard ", "id", shardID, "hash", mbHeader.Hash) + log.Debug("pending mini block for", "shard", shardID, "mb hash", mbHeader.Hash) pendingMiniBlocks = append(pendingMiniBlocks, mbHeader) } @@ -394,6 +393,85 @@ func (e *epochStartData) computeStillPending( return pendingMiniBlocks } +func initIndexesOfProcessedTxs(miniBlockHeaders map[string]block.MiniBlockHeader, shardID uint32) { + for mbHash, mbHeader := range miniBlockHeaders { + log.Debug("epochStartData.initIndexesOfProcessedTxs", + "mb hash", mbHash, + "len(reserved)", len(mbHeader.GetReserved()), + "shard", shardID, + ) + + if len(mbHeader.GetReserved()) > 0 { + continue + } + + 
setIndexOfFirstAndLastTxProcessed(&mbHeader, -1, -1) + miniBlockHeaders[mbHash] = mbHeader + } +} + +func computeStillPendingInShardHeader( + shardHdr data.HeaderHandler, + miniBlockHeaders map[string]block.MiniBlockHeader, + shardID uint32, +) { + for _, shardMiniBlockHeader := range shardHdr.GetMiniBlockHeaderHandlers() { + shardMiniBlockHash := string(shardMiniBlockHeader.GetHash()) + mbHeader, ok := miniBlockHeaders[shardMiniBlockHash] + if !ok { + continue + } + + if shardMiniBlockHeader.IsFinal() { + log.Debug("epochStartData.computeStillPendingInShardHeader: IsFinal", + "mb hash", shardMiniBlockHash, + "shard", shardID, + ) + delete(miniBlockHeaders, shardMiniBlockHash) + continue + } + + updateIndexesOfProcessedTxs(mbHeader, shardMiniBlockHeader, shardMiniBlockHash, shardID, miniBlockHeaders) + } +} + +func updateIndexesOfProcessedTxs( + mbHeader block.MiniBlockHeader, + shardMiniBlockHeader data.MiniBlockHeaderHandler, + shardMiniBlockHash string, + shardID uint32, + miniBlockHeaders map[string]block.MiniBlockHeader, +) { + currIndexOfFirstTxProcessed := mbHeader.GetIndexOfFirstTxProcessed() + currIndexOfLastTxProcessed := mbHeader.GetIndexOfLastTxProcessed() + newIndexOfFirstTxProcessed := shardMiniBlockHeader.GetIndexOfFirstTxProcessed() + newIndexOfLastTxProcessed := shardMiniBlockHeader.GetIndexOfLastTxProcessed() + if newIndexOfLastTxProcessed > currIndexOfLastTxProcessed { + log.Debug("epochStartData.updateIndexesOfProcessedTxs", + "mb hash", shardMiniBlockHash, + "shard", shardID, + "current index of first tx processed", currIndexOfFirstTxProcessed, + "current index of last tx processed", currIndexOfLastTxProcessed, + "new index of first tx processed", newIndexOfFirstTxProcessed, + "new index of last tx processed", newIndexOfLastTxProcessed, + ) + setIndexOfFirstAndLastTxProcessed(&mbHeader, newIndexOfFirstTxProcessed, newIndexOfLastTxProcessed) + miniBlockHeaders[shardMiniBlockHash] = mbHeader + } +} + +func setIndexOfFirstAndLastTxProcessed(mbHeader *block.MiniBlockHeader, indexOfFirstTxProcessed int32, indexOfLastTxProcessed int32) { + err := mbHeader.SetIndexOfFirstTxProcessed(indexOfFirstTxProcessed) + if err != nil { + log.Warn("setIndexOfFirstAndLastTxProcessed: SetIndexOfFirstTxProcessed", "error", err.Error()) + } + + err = mbHeader.SetIndexOfLastTxProcessed(indexOfLastTxProcessed) + if err != nil { + log.Warn("setIndexOfFirstAndLastTxProcessed: SetIndexOfLastTxProcessed", "error", err.Error()) + } +} + func getAllMiniBlocksWithDst(m *block.MetaBlock, destId uint32) map[string]block.MiniBlockHeader { hashDst := make(map[string]block.MiniBlockHeader) for i := 0; i < len(m.ShardInfo); i++ { diff --git a/epochStart/metachain/epochStartData_test.go b/epochStart/metachain/epochStartData_test.go index bf3119848a2..e501f895f9b 100644 --- a/epochStart/metachain/epochStartData_test.go +++ b/epochStart/metachain/epochStartData_test.go @@ -491,3 +491,146 @@ func TestMetaProcessor_CreateEpochStartFromMetaBlockEdgeCaseChecking(t *testing. 
err = epoch.VerifyEpochStartDataForMetablock(&block.MetaBlock{EpochStart: *epStart}) assert.Nil(t, err) } + +func TestEpochStartCreator_computeStillPending(t *testing.T) { + t.Parallel() + + arguments := createMockEpochStartCreatorArguments() + epoch, _ := NewEpochStartData(arguments) + + shardHdrs := make([]data.HeaderHandler, 0) + miniBlockHeaders := make(map[string]block.MiniBlockHeader) + mbHash1 := []byte("miniBlock_hash1") + mbHash2 := []byte("miniBlock_hash2") + mbHash3 := []byte("miniBlock_hash3") + mbHeader1 := block.MiniBlockHeader{Hash: mbHash1, TxCount: 3} + mbHeader2 := block.MiniBlockHeader{Hash: mbHash2} + mbHeader3 := block.MiniBlockHeader{Hash: mbHash3, TxCount: 10} + + _ = mbHeader1.SetConstructionState(int32(block.Final)) + _ = mbHeader1.SetIndexOfFirstTxProcessed(0) + _ = mbHeader1.SetIndexOfLastTxProcessed(2) + + _ = mbHeader3.SetConstructionState(int32(block.PartialExecuted)) + _ = mbHeader3.SetIndexOfFirstTxProcessed(1) + _ = mbHeader3.SetIndexOfLastTxProcessed(3) + + miniBlockHeaders[string(mbHash1)] = mbHeader1 + miniBlockHeaders[string(mbHash2)] = mbHeader2 + miniBlockHeaders[string(mbHash3)] = mbHeader3 + + mbh1 := block.MiniBlockHeader{ + Hash: mbHash1, + } + mbh2 := block.MiniBlockHeader{ + Hash: []byte("miniBlock_hash_missing"), + } + mbh3 := block.MiniBlockHeader{ + Hash: mbHash3, + } + + _ = mbh3.SetConstructionState(int32(block.PartialExecuted)) + _ = mbh3.SetIndexOfFirstTxProcessed(4) + _ = mbh3.SetIndexOfLastTxProcessed(8) + + header := &block.Header{ + MiniBlockHeaders: []block.MiniBlockHeader{mbh1, mbh2, mbh3}, + } + + shardHdrs = append(shardHdrs, header) + + stillPending := epoch.computeStillPending(0, shardHdrs, miniBlockHeaders) + require.Equal(t, 2, len(stillPending)) + + assert.Equal(t, mbHash2, stillPending[0].Hash) + assert.Equal(t, mbHash3, stillPending[1].Hash) + + assert.Equal(t, int32(-1), stillPending[0].GetIndexOfFirstTxProcessed()) + assert.Equal(t, int32(-1), stillPending[0].GetIndexOfLastTxProcessed()) + + assert.Equal(t, int32(4), stillPending[1].GetIndexOfFirstTxProcessed()) + assert.Equal(t, int32(8), stillPending[1].GetIndexOfLastTxProcessed()) +} + +func Test_initIndexesOfProcessedTxs(t *testing.T) { + t.Parallel() + + miniBlockHeaders := make(map[string]block.MiniBlockHeader) + mbh1 := block.MiniBlockHeader{ + TxCount: 5, + } + _ = mbh1.SetIndexOfFirstTxProcessed(1) + _ = mbh1.SetIndexOfLastTxProcessed(2) + + mbh2 := block.MiniBlockHeader{ + TxCount: 5, + } + + miniBlockHeaders["mbHash1"] = mbh1 + miniBlockHeaders["mbHash2"] = mbh2 + + initIndexesOfProcessedTxs(miniBlockHeaders, 0) + + mbh := miniBlockHeaders["mbHash1"] + assert.Equal(t, int32(1), mbh.GetIndexOfFirstTxProcessed()) + assert.Equal(t, int32(2), mbh.GetIndexOfLastTxProcessed()) + + mbh = miniBlockHeaders["mbHash2"] + assert.Equal(t, int32(-1), mbh.GetIndexOfFirstTxProcessed()) + assert.Equal(t, int32(-1), mbh.GetIndexOfLastTxProcessed()) +} + +func Test_computeStillPendingInShardHeader(t *testing.T) { + t.Parallel() + + mbHash1 := []byte("mbHash1") + mbHash2 := []byte("mbHash2") + mbHash3 := []byte("mbHash3") + + mbh1 := block.MiniBlockHeader{ + TxCount: 6, + Hash: mbHash1, + } + + mbh2 := block.MiniBlockHeader{ + TxCount: 6, + Hash: mbHash2, + } + _ = mbh2.SetConstructionState(int32(block.Final)) + + mbh3 := block.MiniBlockHeader{ + TxCount: 6, + Hash: mbHash3, + } + oldIndexOfFirstTxProcessed := int32(1) + oldIndexOfLastTxProcessed := int32(2) + _ = mbh3.SetConstructionState(int32(block.PartialExecuted)) + _ = 
mbh3.SetIndexOfFirstTxProcessed(oldIndexOfFirstTxProcessed) + _ = mbh3.SetIndexOfLastTxProcessed(oldIndexOfLastTxProcessed) + + shardHdr := &block.Header{ + MiniBlockHeaders: []block.MiniBlockHeader{mbh1, mbh2, mbh3}, + } + + newIndexOfFirstTxProcessed := int32(3) + newIndexOfLastTxProcessed := int32(4) + _ = shardHdr.MiniBlockHeaders[2].SetIndexOfFirstTxProcessed(newIndexOfFirstTxProcessed) + _ = shardHdr.MiniBlockHeaders[2].SetIndexOfLastTxProcessed(newIndexOfLastTxProcessed) + + miniBlockHeaders := make(map[string]block.MiniBlockHeader) + miniBlockHeaders[string(mbHash2)] = mbh2 + miniBlockHeaders[string(mbHash3)] = mbh3 + + assert.Equal(t, 2, len(miniBlockHeaders)) + computeStillPendingInShardHeader(shardHdr, miniBlockHeaders, 0) + assert.Equal(t, 1, len(miniBlockHeaders)) + + _, ok := miniBlockHeaders[string(mbHash2)] + assert.False(t, ok) + + mbh, ok := miniBlockHeaders[string(mbHash3)] + require.True(t, ok) + + assert.Equal(t, newIndexOfFirstTxProcessed, mbh.GetIndexOfFirstTxProcessed()) + assert.Equal(t, newIndexOfLastTxProcessed, mbh.GetIndexOfLastTxProcessed()) +} diff --git a/epochStart/mock/coreComponentsMock.go b/epochStart/mock/coreComponentsMock.go index 8dd18bd453a..68b21a5b844 100644 --- a/epochStart/mock/coreComponentsMock.go +++ b/epochStart/mock/coreComponentsMock.go @@ -33,6 +33,7 @@ type CoreComponentsMock struct { ChanStopNode chan endProcess.ArgEndProcess NodeTypeProviderField core.NodeTypeProviderHandler ProcessStatusHandlerInstance common.ProcessStatusHandler + HardforkTriggerPubKeyField []byte mutCore sync.RWMutex } @@ -152,6 +153,11 @@ func (ccm *CoreComponentsMock) ProcessStatusHandler() common.ProcessStatusHandle return ccm.ProcessStatusHandlerInstance } +// HardforkTriggerPubKey - +func (ccm *CoreComponentsMock) HardforkTriggerPubKey() []byte { + return ccm.HardforkTriggerPubKeyField +} + // IsInterfaceNil - func (ccm *CoreComponentsMock) IsInterfaceNil() bool { return ccm == nil diff --git a/epochStart/mock/cryptoComponentsMock.go b/epochStart/mock/cryptoComponentsMock.go index 0f7aa7536de..afbcb00a382 100644 --- a/epochStart/mock/cryptoComponentsMock.go +++ b/epochStart/mock/cryptoComponentsMock.go @@ -8,13 +8,14 @@ import ( // CryptoComponentsMock - type CryptoComponentsMock struct { - PubKey crypto.PublicKey - BlockSig crypto.SingleSigner - TxSig crypto.SingleSigner - MultiSig crypto.MultiSigner - BlKeyGen crypto.KeyGenerator - TxKeyGen crypto.KeyGenerator - mutCrypto sync.RWMutex + PubKey crypto.PublicKey + BlockSig crypto.SingleSigner + TxSig crypto.SingleSigner + MultiSig crypto.MultiSigner + PeerSignHandler crypto.PeerSignatureHandler + BlKeyGen crypto.KeyGenerator + TxKeyGen crypto.KeyGenerator + mutCrypto sync.RWMutex } // PublicKey - @@ -49,6 +50,11 @@ func (ccm *CryptoComponentsMock) SetMultiSigner(m crypto.MultiSigner) error { return nil } +// PeerSignatureHandler - +func (ccm *CryptoComponentsMock) PeerSignatureHandler() crypto.PeerSignatureHandler { + return ccm.PeerSignHandler +} + // BlockSignKeyGen - func (ccm *CryptoComponentsMock) BlockSignKeyGen() crypto.KeyGenerator { return ccm.BlKeyGen @@ -62,13 +68,14 @@ func (ccm *CryptoComponentsMock) TxSignKeyGen() crypto.KeyGenerator { // Clone - func (ccm *CryptoComponentsMock) Clone() interface{} { return &CryptoComponentsMock{ - PubKey: ccm.PubKey, - BlockSig: ccm.BlockSig, - TxSig: ccm.TxSig, - MultiSig: ccm.MultiSig, - BlKeyGen: ccm.BlKeyGen, - TxKeyGen: ccm.TxKeyGen, - mutCrypto: sync.RWMutex{}, + PubKey: ccm.PubKey, + BlockSig: ccm.BlockSig, + TxSig: ccm.TxSig, + MultiSig: ccm.MultiSig, + 
PeerSignHandler: ccm.PeerSignHandler, + BlKeyGen: ccm.BlKeyGen, + TxKeyGen: ccm.TxKeyGen, + mutCrypto: sync.RWMutex{}, } } diff --git a/epochStart/mock/messengerStub.go b/epochStart/mock/messengerStub.go index cf35ff6a70e..13634d7d613 100644 --- a/epochStart/mock/messengerStub.go +++ b/epochStart/mock/messengerStub.go @@ -9,10 +9,10 @@ import ( type MessengerStub struct { ConnectedPeersCalled func() []core.PeerID RegisterMessageProcessorCalled func(topic string, identifier string, handler p2p.MessageProcessor) error - - CreateTopicCalled func(topic string, identifier bool) error - UnjoinAllTopicsCalled func() error - IDCalled func() core.PeerID + CreateTopicCalled func(topic string, identifier bool) error + UnjoinAllTopicsCalled func() error + IDCalled func() core.PeerID + VerifyCalled func(payload []byte, pid core.PeerID, signature []byte) error } // ConnectedPeersOnTopic - @@ -93,3 +93,12 @@ func (m *MessengerStub) ID() core.PeerID { return "peer ID" } + +// Verify - +func (m *MessengerStub) Verify(payload []byte, pid core.PeerID, signature []byte) error { + if m.VerifyCalled != nil { + return m.VerifyCalled(payload, pid, signature) + } + + return nil +} diff --git a/epochStart/shardchain/trigger.go b/epochStart/shardchain/trigger.go index acb3b570990..27bfb995c18 100644 --- a/epochStart/shardchain/trigger.go +++ b/epochStart/shardchain/trigger.go @@ -584,18 +584,18 @@ func (t *trigger) saveEpochStartMeta(metaHdr data.HeaderHandler) { } // call only if mutex is locked before -func (t *trigger) isMetaBlockValid(_ string, metaHdr data.HeaderHandler) bool { +func (t *trigger) isMetaBlockValid(hash string, metaHdr data.HeaderHandler) bool { currHdr := metaHdr for i := metaHdr.GetNonce() - 1; i >= metaHdr.GetNonce()-t.validity; i-- { neededHdr, err := t.getHeaderWithNonceAndHash(i, currHdr.GetPrevHash()) if err != nil { - log.Debug("isMetaBlockValid.getHeaderWithNonceAndHash", "error", err.Error()) + log.Debug("isMetaBlockValid.getHeaderWithNonceAndHash", "hash", hash, "error", err.Error()) return false } err = t.headerValidator.IsHeaderConstructionValid(currHdr, neededHdr) if err != nil { - log.Debug("isMetaBlockValid.IsHeaderConstructionValid", "error", err.Error()) + log.Debug("isMetaBlockValid.IsHeaderConstructionValid", "hash", hash, "error", err.Error()) return false } diff --git a/errors/errors.go b/errors/errors.go index b95f65cbcd6..603931ad85b 100644 --- a/errors/errors.go +++ b/errors/errors.go @@ -98,6 +98,9 @@ var ErrNilDataComponentsFactory = errors.New("nil data components factory") // ErrNilHeartbeatComponentsFactory signals that the provided heartbeat components factory is nil var ErrNilHeartbeatComponentsFactory = errors.New("nil heartbeat components factory") +// ErrNilHeartbeatV2ComponentsFactory signals that the provided heartbeatV2 components factory is nil +var ErrNilHeartbeatV2ComponentsFactory = errors.New("nil heartbeatV2 components factory") + // ErrNilNetworkComponentsFactory signals that the provided network components factory is nil var ErrNilNetworkComponentsFactory = errors.New("nil network components factory") @@ -191,6 +194,9 @@ var ErrNilHeaderSigVerifier = errors.New("") // ErrNilHeartbeatComponents signals that a nil heartbeat components instance was provided var ErrNilHeartbeatComponents = errors.New("nil heartbeat component") +// ErrNilHeartbeatV2Components signals that a nil heartbeatV2 components instance was provided +var ErrNilHeartbeatV2Components = errors.New("nil heartbeatV2 component") + // ErrNilHeartbeatMessageHandler signals that a nil 
heartbeat message handler was provided var ErrNilHeartbeatMessageHandler = errors.New("nil heartbeat message handler") @@ -200,6 +206,9 @@ var ErrNilHeartbeatMonitor = errors.New("nil heartbeat monitor") // ErrNilHeartbeatSender signals that a nil heartbeat sender was provided var ErrNilHeartbeatSender = errors.New("nil heartbeat sender") +// ErrNilHeartbeatV2Sender signals that a nil heartbeatV2 sender was provided +var ErrNilHeartbeatV2Sender = errors.New("nil heartbeatV2 sender") + // ErrNilHeartbeatStorer signals that a nil heartbeat storer was provided var ErrNilHeartbeatStorer = errors.New("nil heartbeat storer") diff --git a/examples/address_test.go b/examples/address_test.go index 761ced1bc31..3d3cc69d0e9 100644 --- a/examples/address_test.go +++ b/examples/address_test.go @@ -75,6 +75,7 @@ func TestSystemSCsAddressesAndSpecialAddresses(t *testing.T) { genesisMintingAddressBytes, err := hex.DecodeString("f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0") require.NoError(t, err) genesisMintingAddress := addressEncoder.Encode(genesisMintingAddressBytes) + systemAccountAddress := addressEncoder.Encode(core.SystemAccountAddress) header := []string{"Smart contract/Special address", "Address"} lines := []*display.LineData{ @@ -88,6 +89,7 @@ func TestSystemSCsAddressesAndSpecialAddresses(t *testing.T) { display.NewLineData(false, []string{"Delegation manager", delegationManagerScAddress}), display.NewLineData(false, []string{"First delegation", firstDelegationScAddress}), display.NewLineData(false, []string{"Genesis Minting Address", genesisMintingAddress}), + display.NewLineData(false, []string{"System Account Address", systemAccountAddress}), display.NewLineData(false, []string{"Liquid staking", liquidStakingSCAddress}), } @@ -104,5 +106,6 @@ func TestSystemSCsAddressesAndSpecialAddresses(t *testing.T) { assert.Equal(t, "erd1qqqqqqqqqqqqqqqpqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq0llllsqkarq6", firstDelegationScAddress) assert.Equal(t, "erd1qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq6gq4hu", contractDeployScAdress) assert.Equal(t, "erd17rc0pu8s7rc0pu8s7rc0pu8s7rc0pu8s7rc0pu8s7rc0pu8s7rcqqkhty3", genesisMintingAddress) + assert.Equal(t, "erd1lllllllllllllllllllllllllllllllllllllllllllllllllllsckry7t", systemAccountAddress) assert.Equal(t, "erd1qqqqqqqqqqqqqqqpqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq9lllsm6xupm", liquidStakingSCAddress) } diff --git a/facade/nodeFacade_test.go b/facade/nodeFacade_test.go index 3684a151441..fc9672696fe 100644 --- a/facade/nodeFacade_test.go +++ b/facade/nodeFacade_test.go @@ -1296,9 +1296,9 @@ func TestNodeFacade_GetTransactionsPool(t *testing.T) { arg := createMockArguments() expectedPool := &common.TransactionsPoolAPIResponse{ - RegularTransactions: []string{"tx0", "tx1"}, + RegularTransactions: []string{"tx0", "tx1"}, SmartContractResults: []string{"tx2", "tx3"}, - Rewards: []string{"tx4"}, + Rewards: []string{"tx4"}, } arg.ApiResolver = &mock.ApiResolverStub{ GetTransactionsPoolCalled: func() (*common.TransactionsPoolAPIResponse, error) { diff --git a/factory/apiResolverFactory.go b/factory/apiResolverFactory.go index 4fb7d29f357..757ef326f14 100644 --- a/factory/apiResolverFactory.go +++ b/factory/apiResolverFactory.go @@ -115,6 +115,7 @@ func CreateApiResolver(args *ApiResolverArgs) (facade.ApiResolver, error) { args.Configs.EpochConfig.EnableEpochs.ESDTTransferRoleEnableEpoch, args.Configs.EpochConfig.EnableEpochs.BuiltInFunctionOnMetaEnableEpoch, args.Configs.EpochConfig.EnableEpochs.OptimizeNFTStoreEnableEpoch, + 
args.Configs.EpochConfig.EnableEpochs.CheckCorrectTokenIDForTransferRoleEnableEpoch, ) if err != nil { return nil, err @@ -284,6 +285,7 @@ func createScQueryElement( args.epochConfig.EnableEpochs.ESDTTransferRoleEnableEpoch, args.epochConfig.EnableEpochs.BuiltInFunctionOnMetaEnableEpoch, args.epochConfig.EnableEpochs.OptimizeNFTStoreEnableEpoch, + args.epochConfig.EnableEpochs.CheckCorrectTokenIDForTransferRoleEnableEpoch, ) if err != nil { return nil, err @@ -395,6 +397,7 @@ func createScQueryElement( ArwenChangeLocker: args.coreComponents.ArwenChangeLocker(), Bootstrapper: args.bootstrapper, AllowExternalQueriesChan: args.allowVMQueriesChan, + MaxGasLimitPerQuery: args.generalConfig.VirtualMachine.GasConfig.MaxGasPerVmQuery, } return smartContract.NewSCQueryService(argsNewSCQueryService) @@ -411,19 +414,21 @@ func createBuiltinFuncs( esdtTransferRoleEnableEpoch uint32, transferToMetaEnableEpoch uint32, optimizeNFTStoreEnableEpoch uint32, + checkCorrectTokenIDEnableEpoch uint32, ) (vmcommon.BuiltInFunctionContainer, vmcommon.SimpleESDTNFTStorageHandler, error) { argsBuiltIn := builtInFunctions.ArgsCreateBuiltInFunctionContainer{ - GasSchedule: gasScheduleNotifier, - MapDNSAddresses: make(map[string]struct{}), - Marshalizer: marshalizer, - Accounts: accnts, - ShardCoordinator: shardCoordinator, - EpochNotifier: epochNotifier, - ESDTMultiTransferEnableEpoch: esdtMultiTransferEnableEpoch, - ESDTTransferRoleEnableEpoch: esdtTransferRoleEnableEpoch, - GlobalMintBurnDisableEpoch: esdtGlobalMintBurnDisableEpoch, - ESDTTransferMetaEnableEpoch: transferToMetaEnableEpoch, - OptimizeNFTStoreEnableEpoch: optimizeNFTStoreEnableEpoch, + GasSchedule: gasScheduleNotifier, + MapDNSAddresses: make(map[string]struct{}), + Marshalizer: marshalizer, + Accounts: accnts, + ShardCoordinator: shardCoordinator, + EpochNotifier: epochNotifier, + ESDTMultiTransferEnableEpoch: esdtMultiTransferEnableEpoch, + ESDTTransferRoleEnableEpoch: esdtTransferRoleEnableEpoch, + GlobalMintBurnDisableEpoch: esdtGlobalMintBurnDisableEpoch, + ESDTTransferMetaEnableEpoch: transferToMetaEnableEpoch, + OptimizeNFTStoreEnableEpoch: optimizeNFTStoreEnableEpoch, + CheckCorrectTokenIDEnableEpoch: checkCorrectTokenIDEnableEpoch, } return builtInFunctions.CreateBuiltInFuncContainerAndNFTStorageHandler(argsBuiltIn) } diff --git a/factory/blockProcessorCreator.go b/factory/blockProcessorCreator.go index b14e3c95ebf..51aba518a54 100644 --- a/factory/blockProcessorCreator.go +++ b/factory/blockProcessorCreator.go @@ -57,6 +57,7 @@ func (pcf *processComponentsFactory) newBlockProcessor( txSimulatorProcessorArgs *txsimulator.ArgsTxSimulator, arwenChangeLocker common.Locker, scheduledTxsExecutionHandler process.ScheduledTxsExecutionHandler, + processedMiniBlocksTracker process.ProcessedMiniBlocksTracker, ) (*blockProcessorAndVmFactories, error) { if pcf.bootstrapComponents.ShardCoordinator().SelfId() < pcf.bootstrapComponents.ShardCoordinator().NumberOfShards() { return pcf.newShardBlockProcessor( @@ -70,6 +71,7 @@ func (pcf *processComponentsFactory) newBlockProcessor( txSimulatorProcessorArgs, arwenChangeLocker, scheduledTxsExecutionHandler, + processedMiniBlocksTracker, ) } if pcf.bootstrapComponents.ShardCoordinator().SelfId() == core.MetachainShardId { @@ -85,6 +87,7 @@ func (pcf *processComponentsFactory) newBlockProcessor( txSimulatorProcessorArgs, arwenChangeLocker, scheduledTxsExecutionHandler, + processedMiniBlocksTracker, ) } @@ -102,6 +105,7 @@ func (pcf *processComponentsFactory) newShardBlockProcessor( txSimulatorProcessorArgs 
*txsimulator.ArgsTxSimulator, arwenChangeLocker common.Locker, scheduledTxsExecutionHandler process.ScheduledTxsExecutionHandler, + processedMiniBlocksTracker process.ProcessedMiniBlocksTracker, ) (*blockProcessorAndVmFactories, error) { argsParser := smartContract.NewArgumentParser() @@ -317,6 +321,7 @@ func (pcf *processComponentsFactory) newShardBlockProcessor( enableEpochs.ScheduledMiniBlocksEnableEpoch, txTypeHandler, scheduledTxsExecutionHandler, + processedMiniBlocksTracker, ) if err != nil { return nil, err @@ -340,26 +345,28 @@ func (pcf *processComponentsFactory) newShardBlockProcessor( } argsTransactionCoordinator := coordinator.ArgTransactionCoordinator{ - Hasher: pcf.coreData.Hasher(), - Marshalizer: pcf.coreData.InternalMarshalizer(), - ShardCoordinator: pcf.bootstrapComponents.ShardCoordinator(), - Accounts: pcf.state.AccountsAdapter(), - MiniBlockPool: pcf.data.Datapool().MiniBlocks(), - RequestHandler: requestHandler, - PreProcessors: preProcContainer, - InterProcessors: interimProcContainer, - GasHandler: gasHandler, - FeeHandler: txFeeHandler, - BlockSizeComputation: blockSizeComputationHandler, - BalanceComputation: balanceComputationHandler, - EconomicsFee: pcf.coreData.EconomicsData(), - TxTypeHandler: txTypeHandler, - BlockGasAndFeesReCheckEnableEpoch: pcf.epochConfig.EnableEpochs.BlockGasAndFeesReCheckEnableEpoch, - TransactionsLogProcessor: pcf.txLogsProcessor, - EpochNotifier: pcf.epochNotifier, - ScheduledTxsExecutionHandler: scheduledTxsExecutionHandler, - ScheduledMiniBlocksEnableEpoch: enableEpochs.ScheduledMiniBlocksEnableEpoch, - DoubleTransactionsDetector: doubleTransactionsDetector, + Hasher: pcf.coreData.Hasher(), + Marshalizer: pcf.coreData.InternalMarshalizer(), + ShardCoordinator: pcf.bootstrapComponents.ShardCoordinator(), + Accounts: pcf.state.AccountsAdapter(), + MiniBlockPool: pcf.data.Datapool().MiniBlocks(), + RequestHandler: requestHandler, + PreProcessors: preProcContainer, + InterProcessors: interimProcContainer, + GasHandler: gasHandler, + FeeHandler: txFeeHandler, + BlockSizeComputation: blockSizeComputationHandler, + BalanceComputation: balanceComputationHandler, + EconomicsFee: pcf.coreData.EconomicsData(), + TxTypeHandler: txTypeHandler, + BlockGasAndFeesReCheckEnableEpoch: pcf.epochConfig.EnableEpochs.BlockGasAndFeesReCheckEnableEpoch, + TransactionsLogProcessor: pcf.txLogsProcessor, + EpochNotifier: pcf.epochNotifier, + ScheduledTxsExecutionHandler: scheduledTxsExecutionHandler, + ScheduledMiniBlocksEnableEpoch: enableEpochs.ScheduledMiniBlocksEnableEpoch, + DoubleTransactionsDetector: doubleTransactionsDetector, + MiniBlockPartialExecutionEnableEpoch: enableEpochs.MiniBlockPartialExecutionEnableEpoch, + ProcessedMiniBlocksTracker: processedMiniBlocksTracker, } txCoordinator, err := coordinator.NewTransactionCoordinator(argsTransactionCoordinator) if err != nil { @@ -399,6 +406,7 @@ func (pcf *processComponentsFactory) newShardBlockProcessor( GasHandler: gasHandler, ScheduledTxsExecutionHandler: scheduledTxsExecutionHandler, ScheduledMiniBlocksEnableEpoch: enableEpochs.ScheduledMiniBlocksEnableEpoch, + ProcessedMiniBlocksTracker: processedMiniBlocksTracker, } arguments := block.ArgShardProcessor{ ArgBaseProcessor: argumentsBaseProcessor, @@ -430,6 +438,7 @@ func (pcf *processComponentsFactory) newMetaBlockProcessor( txSimulatorProcessorArgs *txsimulator.ArgsTxSimulator, arwenChangeLocker common.Locker, scheduledTxsExecutionHandler process.ScheduledTxsExecutionHandler, + processedMiniBlocksTracker process.ProcessedMiniBlocksTracker, ) 
(*blockProcessorAndVmFactories, error) { builtInFuncs, nftStorageHandler, err := pcf.createBuiltInFunctionContainer(pcf.state.AccountsAdapter(), make(map[string]struct{})) if err != nil { @@ -603,6 +612,7 @@ func (pcf *processComponentsFactory) newMetaBlockProcessor( enableEpochs.ScheduledMiniBlocksEnableEpoch, txTypeHandler, scheduledTxsExecutionHandler, + processedMiniBlocksTracker, ) if err != nil { return nil, err @@ -626,26 +636,28 @@ func (pcf *processComponentsFactory) newMetaBlockProcessor( } argsTransactionCoordinator := coordinator.ArgTransactionCoordinator{ - Hasher: pcf.coreData.Hasher(), - Marshalizer: pcf.coreData.InternalMarshalizer(), - ShardCoordinator: pcf.bootstrapComponents.ShardCoordinator(), - Accounts: pcf.state.AccountsAdapter(), - MiniBlockPool: pcf.data.Datapool().MiniBlocks(), - RequestHandler: requestHandler, - PreProcessors: preProcContainer, - InterProcessors: interimProcContainer, - GasHandler: gasHandler, - FeeHandler: txFeeHandler, - BlockSizeComputation: blockSizeComputationHandler, - BalanceComputation: balanceComputationHandler, - EconomicsFee: pcf.coreData.EconomicsData(), - TxTypeHandler: txTypeHandler, - BlockGasAndFeesReCheckEnableEpoch: enableEpochs.BlockGasAndFeesReCheckEnableEpoch, - TransactionsLogProcessor: pcf.txLogsProcessor, - EpochNotifier: pcf.epochNotifier, - ScheduledTxsExecutionHandler: scheduledTxsExecutionHandler, - ScheduledMiniBlocksEnableEpoch: enableEpochs.ScheduledMiniBlocksEnableEpoch, - DoubleTransactionsDetector: doubleTransactionsDetector, + Hasher: pcf.coreData.Hasher(), + Marshalizer: pcf.coreData.InternalMarshalizer(), + ShardCoordinator: pcf.bootstrapComponents.ShardCoordinator(), + Accounts: pcf.state.AccountsAdapter(), + MiniBlockPool: pcf.data.Datapool().MiniBlocks(), + RequestHandler: requestHandler, + PreProcessors: preProcContainer, + InterProcessors: interimProcContainer, + GasHandler: gasHandler, + FeeHandler: txFeeHandler, + BlockSizeComputation: blockSizeComputationHandler, + BalanceComputation: balanceComputationHandler, + EconomicsFee: pcf.coreData.EconomicsData(), + TxTypeHandler: txTypeHandler, + BlockGasAndFeesReCheckEnableEpoch: enableEpochs.BlockGasAndFeesReCheckEnableEpoch, + TransactionsLogProcessor: pcf.txLogsProcessor, + EpochNotifier: pcf.epochNotifier, + ScheduledTxsExecutionHandler: scheduledTxsExecutionHandler, + ScheduledMiniBlocksEnableEpoch: enableEpochs.ScheduledMiniBlocksEnableEpoch, + DoubleTransactionsDetector: doubleTransactionsDetector, + MiniBlockPartialExecutionEnableEpoch: enableEpochs.MiniBlockPartialExecutionEnableEpoch, + ProcessedMiniBlocksTracker: processedMiniBlocksTracker, } txCoordinator, err := coordinator.NewTransactionCoordinator(argsTransactionCoordinator) if err != nil { @@ -796,6 +808,7 @@ func (pcf *processComponentsFactory) newMetaBlockProcessor( GasHandler: gasHandler, ScheduledTxsExecutionHandler: scheduledTxsExecutionHandler, ScheduledMiniBlocksEnableEpoch: enableEpochs.ScheduledMiniBlocksEnableEpoch, + ProcessedMiniBlocksTracker: processedMiniBlocksTracker, } esdtOwnerAddress, err := pcf.coreData.AddressPubKeyConverter().Decode(pcf.systemSCConfig.ESDTSystemSCConfig.OwnerAddress) @@ -1161,17 +1174,18 @@ func (pcf *processComponentsFactory) createBuiltInFunctionContainer( mapDNSAddresses map[string]struct{}, ) (vmcommon.BuiltInFunctionContainer, vmcommon.SimpleESDTNFTStorageHandler, error) { argsBuiltIn := builtInFunctions.ArgsCreateBuiltInFunctionContainer{ - GasSchedule: pcf.gasSchedule, - MapDNSAddresses: mapDNSAddresses, - Marshalizer: 
pcf.coreData.InternalMarshalizer(), - Accounts: accounts, - ShardCoordinator: pcf.bootstrapComponents.ShardCoordinator(), - EpochNotifier: pcf.epochNotifier, - ESDTMultiTransferEnableEpoch: pcf.epochConfig.EnableEpochs.ESDTMultiTransferEnableEpoch, - ESDTTransferRoleEnableEpoch: pcf.epochConfig.EnableEpochs.ESDTTransferRoleEnableEpoch, - GlobalMintBurnDisableEpoch: pcf.epochConfig.EnableEpochs.GlobalMintBurnDisableEpoch, - ESDTTransferMetaEnableEpoch: pcf.epochConfig.EnableEpochs.BuiltInFunctionOnMetaEnableEpoch, - OptimizeNFTStoreEnableEpoch: pcf.epochConfig.EnableEpochs.OptimizeNFTStoreEnableEpoch, + GasSchedule: pcf.gasSchedule, + MapDNSAddresses: mapDNSAddresses, + Marshalizer: pcf.coreData.InternalMarshalizer(), + Accounts: accounts, + ShardCoordinator: pcf.bootstrapComponents.ShardCoordinator(), + EpochNotifier: pcf.epochNotifier, + ESDTMultiTransferEnableEpoch: pcf.epochConfig.EnableEpochs.ESDTMultiTransferEnableEpoch, + ESDTTransferRoleEnableEpoch: pcf.epochConfig.EnableEpochs.ESDTTransferRoleEnableEpoch, + GlobalMintBurnDisableEpoch: pcf.epochConfig.EnableEpochs.GlobalMintBurnDisableEpoch, + ESDTTransferMetaEnableEpoch: pcf.epochConfig.EnableEpochs.BuiltInFunctionOnMetaEnableEpoch, + OptimizeNFTStoreEnableEpoch: pcf.epochConfig.EnableEpochs.OptimizeNFTStoreEnableEpoch, + CheckCorrectTokenIDEnableEpoch: pcf.epochConfig.EnableEpochs.CheckCorrectTokenIDForTransferRoleEnableEpoch, } return builtInFunctions.CreateBuiltInFuncContainerAndNFTStorageHandler(argsBuiltIn) diff --git a/factory/blockProcessorCreator_test.go b/factory/blockProcessorCreator_test.go index c6da5bb09f7..31dfb501a88 100644 --- a/factory/blockProcessorCreator_test.go +++ b/factory/blockProcessorCreator_test.go @@ -48,6 +48,7 @@ func Test_newBlockProcessorCreatorForShard(t *testing.T) { }, &sync.RWMutex{}, &testscommon.ScheduledTxsExecutionStub{}, + &testscommon.ProcessedMiniBlocksTrackerStub{}, ) require.NoError(t, err) @@ -156,6 +157,7 @@ func Test_newBlockProcessorCreatorForMeta(t *testing.T) { }, &sync.RWMutex{}, &testscommon.ScheduledTxsExecutionStub{}, + &testscommon.ProcessedMiniBlocksTrackerStub{}, ) require.NoError(t, err) diff --git a/factory/bootstrapComponentsHandler.go b/factory/bootstrapComponentsHandler.go index 572f2a40bb4..db5d7b07ce2 100644 --- a/factory/bootstrapComponentsHandler.go +++ b/factory/bootstrapComponentsHandler.go @@ -137,5 +137,5 @@ func (mbf *managedBootstrapComponents) IsInterfaceNil() bool { // String returns the name of the component func (mbf *managedBootstrapComponents) String() string { - return "managedBootstrapComponents" + return bootstrapComponentsName } diff --git a/factory/consensusComponents.go b/factory/consensusComponents.go index 172789c11f7..fd6dbeb287e 100644 --- a/factory/consensusComponents.go +++ b/factory/consensusComponents.go @@ -28,7 +28,6 @@ import ( type ConsensusComponentsFactoryArgs struct { Config config.Config BootstrapRoundIndex uint64 - HardforkTrigger HardforkTrigger CoreComponents CoreComponentsHolder NetworkComponents NetworkComponentsHolder CryptoComponents CryptoComponentsHolder @@ -43,7 +42,6 @@ type ConsensusComponentsFactoryArgs struct { type consensusComponentsFactory struct { config config.Config bootstrapRoundIndex uint64 - hardforkTrigger HardforkTrigger coreComponents CoreComponentsHolder networkComponents NetworkComponentsHolder cryptoComponents CryptoComponentsHolder @@ -60,7 +58,6 @@ type consensusComponents struct { bootstrapper process.Bootstrapper broadcastMessenger consensus.BroadcastMessenger worker ConsensusWorker - hardforkTrigger 
HardforkTrigger consensusTopic string consensusGroupSize int } @@ -88,9 +85,6 @@ func NewConsensusComponentsFactory(args ConsensusComponentsFactoryArgs) (*consen if check.IfNil(args.StatusComponents) { return nil, errors.ErrNilStatusComponentsHolder } - if check.IfNil(args.HardforkTrigger) { - return nil, errors.ErrNilHardforkTrigger - } if check.IfNil(args.ScheduledProcessor) { return nil, errors.ErrNilScheduledProcessor } @@ -98,7 +92,6 @@ func NewConsensusComponentsFactory(args ConsensusComponentsFactoryArgs) (*consen return &consensusComponentsFactory{ config: args.Config, bootstrapRoundIndex: args.BootstrapRoundIndex, - hardforkTrigger: args.HardforkTrigger, coreComponents: args.CoreComponents, networkComponents: args.NetworkComponents, cryptoComponents: args.CryptoComponents, @@ -128,7 +121,6 @@ func (ccf *consensusComponentsFactory) Create() (*consensusComponents, error) { cc.consensusGroupSize = int(consensusGroupSize) - cc.hardforkTrigger = ccf.hardforkTrigger blockchain := ccf.dataComponents.Blockchain() notInitializedGenesisBlock := len(blockchain.GetGenesisHeaderHash()) == 0 || check.IfNil(blockchain.GetGenesisHeader()) @@ -181,30 +173,29 @@ func (ccf *consensusComponentsFactory) Create() (*consensusComponents, error) { } workerArgs := &spos.WorkerArgs{ - ConsensusService: consensusService, - BlockChain: ccf.dataComponents.Blockchain(), - BlockProcessor: ccf.processComponents.BlockProcessor(), - ScheduledProcessor: ccf.scheduledProcessor, - Bootstrapper: cc.bootstrapper, - BroadcastMessenger: cc.broadcastMessenger, - ConsensusState: consensusState, - ForkDetector: ccf.processComponents.ForkDetector(), - PeerSignatureHandler: ccf.cryptoComponents.PeerSignatureHandler(), - Marshalizer: marshalizer, - Hasher: ccf.coreComponents.Hasher(), - RoundHandler: ccf.processComponents.RoundHandler(), - ShardCoordinator: ccf.processComponents.ShardCoordinator(), - SyncTimer: ccf.coreComponents.SyncTimer(), - HeaderSigVerifier: ccf.processComponents.HeaderSigVerifier(), - HeaderIntegrityVerifier: ccf.processComponents.HeaderIntegrityVerifier(), - ChainID: []byte(ccf.coreComponents.ChainID()), - NetworkShardingCollector: ccf.processComponents.PeerShardMapper(), - AntifloodHandler: ccf.networkComponents.InputAntiFloodHandler(), - PoolAdder: ccf.dataComponents.Datapool().MiniBlocks(), - SignatureSize: ccf.config.ValidatorPubkeyConverter.SignatureLength, - PublicKeySize: ccf.config.ValidatorPubkeyConverter.Length, - AppStatusHandler: ccf.coreComponents.StatusHandler(), - NodeRedundancyHandler: ccf.processComponents.NodeRedundancyHandler(), + ConsensusService: consensusService, + BlockChain: ccf.dataComponents.Blockchain(), + BlockProcessor: ccf.processComponents.BlockProcessor(), + ScheduledProcessor: ccf.scheduledProcessor, + Bootstrapper: cc.bootstrapper, + BroadcastMessenger: cc.broadcastMessenger, + ConsensusState: consensusState, + ForkDetector: ccf.processComponents.ForkDetector(), + PeerSignatureHandler: ccf.cryptoComponents.PeerSignatureHandler(), + Marshalizer: marshalizer, + Hasher: ccf.coreComponents.Hasher(), + RoundHandler: ccf.processComponents.RoundHandler(), + ShardCoordinator: ccf.processComponents.ShardCoordinator(), + SyncTimer: ccf.coreComponents.SyncTimer(), + HeaderSigVerifier: ccf.processComponents.HeaderSigVerifier(), + HeaderIntegrityVerifier: ccf.processComponents.HeaderIntegrityVerifier(), + ChainID: []byte(ccf.coreComponents.ChainID()), + AntifloodHandler: ccf.networkComponents.InputAntiFloodHandler(), + PoolAdder: ccf.dataComponents.Datapool().MiniBlocks(), + 
SignatureSize: ccf.config.ValidatorPubkeyConverter.SignatureLength, + PublicKeySize: ccf.config.ValidatorPubkeyConverter.Length, + AppStatusHandler: ccf.coreComponents.StatusHandler(), + NodeRedundancyHandler: ccf.processComponents.NodeRedundancyHandler(), } cc.worker, err = spos.NewWorker(workerArgs) @@ -418,6 +409,7 @@ func (ccf *consensusComponentsFactory) createShardBootstrapper() (process.Bootst ScheduledTxsExecutionHandler: ccf.processComponents.ScheduledTxsExecutionHandler(), MiniblocksProvider: ccf.dataComponents.MiniBlocksProvider(), EpochNotifier: ccf.coreComponents.EpochNotifier(), + ProcessedMiniBlocksTracker: ccf.processComponents.ProcessedMiniBlocksTracker(), } argsShardStorageBootstrapper := storageBootstrap.ArgsShardStorageBootstrapper{ @@ -540,6 +532,7 @@ func (ccf *consensusComponentsFactory) createMetaChainBootstrapper() (process.Bo ScheduledTxsExecutionHandler: ccf.processComponents.ScheduledTxsExecutionHandler(), MiniblocksProvider: ccf.dataComponents.MiniBlocksProvider(), EpochNotifier: ccf.coreComponents.EpochNotifier(), + ProcessedMiniBlocksTracker: ccf.processComponents.ProcessedMiniBlocksTracker(), } argsMetaStorageBootstrapper := storageBootstrap.ArgsMetaStorageBootstrapper{ @@ -630,8 +623,9 @@ func (ccf *consensusComponentsFactory) createConsensusTopic(cc *consensusCompone } func (ccf *consensusComponentsFactory) addCloserInstances(closers ...update.Closer) error { + hardforkTrigger := ccf.processComponents.HardforkTrigger() for _, c := range closers { - err := ccf.hardforkTrigger.AddCloser(c) + err := hardforkTrigger.AddCloser(c) if err != nil { return err } @@ -661,6 +655,10 @@ func (ccf *consensusComponentsFactory) checkArgs() error { if check.IfNil(netMessenger) { return errors.ErrNilMessenger } + hardforkTrigger := ccf.processComponents.HardforkTrigger() + if check.IfNil(hardforkTrigger) { + return errors.ErrNilHardforkTrigger + } return nil } diff --git a/factory/consensusComponentsHandler.go b/factory/consensusComponentsHandler.go index 166d39751a8..7bbc649719e 100644 --- a/factory/consensusComponentsHandler.go +++ b/factory/consensusComponentsHandler.go @@ -133,18 +133,6 @@ func (mcc *managedConsensusComponents) CheckSubcomponents() error { return nil } -// HardforkTrigger returns the hardfork trigger -func (mcc *managedConsensusComponents) HardforkTrigger() HardforkTrigger { - mcc.mutConsensusComponents.RLock() - defer mcc.mutConsensusComponents.RUnlock() - - if mcc.consensusComponents == nil { - return nil - } - - return mcc.consensusComponents.hardforkTrigger -} - // Bootstrapper returns the bootstrapper instance func (mcc *managedConsensusComponents) Bootstrapper() process.Bootstrapper { mcc.mutConsensusComponents.RLock() @@ -164,5 +152,5 @@ func (mcc *managedConsensusComponents) IsInterfaceNil() bool { // String returns the name of the component func (mcc *managedConsensusComponents) String() string { - return "managedConsensusComponents" + return consensusComponentsName } diff --git a/factory/consensusComponents_test.go b/factory/consensusComponents_test.go index 34b721fa4c1..6d0d8b9bd16 100644 --- a/factory/consensusComponents_test.go +++ b/factory/consensusComponents_test.go @@ -398,7 +398,6 @@ func getConsensusArgs(shardCoordinator sharding.Coordinator) factory.ConsensusCo return factory.ConsensusComponentsFactoryArgs{ Config: testscommon.GetGeneralConfig(), BootstrapRoundIndex: 0, - HardforkTrigger: &mock.HardforkTriggerStub{}, CoreComponents: coreComponents, NetworkComponents: networkComponents, CryptoComponents: cryptoComponents, @@ -476,6 
+475,7 @@ func getDefaultProcessComponents(shardCoordinator sharding.Coordinator) *mock.Pr return &mock.PrivateKeyStub{} }, }, + HardforkTriggerField: &testscommon.HardforkTriggerStub{}, } } diff --git a/factory/constants.go b/factory/constants.go new file mode 100644 index 00000000000..95d2eb61b30 --- /dev/null +++ b/factory/constants.go @@ -0,0 +1,15 @@ +package factory + +const ( + bootstrapComponentsName = "managedBootstrapComponents" + consensusComponentsName = "managedConsensusComponents" + coreComponentsName = "managedCoreComponents" + cryptoComponentsName = "managedCryptoComponents" + dataComponentsName = "managedDataComponents" + heartbeatComponentsName = "managedHeartbeatComponents" + heartbeatV2ComponentsName = "managedHeartbeatV2Components" + networkComponentsName = "managedNetworkComponents" + processComponentsName = "managedProcessComponents" + stateComponentsName = "managedStateComponents" + statusComponentsName = "managedStatusComponents" +) diff --git a/factory/coreComponents.go b/factory/coreComponents.go index e822d696552..67d62159ac7 100644 --- a/factory/coreComponents.go +++ b/factory/coreComponents.go @@ -103,6 +103,7 @@ type coreComponents struct { encodedAddressLen uint32 arwenChangeLocker common.Locker processStatusHandler common.ProcessStatusHandler + hardforkTriggerPubKey []byte } // NewCoreComponentsFactory initializes the factory which is responsible to creating core components @@ -331,6 +332,12 @@ func (ccf *coreComponentsFactory) Create() (*coreComponents, error) { // set as observer at first - it will be updated when creating the nodes coordinator nodeTypeProvider := nodetype.NewNodeTypeProvider(core.NodeTypeObserver) + pubKeyStr := ccf.config.Hardfork.PublicKeyToListenFrom + pubKeyBytes, err := validatorPubkeyConverter.Decode(pubKeyStr) + if err != nil { + return nil, err + } + return &coreComponents{ hasher: hasher, txSignHasher: txSignHasher, @@ -364,6 +371,7 @@ func (ccf *coreComponentsFactory) Create() (*coreComponents, error) { nodeTypeProvider: nodeTypeProvider, arwenChangeLocker: arwenChangeLocker, processStatusHandler: statusHandler.NewProcessStatusHandler(), + hardforkTriggerPubKey: pubKeyBytes, }, nil } diff --git a/factory/coreComponentsHandler.go b/factory/coreComponentsHandler.go index 4b2cb900f0c..de4b1f3d7ed 100644 --- a/factory/coreComponentsHandler.go +++ b/factory/coreComponentsHandler.go @@ -566,6 +566,18 @@ func (mcc *managedCoreComponents) ProcessStatusHandler() common.ProcessStatusHan return mcc.coreComponents.processStatusHandler } +// HardforkTriggerPubKey returns the hardfork source public key +func (mcc *managedCoreComponents) HardforkTriggerPubKey() []byte { + mcc.mutCoreComponents.RLock() + defer mcc.mutCoreComponents.RUnlock() + + if mcc.coreComponents == nil { + return nil + } + + return mcc.coreComponents.hardforkTriggerPubKey +} + // IsInterfaceNil returns true if there is no value under the interface func (mcc *managedCoreComponents) IsInterfaceNil() bool { return mcc == nil @@ -573,5 +585,5 @@ func (mcc *managedCoreComponents) IsInterfaceNil() bool { // String returns the name of the component func (mcc *managedCoreComponents) String() string { - return "managedCoreComponents" + return coreComponentsName } diff --git a/factory/coreComponentsHandler_test.go b/factory/coreComponentsHandler_test.go index 44092a91e0b..558a9525d8a 100644 --- a/factory/coreComponentsHandler_test.go +++ b/factory/coreComponentsHandler_test.go @@ -46,6 +46,7 @@ func TestManagedCoreComponents_CreateShouldWork(t *testing.T) { require.Nil(t, 
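Reviewer note (illustrative sketch, not part of the patch): factories that previously received HardforkTrigger as a direct argument now read it from the process components holder, so test fixtures provide it through the mock's HardforkTriggerField. The wiring below reuses the test helpers shown in this diff and assumes ConsensusComponentsFactoryArgs still carries a ProcessComponents field, as the factory code implies.

	processComponents := getDefaultProcessComponents(shardCoordinator)
	processComponents.HardforkTriggerField = &testscommon.HardforkTriggerStub{}

	args := getConsensusArgs(shardCoordinator) // the HardforkTrigger argument is gone
	args.ProcessComponents = processComponents

	ccf, err := factory.NewConsensusComponentsFactory(args)
	require.Nil(t, err)
	require.NotNil(t, ccf)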
managedCoreComponents.RoundNotifier()) require.Nil(t, managedCoreComponents.ArwenChangeLocker()) require.Nil(t, managedCoreComponents.ProcessStatusHandler()) + require.True(t, len(managedCoreComponents.HardforkTriggerPubKey()) == 0) err = managedCoreComponents.Create() require.NoError(t, err) @@ -63,6 +64,8 @@ func TestManagedCoreComponents_CreateShouldWork(t *testing.T) { require.NotNil(t, managedCoreComponents.RoundNotifier()) require.NotNil(t, managedCoreComponents.ArwenChangeLocker()) require.NotNil(t, managedCoreComponents.ProcessStatusHandler()) + expectedBytes, _ := managedCoreComponents.ValidatorPubKeyConverter().Decode(dummyPk) + require.Equal(t, expectedBytes, managedCoreComponents.HardforkTriggerPubKey()) } func TestManagedCoreComponents_Close(t *testing.T) { diff --git a/factory/coreComponents_test.go b/factory/coreComponents_test.go index 062f59a45ee..878c0d60fba 100644 --- a/factory/coreComponents_test.go +++ b/factory/coreComponents_test.go @@ -323,6 +323,13 @@ func getCoreArgs() factory.CoreComponentsFactoryArgs { Shards: 1, }, }, + PeersRatingConfig: config.PeersRatingConfig{ + TopRatedCacheCapacity: 1000, + BadRatedCacheCapacity: 1000, + }, + Hardfork: config.HardforkConfig{ + PublicKeyToListenFrom: dummyPk, + }, }, ConfigPathsHolder: config.ConfigurationPathsHolder{ GasScheduleDirectoryName: "../cmd/node/config/gasSchedules", diff --git a/factory/cryptoComponentsHandler.go b/factory/cryptoComponentsHandler.go index 953afd908d4..692dab6826b 100644 --- a/factory/cryptoComponentsHandler.go +++ b/factory/cryptoComponentsHandler.go @@ -295,5 +295,5 @@ func (mcc *managedCryptoComponents) IsInterfaceNil() bool { // String returns the name of the component func (mcc *managedCryptoComponents) String() string { - return "managedCryptoComponents" + return cryptoComponentsName } diff --git a/factory/dataComponents.go b/factory/dataComponents.go index 98b3ffbfda3..d0931d26ce2 100644 --- a/factory/dataComponents.go +++ b/factory/dataComponents.go @@ -167,21 +167,19 @@ func (dcf *dataComponentsFactory) createDataStoreFromConfig() (dataRetriever.Sto // Close closes all underlying components that need closing func (cc *dataComponents) Close() error { + var lastError error if cc.store != nil { log.Debug("closing all store units....") err := cc.store.CloseAll() if err != nil { - return err + log.Error("failed to close all store units", "error", err.Error()) + lastError = err } } - if !check.IfNil(cc.datapool) && !check.IfNil(cc.datapool.TrieNodes()) { - log.Debug("closing trie nodes data pool....") - err := cc.datapool.TrieNodes().Close() - if err != nil { - return err - } + if !check.IfNil(cc.datapool) { + lastError = cc.datapool.Close() } - return nil + return lastError } diff --git a/factory/dataComponentsHandler.go b/factory/dataComponentsHandler.go index 1de9646ef82..7bc4acf0b00 100644 --- a/factory/dataComponentsHandler.go +++ b/factory/dataComponentsHandler.go @@ -170,5 +170,5 @@ func (mdc *managedDataComponents) IsInterfaceNil() bool { // String returns the name of the component func (mdc *managedDataComponents) String() string { - return "managedDataComponents" + return dataComponentsName } diff --git a/factory/disabled/hardforkTrigger.go b/factory/disabled/hardforkTrigger.go new file mode 100644 index 00000000000..d471202425a --- /dev/null +++ b/factory/disabled/hardforkTrigger.go @@ -0,0 +1,40 @@ +package disabled + +// hardforkTrigger implements HardforkTrigger interface but does nothing as it is disabled +type hardforkTrigger struct { +} + +// HardforkTrigger returns a 
disabled hardforkTrigger +func HardforkTrigger() *hardforkTrigger { + return &hardforkTrigger{} +} + +// TriggerReceived does nothing as it is disabled +func (h *hardforkTrigger) TriggerReceived(_ []byte, _ []byte, _ []byte) (bool, error) { + return false, nil +} + +// RecordedTriggerMessage does nothing as it is disabled +func (h *hardforkTrigger) RecordedTriggerMessage() ([]byte, bool) { + return nil, false +} + +// NotifyTriggerReceived does nothing as it is disabled +func (h *hardforkTrigger) NotifyTriggerReceived() <-chan struct{} { + return nil +} + +// NotifyTriggerReceivedV2 does nothing as it is disabled +func (h *hardforkTrigger) NotifyTriggerReceivedV2() <-chan struct{} { + return nil +} + +// CreateData does nothing as it is disabled +func (h *hardforkTrigger) CreateData() []byte { + return nil +} + +// IsInterfaceNil returns true if there is no value under the interface +func (h *hardforkTrigger) IsInterfaceNil() bool { + return h == nil +} diff --git a/factory/disabled/txCoordinator.go b/factory/disabled/txCoordinator.go index 9a769541870..bbd3cfb513a 100644 --- a/factory/disabled/txCoordinator.go +++ b/factory/disabled/txCoordinator.go @@ -6,6 +6,7 @@ import ( "github.com/ElrondNetwork/elrond-go-core/data" "github.com/ElrondNetwork/elrond-go-core/data/block" "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/process/block/processedMb" ) // TxCoordinator implements the TransactionCoordinator interface but does nothing as it is disabled @@ -71,7 +72,7 @@ func (txCoordinator *TxCoordinator) CreateBlockStarted() { // CreateMbsAndProcessCrossShardTransactionsDstMe does nothing as it is disabled func (txCoordinator *TxCoordinator) CreateMbsAndProcessCrossShardTransactionsDstMe( _ data.HeaderHandler, - _ map[string]struct{}, + _ map[string]*processedMb.ProcessedMiniBlockInfo, _ func() bool, _ func() bool, _ bool, @@ -124,7 +125,7 @@ func (txCoordinator *TxCoordinator) AddTxsFromMiniBlocks(_ block.MiniBlockSlice) } // AddTransactions does nothing as it is disabled -func (txCoordinator *TxCoordinator) AddTransactions (_ []data.TransactionHandler, _ block.Type) { +func (txCoordinator *TxCoordinator) AddTransactions(_ []data.TransactionHandler, _ block.Type) { } // GetAllCurrentLogs returns empty logs map diff --git a/factory/export_test.go b/factory/export_test.go index d61d4962368..c87ed1645ef 100644 --- a/factory/export_test.go +++ b/factory/export_test.go @@ -67,6 +67,7 @@ func (pcf *processComponentsFactory) NewBlockProcessor( txSimulatorProcessorArgs *txsimulator.ArgsTxSimulator, arwenChangeLocker common.Locker, scheduledTxsExecutionHandler process.ScheduledTxsExecutionHandler, + processedMiniBlocksTracker process.ProcessedMiniBlocksTracker, ) (process.BlockProcessor, process.VirtualMachinesContainerFactory, error) { blockProcessorComponents, err := pcf.newBlockProcessor( requestHandler, @@ -80,6 +81,7 @@ func (pcf *processComponentsFactory) NewBlockProcessor( txSimulatorProcessorArgs, arwenChangeLocker, scheduledTxsExecutionHandler, + processedMiniBlocksTracker, ) if err != nil { return nil, nil, err diff --git a/factory/heartbeatComponents.go b/factory/heartbeatComponents.go index 41c1d459652..498ea57b48e 100644 --- a/factory/heartbeatComponents.go +++ b/factory/heartbeatComponents.go @@ -22,31 +22,31 @@ import ( // HeartbeatComponentsFactoryArgs holds the arguments needed to create a heartbeat components factory type HeartbeatComponentsFactoryArgs struct { - Config config.Config - Prefs config.Preferences - AppVersion string - GenesisTime 
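Reviewer note (illustrative only, assuming an import of the new factory/disabled package): a compile-time assertion is a cheap way to confirm that the disabled trigger satisfies whichever consumer-side contract it is injected for. hbTrigger below is a hypothetical local interface listing just the methods implemented above.

	type hbTrigger interface {
		TriggerReceived(payload []byte, data []byte, pkBytes []byte) (bool, error)
		RecordedTriggerMessage() ([]byte, bool)
		NotifyTriggerReceived() <-chan struct{}
		NotifyTriggerReceivedV2() <-chan struct{}
		CreateData() []byte
		IsInterfaceNil() bool
	}

	// fails to compile if the disabled implementation drifts from the contract
	var _ hbTrigger = disabled.HardforkTrigger()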
time.Time - HardforkTrigger heartbeat.HardforkTrigger - RedundancyHandler heartbeat.NodeRedundancyHandler - CoreComponents CoreComponentsHolder - DataComponents DataComponentsHolder - NetworkComponents NetworkComponentsHolder - CryptoComponents CryptoComponentsHolder - ProcessComponents ProcessComponentsHolder + Config config.Config + Prefs config.Preferences + AppVersion string + GenesisTime time.Time + RedundancyHandler heartbeat.NodeRedundancyHandler + CoreComponents CoreComponentsHolder + DataComponents DataComponentsHolder + NetworkComponents NetworkComponentsHolder + CryptoComponents CryptoComponentsHolder + ProcessComponents ProcessComponentsHolder + HeartbeatDisableEpoch uint32 } type heartbeatComponentsFactory struct { - config config.Config - prefs config.Preferences - version string - GenesisTime time.Time - hardforkTrigger heartbeat.HardforkTrigger - redundancyHandler heartbeat.NodeRedundancyHandler - coreComponents CoreComponentsHolder - dataComponents DataComponentsHolder - networkComponents NetworkComponentsHolder - cryptoComponents CryptoComponentsHolder - processComponents ProcessComponentsHolder + config config.Config + prefs config.Preferences + version string + GenesisTime time.Time + redundancyHandler heartbeat.NodeRedundancyHandler + coreComponents CoreComponentsHolder + dataComponents DataComponentsHolder + networkComponents NetworkComponentsHolder + cryptoComponents CryptoComponentsHolder + processComponents ProcessComponentsHolder + heartbeatDisableEpoch uint32 } type heartbeatComponents struct { @@ -60,9 +60,6 @@ type heartbeatComponents struct { // NewHeartbeatComponentsFactory creates the heartbeat components factory func NewHeartbeatComponentsFactory(args HeartbeatComponentsFactoryArgs) (*heartbeatComponentsFactory, error) { - if check.IfNil(args.HardforkTrigger) { - return nil, heartbeat.ErrNilHardforkTrigger - } if check.IfNil(args.RedundancyHandler) { return nil, heartbeat.ErrNilRedundancyHandler } @@ -81,19 +78,23 @@ func NewHeartbeatComponentsFactory(args HeartbeatComponentsFactoryArgs) (*heartb if check.IfNil(args.ProcessComponents) { return nil, errors.ErrNilProcessComponentsHolder } + hardforkTrigger := args.ProcessComponents.HardforkTrigger() + if check.IfNil(hardforkTrigger) { + return nil, heartbeat.ErrNilHardforkTrigger + } return &heartbeatComponentsFactory{ - config: args.Config, - prefs: args.Prefs, - version: args.AppVersion, - GenesisTime: args.GenesisTime, - hardforkTrigger: args.HardforkTrigger, - redundancyHandler: args.RedundancyHandler, - coreComponents: args.CoreComponents, - dataComponents: args.DataComponents, - networkComponents: args.NetworkComponents, - cryptoComponents: args.CryptoComponents, - processComponents: args.ProcessComponents, + config: args.Config, + prefs: args.Prefs, + version: args.AppVersion, + GenesisTime: args.GenesisTime, + redundancyHandler: args.RedundancyHandler, + coreComponents: args.CoreComponents, + dataComponents: args.DataComponents, + networkComponents: args.NetworkComponents, + cryptoComponents: args.CryptoComponents, + processComponents: args.ProcessComponents, + heartbeatDisableEpoch: args.HeartbeatDisableEpoch, }, nil } @@ -135,22 +136,26 @@ func (hcf *heartbeatComponentsFactory) Create() (*heartbeatComponents, error) { peerSubType = core.FullHistoryObserver } + hardforkTrigger := hcf.processComponents.HardforkTrigger() + argSender := heartbeatProcess.ArgHeartbeatSender{ - PeerSubType: peerSubType, - PeerMessenger: hcf.networkComponents.NetworkMessenger(), - PeerSignatureHandler: 
hcf.cryptoComponents.PeerSignatureHandler(), - PrivKey: hcf.cryptoComponents.PrivateKey(), - Marshalizer: hcf.coreComponents.InternalMarshalizer(), - Topic: common.HeartbeatTopic, - ShardCoordinator: hcf.processComponents.ShardCoordinator(), - PeerTypeProvider: peerTypeProvider, - StatusHandler: hcf.coreComponents.StatusHandler(), - VersionNumber: hcf.version, - NodeDisplayName: hcf.prefs.Preferences.NodeDisplayName, - KeyBaseIdentity: hcf.prefs.Preferences.Identity, - HardforkTrigger: hcf.hardforkTrigger, - CurrentBlockProvider: hcf.dataComponents.Blockchain(), - RedundancyHandler: hcf.redundancyHandler, + PeerSubType: peerSubType, + PeerMessenger: hcf.networkComponents.NetworkMessenger(), + PeerSignatureHandler: hcf.cryptoComponents.PeerSignatureHandler(), + PrivKey: hcf.cryptoComponents.PrivateKey(), + Marshalizer: hcf.coreComponents.InternalMarshalizer(), + Topic: common.HeartbeatTopic, + ShardCoordinator: hcf.processComponents.ShardCoordinator(), + PeerTypeProvider: peerTypeProvider, + StatusHandler: hcf.coreComponents.StatusHandler(), + VersionNumber: hcf.version, + NodeDisplayName: hcf.prefs.Preferences.NodeDisplayName, + KeyBaseIdentity: hcf.prefs.Preferences.Identity, + HardforkTrigger: hardforkTrigger, + CurrentBlockProvider: hcf.dataComponents.Blockchain(), + RedundancyHandler: hcf.redundancyHandler, + EpochNotifier: hcf.coreComponents.EpochNotifier(), + HeartbeatDisableEpoch: hcf.heartbeatDisableEpoch, } hbc.sender, err = heartbeatProcess.NewSender(argSender) @@ -201,11 +206,13 @@ func (hcf *heartbeatComponentsFactory) Create() (*heartbeatComponents, error) { PeerTypeProvider: peerTypeProvider, Timer: timer, AntifloodHandler: hcf.networkComponents.InputAntiFloodHandler(), - HardforkTrigger: hcf.hardforkTrigger, + HardforkTrigger: hardforkTrigger, ValidatorPubkeyConverter: hcf.coreComponents.ValidatorPubKeyConverter(), HeartbeatRefreshIntervalInSec: hcf.config.Heartbeat.HeartbeatRefreshIntervalInSec, HideInactiveValidatorIntervalInSec: hcf.config.Heartbeat.HideInactiveValidatorIntervalInSec, AppStatusHandler: hcf.coreComponents.StatusHandler(), + EpochNotifier: hcf.coreComponents.EpochNotifier(), + HeartbeatDisableEpoch: hcf.heartbeatDisableEpoch, } hbc.monitor, err = heartbeatProcess.NewMonitor(argMonitor) if err != nil { @@ -256,6 +263,7 @@ func (hcf *heartbeatComponentsFactory) startSendingHeartbeats(ctx context.Contex diffSeconds := cfg.MaxTimeToWaitBetweenBroadcastsInSec - cfg.MinTimeToWaitBetweenBroadcastsInSec diffNanos := int64(diffSeconds) * time.Second.Nanoseconds() + hardforkTrigger := hcf.processComponents.HardforkTrigger() for { randomNanos := r.Int63n(diffNanos) timeToWait := time.Second*time.Duration(cfg.MinTimeToWaitBetweenBroadcastsInSec) + time.Duration(randomNanos) @@ -265,7 +273,7 @@ func (hcf *heartbeatComponentsFactory) startSendingHeartbeats(ctx context.Contex log.Debug("heartbeat's go routine is stopping...") return case <-time.After(timeToWait): - case <-hcf.hardforkTrigger.NotifyTriggerReceived(): + case <-hardforkTrigger.NotifyTriggerReceived(): //this will force an immediate broadcast of the trigger //message on the network log.Debug("hardfork message prepared for heartbeat sending") diff --git a/factory/heartbeatComponentsHandler.go b/factory/heartbeatComponentsHandler.go index 49174275fbe..4edd75cb2a6 100644 --- a/factory/heartbeatComponentsHandler.go +++ b/factory/heartbeatComponentsHandler.go @@ -142,5 +142,5 @@ func (mhc *managedHeartbeatComponents) IsInterfaceNil() bool { // String returns the name of the component func (mhc 
*managedHeartbeatComponents) String() string { - return "managedHeartbeatComponents" + return heartbeatComponentsName } diff --git a/factory/heartbeatComponents_test.go b/factory/heartbeatComponents_test.go index f112791b021..a0cbd16b2f3 100644 --- a/factory/heartbeatComponents_test.go +++ b/factory/heartbeatComponents_test.go @@ -69,10 +69,10 @@ func getDefaultHeartbeatComponents(shardCoordinator sharding.Coordinator) factor CacheRefreshIntervalInSec: uint32(100), }, }, - Prefs: config.Preferences{}, - AppVersion: "test", - GenesisTime: time.Time{}, - HardforkTrigger: &mock.HardforkTriggerStub{}, + HeartbeatDisableEpoch: 10, + Prefs: config.Preferences{}, + AppVersion: "test", + GenesisTime: time.Time{}, RedundancyHandler: &mock.RedundancyHandlerStub{ ObserverPrivateKeyCalled: func() crypto.PrivateKey { return &mock.PrivateKeyStub{ diff --git a/factory/heartbeatV2Components.go b/factory/heartbeatV2Components.go new file mode 100644 index 00000000000..e6b6ef48ec9 --- /dev/null +++ b/factory/heartbeatV2Components.go @@ -0,0 +1,225 @@ +package factory + +import ( + "time" + + "github.com/ElrondNetwork/elrond-go-core/core" + "github.com/ElrondNetwork/elrond-go-core/core/check" + "github.com/ElrondNetwork/elrond-go-core/core/random" + "github.com/ElrondNetwork/elrond-go/common" + "github.com/ElrondNetwork/elrond-go/config" + "github.com/ElrondNetwork/elrond-go/errors" + "github.com/ElrondNetwork/elrond-go/heartbeat/monitor" + "github.com/ElrondNetwork/elrond-go/heartbeat/processor" + "github.com/ElrondNetwork/elrond-go/heartbeat/sender" + "github.com/ElrondNetwork/elrond-go/update" +) + +// ArgHeartbeatV2ComponentsFactory represents the argument for the heartbeat v2 components factory +type ArgHeartbeatV2ComponentsFactory struct { + Config config.Config + Prefs config.Preferences + AppVersion string + BoostrapComponents BootstrapComponentsHolder + CoreComponents CoreComponentsHolder + DataComponents DataComponentsHolder + NetworkComponents NetworkComponentsHolder + CryptoComponents CryptoComponentsHolder + ProcessComponents ProcessComponentsHolder +} + +type heartbeatV2ComponentsFactory struct { + config config.Config + prefs config.Preferences + version string + boostrapComponents BootstrapComponentsHolder + coreComponents CoreComponentsHolder + dataComponents DataComponentsHolder + networkComponents NetworkComponentsHolder + cryptoComponents CryptoComponentsHolder + processComponents ProcessComponentsHolder +} + +type heartbeatV2Components struct { + sender update.Closer + peerAuthRequestsProcessor update.Closer + directConnectionsProcessor update.Closer + monitor HeartbeatV2Monitor +} + +// NewHeartbeatV2ComponentsFactory creates a new instance of heartbeatV2ComponentsFactory +func NewHeartbeatV2ComponentsFactory(args ArgHeartbeatV2ComponentsFactory) (*heartbeatV2ComponentsFactory, error) { + err := checkHeartbeatV2FactoryArgs(args) + if err != nil { + return nil, err + } + + return &heartbeatV2ComponentsFactory{ + config: args.Config, + prefs: args.Prefs, + version: args.AppVersion, + boostrapComponents: args.BoostrapComponents, + coreComponents: args.CoreComponents, + dataComponents: args.DataComponents, + networkComponents: args.NetworkComponents, + cryptoComponents: args.CryptoComponents, + processComponents: args.ProcessComponents, + }, nil +} + +func checkHeartbeatV2FactoryArgs(args ArgHeartbeatV2ComponentsFactory) error { + if check.IfNil(args.BoostrapComponents) { + return errors.ErrNilBootstrapComponentsHolder + } + if check.IfNil(args.CoreComponents) { + return 
errors.ErrNilCoreComponentsHolder + } + if check.IfNil(args.DataComponents) { + return errors.ErrNilDataComponentsHolder + } + if check.IfNil(args.NetworkComponents) { + return errors.ErrNilNetworkComponentsHolder + } + if check.IfNil(args.CryptoComponents) { + return errors.ErrNilCryptoComponentsHolder + } + if check.IfNil(args.ProcessComponents) { + return errors.ErrNilProcessComponentsHolder + } + hardforkTrigger := args.ProcessComponents.HardforkTrigger() + if check.IfNil(hardforkTrigger) { + return errors.ErrNilHardforkTrigger + } + + return nil +} + +// Create creates the heartbeatV2 components +func (hcf *heartbeatV2ComponentsFactory) Create() (*heartbeatV2Components, error) { + if !hcf.networkComponents.NetworkMessenger().HasTopic(common.PeerAuthenticationTopic) { + err := hcf.networkComponents.NetworkMessenger().CreateTopic(common.PeerAuthenticationTopic, true) + if err != nil { + return nil, err + } + } + if !hcf.networkComponents.NetworkMessenger().HasTopic(common.HeartbeatV2Topic) { + err := hcf.networkComponents.NetworkMessenger().CreateTopic(common.HeartbeatV2Topic, true) + if err != nil { + return nil, err + } + } + + peerSubType := core.RegularPeer + if hcf.prefs.Preferences.FullArchive { + peerSubType = core.FullHistoryObserver + } + + shardC := hcf.boostrapComponents.ShardCoordinator() + heartbeatTopic := common.HeartbeatV2Topic + shardC.CommunicationIdentifier(shardC.SelfId()) + + cfg := hcf.config.HeartbeatV2 + + argsSender := sender.ArgSender{ + Messenger: hcf.networkComponents.NetworkMessenger(), + Marshaller: hcf.coreComponents.InternalMarshalizer(), + PeerAuthenticationTopic: common.PeerAuthenticationTopic, + HeartbeatTopic: heartbeatTopic, + PeerAuthenticationTimeBetweenSends: time.Second * time.Duration(cfg.PeerAuthenticationTimeBetweenSendsInSec), + PeerAuthenticationTimeBetweenSendsWhenError: time.Second * time.Duration(cfg.PeerAuthenticationTimeBetweenSendsWhenErrorInSec), + PeerAuthenticationThresholdBetweenSends: cfg.PeerAuthenticationThresholdBetweenSends, + HeartbeatTimeBetweenSends: time.Second * time.Duration(cfg.HeartbeatTimeBetweenSendsInSec), + HeartbeatTimeBetweenSendsWhenError: time.Second * time.Duration(cfg.HeartbeatTimeBetweenSendsWhenErrorInSec), + HeartbeatThresholdBetweenSends: cfg.HeartbeatThresholdBetweenSends, + VersionNumber: hcf.version, + NodeDisplayName: hcf.prefs.Preferences.NodeDisplayName, + Identity: hcf.prefs.Preferences.Identity, + PeerSubType: peerSubType, + CurrentBlockProvider: hcf.dataComponents.Blockchain(), + PeerSignatureHandler: hcf.cryptoComponents.PeerSignatureHandler(), + PrivateKey: hcf.cryptoComponents.PrivateKey(), + RedundancyHandler: hcf.processComponents.NodeRedundancyHandler(), + NodesCoordinator: hcf.processComponents.NodesCoordinator(), + HardforkTrigger: hcf.processComponents.HardforkTrigger(), + HardforkTimeBetweenSends: time.Second * time.Duration(cfg.HardforkTimeBetweenSendsInSec), + HardforkTriggerPubKey: hcf.coreComponents.HardforkTriggerPubKey(), + } + heartbeatV2Sender, err := sender.NewSender(argsSender) + if err != nil { + return nil, err + } + + epochBootstrapParams := hcf.boostrapComponents.EpochBootstrapParams() + argsProcessor := processor.ArgPeerAuthenticationRequestsProcessor{ + RequestHandler: hcf.processComponents.RequestHandler(), + NodesCoordinator: hcf.processComponents.NodesCoordinator(), + PeerAuthenticationPool: hcf.dataComponents.Datapool().PeerAuthentications(), + ShardId: epochBootstrapParams.SelfShardID(), + Epoch: epochBootstrapParams.Epoch(), + MessagesInChunk: 
uint32(cfg.MaxNumOfPeerAuthenticationInResponse), + MinPeersThreshold: cfg.MinPeersThreshold, + DelayBetweenRequests: time.Second * time.Duration(cfg.DelayBetweenRequestsInSec), + MaxTimeout: time.Second * time.Duration(cfg.MaxTimeoutInSec), + MaxMissingKeysInRequest: cfg.MaxMissingKeysInRequest, + Randomizer: &random.ConcurrentSafeIntRandomizer{}, + } + paRequestsProcessor, err := processor.NewPeerAuthenticationRequestsProcessor(argsProcessor) + if err != nil { + return nil, err + } + + argsDirectConnectionsProcessor := processor.ArgDirectConnectionsProcessor{ + Messenger: hcf.networkComponents.NetworkMessenger(), + Marshaller: hcf.coreComponents.InternalMarshalizer(), + ShardCoordinator: hcf.boostrapComponents.ShardCoordinator(), + DelayBetweenNotifications: time.Second * time.Duration(cfg.DelayBetweenConnectionNotificationsInSec), + } + directConnectionsProcessor, err := processor.NewDirectConnectionsProcessor(argsDirectConnectionsProcessor) + if err != nil { + return nil, err + } + + argsMonitor := monitor.ArgHeartbeatV2Monitor{ + Cache: hcf.dataComponents.Datapool().Heartbeats(), + PubKeyConverter: hcf.coreComponents.ValidatorPubKeyConverter(), + Marshaller: hcf.coreComponents.InternalMarshalizer(), + PeerShardMapper: hcf.processComponents.PeerShardMapper(), + MaxDurationPeerUnresponsive: time.Second * time.Duration(cfg.MaxDurationPeerUnresponsiveInSec), + HideInactiveValidatorInterval: time.Second * time.Duration(cfg.HideInactiveValidatorIntervalInSec), + ShardId: epochBootstrapParams.SelfShardID(), + } + heartbeatsMonitor, err := monitor.NewHeartbeatV2Monitor(argsMonitor) + if err != nil { + return nil, err + } + + return &heartbeatV2Components{ + sender: heartbeatV2Sender, + peerAuthRequestsProcessor: paRequestsProcessor, + directConnectionsProcessor: directConnectionsProcessor, + monitor: heartbeatsMonitor, + }, nil +} + +// Close closes the heartbeat components +func (hc *heartbeatV2Components) Close() error { + log.Debug("calling close on heartbeatV2 components") + + if !check.IfNil(hc.sender) { + log.LogIfError(hc.sender.Close()) + } + + if !check.IfNil(hc.peerAuthRequestsProcessor) { + log.LogIfError(hc.peerAuthRequestsProcessor.Close()) + } + + if !check.IfNil(hc.directConnectionsProcessor) { + log.LogIfError(hc.directConnectionsProcessor.Close()) + } + + return nil +} + +// IsInterfaceNil returns true if there is no value under the interface +func (hcf *heartbeatV2ComponentsFactory) IsInterfaceNil() bool { + return hcf == nil +} diff --git a/factory/heartbeatV2ComponentsHandler.go b/factory/heartbeatV2ComponentsHandler.go new file mode 100644 index 00000000000..2841f7cff05 --- /dev/null +++ b/factory/heartbeatV2ComponentsHandler.go @@ -0,0 +1,91 @@ +package factory + +import ( + "sync" + + "github.com/ElrondNetwork/elrond-go-core/core/check" + "github.com/ElrondNetwork/elrond-go/errors" +) + +type managedHeartbeatV2Components struct { + *heartbeatV2Components + heartbeatV2ComponentsFactory *heartbeatV2ComponentsFactory + mutHeartbeatV2Components sync.RWMutex +} + +// NewManagedHeartbeatV2Components creates a new heartbeatV2 components handler +func NewManagedHeartbeatV2Components(hcf *heartbeatV2ComponentsFactory) (*managedHeartbeatV2Components, error) { + if hcf == nil { + return nil, errors.ErrNilHeartbeatV2ComponentsFactory + } + + return &managedHeartbeatV2Components{ + heartbeatV2Components: nil, + heartbeatV2ComponentsFactory: hcf, + }, nil +} + +// Create creates the heartbeatV2 components +func (mhc *managedHeartbeatV2Components) Create() error { + hc, err := 
mhc.heartbeatV2ComponentsFactory.Create() + if err != nil { + return err + } + + mhc.mutHeartbeatV2Components.Lock() + mhc.heartbeatV2Components = hc + mhc.mutHeartbeatV2Components.Unlock() + + return nil +} + +// CheckSubcomponents verifies all subcomponents +func (mhc *managedHeartbeatV2Components) CheckSubcomponents() error { + mhc.mutHeartbeatV2Components.RLock() + defer mhc.mutHeartbeatV2Components.RUnlock() + + if mhc.heartbeatV2Components == nil { + return errors.ErrNilHeartbeatV2Components + } + if check.IfNil(mhc.sender) { + return errors.ErrNilHeartbeatV2Sender + } + + return nil +} + +// String returns the name of the component +func (mhc *managedHeartbeatV2Components) String() string { + return heartbeatV2ComponentsName +} + +// Monitor returns the heartbeatV2 monitor +func (mhc *managedHeartbeatV2Components) Monitor() HeartbeatV2Monitor { + mhc.mutHeartbeatV2Components.Lock() + defer mhc.mutHeartbeatV2Components.Unlock() + + return mhc.monitor +} + +// Close closes the heartbeat components +func (mhc *managedHeartbeatV2Components) Close() error { + mhc.mutHeartbeatV2Components.Lock() + defer mhc.mutHeartbeatV2Components.Unlock() + + if mhc.heartbeatV2Components == nil { + return nil + } + + err := mhc.heartbeatV2Components.Close() + if err != nil { + return err + } + mhc.heartbeatV2Components = nil + + return nil +} + +// IsInterfaceNil returns true if there is no value under the interface +func (mhc *managedHeartbeatV2Components) IsInterfaceNil() bool { + return mhc == nil +} diff --git a/factory/heartbeatV2ComponentsHandler_test.go b/factory/heartbeatV2ComponentsHandler_test.go new file mode 100644 index 00000000000..816421ad120 --- /dev/null +++ b/factory/heartbeatV2ComponentsHandler_test.go @@ -0,0 +1,42 @@ +package factory_test + +import ( + "testing" + + "github.com/ElrondNetwork/elrond-go-core/core/check" + "github.com/ElrondNetwork/elrond-go/errors" + "github.com/ElrondNetwork/elrond-go/factory" + "github.com/stretchr/testify/assert" +) + +func TestManagedHeartbeatV2Components(t *testing.T) { + t.Parallel() + + defer func() { + r := recover() + if r != nil { + assert.Fail(t, "should not panic") + } + }() + + mhc, err := factory.NewManagedHeartbeatV2Components(nil) + assert.True(t, check.IfNil(mhc)) + assert.Equal(t, errors.ErrNilHeartbeatV2ComponentsFactory, err) + + args := createMockHeartbeatV2ComponentsFactoryArgs() + hcf, _ := factory.NewHeartbeatV2ComponentsFactory(args) + mhc, err = factory.NewManagedHeartbeatV2Components(hcf) + assert.False(t, check.IfNil(mhc)) + assert.Nil(t, err) + + err = mhc.Create() + assert.Nil(t, err) + + err = mhc.CheckSubcomponents() + assert.Nil(t, err) + + assert.Equal(t, "managedHeartbeatV2Components", mhc.String()) + + err = mhc.Close() + assert.Nil(t, err) +} diff --git a/factory/heartbeatV2Components_test.go b/factory/heartbeatV2Components_test.go new file mode 100644 index 00000000000..218ebc8ac2c --- /dev/null +++ b/factory/heartbeatV2Components_test.go @@ -0,0 +1,97 @@ +package factory_test + +import ( + "testing" + + "github.com/ElrondNetwork/elrond-go-core/core/check" + "github.com/ElrondNetwork/elrond-go/config" + "github.com/ElrondNetwork/elrond-go/factory" + "github.com/ElrondNetwork/elrond-go/factory/mock" + "github.com/stretchr/testify/assert" +) + +func createMockHeartbeatV2ComponentsFactoryArgs() factory.ArgHeartbeatV2ComponentsFactory { + shardCoordinator := mock.NewMultiShardsCoordinatorMock(2) + bootStrapArgs := getBootStrapArgs() + bootstrapComponentsFactory, _ := 
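Reviewer note (a usage sketch condensed from the handler above and the test file added below; createMockHeartbeatV2ComponentsFactoryArgs is the helper defined in that test, and the snippet assumes it runs inside a function returning error):

	args := createMockHeartbeatV2ComponentsFactoryArgs()
	hcf, err := factory.NewHeartbeatV2ComponentsFactory(args)
	if err != nil {
		return err
	}
	mhc, err := factory.NewManagedHeartbeatV2Components(hcf)
	if err != nil {
		return err
	}
	if err = mhc.Create(); err != nil {
		return err
	}
	heartbeats := mhc.Monitor().GetHeartbeats() // []heartbeatData.PubKeyHeartbeat
	log.Debug("active heartbeats", "count", len(heartbeats))

	return mhc.Close()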
factory.NewBootstrapComponentsFactory(bootStrapArgs) + bootstrapC, _ := factory.NewManagedBootstrapComponents(bootstrapComponentsFactory) + _ = bootstrapC.Create() + factory.SetShardCoordinator(shardCoordinator, bootstrapC) + + coreC := getCoreComponents() + networkC := getNetworkComponents() + dataC := getDataComponents(coreC, shardCoordinator) + cryptoC := getCryptoComponents(coreC) + stateC := getStateComponents(coreC, shardCoordinator) + processC := getProcessComponents(shardCoordinator, coreC, networkC, dataC, cryptoC, stateC) + return factory.ArgHeartbeatV2ComponentsFactory{ + Config: config.Config{ + HeartbeatV2: config.HeartbeatV2Config{ + PeerAuthenticationTimeBetweenSendsInSec: 1, + PeerAuthenticationTimeBetweenSendsWhenErrorInSec: 1, + PeerAuthenticationThresholdBetweenSends: 0.1, + HeartbeatTimeBetweenSendsInSec: 1, + HeartbeatTimeBetweenSendsWhenErrorInSec: 1, + HeartbeatThresholdBetweenSends: 0.1, + MaxNumOfPeerAuthenticationInResponse: 5, + HeartbeatExpiryTimespanInSec: 30, + MinPeersThreshold: 0.8, + DelayBetweenRequestsInSec: 10, + MaxTimeoutInSec: 60, + DelayBetweenConnectionNotificationsInSec: 5, + MaxMissingKeysInRequest: 100, + MaxDurationPeerUnresponsiveInSec: 10, + HideInactiveValidatorIntervalInSec: 60, + HardforkTimeBetweenSendsInSec: 5, + PeerAuthenticationPool: config.PeerAuthenticationPoolConfig{ + DefaultSpanInSec: 30, + CacheExpiryInSec: 30, + }, + HeartbeatPool: config.CacheConfig{ + Type: "LRU", + Capacity: 1000, + Shards: 1, + }, + }, + Hardfork: config.HardforkConfig{ + PublicKeyToListenFrom: dummyPk, + }, + }, + Prefs: config.Preferences{ + Preferences: config.PreferencesConfig{ + NodeDisplayName: "node", + Identity: "identity", + }, + }, + AppVersion: "test", + BoostrapComponents: bootstrapC, + CoreComponents: coreC, + DataComponents: dataC, + NetworkComponents: networkC, + CryptoComponents: cryptoC, + ProcessComponents: processC, + } +} + +func Test_heartbeatV2Components_Create_ShouldWork(t *testing.T) { + t.Parallel() + + defer func() { + r := recover() + if r != nil { + assert.Fail(t, "should not panic") + } + }() + + args := createMockHeartbeatV2ComponentsFactoryArgs() + hcf, err := factory.NewHeartbeatV2ComponentsFactory(args) + assert.False(t, check.IfNil(hcf)) + assert.Nil(t, err) + + hc, err := hcf.Create() + assert.NotNil(t, hc) + assert.Nil(t, err) + + err = hc.Close() + assert.Nil(t, err) +} diff --git a/factory/interface.go b/factory/interface.go index 398cbf2affb..8dd6fb8ce75 100644 --- a/factory/interface.go +++ b/factory/interface.go @@ -72,7 +72,8 @@ type P2PAntifloodHandler interface { // PreferredPeersHolderHandler defines the behavior of a component able to handle preferred peers operations type PreferredPeersHolderHandler interface { - Put(publicKey []byte, peerID core.PeerID, shardID uint32) + PutConnectionAddress(peerID core.PeerID, address string) + PutShardID(peerID core.PeerID, shardID uint32) Get() map[uint32][]core.PeerID Contains(peerID core.PeerID) bool Remove(peerID core.PeerID) @@ -129,6 +130,7 @@ type CoreComponentsHolder interface { NodeTypeProvider() core.NodeTypeProviderHandler ArwenChangeLocker() common.Locker ProcessStatusHandler() common.ProcessStatusHandler + HardforkTriggerPubKey() []byte IsInterfaceNil() bool } @@ -215,6 +217,7 @@ type NetworkComponentsHolder interface { PeerBlackListHandler() process.PeerBlackListCacher PeerHonestyHandler() PeerHonestyHandler PreferredPeersHolderHandler() PreferredPeersHolderHandler + PeersRatingHandler() p2p.PeersRatingHandler IsInterfaceNil() bool } @@ -264,6 +267,8 @@ type 
ProcessComponentsHolder interface { CurrentEpochProvider() process.CurrentNetworkEpochProviderHandler ScheduledTxsExecutionHandler() process.ScheduledTxsExecutionHandler TxsSenderHandler() process.TxsSenderHandler + HardforkTrigger() HardforkTrigger + ProcessedMiniBlocksTracker() process.ProcessedMiniBlocksTracker IsInterfaceNil() bool } @@ -344,6 +349,24 @@ type HeartbeatComponentsHandler interface { HeartbeatComponentsHolder } +// HeartbeatV2Monitor monitors the cache of heartbeatV2 messages +type HeartbeatV2Monitor interface { + GetHeartbeats() []heartbeatData.PubKeyHeartbeat + IsInterfaceNil() bool +} + +// HeartbeatV2ComponentsHolder holds the heartbeatV2 components +type HeartbeatV2ComponentsHolder interface { + Monitor() HeartbeatV2Monitor + IsInterfaceNil() bool +} + +// HeartbeatV2ComponentsHandler defines the heartbeatV2 components handler actions +type HeartbeatV2ComponentsHandler interface { + ComponentHandler + HeartbeatV2ComponentsHolder +} + // ConsensusWorker is the consensus worker handle for the exported functionality type ConsensusWorker interface { Close() error @@ -374,12 +397,14 @@ type ConsensusWorker interface { // HardforkTrigger defines the hard-fork trigger functionality type HardforkTrigger interface { + SetExportFactoryHandler(exportFactoryHandler update.ExportFactoryHandler) error TriggerReceived(payload []byte, data []byte, pkBytes []byte) (bool, error) RecordedTriggerMessage() ([]byte, bool) Trigger(epoch uint32, withEarlyEndOfEpoch bool) error CreateData() []byte AddCloser(closer update.Closer) error NotifyTriggerReceived() <-chan struct{} + NotifyTriggerReceivedV2() <-chan struct{} IsSelfTrigger() bool IsInterfaceNil() bool } @@ -390,7 +415,6 @@ type ConsensusComponentsHolder interface { ConsensusWorker() ConsensusWorker BroadcastMessenger() consensus.BroadcastMessenger ConsensusGroupSize() (int, error) - HardforkTrigger() HardforkTrigger Bootstrapper() process.Bootstrapper IsInterfaceNil() bool } diff --git a/factory/mock/blockProcessorStub.go b/factory/mock/blockProcessorStub.go index 4c754901e6e..06ec1706f0e 100644 --- a/factory/mock/blockProcessorStub.go +++ b/factory/mock/blockProcessorStub.go @@ -5,7 +5,6 @@ import ( "time" "github.com/ElrondNetwork/elrond-go-core/data" - "github.com/ElrondNetwork/elrond-go/process/block/processedMb" ) // BlockProcessorStub mocks the implementation for a blockProcessor @@ -108,10 +107,6 @@ func (bps *BlockProcessorStub) CreateNewHeader(round uint64, nonce uint64) (data return bps.CreateNewHeaderCalled(round, nonce) } -// ApplyProcessedMiniBlocks - -func (bps *BlockProcessorStub) ApplyProcessedMiniBlocks(_ *processedMb.ProcessedMiniBlockTracker) { -} - // IsInterfaceNil returns true if there is no value under the interface func (bps *BlockProcessorStub) IsInterfaceNil() bool { return bps == nil diff --git a/factory/mock/coreComponentsMock.go b/factory/mock/coreComponentsMock.go index 5da9e5f8f1a..5df9efb4988 100644 --- a/factory/mock/coreComponentsMock.go +++ b/factory/mock/coreComponentsMock.go @@ -57,6 +57,7 @@ type CoreComponentsMock struct { NodeTypeProviderField core.NodeTypeProviderHandler ArwenChangeLockerInternal common.Locker ProcessStatusHandlerInternal common.ProcessStatusHandler + HardforkTriggerPubKeyField []byte } // InternalMarshalizer - @@ -248,6 +249,11 @@ func (ccm *CoreComponentsMock) ProcessStatusHandler() common.ProcessStatusHandle return ccm.ProcessStatusHandlerInternal } +// HardforkTriggerPubKey - +func (ccm *CoreComponentsMock) HardforkTriggerPubKey() []byte { + return 
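Reviewer note (illustrative; the stub name is hypothetical, the method set mirrors the extended HardforkTrigger interface exactly as listed above): any test double now has to cover the two new methods, SetExportFactoryHandler and NotifyTriggerReceivedV2, alongside the pre-existing ones.

	type hardforkTriggerStub struct{}

	func (s *hardforkTriggerStub) SetExportFactoryHandler(_ update.ExportFactoryHandler) error { return nil }
	func (s *hardforkTriggerStub) TriggerReceived(_ []byte, _ []byte, _ []byte) (bool, error)  { return false, nil }
	func (s *hardforkTriggerStub) RecordedTriggerMessage() ([]byte, bool)                      { return nil, false }
	func (s *hardforkTriggerStub) Trigger(_ uint32, _ bool) error                              { return nil }
	func (s *hardforkTriggerStub) CreateData() []byte                                          { return nil }
	func (s *hardforkTriggerStub) AddCloser(_ update.Closer) error                             { return nil }
	func (s *hardforkTriggerStub) NotifyTriggerReceived() <-chan struct{}                      { return nil }
	func (s *hardforkTriggerStub) NotifyTriggerReceivedV2() <-chan struct{}                    { return nil }
	func (s *hardforkTriggerStub) IsSelfTrigger() bool                                         { return false }
	func (s *hardforkTriggerStub) IsInterfaceNil() bool                                        { return s == nil }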
ccm.HardforkTriggerPubKeyField +} + // IsInterfaceNil - func (ccm *CoreComponentsMock) IsInterfaceNil() bool { return ccm == nil diff --git a/factory/mock/heartbeatComponentsStub.go b/factory/mock/heartbeatComponentsStub.go new file mode 100644 index 00000000000..75ae805c52c --- /dev/null +++ b/factory/mock/heartbeatComponentsStub.go @@ -0,0 +1,59 @@ +package mock + +import ( + "github.com/ElrondNetwork/elrond-go/factory" + "github.com/ElrondNetwork/elrond-go/heartbeat" +) + +// HeartbeatComponentsStub - +type HeartbeatComponentsStub struct { + MessageHandlerField heartbeat.MessageHandler + MonitorField factory.HeartbeatMonitor + SenderField factory.HeartbeatSender + StorerField factory.HeartbeatStorer +} + +// Create - +func (hbc *HeartbeatComponentsStub) Create() error { + return nil +} + +// Close - +func (hbc *HeartbeatComponentsStub) Close() error { + return nil +} + +// CheckSubcomponents - +func (hbc *HeartbeatComponentsStub) CheckSubcomponents() error { + return nil +} + +// String - +func (hbc *HeartbeatComponentsStub) String() string { + return "" +} + +// MessageHandler - +func (hbc *HeartbeatComponentsStub) MessageHandler() heartbeat.MessageHandler { + return hbc.MessageHandlerField +} + +// Monitor - +func (hbc *HeartbeatComponentsStub) Monitor() factory.HeartbeatMonitor { + return hbc.MonitorField +} + +// Sender - +func (hbc *HeartbeatComponentsStub) Sender() factory.HeartbeatSender { + return hbc.SenderField +} + +// Storer - +func (hbc *HeartbeatComponentsStub) Storer() factory.HeartbeatStorer { + return hbc.StorerField +} + +// IsInterfaceNil - +func (hbc *HeartbeatComponentsStub) IsInterfaceNil() bool { + return hbc == nil +} diff --git a/factory/mock/heartbeatV2ComponentsStub.go b/factory/mock/heartbeatV2ComponentsStub.go new file mode 100644 index 00000000000..fe155342614 --- /dev/null +++ b/factory/mock/heartbeatV2ComponentsStub.go @@ -0,0 +1,38 @@ +package mock + +import "github.com/ElrondNetwork/elrond-go/factory" + +// HeartbeatV2ComponentsStub - +type HeartbeatV2ComponentsStub struct { + MonitorField factory.HeartbeatV2Monitor +} + +// Create - +func (hbc *HeartbeatV2ComponentsStub) Create() error { + return nil +} + +// Close - +func (hbc *HeartbeatV2ComponentsStub) Close() error { + return nil +} + +// CheckSubcomponents - +func (hbc *HeartbeatV2ComponentsStub) CheckSubcomponents() error { + return nil +} + +// String - +func (hbc *HeartbeatV2ComponentsStub) String() string { + return "" +} + +// Monitor - +func (hbc *HeartbeatV2ComponentsStub) Monitor() factory.HeartbeatV2Monitor { + return hbc.MonitorField +} + +// IsInterfaceNil - +func (hbc *HeartbeatV2ComponentsStub) IsInterfaceNil() bool { + return hbc == nil +} diff --git a/factory/mock/networkComponentsMock.go b/factory/mock/networkComponentsMock.go index 6beedf5e4b6..f47b7499e66 100644 --- a/factory/mock/networkComponentsMock.go +++ b/factory/mock/networkComponentsMock.go @@ -8,11 +8,12 @@ import ( // NetworkComponentsMock - type NetworkComponentsMock struct { - Messenger p2p.Messenger - InputAntiFlood factory.P2PAntifloodHandler - OutputAntiFlood factory.P2PAntifloodHandler - PeerBlackList process.PeerBlackListCacher - PreferredPeersHolder factory.PreferredPeersHolderHandler + Messenger p2p.Messenger + InputAntiFlood factory.P2PAntifloodHandler + OutputAntiFlood factory.P2PAntifloodHandler + PeerBlackList process.PeerBlackListCacher + PreferredPeersHolder factory.PreferredPeersHolderHandler + PeersRatingHandlerField p2p.PeersRatingHandler } // PubKeyCacher - @@ -65,6 +66,11 @@ func (ncm 
*NetworkComponentsMock) PreferredPeersHolderHandler() factory.Preferre return ncm.PreferredPeersHolder } +// PeersRatingHandler - +func (ncm *NetworkComponentsMock) PeersRatingHandler() p2p.PeersRatingHandler { + return ncm.PeersRatingHandlerField +} + // IsInterfaceNil - func (ncm *NetworkComponentsMock) IsInterfaceNil() bool { return ncm == nil diff --git a/factory/mock/processComponentsStub.go b/factory/mock/processComponentsStub.go index 89eac5501b6..cc9b6a07e9f 100644 --- a/factory/mock/processComponentsStub.go +++ b/factory/mock/processComponentsStub.go @@ -46,6 +46,8 @@ type ProcessComponentsMock struct { CurrentEpochProviderInternal process.CurrentNetworkEpochProviderHandler ScheduledTxsExecutionHandlerInternal process.ScheduledTxsExecutionHandler TxsSenderHandlerField process.TxsSenderHandler + HardforkTriggerField factory.HardforkTrigger + ProcessedMiniBlocksTrackerInternal process.ProcessedMiniBlocksTracker } // Create - @@ -228,6 +230,16 @@ func (pcm *ProcessComponentsMock) TxsSenderHandler() process.TxsSenderHandler { return pcm.TxsSenderHandlerField } +// HardforkTrigger - +func (pcm *ProcessComponentsMock) HardforkTrigger() factory.HardforkTrigger { + return pcm.HardforkTriggerField +} + +// ProcessedMiniBlocksTracker - +func (pcm *ProcessComponentsMock) ProcessedMiniBlocksTracker() process.ProcessedMiniBlocksTracker { + return pcm.ProcessedMiniBlocksTrackerInternal +} + // IsInterfaceNil - func (pcm *ProcessComponentsMock) IsInterfaceNil() bool { return pcm == nil diff --git a/factory/networkComponents.go b/factory/networkComponents.go index c03c0fd4036..730d1c669eb 100644 --- a/factory/networkComponents.go +++ b/factory/networkComponents.go @@ -7,7 +7,6 @@ import ( "github.com/ElrondNetwork/elrond-go-core/core" "github.com/ElrondNetwork/elrond-go-core/core/check" - "github.com/ElrondNetwork/elrond-go-core/core/peersholder" "github.com/ElrondNetwork/elrond-go-core/marshal" "github.com/ElrondNetwork/elrond-go/config" "github.com/ElrondNetwork/elrond-go/consensus" @@ -15,37 +14,40 @@ import ( "github.com/ElrondNetwork/elrond-go/errors" "github.com/ElrondNetwork/elrond-go/p2p" "github.com/ElrondNetwork/elrond-go/p2p/libp2p" + peersHolder "github.com/ElrondNetwork/elrond-go/p2p/peersHolder" + "github.com/ElrondNetwork/elrond-go/p2p/rating" "github.com/ElrondNetwork/elrond-go/process" "github.com/ElrondNetwork/elrond-go/process/rating/peerHonesty" antifloodFactory "github.com/ElrondNetwork/elrond-go/process/throttle/antiflood/factory" storageFactory "github.com/ElrondNetwork/elrond-go/storage/factory" + "github.com/ElrondNetwork/elrond-go/storage/lrucache" "github.com/ElrondNetwork/elrond-go/storage/storageUnit" ) // NetworkComponentsFactoryArgs holds the arguments to create a network component handler instance type NetworkComponentsFactoryArgs struct { - P2pConfig config.P2PConfig - MainConfig config.Config - RatingsConfig config.RatingsConfig - StatusHandler core.AppStatusHandler - Marshalizer marshal.Marshalizer - Syncer p2p.SyncTimer - PreferredPublicKeys [][]byte - BootstrapWaitTime time.Duration - NodeOperationMode p2p.NodeOperation + P2pConfig config.P2PConfig + MainConfig config.Config + RatingsConfig config.RatingsConfig + StatusHandler core.AppStatusHandler + Marshalizer marshal.Marshalizer + Syncer p2p.SyncTimer + PreferredPeersSlices []string + BootstrapWaitTime time.Duration + NodeOperationMode p2p.NodeOperation } type networkComponentsFactory struct { - p2pConfig config.P2PConfig - mainConfig config.Config - ratingsConfig config.RatingsConfig - statusHandler 
core.AppStatusHandler - listenAddress string - marshalizer marshal.Marshalizer - syncer p2p.SyncTimer - preferredPublicKeys [][]byte - bootstrapWaitTime time.Duration - nodeOperationMode p2p.NodeOperation + p2pConfig config.P2PConfig + mainConfig config.Config + ratingsConfig config.RatingsConfig + statusHandler core.AppStatusHandler + listenAddress string + marshalizer marshal.Marshalizer + syncer p2p.SyncTimer + preferredPeersSlices []string + bootstrapWaitTime time.Duration + nodeOperationMode p2p.NodeOperation } // networkComponents struct holds the network components @@ -60,6 +62,7 @@ type networkComponents struct { antifloodConfig config.AntifloodConfig peerHonestyHandler consensus.PeerHonestyHandler peersHolder PreferredPeersHolderHandler + peersRatingHandler p2p.PeersRatingHandler closeFunc context.CancelFunc } @@ -78,31 +81,52 @@ func NewNetworkComponentsFactory( } return &networkComponentsFactory{ - p2pConfig: args.P2pConfig, - ratingsConfig: args.RatingsConfig, - marshalizer: args.Marshalizer, - mainConfig: args.MainConfig, - statusHandler: args.StatusHandler, - listenAddress: libp2p.ListenAddrWithIp4AndTcp, - syncer: args.Syncer, - bootstrapWaitTime: args.BootstrapWaitTime, - preferredPublicKeys: args.PreferredPublicKeys, - nodeOperationMode: args.NodeOperationMode, + p2pConfig: args.P2pConfig, + ratingsConfig: args.RatingsConfig, + marshalizer: args.Marshalizer, + mainConfig: args.MainConfig, + statusHandler: args.StatusHandler, + listenAddress: libp2p.ListenAddrWithIp4AndTcp, + syncer: args.Syncer, + bootstrapWaitTime: args.BootstrapWaitTime, + preferredPeersSlices: args.PreferredPeersSlices, + nodeOperationMode: args.NodeOperationMode, }, nil } // Create creates and returns the network components func (ncf *networkComponentsFactory) Create() (*networkComponents, error) { - peersHolder := peersholder.NewPeersHolder(ncf.preferredPublicKeys) + ph, err := peersHolder.NewPeersHolder(ncf.preferredPeersSlices) + if err != nil { + return nil, err + } + + topRatedCache, err := lrucache.NewCache(ncf.mainConfig.PeersRatingConfig.TopRatedCacheCapacity) + if err != nil { + return nil, err + } + badRatedCache, err := lrucache.NewCache(ncf.mainConfig.PeersRatingConfig.BadRatedCacheCapacity) + if err != nil { + return nil, err + } + argsPeersRatingHandler := rating.ArgPeersRatingHandler{ + TopRatedCache: topRatedCache, + BadRatedCache: badRatedCache, + } + peersRatingHandler, err := rating.NewPeersRatingHandler(argsPeersRatingHandler) + if err != nil { + return nil, err + } + arg := libp2p.ArgsNetworkMessenger{ Marshalizer: ncf.marshalizer, ListenAddress: ncf.listenAddress, P2pConfig: ncf.p2pConfig, SyncTimer: ncf.syncer, - PreferredPeersHolder: peersHolder, + PreferredPeersHolder: ph, NodeOperationMode: ncf.nodeOperationMode, + PeersRatingHandler: peersRatingHandler, } - netMessenger, err := libp2p.NewNetworkMessenger(arg) if err != nil { return nil, err @@ -180,7 +204,8 @@ func (ncf *networkComponentsFactory) Create() (*networkComponents, error) { pubKeyTimeCacher: antiFloodComponents.PubKeysCacher, antifloodConfig: ncf.mainConfig.Antiflood, peerHonestyHandler: peerHonestyHandler, - peersHolder: peersHolder, + peersHolder: ph, + peersRatingHandler: peersRatingHandler, closeFunc: cancelFunc, }, nil } diff --git a/factory/networkComponentsHandler.go b/factory/networkComponentsHandler.go index a94c5efc562..987a04d1a06 100644 --- a/factory/networkComponentsHandler.go +++ b/factory/networkComponentsHandler.go @@ -164,7 +164,7 @@ func (mnc *managedNetworkComponents) PeerHonestyHandler() 
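Reviewer note (a condensed sketch of the Create flow above; the constructors are the ones imported in this diff, cfg stands for the main config, preferredPeers for the new PreferredPeersSlices argument, and error handling is elided):

	ph, _ := peersHolder.NewPeersHolder(preferredPeers) // []string, replaces the old public-key list

	topRatedCache, _ := lrucache.NewCache(cfg.PeersRatingConfig.TopRatedCacheCapacity)
	badRatedCache, _ := lrucache.NewCache(cfg.PeersRatingConfig.BadRatedCacheCapacity)
	peersRatingHandler, _ := rating.NewPeersRatingHandler(rating.ArgPeersRatingHandler{
		TopRatedCache: topRatedCache,
		BadRatedCache: badRatedCache,
	})

	// ph and peersRatingHandler are then handed to libp2p.NewNetworkMessenger through
	// ArgsNetworkMessenger.PreferredPeersHolder and ArgsNetworkMessenger.PeersRatingHandler.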
PeerHonestyHandler { return mnc.networkComponents.peerHonestyHandler } -// PreferredPeersHolder returns the preferred peers holder +// PreferredPeersHolderHandler returns the preferred peers holder func (mnc *managedNetworkComponents) PreferredPeersHolderHandler() PreferredPeersHolderHandler { mnc.mutNetworkComponents.RLock() defer mnc.mutNetworkComponents.RUnlock() @@ -176,6 +176,18 @@ func (mnc *managedNetworkComponents) PreferredPeersHolderHandler() PreferredPeer return mnc.networkComponents.peersHolder } +// PeersRatingHandler returns the peers rating handler +func (mnc *managedNetworkComponents) PeersRatingHandler() p2p.PeersRatingHandler { + mnc.mutNetworkComponents.RLock() + defer mnc.mutNetworkComponents.RUnlock() + + if mnc.networkComponents == nil { + return nil + } + + return mnc.networkComponents.peersRatingHandler +} + // IsInterfaceNil returns true if the value under the interface is nil func (mnc *managedNetworkComponents) IsInterfaceNil() bool { return mnc == nil @@ -183,5 +195,5 @@ func (mnc *managedNetworkComponents) IsInterfaceNil() bool { // String returns the name of the component func (mnc *managedNetworkComponents) String() string { - return "managedNetworkComponents" + return networkComponentsName } diff --git a/factory/networkComponents_test.go b/factory/networkComponents_test.go index 81dd319e107..5d47467a19d 100644 --- a/factory/networkComponents_test.go +++ b/factory/networkComponents_test.go @@ -118,6 +118,10 @@ func getNetworkArgs() factory.NetworkComponentsFactoryArgs { IntervalAutoPrintInSeconds: 1, }, }, + PeersRatingConfig: config.PeersRatingConfig{ + TopRatedCacheCapacity: 1000, + BadRatedCacheCapacity: 1000, + }, } appStatusHandler := statusHandlerMock.NewAppStatusHandlerMock() diff --git a/factory/processComponents.go b/factory/processComponents.go index 7089aad023d..6bddd3d33b3 100644 --- a/factory/processComponents.go +++ b/factory/processComponents.go @@ -5,6 +5,7 @@ import ( "errors" "fmt" "math/big" + "path/filepath" "time" "github.com/ElrondNetwork/elrond-go-core/core" @@ -41,6 +42,7 @@ import ( "github.com/ElrondNetwork/elrond-go/process/block/pendingMb" "github.com/ElrondNetwork/elrond-go/process/block/poolsCleaner" "github.com/ElrondNetwork/elrond-go/process/block/preprocess" + "github.com/ElrondNetwork/elrond-go/process/block/processedMb" "github.com/ElrondNetwork/elrond-go/process/factory/interceptorscontainer" "github.com/ElrondNetwork/elrond-go/process/headerCheck" "github.com/ElrondNetwork/elrond-go/process/peer" @@ -60,6 +62,9 @@ import ( "github.com/ElrondNetwork/elrond-go/storage/storageUnit" "github.com/ElrondNetwork/elrond-go/storage/timecache" "github.com/ElrondNetwork/elrond-go/update" + updateDisabled "github.com/ElrondNetwork/elrond-go/update/disabled" + updateFactory "github.com/ElrondNetwork/elrond-go/update/factory" + "github.com/ElrondNetwork/elrond-go/update/trigger" ) var log = logger.GetOrCreate("factory") @@ -106,6 +111,8 @@ type processComponents struct { vmFactoryForProcessing process.VirtualMachinesContainerFactory scheduledTxsExecutionHandler process.ScheduledTxsExecutionHandler txsSender process.TxsSenderHandler + hardforkTrigger HardforkTrigger + processedMiniBlocksTracker process.ProcessedMiniBlocksTracker } // ProcessComponentsFactoryArgs holds the arguments needed to create a process components factory @@ -243,7 +250,13 @@ func (pcf *processComponentsFactory) Create() (*processComponents, error) { return nil, err } - resolversContainerFactory, err := pcf.newResolverContainerFactory(currentEpochProvider) + // TODO: 
maybe move PeerShardMapper to network components + peerShardMapper, err := pcf.prepareNetworkShardingCollector() + if err != nil { + return nil, err + } + + resolversContainerFactory, err := pcf.newResolverContainerFactory(currentEpochProvider, peerShardMapper) if err != nil { return nil, err } @@ -426,12 +439,19 @@ func (pcf *processComponentsFactory) Create() (*processComponents, error) { return nil, err } + hardforkTrigger, err := pcf.createHardforkTrigger(epochStartTrigger) + if err != nil { + return nil, err + } + interceptorContainerFactory, blackListHandler, err := pcf.newInterceptorContainerFactory( headerSigVerifier, pcf.bootstrapComponents.HeaderIntegrityVerifier(), blockTracker, epochStartTrigger, requestHandler, + peerShardMapper, + hardforkTrigger, ) if err != nil { return nil, err @@ -443,6 +463,23 @@ func (pcf *processComponentsFactory) Create() (*processComponents, error) { return nil, err } + exportFactoryHandler, err := pcf.createExportFactoryHandler( + headerValidator, + requestHandler, + resolversFinder, + interceptorsContainer, + headerSigVerifier, + blockTracker, + ) + if err != nil { + return nil, err + } + + err = hardforkTrigger.SetExportFactoryHandler(exportFactoryHandler) + if err != nil { + return nil, err + } + var pendingMiniBlocksHandler process.PendingMiniBlocksHandler pendingMiniBlocksHandler, err = pendingMb.NewNilPendingMiniBlocks() if err != nil { @@ -486,6 +523,8 @@ func (pcf *processComponentsFactory) Create() (*processComponents, error) { return nil, err } + processedMiniBlocksTracker := processedMb.NewProcessedMiniBlocksTracker() + blockProcessorComponents, err := pcf.newBlockProcessor( requestHandler, forkDetector, @@ -498,6 +537,7 @@ func (pcf *processComponentsFactory) Create() (*processComponents, error) { txSimulatorProcessorArgs, pcf.coreData.ArwenChangeLocker(), scheduledTxsExecutionHandler, + processedMiniBlocksTracker, ) if err != nil { return nil, err @@ -524,12 +564,6 @@ func (pcf *processComponentsFactory) Create() (*processComponents, error) { return nil, err } - // TODO: maybe move PeerShardMapper to network components - peerShardMapper, err := pcf.prepareNetworkShardingCollector() - if err != nil { - return nil, err - } - txSimulator, err := txsimulator.NewTransactionSimulator(*txSimulatorProcessorArgs) if err != nil { return nil, err @@ -608,6 +642,8 @@ func (pcf *processComponentsFactory) Create() (*processComponents, error) { vmFactoryForProcessing: blockProcessorComponents.vmFactoryForProcessing, scheduledTxsExecutionHandler: scheduledTxsExecutionHandler, txsSender: txsSenderWithAccumulator, + hardforkTrigger: hardforkTrigger, + processedMiniBlocksTracker: processedMiniBlocksTracker, }, nil } @@ -620,10 +656,10 @@ func (pcf *processComponentsFactory) newValidatorStatisticsProcessor() (process. 
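Reviewer note (a sketch of the two-phase hardfork wiring introduced above; method names are the ones added in this diff, other arguments are as shown in Create):

	hardforkTrigger, err := pcf.createHardforkTrigger(epochStartTrigger)
	// the trigger must exist before the interceptors, which consume it
	interceptorContainerFactory, blackListHandler, err := pcf.newInterceptorContainerFactory(
		headerSigVerifier, headerIntegrityVerifier, blockTracker, epochStartTrigger,
		requestHandler, peerShardMapper, hardforkTrigger,
	)
	// the export factory handler needs the interceptors container (built from the factory above)
	// and the resolvers, so it can only be assembled afterwards
	exportFactoryHandler, err := pcf.createExportFactoryHandler(
		headerValidator, requestHandler, resolversFinder,
		interceptorsContainer, headerSigVerifier, blockTracker,
	)
	// and only then is it injected back into the trigger, closing the dependency cycle
	err = hardforkTrigger.SetExportFactoryHandler(exportFactoryHandler)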
peerDataPool = pcf.data.Datapool() } - hardForkConfig := pcf.config.Hardfork + hardforkConfig := pcf.config.Hardfork ratingEnabledEpoch := uint32(0) - if hardForkConfig.AfterHardFork { - ratingEnabledEpoch = hardForkConfig.StartEpoch + hardForkConfig.ValidatorGracePeriodInEpochs + if hardforkConfig.AfterHardFork { + ratingEnabledEpoch = hardforkConfig.StartEpoch + hardforkConfig.ValidatorGracePeriodInEpochs } arguments := peer.ArgValidatorStatisticsProcessor{ PeerAdapter: pcf.state.PeerAccounts(), @@ -1000,6 +1036,7 @@ func (pcf *processComponentsFactory) newBlockTracker( // -- Resolvers container Factory begin func (pcf *processComponentsFactory) newResolverContainerFactory( currentEpochProvider dataRetriever.CurrentNetworkEpochProviderHandler, + peerShardMapper *networksharding.PeerShardMapper, ) (dataRetriever.ResolversContainerFactory, error) { if pcf.importDBConfig.IsImportDBMode { @@ -1007,10 +1044,10 @@ func (pcf *processComponentsFactory) newResolverContainerFactory( return pcf.newStorageResolver() } if pcf.bootstrapComponents.ShardCoordinator().SelfId() < pcf.bootstrapComponents.ShardCoordinator().NumberOfShards() { - return pcf.newShardResolverContainerFactory(currentEpochProvider) + return pcf.newShardResolverContainerFactory(currentEpochProvider, peerShardMapper) } if pcf.bootstrapComponents.ShardCoordinator().SelfId() == core.MetachainShardId { - return pcf.newMetaResolverContainerFactory(currentEpochProvider) + return pcf.newMetaResolverContainerFactory(currentEpochProvider, peerShardMapper) } return nil, errors.New("could not create interceptor and resolver container factory") @@ -1018,6 +1055,7 @@ func (pcf *processComponentsFactory) newResolverContainerFactory( func (pcf *processComponentsFactory) newShardResolverContainerFactory( currentEpochProvider dataRetriever.CurrentNetworkEpochProviderHandler, + peerShardMapper *networksharding.PeerShardMapper, ) (dataRetriever.ResolversContainerFactory, error) { dataPacker, err := partitioning.NewSimpleDataPacker(pcf.coreData.InternalMarshalizer()) @@ -1026,22 +1064,26 @@ func (pcf *processComponentsFactory) newShardResolverContainerFactory( } resolversContainerFactoryArgs := resolverscontainer.FactoryArgs{ - ShardCoordinator: pcf.bootstrapComponents.ShardCoordinator(), - Messenger: pcf.network.NetworkMessenger(), - Store: pcf.data.StorageService(), - Marshalizer: pcf.coreData.InternalMarshalizer(), - DataPools: pcf.data.Datapool(), - Uint64ByteSliceConverter: pcf.coreData.Uint64ByteSliceConverter(), - DataPacker: dataPacker, - TriesContainer: pcf.state.TriesContainer(), - SizeCheckDelta: pcf.config.Marshalizer.SizeCheckDelta, - InputAntifloodHandler: pcf.network.InputAntiFloodHandler(), - OutputAntifloodHandler: pcf.network.OutputAntiFloodHandler(), - NumConcurrentResolvingJobs: pcf.config.Antiflood.NumConcurrentResolverJobs, - IsFullHistoryNode: pcf.prefConfigs.FullArchive, - CurrentNetworkEpochProvider: currentEpochProvider, - ResolverConfig: pcf.config.Resolvers, - PreferredPeersHolder: pcf.network.PreferredPeersHolderHandler(), + ShardCoordinator: pcf.bootstrapComponents.ShardCoordinator(), + Messenger: pcf.network.NetworkMessenger(), + Store: pcf.data.StorageService(), + Marshalizer: pcf.coreData.InternalMarshalizer(), + DataPools: pcf.data.Datapool(), + Uint64ByteSliceConverter: pcf.coreData.Uint64ByteSliceConverter(), + DataPacker: dataPacker, + TriesContainer: pcf.state.TriesContainer(), + SizeCheckDelta: pcf.config.Marshalizer.SizeCheckDelta, + InputAntifloodHandler: pcf.network.InputAntiFloodHandler(), + 
OutputAntifloodHandler: pcf.network.OutputAntiFloodHandler(), + NumConcurrentResolvingJobs: pcf.config.Antiflood.NumConcurrentResolverJobs, + IsFullHistoryNode: pcf.prefConfigs.FullArchive, + CurrentNetworkEpochProvider: currentEpochProvider, + ResolverConfig: pcf.config.Resolvers, + PreferredPeersHolder: pcf.network.PreferredPeersHolderHandler(), + PeersRatingHandler: pcf.network.PeersRatingHandler(), + NodesCoordinator: pcf.nodesCoordinator, + MaxNumOfPeerAuthenticationInResponse: pcf.config.HeartbeatV2.MaxNumOfPeerAuthenticationInResponse, + PeerShardMapper: peerShardMapper, } resolversContainerFactory, err := resolverscontainer.NewShardResolversContainerFactory(resolversContainerFactoryArgs) if err != nil { @@ -1053,6 +1095,7 @@ func (pcf *processComponentsFactory) newShardResolverContainerFactory( func (pcf *processComponentsFactory) newMetaResolverContainerFactory( currentEpochProvider dataRetriever.CurrentNetworkEpochProviderHandler, + peerShardMapper *networksharding.PeerShardMapper, ) (dataRetriever.ResolversContainerFactory, error) { dataPacker, err := partitioning.NewSimpleDataPacker(pcf.coreData.InternalMarshalizer()) @@ -1061,22 +1104,26 @@ func (pcf *processComponentsFactory) newMetaResolverContainerFactory( } resolversContainerFactoryArgs := resolverscontainer.FactoryArgs{ - ShardCoordinator: pcf.bootstrapComponents.ShardCoordinator(), - Messenger: pcf.network.NetworkMessenger(), - Store: pcf.data.StorageService(), - Marshalizer: pcf.coreData.InternalMarshalizer(), - DataPools: pcf.data.Datapool(), - Uint64ByteSliceConverter: pcf.coreData.Uint64ByteSliceConverter(), - DataPacker: dataPacker, - TriesContainer: pcf.state.TriesContainer(), - SizeCheckDelta: pcf.config.Marshalizer.SizeCheckDelta, - InputAntifloodHandler: pcf.network.InputAntiFloodHandler(), - OutputAntifloodHandler: pcf.network.OutputAntiFloodHandler(), - NumConcurrentResolvingJobs: pcf.config.Antiflood.NumConcurrentResolverJobs, - IsFullHistoryNode: pcf.prefConfigs.FullArchive, - CurrentNetworkEpochProvider: currentEpochProvider, - ResolverConfig: pcf.config.Resolvers, - PreferredPeersHolder: pcf.network.PreferredPeersHolderHandler(), + ShardCoordinator: pcf.bootstrapComponents.ShardCoordinator(), + Messenger: pcf.network.NetworkMessenger(), + Store: pcf.data.StorageService(), + Marshalizer: pcf.coreData.InternalMarshalizer(), + DataPools: pcf.data.Datapool(), + Uint64ByteSliceConverter: pcf.coreData.Uint64ByteSliceConverter(), + DataPacker: dataPacker, + TriesContainer: pcf.state.TriesContainer(), + SizeCheckDelta: pcf.config.Marshalizer.SizeCheckDelta, + InputAntifloodHandler: pcf.network.InputAntiFloodHandler(), + OutputAntifloodHandler: pcf.network.OutputAntiFloodHandler(), + NumConcurrentResolvingJobs: pcf.config.Antiflood.NumConcurrentResolverJobs, + IsFullHistoryNode: pcf.prefConfigs.FullArchive, + CurrentNetworkEpochProvider: currentEpochProvider, + ResolverConfig: pcf.config.Resolvers, + PreferredPeersHolder: pcf.network.PreferredPeersHolderHandler(), + PeersRatingHandler: pcf.network.PeersRatingHandler(), + NodesCoordinator: pcf.nodesCoordinator, + MaxNumOfPeerAuthenticationInResponse: pcf.config.HeartbeatV2.MaxNumOfPeerAuthenticationInResponse, + PeerShardMapper: peerShardMapper, } resolversContainerFactory, err := resolverscontainer.NewMetaResolversContainerFactory(resolversContainerFactoryArgs) if err != nil { @@ -1091,6 +1138,8 @@ func (pcf *processComponentsFactory) newInterceptorContainerFactory( validityAttester process.ValidityAttester, epochStartTrigger process.EpochStartTriggerHandler, 
requestHandler process.RequestHandler, + peerShardMapper *networksharding.PeerShardMapper, + hardforkTrigger HardforkTrigger, ) (process.InterceptorsContainerFactory, process.TimeCacher, error) { if pcf.bootstrapComponents.ShardCoordinator().SelfId() < pcf.bootstrapComponents.ShardCoordinator().NumberOfShards() { return pcf.newShardInterceptorContainerFactory( @@ -1099,6 +1148,8 @@ func (pcf *processComponentsFactory) newInterceptorContainerFactory( validityAttester, epochStartTrigger, requestHandler, + peerShardMapper, + hardforkTrigger, ) } if pcf.bootstrapComponents.ShardCoordinator().SelfId() == core.MetachainShardId { @@ -1108,6 +1159,8 @@ func (pcf *processComponentsFactory) newInterceptorContainerFactory( validityAttester, epochStartTrigger, requestHandler, + peerShardMapper, + hardforkTrigger, ) } @@ -1240,32 +1293,39 @@ func (pcf *processComponentsFactory) newShardInterceptorContainerFactory( validityAttester process.ValidityAttester, epochStartTrigger process.EpochStartTriggerHandler, requestHandler process.RequestHandler, + peerShardMapper *networksharding.PeerShardMapper, + hardforkTrigger HardforkTrigger, ) (process.InterceptorsContainerFactory, process.TimeCacher, error) { headerBlackList := timecache.NewTimeCache(timeSpanForBadHeaders) shardInterceptorsContainerFactoryArgs := interceptorscontainer.CommonInterceptorsContainerFactoryArgs{ - CoreComponents: pcf.coreData, - CryptoComponents: pcf.crypto, - Accounts: pcf.state.AccountsAdapter(), - ShardCoordinator: pcf.bootstrapComponents.ShardCoordinator(), - NodesCoordinator: pcf.nodesCoordinator, - Messenger: pcf.network.NetworkMessenger(), - Store: pcf.data.StorageService(), - DataPool: pcf.data.Datapool(), - MaxTxNonceDeltaAllowed: common.MaxTxNonceDeltaAllowed, - TxFeeHandler: pcf.coreData.EconomicsData(), - BlockBlackList: headerBlackList, - HeaderSigVerifier: headerSigVerifier, - HeaderIntegrityVerifier: headerIntegrityVerifier, - ValidityAttester: validityAttester, - EpochStartTrigger: epochStartTrigger, - WhiteListHandler: pcf.whiteListHandler, - WhiteListerVerifiedTxs: pcf.whiteListerVerifiedTxs, - AntifloodHandler: pcf.network.InputAntiFloodHandler(), - ArgumentsParser: smartContract.NewArgumentParser(), - SizeCheckDelta: pcf.config.Marshalizer.SizeCheckDelta, - EnableSignTxWithHashEpoch: pcf.epochConfig.EnableEpochs.TransactionSignedWithTxHashEnableEpoch, - PreferredPeersHolder: pcf.network.PreferredPeersHolderHandler(), - RequestHandler: requestHandler, + CoreComponents: pcf.coreData, + CryptoComponents: pcf.crypto, + Accounts: pcf.state.AccountsAdapter(), + ShardCoordinator: pcf.bootstrapComponents.ShardCoordinator(), + NodesCoordinator: pcf.nodesCoordinator, + Messenger: pcf.network.NetworkMessenger(), + Store: pcf.data.StorageService(), + DataPool: pcf.data.Datapool(), + MaxTxNonceDeltaAllowed: common.MaxTxNonceDeltaAllowed, + TxFeeHandler: pcf.coreData.EconomicsData(), + BlockBlackList: headerBlackList, + HeaderSigVerifier: headerSigVerifier, + HeaderIntegrityVerifier: headerIntegrityVerifier, + ValidityAttester: validityAttester, + EpochStartTrigger: epochStartTrigger, + WhiteListHandler: pcf.whiteListHandler, + WhiteListerVerifiedTxs: pcf.whiteListerVerifiedTxs, + AntifloodHandler: pcf.network.InputAntiFloodHandler(), + ArgumentsParser: smartContract.NewArgumentParser(), + PreferredPeersHolder: pcf.network.PreferredPeersHolderHandler(), + SizeCheckDelta: pcf.config.Marshalizer.SizeCheckDelta, + EnableSignTxWithHashEpoch: pcf.epochConfig.EnableEpochs.TransactionSignedWithTxHashEnableEpoch, + RequestHandler: 
requestHandler, + PeerSignatureHandler: pcf.crypto.PeerSignatureHandler(), + SignaturesHandler: pcf.network.NetworkMessenger(), + HeartbeatExpiryTimespanInSec: pcf.config.HeartbeatV2.HeartbeatExpiryTimespanInSec, + PeerShardMapper: peerShardMapper, + HardforkTrigger: hardforkTrigger, } log.Debug("shardInterceptor: enable epoch for transaction signed with tx hash", "epoch", shardInterceptorsContainerFactoryArgs.EnableSignTxWithHashEpoch) @@ -1283,32 +1343,39 @@ func (pcf *processComponentsFactory) newMetaInterceptorContainerFactory( validityAttester process.ValidityAttester, epochStartTrigger process.EpochStartTriggerHandler, requestHandler process.RequestHandler, + peerShardMapper *networksharding.PeerShardMapper, + hardforkTrigger HardforkTrigger, ) (process.InterceptorsContainerFactory, process.TimeCacher, error) { headerBlackList := timecache.NewTimeCache(timeSpanForBadHeaders) metaInterceptorsContainerFactoryArgs := interceptorscontainer.CommonInterceptorsContainerFactoryArgs{ - CoreComponents: pcf.coreData, - CryptoComponents: pcf.crypto, - ShardCoordinator: pcf.bootstrapComponents.ShardCoordinator(), - NodesCoordinator: pcf.nodesCoordinator, - Messenger: pcf.network.NetworkMessenger(), - Store: pcf.data.StorageService(), - DataPool: pcf.data.Datapool(), - Accounts: pcf.state.AccountsAdapter(), - MaxTxNonceDeltaAllowed: common.MaxTxNonceDeltaAllowed, - TxFeeHandler: pcf.coreData.EconomicsData(), - BlockBlackList: headerBlackList, - HeaderSigVerifier: headerSigVerifier, - HeaderIntegrityVerifier: headerIntegrityVerifier, - ValidityAttester: validityAttester, - EpochStartTrigger: epochStartTrigger, - WhiteListHandler: pcf.whiteListHandler, - WhiteListerVerifiedTxs: pcf.whiteListerVerifiedTxs, - AntifloodHandler: pcf.network.InputAntiFloodHandler(), - ArgumentsParser: smartContract.NewArgumentParser(), - SizeCheckDelta: pcf.config.Marshalizer.SizeCheckDelta, - EnableSignTxWithHashEpoch: pcf.epochConfig.EnableEpochs.TransactionSignedWithTxHashEnableEpoch, - PreferredPeersHolder: pcf.network.PreferredPeersHolderHandler(), - RequestHandler: requestHandler, + CoreComponents: pcf.coreData, + CryptoComponents: pcf.crypto, + ShardCoordinator: pcf.bootstrapComponents.ShardCoordinator(), + NodesCoordinator: pcf.nodesCoordinator, + Messenger: pcf.network.NetworkMessenger(), + Store: pcf.data.StorageService(), + DataPool: pcf.data.Datapool(), + Accounts: pcf.state.AccountsAdapter(), + MaxTxNonceDeltaAllowed: common.MaxTxNonceDeltaAllowed, + TxFeeHandler: pcf.coreData.EconomicsData(), + BlockBlackList: headerBlackList, + HeaderSigVerifier: headerSigVerifier, + HeaderIntegrityVerifier: headerIntegrityVerifier, + ValidityAttester: validityAttester, + EpochStartTrigger: epochStartTrigger, + WhiteListHandler: pcf.whiteListHandler, + WhiteListerVerifiedTxs: pcf.whiteListerVerifiedTxs, + AntifloodHandler: pcf.network.InputAntiFloodHandler(), + ArgumentsParser: smartContract.NewArgumentParser(), + SizeCheckDelta: pcf.config.Marshalizer.SizeCheckDelta, + EnableSignTxWithHashEpoch: pcf.epochConfig.EnableEpochs.TransactionSignedWithTxHashEnableEpoch, + PreferredPeersHolder: pcf.network.PreferredPeersHolderHandler(), + RequestHandler: requestHandler, + PeerSignatureHandler: pcf.crypto.PeerSignatureHandler(), + SignaturesHandler: pcf.network.NetworkMessenger(), + HeartbeatExpiryTimespanInSec: pcf.config.HeartbeatV2.HeartbeatExpiryTimespanInSec, + PeerShardMapper: peerShardMapper, + HardforkTrigger: hardforkTrigger, } log.Debug("metaInterceptor: enable epoch for transaction signed with tx hash", "epoch", 
metaInterceptorsContainerFactoryArgs.EnableSignTxWithHashEpoch) @@ -1363,6 +1430,82 @@ func (pcf *processComponentsFactory) prepareNetworkShardingCollector() (*network return networkShardingCollector, nil } +func (pcf *processComponentsFactory) createExportFactoryHandler( + headerValidator epochStart.HeaderValidator, + requestHandler process.RequestHandler, + resolversFinder dataRetriever.ResolversFinder, + interceptorsContainer process.InterceptorsContainer, + headerSigVerifier process.InterceptedHeaderSigVerifier, + blockTracker process.ValidityAttester, +) (update.ExportFactoryHandler, error) { + + hardforkConfig := pcf.config.Hardfork + accountsDBs := make(map[state.AccountsDbIdentifier]state.AccountsAdapter) + accountsDBs[state.UserAccountsState] = pcf.state.AccountsAdapter() + accountsDBs[state.PeerAccountsState] = pcf.state.PeerAccounts() + exportFolder := filepath.Join(pcf.workingDir, hardforkConfig.ImportFolder) + argsExporter := updateFactory.ArgsExporter{ + CoreComponents: pcf.coreData, + CryptoComponents: pcf.crypto, + HeaderValidator: headerValidator, + DataPool: pcf.data.Datapool(), + StorageService: pcf.data.StorageService(), + RequestHandler: requestHandler, + ShardCoordinator: pcf.bootstrapComponents.ShardCoordinator(), + Messenger: pcf.network.NetworkMessenger(), + ActiveAccountsDBs: accountsDBs, + ExistingResolvers: resolversFinder, + ExportFolder: exportFolder, + ExportTriesStorageConfig: hardforkConfig.ExportTriesStorageConfig, + ExportStateStorageConfig: hardforkConfig.ExportStateStorageConfig, + ExportStateKeysConfig: hardforkConfig.ExportKeysStorageConfig, + MaxTrieLevelInMemory: pcf.config.StateTriesConfig.MaxStateTrieLevelInMemory, + WhiteListHandler: pcf.whiteListHandler, + WhiteListerVerifiedTxs: pcf.whiteListerVerifiedTxs, + InterceptorsContainer: interceptorsContainer, + NodesCoordinator: pcf.nodesCoordinator, + HeaderSigVerifier: headerSigVerifier, + HeaderIntegrityVerifier: pcf.bootstrapComponents.HeaderIntegrityVerifier(), + ValidityAttester: blockTracker, + InputAntifloodHandler: pcf.network.InputAntiFloodHandler(), + OutputAntifloodHandler: pcf.network.OutputAntiFloodHandler(), + RoundHandler: pcf.coreData.RoundHandler(), + InterceptorDebugConfig: pcf.config.Debug.InterceptorResolver, + EnableSignTxWithHashEpoch: pcf.epochConfig.EnableEpochs.TransactionSignedWithTxHashEnableEpoch, + MaxHardCapForMissingNodes: pcf.config.TrieSync.MaxHardCapForMissingNodes, + NumConcurrentTrieSyncers: pcf.config.TrieSync.NumConcurrentTrieSyncers, + TrieSyncerVersion: pcf.config.TrieSync.TrieSyncerVersion, + PeersRatingHandler: pcf.network.PeersRatingHandler(), + } + return updateFactory.NewExportHandlerFactory(argsExporter) +} + +func (pcf *processComponentsFactory) createHardforkTrigger(epochStartTrigger update.EpochHandler) (HardforkTrigger, error) { + hardforkConfig := pcf.config.Hardfork + selfPubKeyBytes := pcf.crypto.PublicKeyBytes() + triggerPubKeyBytes, err := pcf.coreData.ValidatorPubKeyConverter().Decode(hardforkConfig.PublicKeyToListenFrom) + if err != nil { + return nil, fmt.Errorf("%w while decoding HardforkConfig.PublicKeyToListenFrom", err) + } + + argTrigger := trigger.ArgHardforkTrigger{ + TriggerPubKeyBytes: triggerPubKeyBytes, + SelfPubKeyBytes: selfPubKeyBytes, + Enabled: hardforkConfig.EnableTrigger, + EnabledAuthenticated: hardforkConfig.EnableTriggerFromP2P, + ArgumentParser: smartContract.NewArgumentParser(), + EpochProvider: epochStartTrigger, + ExportFactoryHandler: &updateDisabled.ExportFactoryHandler{}, + ChanStopNodeProcess: 
pcf.coreData.ChanStopNodeProcess(), + EpochConfirmedNotifier: pcf.coreData.EpochStartNotifierWithConfirm(), + CloseAfterExportInMinutes: hardforkConfig.CloseAfterExportInMinutes, + ImportStartHandler: pcf.importStartHandler, + RoundHandler: pcf.coreData.RoundHandler(), + } + + return trigger.NewTrigger(argTrigger) +} + func createNetworkShardingCollector( config *config.Config, nodesCoordinator nodesCoordinator.NodesCoordinator, diff --git a/factory/processComponentsHandler.go b/factory/processComponentsHandler.go index a1cc79e7438..56c742fdf16 100644 --- a/factory/processComponentsHandler.go +++ b/factory/processComponentsHandler.go @@ -155,6 +155,9 @@ func (m *managedProcessComponents) CheckSubcomponents() error { if check.IfNil(m.processComponents.txsSender) { return errors.ErrNilTxsSender } + if check.IfNil(m.processComponents.processedMiniBlocksTracker) { + return process.ErrNilProcessedMiniBlocksTracker + } return nil } @@ -542,6 +545,30 @@ func (m *managedProcessComponents) TxsSenderHandler() process.TxsSenderHandler { return m.processComponents.txsSender } +// HardforkTrigger returns the hardfork trigger +func (m *managedProcessComponents) HardforkTrigger() HardforkTrigger { + m.mutProcessComponents.RLock() + defer m.mutProcessComponents.RUnlock() + + if m.processComponents == nil { + return nil + } + + return m.processComponents.hardforkTrigger +} + +// ProcessedMiniBlocksTracker returns the processed mini blocks tracker +func (m *managedProcessComponents) ProcessedMiniBlocksTracker() process.ProcessedMiniBlocksTracker { + m.mutProcessComponents.RLock() + defer m.mutProcessComponents.RUnlock() + + if m.processComponents == nil { + return nil + } + + return m.processComponents.processedMiniBlocksTracker +} + // IsInterfaceNil returns true if the interface is nil func (m *managedProcessComponents) IsInterfaceNil() bool { return m == nil @@ -549,5 +576,5 @@ func (m *managedProcessComponents) IsInterfaceNil() bool { // String returns the name of the component func (m *managedProcessComponents) String() string { - return "managedProcessComponents" + return processComponentsName } diff --git a/factory/processComponentsHandler_test.go b/factory/processComponentsHandler_test.go index 954341c6d32..48785b3d0fd 100644 --- a/factory/processComponentsHandler_test.go +++ b/factory/processComponentsHandler_test.go @@ -92,6 +92,8 @@ func TestManagedProcessComponents_Create_ShouldWork(t *testing.T) { require.True(t, check.IfNil(managedProcessComponents.PeerShardMapper())) require.True(t, check.IfNil(managedProcessComponents.ShardCoordinator())) require.True(t, check.IfNil(managedProcessComponents.TxsSenderHandler())) + require.True(t, check.IfNil(managedProcessComponents.HardforkTrigger())) + require.True(t, check.IfNil(managedProcessComponents.ProcessedMiniBlocksTracker())) err = managedProcessComponents.Create() require.NoError(t, err) @@ -126,6 +128,8 @@ func TestManagedProcessComponents_Create_ShouldWork(t *testing.T) { require.False(t, check.IfNil(managedProcessComponents.PeerShardMapper())) require.False(t, check.IfNil(managedProcessComponents.ShardCoordinator())) require.False(t, check.IfNil(managedProcessComponents.TxsSenderHandler())) + require.False(t, check.IfNil(managedProcessComponents.HardforkTrigger())) + require.False(t, check.IfNil(managedProcessComponents.ProcessedMiniBlocksTracker())) nodeSkBytes, err := cryptoComponents.PrivateKey().ToByteArray() require.Nil(t, err) diff --git a/factory/stateComponentsHandler.go b/factory/stateComponentsHandler.go index 27c948064ce..a4435683061 
100644 --- a/factory/stateComponentsHandler.go +++ b/factory/stateComponentsHandler.go @@ -193,5 +193,5 @@ func (msc *managedStateComponents) IsInterfaceNil() bool { // String returns the name of the component func (msc *managedStateComponents) String() string { - return "managedStateComponents" + return stateComponentsName } diff --git a/factory/stateComponents_test.go b/factory/stateComponents_test.go index 1928827e2d0..dcd190f5b15 100644 --- a/factory/stateComponents_test.go +++ b/factory/stateComponents_test.go @@ -231,6 +231,10 @@ func getGeneralConfig() config.Config { Type: "LRU", Shards: 1, }, + PeersRatingConfig: config.PeersRatingConfig{ + TopRatedCacheCapacity: 1000, + BadRatedCacheCapacity: 1000, + }, } } diff --git a/factory/statusComponents.go b/factory/statusComponents.go index e44c54788ce..60408c6e801 100644 --- a/factory/statusComponents.go +++ b/factory/statusComponents.go @@ -22,7 +22,6 @@ import ( "github.com/ElrondNetwork/elrond-go/sharding" "github.com/ElrondNetwork/elrond-go/sharding/nodesCoordinator" "github.com/ElrondNetwork/elrond-go/storage" - notifierFactory "github.com/ElrondNetwork/notifier-go/factory" ) // TODO: move app status handler initialization here @@ -223,6 +222,7 @@ func (scf *statusComponentsFactory) makeElasticIndexerArgs() *indexerFactory.Arg return &indexerFactory.ArgsIndexerFactory{ Enabled: elasticSearchConfig.Enabled, IndexerCacheSize: elasticSearchConfig.IndexerCacheSize, + BulkRequestMaxSize: elasticSearchConfig.BulkRequestMaxSizeInBytes, ShardCoordinator: scf.shardCoordinator, Url: elasticSearchConfig.URL, UserName: elasticSearchConfig.Username, @@ -240,16 +240,17 @@ func (scf *statusComponentsFactory) makeElasticIndexerArgs() *indexerFactory.Arg } } -func (scf *statusComponentsFactory) makeEventNotifierArgs() *notifierFactory.EventNotifierFactoryArgs { +func (scf *statusComponentsFactory) makeEventNotifierArgs() *outportDriverFactory.EventNotifierFactoryArgs { eventNotifierConfig := scf.externalConfig.EventNotifierConnector - return ¬ifierFactory.EventNotifierFactoryArgs{ + return &outportDriverFactory.EventNotifierFactoryArgs{ Enabled: eventNotifierConfig.Enabled, UseAuthorization: eventNotifierConfig.UseAuthorization, ProxyUrl: eventNotifierConfig.ProxyUrl, Username: eventNotifierConfig.Username, Password: eventNotifierConfig.Password, - Marshalizer: scf.coreComponents.InternalMarshalizer(), + Marshaller: scf.coreComponents.InternalMarshalizer(), Hasher: scf.coreComponents.Hasher(), + PubKeyConverter: scf.coreComponents.AddressPubKeyConverter(), } } diff --git a/factory/statusComponentsHandler.go b/factory/statusComponentsHandler.go index fdc289e05d9..d37bca9306c 100644 --- a/factory/statusComponentsHandler.go +++ b/factory/statusComponentsHandler.go @@ -420,7 +420,7 @@ func registerCpuStatistics(ctx context.Context, appStatusPollingHandler *appStat // String returns the name of the component func (msc *managedStatusComponents) String() string { - return "managedStatusComponents" + return statusComponentsName } func (msc *managedStatusComponents) attachEpochGoRoutineAnalyser() { diff --git a/genesis/process/disabled/feeHandler.go b/genesis/process/disabled/feeHandler.go index 20e00b247ab..6a0de35617f 100644 --- a/genesis/process/disabled/feeHandler.go +++ b/genesis/process/disabled/feeHandler.go @@ -106,7 +106,7 @@ func (fh *FeeHandler) ProcessTransactionFee(_ *big.Int, _ *big.Int, _ []byte) { } // ProcessTransactionFeeRelayedUserTx does nothing -func (fh *FeeHandler) ProcessTransactionFeeRelayedUserTx(_ *big.Int, _ *big.Int, _ []byte, _ 
[]byte){ +func (fh *FeeHandler) ProcessTransactionFeeRelayedUserTx(_ *big.Int, _ *big.Int, _ []byte, _ []byte) { } // RevertFees does nothing diff --git a/genesis/process/disabled/processedMiniBlocksTracker.go b/genesis/process/disabled/processedMiniBlocksTracker.go new file mode 100644 index 00000000000..4ae51e65085 --- /dev/null +++ b/genesis/process/disabled/processedMiniBlocksTracker.go @@ -0,0 +1,55 @@ +package disabled + +import ( + "github.com/ElrondNetwork/elrond-go/process/block/bootstrapStorage" + "github.com/ElrondNetwork/elrond-go/process/block/processedMb" +) + +// ProcessedMiniBlocksTracker implements the ProcessedMiniBlocksTracker interface but does nothing as it is disabled +type ProcessedMiniBlocksTracker struct { +} + +// SetProcessedMiniBlockInfo does nothing as it is a disabled component +func (pmbt *ProcessedMiniBlocksTracker) SetProcessedMiniBlockInfo(_ []byte, _ []byte, _ *processedMb.ProcessedMiniBlockInfo) { +} + +// RemoveMetaBlockHash does nothing as it is a disabled component +func (pmbt *ProcessedMiniBlocksTracker) RemoveMetaBlockHash(_ []byte) { +} + +// RemoveMiniBlockHash does nothing as it is a disabled component +func (pmbt *ProcessedMiniBlocksTracker) RemoveMiniBlockHash(_ []byte) { +} + +// GetProcessedMiniBlocksInfo returns nil as it is a disabled component +func (pmbt *ProcessedMiniBlocksTracker) GetProcessedMiniBlocksInfo(_ []byte) map[string]*processedMb.ProcessedMiniBlockInfo { + return nil +} + +// GetProcessedMiniBlockInfo returns nil as it is a disabled component +func (pmbt *ProcessedMiniBlocksTracker) GetProcessedMiniBlockInfo(_ []byte) (*processedMb.ProcessedMiniBlockInfo, []byte) { + return nil, nil +} + +// IsMiniBlockFullyProcessed returns false as it is a disabled component +func (pmbt *ProcessedMiniBlocksTracker) IsMiniBlockFullyProcessed(_ []byte, _ []byte) bool { + return false +} + +// ConvertProcessedMiniBlocksMapToSlice returns nil as it is a disabled component +func (pmbt *ProcessedMiniBlocksTracker) ConvertProcessedMiniBlocksMapToSlice() []bootstrapStorage.MiniBlocksInMeta { + return nil +} + +// ConvertSliceToProcessedMiniBlocksMap does nothing as it is a disabled component +func (pmbt *ProcessedMiniBlocksTracker) ConvertSliceToProcessedMiniBlocksMap(_ []bootstrapStorage.MiniBlocksInMeta) { +} + +// DisplayProcessedMiniBlocks does nothing as it is a disabled component +func (pmbt *ProcessedMiniBlocksTracker) DisplayProcessedMiniBlocks() { +} + +// IsInterfaceNil returns true if underlying object is nil +func (pmbt *ProcessedMiniBlocksTracker) IsInterfaceNil() bool { + return pmbt == nil +} diff --git a/genesis/process/disabled/requestHandler.go b/genesis/process/disabled/requestHandler.go index 2fa9d93fa5c..2265f19ff37 100644 --- a/genesis/process/disabled/requestHandler.go +++ b/genesis/process/disabled/requestHandler.go @@ -78,6 +78,14 @@ func (r *RequestHandler) CreateTrieNodeIdentifier(_ []byte, _ uint32) []byte { return make([]byte, 0) } +// RequestPeerAuthenticationsChunk does nothing +func (r *RequestHandler) RequestPeerAuthenticationsChunk(_ uint32, _ uint32) { +} + +// RequestPeerAuthenticationsByHashes does nothing +func (r *RequestHandler) RequestPeerAuthenticationsByHashes(_ uint32, _ [][]byte) { +} + // IsInterfaceNil returns true if there is no value under the interface func (r *RequestHandler) IsInterfaceNil() bool { return r == nil diff --git a/genesis/process/metaGenesisBlockCreator.go b/genesis/process/metaGenesisBlockCreator.go index f9ec95fb45b..c92cebf131c 100644 --- 
a/genesis/process/metaGenesisBlockCreator.go +++ b/genesis/process/metaGenesisBlockCreator.go @@ -439,6 +439,7 @@ func createProcessorsForMetaGenesisBlock(arg ArgsGenesisBlockCreator, enableEpoc disabledBlockSizeComputationHandler := &disabled.BlockSizeComputationHandler{} disabledBalanceComputationHandler := &disabled.BalanceComputationHandler{} disabledScheduledTxsExecutionHandler := &disabled.ScheduledTxsExecutionHandler{} + disabledProcessedMiniBlocksTracker := &disabled.ProcessedMiniBlocksTracker{} preProcFactory, err := metachain.NewPreProcessorsContainerFactory( arg.ShardCoordinator, @@ -462,6 +463,7 @@ func createProcessorsForMetaGenesisBlock(arg ArgsGenesisBlockCreator, enableEpoc enableEpochs.ScheduledMiniBlocksEnableEpoch, txTypeHandler, disabledScheduledTxsExecutionHandler, + disabledProcessedMiniBlocksTracker, ) if err != nil { return nil, err @@ -485,26 +487,28 @@ func createProcessorsForMetaGenesisBlock(arg ArgsGenesisBlockCreator, enableEpoc } argsTransactionCoordinator := coordinator.ArgTransactionCoordinator{ - Hasher: arg.Core.Hasher(), - Marshalizer: arg.Core.InternalMarshalizer(), - ShardCoordinator: arg.ShardCoordinator, - Accounts: arg.Accounts, - MiniBlockPool: arg.Data.Datapool().MiniBlocks(), - RequestHandler: disabledRequestHandler, - PreProcessors: preProcContainer, - InterProcessors: interimProcContainer, - GasHandler: gasHandler, - FeeHandler: genesisFeeHandler, - BlockSizeComputation: disabledBlockSizeComputationHandler, - BalanceComputation: disabledBalanceComputationHandler, - EconomicsFee: genesisFeeHandler, - TxTypeHandler: txTypeHandler, - BlockGasAndFeesReCheckEnableEpoch: enableEpochs.BlockGasAndFeesReCheckEnableEpoch, - TransactionsLogProcessor: arg.TxLogsProcessor, - EpochNotifier: epochNotifier, - ScheduledTxsExecutionHandler: disabledScheduledTxsExecutionHandler, - ScheduledMiniBlocksEnableEpoch: enableEpochs.ScheduledMiniBlocksEnableEpoch, - DoubleTransactionsDetector: doubleTransactionsDetector, + Hasher: arg.Core.Hasher(), + Marshalizer: arg.Core.InternalMarshalizer(), + ShardCoordinator: arg.ShardCoordinator, + Accounts: arg.Accounts, + MiniBlockPool: arg.Data.Datapool().MiniBlocks(), + RequestHandler: disabledRequestHandler, + PreProcessors: preProcContainer, + InterProcessors: interimProcContainer, + GasHandler: gasHandler, + FeeHandler: genesisFeeHandler, + BlockSizeComputation: disabledBlockSizeComputationHandler, + BalanceComputation: disabledBalanceComputationHandler, + EconomicsFee: genesisFeeHandler, + TxTypeHandler: txTypeHandler, + BlockGasAndFeesReCheckEnableEpoch: enableEpochs.BlockGasAndFeesReCheckEnableEpoch, + TransactionsLogProcessor: arg.TxLogsProcessor, + EpochNotifier: epochNotifier, + ScheduledTxsExecutionHandler: disabledScheduledTxsExecutionHandler, + ScheduledMiniBlocksEnableEpoch: enableEpochs.ScheduledMiniBlocksEnableEpoch, + DoubleTransactionsDetector: doubleTransactionsDetector, + MiniBlockPartialExecutionEnableEpoch: enableEpochs.MiniBlockPartialExecutionEnableEpoch, + ProcessedMiniBlocksTracker: disabledProcessedMiniBlocksTracker, } txCoordinator, err := coordinator.NewTransactionCoordinator(argsTransactionCoordinator) if err != nil { diff --git a/genesis/process/shardGenesisBlockCreator.go b/genesis/process/shardGenesisBlockCreator.go index e9470042f01..0b5035b0c1d 100644 --- a/genesis/process/shardGenesisBlockCreator.go +++ b/genesis/process/shardGenesisBlockCreator.go @@ -115,9 +115,13 @@ func createGenesisConfig() config.EnableEpochs { FailExecutionOnEveryAPIErrorEnableEpoch: unreachableEpoch, 
AddFailedRelayedTxToInvalidMBsDisableEpoch: unreachableEpoch, SCRSizeInvariantOnBuiltInResultEnableEpoch: unreachableEpoch, + CheckCorrectTokenIDForTransferRoleEnableEpoch: unreachableEpoch, + HeartbeatDisableEpoch: unreachableEpoch, + MiniBlockPartialExecutionEnableEpoch: unreachableEpoch, StakeLimitsEnableEpoch: unreachableEpoch, StakingV4InitEnableEpoch: unreachableEpoch, StakingV4EnableEpoch: unreachableEpoch, + StakingV4DistributeAuctionToWaitingEpoch: unreachableEpoch, } } @@ -352,17 +356,19 @@ func createProcessorsForShardGenesisBlock(arg ArgsGenesisBlockCreator, enableEpo epochNotifier := forking.NewGenericEpochNotifier() argsBuiltIn := builtInFunctions.ArgsCreateBuiltInFunctionContainer{ - GasSchedule: arg.GasSchedule, - MapDNSAddresses: make(map[string]struct{}), - EnableUserNameChange: false, - Marshalizer: arg.Core.InternalMarshalizer(), - Accounts: arg.Accounts, - ShardCoordinator: arg.ShardCoordinator, - EpochNotifier: epochNotifier, - ESDTMultiTransferEnableEpoch: enableEpochs.ESDTMultiTransferEnableEpoch, - ESDTTransferRoleEnableEpoch: enableEpochs.ESDTTransferRoleEnableEpoch, - GlobalMintBurnDisableEpoch: enableEpochs.GlobalMintBurnDisableEpoch, - ESDTTransferMetaEnableEpoch: enableEpochs.BuiltInFunctionOnMetaEnableEpoch, + GasSchedule: arg.GasSchedule, + MapDNSAddresses: make(map[string]struct{}), + EnableUserNameChange: false, + Marshalizer: arg.Core.InternalMarshalizer(), + Accounts: arg.Accounts, + ShardCoordinator: arg.ShardCoordinator, + EpochNotifier: epochNotifier, + ESDTMultiTransferEnableEpoch: enableEpochs.ESDTMultiTransferEnableEpoch, + ESDTTransferRoleEnableEpoch: enableEpochs.ESDTTransferRoleEnableEpoch, + GlobalMintBurnDisableEpoch: enableEpochs.GlobalMintBurnDisableEpoch, + ESDTTransferMetaEnableEpoch: enableEpochs.BuiltInFunctionOnMetaEnableEpoch, + OptimizeNFTStoreEnableEpoch: enableEpochs.OptimizeNFTStoreEnableEpoch, + CheckCorrectTokenIDEnableEpoch: enableEpochs.CheckCorrectTokenIDForTransferRoleEnableEpoch, } builtInFuncs, nftStorageHandler, err := builtInFunctions.CreateBuiltInFuncContainerAndNFTStorageHandler(argsBuiltIn) if err != nil { @@ -551,6 +557,7 @@ func createProcessorsForShardGenesisBlock(arg ArgsGenesisBlockCreator, enableEpo disabledBlockSizeComputationHandler := &disabled.BlockSizeComputationHandler{} disabledBalanceComputationHandler := &disabled.BalanceComputationHandler{} disabledScheduledTxsExecutionHandler := &disabled.ScheduledTxsExecutionHandler{} + disabledProcessedMiniBlocksTracker := &disabled.ProcessedMiniBlocksTracker{} preProcFactory, err := shard.NewPreProcessorsContainerFactory( arg.ShardCoordinator, @@ -576,6 +583,7 @@ func createProcessorsForShardGenesisBlock(arg ArgsGenesisBlockCreator, enableEpo enableEpochs.ScheduledMiniBlocksEnableEpoch, txTypeHandler, disabledScheduledTxsExecutionHandler, + disabledProcessedMiniBlocksTracker, ) if err != nil { return nil, err @@ -599,26 +607,28 @@ func createProcessorsForShardGenesisBlock(arg ArgsGenesisBlockCreator, enableEpo } argsTransactionCoordinator := coordinator.ArgTransactionCoordinator{ - Hasher: arg.Core.Hasher(), - Marshalizer: arg.Core.InternalMarshalizer(), - ShardCoordinator: arg.ShardCoordinator, - Accounts: arg.Accounts, - MiniBlockPool: arg.Data.Datapool().MiniBlocks(), - RequestHandler: disabledRequestHandler, - PreProcessors: preProcContainer, - InterProcessors: interimProcContainer, - GasHandler: gasHandler, - FeeHandler: genesisFeeHandler, - BlockSizeComputation: disabledBlockSizeComputationHandler, - BalanceComputation: disabledBalanceComputationHandler, - 
EconomicsFee: genesisFeeHandler, - TxTypeHandler: txTypeHandler, - BlockGasAndFeesReCheckEnableEpoch: enableEpochs.BlockGasAndFeesReCheckEnableEpoch, - TransactionsLogProcessor: arg.TxLogsProcessor, - EpochNotifier: epochNotifier, - ScheduledTxsExecutionHandler: disabledScheduledTxsExecutionHandler, - ScheduledMiniBlocksEnableEpoch: enableEpochs.ScheduledMiniBlocksEnableEpoch, - DoubleTransactionsDetector: doubleTransactionsDetector, + Hasher: arg.Core.Hasher(), + Marshalizer: arg.Core.InternalMarshalizer(), + ShardCoordinator: arg.ShardCoordinator, + Accounts: arg.Accounts, + MiniBlockPool: arg.Data.Datapool().MiniBlocks(), + RequestHandler: disabledRequestHandler, + PreProcessors: preProcContainer, + InterProcessors: interimProcContainer, + GasHandler: gasHandler, + FeeHandler: genesisFeeHandler, + BlockSizeComputation: disabledBlockSizeComputationHandler, + BalanceComputation: disabledBalanceComputationHandler, + EconomicsFee: genesisFeeHandler, + TxTypeHandler: txTypeHandler, + BlockGasAndFeesReCheckEnableEpoch: enableEpochs.BlockGasAndFeesReCheckEnableEpoch, + TransactionsLogProcessor: arg.TxLogsProcessor, + EpochNotifier: epochNotifier, + ScheduledTxsExecutionHandler: disabledScheduledTxsExecutionHandler, + ScheduledMiniBlocksEnableEpoch: enableEpochs.ScheduledMiniBlocksEnableEpoch, + DoubleTransactionsDetector: doubleTransactionsDetector, + MiniBlockPartialExecutionEnableEpoch: enableEpochs.MiniBlockPartialExecutionEnableEpoch, + ProcessedMiniBlocksTracker: disabledProcessedMiniBlocksTracker, } txCoordinator, err := coordinator.NewTransactionCoordinator(argsTransactionCoordinator) if err != nil { diff --git a/go.mod b/go.mod index 0c5035e622e..4722e9c0a0f 100644 --- a/go.mod +++ b/go.mod @@ -8,13 +8,12 @@ require ( github.com/ElrondNetwork/arwen-wasm-vm/v1_4 v1.4.49 github.com/ElrondNetwork/concurrent-map v0.1.3 github.com/ElrondNetwork/covalent-indexer-go v1.0.6 - github.com/ElrondNetwork/elastic-indexer-go v1.1.41 - github.com/ElrondNetwork/elrond-go-core v1.1.15 + github.com/ElrondNetwork/elastic-indexer-go v1.2.25 + github.com/ElrondNetwork/elrond-go-core v1.1.16-0.20220523150518-f1519c41d352 github.com/ElrondNetwork/elrond-go-crypto v1.0.1 github.com/ElrondNetwork/elrond-go-logger v1.0.7 - github.com/ElrondNetwork/elrond-vm-common v1.3.2 + github.com/ElrondNetwork/elrond-vm-common v1.3.3 github.com/ElrondNetwork/go-libp2p-pubsub v0.5.5-rc2 - github.com/ElrondNetwork/notifier-go v1.1.0 github.com/beevik/ntp v0.3.0 github.com/btcsuite/btcd v0.22.0-beta github.com/davecgh/go-spew v1.1.1 diff --git a/go.sum b/go.sum index 9dc5d8f8252..c97e28b34ee 100644 --- a/go.sum +++ b/go.sum @@ -23,15 +23,14 @@ github.com/ElrondNetwork/concurrent-map v0.1.3 h1:j2LtPrNJuerannC1cQDE79STvi/P04 github.com/ElrondNetwork/concurrent-map v0.1.3/go.mod h1:3XwSwn4JHI0lrKxWLZvtp53Emr8BXYTmNQGwcukHJEE= github.com/ElrondNetwork/covalent-indexer-go v1.0.6 h1:+LNKItUc+Pb7WuTbil3VuiLMmdQ1AY7lBJM476PtVNE= github.com/ElrondNetwork/covalent-indexer-go v1.0.6/go.mod h1:j3h2g96vqhJAuj3aEX2PWhomae2/o7YfXGEfweNXEeQ= -github.com/ElrondNetwork/elastic-indexer-go v1.1.41 h1:FDE+eIxc8zEwu1sZ9mgij2Ci3Fb2H2VV8EkfF9D5ZO4= -github.com/ElrondNetwork/elastic-indexer-go v1.1.41/go.mod h1:zLa7vRvTJXjGXZuOy0BId3v+fvn5LSibOC2BeTsCqvs= +github.com/ElrondNetwork/elastic-indexer-go v1.2.25 h1:21ala1EQTu/30umkJxLTgIWikA17Iw8bP61EqK4poMo= +github.com/ElrondNetwork/elastic-indexer-go v1.2.25/go.mod h1:XkrkGcomheEZyMC1/OoANQ9KV0OCZF6+UP8lSPRrE9I= github.com/ElrondNetwork/elrond-go-core v1.0.0/go.mod 
h1:FQMem7fFF4+8pQ6lVsBZq6yO+smD0nV23P4bJpmPjTo= -github.com/ElrondNetwork/elrond-go-core v1.1.6/go.mod h1:O9FkkTT2H9kxCzfn40TbhoCDXzGmUrRVusMomhK/Y3g= github.com/ElrondNetwork/elrond-go-core v1.1.7/go.mod h1:O9FkkTT2H9kxCzfn40TbhoCDXzGmUrRVusMomhK/Y3g= github.com/ElrondNetwork/elrond-go-core v1.1.13/go.mod h1:Yz8JK5sGBctw7+gU8j2mZHbzQ09Ek4XHJ4Uinq1N6nM= github.com/ElrondNetwork/elrond-go-core v1.1.14/go.mod h1:Yz8JK5sGBctw7+gU8j2mZHbzQ09Ek4XHJ4Uinq1N6nM= -github.com/ElrondNetwork/elrond-go-core v1.1.15 h1:CMGtJMsK+fBHsSrh5b3N/okYMywbfH7Hhvgnf3J5hRQ= -github.com/ElrondNetwork/elrond-go-core v1.1.15/go.mod h1:Yz8JK5sGBctw7+gU8j2mZHbzQ09Ek4XHJ4Uinq1N6nM= +github.com/ElrondNetwork/elrond-go-core v1.1.16-0.20220523150518-f1519c41d352 h1:AGHGB7bHGaUHNyplgGk6RyqB3w02eK5gpn2cGZ9LYm0= +github.com/ElrondNetwork/elrond-go-core v1.1.16-0.20220523150518-f1519c41d352/go.mod h1:Yz8JK5sGBctw7+gU8j2mZHbzQ09Ek4XHJ4Uinq1N6nM= github.com/ElrondNetwork/elrond-go-crypto v1.0.0/go.mod h1:DGiR7/j1xv729Xg8SsjYaUzWXL5svMd44REXjWS/gAc= github.com/ElrondNetwork/elrond-go-crypto v1.0.1 h1:xJUUshIZQ7h+rG7Art/9QHVyaPRV1wEjrxXYBdpmRlM= github.com/ElrondNetwork/elrond-go-crypto v1.0.1/go.mod h1:uunsvweBrrhVojL8uiQSaTPsl3YIQ9iBqtYGM6xs4s0= @@ -42,12 +41,11 @@ github.com/ElrondNetwork/elrond-go-logger v1.0.7/go.mod h1:cBfgx0ST/CJx8jrxJSC5a github.com/ElrondNetwork/elrond-vm-common v1.1.0/go.mod h1:w3i6f8uiuRkE68Ie/gebRcLgTuHqvruJSYrFyZWuLrE= github.com/ElrondNetwork/elrond-vm-common v1.2.9/go.mod h1:B/Y8WiqHyDd7xsjNYsaYbVMp1jQgQ+z4jTJkFvj/EWI= github.com/ElrondNetwork/elrond-vm-common v1.3.0/go.mod h1:B/Y8WiqHyDd7xsjNYsaYbVMp1jQgQ+z4jTJkFvj/EWI= -github.com/ElrondNetwork/elrond-vm-common v1.3.2 h1:O/Wr5k7HXX7p0+U3ZsGdY5ydqfSABZvBSzwyV/xbu08= github.com/ElrondNetwork/elrond-vm-common v1.3.2/go.mod h1:B/Y8WiqHyDd7xsjNYsaYbVMp1jQgQ+z4jTJkFvj/EWI= +github.com/ElrondNetwork/elrond-vm-common v1.3.3 h1:c8nwV3oUNfXrelWM6CZMjBjhf6lJq5DYerd8HcadBlg= +github.com/ElrondNetwork/elrond-vm-common v1.3.3/go.mod h1:B/Y8WiqHyDd7xsjNYsaYbVMp1jQgQ+z4jTJkFvj/EWI= github.com/ElrondNetwork/go-libp2p-pubsub v0.5.5-rc2 h1:Eyi2JlK0Eg6D8XNOiK0dLffpKy2ExQ0mXt+xm1cpKHk= github.com/ElrondNetwork/go-libp2p-pubsub v0.5.5-rc2/go.mod h1:3VSrYfPnRU8skcNAJNCPSyzM0dkazQHTdBMWyn/oAIA= -github.com/ElrondNetwork/notifier-go v1.1.0 h1:+urCi+i+5gfLMAmm2fZ0FXSt0S3k9NrzETLV9/uO7fQ= -github.com/ElrondNetwork/notifier-go v1.1.0/go.mod h1:SoAwqYuPh3WpjPb94zB0e6Ud0Gda/ibcCb3iH2NVPGw= github.com/ElrondNetwork/protobuf v1.3.2 h1:qoCSYiO+8GtXBEZWEjw0WPcZfM3g7QuuJrwpN+y6Mvg= github.com/ElrondNetwork/protobuf v1.3.2/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= @@ -155,8 +153,6 @@ github.com/dgraph-io/ristretto v0.0.2/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70d github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-farm v0.0.0-20190104051053-3adb47b1fb0f/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= -github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= -github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod 
h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= @@ -195,7 +191,6 @@ github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm github.com/gin-gonic/gin v1.3.0/go.mod h1:7cKuhb5qV2ggCFctp2fJQ+ErvciLZrIeoOSOm6mUr7Y= github.com/gin-gonic/gin v1.6.2/go.mod h1:75u5sXoLsGZoRN5Sgbi1eraJ4GU3++wFwWzhwvtwp4M= github.com/gin-gonic/gin v1.7.1/go.mod h1:jD2toBW3GZUr5UMcdrwQA10I7RuaFOl/SGeDjXkfUtY= -github.com/gin-gonic/gin v1.7.2/go.mod h1:jD2toBW3GZUr5UMcdrwQA10I7RuaFOl/SGeDjXkfUtY= github.com/gin-gonic/gin v1.7.6/go.mod h1:jD2toBW3GZUr5UMcdrwQA10I7RuaFOl/SGeDjXkfUtY= github.com/gin-gonic/gin v1.7.7 h1:3DoBmSbJbZAWqXJC3SLjAPfutPJJRN1U5pALB7EeTTs= github.com/gin-gonic/gin v1.7.7/go.mod h1:axIBovoeJpVj8S3BwE0uPMTeReE4+AfFtqpqaZ1qq1U= @@ -223,13 +218,10 @@ github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+ github.com/go-playground/validator/v10 v10.2.0/go.mod h1:uOYAAleCW8F/7oMFd6aG0GOhaH6EGOAJShg8Id5JGkI= github.com/go-playground/validator/v10 v10.4.1 h1:pH2c5ADXtd66mxoE0Zm9SUhxE20r7aM3F26W0hOn+GE= github.com/go-playground/validator/v10 v10.4.1/go.mod h1:nlOn6nFhuKACm19sB/8EGNn9GlaMV7XkbRSipzJ0Ii4= -github.com/go-redis/redis/v8 v8.11.3 h1:GCjoYp8c+yQTJfc0n69iwSiHjvuAdruxl7elnZCxgt8= -github.com/go-redis/redis/v8 v8.11.3/go.mod h1:xNJ9xDG09FsIPwh3bWdk+0oDWHbtF9rPN0F/oD9XeKc= github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 h1:p104kn46Q8WdvHunIJ9dAyjPVtrBPhSr3KT2yUst43I= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= -github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -273,9 +265,8 @@ github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ= -github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -290,7 +281,6 @@ github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm4 github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= 
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY= @@ -838,8 +828,6 @@ github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108 github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= github.com/onsi/ginkgo v1.16.2/go.mod h1:CObGmKUOKaSC0RjmoAK7tKyn4Azo5P2IWuoMnvwxz1E= github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc= -github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc= -github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= github.com/onsi/gomega v1.4.1/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= @@ -847,9 +835,8 @@ github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1Cpa github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= +github.com/onsi/gomega v1.13.0 h1:7lLHu94wT9Ij0o6EWWclhu0aOh32VxhkwEJvzuWPeak= github.com/onsi/gomega v1.13.0/go.mod h1:lRk9szgn8TxENtWd0Tp4c3wjlRfMTMH27I+3Je41yGY= -github.com/onsi/gomega v1.15.0 h1:WjP/FQ/sk43MRmnEcT+MlDw2TFvkrXlprrPST/IudjU= -github.com/onsi/gomega v1.15.0/go.mod h1:cIuvLEne0aoVhAgh/O6ac0Op8WWw9H6eYCriF+tEHG0= github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis= github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74= @@ -970,7 +957,6 @@ github.com/spacemonkeygo/openssl v0.0.0-20181017203307-c2dcc5cca94a/go.mod h1:7A github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572 h1:RC6RW7j+1+HkWaX/Yh71Ee5ZHaHYt7ZP4sQgUrm6cDU= github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572/go.mod h1:w0SWMsp6j9O/dk4/ZpIhL+3CkG8ofA2vuv7k+ltqUMc= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= @@ -983,8 +969,6 @@ github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DM github.com/src-d/envconfig v1.0.0/go.mod h1:Q9YQZ7BKITldTBnoxsE5gOeB5y66RyPXeue/R4aaNBc= github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod 
h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= -github.com/streadway/amqp v1.0.0 h1:kuuDrUJFZL1QYL9hUNuCxNObNzB0bV/ZG5jV3RWAQgo= -github.com/streadway/amqp v1.0.0/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -999,6 +983,12 @@ github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpP github.com/syndtr/goleveldb v1.0.1-0.20190318030020-c3a204f8e965 h1:1oFLiOyVl+W7bnBzGhf7BbIv9loSFQcieWWYIjLqcAw= github.com/syndtr/goleveldb v1.0.1-0.20190318030020-c3a204f8e965/go.mod h1:9OrXJhf154huy1nPWmuSrkgjPUtUNhA+Zmy+6AESzuA= github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA= +github.com/tidwall/gjson v1.14.0 h1:6aeJ0bzojgWLa82gDQHcx3S0Lr/O51I9bJ5nv6JFx5w= +github.com/tidwall/gjson v1.14.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= +github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA= +github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= +github.com/tidwall/pretty v1.2.0 h1:RWIZEg2iJ8/g6fDDYzMpobmaoGh5OLl4AXtGUGPcqCs= +github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= github.com/tklauser/go-sysconf v0.3.4 h1:HT8SVixZd3IzLdfs/xlpq0jeSfTX57g1v6wB1EuzV7M= github.com/tklauser/go-sysconf v0.3.4/go.mod h1:Cl2c8ZRWfHD5IrfHo9VN+FX9kCFjIOyVklgXycLB6ek= github.com/tklauser/numcpus v0.2.1 h1:ct88eFm+Q7m2ZfXJdan1xYoXKlmwsfP+k88q05KvlZc= @@ -1034,7 +1024,6 @@ github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q github.com/xlab/treeprint v1.0.0/go.mod h1:IoImgRak9i3zJyuxOKUP1v4UZd1tMoKkq/Cimt1uhCg= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yusufpapurcu/wmi v1.2.2 h1:KBNDSne4vP5mbSWnJbO+51IMOXJB67QiYCSBrubbPRg= github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= @@ -1117,7 +1106,6 @@ golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPI golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2 h1:Gz96sIWK3OalVv/I/qNygP42zyoKp3xptRVCWRFEBvo= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20180719180050-a680a1efc54d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1215,7 +1203,6 @@ golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201214210602-f9fddec55a1e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210217105451-b926d437f341/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1262,7 +1249,6 @@ golang.org/x/tools v0.0.0-20191216052735-49a3e744a425/go.mod h1:TB2adYChydJhpapK golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.1 h1:wGiQel/hW0NnEkJUk8lbzkX2gFJU6PFxf1v5OlCfuOs= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1346,8 +1332,6 @@ gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= -gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= -gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo= diff --git a/heartbeat/data/heartbeat.proto b/heartbeat/data/heartbeat.proto index 0bf26b58ce9..68f8f5ef13a 100644 --- a/heartbeat/data/heartbeat.proto +++ b/heartbeat/data/heartbeat.proto @@ -5,6 +5,7 @@ package proto; option go_package = "data"; // Heartbeat represents the heartbeat message that is sent between peers +// TODO(heartbeat): remove this message after phasing out the old implementation message Heartbeat { bytes Payload = 1 ; bytes Pubkey = 2 ; @@ -19,6 +20,7 @@ message Heartbeat { } // HeartbeatDTO is the struct used for handling DB operations for heartbeatMessageInfo struct +// TODO(heartbeat): remove this message after phasing out the old implementation message HeartbeatDTO { int64 MaxDurationPeerUnresponsive = 1 ; int64 MaxInactiveTime = 2 ; @@ -41,6 +43,7 @@ message HeartbeatDTO { string PidString = 19; } +// TODO(heartbeat): remove this message after phasing out the old implementation message DbTimeStamp { int64 Timestamp = 1; } diff --git a/heartbeat/errors.go b/heartbeat/errors.go index cce9e130120..1da14be0981 100644 --- a/heartbeat/errors.go +++ b/heartbeat/errors.go @@ -11,8 +11,8 @@ var ErrNilMessenger = errors.New("nil P2P Messenger") // ErrNilPrivateKey signals that a nil private key has been provided var ErrNilPrivateKey = errors.New("nil private key") -// ErrNilMarshalizer signals that a nil marshalizer has been provided -var ErrNilMarshalizer = errors.New("nil marshalizer") +// ErrNilMarshaller signals 
that a nil marshaller has been provided +var ErrNilMarshaller = errors.New("nil marshaller") // ErrNilMessage signals that a nil message has been received var ErrNilMessage = errors.New("nil message") @@ -93,9 +93,6 @@ var ErrNegativeMinTimeToWaitBetweenBroadcastsInSec = errors.New("value MinTimeTo // ErrWrongValues signals that wrong values were provided var ErrWrongValues = errors.New("wrong values for heartbeat parameters") -// ErrValidatorAlreadySet signals that a topic validator has already been set -var ErrValidatorAlreadySet = errors.New("topic validator has already been set") - // ErrNilPeerSignatureHandler signals that a nil peerSignatureHandler object has been provided var ErrNilPeerSignatureHandler = errors.New("trying to set nil peerSignatureHandler") @@ -104,3 +101,39 @@ var ErrNilCurrentBlockProvider = errors.New("nil current block provider") // ErrNilRedundancyHandler signals that a nil redundancy handler was provided var ErrNilRedundancyHandler = errors.New("nil redundancy handler") + +// ErrEmptySendTopic signals that an empty topic string was provided +var ErrEmptySendTopic = errors.New("empty topic for sending messages") + +// ErrInvalidTimeDuration signals that an invalid time duration was provided +var ErrInvalidTimeDuration = errors.New("invalid time duration") + +// ErrInvalidThreshold signals that an invalid threshold was provided +var ErrInvalidThreshold = errors.New("invalid threshold") + +// ErrNilRequestHandler signals that a nil request handler interface was provided +var ErrNilRequestHandler = errors.New("nil request handler") + +// ErrNilNodesCoordinator signals that an operation has been attempted to or with a nil nodes coordinator +var ErrNilNodesCoordinator = errors.New("nil nodes coordinator") + +// ErrNilPeerAuthenticationPool signals that a nil peer authentication pool has been provided +var ErrNilPeerAuthenticationPool = errors.New("nil peer authentication pool") + +// ErrInvalidValue signals that an invalid value has been provided +var ErrInvalidValue = errors.New("invalid value") + +// ErrNilRandomizer signals that a nil randomizer has been provided +var ErrNilRandomizer = errors.New("nil randomizer") + +// ErrNilCacher signals that a nil cache has been provided +var ErrNilCacher = errors.New("nil cacher") + +// ErrNilPeerShardMapper signals that a nil peer shard mapper has been provided +var ErrNilPeerShardMapper = errors.New("nil peer shard mapper") + +// ErrNilEpochNotifier signals that a nil epoch notifier has been provided +var ErrNilEpochNotifier = errors.New("nil epoch notifier") + +// ErrShouldSkipValidator signals that the validator should be skipped +var ErrShouldSkipValidator = errors.New("validator should be skipped") diff --git a/heartbeat/heartbeat.go b/heartbeat/heartbeat.go new file mode 100644 index 00000000000..3b4245c1107 --- /dev/null +++ b/heartbeat/heartbeat.go @@ -0,0 +1,3 @@ +//go:generate protoc -I=proto -I=$GOPATH/src -I=$GOPATH/src/github.com/ElrondNetwork/protobuf/protobuf --gogoslick_out=. heartbeat.proto + +package heartbeat diff --git a/heartbeat/heartbeat.pb.go b/heartbeat/heartbeat.pb.go new file mode 100644 index 00000000000..18af6e21034 --- /dev/null +++ b/heartbeat/heartbeat.pb.go @@ -0,0 +1,1354 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
+// source: heartbeat.proto + +package heartbeat + +import ( + bytes "bytes" + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// HeartbeatV2 represents the heartbeat message that is sent between peers from the same shard containing +// current node status +type HeartbeatV2 struct { + Payload []byte `protobuf:"bytes,1,opt,name=Payload,proto3" json:"Payload,omitempty"` + VersionNumber string `protobuf:"bytes,2,opt,name=VersionNumber,proto3" json:"VersionNumber,omitempty"` + NodeDisplayName string `protobuf:"bytes,3,opt,name=NodeDisplayName,proto3" json:"NodeDisplayName,omitempty"` + Identity string `protobuf:"bytes,4,opt,name=Identity,proto3" json:"Identity,omitempty"` + Nonce uint64 `protobuf:"varint,5,opt,name=Nonce,proto3" json:"Nonce,omitempty"` + PeerSubType uint32 `protobuf:"varint,6,opt,name=PeerSubType,proto3" json:"PeerSubType,omitempty"` +} + +func (m *HeartbeatV2) Reset() { *m = HeartbeatV2{} } +func (*HeartbeatV2) ProtoMessage() {} +func (*HeartbeatV2) Descriptor() ([]byte, []int) { + return fileDescriptor_3c667767fb9826a9, []int{0} +} +func (m *HeartbeatV2) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HeartbeatV2) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_HeartbeatV2.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *HeartbeatV2) XXX_Merge(src proto.Message) { + xxx_messageInfo_HeartbeatV2.Merge(m, src) +} +func (m *HeartbeatV2) XXX_Size() int { + return m.Size() +} +func (m *HeartbeatV2) XXX_DiscardUnknown() { + xxx_messageInfo_HeartbeatV2.DiscardUnknown(m) +} + +var xxx_messageInfo_HeartbeatV2 proto.InternalMessageInfo + +func (m *HeartbeatV2) GetPayload() []byte { + if m != nil { + return m.Payload + } + return nil +} + +func (m *HeartbeatV2) GetVersionNumber() string { + if m != nil { + return m.VersionNumber + } + return "" +} + +func (m *HeartbeatV2) GetNodeDisplayName() string { + if m != nil { + return m.NodeDisplayName + } + return "" +} + +func (m *HeartbeatV2) GetIdentity() string { + if m != nil { + return m.Identity + } + return "" +} + +func (m *HeartbeatV2) GetNonce() uint64 { + if m != nil { + return m.Nonce + } + return 0 +} + +func (m *HeartbeatV2) GetPeerSubType() uint32 { + if m != nil { + return m.PeerSubType + } + return 0 +} + +// PeerAuthentication represents the DTO used to pass peer authentication information such as public key, peer id, +// signature, payload and the signature. 
This message is used to link the peerID with the associated public key +type PeerAuthentication struct { + Pubkey []byte `protobuf:"bytes,1,opt,name=Pubkey,proto3" json:"Pubkey,omitempty"` + Signature []byte `protobuf:"bytes,2,opt,name=Signature,proto3" json:"Signature,omitempty"` + Pid []byte `protobuf:"bytes,3,opt,name=Pid,proto3" json:"Pid,omitempty"` + Payload []byte `protobuf:"bytes,4,opt,name=Payload,proto3" json:"Payload,omitempty"` + PayloadSignature []byte `protobuf:"bytes,5,opt,name=PayloadSignature,proto3" json:"PayloadSignature,omitempty"` +} + +func (m *PeerAuthentication) Reset() { *m = PeerAuthentication{} } +func (*PeerAuthentication) ProtoMessage() {} +func (*PeerAuthentication) Descriptor() ([]byte, []int) { + return fileDescriptor_3c667767fb9826a9, []int{1} +} +func (m *PeerAuthentication) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PeerAuthentication) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_PeerAuthentication.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *PeerAuthentication) XXX_Merge(src proto.Message) { + xxx_messageInfo_PeerAuthentication.Merge(m, src) +} +func (m *PeerAuthentication) XXX_Size() int { + return m.Size() +} +func (m *PeerAuthentication) XXX_DiscardUnknown() { + xxx_messageInfo_PeerAuthentication.DiscardUnknown(m) +} + +var xxx_messageInfo_PeerAuthentication proto.InternalMessageInfo + +func (m *PeerAuthentication) GetPubkey() []byte { + if m != nil { + return m.Pubkey + } + return nil +} + +func (m *PeerAuthentication) GetSignature() []byte { + if m != nil { + return m.Signature + } + return nil +} + +func (m *PeerAuthentication) GetPid() []byte { + if m != nil { + return m.Pid + } + return nil +} + +func (m *PeerAuthentication) GetPayload() []byte { + if m != nil { + return m.Payload + } + return nil +} + +func (m *PeerAuthentication) GetPayloadSignature() []byte { + if m != nil { + return m.PayloadSignature + } + return nil +} + +// Payload represents the DTO used as payload for both HeartbeatV2 and PeerAuthentication messages +type Payload struct { + Timestamp int64 `protobuf:"varint,1,opt,name=Timestamp,proto3" json:"Timestamp,omitempty"` + HardforkMessage string `protobuf:"bytes,2,opt,name=HardforkMessage,proto3" json:"HardforkMessage,omitempty"` +} + +func (m *Payload) Reset() { *m = Payload{} } +func (*Payload) ProtoMessage() {} +func (*Payload) Descriptor() ([]byte, []int) { + return fileDescriptor_3c667767fb9826a9, []int{2} +} +func (m *Payload) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Payload) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Payload.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Payload) XXX_Merge(src proto.Message) { + xxx_messageInfo_Payload.Merge(m, src) +} +func (m *Payload) XXX_Size() int { + return m.Size() +} +func (m *Payload) XXX_DiscardUnknown() { + xxx_messageInfo_Payload.DiscardUnknown(m) +} + +var xxx_messageInfo_Payload proto.InternalMessageInfo + +func (m *Payload) GetTimestamp() int64 { + if m != nil { + return m.Timestamp + } + return 0 +} + +func (m *Payload) GetHardforkMessage() string { + if m != nil { + return m.HardforkMessage + } + return "" +} + +func init() { + 
proto.RegisterType((*HeartbeatV2)(nil), "proto.HeartbeatV2") + proto.RegisterType((*PeerAuthentication)(nil), "proto.PeerAuthentication") + proto.RegisterType((*Payload)(nil), "proto.Payload") +} + +func init() { proto.RegisterFile("heartbeat.proto", fileDescriptor_3c667767fb9826a9) } + +var fileDescriptor_3c667767fb9826a9 = []byte{ + // 371 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x64, 0x92, 0xbf, 0x6e, 0xe2, 0x40, + 0x10, 0xc6, 0xbd, 0x07, 0xe6, 0x8e, 0x05, 0x04, 0x5a, 0x9d, 0x4e, 0xd6, 0xe9, 0xb4, 0xb2, 0xd0, + 0x15, 0x56, 0x8a, 0x14, 0xc9, 0x03, 0x44, 0x89, 0x52, 0x90, 0x22, 0x96, 0x63, 0x10, 0x45, 0xba, + 0x35, 0x9e, 0x80, 0x05, 0xf6, 0x5a, 0xeb, 0x75, 0xe1, 0x2e, 0x8f, 0x90, 0x67, 0x48, 0x95, 0x47, + 0x89, 0x94, 0x86, 0x92, 0x32, 0x98, 0x26, 0x25, 0x8f, 0x10, 0x79, 0x63, 0xfe, 0xa6, 0xda, 0xf9, + 0x7e, 0x3b, 0x1a, 0x7d, 0xf3, 0x69, 0x70, 0x7b, 0x02, 0x4c, 0x48, 0x0f, 0x98, 0x3c, 0x8d, 0x05, + 0x97, 0x9c, 0xe8, 0xea, 0xe9, 0xbe, 0x21, 0xdc, 0xe8, 0x6d, 0xbe, 0x86, 0x67, 0xc4, 0xc0, 0x3f, + 0x1d, 0x96, 0xcd, 0x38, 0xf3, 0x0d, 0x64, 0x22, 0xab, 0xe9, 0x6e, 0x24, 0xf9, 0x8f, 0x5b, 0x43, + 0x10, 0x49, 0xc0, 0x23, 0x3b, 0x0d, 0x3d, 0x10, 0xc6, 0x0f, 0x13, 0x59, 0x75, 0xf7, 0x10, 0x12, + 0x0b, 0xb7, 0x6d, 0xee, 0xc3, 0x75, 0x90, 0xc4, 0x33, 0x96, 0xd9, 0x2c, 0x04, 0xa3, 0xa2, 0xfa, + 0x8e, 0x31, 0xf9, 0x8b, 0x7f, 0xdd, 0xf8, 0x10, 0xc9, 0x40, 0x66, 0x46, 0x55, 0xb5, 0x6c, 0x35, + 0xf9, 0x8d, 0x75, 0x9b, 0x47, 0x23, 0x30, 0x74, 0x13, 0x59, 0x55, 0xf7, 0x4b, 0x10, 0x13, 0x37, + 0x1c, 0x00, 0xd1, 0x4f, 0xbd, 0x41, 0x16, 0x83, 0x51, 0x33, 0x91, 0xd5, 0x72, 0xf7, 0x51, 0xf7, + 0x19, 0x61, 0x52, 0xe8, 0xcb, 0x54, 0x4e, 0x8a, 0x51, 0x23, 0x26, 0x03, 0x1e, 0x91, 0x3f, 0xb8, + 0xe6, 0xa4, 0xde, 0x14, 0xb2, 0x72, 0xa7, 0x52, 0x91, 0x7f, 0xb8, 0xde, 0x0f, 0xc6, 0x11, 0x93, + 0xa9, 0x00, 0xb5, 0x4e, 0xd3, 0xdd, 0x01, 0xd2, 0xc1, 0x15, 0x27, 0xf0, 0x95, 0xfd, 0xa6, 0x5b, + 0x94, 0xfb, 0xe1, 0x54, 0x0f, 0xc3, 0x39, 0xc1, 0x9d, 0xb2, 0xdc, 0x0d, 0xd4, 0x55, 0xcb, 0x37, + 0xde, 0xbd, 0xdb, 0x4e, 0x29, 0x0c, 0x0c, 0x82, 0x10, 0x12, 0xc9, 0xc2, 0x58, 0x79, 0xab, 0xb8, + 0x3b, 0x50, 0x64, 0xd9, 0x63, 0xc2, 0x7f, 0xe0, 0x62, 0x7a, 0x0b, 0x49, 0xc2, 0xc6, 0x50, 0x66, + 0x7e, 0x8c, 0xaf, 0x2e, 0xe6, 0x4b, 0xaa, 0x2d, 0x96, 0x54, 0x5b, 0x2f, 0x29, 0x7a, 0xcc, 0x29, + 0x7a, 0xc9, 0x29, 0x7a, 0xcd, 0x29, 0x9a, 0xe7, 0x14, 0xbd, 0xe7, 0x14, 0x7d, 0xe4, 0x54, 0x5b, + 0xe7, 0x14, 0x3d, 0xad, 0xa8, 0x36, 0x5f, 0x51, 0x6d, 0xb1, 0xa2, 0xda, 0x7d, 0x7d, 0x7b, 0x13, + 0x5e, 0x4d, 0x5d, 0xc3, 0xf9, 0x67, 0x00, 0x00, 0x00, 0xff, 0xff, 0x86, 0x95, 0xe3, 0x8b, 0x27, + 0x02, 0x00, 0x00, +} + +func (this *HeartbeatV2) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*HeartbeatV2) + if !ok { + that2, ok := that.(HeartbeatV2) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !bytes.Equal(this.Payload, that1.Payload) { + return false + } + if this.VersionNumber != that1.VersionNumber { + return false + } + if this.NodeDisplayName != that1.NodeDisplayName { + return false + } + if this.Identity != that1.Identity { + return false + } + if this.Nonce != that1.Nonce { + return false + } + if this.PeerSubType != that1.PeerSubType { + return false + } + return true +} +func (this *PeerAuthentication) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*PeerAuthentication) + if !ok { 
+ that2, ok := that.(PeerAuthentication) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !bytes.Equal(this.Pubkey, that1.Pubkey) { + return false + } + if !bytes.Equal(this.Signature, that1.Signature) { + return false + } + if !bytes.Equal(this.Pid, that1.Pid) { + return false + } + if !bytes.Equal(this.Payload, that1.Payload) { + return false + } + if !bytes.Equal(this.PayloadSignature, that1.PayloadSignature) { + return false + } + return true +} +func (this *Payload) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Payload) + if !ok { + that2, ok := that.(Payload) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Timestamp != that1.Timestamp { + return false + } + if this.HardforkMessage != that1.HardforkMessage { + return false + } + return true +} +func (this *HeartbeatV2) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 10) + s = append(s, "&heartbeat.HeartbeatV2{") + s = append(s, "Payload: "+fmt.Sprintf("%#v", this.Payload)+",\n") + s = append(s, "VersionNumber: "+fmt.Sprintf("%#v", this.VersionNumber)+",\n") + s = append(s, "NodeDisplayName: "+fmt.Sprintf("%#v", this.NodeDisplayName)+",\n") + s = append(s, "Identity: "+fmt.Sprintf("%#v", this.Identity)+",\n") + s = append(s, "Nonce: "+fmt.Sprintf("%#v", this.Nonce)+",\n") + s = append(s, "PeerSubType: "+fmt.Sprintf("%#v", this.PeerSubType)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *PeerAuthentication) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 9) + s = append(s, "&heartbeat.PeerAuthentication{") + s = append(s, "Pubkey: "+fmt.Sprintf("%#v", this.Pubkey)+",\n") + s = append(s, "Signature: "+fmt.Sprintf("%#v", this.Signature)+",\n") + s = append(s, "Pid: "+fmt.Sprintf("%#v", this.Pid)+",\n") + s = append(s, "Payload: "+fmt.Sprintf("%#v", this.Payload)+",\n") + s = append(s, "PayloadSignature: "+fmt.Sprintf("%#v", this.PayloadSignature)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Payload) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&heartbeat.Payload{") + s = append(s, "Timestamp: "+fmt.Sprintf("%#v", this.Timestamp)+",\n") + s = append(s, "HardforkMessage: "+fmt.Sprintf("%#v", this.HardforkMessage)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringHeartbeat(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *HeartbeatV2) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HeartbeatV2) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HeartbeatV2) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.PeerSubType != 0 { + i = encodeVarintHeartbeat(dAtA, i, uint64(m.PeerSubType)) + i-- + dAtA[i] = 0x30 + } + if m.Nonce != 0 { + i = encodeVarintHeartbeat(dAtA, i, uint64(m.Nonce)) + i-- + dAtA[i] = 0x28 + } + if 
len(m.Identity) > 0 { + i -= len(m.Identity) + copy(dAtA[i:], m.Identity) + i = encodeVarintHeartbeat(dAtA, i, uint64(len(m.Identity))) + i-- + dAtA[i] = 0x22 + } + if len(m.NodeDisplayName) > 0 { + i -= len(m.NodeDisplayName) + copy(dAtA[i:], m.NodeDisplayName) + i = encodeVarintHeartbeat(dAtA, i, uint64(len(m.NodeDisplayName))) + i-- + dAtA[i] = 0x1a + } + if len(m.VersionNumber) > 0 { + i -= len(m.VersionNumber) + copy(dAtA[i:], m.VersionNumber) + i = encodeVarintHeartbeat(dAtA, i, uint64(len(m.VersionNumber))) + i-- + dAtA[i] = 0x12 + } + if len(m.Payload) > 0 { + i -= len(m.Payload) + copy(dAtA[i:], m.Payload) + i = encodeVarintHeartbeat(dAtA, i, uint64(len(m.Payload))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *PeerAuthentication) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PeerAuthentication) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PeerAuthentication) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.PayloadSignature) > 0 { + i -= len(m.PayloadSignature) + copy(dAtA[i:], m.PayloadSignature) + i = encodeVarintHeartbeat(dAtA, i, uint64(len(m.PayloadSignature))) + i-- + dAtA[i] = 0x2a + } + if len(m.Payload) > 0 { + i -= len(m.Payload) + copy(dAtA[i:], m.Payload) + i = encodeVarintHeartbeat(dAtA, i, uint64(len(m.Payload))) + i-- + dAtA[i] = 0x22 + } + if len(m.Pid) > 0 { + i -= len(m.Pid) + copy(dAtA[i:], m.Pid) + i = encodeVarintHeartbeat(dAtA, i, uint64(len(m.Pid))) + i-- + dAtA[i] = 0x1a + } + if len(m.Signature) > 0 { + i -= len(m.Signature) + copy(dAtA[i:], m.Signature) + i = encodeVarintHeartbeat(dAtA, i, uint64(len(m.Signature))) + i-- + dAtA[i] = 0x12 + } + if len(m.Pubkey) > 0 { + i -= len(m.Pubkey) + copy(dAtA[i:], m.Pubkey) + i = encodeVarintHeartbeat(dAtA, i, uint64(len(m.Pubkey))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Payload) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Payload) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Payload) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.HardforkMessage) > 0 { + i -= len(m.HardforkMessage) + copy(dAtA[i:], m.HardforkMessage) + i = encodeVarintHeartbeat(dAtA, i, uint64(len(m.HardforkMessage))) + i-- + dAtA[i] = 0x12 + } + if m.Timestamp != 0 { + i = encodeVarintHeartbeat(dAtA, i, uint64(m.Timestamp)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func encodeVarintHeartbeat(dAtA []byte, offset int, v uint64) int { + offset -= sovHeartbeat(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *HeartbeatV2) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Payload) + if l > 0 { + n += 1 + l + sovHeartbeat(uint64(l)) + } + l = len(m.VersionNumber) + if l > 0 { + n += 1 + l + sovHeartbeat(uint64(l)) + } + l = len(m.NodeDisplayName) + if l > 0 { + n += 1 + l + sovHeartbeat(uint64(l)) + } + l = len(m.Identity) + if l > 0 { + n += 1 + l + sovHeartbeat(uint64(l)) + 
} + if m.Nonce != 0 { + n += 1 + sovHeartbeat(uint64(m.Nonce)) + } + if m.PeerSubType != 0 { + n += 1 + sovHeartbeat(uint64(m.PeerSubType)) + } + return n +} + +func (m *PeerAuthentication) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Pubkey) + if l > 0 { + n += 1 + l + sovHeartbeat(uint64(l)) + } + l = len(m.Signature) + if l > 0 { + n += 1 + l + sovHeartbeat(uint64(l)) + } + l = len(m.Pid) + if l > 0 { + n += 1 + l + sovHeartbeat(uint64(l)) + } + l = len(m.Payload) + if l > 0 { + n += 1 + l + sovHeartbeat(uint64(l)) + } + l = len(m.PayloadSignature) + if l > 0 { + n += 1 + l + sovHeartbeat(uint64(l)) + } + return n +} + +func (m *Payload) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Timestamp != 0 { + n += 1 + sovHeartbeat(uint64(m.Timestamp)) + } + l = len(m.HardforkMessage) + if l > 0 { + n += 1 + l + sovHeartbeat(uint64(l)) + } + return n +} + +func sovHeartbeat(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozHeartbeat(x uint64) (n int) { + return sovHeartbeat(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *HeartbeatV2) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&HeartbeatV2{`, + `Payload:` + fmt.Sprintf("%v", this.Payload) + `,`, + `VersionNumber:` + fmt.Sprintf("%v", this.VersionNumber) + `,`, + `NodeDisplayName:` + fmt.Sprintf("%v", this.NodeDisplayName) + `,`, + `Identity:` + fmt.Sprintf("%v", this.Identity) + `,`, + `Nonce:` + fmt.Sprintf("%v", this.Nonce) + `,`, + `PeerSubType:` + fmt.Sprintf("%v", this.PeerSubType) + `,`, + `}`, + }, "") + return s +} +func (this *PeerAuthentication) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PeerAuthentication{`, + `Pubkey:` + fmt.Sprintf("%v", this.Pubkey) + `,`, + `Signature:` + fmt.Sprintf("%v", this.Signature) + `,`, + `Pid:` + fmt.Sprintf("%v", this.Pid) + `,`, + `Payload:` + fmt.Sprintf("%v", this.Payload) + `,`, + `PayloadSignature:` + fmt.Sprintf("%v", this.PayloadSignature) + `,`, + `}`, + }, "") + return s +} +func (this *Payload) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Payload{`, + `Timestamp:` + fmt.Sprintf("%v", this.Timestamp) + `,`, + `HardforkMessage:` + fmt.Sprintf("%v", this.HardforkMessage) + `,`, + `}`, + }, "") + return s +} +func valueToStringHeartbeat(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *HeartbeatV2) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowHeartbeat + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HeartbeatV2: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HeartbeatV2: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Payload", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowHeartbeat + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= 
int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthHeartbeat + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthHeartbeat + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Payload = append(m.Payload[:0], dAtA[iNdEx:postIndex]...) + if m.Payload == nil { + m.Payload = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VersionNumber", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowHeartbeat + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthHeartbeat + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthHeartbeat + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VersionNumber = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeDisplayName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowHeartbeat + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthHeartbeat + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthHeartbeat + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NodeDisplayName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Identity", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowHeartbeat + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthHeartbeat + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthHeartbeat + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Identity = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Nonce", wireType) + } + m.Nonce = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowHeartbeat + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Nonce |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PeerSubType", wireType) + } + m.PeerSubType = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowHeartbeat + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.PeerSubType |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipHeartbeat(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthHeartbeat + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthHeartbeat + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx 
+= skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PeerAuthentication) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowHeartbeat + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PeerAuthentication: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PeerAuthentication: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pubkey", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowHeartbeat + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthHeartbeat + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthHeartbeat + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Pubkey = append(m.Pubkey[:0], dAtA[iNdEx:postIndex]...) + if m.Pubkey == nil { + m.Pubkey = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Signature", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowHeartbeat + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthHeartbeat + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthHeartbeat + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Signature = append(m.Signature[:0], dAtA[iNdEx:postIndex]...) + if m.Signature == nil { + m.Signature = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pid", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowHeartbeat + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthHeartbeat + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthHeartbeat + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Pid = append(m.Pid[:0], dAtA[iNdEx:postIndex]...) + if m.Pid == nil { + m.Pid = []byte{} + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Payload", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowHeartbeat + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthHeartbeat + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthHeartbeat + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Payload = append(m.Payload[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Payload == nil { + m.Payload = []byte{} + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PayloadSignature", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowHeartbeat + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthHeartbeat + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthHeartbeat + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PayloadSignature = append(m.PayloadSignature[:0], dAtA[iNdEx:postIndex]...) + if m.PayloadSignature == nil { + m.PayloadSignature = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipHeartbeat(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthHeartbeat + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthHeartbeat + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Payload) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowHeartbeat + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Payload: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Payload: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + } + m.Timestamp = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowHeartbeat + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Timestamp |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HardforkMessage", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowHeartbeat + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthHeartbeat + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthHeartbeat + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.HardforkMessage = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipHeartbeat(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthHeartbeat + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthHeartbeat + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipHeartbeat(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
0, ErrIntOverflowHeartbeat + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowHeartbeat + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowHeartbeat + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthHeartbeat + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupHeartbeat + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthHeartbeat + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthHeartbeat = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowHeartbeat = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupHeartbeat = fmt.Errorf("proto: unexpected end of group") +) diff --git a/heartbeat/interface.go b/heartbeat/interface.go index c6a612eb175..3b63a1e9bf1 100644 --- a/heartbeat/interface.go +++ b/heartbeat/interface.go @@ -1,7 +1,6 @@ package heartbeat import ( - "io" "time" "github.com/ElrondNetwork/elrond-go-core/core" @@ -10,22 +9,15 @@ import ( "github.com/ElrondNetwork/elrond-go/common" heartbeatData "github.com/ElrondNetwork/elrond-go/heartbeat/data" "github.com/ElrondNetwork/elrond-go/p2p" + "github.com/ElrondNetwork/elrond-go/sharding/nodesCoordinator" "github.com/ElrondNetwork/elrond-go/state" ) // P2PMessenger defines a subset of the p2p.Messenger interface type P2PMessenger interface { - io.Closer - Bootstrap() error Broadcast(topic string, buff []byte) - BroadcastOnChannel(channel string, topic string, buff []byte) - BroadcastOnChannelBlocking(channel string, topic string, buff []byte) error - CreateTopic(name string, createChannelForTopic bool) error - HasTopic(name string) bool - RegisterMessageProcessor(topic string, identifier string, handler p2p.MessageProcessor) error - PeerAddresses(pid core.PeerID) []string - IsConnectedToTheNetwork() bool ID() core.PeerID + Sign(payload []byte) ([]byte, error) IsInterfaceNil() bool } @@ -42,7 +34,7 @@ type EligibleListProvider interface { IsInterfaceNil() bool } -//Timer defines an interface for tracking time +// Timer defines an interface for tracking time type Timer interface { Now() time.Time IsInterfaceNil() bool @@ -63,7 +55,7 @@ type HeartbeatStorageHandler interface { // The interface assures that the collected data will be used by the p2p network sharding components type NetworkShardingCollector interface { UpdatePeerIDInfo(pid core.PeerID, pk []byte, shardID uint32) - UpdatePeerIdSubType(pid core.PeerID, peerSubType core.P2PPeerSubType) + PutPeerIdSubType(pid core.PeerID, peerSubType core.P2PPeerSubType) IsInterfaceNil() bool } @@ -87,7 +79,8 @@ type PeerTypeProviderHandler interface { type HardforkTrigger interface { TriggerReceived(payload []byte, data []byte, pkBytes []byte) (bool, error) RecordedTriggerMessage() ([]byte, bool) - NotifyTriggerReceived() <-chan struct{} + 
NotifyTriggerReceived() <-chan struct{} // TODO: remove it with heartbeat v1 cleanup + NotifyTriggerReceivedV2() <-chan struct{} CreateData() []byte IsInterfaceNil() bool } @@ -113,3 +106,10 @@ type NodeRedundancyHandler interface { ObserverPrivateKey() crypto.PrivateKey IsInterfaceNil() bool } + +// NodesCoordinator defines the behavior of a struct able to do validator selection +type NodesCoordinator interface { + GetAllEligibleValidatorsPublicKeys(epoch uint32) (map[uint32][][]byte, error) + GetValidatorWithPublicKey(publicKey []byte) (validator nodesCoordinator.Validator, shardId uint32, err error) + IsInterfaceNil() bool +} diff --git a/heartbeat/mock/hardforkHandlerStub.go b/heartbeat/mock/hardforkHandlerStub.go new file mode 100644 index 00000000000..3f5e270edd7 --- /dev/null +++ b/heartbeat/mock/hardforkHandlerStub.go @@ -0,0 +1,31 @@ +package mock + +// HardforkHandlerStub - +type HardforkHandlerStub struct { + ShouldTriggerHardforkCalled func() <-chan struct{} + ExecuteCalled func() + CloseCalled func() +} + +// ShouldTriggerHardfork - +func (stub *HardforkHandlerStub) ShouldTriggerHardfork() <-chan struct{} { + if stub.ShouldTriggerHardforkCalled != nil { + return stub.ShouldTriggerHardforkCalled() + } + + return nil +} + +// Execute - +func (stub *HardforkHandlerStub) Execute() { + if stub.ExecuteCalled != nil { + stub.ExecuteCalled() + } +} + +// Close - +func (stub *HardforkHandlerStub) Close() { + if stub.CloseCalled != nil { + stub.CloseCalled() + } +} diff --git a/heartbeat/mock/hardforkTriggerStub.go b/heartbeat/mock/hardforkTriggerStub.go deleted file mode 100644 index 6858c666c16..00000000000 --- a/heartbeat/mock/hardforkTriggerStub.go +++ /dev/null @@ -1,82 +0,0 @@ -package mock - -import "github.com/ElrondNetwork/elrond-go/update" - -// HardforkTriggerStub - -type HardforkTriggerStub struct { - TriggerCalled func(epoch uint32, withEarlyEndOfEpoch bool) error - IsSelfTriggerCalled func() bool - TriggerReceivedCalled func(payload []byte, data []byte, pkBytes []byte) (bool, error) - RecordedTriggerMessageCalled func() ([]byte, bool) - CreateDataCalled func() []byte - AddCloserCalled func(closer update.Closer) error - NotifyTriggerReceivedCalled func() <-chan struct{} -} - -// Trigger - -func (hts *HardforkTriggerStub) Trigger(epoch uint32, withEarlyEndOfEpoch bool) error { - if hts.TriggerCalled != nil { - return hts.TriggerCalled(epoch, withEarlyEndOfEpoch) - } - - return nil -} - -// IsSelfTrigger - -func (hts *HardforkTriggerStub) IsSelfTrigger() bool { - if hts.IsSelfTriggerCalled != nil { - return hts.IsSelfTriggerCalled() - } - - return false -} - -// TriggerReceived - -func (hts *HardforkTriggerStub) TriggerReceived(payload []byte, data []byte, pkBytes []byte) (bool, error) { - if hts.TriggerReceivedCalled != nil { - return hts.TriggerReceivedCalled(payload, data, pkBytes) - } - - return false, nil -} - -// RecordedTriggerMessage - -func (hts *HardforkTriggerStub) RecordedTriggerMessage() ([]byte, bool) { - if hts.RecordedTriggerMessageCalled != nil { - return hts.RecordedTriggerMessageCalled() - } - - return nil, false -} - -// CreateData - -func (hts *HardforkTriggerStub) CreateData() []byte { - if hts.CreateDataCalled != nil { - return hts.CreateDataCalled() - } - - return make([]byte, 0) -} - -// AddCloser - -func (hts *HardforkTriggerStub) AddCloser(closer update.Closer) error { - if hts.AddCloserCalled != nil { - return hts.AddCloserCalled(closer) - } - - return nil -} - -// NotifyTriggerReceived - -func (hts *HardforkTriggerStub) NotifyTriggerReceived() 
<-chan struct{} { - if hts.NotifyTriggerReceivedCalled != nil { - return hts.NotifyTriggerReceivedCalled() - } - - return make(chan struct{}) -} - -// IsInterfaceNil - -func (hts *HardforkTriggerStub) IsInterfaceNil() bool { - return hts == nil -} diff --git a/heartbeat/mock/keyMock.go b/heartbeat/mock/keyMock.go index 5e795b4d5e0..80d42612eaa 100644 --- a/heartbeat/mock/keyMock.go +++ b/heartbeat/mock/keyMock.go @@ -30,7 +30,11 @@ type KeyGenMock struct { // ToByteArray - func (sspk *PublicKeyMock) ToByteArray() ([]byte, error) { - return sspk.ToByteArrayHandler() + if sspk.ToByteArrayHandler != nil { + return sspk.ToByteArrayHandler() + } + + return make([]byte, 0), nil } // Suite - @@ -50,7 +54,11 @@ func (sspk *PublicKeyMock) IsInterfaceNil() bool { // ToByteArray - func (sk *PrivateKeyStub) ToByteArray() ([]byte, error) { - return sk.ToByteArrayHandler() + if sk.ToByteArrayHandler != nil { + return sk.ToByteArrayHandler() + } + + return make([]byte, 0), nil } // GeneratePublic - diff --git a/heartbeat/mock/marshalizerMock.go b/heartbeat/mock/marshallerMock.go similarity index 63% rename from heartbeat/mock/marshalizerMock.go rename to heartbeat/mock/marshallerMock.go index 5299a5bb257..f68a804e2af 100644 --- a/heartbeat/mock/marshalizerMock.go +++ b/heartbeat/mock/marshallerMock.go @@ -5,17 +5,17 @@ import ( "errors" ) -var errMockMarshalizer = errors.New("MarshalizerMock generic error") +var errMockMarshaller = errors.New("MarshallerMock generic error") -// MarshalizerMock that will be used for testing -type MarshalizerMock struct { +// MarshallerMock that will be used for testing +type MarshallerMock struct { Fail bool } // Marshal converts the input object in a slice of bytes -func (mm *MarshalizerMock) Marshal(obj interface{}) ([]byte, error) { +func (mm *MarshallerMock) Marshal(obj interface{}) ([]byte, error) { if mm.Fail { - return nil, errMockMarshalizer + return nil, errMockMarshaller } if obj == nil { @@ -26,9 +26,9 @@ func (mm *MarshalizerMock) Marshal(obj interface{}) ([]byte, error) { } // Unmarshal applies the serialized values over an instantiated object -func (mm *MarshalizerMock) Unmarshal(obj interface{}, buff []byte) error { +func (mm *MarshallerMock) Unmarshal(obj interface{}, buff []byte) error { if mm.Fail { - return errMockMarshalizer + return errMockMarshaller } if obj == nil { @@ -47,6 +47,6 @@ func (mm *MarshalizerMock) Unmarshal(obj interface{}, buff []byte) error { } // IsInterfaceNil returns true if there is no value under the interface -func (mm *MarshalizerMock) IsInterfaceNil() bool { +func (mm *MarshallerMock) IsInterfaceNil() bool { return mm == nil } diff --git a/heartbeat/mock/marshalizerStub.go b/heartbeat/mock/marshallerStub.go similarity index 63% rename from heartbeat/mock/marshalizerStub.go rename to heartbeat/mock/marshallerStub.go index 5addf29238c..43196626152 100644 --- a/heartbeat/mock/marshalizerStub.go +++ b/heartbeat/mock/marshallerStub.go @@ -1,13 +1,13 @@ package mock -// MarshalizerStub - -type MarshalizerStub struct { +// MarshallerStub - +type MarshallerStub struct { MarshalHandler func(obj interface{}) ([]byte, error) UnmarshalHandler func(obj interface{}, buff []byte) error } // Marshal - -func (ms MarshalizerStub) Marshal(obj interface{}) ([]byte, error) { +func (ms MarshallerStub) Marshal(obj interface{}) ([]byte, error) { if ms.MarshalHandler != nil { return ms.MarshalHandler(obj) } @@ -15,7 +15,7 @@ func (ms MarshalizerStub) Marshal(obj interface{}) ([]byte, error) { } // Unmarshal - -func (ms MarshalizerStub) Unmarshal(obj 
interface{}, buff []byte) error { +func (ms MarshallerStub) Unmarshal(obj interface{}, buff []byte) error { if ms.UnmarshalHandler != nil { return ms.UnmarshalHandler(obj, buff) } @@ -23,6 +23,6 @@ func (ms MarshalizerStub) Unmarshal(obj interface{}, buff []byte) error { } // IsInterfaceNil returns true if there is no value under the interface -func (ms *MarshalizerStub) IsInterfaceNil() bool { +func (ms *MarshallerStub) IsInterfaceNil() bool { return ms == nil } diff --git a/heartbeat/mock/messageHandlerStub.go b/heartbeat/mock/messageHandlerStub.go index 5c51abaa569..f65bfd2bf85 100644 --- a/heartbeat/mock/messageHandlerStub.go +++ b/heartbeat/mock/messageHandlerStub.go @@ -17,5 +17,9 @@ func (mhs *MessageHandlerStub) IsInterfaceNil() bool { // CreateHeartbeatFromP2PMessage - func (mhs *MessageHandlerStub) CreateHeartbeatFromP2PMessage(message p2p.MessageP2P) (*data.Heartbeat, error) { - return mhs.CreateHeartbeatFromP2PMessageCalled(message) + if mhs.CreateHeartbeatFromP2PMessageCalled != nil { + return mhs.CreateHeartbeatFromP2PMessageCalled(message) + } + + return &data.Heartbeat{}, nil } diff --git a/heartbeat/mock/messengerStub.go b/heartbeat/mock/messengerStub.go index 0b1f4b15c91..0fc10e88915 100644 --- a/heartbeat/mock/messengerStub.go +++ b/heartbeat/mock/messengerStub.go @@ -2,22 +2,14 @@ package mock import ( "github.com/ElrondNetwork/elrond-go-core/core" - "github.com/ElrondNetwork/elrond-go/p2p" ) // MessengerStub - type MessengerStub struct { - IDCalled func() core.PeerID - CloseCalled func() error - CreateTopicCalled func(name string, createChannelForTopic bool) error - HasTopicCalled func(name string) bool - BroadcastOnChannelCalled func(channel string, topic string, buff []byte) - BroadcastCalled func(topic string, buff []byte) - RegisterMessageProcessorCalled func(topic string, identifier string, handler p2p.MessageProcessor) error - BootstrapCalled func() error - PeerAddressesCalled func(pid core.PeerID) []string - BroadcastOnChannelBlockingCalled func(channel string, topic string, buff []byte) error - IsConnectedToTheNetworkCalled func() bool + IDCalled func() core.PeerID + BroadcastCalled func(topic string, buff []byte) + SignCalled func(payload []byte) ([]byte, error) + VerifyCalled func(payload []byte, pid core.PeerID, signature []byte) error } // ID - @@ -29,14 +21,6 @@ func (ms *MessengerStub) ID() core.PeerID { return "" } -// RegisterMessageProcessor - -func (ms *MessengerStub) RegisterMessageProcessor(topic string, identifier string, handler p2p.MessageProcessor) error { - if ms.RegisterMessageProcessorCalled != nil { - return ms.RegisterMessageProcessorCalled(topic, identifier, handler) - } - return nil -} - // Broadcast - func (ms *MessengerStub) Broadcast(topic string, buff []byte) { if ms.BroadcastCalled != nil { @@ -44,58 +28,24 @@ func (ms *MessengerStub) Broadcast(topic string, buff []byte) { } } -// Close - -func (ms *MessengerStub) Close() error { - if ms.CloseCalled != nil { - return ms.CloseCalled() +// Sign - +func (ms *MessengerStub) Sign(payload []byte) ([]byte, error) { + if ms.SignCalled != nil { + return ms.SignCalled(payload) } - return nil + return make([]byte, 0), nil } -// CreateTopic - -func (ms *MessengerStub) CreateTopic(name string, createChannelForTopic bool) error { - if ms.CreateTopicCalled != nil { - return ms.CreateTopicCalled(name, createChannelForTopic) +// Verify - +func (ms *MessengerStub) Verify(payload []byte, pid core.PeerID, signature []byte) error { + if ms.VerifyCalled != nil { + return ms.VerifyCalled(payload, pid, 
signature) } return nil } -// HasTopic - -func (ms *MessengerStub) HasTopic(name string) bool { - if ms.HasTopicCalled != nil { - return ms.HasTopicCalled(name) - } - - return false -} - -// BroadcastOnChannel - -func (ms *MessengerStub) BroadcastOnChannel(channel string, topic string, buff []byte) { - ms.BroadcastOnChannelCalled(channel, topic, buff) -} - -// Bootstrap - -func (ms *MessengerStub) Bootstrap() error { - return ms.BootstrapCalled() -} - -// PeerAddresses - -func (ms *MessengerStub) PeerAddresses(pid core.PeerID) []string { - return ms.PeerAddressesCalled(pid) -} - -// BroadcastOnChannelBlocking - -func (ms *MessengerStub) BroadcastOnChannelBlocking(channel string, topic string, buff []byte) error { - return ms.BroadcastOnChannelBlockingCalled(channel, topic, buff) -} - -// IsConnectedToTheNetwork - -func (ms *MessengerStub) IsConnectedToTheNetwork() bool { - return ms.IsConnectedToTheNetworkCalled() -} - // IsInterfaceNil returns true if there is no value under the interface func (ms *MessengerStub) IsInterfaceNil() bool { return ms == nil diff --git a/heartbeat/mock/peerSignatureHandlerStub.go b/heartbeat/mock/peerSignatureHandlerStub.go new file mode 100644 index 00000000000..1bef7146e86 --- /dev/null +++ b/heartbeat/mock/peerSignatureHandlerStub.go @@ -0,0 +1,35 @@ +package mock + +import ( + "github.com/ElrondNetwork/elrond-go-core/core" + "github.com/ElrondNetwork/elrond-go-crypto" +) + +// PeerSignatureHandlerStub - +type PeerSignatureHandlerStub struct { + VerifyPeerSignatureCalled func(pk []byte, pid core.PeerID, signature []byte) error + GetPeerSignatureCalled func(key crypto.PrivateKey, pid []byte) ([]byte, error) +} + +// VerifyPeerSignature - +func (stub *PeerSignatureHandlerStub) VerifyPeerSignature(pk []byte, pid core.PeerID, signature []byte) error { + if stub.VerifyPeerSignatureCalled != nil { + return stub.VerifyPeerSignatureCalled(pk, pid, signature) + } + + return nil +} + +// GetPeerSignature - +func (stub *PeerSignatureHandlerStub) GetPeerSignature(key crypto.PrivateKey, pid []byte) ([]byte, error) { + if stub.GetPeerSignatureCalled != nil { + return stub.GetPeerSignatureCalled(key, pid) + } + + return make([]byte, 0), nil +} + +// IsInterfaceNil - +func (stub *PeerSignatureHandlerStub) IsInterfaceNil() bool { + return stub == nil +} diff --git a/heartbeat/mock/senderHandlerStub.go b/heartbeat/mock/senderHandlerStub.go new file mode 100644 index 00000000000..d4340465f48 --- /dev/null +++ b/heartbeat/mock/senderHandlerStub.go @@ -0,0 +1,38 @@ +package mock + +import "time" + +// SenderHandlerStub - +type SenderHandlerStub struct { + ExecutionReadyChannelCalled func() <-chan time.Time + ExecuteCalled func() + CloseCalled func() +} + +// ExecutionReadyChannel - +func (stub *SenderHandlerStub) ExecutionReadyChannel() <-chan time.Time { + if stub.ExecutionReadyChannelCalled != nil { + return stub.ExecutionReadyChannelCalled() + } + + return nil +} + +// Execute - +func (stub *SenderHandlerStub) Execute() { + if stub.ExecuteCalled != nil { + stub.ExecuteCalled() + } +} + +// Close - +func (stub *SenderHandlerStub) Close() { + if stub.CloseCalled != nil { + stub.CloseCalled() + } +} + +// IsInterfaceNil - +func (stub *SenderHandlerStub) IsInterfaceNil() bool { + return stub == nil +} diff --git a/heartbeat/mock/timerHandlerStub.go b/heartbeat/mock/timerHandlerStub.go new file mode 100644 index 00000000000..5b5536161c5 --- /dev/null +++ b/heartbeat/mock/timerHandlerStub.go @@ -0,0 +1,33 @@ +package mock + +import "time" + +// TimerHandlerStub - +type 
TimerHandlerStub struct { + CreateNewTimerCalled func(duration time.Duration) + ExecutionReadyChannelCalled func() <-chan time.Time + CloseCalled func() +} + +// CreateNewTimer - +func (stub *TimerHandlerStub) CreateNewTimer(duration time.Duration) { + if stub.CreateNewTimerCalled != nil { + stub.CreateNewTimerCalled(duration) + } +} + +// ExecutionReadyChannel - +func (stub *TimerHandlerStub) ExecutionReadyChannel() <-chan time.Time { + if stub.ExecutionReadyChannelCalled != nil { + return stub.ExecutionReadyChannelCalled() + } + + return nil +} + +// Close - +func (stub *TimerHandlerStub) Close() { + if stub.CloseCalled != nil { + stub.CloseCalled() + } +} diff --git a/heartbeat/monitor/monitor.go b/heartbeat/monitor/monitor.go new file mode 100644 index 00000000000..563ef57f69b --- /dev/null +++ b/heartbeat/monitor/monitor.go @@ -0,0 +1,202 @@ +package monitor + +import ( + "fmt" + "sort" + "strings" + "time" + + "github.com/ElrondNetwork/elrond-go-core/core" + "github.com/ElrondNetwork/elrond-go-core/core/check" + "github.com/ElrondNetwork/elrond-go-core/marshal" + logger "github.com/ElrondNetwork/elrond-go-logger" + "github.com/ElrondNetwork/elrond-go/common" + "github.com/ElrondNetwork/elrond-go/heartbeat" + "github.com/ElrondNetwork/elrond-go/heartbeat/data" + "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/storage" +) + +var log = logger.GetOrCreate("heartbeat/monitor") + +const minDuration = time.Second + +// ArgHeartbeatV2Monitor holds the arguments needed to create a new instance of heartbeatV2Monitor +type ArgHeartbeatV2Monitor struct { + Cache storage.Cacher + PubKeyConverter core.PubkeyConverter + Marshaller marshal.Marshalizer + PeerShardMapper process.PeerShardMapper + MaxDurationPeerUnresponsive time.Duration + HideInactiveValidatorInterval time.Duration + ShardId uint32 +} + +type heartbeatV2Monitor struct { + cache storage.Cacher + pubKeyConverter core.PubkeyConverter + marshaller marshal.Marshalizer + peerShardMapper process.PeerShardMapper + maxDurationPeerUnresponsive time.Duration + hideInactiveValidatorInterval time.Duration + shardId uint32 +} + +// NewHeartbeatV2Monitor creates a new instance of heartbeatV2Monitor +func NewHeartbeatV2Monitor(args ArgHeartbeatV2Monitor) (*heartbeatV2Monitor, error) { + err := checkArgs(args) + if err != nil { + return nil, err + } + + return &heartbeatV2Monitor{ + cache: args.Cache, + pubKeyConverter: args.PubKeyConverter, + marshaller: args.Marshaller, + peerShardMapper: args.PeerShardMapper, + maxDurationPeerUnresponsive: args.MaxDurationPeerUnresponsive, + hideInactiveValidatorInterval: args.HideInactiveValidatorInterval, + shardId: args.ShardId, + }, nil +} + +func checkArgs(args ArgHeartbeatV2Monitor) error { + if check.IfNil(args.Cache) { + return heartbeat.ErrNilCacher + } + if check.IfNil(args.PubKeyConverter) { + return heartbeat.ErrNilPubkeyConverter + } + if check.IfNil(args.Marshaller) { + return heartbeat.ErrNilMarshaller + } + if check.IfNil(args.PeerShardMapper) { + return heartbeat.ErrNilPeerShardMapper + } + if args.MaxDurationPeerUnresponsive < minDuration { + return fmt.Errorf("%w on MaxDurationPeerUnresponsive, provided %d, min expected %d", + heartbeat.ErrInvalidTimeDuration, args.MaxDurationPeerUnresponsive, minDuration) + } + if args.HideInactiveValidatorInterval < minDuration { + return fmt.Errorf("%w on HideInactiveValidatorInterval, provided %d, min expected %d", + heartbeat.ErrInvalidTimeDuration, args.HideInactiveValidatorInterval, minDuration) + } + + return nil +} + 
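// Illustrative sketch, not part of this patch: how a caller might wire up the
// monitor introduced above. exampleNewMonitor is a hypothetical name, the stub
// components are the same testscommon/p2pmocks stubs used by monitor_test.go
// further below (a real node would pass its actual cacher, pubkey converter,
// marshaller and peer shard mapper), and the durations are arbitrary values
// mirroring the ones used in the tests. Assumes the extra testscommon and
// p2pmocks imports on top of the ones already present in this file.
func exampleNewMonitor() ([]data.PubKeyHeartbeat, error) {
	args := ArgHeartbeatV2Monitor{
		Cache:                         testscommon.NewCacherMock(),
		PubKeyConverter:               &testscommon.PubkeyConverterMock{},
		Marshaller:                    &testscommon.MarshalizerMock{},
		PeerShardMapper:               &p2pmocks.NetworkShardingCollectorStub{},
		MaxDurationPeerUnresponsive:   time.Second * 3,
		HideInactiveValidatorInterval: time.Second * 5,
		ShardId:                       0,
	}

	monitor, err := NewHeartbeatV2Monitor(args)
	if err != nil {
		return nil, err
	}

	// GetHeartbeats returns the []data.PubKeyHeartbeat slice that is later
	// exposed through the node API.
	return monitor.GetHeartbeats(), nil
}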
+// GetHeartbeats returns the heartbeat status +func (monitor *heartbeatV2Monitor) GetHeartbeats() []data.PubKeyHeartbeat { + numInstances := make(map[string]uint64) + + pids := monitor.cache.Keys() + + heartbeatsV2 := make([]data.PubKeyHeartbeat, 0) + for idx := 0; idx < len(pids); idx++ { + pid := pids[idx] + hb, ok := monitor.cache.Get(pid) + if !ok { + continue + } + + peerId := core.PeerID(pid) + heartbeatData, err := monitor.parseMessage(peerId, hb, numInstances) + if err != nil { + log.Debug("could not parse message for pid", "pid", peerId.Pretty(), "error", err.Error()) + continue + } + + heartbeatsV2 = append(heartbeatsV2, heartbeatData) + } + + for idx := range heartbeatsV2 { + hbData := &heartbeatsV2[idx] + pk := hbData.PublicKey + hbData.NumInstances = numInstances[pk] + } + + sort.Slice(heartbeatsV2, func(i, j int) bool { + return strings.Compare(heartbeatsV2[i].PublicKey, heartbeatsV2[j].PublicKey) < 0 + }) + + return heartbeatsV2 +} + +func (monitor *heartbeatV2Monitor) parseMessage(pid core.PeerID, message interface{}, numInstances map[string]uint64) (data.PubKeyHeartbeat, error) { + pubKeyHeartbeat := data.PubKeyHeartbeat{} + + heartbeatV2, ok := message.(*heartbeat.HeartbeatV2) + if !ok { + return pubKeyHeartbeat, process.ErrWrongTypeAssertion + } + + payload := heartbeat.Payload{} + err := monitor.marshaller.Unmarshal(&payload, heartbeatV2.Payload) + if err != nil { + return pubKeyHeartbeat, err + } + + peerInfo := monitor.peerShardMapper.GetPeerInfo(pid) + + crtTime := time.Now() + messageAge := monitor.getMessageAge(crtTime, payload.Timestamp) + stringType := peerInfo.PeerType.String() + if monitor.shouldSkipMessage(messageAge, stringType) { + return pubKeyHeartbeat, heartbeat.ErrShouldSkipValidator + } + + pk := monitor.pubKeyConverter.Encode(peerInfo.PkBytes) + numInstances[pk]++ + + pubKeyHeartbeat = data.PubKeyHeartbeat{ + PublicKey: pk, + TimeStamp: crtTime, + IsActive: monitor.isActive(messageAge), + ReceivedShardID: monitor.shardId, + ComputedShardID: peerInfo.ShardID, + VersionNumber: heartbeatV2.GetVersionNumber(), + NodeDisplayName: heartbeatV2.GetNodeDisplayName(), + Identity: heartbeatV2.GetIdentity(), + PeerType: stringType, + Nonce: heartbeatV2.GetNonce(), + PeerSubType: heartbeatV2.GetPeerSubType(), + PidString: pid.Pretty(), + } + + return pubKeyHeartbeat, nil +} + +func (monitor *heartbeatV2Monitor) getMessageAge(crtTime time.Time, messageTimestamp int64) time.Duration { + messageTime := time.Unix(messageTimestamp, 0) + msgAge := crtTime.Sub(messageTime) + return monitor.maxDuration(0, msgAge) +} + +func (monitor *heartbeatV2Monitor) maxDuration(first, second time.Duration) time.Duration { + if first > second { + return first + } + + return second +} + +func (monitor *heartbeatV2Monitor) isActive(messageAge time.Duration) bool { + return messageAge <= monitor.maxDurationPeerUnresponsive +} + +func (monitor *heartbeatV2Monitor) shouldSkipMessage(messageAge time.Duration, peerType string) bool { + isActive := monitor.isActive(messageAge) + isInactiveObserver := !isActive && + peerType != string(common.EligibleList) && + peerType != string(common.WaitingList) + if isInactiveObserver { + return messageAge > monitor.hideInactiveValidatorInterval + } + + return false +} + +// IsInterfaceNil returns true if there is no value under the interface +func (monitor *heartbeatV2Monitor) IsInterfaceNil() bool { + return monitor == nil +} diff --git a/heartbeat/monitor/monitor_test.go b/heartbeat/monitor/monitor_test.go new file mode 100644 index 
00000000000..0892e56cd37 --- /dev/null +++ b/heartbeat/monitor/monitor_test.go @@ -0,0 +1,344 @@ +package monitor + +import ( + "errors" + "fmt" + "strings" + "testing" + "time" + + "github.com/ElrondNetwork/elrond-go-core/core" + "github.com/ElrondNetwork/elrond-go-core/core/check" + "github.com/ElrondNetwork/elrond-go/common" + "github.com/ElrondNetwork/elrond-go/heartbeat" + "github.com/ElrondNetwork/elrond-go/heartbeat/data" + "github.com/ElrondNetwork/elrond-go/process" + processMocks "github.com/ElrondNetwork/elrond-go/process/mock" + "github.com/ElrondNetwork/elrond-go/testscommon" + "github.com/ElrondNetwork/elrond-go/testscommon/p2pmocks" + "github.com/stretchr/testify/assert" +) + +func createMockHeartbeatV2MonitorArgs() ArgHeartbeatV2Monitor { + return ArgHeartbeatV2Monitor{ + Cache: testscommon.NewCacherMock(), + PubKeyConverter: &testscommon.PubkeyConverterMock{}, + Marshaller: &testscommon.MarshalizerMock{}, + PeerShardMapper: &p2pmocks.NetworkShardingCollectorStub{}, + MaxDurationPeerUnresponsive: time.Second * 3, + HideInactiveValidatorInterval: time.Second * 5, + ShardId: 0, + } +} + +func createHeartbeatMessage(active bool) *heartbeat.HeartbeatV2 { + crtTime := time.Now() + providedAgeInSec := int64(1) + messageTimestamp := crtTime.Unix() - providedAgeInSec + + if !active { + messageTimestamp = crtTime.Unix() - int64(60) + } + + payload := heartbeat.Payload{ + Timestamp: messageTimestamp, + } + + marshaller := testscommon.MarshalizerMock{} + payloadBytes, _ := marshaller.Marshal(payload) + return &heartbeat.HeartbeatV2{ + Payload: payloadBytes, + VersionNumber: "v01", + NodeDisplayName: "node name", + Identity: "identity", + Nonce: 0, + PeerSubType: 0, + } +} + +func TestNewHeartbeatV2Monitor(t *testing.T) { + t.Parallel() + + t.Run("nil cache should error", func(t *testing.T) { + t.Parallel() + + args := createMockHeartbeatV2MonitorArgs() + args.Cache = nil + monitor, err := NewHeartbeatV2Monitor(args) + assert.True(t, check.IfNil(monitor)) + assert.Equal(t, heartbeat.ErrNilCacher, err) + }) + t.Run("nil pub key converter should error", func(t *testing.T) { + t.Parallel() + + args := createMockHeartbeatV2MonitorArgs() + args.PubKeyConverter = nil + monitor, err := NewHeartbeatV2Monitor(args) + assert.True(t, check.IfNil(monitor)) + assert.Equal(t, heartbeat.ErrNilPubkeyConverter, err) + }) + t.Run("nil marshaller should error", func(t *testing.T) { + t.Parallel() + + args := createMockHeartbeatV2MonitorArgs() + args.Marshaller = nil + monitor, err := NewHeartbeatV2Monitor(args) + assert.True(t, check.IfNil(monitor)) + assert.Equal(t, heartbeat.ErrNilMarshaller, err) + }) + t.Run("nil peer shard mapper should error", func(t *testing.T) { + t.Parallel() + + args := createMockHeartbeatV2MonitorArgs() + args.PeerShardMapper = nil + monitor, err := NewHeartbeatV2Monitor(args) + assert.True(t, check.IfNil(monitor)) + assert.Equal(t, heartbeat.ErrNilPeerShardMapper, err) + }) + t.Run("invalid max duration peer unresponsive should error", func(t *testing.T) { + t.Parallel() + + args := createMockHeartbeatV2MonitorArgs() + args.MaxDurationPeerUnresponsive = time.Second - time.Nanosecond + monitor, err := NewHeartbeatV2Monitor(args) + assert.True(t, check.IfNil(monitor)) + assert.True(t, errors.Is(err, heartbeat.ErrInvalidTimeDuration)) + assert.True(t, strings.Contains(err.Error(), "MaxDurationPeerUnresponsive")) + }) + t.Run("invalid hide inactive validator interval should error", func(t *testing.T) { + t.Parallel() + + args := createMockHeartbeatV2MonitorArgs() + 
args.HideInactiveValidatorInterval = time.Second - time.Nanosecond + monitor, err := NewHeartbeatV2Monitor(args) + assert.True(t, check.IfNil(monitor)) + assert.True(t, errors.Is(err, heartbeat.ErrInvalidTimeDuration)) + assert.True(t, strings.Contains(err.Error(), "HideInactiveValidatorInterval")) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + monitor, err := NewHeartbeatV2Monitor(createMockHeartbeatV2MonitorArgs()) + assert.False(t, check.IfNil(monitor)) + assert.Nil(t, err) + }) +} + +func TestHeartbeatV2Monitor_parseMessage(t *testing.T) { + t.Parallel() + + t.Run("wrong message type should error", func(t *testing.T) { + t.Parallel() + + args := createMockHeartbeatV2MonitorArgs() + monitor, _ := NewHeartbeatV2Monitor(args) + assert.False(t, check.IfNil(monitor)) + + _, err := monitor.parseMessage("pid", "dummy msg", nil) + assert.Equal(t, process.ErrWrongTypeAssertion, err) + }) + t.Run("unmarshal returns error", func(t *testing.T) { + t.Parallel() + + args := createMockHeartbeatV2MonitorArgs() + monitor, _ := NewHeartbeatV2Monitor(args) + assert.False(t, check.IfNil(monitor)) + + message := createHeartbeatMessage(true) + message.Payload = []byte("dummy payload") + _, err := monitor.parseMessage("pid", message, nil) + assert.NotNil(t, err) + }) + t.Run("skippable message should return error", func(t *testing.T) { + t.Parallel() + + args := createMockHeartbeatV2MonitorArgs() + args.PeerShardMapper = &processMocks.PeerShardMapperStub{ + GetPeerInfoCalled: func(pid core.PeerID) core.P2PPeerInfo { + return core.P2PPeerInfo{ + PeerType: core.UnknownPeer, + } + }, + } + monitor, _ := NewHeartbeatV2Monitor(args) + assert.False(t, check.IfNil(monitor)) + + message := createHeartbeatMessage(false) + _, err := monitor.parseMessage("pid", message, nil) + assert.Equal(t, heartbeat.ErrShouldSkipValidator, err) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + providedPkBytes := []byte("provided pk") + args := createMockHeartbeatV2MonitorArgs() + args.PeerShardMapper = &processMocks.PeerShardMapperStub{ + GetPeerInfoCalled: func(pid core.PeerID) core.P2PPeerInfo { + return core.P2PPeerInfo{ + PkBytes: providedPkBytes, + } + }, + } + monitor, _ := NewHeartbeatV2Monitor(args) + assert.False(t, check.IfNil(monitor)) + + numInstances := make(map[string]uint64) + message := createHeartbeatMessage(true) + providedPid := core.PeerID("pid") + providedMap := map[string]struct{}{ + providedPid.Pretty(): {}, + } + hb, err := monitor.parseMessage(providedPid, message, numInstances) + assert.Nil(t, err) + checkResults(t, *message, hb, true, providedMap, 0) + assert.Equal(t, 0, len(providedMap)) + pid := args.PubKeyConverter.Encode(providedPkBytes) + entries, ok := numInstances[pid] + assert.True(t, ok) + assert.Equal(t, uint64(1), entries) + }) +} + +func TestHeartbeatV2Monitor_getMessageAge(t *testing.T) { + t.Parallel() + + args := createMockHeartbeatV2MonitorArgs() + monitor, _ := NewHeartbeatV2Monitor(args) + assert.False(t, check.IfNil(monitor)) + + crtTime := time.Now() + providedAgeInSec := int64(args.MaxDurationPeerUnresponsive.Seconds() - 1) + messageTimestamp := crtTime.Unix() - providedAgeInSec + + msgAge := monitor.getMessageAge(crtTime, messageTimestamp) + assert.Equal(t, providedAgeInSec, int64(msgAge.Seconds())) +} + +func TestHeartbeatV2Monitor_isActive(t *testing.T) { + t.Parallel() + + args := createMockHeartbeatV2MonitorArgs() + monitor, _ := NewHeartbeatV2Monitor(args) + assert.False(t, check.IfNil(monitor)) + + // negative age should not be active + 
assert.False(t, monitor.isActive(monitor.getMessageAge(time.Now(), -10))) + // one sec old message should be active + assert.True(t, monitor.isActive(time.Second)) + // too old messages should not be active + assert.False(t, monitor.isActive(args.MaxDurationPeerUnresponsive+time.Second)) +} + +func TestHeartbeatV2Monitor_shouldSkipMessage(t *testing.T) { + t.Parallel() + + args := createMockHeartbeatV2MonitorArgs() + monitor, _ := NewHeartbeatV2Monitor(args) + assert.False(t, check.IfNil(monitor)) + + // active + assert.False(t, monitor.shouldSkipMessage(time.Second, string(common.EligibleList))) + // inactive observer but should not hide yet + assert.False(t, monitor.shouldSkipMessage(args.HideInactiveValidatorInterval-time.Second, string(common.ObserverList))) + // inactive observer and too old should be hidden + assert.True(t, monitor.shouldSkipMessage(args.HideInactiveValidatorInterval+time.Second, string(common.ObserverList))) +} + +func TestHeartbeatV2Monitor_GetHeartbeats(t *testing.T) { + t.Parallel() + + t.Run("should work - one of the messages should be skipped", func(t *testing.T) { + t.Parallel() + + args := createMockHeartbeatV2MonitorArgs() + args.PeerShardMapper = &processMocks.PeerShardMapperStub{ + GetPeerInfoCalled: func(pid core.PeerID) core.P2PPeerInfo { + return core.P2PPeerInfo{ + PkBytes: pid.Bytes(), + PeerType: core.ObserverPeer, + } + }, + } + providedStatuses := []bool{true, true, false} + numOfMessages := len(providedStatuses) + providedPids := make(map[string]struct{}, numOfMessages) + providedMessages := make([]*heartbeat.HeartbeatV2, numOfMessages) + for i := 0; i < numOfMessages; i++ { + pid := core.PeerID(fmt.Sprintf("%s%d", "pid", i)) + providedPids[pid.Pretty()] = struct{}{} + providedMessages[i] = createHeartbeatMessage(providedStatuses[i]) + + args.Cache.Put(pid.Bytes(), providedMessages[i], providedMessages[i].Size()) + } + + monitor, _ := NewHeartbeatV2Monitor(args) + assert.False(t, check.IfNil(monitor)) + + heartbeats := monitor.GetHeartbeats() + assert.Equal(t, args.Cache.Len()-1, len(heartbeats)) + for i := 0; i < len(heartbeats); i++ { + checkResults(t, *providedMessages[i], heartbeats[i], providedStatuses[i], providedPids, 1) + } + assert.Equal(t, 1, len(providedPids)) // one message is skipped + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + args := createMockHeartbeatV2MonitorArgs() + providedStatuses := []bool{true, true, true} + numOfMessages := len(providedStatuses) + providedPids := make(map[string]struct{}, numOfMessages) + providedMessages := make([]*heartbeat.HeartbeatV2, numOfMessages) + for i := 0; i < numOfMessages; i++ { + pid := core.PeerID(fmt.Sprintf("%s%d", "pid", i)) + providedPids[pid.Pretty()] = struct{}{} + providedMessages[i] = createHeartbeatMessage(providedStatuses[i]) + + args.Cache.Put(pid.Bytes(), providedMessages[i], providedMessages[i].Size()) + } + counter := 0 + args.PeerShardMapper = &processMocks.PeerShardMapperStub{ + GetPeerInfoCalled: func(pid core.PeerID) core.P2PPeerInfo { + // Only first entry is unique, then all should have same pk + var info core.P2PPeerInfo + if counter == 0 { + info = core.P2PPeerInfo{ + PkBytes: pid.Bytes(), + } + } else { + info = core.P2PPeerInfo{ + PkBytes: []byte("same pk"), + } + } + counter++ + return info + }, + } + + monitor, _ := NewHeartbeatV2Monitor(args) + assert.False(t, check.IfNil(monitor)) + + heartbeats := monitor.GetHeartbeats() + assert.Equal(t, args.Cache.Len(), len(heartbeats)) + for i := 0; i < numOfMessages; i++ { + numInstances := uint64(1) + if 
i > 0 { + numInstances = 2 + } + checkResults(t, *providedMessages[i], heartbeats[i], providedStatuses[i], providedPids, numInstances) + } + assert.Equal(t, 0, len(providedPids)) + }) +} + +func checkResults(t *testing.T, message heartbeat.HeartbeatV2, hb data.PubKeyHeartbeat, isActive bool, providedPids map[string]struct{}, numInstances uint64) { + assert.Equal(t, isActive, hb.IsActive) + assert.Equal(t, message.VersionNumber, hb.VersionNumber) + assert.Equal(t, message.NodeDisplayName, hb.NodeDisplayName) + assert.Equal(t, message.Identity, hb.Identity) + assert.Equal(t, message.Nonce, hb.Nonce) + assert.Equal(t, message.PeerSubType, hb.PeerSubType) + assert.Equal(t, numInstances, hb.NumInstances) + _, ok := providedPids[hb.PidString] + assert.True(t, ok) + delete(providedPids, hb.PidString) +} diff --git a/heartbeat/process/messageProcessor.go b/heartbeat/process/messageProcessor.go index 5ebfec72239..b904c2a5d62 100644 --- a/heartbeat/process/messageProcessor.go +++ b/heartbeat/process/messageProcessor.go @@ -28,7 +28,7 @@ func NewMessageProcessor( return nil, heartbeat.ErrNilPeerSignatureHandler } if check.IfNil(marshalizer) { - return nil, heartbeat.ErrNilMarshalizer + return nil, heartbeat.ErrNilMarshaller } if check.IfNil(networkShardingCollector) { return nil, heartbeat.ErrNilNetworkShardingCollector @@ -68,7 +68,7 @@ func (mp *MessageProcessor) CreateHeartbeatFromP2PMessage(message p2p.MessageP2P } mp.networkShardingCollector.UpdatePeerIDInfo(message.Peer(), hbRecv.Pubkey, hbRecv.ShardID) - mp.networkShardingCollector.UpdatePeerIdSubType(message.Peer(), core.P2PPeerSubType(hbRecv.PeerSubType)) + mp.networkShardingCollector.PutPeerIdSubType(message.Peer(), core.P2PPeerSubType(hbRecv.PeerSubType)) return hbRecv, nil } diff --git a/heartbeat/process/messageProcessor_test.go b/heartbeat/process/messageProcessor_test.go index 06d796fa675..0a75c00a798 100644 --- a/heartbeat/process/messageProcessor_test.go +++ b/heartbeat/process/messageProcessor_test.go @@ -31,7 +31,7 @@ func TestNewMessageProcessor_PeerSignatureHandlerNilShouldErr(t *testing.T) { mon, err := process.NewMessageProcessor( nil, - &mock.MarshalizerStub{}, + &mock.MarshallerStub{}, &p2pmocks.NetworkShardingCollectorStub{}, ) @@ -39,7 +39,7 @@ func TestNewMessageProcessor_PeerSignatureHandlerNilShouldErr(t *testing.T) { assert.Equal(t, heartbeat.ErrNilPeerSignatureHandler, err) } -func TestNewMessageProcessor_MarshalizerNilShouldErr(t *testing.T) { +func TestNewMessageProcessor_MarshallerNilShouldErr(t *testing.T) { t.Parallel() mon, err := process.NewMessageProcessor( @@ -49,7 +49,7 @@ func TestNewMessageProcessor_MarshalizerNilShouldErr(t *testing.T) { ) assert.Nil(t, mon) - assert.Equal(t, heartbeat.ErrNilMarshalizer, err) + assert.Equal(t, heartbeat.ErrNilMarshaller, err) } func TestNewMessageProcessor_NetworkShardingCollectorNilShouldErr(t *testing.T) { @@ -57,7 +57,7 @@ func TestNewMessageProcessor_NetworkShardingCollectorNilShouldErr(t *testing.T) mon, err := process.NewMessageProcessor( &mock.PeerSignatureHandler{}, - &mock.MarshalizerStub{}, + &mock.MarshallerStub{}, nil, ) @@ -70,7 +70,7 @@ func TestNewMessageProcessor_ShouldWork(t *testing.T) { mon, err := process.NewMessageProcessor( &mock.PeerSignatureHandler{}, - &mock.MarshalizerStub{}, + &mock.MarshallerStub{}, &p2pmocks.NetworkShardingCollectorStub{}, ) @@ -215,7 +215,7 @@ func TestNewMessageProcessor_CreateHeartbeatFromP2PMessage(t *testing.T) { NodeDisplayName: "NodeDisplayName", } - marshalizer := &mock.MarshalizerStub{} + marshalizer := 
&mock.MarshallerStub{} marshalizer.UnmarshalHandler = func(obj interface{}, buff []byte) error { (obj.(*data.Heartbeat)).Pubkey = hb.Pubkey @@ -237,7 +237,7 @@ func TestNewMessageProcessor_CreateHeartbeatFromP2PMessage(t *testing.T) { UpdatePeerIDInfoCalled: func(pid core.PeerID, pk []byte, shardID uint32) { updatePeerInfoWasCalled = true }, - UpdatePeerIdSubTypeCalled: func(pid core.PeerID, peerSubType core.P2PPeerSubType) { + PutPeerIdSubTypeCalled: func(pid core.PeerID, peerSubType core.P2PPeerSubType) { updatePidSubTypeCalled = true }, }, @@ -274,7 +274,7 @@ func TestNewMessageProcessor_CreateHeartbeatFromP2PMessageInvalidPeerSignatureSh NodeDisplayName: "NodeDisplayName", } - marshalizer := &mock.MarshalizerStub{} + marshalizer := &mock.MarshallerStub{} marshalizer.UnmarshalHandler = func(obj interface{}, buff []byte) error { (obj.(*data.Heartbeat)).Pubkey = hb.Pubkey @@ -330,7 +330,7 @@ func TestNewMessageProcessor_CreateHeartbeatFromP2pMessageWithNilDataShouldErr(t mon, _ := process.NewMessageProcessor( &mock.PeerSignatureHandler{}, - &mock.MarshalizerStub{}, + &mock.MarshallerStub{}, &p2pmocks.NetworkShardingCollectorStub{}, ) @@ -357,7 +357,7 @@ func TestNewMessageProcessor_CreateHeartbeatFromP2pMessageWithUnmarshaliableData mon, _ := process.NewMessageProcessor( &mock.PeerSignatureHandler{}, - &mock.MarshalizerStub{ + &mock.MarshallerStub{ UnmarshalHandler: func(obj interface{}, buff []byte) error { return expectedErr }, @@ -391,7 +391,7 @@ func TestNewMessageProcessor_CreateHeartbeatFromP2PMessageWithTooLongLengthsShou NodeDisplayName: bigNodeName, } - marshalizer := &mock.MarshalizerStub{} + marshalizer := &mock.MarshallerStub{} marshalizer.UnmarshalHandler = func(obj interface{}, buff []byte) error { (obj.(*data.Heartbeat)).Pubkey = hb.Pubkey @@ -432,7 +432,7 @@ func TestNewMessageProcessor_CreateHeartbeatFromP2pNilMessageShouldErr(t *testin mon, _ := process.NewMessageProcessor( &mock.PeerSignatureHandler{}, - &mock.MarshalizerStub{}, + &mock.MarshallerStub{}, &p2pmocks.NetworkShardingCollectorStub{}, ) diff --git a/heartbeat/process/monitor.go b/heartbeat/process/monitor.go index 16addd0874d..828f4c622f2 100644 --- a/heartbeat/process/monitor.go +++ b/heartbeat/process/monitor.go @@ -10,6 +10,7 @@ import ( "time" "github.com/ElrondNetwork/elrond-go-core/core" + "github.com/ElrondNetwork/elrond-go-core/core/atomic" "github.com/ElrondNetwork/elrond-go-core/core/check" "github.com/ElrondNetwork/elrond-go-core/marshal" logger "github.com/ElrondNetwork/elrond-go-logger" @@ -19,6 +20,7 @@ import ( "github.com/ElrondNetwork/elrond-go/p2p" "github.com/ElrondNetwork/elrond-go/process" "github.com/ElrondNetwork/elrond-go/storage/timecache" + vmcommon "github.com/ElrondNetwork/elrond-vm-common" ) var log = logger.GetOrCreate("heartbeat/process") @@ -39,6 +41,8 @@ type ArgHeartbeatMonitor struct { HeartbeatRefreshIntervalInSec uint32 HideInactiveValidatorIntervalInSec uint32 AppStatusHandler core.AppStatusHandler + EpochNotifier vmcommon.EpochNotifier + HeartbeatDisableEpoch uint32 } // Monitor represents the heartbeat component that processes received heartbeat messages @@ -62,13 +66,15 @@ type Monitor struct { validatorPubkeyConverter core.PubkeyConverter heartbeatRefreshIntervalInSec uint32 hideInactiveValidatorIntervalInSec uint32 + flagHeartbeatDisableEpoch atomic.Flag + heartbeatDisableEpoch uint32 cancelFunc context.CancelFunc } // NewMonitor returns a new monitor instance func NewMonitor(arg ArgHeartbeatMonitor) (*Monitor, error) { if check.IfNil(arg.Marshalizer) { - return nil, 
heartbeat.ErrNilMarshalizer + return nil, heartbeat.ErrNilMarshaller } if check.IfNil(arg.PeerTypeProvider) { return nil, heartbeat.ErrNilPeerTypeProvider @@ -103,6 +109,9 @@ func NewMonitor(arg ArgHeartbeatMonitor) (*Monitor, error) { if arg.HideInactiveValidatorIntervalInSec == 0 { return nil, heartbeat.ErrZeroHideInactiveValidatorIntervalInSec } + if check.IfNil(arg.EpochNotifier) { + return nil, heartbeat.ErrNilEpochNotifier + } ctx, cancelFunc := context.WithCancel(context.Background()) @@ -122,6 +131,7 @@ func NewMonitor(arg ArgHeartbeatMonitor) (*Monitor, error) { heartbeatRefreshIntervalInSec: arg.HeartbeatRefreshIntervalInSec, hideInactiveValidatorIntervalInSec: arg.HideInactiveValidatorIntervalInSec, doubleSignerPeers: make(map[string]process.TimeCacher), + heartbeatDisableEpoch: arg.HeartbeatDisableEpoch, cancelFunc: cancelFunc, } @@ -140,6 +150,8 @@ func NewMonitor(arg ArgHeartbeatMonitor) (*Monitor, error) { log.Debug("heartbeat can't load public keys from storage", "error", err.Error()) } + arg.EpochNotifier.RegisterNotifyHandler(mon) + mon.startValidatorProcessing(ctx) return mon, nil @@ -242,6 +254,10 @@ func (m *Monitor) loadHeartbeatsFromStorer(pubKey string) (*heartbeatMessageInfo // ProcessReceivedMessage satisfies the p2p.MessageProcessor interface so it can be called // by the p2p subsystem each time a new heartbeat message arrives func (m *Monitor) ProcessReceivedMessage(message p2p.MessageP2P, fromConnectedPeer core.PeerID) error { + if m.flagHeartbeatDisableEpoch.IsSet() { + return nil + } + if check.IfNil(message) { return heartbeat.ErrNilMessage } @@ -296,6 +312,12 @@ func (m *Monitor) ProcessReceivedMessage(message p2p.MessageP2P, fromConnectedPe return nil } +// EpochConfirmed is called whenever an epoch is confirmed +func (m *Monitor) EpochConfirmed(epoch uint32, _ uint64) { + m.flagHeartbeatDisableEpoch.SetValue(epoch >= m.heartbeatDisableEpoch) + log.Debug("heartbeat v1 monitor", "enabled", !m.flagHeartbeatDisableEpoch.IsSet()) +} + func (m *Monitor) addHeartbeatMessageToMap(hb *data.Heartbeat) { pubKeyStr := string(hb.Pubkey) m.mutHeartbeatMessages.Lock() diff --git a/heartbeat/process/monitorEdgeCases_test.go b/heartbeat/process/monitorEdgeCases_test.go index e0317814d99..060efeaeb0a 100644 --- a/heartbeat/process/monitorEdgeCases_test.go +++ b/heartbeat/process/monitorEdgeCases_test.go @@ -9,6 +9,8 @@ import ( "github.com/ElrondNetwork/elrond-go/heartbeat/mock" "github.com/ElrondNetwork/elrond-go/heartbeat/process" "github.com/ElrondNetwork/elrond-go/heartbeat/storage" + "github.com/ElrondNetwork/elrond-go/testscommon" + "github.com/ElrondNetwork/elrond-go/testscommon/epochNotifier" statusHandlerMock "github.com/ElrondNetwork/elrond-go/testscommon/statusHandler" "github.com/stretchr/testify/assert" ) @@ -23,7 +25,7 @@ func createMonitor( ) *process.Monitor { arg := process.ArgHeartbeatMonitor{ - Marshalizer: &mock.MarshalizerMock{}, + Marshalizer: &mock.MarshallerMock{}, MaxDurationPeerUnresponsive: maxDurationPeerUnresponsive, PubKeysMap: map[uint32][]string{0: {pkValidator}}, GenesisTime: genesisTime, @@ -32,11 +34,13 @@ func createMonitor( PeerTypeProvider: &mock.PeerTypeProviderStub{}, Timer: timer, AntifloodHandler: createMockP2PAntifloodHandler(), - HardforkTrigger: &mock.HardforkTriggerStub{}, + HardforkTrigger: &testscommon.HardforkTriggerStub{}, ValidatorPubkeyConverter: mock.NewPubkeyConverterMock(32), HeartbeatRefreshIntervalInSec: 1, HideInactiveValidatorIntervalInSec: 600, AppStatusHandler: &statusHandlerMock.AppStatusHandlerStub{}, + 
EpochNotifier: &epochNotifier.EpochNotifierStub{}, + HeartbeatDisableEpoch: 1, } mon, _ := process.NewMonitor(arg) @@ -66,7 +70,7 @@ const twoHundredSeconds = 200 func TestMonitor_ObserverGapValidatorOffline(t *testing.T) { t.Parallel() - storer, _ := storage.NewHeartbeatDbStorer(mock.NewStorerMock(), &mock.MarshalizerMock{}) + storer, _ := storage.NewHeartbeatDbStorer(mock.NewStorerMock(), &mock.MarshallerMock{}) timer := mock.NewTimerMock() genesisTime := timer.Now() @@ -103,7 +107,7 @@ func TestMonitor_ObserverGapValidatorOffline(t *testing.T) { func TestMonitor_ObserverGapValidatorOnline(t *testing.T) { t.Parallel() - storer, _ := storage.NewHeartbeatDbStorer(mock.NewStorerMock(), &mock.MarshalizerMock{}) + storer, _ := storage.NewHeartbeatDbStorer(mock.NewStorerMock(), &mock.MarshallerMock{}) timer := mock.NewTimerMock() genesisTime := timer.Now() @@ -152,7 +156,7 @@ func TestMonitor_ObserverGapValidatorOnline(t *testing.T) { func TestMonitor_ObserverGapValidatorActiveUnitlMaxPeriodEnds(t *testing.T) { t.Parallel() - storer, _ := storage.NewHeartbeatDbStorer(mock.NewStorerMock(), &mock.MarshalizerMock{}) + storer, _ := storage.NewHeartbeatDbStorer(mock.NewStorerMock(), &mock.MarshallerMock{}) timer := mock.NewTimerMock() genesisTime := timer.Now() @@ -191,7 +195,7 @@ func TestMonitor_ObserverGapValidatorActiveUnitlMaxPeriodEnds(t *testing.T) { func TestMonitor_ObserverGapValidatorPartlyOnline1(t *testing.T) { t.Parallel() - storer, _ := storage.NewHeartbeatDbStorer(mock.NewStorerMock(), &mock.MarshalizerMock{}) + storer, _ := storage.NewHeartbeatDbStorer(mock.NewStorerMock(), &mock.MarshallerMock{}) timer := mock.NewTimerMock() genesisTime := timer.Now() @@ -243,7 +247,7 @@ func TestMonitor_ObserverGapValidatorPartlyOnline1(t *testing.T) { func TestMonitor_ObserverGapValidatorPartlyOnline2(t *testing.T) { t.Parallel() - storer, _ := storage.NewHeartbeatDbStorer(mock.NewStorerMock(), &mock.MarshalizerMock{}) + storer, _ := storage.NewHeartbeatDbStorer(mock.NewStorerMock(), &mock.MarshallerMock{}) timer := mock.NewTimerMock() genesisTime := timer.Now() @@ -295,7 +299,7 @@ func TestMonitor_ObserverGapValidatorPartlyOnline2(t *testing.T) { func TestMonitor_ObserverGapValidatorPartlyOnline3(t *testing.T) { t.Parallel() - storer, _ := storage.NewHeartbeatDbStorer(mock.NewStorerMock(), &mock.MarshalizerMock{}) + storer, _ := storage.NewHeartbeatDbStorer(mock.NewStorerMock(), &mock.MarshallerMock{}) timer := mock.NewTimerMock() genesisTime := timer.Now() @@ -347,7 +351,7 @@ func TestMonitor_ObserverGapValidatorPartlyOnline3(t *testing.T) { func TestMonitor_ObserverGapValidatorPartlyOnline4(t *testing.T) { t.Parallel() - storer, _ := storage.NewHeartbeatDbStorer(mock.NewStorerMock(), &mock.MarshalizerMock{}) + storer, _ := storage.NewHeartbeatDbStorer(mock.NewStorerMock(), &mock.MarshallerMock{}) timer := mock.NewTimerMock() genesisTime := timer.Now() @@ -396,7 +400,7 @@ func TestMonitor_ObserverGapValidatorPartlyOnline4(t *testing.T) { func TestMonitor_ObserverGapValidatorPartlyOnline5(t *testing.T) { t.Parallel() - storer, _ := storage.NewHeartbeatDbStorer(mock.NewStorerMock(), &mock.MarshalizerMock{}) + storer, _ := storage.NewHeartbeatDbStorer(mock.NewStorerMock(), &mock.MarshallerMock{}) timer := mock.NewTimerMock() genesisTime := timer.Now() @@ -447,7 +451,7 @@ func TestMonitor_ObserverGapValidatorPartlyOnline5(t *testing.T) { func TestMonitor_ObserverGapValidatorPartlyOnline6(t *testing.T) { t.Parallel() - storer, _ := storage.NewHeartbeatDbStorer(mock.NewStorerMock(), 
&mock.MarshalizerMock{}) + storer, _ := storage.NewHeartbeatDbStorer(mock.NewStorerMock(), &mock.MarshallerMock{}) timer := mock.NewTimerMock() genesisTime := timer.Now() @@ -493,7 +497,7 @@ func TestMonitor_ObserverGapValidatorPartlyOnline6(t *testing.T) { func TestMonitor_ObserverGapValidatorPartlyOnline7(t *testing.T) { t.Parallel() - storer, _ := storage.NewHeartbeatDbStorer(mock.NewStorerMock(), &mock.MarshalizerMock{}) + storer, _ := storage.NewHeartbeatDbStorer(mock.NewStorerMock(), &mock.MarshallerMock{}) timer := mock.NewTimerMock() genesisTime := timer.Now() diff --git a/heartbeat/process/monitor_test.go b/heartbeat/process/monitor_test.go index cfb6b1a9fd6..659737cc9ab 100644 --- a/heartbeat/process/monitor_test.go +++ b/heartbeat/process/monitor_test.go @@ -16,6 +16,8 @@ import ( "github.com/ElrondNetwork/elrond-go/heartbeat/process" "github.com/ElrondNetwork/elrond-go/heartbeat/storage" "github.com/ElrondNetwork/elrond-go/p2p" + "github.com/ElrondNetwork/elrond-go/testscommon" + "github.com/ElrondNetwork/elrond-go/testscommon/epochNotifier" statusHandlerMock "github.com/ElrondNetwork/elrond-go/testscommon/statusHandler" "github.com/stretchr/testify/assert" ) @@ -55,7 +57,7 @@ func createMockStorer() heartbeat.HeartbeatStorageHandler { func createMockArgHeartbeatMonitor() process.ArgHeartbeatMonitor { return process.ArgHeartbeatMonitor{ - Marshalizer: &mock.MarshalizerStub{}, + Marshalizer: &mock.MarshallerStub{}, MaxDurationPeerUnresponsive: 1, PubKeysMap: map[uint32][]string{0: {""}}, GenesisTime: time.Now(), @@ -72,17 +74,19 @@ func createMockArgHeartbeatMonitor() process.ArgHeartbeatMonitor { }, Timer: mock.NewTimerMock(), AntifloodHandler: createMockP2PAntifloodHandler(), - HardforkTrigger: &mock.HardforkTriggerStub{}, + HardforkTrigger: &testscommon.HardforkTriggerStub{}, ValidatorPubkeyConverter: mock.NewPubkeyConverterMock(96), HeartbeatRefreshIntervalInSec: 1, HideInactiveValidatorIntervalInSec: 600, AppStatusHandler: &statusHandlerMock.AppStatusHandlerStub{}, + EpochNotifier: &epochNotifier.EpochNotifierStub{}, + HeartbeatDisableEpoch: 1, } } -//------- NewMonitor +// ------- NewMonitor -func TestNewMonitor_NilMarshalizerShouldErr(t *testing.T) { +func TestNewMonitor_NilMarshallerShouldErr(t *testing.T) { t.Parallel() arg := createMockArgHeartbeatMonitor() @@ -90,7 +94,7 @@ func TestNewMonitor_NilMarshalizerShouldErr(t *testing.T) { mon, err := process.NewMonitor(arg) assert.Nil(t, mon) - assert.Equal(t, heartbeat.ErrNilMarshalizer, err) + assert.Equal(t, heartbeat.ErrNilMarshaller, err) } func TestNewMonitor_NilPublicKeyListShouldErr(t *testing.T) { @@ -203,6 +207,17 @@ func TestNewMonitor_ZeroHideInactiveVlidatorIntervalInHoursShouldErr(t *testing. 
assert.True(t, errors.Is(err, heartbeat.ErrZeroHideInactiveValidatorIntervalInSec)) } +func TestNewMonitor_NilEpochNotifierShouldErr(t *testing.T) { + t.Parallel() + + arg := createMockArgHeartbeatMonitor() + arg.EpochNotifier = nil + mon, err := process.NewMonitor(arg) + + assert.Nil(t, mon) + assert.Equal(t, heartbeat.ErrNilEpochNotifier, err) +} + func TestNewMonitor_OkValsShouldCreatePubkeyMap(t *testing.T) { t.Parallel() @@ -238,7 +253,7 @@ func TestNewMonitor_ShouldComputeShardId(t *testing.T) { assert.Equal(t, uint32(1), hbStatus[1].ComputedShardID) } -//------- ProcessReceivedMessage +// ------- ProcessReceivedMessage func TestMonitor_ProcessReceivedMessageShouldWork(t *testing.T) { t.Parallel() @@ -246,7 +261,7 @@ func TestMonitor_ProcessReceivedMessageShouldWork(t *testing.T) { pubKey := "pk1" arg := createMockArgHeartbeatMonitor() - arg.Marshalizer = &mock.MarshalizerStub{ + arg.Marshalizer = &mock.MarshallerStub{ UnmarshalHandler: func(obj interface{}, buff []byte) error { (obj.(*data.Heartbeat)).Pubkey = []byte(pubKey) return nil @@ -270,7 +285,7 @@ func TestMonitor_ProcessReceivedMessageShouldWork(t *testing.T) { err := mon.ProcessReceivedMessage(&mock.P2PMessageStub{DataField: hbBytes}, fromConnectedPeerId) assert.Nil(t, err) - //a delay is mandatory for the go routine to finish its job + // a delay is mandatory for the go routine to finish its job time.Sleep(time.Second) hbStatus := mon.GetHeartbeats() @@ -295,7 +310,7 @@ func TestMonitor_ProcessReceivedMessageProcessTriggerErrorShouldErr(t *testing.T return &rcvHb, nil }, } - arg.HardforkTrigger = &mock.HardforkTriggerStub{ + arg.HardforkTrigger = &testscommon.HardforkTriggerStub{ TriggerReceivedCalled: func(payload []byte, data []byte, pkBytes []byte) (bool, error) { triggerWasCalled = true @@ -310,7 +325,7 @@ func TestMonitor_ProcessReceivedMessageProcessTriggerErrorShouldErr(t *testing.T hbBytes, _ := json.Marshal(hb) err := mon.ProcessReceivedMessage(&mock.P2PMessageStub{DataField: hbBytes}, fromConnectedPeerId) - //a delay is mandatory for the go routine to finish its job + // a delay is mandatory for the go routine to finish its job time.Sleep(time.Second) assert.Equal(t, expectedErr, err) @@ -323,7 +338,7 @@ func TestMonitor_ProcessReceivedMessageWithNewPublicKey(t *testing.T) { pubKey := "pk1" arg := createMockArgHeartbeatMonitor() - arg.Marshalizer = &mock.MarshalizerStub{ + arg.Marshalizer = &mock.MarshallerStub{ UnmarshalHandler: func(obj interface{}, buff []byte) error { (obj.(*data.Heartbeat)).Pubkey = []byte(pubKey) return nil @@ -347,10 +362,10 @@ func TestMonitor_ProcessReceivedMessageWithNewPublicKey(t *testing.T) { err := mon.ProcessReceivedMessage(&mock.P2PMessageStub{DataField: hbBytes}, fromConnectedPeerId) assert.Nil(t, err) - //a delay is mandatory for the go routine to finish its job + // a delay is mandatory for the go routine to finish its job time.Sleep(time.Second) - //there should be 2 heartbeats, because a new one should have been added with pk2 + // there should be 2 heartbeats, because a new one should have been added with pk2 hbStatus := mon.GetHeartbeats() assert.Equal(t, 2, len(hbStatus)) assert.Equal(t, hex.EncodeToString([]byte(pubKey)), hbStatus[0].PublicKey) @@ -362,7 +377,7 @@ func TestMonitor_ProcessReceivedMessageWithNewShardID(t *testing.T) { pubKey := []byte("pk1") arg := createMockArgHeartbeatMonitor() - arg.Marshalizer = &mock.MarshalizerStub{ + arg.Marshalizer = &mock.MarshallerStub{ UnmarshalHandler: func(obj interface{}, buff []byte) error { var rcvdHb data.Heartbeat _ = 
json.Unmarshal(buff, &rcvdHb) @@ -395,7 +410,7 @@ func TestMonitor_ProcessReceivedMessageWithNewShardID(t *testing.T) { err = mon.ProcessReceivedMessage(&mock.P2PMessageStub{DataField: buffToSend}, fromConnectedPeerId) assert.Nil(t, err) - //a delay is mandatory for the go routine to finish its job + // a delay is mandatory for the go routine to finish its job time.Sleep(time.Second) hbStatus := mon.GetHeartbeats() @@ -429,9 +444,9 @@ func TestMonitor_ProcessReceivedMessageShouldSetPeerInactive(t *testing.T) { th := mock.NewTimerMock() pubKey1 := "pk1-should-stay-online" pubKey2 := "pk2-should-go-offline" - storer, _ := storage.NewHeartbeatDbStorer(mock.NewStorerMock(), &mock.MarshalizerMock{}) + storer, _ := storage.NewHeartbeatDbStorer(mock.NewStorerMock(), &mock.MarshallerMock{}) arg := createMockArgHeartbeatMonitor() - arg.Marshalizer = &mock.MarshalizerStub{ + arg.Marshalizer = &mock.MarshallerStub{ UnmarshalHandler: func(obj interface{}, buff []byte) error { var rcvdHb data.Heartbeat _ = json.Unmarshal(buff, &rcvdHb) @@ -470,7 +485,6 @@ func TestMonitor_ProcessReceivedMessageShouldSetPeerInactive(t *testing.T) { mon.RefreshHeartbeatMessageInfo() hbStatus := mon.GetHeartbeats() assert.Equal(t, 2, len(hbStatus)) - //assert.False(t, hbStatus[1].IsActive) // Now send a message from pk1 in order to see that pk2 is not active anymore err = sendHbMessageFromPubKey(pubKey1, mon) @@ -494,13 +508,13 @@ func TestMonitor_RemoveInactiveValidatorsIfIntervalExceeded(t *testing.T) { pubKey3 := "pk3-observer" pubKey4 := "pk4-inactive" - storer, _ := storage.NewHeartbeatDbStorer(mock.NewStorerMock(), &mock.MarshalizerMock{}) + storer, _ := storage.NewHeartbeatDbStorer(mock.NewStorerMock(), &mock.MarshallerMock{}) timer := mock.NewTimerMock() genesisTime := timer.Now() arg := process.ArgHeartbeatMonitor{ - Marshalizer: &mock.MarshalizerMock{}, + Marshalizer: &mock.MarshallerMock{}, MaxDurationPeerUnresponsive: unresponsiveDuration, PubKeysMap: map[uint32][]string{ 0: {pkValidator}, @@ -529,11 +543,12 @@ func TestMonitor_RemoveInactiveValidatorsIfIntervalExceeded(t *testing.T) { }, Timer: timer, AntifloodHandler: createMockP2PAntifloodHandler(), - HardforkTrigger: &mock.HardforkTriggerStub{}, + HardforkTrigger: &testscommon.HardforkTriggerStub{}, ValidatorPubkeyConverter: mock.NewPubkeyConverterMock(32), HeartbeatRefreshIntervalInSec: 1, HideInactiveValidatorIntervalInSec: 600, AppStatusHandler: &statusHandlerMock.AppStatusHandlerStub{}, + EpochNotifier: &epochNotifier.EpochNotifierStub{}, } mon, _ := process.NewMonitor(arg) mon.SendHeartbeatMessage(&data.Heartbeat{Pubkey: []byte(pkValidator)}) @@ -567,7 +582,7 @@ func TestMonitor_ProcessReceivedMessageImpersonatedMessageShouldErr(t *testing.T originator := core.PeerID("message originator") arg := createMockArgHeartbeatMonitor() - arg.Marshalizer = &mock.MarshalizerStub{ + arg.Marshalizer = &mock.MarshallerStub{ UnmarshalHandler: func(obj interface{}, buff []byte) error { (obj.(*data.Heartbeat)).Pubkey = []byte(pubKey) return nil @@ -620,6 +635,40 @@ func sendHbMessageFromPubKey(pubKey string, mon *process.Monitor) error { return err } +func TestMonitor_ProcessReceivedMessageShouldNotProcessAfterEpoch(t *testing.T) { + t.Parallel() + + providedEpoch := uint32(210) + args := createMockArgHeartbeatMonitor() + args.HeartbeatDisableEpoch = providedEpoch + + wasCanProcessMessageCalled := false + args.AntifloodHandler = &mock.P2PAntifloodHandlerStub{ + CanProcessMessageCalled: func(message p2p.MessageP2P, fromConnectedPeer core.PeerID) error { + 
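+ // reaching this stub means the monitor did not short-circuit on the heartbeat disable epoch flag and continued with regular message processing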
wasCanProcessMessageCalled = true + return nil + }, + } + + mon, err := process.NewMonitor(args) + assert.Nil(t, err) + assert.False(t, check.IfNil(mon)) + + message := &mock.P2PMessageStub{DataField: []byte("data field")} + + mon.EpochConfirmed(providedEpoch-1, 0) + err = mon.ProcessReceivedMessage(message, "pid") + assert.Nil(t, err) + assert.True(t, wasCanProcessMessageCalled) + + wasCanProcessMessageCalled = false + mon.EpochConfirmed(providedEpoch, 0) + err = mon.ProcessReceivedMessage(message, "pid") + assert.Nil(t, err) + assert.False(t, wasCanProcessMessageCalled) + +} + func TestMonitor_AddAndGetDoubleSignerPeersShouldWork(t *testing.T) { t.Parallel() diff --git a/heartbeat/process/sender.go b/heartbeat/process/sender.go index 86a61e34b5c..076d075a214 100644 --- a/heartbeat/process/sender.go +++ b/heartbeat/process/sender.go @@ -5,6 +5,7 @@ import ( "time" "github.com/ElrondNetwork/elrond-go-core/core" + "github.com/ElrondNetwork/elrond-go-core/core/atomic" "github.com/ElrondNetwork/elrond-go-core/core/check" "github.com/ElrondNetwork/elrond-go-core/marshal" "github.com/ElrondNetwork/elrond-go-crypto" @@ -12,48 +13,53 @@ import ( "github.com/ElrondNetwork/elrond-go/heartbeat" heartbeatData "github.com/ElrondNetwork/elrond-go/heartbeat/data" "github.com/ElrondNetwork/elrond-go/sharding" + vmcommon "github.com/ElrondNetwork/elrond-vm-common" ) const delayAfterHardforkMessageBroadcast = time.Second * 5 // ArgHeartbeatSender represents the arguments for the heartbeat sender type ArgHeartbeatSender struct { - PeerMessenger heartbeat.P2PMessenger - PeerSignatureHandler crypto.PeerSignatureHandler - PrivKey crypto.PrivateKey - Marshalizer marshal.Marshalizer - Topic string - ShardCoordinator sharding.Coordinator - PeerTypeProvider heartbeat.PeerTypeProviderHandler - PeerSubType core.P2PPeerSubType - StatusHandler core.AppStatusHandler - VersionNumber string - NodeDisplayName string - KeyBaseIdentity string - HardforkTrigger heartbeat.HardforkTrigger - CurrentBlockProvider heartbeat.CurrentBlockProvider - RedundancyHandler heartbeat.NodeRedundancyHandler + PeerMessenger heartbeat.P2PMessenger + PeerSignatureHandler crypto.PeerSignatureHandler + PrivKey crypto.PrivateKey + Marshalizer marshal.Marshalizer + Topic string + ShardCoordinator sharding.Coordinator + PeerTypeProvider heartbeat.PeerTypeProviderHandler + PeerSubType core.P2PPeerSubType + StatusHandler core.AppStatusHandler + VersionNumber string + NodeDisplayName string + KeyBaseIdentity string + HardforkTrigger heartbeat.HardforkTrigger + CurrentBlockProvider heartbeat.CurrentBlockProvider + RedundancyHandler heartbeat.NodeRedundancyHandler + EpochNotifier vmcommon.EpochNotifier + HeartbeatDisableEpoch uint32 } // Sender periodically sends heartbeat messages on a pubsub topic type Sender struct { - peerMessenger heartbeat.P2PMessenger - peerSignatureHandler crypto.PeerSignatureHandler - privKey crypto.PrivateKey - publicKey crypto.PublicKey - observerPublicKey crypto.PublicKey - marshalizer marshal.Marshalizer - shardCoordinator sharding.Coordinator - peerTypeProvider heartbeat.PeerTypeProviderHandler - peerSubType core.P2PPeerSubType - statusHandler core.AppStatusHandler - topic string - versionNumber string - nodeDisplayName string - keyBaseIdentity string - hardforkTrigger heartbeat.HardforkTrigger - currentBlockProvider heartbeat.CurrentBlockProvider - redundancy heartbeat.NodeRedundancyHandler + peerMessenger heartbeat.P2PMessenger + peerSignatureHandler crypto.PeerSignatureHandler + privKey crypto.PrivateKey + publicKey 
crypto.PublicKey + observerPublicKey crypto.PublicKey + marshalizer marshal.Marshalizer + shardCoordinator sharding.Coordinator + peerTypeProvider heartbeat.PeerTypeProviderHandler + peerSubType core.P2PPeerSubType + statusHandler core.AppStatusHandler + topic string + versionNumber string + nodeDisplayName string + keyBaseIdentity string + hardforkTrigger heartbeat.HardforkTrigger + currentBlockProvider heartbeat.CurrentBlockProvider + redundancy heartbeat.NodeRedundancyHandler + flagHeartbeatDisableEpoch atomic.Flag + heartbeatDisableEpoch uint32 } // NewSender will create a new sender instance @@ -68,7 +74,7 @@ func NewSender(arg ArgHeartbeatSender) (*Sender, error) { return nil, fmt.Errorf("%w for arg.PrivKey", heartbeat.ErrNilPrivateKey) } if check.IfNil(arg.Marshalizer) { - return nil, heartbeat.ErrNilMarshalizer + return nil, heartbeat.ErrNilMarshaller } if check.IfNil(arg.ShardCoordinator) { return nil, heartbeat.ErrNilShardCoordinator @@ -92,6 +98,9 @@ func NewSender(arg ArgHeartbeatSender) (*Sender, error) { if err != nil { return nil, err } + if check.IfNil(arg.EpochNotifier) { + return nil, heartbeat.ErrNilEpochNotifier + } observerPrivateKey := arg.RedundancyHandler.ObserverPrivateKey() if check.IfNil(observerPrivateKey) { @@ -99,30 +108,37 @@ func NewSender(arg ArgHeartbeatSender) (*Sender, error) { } sender := &Sender{ - peerMessenger: arg.PeerMessenger, - peerSignatureHandler: arg.PeerSignatureHandler, - privKey: arg.PrivKey, - publicKey: arg.PrivKey.GeneratePublic(), - observerPublicKey: observerPrivateKey.GeneratePublic(), - marshalizer: arg.Marshalizer, - topic: arg.Topic, - shardCoordinator: arg.ShardCoordinator, - peerTypeProvider: arg.PeerTypeProvider, - peerSubType: arg.PeerSubType, - statusHandler: arg.StatusHandler, - versionNumber: arg.VersionNumber, - nodeDisplayName: arg.NodeDisplayName, - keyBaseIdentity: arg.KeyBaseIdentity, - hardforkTrigger: arg.HardforkTrigger, - currentBlockProvider: arg.CurrentBlockProvider, - redundancy: arg.RedundancyHandler, + peerMessenger: arg.PeerMessenger, + peerSignatureHandler: arg.PeerSignatureHandler, + privKey: arg.PrivKey, + publicKey: arg.PrivKey.GeneratePublic(), + observerPublicKey: observerPrivateKey.GeneratePublic(), + marshalizer: arg.Marshalizer, + topic: arg.Topic, + shardCoordinator: arg.ShardCoordinator, + peerTypeProvider: arg.PeerTypeProvider, + peerSubType: arg.PeerSubType, + statusHandler: arg.StatusHandler, + versionNumber: arg.VersionNumber, + nodeDisplayName: arg.NodeDisplayName, + keyBaseIdentity: arg.KeyBaseIdentity, + hardforkTrigger: arg.HardforkTrigger, + currentBlockProvider: arg.CurrentBlockProvider, + redundancy: arg.RedundancyHandler, + heartbeatDisableEpoch: arg.HeartbeatDisableEpoch, } + arg.EpochNotifier.RegisterNotifyHandler(sender) + return sender, nil } // SendHeartbeat broadcasts a new heartbeat message func (s *Sender) SendHeartbeat() error { + if s.flagHeartbeatDisableEpoch.IsSet() { + return nil + } + nonce := uint64(0) crtBlock := s.currentBlockProvider.GetCurrentBlockHeader() if !check.IfNil(crtBlock) { @@ -144,7 +160,7 @@ func (s *Sender) SendHeartbeat() error { if isHardforkTriggered { isPayloadRecorded := len(triggerMessage) != 0 if isPayloadRecorded { - //beside sending the regular heartbeat message, send also the initial payload hardfork trigger message + // beside sending the regular heartbeat message, send also the initial payload hardfork trigger message // so that will be spread in an epidemic manner log.Debug("broadcasting stored hardfork message") 
s.peerMessenger.Broadcast(s.topic, triggerMessage) @@ -205,6 +221,12 @@ func (s *Sender) getCurrentPrivateAndPublicKeys() (crypto.PrivateKey, crypto.Pub return s.redundancy.ObserverPrivateKey(), s.observerPublicKey } +// EpochConfirmed is called whenever an epoch is confirmed +func (s *Sender) EpochConfirmed(epoch uint32, _ uint64) { + s.flagHeartbeatDisableEpoch.SetValue(epoch >= s.heartbeatDisableEpoch) + log.Debug("heartbeat v1 sender", "enabled", !s.flagHeartbeatDisableEpoch.IsSet()) +} + // IsInterfaceNil returns true if there is no value under the interface func (s *Sender) IsInterfaceNil() bool { return s == nil diff --git a/heartbeat/process/sender_test.go b/heartbeat/process/sender_test.go index 3653357e7e2..f91322253c0 100644 --- a/heartbeat/process/sender_test.go +++ b/heartbeat/process/sender_test.go @@ -13,11 +13,13 @@ import ( "github.com/ElrondNetwork/elrond-go/heartbeat/data" "github.com/ElrondNetwork/elrond-go/heartbeat/mock" "github.com/ElrondNetwork/elrond-go/heartbeat/process" + "github.com/ElrondNetwork/elrond-go/testscommon" + "github.com/ElrondNetwork/elrond-go/testscommon/epochNotifier" statusHandlerMock "github.com/ElrondNetwork/elrond-go/testscommon/statusHandler" "github.com/stretchr/testify/assert" ) -//------- NewSender +// ------- NewSender func createMockArgHeartbeatSender() process.ArgHeartbeatSender { return process.ArgHeartbeatSender{ @@ -26,20 +28,22 @@ func createMockArgHeartbeatSender() process.ArgHeartbeatSender { }, PeerSignatureHandler: &mock.PeerSignatureHandler{}, PrivKey: &mock.PrivateKeyStub{}, - Marshalizer: &mock.MarshalizerStub{ - MarshalHandler: func(obj interface{}) (i []byte, e error) { + Marshalizer: &testscommon.MarshalizerStub{ + MarshalCalled: func(obj interface{}) (i []byte, e error) { return nil, nil }, }, - Topic: "", - ShardCoordinator: &mock.ShardCoordinatorMock{}, - PeerTypeProvider: &mock.PeerTypeProviderStub{}, - StatusHandler: &statusHandlerMock.AppStatusHandlerStub{}, - VersionNumber: "v0.1", - NodeDisplayName: "undefined", - HardforkTrigger: &mock.HardforkTriggerStub{}, - CurrentBlockProvider: &mock.CurrentBlockProviderStub{}, - RedundancyHandler: &mock.RedundancyHandlerStub{}, + Topic: "", + ShardCoordinator: &mock.ShardCoordinatorMock{}, + PeerTypeProvider: &mock.PeerTypeProviderStub{}, + StatusHandler: &statusHandlerMock.AppStatusHandlerStub{}, + VersionNumber: "v0.1", + NodeDisplayName: "undefined", + HardforkTrigger: &testscommon.HardforkTriggerStub{}, + CurrentBlockProvider: &mock.CurrentBlockProviderStub{}, + RedundancyHandler: &mock.RedundancyHandlerStub{}, + EpochNotifier: &epochNotifier.EpochNotifierStub{}, + HeartbeatDisableEpoch: 1, } } @@ -87,7 +91,7 @@ func TestNewSender_NilPrivateKeyShouldErr(t *testing.T) { assert.True(t, errors.Is(err, heartbeat.ErrNilPrivateKey)) } -func TestNewSender_NilMarshalizerShouldErr(t *testing.T) { +func TestNewSender_NilMarshallerShouldErr(t *testing.T) { t.Parallel() arg := createMockArgHeartbeatSender() @@ -95,7 +99,7 @@ func TestNewSender_NilMarshalizerShouldErr(t *testing.T) { sender, err := process.NewSender(arg) assert.Nil(t, sender) - assert.Equal(t, heartbeat.ErrNilMarshalizer, err) + assert.Equal(t, heartbeat.ErrNilMarshaller, err) } func TestNewSender_NilPeerTypeProviderShouldErr(t *testing.T) { @@ -179,6 +183,17 @@ func TestNewSender_RedundancyHandlerReturnsANilObserverPrivateKeyShouldErr(t *te assert.True(t, errors.Is(err, heartbeat.ErrNilPrivateKey)) } +func TestNewSender_NilEpochNotifierShouldErr(t *testing.T) { + t.Parallel() + + arg := createMockArgHeartbeatSender() 
+ arg.EpochNotifier = nil + sender, err := process.NewSender(arg) + + assert.Nil(t, sender) + assert.Equal(t, heartbeat.ErrNilEpochNotifier, err) +} + func TestNewSender_ShouldWork(t *testing.T) { t.Parallel() @@ -189,7 +204,7 @@ func TestNewSender_ShouldWork(t *testing.T) { assert.Nil(t, err) } -//------- SendHeartbeat +// ------- SendHeartbeat func TestSender_SendHeartbeatGeneratePublicKeyErrShouldErr(t *testing.T) { t.Parallel() @@ -240,8 +255,8 @@ func testSendHeartbeat(t *testing.T, pubKeyErr, signErr, marshalErr error) { } arg.PeerSignatureHandler = &mock.PeerSignatureHandler{Signer: singleSigner} - arg.Marshalizer = &mock.MarshalizerStub{ - MarshalHandler: func(obj interface{}) (i []byte, e error) { + arg.Marshalizer = &testscommon.MarshalizerStub{ + MarshalCalled: func(obj interface{}) (i []byte, e error) { expectedErr = marshalErr return nil, marshalErr }, @@ -294,8 +309,8 @@ func TestSender_SendHeartbeatShouldWork(t *testing.T) { return pubKey }, } - arg.Marshalizer = &mock.MarshalizerStub{ - MarshalHandler: func(obj interface{}) (i []byte, e error) { + arg.Marshalizer = &testscommon.MarshalizerStub{ + MarshalCalled: func(obj interface{}) (i []byte, e error) { hb, ok := obj.(*data.Heartbeat) if ok { pubkeyBytes, _ := pubKey.ToByteArray() @@ -338,7 +353,7 @@ func TestSender_SendHeartbeatNotABackupNodeShouldWork(t *testing.T) { genPubKeyCalled := false arg := createMockArgHeartbeatSender() - arg.Marshalizer = &mock.MarshalizerMock{} + arg.Marshalizer = &testscommon.MarshalizerMock{} arg.Topic = testTopic arg.PeerMessenger = &mock.MessengerStub{ BroadcastCalled: func(topic string, buff []byte) { @@ -410,7 +425,7 @@ func TestSender_SendHeartbeatBackupNodeShouldWork(t *testing.T) { } }, } - arg.Marshalizer = &mock.MarshalizerMock{} + arg.Marshalizer = &testscommon.MarshalizerMock{} arg.Topic = testTopic arg.PeerMessenger = &mock.MessengerStub{ BroadcastCalled: func(topic string, buff []byte) { @@ -482,7 +497,7 @@ func TestSender_SendHeartbeatIsBackupNodeButMainIsNotActiveShouldWork(t *testing } }, } - arg.Marshalizer = &mock.MarshalizerMock{} + arg.Marshalizer = &testscommon.MarshalizerMock{} arg.Topic = testTopic arg.PeerMessenger = &mock.MessengerStub{ BroadcastCalled: func(topic string, buff []byte) { @@ -561,8 +576,8 @@ func TestSender_SendHeartbeatAfterTriggerShouldWork(t *testing.T) { return pubKey }, } - arg.Marshalizer = &mock.MarshalizerStub{ - MarshalHandler: func(obj interface{}) (i []byte, e error) { + arg.Marshalizer = &testscommon.MarshalizerStub{ + MarshalCalled: func(obj interface{}) (i []byte, e error) { hb, ok := obj.(*data.Heartbeat) if ok { pubkeyBytes, _ := pubKey.ToByteArray() @@ -578,7 +593,7 @@ func TestSender_SendHeartbeatAfterTriggerShouldWork(t *testing.T) { return nil, nil }, } - arg.HardforkTrigger = &mock.HardforkTriggerStub{ + arg.HardforkTrigger = &testscommon.HardforkTriggerStub{ RecordedTriggerMessageCalled: func() (i []byte, b bool) { return nil, true }, @@ -645,8 +660,8 @@ func TestSender_SendHeartbeatAfterTriggerWithRecorededPayloadShouldWork(t *testi return pubKey }, } - arg.Marshalizer = &mock.MarshalizerStub{ - MarshalHandler: func(obj interface{}) (i []byte, e error) { + arg.Marshalizer = &testscommon.MarshalizerStub{ + MarshalCalled: func(obj interface{}) (i []byte, e error) { hb, ok := obj.(*data.Heartbeat) if ok { pubkeyBytes, _ := pubKey.ToByteArray() @@ -661,7 +676,7 @@ func TestSender_SendHeartbeatAfterTriggerWithRecorededPayloadShouldWork(t *testi return nil, nil }, } - arg.HardforkTrigger = &mock.HardforkTriggerStub{ + 
arg.HardforkTrigger = &testscommon.HardforkTriggerStub{ RecordedTriggerMessageCalled: func() (i []byte, b bool) { return originalTriggerPayload, true }, @@ -677,3 +692,31 @@ func TestSender_SendHeartbeatAfterTriggerWithRecorededPayloadShouldWork(t *testi assert.True(t, genPubKeyCalled) assert.True(t, marshalCalled) } + +func TestSender_SendHeartbeatShouldNotSendAfterEpoch(t *testing.T) { + t.Parallel() + + providedEpoch := uint32(210) + arg := createMockArgHeartbeatSender() + arg.HeartbeatDisableEpoch = providedEpoch + + wasBroadcastCalled := false + arg.PeerMessenger = &mock.MessengerStub{ + BroadcastCalled: func(topic string, buff []byte) { + wasBroadcastCalled = true + }, + } + + sender, _ := process.NewSender(arg) + + sender.EpochConfirmed(providedEpoch-1, 0) + err := sender.SendHeartbeat() + assert.Nil(t, err) + assert.True(t, wasBroadcastCalled) + + wasBroadcastCalled = false + sender.EpochConfirmed(providedEpoch, 0) + err = sender.SendHeartbeat() + assert.Nil(t, err) + assert.False(t, wasBroadcastCalled) +} diff --git a/heartbeat/processor/directConnectionsProcessor.go b/heartbeat/processor/directConnectionsProcessor.go new file mode 100644 index 00000000000..6be6ac2653f --- /dev/null +++ b/heartbeat/processor/directConnectionsProcessor.go @@ -0,0 +1,147 @@ +package processor + +import ( + "context" + "fmt" + "time" + + "github.com/ElrondNetwork/elrond-go-core/core" + "github.com/ElrondNetwork/elrond-go-core/core/check" + "github.com/ElrondNetwork/elrond-go-core/marshal" + "github.com/ElrondNetwork/elrond-go/common" + "github.com/ElrondNetwork/elrond-go/heartbeat" + "github.com/ElrondNetwork/elrond-go/p2p" + "github.com/ElrondNetwork/elrond-go/p2p/message" + "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/sharding" +) + +// ArgDirectConnectionsProcessor represents the arguments for the direct connections processor +type ArgDirectConnectionsProcessor struct { + Messenger p2p.Messenger + Marshaller marshal.Marshalizer + ShardCoordinator sharding.Coordinator + DelayBetweenNotifications time.Duration +} + +type directConnectionsProcessor struct { + messenger p2p.Messenger + marshaller marshal.Marshalizer + shardCoordinator sharding.Coordinator + delayBetweenNotifications time.Duration + notifiedPeersMap map[core.PeerID]struct{} + cancel func() +} + +// NewDirectConnectionsProcessor creates a new instance of directConnectionsProcessor +func NewDirectConnectionsProcessor(args ArgDirectConnectionsProcessor) (*directConnectionsProcessor, error) { + err := checkArgDirectConnectionsProcessor(args) + if err != nil { + return nil, err + } + + dcp := &directConnectionsProcessor{ + messenger: args.Messenger, + marshaller: args.Marshaller, + shardCoordinator: args.ShardCoordinator, + delayBetweenNotifications: args.DelayBetweenNotifications, + notifiedPeersMap: make(map[core.PeerID]struct{}), + } + + var ctx context.Context + ctx, dcp.cancel = context.WithCancel(context.Background()) + + go dcp.startProcessLoop(ctx) + + return dcp, nil +} + +func checkArgDirectConnectionsProcessor(args ArgDirectConnectionsProcessor) error { + if check.IfNil(args.Messenger) { + return process.ErrNilMessenger + } + if check.IfNil(args.Marshaller) { + return process.ErrNilMarshalizer + } + if check.IfNil(args.ShardCoordinator) { + return process.ErrNilShardCoordinator + } + if args.DelayBetweenNotifications < minDelayBetweenRequests { + return fmt.Errorf("%w for DelayBetweenNotifications, provided %d, min expected %d", + heartbeat.ErrInvalidTimeDuration, 
args.DelayBetweenNotifications, minDelayBetweenRequests) + } + + return nil +} + +func (dcp *directConnectionsProcessor) startProcessLoop(ctx context.Context) { + timer := time.NewTimer(dcp.delayBetweenNotifications) + defer timer.Stop() + + for { + timer.Reset(dcp.delayBetweenNotifications) + + select { + case <-timer.C: + dcp.sendMessageToNewConnections() + case <-ctx.Done(): + log.Debug("closing directConnectionsProcessor go routine") + return + } + } +} + +func (dcp *directConnectionsProcessor) sendMessageToNewConnections() { + connectedPeers := dcp.messenger.ConnectedPeers() + newPeers := dcp.computeNewPeers(connectedPeers) + dcp.notifyNewPeers(newPeers) +} + +func (dcp *directConnectionsProcessor) computeNewPeers(connectedPeers []core.PeerID) []core.PeerID { + newPeers := make([]core.PeerID, 0) + + for _, connectedPeer := range connectedPeers { + _, wasNotified := dcp.notifiedPeersMap[connectedPeer] + if !wasNotified { + newPeers = append(newPeers, connectedPeer) + } + } + + return newPeers +} + +func (dcp *directConnectionsProcessor) notifyNewPeers(newPeers []core.PeerID) { + dcp.notifiedPeersMap = make(map[core.PeerID]struct{}) + + shardValidatorInfo := &message.DirectConnectionInfo{ + ShardId: fmt.Sprintf("%d", dcp.shardCoordinator.SelfId()), + } + + shardValidatorInfoBuff, err := dcp.marshaller.Marshal(shardValidatorInfo) + if err != nil { + return + } + + for _, newPeer := range newPeers { + errNotCritical := dcp.messenger.SendToConnectedPeer(common.ConnectionTopic, shardValidatorInfoBuff, newPeer) + if errNotCritical != nil { + log.Trace("directConnectionsProcessor.notifyNewPeers", "pid", newPeer.Pretty(), "error", errNotCritical) + continue + } + + dcp.notifiedPeersMap[newPeer] = struct{}{} + } +} + +// Close triggers the closing of the internal goroutine +func (dcp *directConnectionsProcessor) Close() error { + log.Debug("closing directConnectionsProcessor...") + dcp.cancel() + + return nil +} + +// IsInterfaceNil returns true if there is no value under the interface +func (dcp *directConnectionsProcessor) IsInterfaceNil() bool { + return dcp == nil +} diff --git a/heartbeat/processor/directConnectionsProcessor_test.go b/heartbeat/processor/directConnectionsProcessor_test.go new file mode 100644 index 00000000000..d8bbb36b815 --- /dev/null +++ b/heartbeat/processor/directConnectionsProcessor_test.go @@ -0,0 +1,267 @@ +package processor + +import ( + "errors" + "fmt" + "sort" + "strings" + "sync" + "testing" + "time" + + "github.com/ElrondNetwork/elrond-go-core/core" + "github.com/ElrondNetwork/elrond-go-core/core/check" + "github.com/ElrondNetwork/elrond-go-core/marshal" + "github.com/ElrondNetwork/elrond-go/common" + "github.com/ElrondNetwork/elrond-go/heartbeat" + "github.com/ElrondNetwork/elrond-go/p2p/message" + "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/testscommon" + "github.com/ElrondNetwork/elrond-go/testscommon/p2pmocks" + "github.com/stretchr/testify/assert" +) + +func createMockArgDirectConnectionsProcessor() ArgDirectConnectionsProcessor { + return ArgDirectConnectionsProcessor{ + Messenger: &p2pmocks.MessengerStub{}, + Marshaller: &marshal.GogoProtoMarshalizer{}, + ShardCoordinator: &testscommon.ShardsCoordinatorMock{}, + DelayBetweenNotifications: time.Second, + } +} + +func TestNewDirectConnectionsProcessor(t *testing.T) { + t.Parallel() + + t.Run("nil messenger should error", func(t *testing.T) { + t.Parallel() + + args := createMockArgDirectConnectionsProcessor() + args.Messenger = nil + + cp, err := 
NewDirectConnectionsProcessor(args) + assert.Equal(t, process.ErrNilMessenger, err) + assert.True(t, check.IfNil(cp)) + }) + t.Run("nil marshaller should error", func(t *testing.T) { + t.Parallel() + + args := createMockArgDirectConnectionsProcessor() + args.Marshaller = nil + + cp, err := NewDirectConnectionsProcessor(args) + assert.Equal(t, process.ErrNilMarshalizer, err) + assert.True(t, check.IfNil(cp)) + }) + t.Run("nil shard coordinator should error", func(t *testing.T) { + t.Parallel() + + args := createMockArgDirectConnectionsProcessor() + args.ShardCoordinator = nil + + cp, err := NewDirectConnectionsProcessor(args) + assert.Equal(t, process.ErrNilShardCoordinator, err) + assert.True(t, check.IfNil(cp)) + }) + t.Run("invalid delay should error", func(t *testing.T) { + t.Parallel() + + args := createMockArgDirectConnectionsProcessor() + args.DelayBetweenNotifications = time.Second - time.Nanosecond + + cp, err := NewDirectConnectionsProcessor(args) + assert.True(t, errors.Is(err, heartbeat.ErrInvalidTimeDuration)) + assert.True(t, strings.Contains(err.Error(), "DelayBetweenNotifications")) + assert.True(t, check.IfNil(cp)) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + cp, err := NewDirectConnectionsProcessor(createMockArgDirectConnectionsProcessor()) + assert.Nil(t, err) + assert.False(t, check.IfNil(cp)) + }) + t.Run("should work and process once", func(t *testing.T) { + t.Parallel() + + providedConnectedPeers := []core.PeerID{"pid1", "pid2", "pid3", "pid4", "pid5", "pid6"} + notifiedPeers := make([]core.PeerID, 0) + var mutNotifiedPeers sync.RWMutex + args := createMockArgDirectConnectionsProcessor() + expectedShard := fmt.Sprintf("%d", args.ShardCoordinator.SelfId()) + args.Messenger = &p2pmocks.MessengerStub{ + SendToConnectedPeerCalled: func(topic string, buff []byte, peerID core.PeerID) error { + mutNotifiedPeers.Lock() + defer mutNotifiedPeers.Unlock() + + shardValidatorInfo := &message.DirectConnectionInfo{} + err := args.Marshaller.Unmarshal(shardValidatorInfo, buff) + assert.Nil(t, err) + assert.Equal(t, expectedShard, shardValidatorInfo.ShardId) + + notifiedPeers = append(notifiedPeers, peerID) + return nil + }, + ConnectedPeersCalled: func() []core.PeerID { + return providedConnectedPeers + }, + } + args.DelayBetweenNotifications = 2 * time.Second + + cp, _ := NewDirectConnectionsProcessor(args) + assert.False(t, check.IfNil(cp)) + + time.Sleep(3 * time.Second) + _ = cp.Close() + + mutNotifiedPeers.Lock() + defer mutNotifiedPeers.Unlock() + + sort.Slice(notifiedPeers, func(i, j int) bool { + return notifiedPeers[i] < notifiedPeers[j] + }) + assert.Equal(t, providedConnectedPeers, notifiedPeers) + }) +} + +func Test_directConnectionsProcessor_computeNewPeers(t *testing.T) { + t.Parallel() + + t.Run("no peers connected", func(t *testing.T) { + t.Parallel() + + cp, _ := NewDirectConnectionsProcessorNoGoRoutine(createMockArgDirectConnectionsProcessor()) + assert.False(t, check.IfNil(cp)) + + providedNotifiedPeersMap := make(map[core.PeerID]struct{}) + providedNotifiedPeersMap["pid1"] = struct{}{} + providedNotifiedPeersMap["pid2"] = struct{}{} + + cp.notifiedPeersMap = providedNotifiedPeersMap + + newPeers := cp.computeNewPeers(nil) + assert.Equal(t, 0, len(newPeers)) + }) + t.Run("some connected peers are new", func(t *testing.T) { + t.Parallel() + + cp, _ := NewDirectConnectionsProcessorNoGoRoutine(createMockArgDirectConnectionsProcessor()) + assert.False(t, check.IfNil(cp)) + + providedNotifiedPeersMap := make(map[core.PeerID]struct{}) + 
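+ // pid1 and pid2 simulate peers notified in a previous pass; with pid2 and pid3 connected below, only pid3 should be reported as new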
providedNotifiedPeersMap["pid1"] = struct{}{} + providedNotifiedPeersMap["pid2"] = struct{}{} + + cp.notifiedPeersMap = providedNotifiedPeersMap + + connectedPeers := []core.PeerID{"pid2", "pid3"} + newPeers := cp.computeNewPeers(connectedPeers) + + assert.Equal(t, []core.PeerID{"pid3"}, newPeers) + }) + t.Run("all connected peers are new", func(t *testing.T) { + t.Parallel() + + cp, _ := NewDirectConnectionsProcessorNoGoRoutine(createMockArgDirectConnectionsProcessor()) + assert.False(t, check.IfNil(cp)) + + connectedPeers := []core.PeerID{"pid3", "pid4"} + newPeers := cp.computeNewPeers(connectedPeers) + + assert.Equal(t, connectedPeers, newPeers) + }) +} + +func Test_directConnectionsProcessor_notifyNewPeers(t *testing.T) { + t.Parallel() + + t.Run("marshal returns error", func(t *testing.T) { + t.Parallel() + + wasCalled := false + args := createMockArgDirectConnectionsProcessor() + args.Messenger = &p2pmocks.MessengerStub{ + SendToConnectedPeerCalled: func(topic string, buff []byte, peerID core.PeerID) error { + wasCalled = true + return nil + }, + } + args.Marshaller = &testscommon.MarshalizerStub{ + MarshalCalled: func(obj interface{}) ([]byte, error) { + return nil, errors.New("error") + }, + } + + cp, _ := NewDirectConnectionsProcessorNoGoRoutine(args) + assert.False(t, check.IfNil(cp)) + + cp.notifyNewPeers(nil) + assert.False(t, wasCalled) + }) + t.Run("no new peers", func(t *testing.T) { + t.Parallel() + + wasCalled := false + args := createMockArgDirectConnectionsProcessor() + args.Messenger = &p2pmocks.MessengerStub{ + SendToConnectedPeerCalled: func(topic string, buff []byte, peerID core.PeerID) error { + wasCalled = true + return nil + }, + } + + cp, _ := NewDirectConnectionsProcessorNoGoRoutine(args) + assert.False(t, check.IfNil(cp)) + + cp.notifyNewPeers(nil) + assert.False(t, wasCalled) + }) + t.Run("send returns error", func(t *testing.T) { + t.Parallel() + + providedPeer := core.PeerID("pid") + args := createMockArgDirectConnectionsProcessor() + args.Messenger = &p2pmocks.MessengerStub{ + SendToConnectedPeerCalled: func(topic string, buff []byte, peerID core.PeerID) error { + assert.Equal(t, common.ConnectionTopic, topic) + assert.Equal(t, providedPeer, peerID) + return errors.New("error") + }, + } + + cp, _ := NewDirectConnectionsProcessorNoGoRoutine(args) + assert.False(t, check.IfNil(cp)) + + cp.notifyNewPeers([]core.PeerID{providedPeer}) + assert.Equal(t, 0, len(cp.notifiedPeersMap)) + }) + t.Run("send returns error only after 4th call", func(t *testing.T) { + t.Parallel() + + providedConnectedPeers := []core.PeerID{"pid1", "pid2", "pid3", "pid4", "pid5", "pid6"} + counter := 0 + args := createMockArgDirectConnectionsProcessor() + expectedShard := fmt.Sprintf("%d", args.ShardCoordinator.SelfId()) + args.Messenger = &p2pmocks.MessengerStub{ + SendToConnectedPeerCalled: func(topic string, buff []byte, peerID core.PeerID) error { + shardValidatorInfo := &message.DirectConnectionInfo{} + err := args.Marshaller.Unmarshal(shardValidatorInfo, buff) + assert.Nil(t, err) + assert.Equal(t, expectedShard, shardValidatorInfo.ShardId) + + counter++ + if counter > 4 { + return errors.New("error") + } + + return nil + }, + } + + cp, _ := NewDirectConnectionsProcessorNoGoRoutine(args) + assert.False(t, check.IfNil(cp)) + + cp.notifyNewPeers(providedConnectedPeers) + assert.Equal(t, 4, len(cp.notifiedPeersMap)) + }) +} diff --git a/heartbeat/processor/export_test.go b/heartbeat/processor/export_test.go new file mode 100644 index 00000000000..f9aac9dc0b9 --- /dev/null +++ 
b/heartbeat/processor/export_test.go @@ -0,0 +1,23 @@ +package processor + +import ( + "github.com/ElrondNetwork/elrond-go-core/core" +) + +// NewDirectConnectionsProcessorNoGoRoutine creates a new instance of directConnectionsProcessor but does not start the goroutine +func NewDirectConnectionsProcessorNoGoRoutine(args ArgDirectConnectionsProcessor) (*directConnectionsProcessor, error) { + err := checkArgDirectConnectionsProcessor(args) + if err != nil { + return nil, err + } + + dcp := &directConnectionsProcessor{ + messenger: args.Messenger, + marshaller: args.Marshaller, + shardCoordinator: args.ShardCoordinator, + delayBetweenNotifications: args.DelayBetweenNotifications, + notifiedPeersMap: make(map[core.PeerID]struct{}), + } + + return dcp, nil +} diff --git a/heartbeat/processor/peerAuthenticationRequestsProcessor.go b/heartbeat/processor/peerAuthenticationRequestsProcessor.go new file mode 100644 index 00000000000..f664e9f0c66 --- /dev/null +++ b/heartbeat/processor/peerAuthenticationRequestsProcessor.go @@ -0,0 +1,262 @@ +package processor + +import ( + "bytes" + "context" + "fmt" + "sort" + "time" + + "github.com/ElrondNetwork/elrond-go-core/core/check" + logger "github.com/ElrondNetwork/elrond-go-logger" + "github.com/ElrondNetwork/elrond-go/dataRetriever" + "github.com/ElrondNetwork/elrond-go/heartbeat" + "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/storage" +) + +var log = logger.GetOrCreate("heartbeat/processor") + +const ( + minMessagesInChunk = 1 + minDelayBetweenRequests = time.Second + minTimeout = time.Second + minMessagesThreshold = 0.5 + maxMessagesThreshold = 1.0 + minMissingKeysAllowed = 1 +) + +// ArgPeerAuthenticationRequestsProcessor represents the arguments for the peer authentication request processor +type ArgPeerAuthenticationRequestsProcessor struct { + RequestHandler process.RequestHandler + NodesCoordinator heartbeat.NodesCoordinator + PeerAuthenticationPool storage.Cacher + ShardId uint32 + Epoch uint32 + MessagesInChunk uint32 + MinPeersThreshold float32 + DelayBetweenRequests time.Duration + MaxTimeout time.Duration + MaxMissingKeysInRequest uint32 + Randomizer dataRetriever.IntRandomizer +} + +// peerAuthenticationRequestsProcessor defines the component that sends the requests for peer authentication messages +type peerAuthenticationRequestsProcessor struct { + requestHandler process.RequestHandler + nodesCoordinator heartbeat.NodesCoordinator + peerAuthenticationPool storage.Cacher + shardId uint32 + epoch uint32 + messagesInChunk uint32 + minPeersThreshold float32 + delayBetweenRequests time.Duration + maxTimeout time.Duration + maxMissingKeysInRequest uint32 + randomizer dataRetriever.IntRandomizer + cancel func() +} + +// NewPeerAuthenticationRequestsProcessor creates a new instance of peerAuthenticationRequestsProcessor +func NewPeerAuthenticationRequestsProcessor(args ArgPeerAuthenticationRequestsProcessor) (*peerAuthenticationRequestsProcessor, error) { + err := checkArgs(args) + if err != nil { + return nil, err + } + + processor := &peerAuthenticationRequestsProcessor{ + requestHandler: args.RequestHandler, + nodesCoordinator: args.NodesCoordinator, + peerAuthenticationPool: args.PeerAuthenticationPool, + shardId: args.ShardId, + epoch: args.Epoch, + messagesInChunk: args.MessagesInChunk, + minPeersThreshold: args.MinPeersThreshold, + delayBetweenRequests: args.DelayBetweenRequests, + maxTimeout: args.MaxTimeout, + maxMissingKeysInRequest: args.MaxMissingKeysInRequest, + randomizer: args.Randomizer, + } + 
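+ // the requesting go routine started below is bounded by MaxTimeout: the context expires automatically after that duration and can also be cancelled earlier through Close()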
+ var ctx context.Context + ctx, processor.cancel = context.WithTimeout(context.Background(), args.MaxTimeout) + + go processor.startRequestingMessages(ctx) + + return processor, nil +} + +func checkArgs(args ArgPeerAuthenticationRequestsProcessor) error { + if check.IfNil(args.RequestHandler) { + return heartbeat.ErrNilRequestHandler + } + if check.IfNil(args.NodesCoordinator) { + return heartbeat.ErrNilNodesCoordinator + } + if check.IfNil(args.PeerAuthenticationPool) { + return heartbeat.ErrNilPeerAuthenticationPool + } + if args.MessagesInChunk < minMessagesInChunk { + return fmt.Errorf("%w for MessagesInChunk, provided %d, min expected %d", + heartbeat.ErrInvalidValue, args.MessagesInChunk, minMessagesInChunk) + } + if args.MinPeersThreshold < minMessagesThreshold || args.MinPeersThreshold > maxMessagesThreshold { + return fmt.Errorf("%w for MinPeersThreshold, provided %f, expected min %f, max %f", + heartbeat.ErrInvalidValue, args.MinPeersThreshold, minMessagesThreshold, maxMessagesThreshold) + } + if args.DelayBetweenRequests < minDelayBetweenRequests { + return fmt.Errorf("%w for DelayBetweenRequests, provided %d, min expected %d", + heartbeat.ErrInvalidTimeDuration, args.DelayBetweenRequests, minDelayBetweenRequests) + } + if args.MaxTimeout < minTimeout { + return fmt.Errorf("%w for MaxTimeout, provided %d, min expected %d", + heartbeat.ErrInvalidTimeDuration, args.MaxTimeout, minTimeout) + } + if args.MaxMissingKeysInRequest < minMissingKeysAllowed { + return fmt.Errorf("%w for MaxMissingKeysInRequest, provided %d, min expected %d", + heartbeat.ErrInvalidValue, args.MaxMissingKeysInRequest, minMissingKeysAllowed) + } + if check.IfNil(args.Randomizer) { + return heartbeat.ErrNilRandomizer + } + + return nil +} + +func (processor *peerAuthenticationRequestsProcessor) startRequestingMessages(ctx context.Context) { + defer processor.cancel() + + sortedValidatorsKeys, err := processor.getSortedValidatorsKeys() + if err != nil { + return + } + + // first request messages by chunks + processor.requestKeysChunks(sortedValidatorsKeys) + + // start endless loop until enough messages received or timeout reached + requestsTimer := time.NewTimer(processor.delayBetweenRequests) + for { + if processor.isThresholdReached(sortedValidatorsKeys) { + log.Debug("received enough messages, closing peerAuthenticationRequestsProcessor go routine") + return + } + + requestsTimer.Reset(processor.delayBetweenRequests) + select { + case <-requestsTimer.C: + processor.requestMissingKeys(sortedValidatorsKeys) + case <-ctx.Done(): + log.Debug("closing peerAuthenticationRequestsProcessor go routine") + return + } + } +} + +func (processor *peerAuthenticationRequestsProcessor) requestKeysChunks(keys [][]byte) { + maxChunks := processor.getMaxChunks(keys) + for chunkIndex := uint32(0); chunkIndex < maxChunks; chunkIndex++ { + processor.requestHandler.RequestPeerAuthenticationsChunk(processor.shardId, chunkIndex) + + time.Sleep(processor.delayBetweenRequests) + } +} + +func (processor *peerAuthenticationRequestsProcessor) getSortedValidatorsKeys() ([][]byte, error) { + validatorsPKsMap, err := processor.nodesCoordinator.GetAllEligibleValidatorsPublicKeys(processor.epoch) + if err != nil { + return nil, err + } + + validatorsPKs := make([][]byte, 0) + for _, shardValidators := range validatorsPKsMap { + validatorsPKs = append(validatorsPKs, shardValidators...) 
+ } + + sort.Slice(validatorsPKs, func(i, j int) bool { + return bytes.Compare(validatorsPKs[i], validatorsPKs[j]) < 0 + }) + + return validatorsPKs, nil +} + +func (processor *peerAuthenticationRequestsProcessor) getMaxChunks(dataBuff [][]byte) uint32 { + maxChunks := len(dataBuff) / int(processor.messagesInChunk) + if len(dataBuff)%int(processor.messagesInChunk) != 0 { + maxChunks++ + } + + return uint32(maxChunks) +} + +func (processor *peerAuthenticationRequestsProcessor) isThresholdReached(sortedValidatorsKeys [][]byte) bool { + minKeysExpected := float32(len(sortedValidatorsKeys)) * processor.minPeersThreshold + keysInCache := processor.peerAuthenticationPool.Keys() + + return float32(len(keysInCache)) >= minKeysExpected +} + +func (processor *peerAuthenticationRequestsProcessor) requestMissingKeys(sortedValidatorsKeys [][]byte) { + missingKeys := processor.getMissingKeys(sortedValidatorsKeys) + if len(missingKeys) == 0 { + return + } + + processor.requestHandler.RequestPeerAuthenticationsByHashes(processor.shardId, missingKeys) +} + +func (processor *peerAuthenticationRequestsProcessor) getMissingKeys(sortedValidatorsKeys [][]byte) [][]byte { + validatorsMap := make(map[string]bool, len(sortedValidatorsKeys)) + for _, key := range sortedValidatorsKeys { + validatorsMap[string(key)] = false + } + + keysInCache := processor.peerAuthenticationPool.Keys() + for _, key := range keysInCache { + validatorsMap[string(key)] = true + } + + missingKeys := make([][]byte, 0) + for mKey, mVal := range validatorsMap { + if !mVal { + missingKeys = append(missingKeys, []byte(mKey)) + } + } + + return processor.getRandMaxMissingKeys(missingKeys) +} + +func (processor *peerAuthenticationRequestsProcessor) getRandMaxMissingKeys(missingKeys [][]byte) [][]byte { + if len(missingKeys) <= int(processor.maxMissingKeysInRequest) { + return missingKeys + } + + lenMissingKeys := len(missingKeys) + tmpKeys := make([][]byte, lenMissingKeys) + copy(tmpKeys, missingKeys) + + randMissingKeys := make([][]byte, 0) + for len(randMissingKeys) != int(processor.maxMissingKeysInRequest) { + randomIndex := processor.randomizer.Intn(lenMissingKeys) + randMissingKeys = append(randMissingKeys, tmpKeys[randomIndex]) + + tmpKeys[randomIndex] = tmpKeys[lenMissingKeys-1] + tmpKeys = tmpKeys[:lenMissingKeys-1] + lenMissingKeys-- + } + + return randMissingKeys +} + +// Close closes the internal components +func (processor *peerAuthenticationRequestsProcessor) Close() error { + log.Debug("closing peerAuthenticationRequestsProcessor...") + processor.cancel() + + return nil +} + +// IsInterfaceNil returns true if there is no value under the interface +func (processor *peerAuthenticationRequestsProcessor) IsInterfaceNil() bool { + return processor == nil +} diff --git a/heartbeat/processor/peerAuthenticationRequestsProcessor_test.go b/heartbeat/processor/peerAuthenticationRequestsProcessor_test.go new file mode 100644 index 00000000000..d33f060ec64 --- /dev/null +++ b/heartbeat/processor/peerAuthenticationRequestsProcessor_test.go @@ -0,0 +1,395 @@ +package processor + +import ( + "bytes" + "errors" + "sort" + "strings" + "sync/atomic" + "testing" + "time" + + coreAtomic "github.com/ElrondNetwork/elrond-go-core/core/atomic" + "github.com/ElrondNetwork/elrond-go-core/core/check" + "github.com/ElrondNetwork/elrond-go-core/core/random" + "github.com/ElrondNetwork/elrond-go/heartbeat" + "github.com/ElrondNetwork/elrond-go/testscommon" + "github.com/ElrondNetwork/elrond-go/testscommon/shardingMocks" + "github.com/stretchr/testify/assert" +) 
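+
+// note: the mock args returned below satisfy every minimum enforced by checkArgs, so each test
+// case only overrides the single field it exercises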
+ +func createMockArgPeerAuthenticationRequestsProcessor() ArgPeerAuthenticationRequestsProcessor { + return ArgPeerAuthenticationRequestsProcessor{ + RequestHandler: &testscommon.RequestHandlerStub{}, + NodesCoordinator: &shardingMocks.NodesCoordinatorStub{}, + PeerAuthenticationPool: &testscommon.CacherMock{}, + ShardId: 0, + Epoch: 0, + MessagesInChunk: 5, + MinPeersThreshold: 0.8, + DelayBetweenRequests: time.Second, + MaxTimeout: 5 * time.Second, + MaxMissingKeysInRequest: 10, + Randomizer: &random.ConcurrentSafeIntRandomizer{}, + } +} + +func getSortedSlice(slice [][]byte) [][]byte { + sort.Slice(slice, func(i, j int) bool { + return bytes.Compare(slice[i], slice[j]) < 0 + }) + + return slice +} + +func TestNewPeerAuthenticationRequestsProcessor(t *testing.T) { + t.Parallel() + + t.Run("nil request handler should error", func(t *testing.T) { + t.Parallel() + + args := createMockArgPeerAuthenticationRequestsProcessor() + args.RequestHandler = nil + + processor, err := NewPeerAuthenticationRequestsProcessor(args) + assert.Equal(t, heartbeat.ErrNilRequestHandler, err) + assert.True(t, check.IfNil(processor)) + }) + t.Run("nil nodes coordinator should error", func(t *testing.T) { + t.Parallel() + + args := createMockArgPeerAuthenticationRequestsProcessor() + args.NodesCoordinator = nil + + processor, err := NewPeerAuthenticationRequestsProcessor(args) + assert.Equal(t, heartbeat.ErrNilNodesCoordinator, err) + assert.True(t, check.IfNil(processor)) + }) + t.Run("nil peer auth pool should error", func(t *testing.T) { + t.Parallel() + + args := createMockArgPeerAuthenticationRequestsProcessor() + args.PeerAuthenticationPool = nil + + processor, err := NewPeerAuthenticationRequestsProcessor(args) + assert.Equal(t, heartbeat.ErrNilPeerAuthenticationPool, err) + assert.True(t, check.IfNil(processor)) + }) + t.Run("invalid messages in chunk should error", func(t *testing.T) { + t.Parallel() + + args := createMockArgPeerAuthenticationRequestsProcessor() + args.MessagesInChunk = 0 + + processor, err := NewPeerAuthenticationRequestsProcessor(args) + assert.True(t, errors.Is(err, heartbeat.ErrInvalidValue)) + assert.True(t, strings.Contains(err.Error(), "MessagesInChunk")) + assert.True(t, check.IfNil(processor)) + }) + t.Run("invalid min peers threshold should error", func(t *testing.T) { + t.Parallel() + + args := createMockArgPeerAuthenticationRequestsProcessor() + args.MinPeersThreshold = 0.1 + + processor, err := NewPeerAuthenticationRequestsProcessor(args) + assert.True(t, errors.Is(err, heartbeat.ErrInvalidValue)) + assert.True(t, strings.Contains(err.Error(), "MinPeersThreshold")) + assert.True(t, check.IfNil(processor)) + }) + t.Run("min peers threshold too big should error", func(t *testing.T) { + t.Parallel() + + args := createMockArgPeerAuthenticationRequestsProcessor() + args.MinPeersThreshold = 1.001 + + processor, err := NewPeerAuthenticationRequestsProcessor(args) + assert.True(t, errors.Is(err, heartbeat.ErrInvalidValue)) + assert.True(t, strings.Contains(err.Error(), "MinPeersThreshold")) + assert.True(t, check.IfNil(processor)) + }) + t.Run("invalid delay between requests should error", func(t *testing.T) { + t.Parallel() + + args := createMockArgPeerAuthenticationRequestsProcessor() + args.DelayBetweenRequests = time.Second - time.Nanosecond + + processor, err := NewPeerAuthenticationRequestsProcessor(args) + assert.True(t, errors.Is(err, heartbeat.ErrInvalidTimeDuration)) + assert.True(t, strings.Contains(err.Error(), "DelayBetweenRequests")) + assert.True(t, 
check.IfNil(processor)) + }) + t.Run("invalid max timeout should error", func(t *testing.T) { + t.Parallel() + + args := createMockArgPeerAuthenticationRequestsProcessor() + args.MaxMissingKeysInRequest = 0 + + processor, err := NewPeerAuthenticationRequestsProcessor(args) + assert.True(t, errors.Is(err, heartbeat.ErrInvalidValue)) + assert.True(t, strings.Contains(err.Error(), "MaxMissingKeysInRequest")) + assert.True(t, check.IfNil(processor)) + }) + t.Run("invalid max missing keys should error", func(t *testing.T) { + t.Parallel() + + args := createMockArgPeerAuthenticationRequestsProcessor() + args.MaxTimeout = time.Second - time.Nanosecond + + processor, err := NewPeerAuthenticationRequestsProcessor(args) + assert.True(t, errors.Is(err, heartbeat.ErrInvalidTimeDuration)) + assert.True(t, strings.Contains(err.Error(), "MaxTimeout")) + assert.True(t, check.IfNil(processor)) + }) + t.Run("nil randomizer should error", func(t *testing.T) { + t.Parallel() + + args := createMockArgPeerAuthenticationRequestsProcessor() + args.Randomizer = nil + + processor, err := NewPeerAuthenticationRequestsProcessor(args) + assert.Equal(t, heartbeat.ErrNilRandomizer, err) + assert.True(t, check.IfNil(processor)) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + defer func() { + r := recover() + if r != nil { + assert.Fail(t, "should not panic") + } + }() + + processor, err := NewPeerAuthenticationRequestsProcessor(createMockArgPeerAuthenticationRequestsProcessor()) + assert.Nil(t, err) + assert.False(t, check.IfNil(processor)) + + err = processor.Close() + assert.Nil(t, err) + }) +} + +func TestPeerAuthenticationRequestsProcessor_startRequestingMessages(t *testing.T) { + t.Parallel() + + t.Run("threshold reached from requestKeysChunks", func(t *testing.T) { + t.Parallel() + + providedKeys := [][]byte{[]byte("pk3"), []byte("pk2"), []byte("pk0"), []byte("pk1")} + providedKeysMap := make(map[uint32][][]byte, 2) + providedKeysMap[0] = providedKeys[:len(providedKeys)/2] + providedKeysMap[1] = providedKeys[len(providedKeys)/2:] + args := createMockArgPeerAuthenticationRequestsProcessor() + args.NodesCoordinator = &shardingMocks.NodesCoordinatorStub{ + GetAllEligibleValidatorsPublicKeysCalled: func(epoch uint32) (map[uint32][][]byte, error) { + return providedKeysMap, nil + }, + } + + args.MessagesInChunk = 5 // all provided keys in one chunk + + wasRequestPeerAuthenticationsChunkCalled := coreAtomic.Flag{} + wasRequestPeerAuthenticationsByHashesCalled := coreAtomic.Flag{} + args.RequestHandler = &testscommon.RequestHandlerStub{ + RequestPeerAuthenticationsChunkCalled: func(destShardID uint32, chunkIndex uint32) { + wasRequestPeerAuthenticationsChunkCalled.SetValue(true) + assert.Equal(t, uint32(0), chunkIndex) + }, + RequestPeerAuthenticationsByHashesCalled: func(destShardID uint32, hashes [][]byte) { + wasRequestPeerAuthenticationsByHashesCalled.SetValue(true) + }, + } + + args.PeerAuthenticationPool = &testscommon.CacherStub{ + KeysCalled: func() [][]byte { + return providedKeys // all keys requested available in cache + }, + } + + processor, err := NewPeerAuthenticationRequestsProcessor(args) + assert.Nil(t, err) + assert.False(t, check.IfNil(processor)) + + time.Sleep(3 * time.Second) + _ = processor.Close() + + assert.False(t, wasRequestPeerAuthenticationsByHashesCalled.IsSet()) + assert.True(t, wasRequestPeerAuthenticationsChunkCalled.IsSet()) + }) + t.Run("should work: <-requestsTimer.C", func(t *testing.T) { + t.Parallel() + + providedKeys := [][]byte{[]byte("pk3"), []byte("pk2"), 
[]byte("pk0"), []byte("pk1")} + providedKeysMap := make(map[uint32][][]byte, 2) + providedKeysMap[0] = providedKeys[:len(providedKeys)/2] + providedKeysMap[1] = providedKeys[len(providedKeys)/2:] + args := createMockArgPeerAuthenticationRequestsProcessor() + args.NodesCoordinator = &shardingMocks.NodesCoordinatorStub{ + GetAllEligibleValidatorsPublicKeysCalled: func(epoch uint32) (map[uint32][][]byte, error) { + return providedKeysMap, nil + }, + } + + args.MessagesInChunk = 5 // all provided keys in one chunk + args.MinPeersThreshold = 1 // need messages from all peers + + wasRequestPeerAuthenticationsChunkCalled := coreAtomic.Flag{} + wasRequestPeerAuthenticationsByHashesCalled := coreAtomic.Flag{} + args.RequestHandler = &testscommon.RequestHandlerStub{ + RequestPeerAuthenticationsChunkCalled: func(destShardID uint32, chunkIndex uint32) { + wasRequestPeerAuthenticationsChunkCalled.SetValue(true) + assert.Equal(t, uint32(0), chunkIndex) + }, + RequestPeerAuthenticationsByHashesCalled: func(destShardID uint32, hashes [][]byte) { + wasRequestPeerAuthenticationsByHashesCalled.SetValue(true) + assert.Equal(t, getSortedSlice(providedKeys[len(providedKeys)/2:]), getSortedSlice(hashes)) + }, + } + + args.PeerAuthenticationPool = &testscommon.CacherStub{ + KeysCalled: func() [][]byte { + return providedKeys[:len(providedKeys)/2] + }, + } + + processor, err := NewPeerAuthenticationRequestsProcessor(args) + assert.Nil(t, err) + assert.False(t, check.IfNil(processor)) + + time.Sleep(3 * time.Second) + _ = processor.Close() + + assert.True(t, wasRequestPeerAuthenticationsByHashesCalled.IsSet()) + assert.True(t, wasRequestPeerAuthenticationsChunkCalled.IsSet()) + }) +} + +func TestPeerAuthenticationRequestsProcessor_requestKeysChunks(t *testing.T) { + t.Parallel() + + providedKeys := [][]byte{[]byte("pk3"), []byte("pk2"), []byte("pk0"), []byte("pk1")} // 2 chunks of 2 + counter := uint32(0) + args := createMockArgPeerAuthenticationRequestsProcessor() + args.MessagesInChunk = 2 + args.RequestHandler = &testscommon.RequestHandlerStub{ + RequestPeerAuthenticationsChunkCalled: func(destShardID uint32, chunkIndex uint32) { + assert.Equal(t, counter, chunkIndex) + atomic.AddUint32(&counter, 1) + }, + } + + processor, err := NewPeerAuthenticationRequestsProcessor(args) + assert.Nil(t, err) + assert.False(t, check.IfNil(processor)) + + processor.requestKeysChunks(providedKeys) +} + +func TestPeerAuthenticationRequestsProcessor_getMaxChunks(t *testing.T) { + t.Parallel() + + args := createMockArgPeerAuthenticationRequestsProcessor() + args.MessagesInChunk = 2 + + processor, err := NewPeerAuthenticationRequestsProcessor(args) + assert.Nil(t, err) + assert.False(t, check.IfNil(processor)) + + maxChunks := processor.getMaxChunks(nil) + assert.Equal(t, uint32(0), maxChunks) + + providedBuff := [][]byte{[]byte("msg")} + maxChunks = processor.getMaxChunks(providedBuff) + assert.Equal(t, uint32(1), maxChunks) + + providedBuff = [][]byte{[]byte("msg"), []byte("msg")} + maxChunks = processor.getMaxChunks(providedBuff) + assert.Equal(t, uint32(1), maxChunks) + + providedBuff = [][]byte{[]byte("msg"), []byte("msg"), []byte("msg")} + maxChunks = processor.getMaxChunks(providedBuff) + assert.Equal(t, uint32(2), maxChunks) +} + +func TestPeerAuthenticationRequestsProcessor_isThresholdReached(t *testing.T) { + t.Parallel() + + providedPks := [][]byte{[]byte("pk0"), []byte("pk1"), []byte("pk2"), []byte("pk3")} + args := createMockArgPeerAuthenticationRequestsProcessor() + args.MinPeersThreshold = 0.6 + counter := uint32(0) + 
args.PeerAuthenticationPool = &testscommon.CacherStub{ + KeysCalled: func() [][]byte { + var keys = make([][]byte, 0) + switch atomic.LoadUint32(&counter) { + case 0: + keys = [][]byte{[]byte("pk0")} + case 1: + keys = [][]byte{[]byte("pk0"), []byte("pk2")} + case 2: + keys = [][]byte{[]byte("pk0"), []byte("pk1"), []byte("pk2")} + case 3: + keys = [][]byte{[]byte("pk0"), []byte("pk1"), []byte("pk2"), []byte("pk3")} + } + + atomic.AddUint32(&counter, 1) + return keys + }, + } + + processor, err := NewPeerAuthenticationRequestsProcessor(args) + assert.Nil(t, err) + assert.False(t, check.IfNil(processor)) + + assert.False(t, processor.isThresholdReached(providedPks)) // counter 0 + assert.False(t, processor.isThresholdReached(providedPks)) // counter 1 + assert.True(t, processor.isThresholdReached(providedPks)) // counter 2 + assert.True(t, processor.isThresholdReached(providedPks)) // counter 3 +} + +func TestPeerAuthenticationRequestsProcessor_requestMissingKeys(t *testing.T) { + t.Parallel() + + t.Run("get missing keys returns nil", func(t *testing.T) { + t.Parallel() + + wasCalled := false + args := createMockArgPeerAuthenticationRequestsProcessor() + args.RequestHandler = &testscommon.RequestHandlerStub{ + RequestPeerAuthenticationsByHashesCalled: func(destShardID uint32, hashes [][]byte) { + wasCalled = true + }, + } + + processor, err := NewPeerAuthenticationRequestsProcessor(args) + assert.Nil(t, err) + assert.False(t, check.IfNil(processor)) + + processor.requestMissingKeys(nil) + assert.False(t, wasCalled) + }) +} + +func TestPeerAuthenticationRequestsProcessor_getRandMaxMissingKeys(t *testing.T) { + t.Parallel() + + providedPks := [][]byte{[]byte("pk0"), []byte("pk1"), []byte("pk2"), []byte("pk3"), []byte("pk5"), + []byte("pk8"), []byte("pk4"), []byte("pk7"), []byte("pk6")} + + args := createMockArgPeerAuthenticationRequestsProcessor() + args.MaxMissingKeysInRequest = 3 + processor, err := NewPeerAuthenticationRequestsProcessor(args) + assert.Nil(t, err) + assert.False(t, check.IfNil(processor)) + + for i := 0; i < 100; i++ { + randMissingKeys := processor.getRandMaxMissingKeys(providedPks) + assert.Equal(t, int(args.MaxMissingKeysInRequest), len(randMissingKeys)) + + randMissingKeys = getSortedSlice(randMissingKeys) + for j := 0; j < len(randMissingKeys)-1; j++ { + assert.NotEqual(t, randMissingKeys[j], randMissingKeys[j+1]) + } + } +} diff --git a/heartbeat/proto/heartbeat.proto b/heartbeat/proto/heartbeat.proto new file mode 100644 index 00000000000..3c510aba2fd --- /dev/null +++ b/heartbeat/proto/heartbeat.proto @@ -0,0 +1,32 @@ +syntax = "proto3"; + +package proto; + +option go_package = "heartbeat"; + +// HeartbeatV2 represents the heartbeat message that is sent between peers from the same shard containing +// current node status +message HeartbeatV2 { + bytes Payload = 1; + string VersionNumber = 2; + string NodeDisplayName = 3; + string Identity = 4; + uint64 Nonce = 5; + uint32 PeerSubType = 6; +} + +// PeerAuthentication represents the DTO used to pass peer authentication information such as public key, peer id, +// signature, payload and the signature. 
This message is used to link the peerID with the associated public key +message PeerAuthentication { + bytes Pubkey = 1; + bytes Signature = 2; + bytes Pid = 3; + bytes Payload = 4; + bytes PayloadSignature = 5; +} + +// Payload represents the DTO used as payload for both HeartbeatV2 and PeerAuthentication messages +message Payload { + int64 Timestamp = 1; + string HardforkMessage = 2; +} diff --git a/heartbeat/sender/baseSender.go b/heartbeat/sender/baseSender.go new file mode 100644 index 00000000000..98ec55e0b9b --- /dev/null +++ b/heartbeat/sender/baseSender.go @@ -0,0 +1,86 @@ +package sender + +import ( + "fmt" + "time" + + "github.com/ElrondNetwork/elrond-go-core/core/check" + "github.com/ElrondNetwork/elrond-go-core/core/random" + "github.com/ElrondNetwork/elrond-go-core/marshal" + "github.com/ElrondNetwork/elrond-go/heartbeat" +) + +var randomizer = &random.ConcurrentSafeIntRandomizer{} + +const minTimeBetweenSends = time.Second +const minThresholdBetweenSends = 0.05 // 5% +const maxThresholdBetweenSends = 1.00 // 100% + +// argBaseSender represents the arguments for base sender +type argBaseSender struct { + messenger heartbeat.P2PMessenger + marshaller marshal.Marshalizer + topic string + timeBetweenSends time.Duration + timeBetweenSendsWhenError time.Duration + thresholdBetweenSends float64 +} + +type baseSender struct { + timerHandler + messenger heartbeat.P2PMessenger + marshaller marshal.Marshalizer + topic string + timeBetweenSends time.Duration + timeBetweenSendsWhenError time.Duration + thresholdBetweenSends float64 +} + +func createBaseSender(args argBaseSender) baseSender { + bs := baseSender{ + messenger: args.messenger, + marshaller: args.marshaller, + topic: args.topic, + timeBetweenSends: args.timeBetweenSends, + timeBetweenSendsWhenError: args.timeBetweenSendsWhenError, + thresholdBetweenSends: args.thresholdBetweenSends, + } + bs.timerHandler = &timerWrapper{ + timer: time.NewTimer(bs.computeRandomDuration()), + } + + return bs +} + +func checkBaseSenderArgs(args argBaseSender) error { + if check.IfNil(args.messenger) { + return heartbeat.ErrNilMessenger + } + if check.IfNil(args.marshaller) { + return heartbeat.ErrNilMarshaller + } + if len(args.topic) == 0 { + return heartbeat.ErrEmptySendTopic + } + if args.timeBetweenSends < minTimeBetweenSends { + return fmt.Errorf("%w for timeBetweenSends", heartbeat.ErrInvalidTimeDuration) + } + if args.timeBetweenSendsWhenError < minTimeBetweenSends { + return fmt.Errorf("%w for timeBetweenSendsWhenError", heartbeat.ErrInvalidTimeDuration) + } + if args.thresholdBetweenSends < minThresholdBetweenSends || args.thresholdBetweenSends > maxThresholdBetweenSends { + return fmt.Errorf("%w for thresholdBetweenSends, receieved %f, min allowed %f, max allowed %f", + heartbeat.ErrInvalidThreshold, args.thresholdBetweenSends, minThresholdBetweenSends, maxThresholdBetweenSends) + } + + return nil +} + +func (bs *baseSender) computeRandomDuration() time.Duration { + timeBetweenSendsInNano := bs.timeBetweenSends.Nanoseconds() + maxThreshold := float64(timeBetweenSendsInNano) * bs.thresholdBetweenSends + randThreshold := randomizer.Intn(int(maxThreshold)) + + ret := time.Duration(timeBetweenSendsInNano + int64(randThreshold)) + return ret +} diff --git a/heartbeat/sender/baseSender_test.go b/heartbeat/sender/baseSender_test.go new file mode 100644 index 00000000000..e0fead0340b --- /dev/null +++ b/heartbeat/sender/baseSender_test.go @@ -0,0 +1,41 @@ +package sender + +import ( + "testing" + "time" + + 
"github.com/ElrondNetwork/elrond-go/testscommon" + "github.com/ElrondNetwork/elrond-go/testscommon/p2pmocks" + "github.com/stretchr/testify/assert" +) + +func createMockBaseArgs() argBaseSender { + return argBaseSender{ + messenger: &p2pmocks.MessengerStub{}, + marshaller: &testscommon.MarshalizerMock{}, + topic: "topic", + timeBetweenSends: time.Second, + timeBetweenSendsWhenError: time.Second, + thresholdBetweenSends: 0.1, + } +} + +func TestBaseSender_computeRandomDuration(t *testing.T) { + t.Parallel() + + bs := createBaseSender(createMockBaseArgs()) + assert.NotNil(t, bs) + + var d1, d2, d3 time.Duration + for i := 0; i < 100; i++ { + d1 = bs.computeRandomDuration() + d2 = bs.computeRandomDuration() + d3 = bs.computeRandomDuration() + if d1 != d2 && d2 != d3 && d1 != d3 { + break + } + } + + assert.False(t, d1 == d2) + assert.False(t, d2 == d3) +} diff --git a/heartbeat/sender/heartbeatSender.go b/heartbeat/sender/heartbeatSender.go new file mode 100644 index 00000000000..ac671de2a54 --- /dev/null +++ b/heartbeat/sender/heartbeatSender.go @@ -0,0 +1,126 @@ +package sender + +import ( + "time" + + "github.com/ElrondNetwork/elrond-go-core/core" + "github.com/ElrondNetwork/elrond-go-core/core/check" + "github.com/ElrondNetwork/elrond-go-core/data/batch" + "github.com/ElrondNetwork/elrond-go/heartbeat" +) + +const maxSizeInBytes = 128 + +// argHeartbeatSender represents the arguments for the heartbeat sender +type argHeartbeatSender struct { + argBaseSender + versionNumber string + nodeDisplayName string + identity string + peerSubType core.P2PPeerSubType + currentBlockProvider heartbeat.CurrentBlockProvider +} + +type heartbeatSender struct { + baseSender + versionNumber string + nodeDisplayName string + identity string + peerSubType core.P2PPeerSubType + currentBlockProvider heartbeat.CurrentBlockProvider +} + +// newHeartbeatSender creates a new instance of type heartbeatSender +func newHeartbeatSender(args argHeartbeatSender) (*heartbeatSender, error) { + err := checkHeartbeatSenderArgs(args) + if err != nil { + return nil, err + } + + return &heartbeatSender{ + baseSender: createBaseSender(args.argBaseSender), + versionNumber: args.versionNumber, + nodeDisplayName: args.nodeDisplayName, + identity: args.identity, + currentBlockProvider: args.currentBlockProvider, + peerSubType: args.peerSubType, + }, nil +} + +func checkHeartbeatSenderArgs(args argHeartbeatSender) error { + err := checkBaseSenderArgs(args.argBaseSender) + if err != nil { + return err + } + if len(args.versionNumber) > maxSizeInBytes { + return heartbeat.ErrPropertyTooLong + } + if check.IfNil(args.currentBlockProvider) { + return heartbeat.ErrNilCurrentBlockProvider + } + + return nil +} + +// Execute will handle the execution of a cycle in which the heartbeat message will be sent +func (sender *heartbeatSender) Execute() { + duration := sender.computeRandomDuration() + err := sender.execute() + if err != nil { + duration = sender.timeBetweenSendsWhenError + log.Error("error sending heartbeat message", "error", err, "next send will be in", duration) + } else { + log.Debug("heartbeat message sent", "next send will be in", duration) + } + + sender.CreateNewTimer(duration) +} + +func (sender *heartbeatSender) execute() error { + payload := &heartbeat.Payload{ + Timestamp: time.Now().Unix(), + HardforkMessage: "", // sent through peer authentication message + } + payloadBytes, err := sender.marshaller.Marshal(payload) + if err != nil { + return err + } + + nonce := uint64(0) + currentBlock := 
sender.currentBlockProvider.GetCurrentBlockHeader() + if currentBlock != nil { + nonce = currentBlock.GetNonce() + } + + msg := &heartbeat.HeartbeatV2{ + Payload: payloadBytes, + VersionNumber: sender.versionNumber, + NodeDisplayName: sender.nodeDisplayName, + Identity: sender.identity, + Nonce: nonce, + PeerSubType: uint32(sender.peerSubType), + } + + msgBytes, err := sender.marshaller.Marshal(msg) + if err != nil { + return err + } + + b := &batch.Batch{ + Data: make([][]byte, 1), + } + b.Data[0] = msgBytes + data, err := sender.marshaller.Marshal(b) + if err != nil { + return err + } + + sender.messenger.Broadcast(sender.topic, data) + + return nil +} + +// IsInterfaceNil returns true if there is no value under the interface +func (sender *heartbeatSender) IsInterfaceNil() bool { + return sender == nil +} diff --git a/heartbeat/sender/heartbeatSender_test.go b/heartbeat/sender/heartbeatSender_test.go new file mode 100644 index 00000000000..f8115c36248 --- /dev/null +++ b/heartbeat/sender/heartbeatSender_test.go @@ -0,0 +1,269 @@ +package sender + +import ( + "errors" + "strings" + "testing" + "time" + + "github.com/ElrondNetwork/elrond-go-core/core" + "github.com/ElrondNetwork/elrond-go-core/core/check" + "github.com/ElrondNetwork/elrond-go-core/data" + "github.com/ElrondNetwork/elrond-go/heartbeat" + "github.com/ElrondNetwork/elrond-go/heartbeat/mock" + "github.com/ElrondNetwork/elrond-go/testscommon" + "github.com/stretchr/testify/assert" +) + +var expectedErr = errors.New("expected error") + +func createMockHeartbeatSenderArgs(argBase argBaseSender) argHeartbeatSender { + return argHeartbeatSender{ + argBaseSender: argBase, + versionNumber: "v1", + nodeDisplayName: "node", + identity: "identity", + peerSubType: core.RegularPeer, + currentBlockProvider: &mock.CurrentBlockProviderStub{}, + } +} + +func TestNewHeartbeatSender(t *testing.T) { + t.Parallel() + + t.Run("nil peer messenger should error", func(t *testing.T) { + t.Parallel() + + argBase := createMockBaseArgs() + argBase.messenger = nil + args := createMockHeartbeatSenderArgs(argBase) + sender, err := newHeartbeatSender(args) + + assert.Nil(t, sender) + assert.Equal(t, heartbeat.ErrNilMessenger, err) + }) + t.Run("nil marshaller should error", func(t *testing.T) { + t.Parallel() + + argBase := createMockBaseArgs() + argBase.marshaller = nil + args := createMockHeartbeatSenderArgs(argBase) + sender, err := newHeartbeatSender(args) + + assert.Nil(t, sender) + assert.Equal(t, heartbeat.ErrNilMarshaller, err) + }) + t.Run("empty topic should error", func(t *testing.T) { + t.Parallel() + + argBase := createMockBaseArgs() + argBase.topic = "" + args := createMockHeartbeatSenderArgs(argBase) + sender, err := newHeartbeatSender(args) + + assert.Nil(t, sender) + assert.Equal(t, heartbeat.ErrEmptySendTopic, err) + }) + t.Run("invalid time between sends should error", func(t *testing.T) { + t.Parallel() + + argBase := createMockBaseArgs() + argBase.timeBetweenSends = time.Second - time.Nanosecond + args := createMockHeartbeatSenderArgs(argBase) + sender, err := newHeartbeatSender(args) + + assert.Nil(t, sender) + assert.True(t, errors.Is(err, heartbeat.ErrInvalidTimeDuration)) + assert.True(t, strings.Contains(err.Error(), "timeBetweenSends")) + assert.False(t, strings.Contains(err.Error(), "timeBetweenSendsWhenError")) + }) + t.Run("invalid time between sends should error", func(t *testing.T) { + t.Parallel() + + argBase := createMockBaseArgs() + argBase.timeBetweenSendsWhenError = time.Second - time.Nanosecond + args := 
createMockHeartbeatSenderArgs(argBase) + sender, err := newHeartbeatSender(args) + + assert.Nil(t, sender) + assert.True(t, errors.Is(err, heartbeat.ErrInvalidTimeDuration)) + assert.True(t, strings.Contains(err.Error(), "timeBetweenSendsWhenError")) + }) + t.Run("empty version number should error", func(t *testing.T) { + t.Parallel() + + args := createMockHeartbeatSenderArgs(createMockBaseArgs()) + args.versionNumber = string(make([]byte, 150)) + sender, err := newHeartbeatSender(args) + + assert.Nil(t, sender) + assert.Equal(t, heartbeat.ErrPropertyTooLong, err) + }) + t.Run("nil current block provider should error", func(t *testing.T) { + t.Parallel() + + args := createMockHeartbeatSenderArgs(createMockBaseArgs()) + args.currentBlockProvider = nil + sender, err := newHeartbeatSender(args) + + assert.Nil(t, sender) + assert.Equal(t, heartbeat.ErrNilCurrentBlockProvider, err) + }) + t.Run("threshold too small should error", func(t *testing.T) { + t.Parallel() + + args := createMockHeartbeatSenderArgs(createMockBaseArgs()) + args.thresholdBetweenSends = 0.001 + sender, err := newHeartbeatSender(args) + + assert.Nil(t, sender) + assert.True(t, errors.Is(err, heartbeat.ErrInvalidThreshold)) + assert.True(t, strings.Contains(err.Error(), "thresholdBetweenSends")) + }) + t.Run("threshold too big should error", func(t *testing.T) { + t.Parallel() + + args := createMockHeartbeatSenderArgs(createMockBaseArgs()) + args.thresholdBetweenSends = 1.001 + sender, err := newHeartbeatSender(args) + + assert.Nil(t, sender) + assert.True(t, errors.Is(err, heartbeat.ErrInvalidThreshold)) + assert.True(t, strings.Contains(err.Error(), "thresholdBetweenSends")) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + args := createMockHeartbeatSenderArgs(createMockBaseArgs()) + sender, err := newHeartbeatSender(args) + + assert.False(t, check.IfNil(sender)) + assert.Nil(t, err) + }) +} + +func TestHeartbeatSender_Execute(t *testing.T) { + t.Parallel() + + t.Run("execute errors, should set the error time duration value", func(t *testing.T) { + t.Parallel() + + wasCalled := false + argsBase := createMockBaseArgs() + argsBase.timeBetweenSendsWhenError = time.Second * 3 + argsBase.timeBetweenSends = time.Second * 2 + argsBase.marshaller = &testscommon.MarshalizerStub{ + MarshalCalled: func(obj interface{}) ([]byte, error) { + return nil, expectedErr + }, + } + + args := createMockHeartbeatSenderArgs(argsBase) + sender, _ := newHeartbeatSender(args) + sender.timerHandler = &mock.TimerHandlerStub{ + CreateNewTimerCalled: func(duration time.Duration) { + assert.Equal(t, argsBase.timeBetweenSendsWhenError, duration) + wasCalled = true + }, + } + + sender.Execute() + assert.True(t, wasCalled) + }) + t.Run("execute worked, should set the normal time duration value", func(t *testing.T) { + t.Parallel() + + wasCalled := false + argsBase := createMockBaseArgs() + argsBase.timeBetweenSendsWhenError = time.Second * 3 + argsBase.timeBetweenSends = time.Second * 2 + + args := createMockHeartbeatSenderArgs(argsBase) + sender, _ := newHeartbeatSender(args) + sender.timerHandler = &mock.TimerHandlerStub{ + CreateNewTimerCalled: func(duration time.Duration) { + floatTBS := float64(argsBase.timeBetweenSends.Nanoseconds()) + maxDuration := floatTBS + floatTBS*argsBase.thresholdBetweenSends + assert.True(t, time.Duration(maxDuration) > duration) + assert.True(t, argsBase.timeBetweenSends <= duration) + wasCalled = true + }, + } + + sender.Execute() + assert.True(t, wasCalled) + }) +} + +func TestHeartbeatSender_execute(t 
*testing.T) { + t.Parallel() + + t.Run("marshal returns error first time", func(t *testing.T) { + t.Parallel() + + argsBase := createMockBaseArgs() + argsBase.marshaller = &testscommon.MarshalizerStub{ + MarshalCalled: func(obj interface{}) ([]byte, error) { + return nil, expectedErr + }, + } + + args := createMockHeartbeatSenderArgs(argsBase) + sender, _ := newHeartbeatSender(args) + assert.False(t, check.IfNil(sender)) + + err := sender.execute() + assert.Equal(t, expectedErr, err) + }) + t.Run("marshal returns error second time", func(t *testing.T) { + t.Parallel() + + argsBase := createMockBaseArgs() + numOfCalls := 0 + argsBase.marshaller = &testscommon.MarshalizerStub{ + MarshalCalled: func(obj interface{}) ([]byte, error) { + if numOfCalls < 1 { + numOfCalls++ + return []byte(""), nil + } + + return nil, expectedErr + }, + } + + args := createMockHeartbeatSenderArgs(argsBase) + sender, _ := newHeartbeatSender(args) + assert.False(t, check.IfNil(sender)) + + err := sender.execute() + assert.Equal(t, expectedErr, err) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + argsBase := createMockBaseArgs() + broadcastCalled := false + argsBase.messenger = &mock.MessengerStub{ + BroadcastCalled: func(topic string, buff []byte) { + assert.Equal(t, argsBase.topic, topic) + broadcastCalled = true + }, + } + + args := createMockHeartbeatSenderArgs(argsBase) + + args.currentBlockProvider = &mock.CurrentBlockProviderStub{ + GetCurrentBlockHeaderCalled: func() data.HeaderHandler { + return &testscommon.HeaderHandlerStub{} + }, + } + + sender, _ := newHeartbeatSender(args) + assert.False(t, check.IfNil(sender)) + + err := sender.execute() + assert.Nil(t, err) + assert.True(t, broadcastCalled) + assert.Equal(t, uint64(1), args.currentBlockProvider.GetCurrentBlockHeader().GetNonce()) + }) +} diff --git a/heartbeat/sender/interface.go b/heartbeat/sender/interface.go new file mode 100644 index 00000000000..f7fa9a7482a --- /dev/null +++ b/heartbeat/sender/interface.go @@ -0,0 +1,22 @@ +package sender + +import "time" + +type senderHandler interface { + ExecutionReadyChannel() <-chan time.Time + Execute() + Close() + IsInterfaceNil() bool +} + +type hardforkHandler interface { + ShouldTriggerHardfork() <-chan struct{} + Execute() + Close() +} + +type timerHandler interface { + CreateNewTimer(duration time.Duration) + ExecutionReadyChannel() <-chan time.Time + Close() +} diff --git a/heartbeat/sender/peerAuthenticationSender.go b/heartbeat/sender/peerAuthenticationSender.go new file mode 100644 index 00000000000..1eadf3e1c18 --- /dev/null +++ b/heartbeat/sender/peerAuthenticationSender.go @@ -0,0 +1,214 @@ +package sender + +import ( + "bytes" + "fmt" + "time" + + "github.com/ElrondNetwork/elrond-go-core/core/check" + "github.com/ElrondNetwork/elrond-go-core/data/batch" + crypto "github.com/ElrondNetwork/elrond-go-crypto" + "github.com/ElrondNetwork/elrond-go/heartbeat" +) + +// argPeerAuthenticationSender represents the arguments for the peer authentication sender +type argPeerAuthenticationSender struct { + argBaseSender + nodesCoordinator heartbeat.NodesCoordinator + peerSignatureHandler crypto.PeerSignatureHandler + privKey crypto.PrivateKey + redundancyHandler heartbeat.NodeRedundancyHandler + hardforkTrigger heartbeat.HardforkTrigger + hardforkTimeBetweenSends time.Duration + hardforkTriggerPubKey []byte +} + +type peerAuthenticationSender struct { + baseSender + nodesCoordinator heartbeat.NodesCoordinator + peerSignatureHandler crypto.PeerSignatureHandler + redundancy 
heartbeat.NodeRedundancyHandler + privKey crypto.PrivateKey + publicKey crypto.PublicKey + observerPublicKey crypto.PublicKey + hardforkTrigger heartbeat.HardforkTrigger + hardforkTimeBetweenSends time.Duration + hardforkTriggerPubKey []byte +} + +// newPeerAuthenticationSender will create a new instance of type peerAuthenticationSender +func newPeerAuthenticationSender(args argPeerAuthenticationSender) (*peerAuthenticationSender, error) { + err := checkPeerAuthenticationSenderArgs(args) + if err != nil { + return nil, err + } + + redundancyHandler := args.redundancyHandler + sender := &peerAuthenticationSender{ + baseSender: createBaseSender(args.argBaseSender), + nodesCoordinator: args.nodesCoordinator, + peerSignatureHandler: args.peerSignatureHandler, + redundancy: redundancyHandler, + privKey: args.privKey, + publicKey: args.privKey.GeneratePublic(), + observerPublicKey: redundancyHandler.ObserverPrivateKey().GeneratePublic(), + hardforkTrigger: args.hardforkTrigger, + hardforkTimeBetweenSends: args.hardforkTimeBetweenSends, + hardforkTriggerPubKey: args.hardforkTriggerPubKey, + } + + return sender, nil +} + +func checkPeerAuthenticationSenderArgs(args argPeerAuthenticationSender) error { + err := checkBaseSenderArgs(args.argBaseSender) + if err != nil { + return err + } + if check.IfNil(args.nodesCoordinator) { + return heartbeat.ErrNilNodesCoordinator + } + if check.IfNil(args.peerSignatureHandler) { + return heartbeat.ErrNilPeerSignatureHandler + } + if check.IfNil(args.privKey) { + return heartbeat.ErrNilPrivateKey + } + if check.IfNil(args.redundancyHandler) { + return heartbeat.ErrNilRedundancyHandler + } + if check.IfNil(args.hardforkTrigger) { + return heartbeat.ErrNilHardforkTrigger + } + if args.hardforkTimeBetweenSends < minTimeBetweenSends { + return fmt.Errorf("%w for hardforkTimeBetweenSends", heartbeat.ErrInvalidTimeDuration) + } + if len(args.hardforkTriggerPubKey) == 0 { + return fmt.Errorf("%w hardfork trigger public key bytes length is 0", heartbeat.ErrInvalidValue) + } + + return nil +} + +// Execute will handle the execution of a cycle in which the peer authentication message will be sent +func (sender *peerAuthenticationSender) Execute() { + var duration time.Duration + defer func() { + sender.CreateNewTimer(duration) + }() + + _, pk := sender.getCurrentPrivateAndPublicKeys() + pkBytes, err := pk.ToByteArray() + if err != nil { + duration = sender.timeBetweenSendsWhenError + return + } + + if !sender.isValidator(pkBytes) && !sender.isHardforkSource(pkBytes) { + duration = sender.timeBetweenSendsWhenError + return + } + + duration = sender.computeRandomDuration() + err, isHardforkTriggered := sender.execute() + if err != nil { + duration = sender.timeBetweenSendsWhenError + log.Error("error sending peer authentication message", "error", err, "is hardfork triggered", isHardforkTriggered, "next send will be in", duration) + return + } + + if isHardforkTriggered { + duration = sender.hardforkTimeBetweenSends + } + + log.Debug("peer authentication message sent", "is hardfork triggered", isHardforkTriggered, "next send will be in", duration) +} + +func (sender *peerAuthenticationSender) execute() (error, bool) { + sk, pk := sender.getCurrentPrivateAndPublicKeys() + + msg := &heartbeat.PeerAuthentication{ + Pid: sender.messenger.ID().Bytes(), + } + + hardforkPayload, isTriggered := sender.getHardforkPayload() + payload := &heartbeat.Payload{ + Timestamp: time.Now().Unix(), + HardforkMessage: string(hardforkPayload), + } + payloadBytes, err := 
sender.marshaller.Marshal(payload) + if err != nil { + return err, isTriggered + } + msg.Payload = payloadBytes + msg.PayloadSignature, err = sender.messenger.Sign(payloadBytes) + if err != nil { + return err, isTriggered + } + + msg.Pubkey, err = pk.ToByteArray() + if err != nil { + return err, isTriggered + } + + msg.Signature, err = sender.peerSignatureHandler.GetPeerSignature(sk, msg.Pid) + if err != nil { + return err, isTriggered + } + + msgBytes, err := sender.marshaller.Marshal(msg) + if err != nil { + return err, isTriggered + } + + b := &batch.Batch{ + Data: make([][]byte, 1), + } + b.Data[0] = msgBytes + data, err := sender.marshaller.Marshal(b) + if err != nil { + return err, isTriggered + } + + sender.messenger.Broadcast(sender.topic, data) + + return nil, isTriggered +} + +// ShouldTriggerHardfork signals when hardfork message should be sent +func (sender *peerAuthenticationSender) ShouldTriggerHardfork() <-chan struct{} { + return sender.hardforkTrigger.NotifyTriggerReceivedV2() +} + +func (sender *peerAuthenticationSender) getCurrentPrivateAndPublicKeys() (crypto.PrivateKey, crypto.PublicKey) { + shouldUseOriginalKeys := !sender.redundancy.IsRedundancyNode() || (sender.redundancy.IsRedundancyNode() && !sender.redundancy.IsMainMachineActive()) + if shouldUseOriginalKeys { + return sender.privKey, sender.publicKey + } + + return sender.redundancy.ObserverPrivateKey(), sender.observerPublicKey +} + +func (sender *peerAuthenticationSender) isValidator(pkBytes []byte) bool { + _, _, err := sender.nodesCoordinator.GetValidatorWithPublicKey(pkBytes) + return err == nil +} + +func (sender *peerAuthenticationSender) isHardforkSource(pkBytes []byte) bool { + return bytes.Equal(pkBytes, sender.hardforkTriggerPubKey) +} + +func (sender *peerAuthenticationSender) getHardforkPayload() ([]byte, bool) { + payload := make([]byte, 0) + _, isTriggered := sender.hardforkTrigger.RecordedTriggerMessage() + if isTriggered { + payload = sender.hardforkTrigger.CreateData() + } + + return payload, isTriggered +} + +// IsInterfaceNil returns true if there is no value under the interface +func (sender *peerAuthenticationSender) IsInterfaceNil() bool { + return sender == nil +} diff --git a/heartbeat/sender/peerAuthenticationSender_test.go b/heartbeat/sender/peerAuthenticationSender_test.go new file mode 100644 index 00000000000..ea2aa7a062e --- /dev/null +++ b/heartbeat/sender/peerAuthenticationSender_test.go @@ -0,0 +1,713 @@ +package sender + +import ( + "context" + "errors" + "strings" + "sync" + "testing" + "time" + + "github.com/ElrondNetwork/elrond-go-core/core" + "github.com/ElrondNetwork/elrond-go-core/core/check" + "github.com/ElrondNetwork/elrond-go-core/data/batch" + "github.com/ElrondNetwork/elrond-go-crypto" + "github.com/ElrondNetwork/elrond-go-crypto/signing" + "github.com/ElrondNetwork/elrond-go-crypto/signing/ed25519" + ed25519SingleSig "github.com/ElrondNetwork/elrond-go-crypto/signing/ed25519/singlesig" + "github.com/ElrondNetwork/elrond-go-crypto/signing/mcl" + "github.com/ElrondNetwork/elrond-go-crypto/signing/mcl/singlesig" + "github.com/ElrondNetwork/elrond-go/heartbeat" + "github.com/ElrondNetwork/elrond-go/heartbeat/mock" + "github.com/ElrondNetwork/elrond-go/sharding/nodesCoordinator" + "github.com/ElrondNetwork/elrond-go/testscommon" + "github.com/ElrondNetwork/elrond-go/testscommon/cryptoMocks" + "github.com/ElrondNetwork/elrond-go/testscommon/shardingMocks" + "github.com/stretchr/testify/assert" +) + +var providedHardforkPubKey = []byte("provided hardfork pub key") + +func 
createMockPeerAuthenticationSenderArgs(argBase argBaseSender) argPeerAuthenticationSender { + return argPeerAuthenticationSender{ + argBaseSender: argBase, + nodesCoordinator: &shardingMocks.NodesCoordinatorStub{}, + peerSignatureHandler: &cryptoMocks.PeerSignatureHandlerStub{}, + privKey: &cryptoMocks.PrivateKeyStub{}, + redundancyHandler: &mock.RedundancyHandlerStub{}, + hardforkTrigger: &testscommon.HardforkTriggerStub{}, + hardforkTimeBetweenSends: time.Second, + hardforkTriggerPubKey: providedHardforkPubKey, + } +} + +func createMockPeerAuthenticationSenderArgsSemiIntegrationTests(baseArg argBaseSender) argPeerAuthenticationSender { + keyGen := signing.NewKeyGenerator(mcl.NewSuiteBLS12()) + sk, _ := keyGen.GeneratePair() + singleSigner := singlesig.NewBlsSigner() + + return argPeerAuthenticationSender{ + argBaseSender: baseArg, + nodesCoordinator: &shardingMocks.NodesCoordinatorStub{}, + peerSignatureHandler: &mock.PeerSignatureHandlerStub{ + VerifyPeerSignatureCalled: func(pk []byte, pid core.PeerID, signature []byte) error { + senderPubKey, err := keyGen.PublicKeyFromByteArray(pk) + if err != nil { + return err + } + return singleSigner.Verify(senderPubKey, pid.Bytes(), signature) + }, + GetPeerSignatureCalled: func(privateKey crypto.PrivateKey, pid []byte) ([]byte, error) { + return singleSigner.Sign(privateKey, pid) + }, + }, + privKey: sk, + redundancyHandler: &mock.RedundancyHandlerStub{}, + hardforkTrigger: &testscommon.HardforkTriggerStub{}, + hardforkTimeBetweenSends: time.Second, + hardforkTriggerPubKey: providedHardforkPubKey, + } +} + +func TestNewPeerAuthenticationSender(t *testing.T) { + t.Parallel() + + t.Run("nil peer messenger should error", func(t *testing.T) { + t.Parallel() + + argsBase := createMockBaseArgs() + argsBase.messenger = nil + + args := createMockPeerAuthenticationSenderArgs(argsBase) + sender, err := newPeerAuthenticationSender(args) + + assert.True(t, check.IfNil(sender)) + assert.Equal(t, heartbeat.ErrNilMessenger, err) + }) + t.Run("nil nodes coordinator should error", func(t *testing.T) { + t.Parallel() + + args := createMockPeerAuthenticationSenderArgs(createMockBaseArgs()) + args.nodesCoordinator = nil + sender, err := newPeerAuthenticationSender(args) + + assert.True(t, check.IfNil(sender)) + assert.Equal(t, heartbeat.ErrNilNodesCoordinator, err) + }) + t.Run("nil peer signature handler should error", func(t *testing.T) { + t.Parallel() + + args := createMockPeerAuthenticationSenderArgs(createMockBaseArgs()) + args.peerSignatureHandler = nil + sender, err := newPeerAuthenticationSender(args) + + assert.True(t, check.IfNil(sender)) + assert.Equal(t, heartbeat.ErrNilPeerSignatureHandler, err) + }) + t.Run("nil private key should error", func(t *testing.T) { + t.Parallel() + + args := createMockPeerAuthenticationSenderArgs(createMockBaseArgs()) + args.privKey = nil + sender, err := newPeerAuthenticationSender(args) + + assert.True(t, check.IfNil(sender)) + assert.Equal(t, heartbeat.ErrNilPrivateKey, err) + }) + t.Run("nil marshaller should error", func(t *testing.T) { + t.Parallel() + + argsBase := createMockBaseArgs() + argsBase.marshaller = nil + + args := createMockPeerAuthenticationSenderArgs(argsBase) + sender, err := newPeerAuthenticationSender(args) + + assert.True(t, check.IfNil(sender)) + assert.Equal(t, heartbeat.ErrNilMarshaller, err) + }) + t.Run("empty topic should error", func(t *testing.T) { + t.Parallel() + + argsBase := createMockBaseArgs() + argsBase.topic = "" + + args := createMockPeerAuthenticationSenderArgs(argsBase) + sender, 
err := newPeerAuthenticationSender(args) + + assert.True(t, check.IfNil(sender)) + assert.Equal(t, heartbeat.ErrEmptySendTopic, err) + }) + t.Run("nil redundancy handler should error", func(t *testing.T) { + t.Parallel() + + args := createMockPeerAuthenticationSenderArgs(createMockBaseArgs()) + args.redundancyHandler = nil + sender, err := newPeerAuthenticationSender(args) + + assert.True(t, check.IfNil(sender)) + assert.Equal(t, heartbeat.ErrNilRedundancyHandler, err) + }) + t.Run("invalid time between sends should error", func(t *testing.T) { + t.Parallel() + + argsBase := createMockBaseArgs() + argsBase.timeBetweenSends = time.Second - time.Nanosecond + + args := createMockPeerAuthenticationSenderArgs(argsBase) + sender, err := newPeerAuthenticationSender(args) + + assert.True(t, check.IfNil(sender)) + assert.True(t, errors.Is(err, heartbeat.ErrInvalidTimeDuration)) + assert.True(t, strings.Contains(err.Error(), "timeBetweenSends")) + assert.False(t, strings.Contains(err.Error(), "timeBetweenSendsWhenError")) + }) + t.Run("invalid time between sends should error", func(t *testing.T) { + t.Parallel() + + argsBase := createMockBaseArgs() + argsBase.timeBetweenSendsWhenError = time.Second - time.Nanosecond + + args := createMockPeerAuthenticationSenderArgs(argsBase) + sender, err := newPeerAuthenticationSender(args) + + assert.True(t, check.IfNil(sender)) + assert.True(t, errors.Is(err, heartbeat.ErrInvalidTimeDuration)) + assert.True(t, strings.Contains(err.Error(), "timeBetweenSendsWhenError")) + }) + t.Run("threshold too small should error", func(t *testing.T) { + t.Parallel() + + args := createMockPeerAuthenticationSenderArgs(createMockBaseArgs()) + args.thresholdBetweenSends = 0.001 + sender, err := newPeerAuthenticationSender(args) + + assert.Nil(t, sender) + assert.True(t, errors.Is(err, heartbeat.ErrInvalidThreshold)) + assert.True(t, strings.Contains(err.Error(), "thresholdBetweenSends")) + }) + t.Run("threshold too big should error", func(t *testing.T) { + t.Parallel() + + args := createMockPeerAuthenticationSenderArgs(createMockBaseArgs()) + args.thresholdBetweenSends = 1.001 + sender, err := newPeerAuthenticationSender(args) + + assert.Nil(t, sender) + assert.True(t, errors.Is(err, heartbeat.ErrInvalidThreshold)) + assert.True(t, strings.Contains(err.Error(), "thresholdBetweenSends")) + }) + t.Run("nil hardfork trigger should error", func(t *testing.T) { + t.Parallel() + + args := createMockPeerAuthenticationSenderArgs(createMockBaseArgs()) + args.hardforkTrigger = nil + sender, err := newPeerAuthenticationSender(args) + + assert.True(t, check.IfNil(sender)) + assert.Equal(t, heartbeat.ErrNilHardforkTrigger, err) + }) + t.Run("invalid time between hardforks should error", func(t *testing.T) { + t.Parallel() + + args := createMockPeerAuthenticationSenderArgs(createMockBaseArgs()) + args.hardforkTimeBetweenSends = time.Second - time.Nanosecond + sender, err := newPeerAuthenticationSender(args) + + assert.True(t, check.IfNil(sender)) + assert.True(t, errors.Is(err, heartbeat.ErrInvalidTimeDuration)) + assert.True(t, strings.Contains(err.Error(), "hardforkTimeBetweenSends")) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + args := createMockPeerAuthenticationSenderArgs(createMockBaseArgs()) + sender, err := newPeerAuthenticationSender(args) + + assert.False(t, check.IfNil(sender)) + assert.Nil(t, err) + }) +} + +func TestPeerAuthenticationSender_execute(t *testing.T) { + t.Parallel() + + t.Run("messenger Sign method fails, should return error", func(t *testing.T) 
{ + t.Parallel() + + argsBase := createMockBaseArgs() + argsBase.messenger = &mock.MessengerStub{ + SignCalled: func(payload []byte) ([]byte, error) { + return nil, expectedErr + }, + BroadcastCalled: func(topic string, buff []byte) { + assert.Fail(t, "should have not called Messenger.BroadcastCalled") + }, + } + + args := createMockPeerAuthenticationSenderArgs(argsBase) + sender, _ := newPeerAuthenticationSender(args) + + err, isHardforkTriggered := sender.execute() + assert.Equal(t, expectedErr, err) + assert.False(t, isHardforkTriggered) + }) + t.Run("marshaller fails in first time, should return error", func(t *testing.T) { + t.Parallel() + + argsBase := createMockBaseArgs() + argsBase.messenger = &mock.MessengerStub{ + BroadcastCalled: func(topic string, buff []byte) { + assert.Fail(t, "should have not called Messenger.BroadcastCalled") + }, + } + argsBase.marshaller = &testscommon.MarshalizerStub{ + MarshalCalled: func(obj interface{}) ([]byte, error) { + return nil, expectedErr + }, + } + + args := createMockPeerAuthenticationSenderArgs(argsBase) + sender, _ := newPeerAuthenticationSender(args) + + err, isHardforkTriggered := sender.execute() + assert.Equal(t, expectedErr, err) + assert.False(t, isHardforkTriggered) + }) + t.Run("get peer signature method fails, should return error", func(t *testing.T) { + t.Parallel() + + baseArgs := createMockBaseArgs() + baseArgs.messenger = &mock.MessengerStub{ + BroadcastCalled: func(topic string, buff []byte) { + assert.Fail(t, "should have not called Messenger.BroadcastCalled") + }, + } + args := createMockPeerAuthenticationSenderArgs(baseArgs) + args.peerSignatureHandler = &mock.PeerSignatureHandlerStub{ + GetPeerSignatureCalled: func(key crypto.PrivateKey, pid []byte) ([]byte, error) { + return nil, expectedErr + }, + } + sender, _ := newPeerAuthenticationSender(args) + + err, isHardforkTriggered := sender.execute() + assert.Equal(t, expectedErr, err) + assert.False(t, isHardforkTriggered) + }) + t.Run("marshaller fails for the second time, should return error", func(t *testing.T) { + t.Parallel() + + numCalls := 0 + argsBase := createMockBaseArgs() + argsBase.messenger = &mock.MessengerStub{ + BroadcastCalled: func(topic string, buff []byte) { + assert.Fail(t, "should have not called Messenger.BroadcastCalled") + }, + } + argsBase.marshaller = &testscommon.MarshalizerStub{ + MarshalCalled: func(obj interface{}) ([]byte, error) { + numCalls++ + if numCalls < 2 { + return make([]byte, 0), nil + } + return nil, expectedErr + }, + } + + args := createMockPeerAuthenticationSenderArgs(argsBase) + sender, _ := newPeerAuthenticationSender(args) + + err, isHardforkTriggered := sender.execute() + assert.Equal(t, expectedErr, err) + assert.False(t, isHardforkTriggered) + }) + t.Run("should work with stubs", func(t *testing.T) { + t.Parallel() + + argsBase := createMockBaseArgs() + broadcastCalled := false + argsBase.messenger = &mock.MessengerStub{ + BroadcastCalled: func(topic string, buff []byte) { + assert.Equal(t, argsBase.topic, topic) + broadcastCalled = true + }, + } + + args := createMockPeerAuthenticationSenderArgs(argsBase) + sender, _ := newPeerAuthenticationSender(args) + + err, isHardforkTriggered := sender.execute() + assert.Nil(t, err) + assert.True(t, broadcastCalled) + assert.False(t, isHardforkTriggered) + }) + t.Run("should work with some real components", func(t *testing.T) { + t.Parallel() + + startTime := time.Now() + // use the Elrond defined ed25519 operations instead of the secp256k1 implemented in the "real" network 
messenger, + // should work with both + keyGen := signing.NewKeyGenerator(ed25519.NewEd25519()) + skMessenger, pkMessenger := keyGen.GeneratePair() + signerMessenger := ed25519SingleSig.Ed25519Signer{} + + argsBase := createMockBaseArgs() + var buffResulted []byte + messenger := &mock.MessengerStub{ + BroadcastCalled: func(topic string, buff []byte) { + assert.Equal(t, argsBase.topic, topic) + buffResulted = buff + }, + SignCalled: func(payload []byte) ([]byte, error) { + return signerMessenger.Sign(skMessenger, payload) + }, + VerifyCalled: func(payload []byte, pid core.PeerID, signature []byte) error { + pk, _ := keyGen.PublicKeyFromByteArray(pid.Bytes()) + + return signerMessenger.Verify(pk, payload, signature) + }, + IDCalled: func() core.PeerID { + pkBytes, _ := pkMessenger.ToByteArray() + return core.PeerID(pkBytes) + }, + } + argsBase.messenger = messenger + args := createMockPeerAuthenticationSenderArgsSemiIntegrationTests(argsBase) + sender, _ := newPeerAuthenticationSender(args) + + err, isHardforkTriggered := sender.execute() + assert.Nil(t, err) + assert.False(t, isHardforkTriggered) + + skBytes, _ := sender.privKey.ToByteArray() + pkBytes, _ := sender.publicKey.ToByteArray() + log.Info("args", "pid", argsBase.messenger.ID().Pretty(), "bls sk", skBytes, "bls pk", pkBytes) + + // verify the received bytes if they can be converted in a valid peer authentication message + recoveredBatch := batch.Batch{} + err = argsBase.marshaller.Unmarshal(&recoveredBatch, buffResulted) + assert.Nil(t, err) + recoveredMessage := &heartbeat.PeerAuthentication{} + err = argsBase.marshaller.Unmarshal(recoveredMessage, recoveredBatch.Data[0]) + assert.Nil(t, err) + assert.Equal(t, pkBytes, recoveredMessage.Pubkey) + assert.Equal(t, argsBase.messenger.ID().Pretty(), core.PeerID(recoveredMessage.Pid).Pretty()) + t.Run("verify BLS sig on having the payload == message's pid", func(t *testing.T) { + errVerify := args.peerSignatureHandler.VerifyPeerSignature(recoveredMessage.Pubkey, core.PeerID(recoveredMessage.Pid), recoveredMessage.Signature) + assert.Nil(t, errVerify) + }) + t.Run("verify ed25519 sig having the payload == message's payload", func(t *testing.T) { + errVerify := messenger.Verify(recoveredMessage.Payload, core.PeerID(recoveredMessage.Pid), recoveredMessage.PayloadSignature) + assert.Nil(t, errVerify) + }) + t.Run("verify payload", func(t *testing.T) { + recoveredPayload := &heartbeat.Payload{} + err = argsBase.marshaller.Unmarshal(recoveredPayload, recoveredMessage.Payload) + assert.Nil(t, err) + + endTime := time.Now() + + messageTime := time.Unix(recoveredPayload.Timestamp, 0) + assert.True(t, startTime.Unix() <= messageTime.Unix()) + assert.True(t, messageTime.Unix() <= endTime.Unix()) + }) + }) +} + +func TestPeerAuthenticationSender_Execute(t *testing.T) { + t.Parallel() + + t.Run("observer should not execute", func(t *testing.T) { + t.Parallel() + + argsBase := createMockBaseArgs() + wasBroadcastCalled := false + argsBase.messenger = &mock.MessengerStub{ + BroadcastCalled: func(topic string, buff []byte) { + wasBroadcastCalled = true + }, + } + args := createMockPeerAuthenticationSenderArgs(argsBase) + args.nodesCoordinator = &shardingMocks.NodesCoordinatorStub{ + GetValidatorWithPublicKeyCalled: func(publicKey []byte) (validator nodesCoordinator.Validator, shardId uint32, err error) { + return nil, 0, errors.New("observer") + }, + } + sender, _ := newPeerAuthenticationSender(args) + + sender.Execute() + assert.False(t, wasBroadcastCalled) + }) + t.Run("execute errors, should set the 
error time duration value", func(t *testing.T) { + t.Parallel() + + wasCalled := false + argsBase := createMockBaseArgs() + argsBase.timeBetweenSendsWhenError = time.Second * 3 + argsBase.timeBetweenSends = time.Second * 2 + + args := createMockPeerAuthenticationSenderArgs(argsBase) + args.peerSignatureHandler = &mock.PeerSignatureHandlerStub{ + GetPeerSignatureCalled: func(key crypto.PrivateKey, pid []byte) ([]byte, error) { + return nil, errors.New("error") + }, + } + + sender, _ := newPeerAuthenticationSender(args) + sender.timerHandler = &mock.TimerHandlerStub{ + CreateNewTimerCalled: func(duration time.Duration) { + assert.Equal(t, argsBase.timeBetweenSendsWhenError, duration) + wasCalled = true + }, + } + + sender.Execute() + assert.True(t, wasCalled) + }) + t.Run("execute worked, should set the normal time duration value", func(t *testing.T) { + t.Parallel() + + wasCalled := false + argsBase := createMockBaseArgs() + argsBase.timeBetweenSendsWhenError = time.Second * 3 + argsBase.timeBetweenSends = time.Second * 2 + args := createMockPeerAuthenticationSenderArgs(argsBase) + + sender, _ := newPeerAuthenticationSender(args) + sender.timerHandler = &mock.TimerHandlerStub{ + CreateNewTimerCalled: func(duration time.Duration) { + floatTBS := float64(argsBase.timeBetweenSends.Nanoseconds()) + maxDuration := floatTBS + floatTBS*argsBase.thresholdBetweenSends + assert.True(t, time.Duration(maxDuration) > duration) + assert.True(t, argsBase.timeBetweenSends <= duration) + wasCalled = true + }, + } + + sender.Execute() + assert.True(t, wasCalled) + }) + t.Run("observer->validator->observer should work", func(t *testing.T) { + t.Parallel() + + argsBase := createMockBaseArgs() + counterBroadcast := 0 + argsBase.messenger = &mock.MessengerStub{ + BroadcastCalled: func(topic string, buff []byte) { + counterBroadcast++ + }, + } + args := createMockPeerAuthenticationSenderArgs(argsBase) + counter := 0 + args.nodesCoordinator = &shardingMocks.NodesCoordinatorStub{ + GetValidatorWithPublicKeyCalled: func(publicKey []byte) (validator nodesCoordinator.Validator, shardId uint32, err error) { + counter++ + if counter == 2 { + return nil, 0, nil // validator + } + + return nil, 0, errors.New("observer") // observer + }, + } + + sender, _ := newPeerAuthenticationSender(args) + + sender.Execute() // observer + sender.Execute() // validator + sender.Execute() // observer + assert.Equal(t, 1, counterBroadcast) + }) + t.Run("execute worked, should set the hardfork time duration value", func(t *testing.T) { + t.Parallel() + + wasCalled := false + argsBase := createMockBaseArgs() + args := createMockPeerAuthenticationSenderArgs(argsBase) + args.hardforkTimeBetweenSends = time.Second * 3 + args.hardforkTrigger = &testscommon.HardforkTriggerStub{ + RecordedTriggerMessageCalled: func() ([]byte, bool) { + return make([]byte, 0), true + }, + } + sender, _ := newPeerAuthenticationSender(args) + sender.timerHandler = &mock.TimerHandlerStub{ + CreateNewTimerCalled: func(duration time.Duration) { + floatTBH := float64(args.hardforkTimeBetweenSends.Nanoseconds()) + maxDuration := floatTBH + floatTBH*argsBase.thresholdBetweenSends + assert.True(t, time.Duration(maxDuration) > duration) + assert.True(t, args.hardforkTimeBetweenSends <= duration) + wasCalled = true + }, + } + + sender.Execute() + assert.True(t, wasCalled) + }) +} + +func TestPeerAuthenticationSender_getCurrentPrivateAndPublicKeys(t *testing.T) { + t.Parallel() + + t.Run("is not redundancy node should return regular keys", func(t *testing.T) { + t.Parallel() 
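+ // not a redundancy node: getCurrentPrivateAndPublicKeys should return the sender's own key pair (the pointer-equality asserts below check exactly that)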
+ + args := createMockPeerAuthenticationSenderArgs(createMockBaseArgs()) + args.redundancyHandler = &mock.RedundancyHandlerStub{ + IsRedundancyNodeCalled: func() bool { + return false + }, + } + sender, _ := newPeerAuthenticationSender(args) + sk, pk := sender.getCurrentPrivateAndPublicKeys() + assert.True(t, sk == args.privKey) // pointer testing + assert.True(t, pk == sender.publicKey) // pointer testing + }) + t.Run("is redundancy node but the main machine is not active should return regular keys", func(t *testing.T) { + t.Parallel() + + args := createMockPeerAuthenticationSenderArgs(createMockBaseArgs()) + args.redundancyHandler = &mock.RedundancyHandlerStub{ + IsRedundancyNodeCalled: func() bool { + return true + }, + IsMainMachineActiveCalled: func() bool { + return false + }, + } + sender, _ := newPeerAuthenticationSender(args) + sk, pk := sender.getCurrentPrivateAndPublicKeys() + assert.True(t, sk == args.privKey) // pointer testing + assert.True(t, pk == sender.publicKey) // pointer testing + }) + t.Run("is redundancy node but the main machine is active should return the observer keys", func(t *testing.T) { + t.Parallel() + + observerSk := &mock.PrivateKeyStub{} + args := createMockPeerAuthenticationSenderArgs(createMockBaseArgs()) + args.redundancyHandler = &mock.RedundancyHandlerStub{ + IsRedundancyNodeCalled: func() bool { + return true + }, + IsMainMachineActiveCalled: func() bool { + return true + }, + ObserverPrivateKeyCalled: func() crypto.PrivateKey { + return observerSk + }, + } + sender, _ := newPeerAuthenticationSender(args) + sk, pk := sender.getCurrentPrivateAndPublicKeys() + assert.True(t, sk == args.redundancyHandler.ObserverPrivateKey()) // pointer testing + assert.True(t, pk == sender.observerPublicKey) // pointer testing + }) + t.Run("call from multiple threads", func(t *testing.T) { + t.Parallel() + + defer func() { + r := recover() + if r != nil { + assert.Fail(t, "should not panic") + } + }() + + args := createMockPeerAuthenticationSenderArgs(createMockBaseArgs()) + args.redundancyHandler = &mock.RedundancyHandlerStub{ + IsRedundancyNodeCalled: func() bool { + return false + }, + } + sender, _ := newPeerAuthenticationSender(args) + + numOfThreads := 10 + var wg sync.WaitGroup + wg.Add(numOfThreads) + for i := 0; i < numOfThreads; i++ { + go func() { + defer wg.Done() + sk, pk := sender.getCurrentPrivateAndPublicKeys() + assert.True(t, sk == args.privKey) // pointer testing + assert.True(t, pk == sender.publicKey) // pointer testing + }() + } + + wg.Wait() + }) +} + +func TestPeerAuthenticationSender_getHardforkPayload(t *testing.T) { + t.Parallel() + + t.Run("hardfork not triggered should work", func(t *testing.T) { + t.Parallel() + + providedPayload := make([]byte, 0) + args := createMockPeerAuthenticationSenderArgs(createMockBaseArgs()) + args.hardforkTrigger = &testscommon.HardforkTriggerStub{ + RecordedTriggerMessageCalled: func() ([]byte, bool) { + return nil, false + }, + } + + sender, _ := newPeerAuthenticationSender(args) + + payload, isTriggered := sender.getHardforkPayload() + assert.False(t, isTriggered) + assert.Equal(t, providedPayload, payload) + }) + t.Run("hardfork triggered should work", func(t *testing.T) { + t.Parallel() + + providedPayload := []byte("provided payload") + args := createMockPeerAuthenticationSenderArgs(createMockBaseArgs()) + args.hardforkTrigger = &testscommon.HardforkTriggerStub{ + RecordedTriggerMessageCalled: func() ([]byte, bool) { + return nil, true + }, + CreateDataCalled: func() []byte { + return providedPayload + }, 
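+ // this stub simulates a recorded hardfork trigger whose payload comes from CreateDataCalled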
+ } + + sender, _ := newPeerAuthenticationSender(args) + + payload, isTriggered := sender.getHardforkPayload() + assert.True(t, isTriggered) + assert.Equal(t, providedPayload, payload) + }) +} + +func TestPeerAuthenticationSender_ShouldTriggerHardfork(t *testing.T) { + t.Parallel() + + defer func() { + r := recover() + if r != nil { + assert.Fail(t, "should not panic") + } + }() + + ch := make(chan struct{}) + args := createMockPeerAuthenticationSenderArgs(createMockBaseArgs()) + args.hardforkTrigger = &testscommon.HardforkTriggerStub{ + NotifyTriggerReceivedV2Called: func() <-chan struct{} { + return ch + }, + } + + go func() { + time.Sleep(time.Second) + ch <- struct{}{} + }() + + sender, _ := newPeerAuthenticationSender(args) + + ctx, cancel := context.WithTimeout(context.Background(), time.Second*3) + defer cancel() + select { + case <-sender.ShouldTriggerHardfork(): + return + case <-ctx.Done(): + assert.Fail(t, "should not reach timeout") + } +} diff --git a/heartbeat/sender/routineHandler.go b/heartbeat/sender/routineHandler.go new file mode 100644 index 00000000000..6bfb405d90b --- /dev/null +++ b/heartbeat/sender/routineHandler.go @@ -0,0 +1,60 @@ +package sender + +import ( + "context" + + logger "github.com/ElrondNetwork/elrond-go-logger" +) + +var log = logger.GetOrCreate("heartbeat/sender") + +type routineHandler struct { + peerAuthenticationSender senderHandler + heartbeatSender senderHandler + hardforkSender hardforkHandler + cancel func() +} + +func newRoutineHandler(peerAuthenticationSender senderHandler, heartbeatSender senderHandler, hardforkSender hardforkHandler) *routineHandler { + handler := &routineHandler{ + peerAuthenticationSender: peerAuthenticationSender, + heartbeatSender: heartbeatSender, + hardforkSender: hardforkSender, + } + + var ctx context.Context + ctx, handler.cancel = context.WithCancel(context.Background()) + go handler.processLoop(ctx) + + return handler +} + +func (handler *routineHandler) processLoop(ctx context.Context) { + defer func() { + log.Debug("heartbeat's routine handler is closing...") + + handler.peerAuthenticationSender.Close() + handler.heartbeatSender.Close() + handler.hardforkSender.Close() + }() + + handler.peerAuthenticationSender.Execute() + handler.heartbeatSender.Execute() + + for { + select { + case <-handler.peerAuthenticationSender.ExecutionReadyChannel(): + handler.peerAuthenticationSender.Execute() + case <-handler.heartbeatSender.ExecutionReadyChannel(): + handler.heartbeatSender.Execute() + case <-handler.hardforkSender.ShouldTriggerHardfork(): + handler.hardforkSender.Execute() + case <-ctx.Done(): + return + } + } +} + +func (handler *routineHandler) closeProcessLoop() { + handler.cancel() +} diff --git a/heartbeat/sender/routineHandler_test.go b/heartbeat/sender/routineHandler_test.go new file mode 100644 index 00000000000..3d693deac91 --- /dev/null +++ b/heartbeat/sender/routineHandler_test.go @@ -0,0 +1,129 @@ +package sender + +import ( + "sync/atomic" + "testing" + "time" + + "github.com/ElrondNetwork/elrond-go/heartbeat/mock" + "github.com/stretchr/testify/assert" +) + +func TestRoutineHandler_ShouldWork(t *testing.T) { + t.Parallel() + + t.Run("should work concurrently, calling all handlers, twice", func(t *testing.T) { + t.Parallel() + + ch1 := make(chan time.Time) + ch2 := make(chan time.Time) + ch3 := make(chan struct{}) + + numExecuteCalled1 := uint32(0) + numExecuteCalled2 := uint32(0) + numExecuteCalled3 := uint32(0) + + handler1 := &mock.SenderHandlerStub{ + ExecutionReadyChannelCalled: func() <-chan 
time.Time { + return ch1 + }, + ExecuteCalled: func() { + atomic.AddUint32(&numExecuteCalled1, 1) + }, + } + handler2 := &mock.SenderHandlerStub{ + ExecutionReadyChannelCalled: func() <-chan time.Time { + return ch2 + }, + ExecuteCalled: func() { + atomic.AddUint32(&numExecuteCalled2, 1) + }, + } + handler3 := &mock.HardforkHandlerStub{ + ShouldTriggerHardforkCalled: func() <-chan struct{} { + return ch3 + }, + ExecuteCalled: func() { + atomic.AddUint32(&numExecuteCalled3, 1) + }, + } + + _ = newRoutineHandler(handler1, handler2, handler3) + time.Sleep(time.Second) // wait for the go routine start + + assert.Equal(t, uint32(1), atomic.LoadUint32(&numExecuteCalled1)) // initial call + assert.Equal(t, uint32(1), atomic.LoadUint32(&numExecuteCalled2)) // initial call + + go func() { + time.Sleep(time.Millisecond * 100) + ch1 <- time.Now() + }() + go func() { + time.Sleep(time.Millisecond * 100) + ch2 <- time.Now() + }() + go func() { + time.Sleep(time.Millisecond * 100) + ch3 <- struct{}{} + }() + + time.Sleep(time.Second) // wait for the iteration + + assert.Equal(t, uint32(2), atomic.LoadUint32(&numExecuteCalled1)) + assert.Equal(t, uint32(2), atomic.LoadUint32(&numExecuteCalled2)) + assert.Equal(t, uint32(1), atomic.LoadUint32(&numExecuteCalled3)) + }) + t.Run("close should work", func(t *testing.T) { + t.Parallel() + + ch1 := make(chan time.Time) + ch2 := make(chan time.Time) + + numExecuteCalled1 := uint32(0) + numExecuteCalled2 := uint32(0) + + numCloseCalled1 := uint32(0) + numCloseCalled2 := uint32(0) + + handler1 := &mock.SenderHandlerStub{ + ExecutionReadyChannelCalled: func() <-chan time.Time { + return ch1 + }, + ExecuteCalled: func() { + atomic.AddUint32(&numExecuteCalled1, 1) + }, + CloseCalled: func() { + atomic.AddUint32(&numCloseCalled1, 1) + }, + } + handler2 := &mock.SenderHandlerStub{ + ExecutionReadyChannelCalled: func() <-chan time.Time { + return ch2 + }, + ExecuteCalled: func() { + atomic.AddUint32(&numExecuteCalled2, 1) + }, + CloseCalled: func() { + atomic.AddUint32(&numCloseCalled2, 1) + }, + } + handler3 := &mock.HardforkHandlerStub{} + + rh := newRoutineHandler(handler1, handler2, handler3) + time.Sleep(time.Second) // wait for the go routine start + + assert.Equal(t, uint32(1), atomic.LoadUint32(&numExecuteCalled1)) // initial call + assert.Equal(t, uint32(1), atomic.LoadUint32(&numExecuteCalled2)) // initial call + assert.Equal(t, uint32(0), atomic.LoadUint32(&numCloseCalled1)) + assert.Equal(t, uint32(0), atomic.LoadUint32(&numCloseCalled2)) + + rh.closeProcessLoop() + + time.Sleep(time.Second) // wait for the go routine to stop + + assert.Equal(t, uint32(1), atomic.LoadUint32(&numExecuteCalled1)) + assert.Equal(t, uint32(1), atomic.LoadUint32(&numExecuteCalled2)) + assert.Equal(t, uint32(1), atomic.LoadUint32(&numCloseCalled1)) + assert.Equal(t, uint32(1), atomic.LoadUint32(&numCloseCalled2)) + }) +} diff --git a/heartbeat/sender/sender.go b/heartbeat/sender/sender.go new file mode 100644 index 00000000000..32637a77c0a --- /dev/null +++ b/heartbeat/sender/sender.go @@ -0,0 +1,146 @@ +package sender + +import ( + "time" + + "github.com/ElrondNetwork/elrond-go-core/core" + "github.com/ElrondNetwork/elrond-go-core/marshal" + crypto "github.com/ElrondNetwork/elrond-go-crypto" + "github.com/ElrondNetwork/elrond-go/heartbeat" +) + +// ArgSender represents the arguments for the sender +type ArgSender struct { + Messenger heartbeat.P2PMessenger + Marshaller marshal.Marshalizer + PeerAuthenticationTopic string + HeartbeatTopic string + 
PeerAuthenticationTimeBetweenSends time.Duration + PeerAuthenticationTimeBetweenSendsWhenError time.Duration + PeerAuthenticationThresholdBetweenSends float64 + HeartbeatTimeBetweenSends time.Duration + HeartbeatTimeBetweenSendsWhenError time.Duration + HeartbeatThresholdBetweenSends float64 + VersionNumber string + NodeDisplayName string + Identity string + PeerSubType core.P2PPeerSubType + CurrentBlockProvider heartbeat.CurrentBlockProvider + PeerSignatureHandler crypto.PeerSignatureHandler + PrivateKey crypto.PrivateKey + RedundancyHandler heartbeat.NodeRedundancyHandler + NodesCoordinator heartbeat.NodesCoordinator + HardforkTrigger heartbeat.HardforkTrigger + HardforkTimeBetweenSends time.Duration + HardforkTriggerPubKey []byte +} + +// sender defines the component which sends authentication and heartbeat messages +type sender struct { + routineHandler *routineHandler +} + +// NewSender creates a new instance of sender +func NewSender(args ArgSender) (*sender, error) { + err := checkSenderArgs(args) + if err != nil { + return nil, err + } + + pas, err := newPeerAuthenticationSender(argPeerAuthenticationSender{ + argBaseSender: argBaseSender{ + messenger: args.Messenger, + marshaller: args.Marshaller, + topic: args.PeerAuthenticationTopic, + timeBetweenSends: args.PeerAuthenticationTimeBetweenSends, + timeBetweenSendsWhenError: args.PeerAuthenticationTimeBetweenSendsWhenError, + thresholdBetweenSends: args.PeerAuthenticationThresholdBetweenSends, + }, + nodesCoordinator: args.NodesCoordinator, + peerSignatureHandler: args.PeerSignatureHandler, + privKey: args.PrivateKey, + redundancyHandler: args.RedundancyHandler, + hardforkTrigger: args.HardforkTrigger, + hardforkTimeBetweenSends: args.HardforkTimeBetweenSends, + hardforkTriggerPubKey: args.HardforkTriggerPubKey, + }) + if err != nil { + return nil, err + } + + hbs, err := newHeartbeatSender(argHeartbeatSender{ + argBaseSender: argBaseSender{ + messenger: args.Messenger, + marshaller: args.Marshaller, + topic: args.HeartbeatTopic, + timeBetweenSends: args.HeartbeatTimeBetweenSends, + timeBetweenSendsWhenError: args.HeartbeatTimeBetweenSendsWhenError, + thresholdBetweenSends: args.HeartbeatThresholdBetweenSends, + }, + versionNumber: args.VersionNumber, + nodeDisplayName: args.NodeDisplayName, + identity: args.Identity, + peerSubType: args.PeerSubType, + currentBlockProvider: args.CurrentBlockProvider, + }) + if err != nil { + return nil, err + } + + return &sender{ + routineHandler: newRoutineHandler(pas, hbs, pas), + }, nil +} + +func checkSenderArgs(args ArgSender) error { + pasArg := argPeerAuthenticationSender{ + argBaseSender: argBaseSender{ + messenger: args.Messenger, + marshaller: args.Marshaller, + topic: args.PeerAuthenticationTopic, + timeBetweenSends: args.PeerAuthenticationTimeBetweenSends, + timeBetweenSendsWhenError: args.PeerAuthenticationTimeBetweenSendsWhenError, + thresholdBetweenSends: args.PeerAuthenticationThresholdBetweenSends, + }, + nodesCoordinator: args.NodesCoordinator, + peerSignatureHandler: args.PeerSignatureHandler, + privKey: args.PrivateKey, + redundancyHandler: args.RedundancyHandler, + hardforkTrigger: args.HardforkTrigger, + hardforkTimeBetweenSends: args.HardforkTimeBetweenSends, + hardforkTriggerPubKey: args.HardforkTriggerPubKey, + } + err := checkPeerAuthenticationSenderArgs(pasArg) + if err != nil { + return err + } + + hbsArgs := argHeartbeatSender{ + argBaseSender: argBaseSender{ + messenger: args.Messenger, + marshaller: args.Marshaller, + topic: args.HeartbeatTopic, + timeBetweenSends: 
args.HeartbeatTimeBetweenSends, + timeBetweenSendsWhenError: args.HeartbeatTimeBetweenSendsWhenError, + thresholdBetweenSends: args.HeartbeatThresholdBetweenSends, + }, + versionNumber: args.VersionNumber, + nodeDisplayName: args.NodeDisplayName, + identity: args.Identity, + peerSubType: args.PeerSubType, + currentBlockProvider: args.CurrentBlockProvider, + } + return checkHeartbeatSenderArgs(hbsArgs) +} + +// Close closes the internal components +func (sender *sender) Close() error { + sender.routineHandler.closeProcessLoop() + + return nil +} + +// IsInterfaceNil returns true if there is no value under the interface +func (sender *sender) IsInterfaceNil() bool { + return sender == nil +} diff --git a/heartbeat/sender/sender_test.go b/heartbeat/sender/sender_test.go new file mode 100644 index 00000000000..de10d202db5 --- /dev/null +++ b/heartbeat/sender/sender_test.go @@ -0,0 +1,253 @@ +package sender + +import ( + "errors" + "strings" + "testing" + "time" + + "github.com/ElrondNetwork/elrond-go-core/core" + "github.com/ElrondNetwork/elrond-go-core/core/check" + "github.com/ElrondNetwork/elrond-go/heartbeat" + "github.com/ElrondNetwork/elrond-go/heartbeat/mock" + "github.com/ElrondNetwork/elrond-go/testscommon" + "github.com/ElrondNetwork/elrond-go/testscommon/cryptoMocks" + "github.com/ElrondNetwork/elrond-go/testscommon/p2pmocks" + "github.com/ElrondNetwork/elrond-go/testscommon/shardingMocks" + "github.com/stretchr/testify/assert" +) + +func createMockSenderArgs() ArgSender { + return ArgSender{ + Messenger: &p2pmocks.MessengerStub{}, + Marshaller: &testscommon.MarshalizerMock{}, + PeerAuthenticationTopic: "pa-topic", + HeartbeatTopic: "hb-topic", + PeerAuthenticationTimeBetweenSends: time.Second, + PeerAuthenticationTimeBetweenSendsWhenError: time.Second, + PeerAuthenticationThresholdBetweenSends: 0.1, + HeartbeatTimeBetweenSends: time.Second, + HeartbeatTimeBetweenSendsWhenError: time.Second, + HeartbeatThresholdBetweenSends: 0.1, + VersionNumber: "v1", + NodeDisplayName: "node", + Identity: "identity", + PeerSubType: core.RegularPeer, + CurrentBlockProvider: &mock.CurrentBlockProviderStub{}, + PeerSignatureHandler: &cryptoMocks.PeerSignatureHandlerStub{}, + PrivateKey: &cryptoMocks.PrivateKeyStub{}, + RedundancyHandler: &mock.RedundancyHandlerStub{}, + NodesCoordinator: &shardingMocks.NodesCoordinatorStub{}, + HardforkTrigger: &testscommon.HardforkTriggerStub{}, + HardforkTimeBetweenSends: time.Second, + HardforkTriggerPubKey: providedHardforkPubKey, + } +} + +func TestNewSender(t *testing.T) { + t.Parallel() + + t.Run("nil peer messenger should error", func(t *testing.T) { + t.Parallel() + + args := createMockSenderArgs() + args.Messenger = nil + sender, err := NewSender(args) + + assert.Nil(t, sender) + assert.Equal(t, heartbeat.ErrNilMessenger, err) + }) + t.Run("nil marshaller should error", func(t *testing.T) { + t.Parallel() + + args := createMockSenderArgs() + args.Marshaller = nil + sender, err := NewSender(args) + + assert.Nil(t, sender) + assert.Equal(t, heartbeat.ErrNilMarshaller, err) + }) + t.Run("empty peer auth topic should error", func(t *testing.T) { + t.Parallel() + + args := createMockSenderArgs() + args.PeerAuthenticationTopic = "" + sender, err := NewSender(args) + + assert.Nil(t, sender) + assert.Equal(t, heartbeat.ErrEmptySendTopic, err) + }) + t.Run("empty heartbeat topic should error", func(t *testing.T) { + t.Parallel() + + args := createMockSenderArgs() + args.HeartbeatTopic = "" + sender, err := NewSender(args) + + assert.Nil(t, sender) + assert.Equal(t, 
heartbeat.ErrEmptySendTopic, err) + }) + t.Run("invalid peer auth time between sends should error", func(t *testing.T) { + t.Parallel() + + args := createMockSenderArgs() + args.PeerAuthenticationTimeBetweenSends = time.Second - time.Nanosecond + sender, err := NewSender(args) + + assert.Nil(t, sender) + assert.True(t, errors.Is(err, heartbeat.ErrInvalidTimeDuration)) + assert.True(t, strings.Contains(err.Error(), "timeBetweenSends")) + assert.False(t, strings.Contains(err.Error(), "timeBetweenSendsWhenError")) + }) + t.Run("invalid peer auth time between sends should error", func(t *testing.T) { + t.Parallel() + + args := createMockSenderArgs() + args.PeerAuthenticationTimeBetweenSendsWhenError = time.Second - time.Nanosecond + sender, err := NewSender(args) + + assert.Nil(t, sender) + assert.True(t, errors.Is(err, heartbeat.ErrInvalidTimeDuration)) + assert.True(t, strings.Contains(err.Error(), "timeBetweenSendsWhenError")) + }) + t.Run("invalid time between sends should error", func(t *testing.T) { + t.Parallel() + + args := createMockSenderArgs() + args.HeartbeatTimeBetweenSends = time.Second - time.Nanosecond + sender, err := NewSender(args) + + assert.Nil(t, sender) + assert.True(t, errors.Is(err, heartbeat.ErrInvalidTimeDuration)) + assert.True(t, strings.Contains(err.Error(), "timeBetweenSends")) + assert.False(t, strings.Contains(err.Error(), "timeBetweenSendsWhenError")) + }) + t.Run("invalid time between sends should error", func(t *testing.T) { + t.Parallel() + + args := createMockSenderArgs() + args.HeartbeatTimeBetweenSendsWhenError = time.Second - time.Nanosecond + sender, err := NewSender(args) + + assert.Nil(t, sender) + assert.True(t, errors.Is(err, heartbeat.ErrInvalidTimeDuration)) + assert.True(t, strings.Contains(err.Error(), "timeBetweenSendsWhenError")) + }) + t.Run("empty version number should error", func(t *testing.T) { + t.Parallel() + + args := createMockSenderArgs() + args.VersionNumber = string(make([]byte, 150)) + sender, err := NewSender(args) + + assert.Nil(t, sender) + assert.Equal(t, heartbeat.ErrPropertyTooLong, err) + }) + t.Run("nil current block provider should error", func(t *testing.T) { + t.Parallel() + + args := createMockSenderArgs() + args.CurrentBlockProvider = nil + sender, err := NewSender(args) + + assert.Nil(t, sender) + assert.Equal(t, heartbeat.ErrNilCurrentBlockProvider, err) + }) + t.Run("nil nodes coordinator should error", func(t *testing.T) { + t.Parallel() + + args := createMockSenderArgs() + args.NodesCoordinator = nil + sender, err := NewSender(args) + + assert.Nil(t, sender) + assert.Equal(t, heartbeat.ErrNilNodesCoordinator, err) + }) + t.Run("nil peer signature handler should error", func(t *testing.T) { + t.Parallel() + + args := createMockSenderArgs() + args.PeerSignatureHandler = nil + sender, err := NewSender(args) + + assert.Nil(t, sender) + assert.Equal(t, heartbeat.ErrNilPeerSignatureHandler, err) + }) + t.Run("nil private key should error", func(t *testing.T) { + t.Parallel() + + args := createMockSenderArgs() + args.PrivateKey = nil + sender, err := NewSender(args) + + assert.Nil(t, sender) + assert.Equal(t, heartbeat.ErrNilPrivateKey, err) + }) + t.Run("nil redundancy handler should error", func(t *testing.T) { + t.Parallel() + + args := createMockSenderArgs() + args.RedundancyHandler = nil + sender, err := NewSender(args) + + assert.Nil(t, sender) + assert.Equal(t, heartbeat.ErrNilRedundancyHandler, err) + }) + t.Run("nil hardfork trigger should error", func(t *testing.T) { + t.Parallel() + + args := 
createMockSenderArgs() + args.HardforkTrigger = nil + sender, err := NewSender(args) + + assert.Nil(t, sender) + assert.Equal(t, heartbeat.ErrNilHardforkTrigger, err) + }) + t.Run("invalid time between hardforks should error", func(t *testing.T) { + t.Parallel() + + args := createMockSenderArgs() + args.HardforkTimeBetweenSends = time.Second - time.Nanosecond + sender, err := NewSender(args) + + assert.Nil(t, sender) + assert.True(t, errors.Is(err, heartbeat.ErrInvalidTimeDuration)) + assert.True(t, strings.Contains(err.Error(), "hardforkTimeBetweenSends")) + }) + t.Run("invalid hardfork pub key should error", func(t *testing.T) { + t.Parallel() + + args := createMockSenderArgs() + args.HardforkTriggerPubKey = make([]byte, 0) + sender, err := NewSender(args) + + assert.Nil(t, sender) + assert.True(t, errors.Is(err, heartbeat.ErrInvalidValue)) + assert.True(t, strings.Contains(err.Error(), "hardfork")) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + args := createMockSenderArgs() + sender, err := NewSender(args) + + assert.False(t, check.IfNil(sender)) + assert.Nil(t, err) + }) +} + +func TestSender_Close(t *testing.T) { + t.Parallel() + + defer func() { + r := recover() + if r != nil { + assert.Fail(t, "should not panic") + } + }() + + args := createMockSenderArgs() + sender, _ := NewSender(args) + err := sender.Close() + assert.Nil(t, err) +} diff --git a/heartbeat/sender/timerWrapper.go b/heartbeat/sender/timerWrapper.go new file mode 100644 index 00000000000..ea0e85f3fb6 --- /dev/null +++ b/heartbeat/sender/timerWrapper.go @@ -0,0 +1,44 @@ +package sender + +import ( + "sync" + "time" +) + +type timerWrapper struct { + mutTimer sync.Mutex + timer *time.Timer +} + +// CreateNewTimer will stop the existing timer and will initialize a new one +func (wrapper *timerWrapper) CreateNewTimer(duration time.Duration) { + wrapper.mutTimer.Lock() + wrapper.stopTimer() + wrapper.timer = time.NewTimer(duration) + wrapper.mutTimer.Unlock() +} + +// ExecutionReadyChannel returns the chan on which the ticker will emit periodic values as to signal that +// the execution is ready to take place +func (wrapper *timerWrapper) ExecutionReadyChannel() <-chan time.Time { + wrapper.mutTimer.Lock() + defer wrapper.mutTimer.Unlock() + + return wrapper.timer.C +} + +func (wrapper *timerWrapper) stopTimer() { + if wrapper.timer == nil { + return + } + + wrapper.timer.Stop() +} + +// Close will simply stop the inner timer so this component won't contain leaked resource +func (wrapper *timerWrapper) Close() { + wrapper.mutTimer.Lock() + defer wrapper.mutTimer.Unlock() + + wrapper.stopTimer() +} diff --git a/heartbeat/sender/timerWrapper_test.go b/heartbeat/sender/timerWrapper_test.go new file mode 100644 index 00000000000..ced0c0ee822 --- /dev/null +++ b/heartbeat/sender/timerWrapper_test.go @@ -0,0 +1,114 @@ +package sender + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +func TestTimerWrapper_createTimerAndExecutionReadyChannel(t *testing.T) { + t.Parallel() + + t.Run("should work", func(t *testing.T) { + t.Parallel() + + ctx, cancel := context.WithTimeout(context.Background(), time.Second*10) + defer cancel() + + wrapper := &timerWrapper{} + wrapper.CreateNewTimer(time.Second) + select { + case <-wrapper.ExecutionReadyChannel(): + return + case <-ctx.Done(): + assert.Fail(t, "timeout reached") + } + }) + t.Run("double call to should execute, should work", func(t *testing.T) { + t.Parallel() + + ctx, cancel := 
context.WithTimeout(context.Background(), time.Second*10) + defer cancel() + + wrapper := &timerWrapper{} + wrapper.CreateNewTimer(time.Second) + wrapper.CreateNewTimer(time.Second) + select { + case <-wrapper.ExecutionReadyChannel(): + return + case <-ctx.Done(): + assert.Fail(t, "timeout reached") + } + }) +} + +func TestTimerWrapper_Close(t *testing.T) { + t.Parallel() + + t.Run("close on a nil timer should not panic", func(t *testing.T) { + defer func() { + r := recover() + if r != nil { + assert.Fail(t, "should have not panicked") + } + }() + wrapper := &timerWrapper{} + wrapper.Close() + }) + t.Run("double close on a valid timer should not panic", func(t *testing.T) { + defer func() { + r := recover() + if r != nil { + assert.Fail(t, "should have not panicked") + } + }() + wrapper := &timerWrapper{} + wrapper.CreateNewTimer(time.Second) + wrapper.Close() + wrapper.Close() + }) + t.Run("close should stop the timer", func(t *testing.T) { + wrapper := &timerWrapper{} + wrapper.CreateNewTimer(time.Second) + + wrapper.Close() + + ctx, cancel := context.WithTimeout(context.Background(), time.Second*10) + defer cancel() + select { + case <-wrapper.ExecutionReadyChannel(): + assert.Fail(t, "should have not called execute again") + case <-ctx.Done(): + return + } + }) +} + +func TestTimerWrapper_ExecutionReadyChannelMultipleTriggers(t *testing.T) { + t.Parallel() + + wrapper := &timerWrapper{} + wrapper.CreateNewTimer(time.Second) + numTriggers := 5 + numExecuted := 0 + for i := 0; i < numTriggers; i++ { + ctx, cancel := context.WithTimeout(context.Background(), time.Second*10) + select { + case <-ctx.Done(): + assert.Fail(t, "timeout reached in iteration") + cancel() + return + case <-wrapper.ExecutionReadyChannel(): + fmt.Printf("iteration %d\n", i) + numExecuted++ + wrapper.CreateNewTimer(time.Second) + } + + cancel() + } + + assert.Equal(t, numTriggers, numExecuted) +} diff --git a/heartbeat/storage/heartbeatStorer.go b/heartbeat/storage/heartbeatStorer.go index a1cccedfd39..acd43c06825 100644 --- a/heartbeat/storage/heartbeatStorer.go +++ b/heartbeat/storage/heartbeatStorer.go @@ -32,7 +32,7 @@ func NewHeartbeatDbStorer( return nil, heartbeat.ErrNilMonitorDb } if check.IfNil(marshalizer) { - return nil, heartbeat.ErrNilMarshalizer + return nil, heartbeat.ErrNilMarshaller } return &HeartbeatDbStorer{ diff --git a/heartbeat/storage/heartbeatStorer_test.go b/heartbeat/storage/heartbeatStorer_test.go index 4b3f1f55483..3f681e6eeb4 100644 --- a/heartbeat/storage/heartbeatStorer_test.go +++ b/heartbeat/storage/heartbeatStorer_test.go @@ -21,7 +21,7 @@ func TestNewHeartbeatStorer_NilStorerShouldErr(t *testing.T) { hs, err := storage.NewHeartbeatDbStorer( nil, - &mock.MarshalizerStub{}, + &mock.MarshallerStub{}, ) assert.Nil(t, hs) assert.Equal(t, heartbeat.ErrNilMonitorDb, err) @@ -35,7 +35,7 @@ func TestNewHeartbeatStorer_NilMarshalizerShouldErr(t *testing.T) { nil, ) assert.Nil(t, hs) - assert.Equal(t, heartbeat.ErrNilMarshalizer, err) + assert.Equal(t, heartbeat.ErrNilMarshaller, err) } func TestNewHeartbeatStorer_OkValsShouldWork(t *testing.T) { @@ -43,7 +43,7 @@ func TestNewHeartbeatStorer_OkValsShouldWork(t *testing.T) { hs, err := storage.NewHeartbeatDbStorer( &storageStubs.StorerStub{}, - &mock.MarshalizerStub{}, + &mock.MarshallerStub{}, ) assert.Nil(t, err) assert.False(t, check.IfNil(hs)) @@ -54,7 +54,7 @@ func TestHeartbeatDbStorer_LoadKeysEntryNotFoundShouldErr(t *testing.T) { hs, _ := storage.NewHeartbeatDbStorer( mock.NewStorerMock(), - &mock.MarshalizerMock{}, + 
&mock.MarshallerMock{}, ) restoredKeys, err := hs.LoadKeys() @@ -72,7 +72,7 @@ func TestHeartbeatDbStorer_LoadKeysUnmarshalInvalidShouldErr(t *testing.T) { hs, _ := storage.NewHeartbeatDbStorer( storer, - &mock.MarshalizerMock{}, + &mock.MarshallerMock{}, ) restoredKeys, err := hs.LoadKeys() @@ -85,13 +85,13 @@ func TestHeartbeatDbStorer_LoadKeysShouldWork(t *testing.T) { storer := mock.NewStorerMock() keys := [][]byte{[]byte("key1"), []byte("key2")} - msr := &mock.MarshalizerMock{} + msr := &mock.MarshallerMock{} keysBytes, _ := msr.Marshal(&batch.Batch{Data: keys}) _ = storer.Put([]byte("keys"), keysBytes) hs, _ := storage.NewHeartbeatDbStorer( storer, - &mock.MarshalizerMock{}, + &mock.MarshallerMock{}, ) restoredKeys, err := hs.LoadKeys() @@ -105,7 +105,7 @@ func TestHeartbeatDbStorer_SaveKeys(t *testing.T) { keys := [][]byte{[]byte("key1"), []byte("key2")} hs, _ := storage.NewHeartbeatDbStorer( mock.NewStorerMock(), - &mock.MarshalizerMock{}, + &mock.MarshallerMock{}, ) err := hs.SaveKeys(keys) @@ -120,7 +120,7 @@ func TestHeartbeatDbStorer_LoadGenesisTimeNotFoundInDbShouldErr(t *testing.T) { hs, _ := storage.NewHeartbeatDbStorer( mock.NewStorerMock(), - &mock.MarshalizerMock{}, + &mock.MarshallerMock{}, ) _, err := hs.LoadGenesisTime() @@ -135,7 +135,7 @@ func TestHeartbeatDbStorer_LoadGenesisUnmarshalIssueShouldErr(t *testing.T) { hs, _ := storage.NewHeartbeatDbStorer( storer, - &mock.MarshalizerMock{}, + &mock.MarshallerMock{}, ) _, err := hs.LoadGenesisTime() @@ -146,7 +146,7 @@ func TestHeartbeatDbStorer_LoadGenesisTimeShouldWork(t *testing.T) { t.Parallel() storer := mock.NewStorerMock() - msr := &mock.MarshalizerMock{} + msr := &mock.MarshallerMock{} dbt := &data.DbTimeStamp{ Timestamp: time.Now().UnixNano(), @@ -170,7 +170,7 @@ func TestHeartbeatDbStorer_UpdateGenesisTimeShouldFindAndReplace(t *testing.T) { t.Parallel() storer := mock.NewStorerMock() - msr := &mock.MarshalizerMock{} + msr := &mock.MarshallerMock{} dbt := &data.DbTimeStamp{ Timestamp: time.Now().UnixNano(), @@ -197,7 +197,7 @@ func TestHeartbeatDbStorer_UpdateGenesisTimeShouldAddNewEntry(t *testing.T) { hs, _ := storage.NewHeartbeatDbStorer( mock.NewStorerMock(), - &mock.MarshalizerMock{}, + &mock.MarshallerMock{}, ) genesisTime := time.Now() @@ -214,7 +214,7 @@ func TestHeartbeatDbSnorer_SavePubkeyDataDataMarshalNotSucceededShouldErr(t *tes expectedErr := errors.New("error marshal") hs, _ := storage.NewHeartbeatDbStorer( mock.NewStorerMock(), - &mock.MarshalizerStub{ + &mock.MarshallerStub{ MarshalHandler: func(obj interface{}) ([]byte, error) { return nil, expectedErr }, @@ -238,7 +238,7 @@ func TestHeartbeatDbSnorer_SavePubkeyDataPutNotSucceededShouldErr(t *testing.T) return expectedErr }, }, - &mock.MarshalizerMock{}, + &mock.MarshallerMock{}, ) hb := data.HeartbeatDTO{ @@ -253,7 +253,7 @@ func TestHeartbeatDbSnorer_SavePubkeyDataPutShouldWork(t *testing.T) { hs, _ := storage.NewHeartbeatDbStorer( mock.NewStorerMock(), - &mock.MarshalizerMock{}, + &mock.MarshallerMock{}, ) hb := data.HeartbeatDTO{ @@ -268,7 +268,7 @@ func TestHeartbeatDbStorer_LoadHeartBeatDTOShouldWork(t *testing.T) { hs, _ := storage.NewHeartbeatDbStorer( mock.NewStorerMock(), - &mock.MarshalizerMock{}, + &mock.MarshallerMock{}, ) hb := data.HeartbeatDTO{ diff --git a/integrationTests/consensus/consensus_test.go b/integrationTests/consensus/consensus_test.go index fe150978078..df958286154 100644 --- a/integrationTests/consensus/consensus_test.go +++ b/integrationTests/consensus/consensus_test.go @@ -122,7 +122,6 @@ func 
startNodesWithCommitBlock(nodes []*testNode, mutex *sync.Mutex, nonceForRou }, }, BootstrapRoundIndex: 0, - HardforkTrigger: n.node.GetHardforkTrigger(), CoreComponents: n.node.GetCoreComponents(), NetworkComponents: n.node.GetNetworkComponents(), CryptoComponents: n.node.GetCryptoComponents(), diff --git a/integrationTests/consensus/testInitializer.go b/integrationTests/consensus/testInitializer.go index da966024d83..1494a8f0add 100644 --- a/integrationTests/consensus/testInitializer.go +++ b/integrationTests/consensus/testInitializer.go @@ -436,6 +436,7 @@ func createConsensusOnlyNode( processComponents.PeerMapper = networkShardingCollector processComponents.RoundHandlerField = roundHandler processComponents.ScheduledTxsExecutionHandlerInternal = &testscommon.ScheduledTxsExecutionStub{} + processComponents.ProcessedMiniBlocksTrackerInternal = &testscommon.ProcessedMiniBlocksTrackerStub{} dataComponents := integrationTests.GetDefaultDataComponents() dataComponents.BlockChain = blockChain @@ -467,7 +468,6 @@ func createConsensusOnlyNode( node.WithRequestedItemsHandler(&mock.RequestedItemsHandlerStub{}), node.WithValidatorSignatureSize(signatureSize), node.WithPublicKeySize(publicKeySize), - node.WithHardforkTrigger(&mock.HardforkTriggerStub{}), ) if err != nil { diff --git a/integrationTests/factory/consensusComponents/consensusComponents_test.go b/integrationTests/factory/consensusComponents/consensusComponents_test.go index ae079b2023a..323d68f7e07 100644 --- a/integrationTests/factory/consensusComponents/consensusComponents_test.go +++ b/integrationTests/factory/consensusComponents/consensusComponents_test.go @@ -111,13 +111,10 @@ func TestConsensusComponents_Close_ShouldWork(t *testing.T) { managedCoreComponents, managedNetworkComponents, managedCryptoComponents, - managedBootstrapComponents, managedDataComponents, managedStateComponents, managedStatusComponents, managedProcessComponents, - nodesCoordinator, - nodesShufflerOut, ) require.Nil(t, err) require.NotNil(t, managedConsensusComponents) diff --git a/integrationTests/interface.go b/integrationTests/interface.go index 02e968cd255..db68c48d0be 100644 --- a/integrationTests/interface.go +++ b/integrationTests/interface.go @@ -45,9 +45,12 @@ type NodesCoordinatorFactory interface { // NetworkShardingUpdater defines the updating methods used by the network sharding component type NetworkShardingUpdater interface { + GetLastKnownPeerID(pk []byte) (*core.PeerID, bool) GetPeerInfo(pid core.PeerID) core.P2PPeerInfo + UpdatePeerIDPublicKeyPair(pid core.PeerID, pk []byte) + PutPeerIdShardId(pid core.PeerID, shardID uint32) UpdatePeerIDInfo(pid core.PeerID, pk []byte, shardID uint32) - UpdatePeerIdSubType(pid core.PeerID, peerSubType core.P2PPeerSubType) + PutPeerIdSubType(pid core.PeerID, peerSubType core.P2PPeerSubType) IsInterfaceNil() bool } diff --git a/integrationTests/mock/blockProcessorMock.go b/integrationTests/mock/blockProcessorMock.go index 8f52171049a..a85851ba42a 100644 --- a/integrationTests/mock/blockProcessorMock.go +++ b/integrationTests/mock/blockProcessorMock.go @@ -6,7 +6,6 @@ import ( "github.com/ElrondNetwork/elrond-go-core/data" "github.com/ElrondNetwork/elrond-go-core/data/block" "github.com/ElrondNetwork/elrond-go-core/marshal" - "github.com/ElrondNetwork/elrond-go/process/block/processedMb" ) // BlockProcessorMock mocks the implementation for a blockProcessor @@ -48,10 +47,6 @@ func (bpm *BlockProcessorMock) ProcessScheduledBlock(header data.HeaderHandler, return bpm.ProcessScheduledBlockCalled(header, body, 
haveTime) } -// ApplyProcessedMiniBlocks - -func (bpm *BlockProcessorMock) ApplyProcessedMiniBlocks(_ *processedMb.ProcessedMiniBlockTracker) { -} - // CommitBlock mocks the commit of a block func (bpm *BlockProcessorMock) CommitBlock(header data.HeaderHandler, body data.BodyHandler) error { return bpm.CommitBlockCalled(header, body) diff --git a/integrationTests/mock/coreComponentsStub.go b/integrationTests/mock/coreComponentsStub.go index 4ed8ce03232..e857b3f152c 100644 --- a/integrationTests/mock/coreComponentsStub.go +++ b/integrationTests/mock/coreComponentsStub.go @@ -53,6 +53,7 @@ type CoreComponentsStub struct { NodeTypeProviderField core.NodeTypeProviderHandler ArwenChangeLockerInternal common.Locker ProcessStatusHandlerInternal common.ProcessStatusHandler + HardforkTriggerPubKeyField []byte } // Create - @@ -253,6 +254,11 @@ func (ccs *CoreComponentsStub) String() string { return "CoreComponentsStub" } +// HardforkTriggerPubKey - +func (ccs *CoreComponentsStub) HardforkTriggerPubKey() []byte { + return ccs.HardforkTriggerPubKeyField +} + // IsInterfaceNil - func (ccs *CoreComponentsStub) IsInterfaceNil() bool { return ccs == nil diff --git a/integrationTests/mock/databaseWritterMock.go b/integrationTests/mock/databaseWritterMock.go index 7dd22b555c2..d040b418fe2 100644 --- a/integrationTests/mock/databaseWritterMock.go +++ b/integrationTests/mock/databaseWritterMock.go @@ -14,6 +14,16 @@ type DatabaseWriterStub struct { DoMultiGetCalled func(ids []string, index string, withSource bool, res interface{}) error } +// DoScrollRequest - +func (dws *DatabaseWriterStub) DoScrollRequest(_ string, _ []byte, _ bool, _ func(responseBytes []byte) error) error { + return nil +} + +// DoCountRequest - +func (dws *DatabaseWriterStub) DoCountRequest(_ string, _ []byte) (uint64, error) { + return 0, nil +} + // DoRequest - func (dws *DatabaseWriterStub) DoRequest(req *esapi.IndexRequest) error { if dws.DoRequestCalled != nil { diff --git a/integrationTests/mock/hardforkTriggerStub.go b/integrationTests/mock/hardforkTriggerStub.go deleted file mode 100644 index 6858c666c16..00000000000 --- a/integrationTests/mock/hardforkTriggerStub.go +++ /dev/null @@ -1,82 +0,0 @@ -package mock - -import "github.com/ElrondNetwork/elrond-go/update" - -// HardforkTriggerStub - -type HardforkTriggerStub struct { - TriggerCalled func(epoch uint32, withEarlyEndOfEpoch bool) error - IsSelfTriggerCalled func() bool - TriggerReceivedCalled func(payload []byte, data []byte, pkBytes []byte) (bool, error) - RecordedTriggerMessageCalled func() ([]byte, bool) - CreateDataCalled func() []byte - AddCloserCalled func(closer update.Closer) error - NotifyTriggerReceivedCalled func() <-chan struct{} -} - -// Trigger - -func (hts *HardforkTriggerStub) Trigger(epoch uint32, withEarlyEndOfEpoch bool) error { - if hts.TriggerCalled != nil { - return hts.TriggerCalled(epoch, withEarlyEndOfEpoch) - } - - return nil -} - -// IsSelfTrigger - -func (hts *HardforkTriggerStub) IsSelfTrigger() bool { - if hts.IsSelfTriggerCalled != nil { - return hts.IsSelfTriggerCalled() - } - - return false -} - -// TriggerReceived - -func (hts *HardforkTriggerStub) TriggerReceived(payload []byte, data []byte, pkBytes []byte) (bool, error) { - if hts.TriggerReceivedCalled != nil { - return hts.TriggerReceivedCalled(payload, data, pkBytes) - } - - return false, nil -} - -// RecordedTriggerMessage - -func (hts *HardforkTriggerStub) RecordedTriggerMessage() ([]byte, bool) { - if hts.RecordedTriggerMessageCalled != nil { - return 
hts.RecordedTriggerMessageCalled() - } - - return nil, false -} - -// CreateData - -func (hts *HardforkTriggerStub) CreateData() []byte { - if hts.CreateDataCalled != nil { - return hts.CreateDataCalled() - } - - return make([]byte, 0) -} - -// AddCloser - -func (hts *HardforkTriggerStub) AddCloser(closer update.Closer) error { - if hts.AddCloserCalled != nil { - return hts.AddCloserCalled(closer) - } - - return nil -} - -// NotifyTriggerReceived - -func (hts *HardforkTriggerStub) NotifyTriggerReceived() <-chan struct{} { - if hts.NotifyTriggerReceivedCalled != nil { - return hts.NotifyTriggerReceivedCalled() - } - - return make(chan struct{}) -} - -// IsInterfaceNil - -func (hts *HardforkTriggerStub) IsInterfaceNil() bool { - return hts == nil -} diff --git a/integrationTests/mock/intermediateTransactionHandlerMock.go b/integrationTests/mock/intermediateTransactionHandlerMock.go index 1bbbbcbd1cc..66c9b858a85 100644 --- a/integrationTests/mock/intermediateTransactionHandlerMock.go +++ b/integrationTests/mock/intermediateTransactionHandlerMock.go @@ -15,23 +15,23 @@ type IntermediateTransactionHandlerMock struct { CreateBlockStartedCalled func() CreateMarshalizedDataCalled func(txHashes [][]byte) ([][]byte, error) GetAllCurrentFinishedTxsCalled func() map[string]data.TransactionHandler - RemoveProcessedResultsCalled func() [][]byte - InitProcessedResultsCalled func() + RemoveProcessedResultsCalled func(key []byte) [][]byte + InitProcessedResultsCalled func(key []byte) intermediateTransactions []data.TransactionHandler } // RemoveProcessedResults - -func (ith *IntermediateTransactionHandlerMock) RemoveProcessedResults() [][]byte { +func (ith *IntermediateTransactionHandlerMock) RemoveProcessedResults(key []byte) [][]byte { if ith.RemoveProcessedResultsCalled != nil { - return ith.RemoveProcessedResultsCalled() + return ith.RemoveProcessedResultsCalled(key) } return nil } // InitProcessedResults - -func (ith *IntermediateTransactionHandlerMock) InitProcessedResults() { +func (ith *IntermediateTransactionHandlerMock) InitProcessedResults(key []byte) { if ith.InitProcessedResultsCalled != nil { - ith.InitProcessedResultsCalled() + ith.InitProcessedResultsCalled(key) } } diff --git a/integrationTests/mock/networkComponentsMock.go b/integrationTests/mock/networkComponentsMock.go index 2890db54237..e46fee76d1e 100644 --- a/integrationTests/mock/networkComponentsMock.go +++ b/integrationTests/mock/networkComponentsMock.go @@ -8,12 +8,13 @@ import ( // NetworkComponentsStub - type NetworkComponentsStub struct { - Messenger p2p.Messenger - InputAntiFlood factory.P2PAntifloodHandler - OutputAntiFlood factory.P2PAntifloodHandler - PeerBlackList process.PeerBlackListCacher - PeerHonesty factory.PeerHonestyHandler - PreferredPeersHolder factory.PreferredPeersHolderHandler + Messenger p2p.Messenger + InputAntiFlood factory.P2PAntifloodHandler + OutputAntiFlood factory.P2PAntifloodHandler + PeerBlackList process.PeerBlackListCacher + PeerHonesty factory.PeerHonestyHandler + PreferredPeersHolder factory.PreferredPeersHolderHandler + PeersRatingHandlerField p2p.PeersRatingHandler } // PubKeyCacher - @@ -66,6 +67,11 @@ func (ncs *NetworkComponentsStub) PreferredPeersHolderHandler() factory.Preferre return ncs.PreferredPeersHolder } +// PeersRatingHandler - +func (ncs *NetworkComponentsStub) PeersRatingHandler() p2p.PeersRatingHandler { + return ncs.PeersRatingHandlerField +} + // String - func (ncs *NetworkComponentsStub) String() string { return "NetworkComponentsStub" diff --git 
a/integrationTests/mock/networkShardingCollectorMock.go b/integrationTests/mock/networkShardingCollectorMock.go index ab5e83f5bbb..acf740ada5b 100644 --- a/integrationTests/mock/networkShardingCollectorMock.go +++ b/integrationTests/mock/networkShardingCollectorMock.go @@ -7,8 +7,9 @@ import ( ) type networkShardingCollectorMock struct { - mutPeerIdPkMap sync.RWMutex - peerIdPkMap map[core.PeerID][]byte + mutMaps sync.RWMutex + peerIdPkMap map[core.PeerID][]byte + pkPeerIdMap map[string]core.PeerID mutFallbackPkShardMap sync.RWMutex fallbackPkShardMap map[string]uint32 @@ -24,17 +25,27 @@ type networkShardingCollectorMock struct { func NewNetworkShardingCollectorMock() *networkShardingCollectorMock { return &networkShardingCollectorMock{ peerIdPkMap: make(map[core.PeerID][]byte), + pkPeerIdMap: make(map[string]core.PeerID), peerIdSubType: make(map[core.PeerID]uint32), fallbackPkShardMap: make(map[string]uint32), fallbackPidShardMap: make(map[string]uint32), } } -// UpdatePeerIdPublicKey - +// UpdatePeerIDPublicKeyPair - +func (nscm *networkShardingCollectorMock) UpdatePeerIDPublicKeyPair(pid core.PeerID, pk []byte) { + nscm.mutMaps.Lock() + nscm.peerIdPkMap[pid] = pk + nscm.pkPeerIdMap[string(pk)] = pid + nscm.mutMaps.Unlock() +} + +// UpdatePeerIDInfo - func (nscm *networkShardingCollectorMock) UpdatePeerIDInfo(pid core.PeerID, pk []byte, shardID uint32) { - nscm.mutPeerIdPkMap.Lock() + nscm.mutMaps.Lock() nscm.peerIdPkMap[pid] = pk - nscm.mutPeerIdPkMap.Unlock() + nscm.pkPeerIdMap[string(pk)] = pid + nscm.mutMaps.Unlock() if shardID == core.AllShardId { return @@ -49,13 +60,20 @@ func (nscm *networkShardingCollectorMock) UpdatePeerIDInfo(pid core.PeerID, pk [ nscm.mutFallbackPidShardMap.Unlock() } -// UpdatePeerIdSubType - -func (nscm *networkShardingCollectorMock) UpdatePeerIdSubType(pid core.PeerID, peerSubType core.P2PPeerSubType) { +// PutPeerIdSubType - +func (nscm *networkShardingCollectorMock) PutPeerIdSubType(pid core.PeerID, peerSubType core.P2PPeerSubType) { nscm.mutPeerIdSubType.Lock() nscm.peerIdSubType[pid] = uint32(peerSubType) nscm.mutPeerIdSubType.Unlock() } +// PutPeerIdShardId - +func (nscm *networkShardingCollectorMock) PutPeerIdShardId(pid core.PeerID, shardID uint32) { + nscm.mutFallbackPidShardMap.Lock() + nscm.fallbackPidShardMap[string(pid)] = shardID + nscm.mutFallbackPidShardMap.Unlock() +} + // GetPeerInfo - func (nscm *networkShardingCollectorMock) GetPeerInfo(pid core.PeerID) core.P2PPeerInfo { nscm.mutPeerIdSubType.Lock() @@ -64,9 +82,20 @@ func (nscm *networkShardingCollectorMock) GetPeerInfo(pid core.PeerID) core.P2PP return core.P2PPeerInfo{ PeerType: core.ObserverPeer, PeerSubType: core.P2PPeerSubType(nscm.peerIdSubType[pid]), + PkBytes: nscm.peerIdPkMap[pid], } } +// GetLastKnownPeerID - +func (nscm *networkShardingCollectorMock) GetLastKnownPeerID(pk []byte) (*core.PeerID, bool) { + nscm.mutMaps.RLock() + defer nscm.mutMaps.RUnlock() + + pid, ok := nscm.pkPeerIdMap[string(pk)] + + return &pid, ok +} + // IsInterfaceNil - func (nscm *networkShardingCollectorMock) IsInterfaceNil() bool { return nscm == nil diff --git a/integrationTests/mock/peerShardMapperStub.go b/integrationTests/mock/peerShardMapperStub.go index cd95201623d..95dc9039c54 100644 --- a/integrationTests/mock/peerShardMapperStub.go +++ b/integrationTests/mock/peerShardMapperStub.go @@ -4,6 +4,40 @@ import "github.com/ElrondNetwork/elrond-go-core/core" // PeerShardMapperStub - type PeerShardMapperStub struct { + GetLastKnownPeerIDCalled func(pk []byte) (*core.PeerID, bool) + 
UpdatePeerIDPublicKeyPairCalled func(pid core.PeerID, pk []byte) + PutPeerIdShardIdCalled func(pid core.PeerID, shardID uint32) + PutPeerIdSubTypeCalled func(pid core.PeerID, peerSubType core.P2PPeerSubType) +} + +// UpdatePeerIDPublicKeyPair - +func (psms *PeerShardMapperStub) UpdatePeerIDPublicKeyPair(pid core.PeerID, pk []byte) { + if psms.UpdatePeerIDPublicKeyPairCalled != nil { + psms.UpdatePeerIDPublicKeyPairCalled(pid, pk) + } +} + +// PutPeerIdShardId - +func (psms *PeerShardMapperStub) PutPeerIdShardId(pid core.PeerID, shardID uint32) { + if psms.PutPeerIdShardIdCalled != nil { + psms.PutPeerIdShardIdCalled(pid, shardID) + } +} + +// PutPeerIdSubType - +func (psms *PeerShardMapperStub) PutPeerIdSubType(pid core.PeerID, peerSubType core.P2PPeerSubType) { + if psms.PutPeerIdSubTypeCalled != nil { + psms.PutPeerIdSubTypeCalled(pid, peerSubType) + } +} + +// GetLastKnownPeerID - +func (psms *PeerShardMapperStub) GetLastKnownPeerID(pk []byte) (*core.PeerID, bool) { + if psms.GetLastKnownPeerIDCalled != nil { + return psms.GetLastKnownPeerIDCalled(pk) + } + + return nil, false } // GetPeerInfo - diff --git a/integrationTests/mock/processComponentsStub.go b/integrationTests/mock/processComponentsStub.go index b19b18cb083..d971ba2eb7e 100644 --- a/integrationTests/mock/processComponentsStub.go +++ b/integrationTests/mock/processComponentsStub.go @@ -46,6 +46,8 @@ type ProcessComponentsStub struct { CurrentEpochProviderInternal process.CurrentNetworkEpochProviderHandler ScheduledTxsExecutionHandlerInternal process.ScheduledTxsExecutionHandler TxsSenderHandlerField process.TxsSenderHandler + HardforkTriggerField factory.HardforkTrigger + ProcessedMiniBlocksTrackerInternal process.ProcessedMiniBlocksTracker } // Create - @@ -228,6 +230,16 @@ func (pcs *ProcessComponentsStub) TxsSenderHandler() process.TxsSenderHandler { return pcs.TxsSenderHandlerField } +// HardforkTrigger - +func (pcs *ProcessComponentsStub) HardforkTrigger() factory.HardforkTrigger { + return pcs.HardforkTriggerField +} + +// ProcessedMiniBlocksTracker - +func (pcs *ProcessComponentsStub) ProcessedMiniBlocksTracker() process.ProcessedMiniBlocksTracker { + return pcs.ProcessedMiniBlocksTrackerInternal +} + // IsInterfaceNil - func (pcs *ProcessComponentsStub) IsInterfaceNil() bool { return pcs == nil diff --git a/integrationTests/mock/transactionCoordinatorMock.go b/integrationTests/mock/transactionCoordinatorMock.go index df193addd5c..eeff0eb9e62 100644 --- a/integrationTests/mock/transactionCoordinatorMock.go +++ b/integrationTests/mock/transactionCoordinatorMock.go @@ -6,6 +6,7 @@ import ( "github.com/ElrondNetwork/elrond-go-core/data" "github.com/ElrondNetwork/elrond-go-core/data/block" "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/process/block/processedMb" ) // TransactionCoordinatorMock - @@ -20,7 +21,7 @@ type TransactionCoordinatorMock struct { RemoveTxsFromPoolCalled func(body *block.Body) error ProcessBlockTransactionCalled func(header data.HeaderHandler, body *block.Body, haveTime func() time.Duration) error CreateBlockStartedCalled func() - CreateMbsAndProcessCrossShardTransactionsDstMeCalled func(header data.HeaderHandler, processedMiniBlocksHashes map[string]struct{}, haveTime func() bool, haveAdditionalTime func() bool, scheduledMode bool) (block.MiniBlockSlice, uint32, bool, error) + CreateMbsAndProcessCrossShardTransactionsDstMeCalled func(header data.HeaderHandler, processedMiniBlocksInfo map[string]*processedMb.ProcessedMiniBlockInfo, haveTime func() bool, 
haveAdditionalTime func() bool, scheduledMode bool) (block.MiniBlockSlice, uint32, bool, error) CreateMbsAndProcessTransactionsFromMeCalled func(haveTime func() bool) block.MiniBlockSlice CreateMarshalizedDataCalled func(body *block.Body) map[string][][]byte GetAllCurrentUsedTxsCalled func(blockType block.Type) map[string]data.TransactionHandler @@ -145,7 +146,7 @@ func (tcm *TransactionCoordinatorMock) CreateBlockStarted() { // CreateMbsAndProcessCrossShardTransactionsDstMe - func (tcm *TransactionCoordinatorMock) CreateMbsAndProcessCrossShardTransactionsDstMe( header data.HeaderHandler, - processedMiniBlocksHashes map[string]struct{}, + processedMiniBlocksInfo map[string]*processedMb.ProcessedMiniBlockInfo, haveTime func() bool, haveAdditionalTime func() bool, scheduledMode bool, @@ -154,7 +155,7 @@ func (tcm *TransactionCoordinatorMock) CreateMbsAndProcessCrossShardTransactions return nil, 0, false, nil } - return tcm.CreateMbsAndProcessCrossShardTransactionsDstMeCalled(header, processedMiniBlocksHashes, haveTime, haveAdditionalTime, scheduledMode) + return tcm.CreateMbsAndProcessCrossShardTransactionsDstMeCalled(header, processedMiniBlocksInfo, haveTime, haveAdditionalTime, scheduledMode) } // CreateMbsAndProcessTransactionsFromMe - diff --git a/integrationTests/multiShard/block/interceptedHeadersSigVerification/interceptedHeadersSigVerification_test.go b/integrationTests/multiShard/block/interceptedHeadersSigVerification/interceptedHeadersSigVerification_test.go index 96204b1f163..e5d08540d4c 100644 --- a/integrationTests/multiShard/block/interceptedHeadersSigVerification/interceptedHeadersSigVerification_test.go +++ b/integrationTests/multiShard/block/interceptedHeadersSigVerification/interceptedHeadersSigVerification_test.go @@ -71,15 +71,15 @@ func TestInterceptedShardBlockHeaderVerifiedWithCorrectConsensusGroup(t *testing // all nodes in metachain have the block header in pool as interceptor validates it for _, metaNode := range nodesMap[core.MetachainShardId] { - v, err := metaNode.DataPool.Headers().GetHeaderByHash(headerHash) - assert.Nil(t, err) + v, errGet := metaNode.DataPool.Headers().GetHeaderByHash(headerHash) + assert.Nil(t, errGet) assert.Equal(t, header, v) } // all nodes in shard have the block in pool as interceptor validates it for _, shardNode := range nodesMap[0] { - v, err := shardNode.DataPool.Headers().GetHeaderByHash(headerHash) - assert.Nil(t, err) + v, errGet := shardNode.DataPool.Headers().GetHeaderByHash(headerHash) + assert.Nil(t, errGet) assert.Equal(t, header, v) } } @@ -213,15 +213,15 @@ func TestInterceptedShardBlockHeaderWithLeaderSignatureAndRandSeedChecks(t *test // all nodes in metachain have the block header in pool as interceptor validates it for _, metaNode := range nodesMap[core.MetachainShardId] { - v, err := metaNode.DataPool.Headers().GetHeaderByHash(headerHash) - assert.Nil(t, err) + v, errGet := metaNode.DataPool.Headers().GetHeaderByHash(headerHash) + assert.Nil(t, errGet) assert.Equal(t, header, v) } // all nodes in shard have the block in pool as interceptor validates it for _, shardNode := range nodesMap[0] { - v, err := shardNode.DataPool.Headers().GetHeaderByHash(headerHash) - assert.Nil(t, err) + v, errGet := shardNode.DataPool.Headers().GetHeaderByHash(headerHash) + assert.Nil(t, errGet) assert.Equal(t, header, v) } } diff --git a/integrationTests/multiShard/endOfEpoch/startInEpoch/startInEpoch_test.go b/integrationTests/multiShard/endOfEpoch/startInEpoch/startInEpoch_test.go index 452236bc07b..f3bd83c937e 100644 --- 
a/integrationTests/multiShard/endOfEpoch/startInEpoch/startInEpoch_test.go +++ b/integrationTests/multiShard/endOfEpoch/startInEpoch/startInEpoch_test.go @@ -208,6 +208,7 @@ func testNodeStartsInEpoch(t *testing.T, shardID uint32, expectedHighestRound ui } coreComponents.NodeTypeProviderField = &nodeTypeProviderMock.NodeTypeProviderStub{} coreComponents.ChanStopNodeProcessField = endProcess.GetDummyEndProcessChannel() + coreComponents.HardforkTriggerPubKeyField = []byte("provided hardfork pub key") nodesCoordinatorRegistryFactory, _ := nodesCoordinator.NewNodesCoordinatorRegistryFactory( &testscommon.MarshalizerMock{}, @@ -308,6 +309,7 @@ func testNodeStartsInEpoch(t *testing.T, shardID uint32, expectedHighestRound ui ScheduledTxsExecutionHandler: &testscommon.ScheduledTxsExecutionStub{}, MiniblocksProvider: &mock.MiniBlocksProviderStub{}, EpochNotifier: &epochNotifierMock.EpochNotifierStub{}, + ProcessedMiniBlocksTracker: &testscommon.ProcessedMiniBlocksTrackerStub{}, } bootstrapper, err := getBootstrapper(shardID, argsBaseBootstrapper) diff --git a/integrationTests/multiShard/hardFork/hardFork_test.go b/integrationTests/multiShard/hardFork/hardFork_test.go index cc536d5369a..a3cbfd03b04 100644 --- a/integrationTests/multiShard/hardFork/hardFork_test.go +++ b/integrationTests/multiShard/hardFork/hardFork_test.go @@ -562,6 +562,7 @@ func createHardForkExporter( coreComponents.ChainIdCalled = func() string { return string(node.ChainID) } + coreComponents.HardforkTriggerPubKeyField = []byte("provided hardfork pub key") cryptoComponents := integrationTests.GetDefaultCryptoComponents() cryptoComponents.BlockSig = node.OwnAccount.BlockSingleSigner @@ -621,6 +622,7 @@ func createHardForkExporter( MaxHardCapForMissingNodes: 500, NumConcurrentTrieSyncers: 50, TrieSyncerVersion: 2, + PeersRatingHandler: node.PeersRatingHandler, } exportHandler, err := factory.NewExportHandlerFactory(argsExportHandler) diff --git a/integrationTests/multiShard/txScenarios/common.go b/integrationTests/multiShard/txScenarios/common.go index 856f2e0359a..4990d1f936d 100644 --- a/integrationTests/multiShard/txScenarios/common.go +++ b/integrationTests/multiShard/txScenarios/common.go @@ -38,6 +38,8 @@ func createGeneralSetupForTxTest(initialBalance *big.Int) ( enableEpochs := config.EnableEpochs{ OptimizeGasUsedInCrossMiniBlocksEnableEpoch: 10, + ScheduledMiniBlocksEnableEpoch: 10, + MiniBlockPartialExecutionEnableEpoch: 10, } nodes := integrationTests.CreateNodesWithEnableEpochs( diff --git a/integrationTests/node/heartbeat/heartbeat_test.go b/integrationTests/node/heartbeat/heartbeat_test.go index c0f4a0acd54..f0d4066a9bb 100644 --- a/integrationTests/node/heartbeat/heartbeat_test.go +++ b/integrationTests/node/heartbeat/heartbeat_test.go @@ -6,9 +6,12 @@ import ( "testing" "time" + "github.com/ElrondNetwork/elrond-go/heartbeat" mock2 "github.com/ElrondNetwork/elrond-go/heartbeat/mock" "github.com/ElrondNetwork/elrond-go/testscommon" + "github.com/ElrondNetwork/elrond-go/testscommon/epochNotifier" "github.com/ElrondNetwork/elrond-go/testscommon/p2pmocks" + vmcommon "github.com/ElrondNetwork/elrond-vm-common" "github.com/ElrondNetwork/elrond-go-core/core" "github.com/ElrondNetwork/elrond-go-core/marshal" @@ -27,9 +30,16 @@ import ( "github.com/stretchr/testify/assert" ) -var stepDelay = time.Second / 10 var log = logger.GetOrCreate("integrationtests/node") +var handlers []vmcommon.EpochSubscriberHandler + +const ( + stepDelay = time.Second / 10 + durationBetweenHeartbeats = time.Second * 5 + providedEpoch = uint32(11) 
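+ // providedEpoch is passed to the sender and monitor as HeartbeatDisableEpoch; the tests confirm providedEpoch+1 to switch the old heartbeat off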
+) + // TestHeartbeatMonitorWillUpdateAnInactivePeer test what happen if a peer out of 2 stops being responsive on heartbeat status // The active monitor should change it's active flag to false when a new heartbeat message has arrived. func TestHeartbeatMonitorWillUpdateAnInactivePeer(t *testing.T) { @@ -37,10 +47,13 @@ func TestHeartbeatMonitorWillUpdateAnInactivePeer(t *testing.T) { t.Skip("this is not a short test") } - maxUnresposiveTime := time.Second * 10 + interactingNodes := 3 + nodes := make([]p2p.Messenger, interactingNodes) + maxUnresposiveTime := time.Second * 10 monitor := createMonitor(maxUnresposiveTime) - nodes, senders, pks := prepareNodes(monitor, 3, "nodeName") + + senders, pks := prepareNodes(nodes, monitor, interactingNodes, "nodeName") defer func() { for _, n := range nodes { @@ -80,8 +93,6 @@ func TestHeartbeatMonitorWillNotUpdateTooLongHeartbeatMessages(t *testing.T) { t.Skip("this is not a short test") } - maxUnresposiveTime := time.Second * 10 - length := 129 buff := make([]byte, length) @@ -90,8 +101,13 @@ func TestHeartbeatMonitorWillNotUpdateTooLongHeartbeatMessages(t *testing.T) { } bigNodeName := string(buff) + interactingNodes := 3 + nodes := make([]p2p.Messenger, interactingNodes) + + maxUnresposiveTime := time.Second * 10 monitor := createMonitor(maxUnresposiveTime) - nodes, senders, pks := prepareNodes(monitor, 3, bigNodeName) + + senders, pks := prepareNodes(nodes, monitor, interactingNodes, bigNodeName) defer func() { for _, n := range nodes { @@ -116,20 +132,120 @@ func TestHeartbeatMonitorWillNotUpdateTooLongHeartbeatMessages(t *testing.T) { assert.True(t, isMessageCorrectLen(pkHeartBeats, secondPK, expectedLen)) } +func TestHeartbeatV2_DeactivationOfHeartbeat(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + interactingNodes := 3 + nodes := make([]*integrationTests.TestHeartbeatNode, interactingNodes) + p2pConfig := integrationTests.CreateP2PConfigWithNoDiscovery() + for i := 0; i < interactingNodes; i++ { + nodes[i] = integrationTests.NewTestHeartbeatNode(3, 0, interactingNodes, p2pConfig) + } + assert.Equal(t, interactingNodes, len(nodes)) + + messengers := make([]p2p.Messenger, interactingNodes) + for i := 0; i < interactingNodes; i++ { + messengers[i] = nodes[i].Messenger + } + + maxUnresposiveTime := time.Second * 10 + monitor := createMonitor(maxUnresposiveTime) + senders, _ := prepareNodes(messengers, monitor, interactingNodes, "nodeName") + + // Start sending heartbeats + timer := time.NewTimer(durationBetweenHeartbeats) + defer timer.Stop() + go startSendingHeartbeats(t, senders, timer) + + // Wait for first messages + time.Sleep(time.Second * 6) + + heartbeats := monitor.GetHeartbeats() + assert.False(t, heartbeats[0].IsActive) //first one is the monitor which is inactive + + for _, hb := range heartbeats[1:] { + assert.True(t, hb.IsActive) + } + + // Stop sending heartbeats + for _, handler := range handlers { + handler.EpochConfirmed(providedEpoch+1, 0) + } + + // Wait enough time to make sure some heartbeats should have been sent + time.Sleep(time.Second * 15) + + // Check sent messages + maxHbV2DurationAllowed := time.Second * 5 + checkMessages(t, nodes, monitor, maxHbV2DurationAllowed) +} + +func startSendingHeartbeats(t *testing.T, senders []*process.Sender, timer *time.Timer) { + for { + timer.Reset(durationBetweenHeartbeats) + + <-timer.C + for _, sender := range senders { + err := sender.SendHeartbeat() + assert.Nil(t, err) + } + } +} + +func checkMessages(t *testing.T, nodes 
[]*integrationTests.TestHeartbeatNode, monitor *process.Monitor, maxHbV2DurationAllowed time.Duration) { + heartbeats := monitor.GetHeartbeats() + for _, hb := range heartbeats { + assert.False(t, hb.IsActive) + } + + numOfNodes := len(nodes) + for i := 0; i < numOfNodes; i++ { + paCache := nodes[i].DataPool.PeerAuthentications() + hbCache := nodes[i].DataPool.Heartbeats() + + assert.Equal(t, numOfNodes, paCache.Len()) + assert.Equal(t, numOfNodes, hbCache.Len()) + + // Check this node received messages from all peers + for _, node := range nodes { + assert.True(t, paCache.Has(node.Messenger.ID().Bytes())) + assert.True(t, hbCache.Has(node.Messenger.ID().Bytes())) + + // Also check message age + value, _ := paCache.Get(node.Messenger.ID().Bytes()) + msg := value.(*heartbeat.PeerAuthentication) + + marshaller := integrationTests.TestMarshaller + payload := &heartbeat.Payload{} + err := marshaller.Unmarshal(payload, msg.Payload) + assert.Nil(t, err) + + currentTimestamp := time.Now().Unix() + messageAge := time.Duration(currentTimestamp - payload.Timestamp) + assert.True(t, messageAge < maxHbV2DurationAllowed) + } + } +} + func prepareNodes( + nodes []p2p.Messenger, monitor *process.Monitor, interactingNodes int, defaultNodeName string, -) ([]p2p.Messenger, []*process.Sender, []crypto.PublicKey) { +) ([]*process.Sender, []crypto.PublicKey) { senderIdxs := []int{0, 1} - nodes := make([]p2p.Messenger, interactingNodes) topicHeartbeat := "topic" senders := make([]*process.Sender, 0) pks := make([]crypto.PublicKey, 0) + handlers = make([]vmcommon.EpochSubscriberHandler, 0) for i := 0; i < interactingNodes; i++ { - nodes[i] = integrationTests.CreateMessengerWithNoDiscovery() + if nodes[i] == nil { + nodes[i] = integrationTests.CreateMessengerWithNoDiscovery() + } _ = nodes[i].CreateTopic(topicHeartbeat, true) isSender := integrationTests.IsIntInSlice(i, senderIdxs) @@ -148,7 +264,7 @@ func prepareNodes( } } - return nodes, senders, pks + return senders, pks } func checkReceivedMessages(t *testing.T, monitor *process.Monitor, pks []crypto.PublicKey, activeIdxs []int) { @@ -221,9 +337,15 @@ func createSenderWithName(messenger p2p.Messenger, topic string, nodeName string StatusHandler: &statusHandlerMock.AppStatusHandlerStub{}, VersionNumber: version, NodeDisplayName: nodeName, - HardforkTrigger: &mock.HardforkTriggerStub{}, + HardforkTrigger: &testscommon.HardforkTriggerStub{}, CurrentBlockProvider: &testscommon.ChainHandlerStub{}, RedundancyHandler: &mock.RedundancyHandlerStub{}, + EpochNotifier: &epochNotifier.EpochNotifierStub{ + RegisterNotifyHandlerCalled: func(handler vmcommon.EpochSubscriberHandler) { + handlers = append(handlers, handler) + }, + }, + HeartbeatDisableEpoch: providedEpoch, } sender, _ := process.NewSender(argSender) @@ -272,11 +394,13 @@ func createMonitor(maxDurationPeerUnresponsive time.Duration) *process.Monitor { return nil }, }, - HardforkTrigger: &mock.HardforkTriggerStub{}, + HardforkTrigger: &testscommon.HardforkTriggerStub{}, ValidatorPubkeyConverter: integrationTests.TestValidatorPubkeyConverter, HeartbeatRefreshIntervalInSec: 1, HideInactiveValidatorIntervalInSec: 600, AppStatusHandler: &statusHandlerMock.AppStatusHandlerStub{}, + EpochNotifier: &epochNotifier.EpochNotifierStub{}, + HeartbeatDisableEpoch: providedEpoch, } monitor, _ := process.NewMonitor(argMonitor) diff --git a/integrationTests/node/heartbeatV2/heartbeatV2_test.go b/integrationTests/node/heartbeatV2/heartbeatV2_test.go new file mode 100644 index 00000000000..0eaea6fd738 --- /dev/null +++ 
b/integrationTests/node/heartbeatV2/heartbeatV2_test.go @@ -0,0 +1,117 @@ +package heartbeatV2 + +import ( + "testing" + "time" + + "github.com/ElrondNetwork/elrond-go/heartbeat" + "github.com/ElrondNetwork/elrond-go/integrationTests" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestHeartbeatV2_AllPeersSendMessages(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + interactingNodes := 3 + nodes := make([]*integrationTests.TestHeartbeatNode, interactingNodes) + p2pConfig := integrationTests.CreateP2PConfigWithNoDiscovery() + for i := 0; i < interactingNodes; i++ { + nodes[i] = integrationTests.NewTestHeartbeatNode(3, 0, interactingNodes, p2pConfig) + } + assert.Equal(t, interactingNodes, len(nodes)) + + connectNodes(nodes, interactingNodes) + + // Wait for messages to broadcast + time.Sleep(time.Second * 15) + + for i := 0; i < len(nodes); i++ { + nodes[i].Close() + } + + // Check sent messages + maxMessageAgeAllowed := time.Second * 5 + checkMessages(t, nodes, maxMessageAgeAllowed) +} + +func TestHeartbeatV2_PeerJoiningLate(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + interactingNodes := 3 + nodes := make([]*integrationTests.TestHeartbeatNode, interactingNodes) + p2pConfig := integrationTests.CreateP2PConfigWithNoDiscovery() + for i := 0; i < interactingNodes; i++ { + nodes[i] = integrationTests.NewTestHeartbeatNode(3, 0, interactingNodes, p2pConfig) + } + assert.Equal(t, interactingNodes, len(nodes)) + + connectNodes(nodes, interactingNodes) + + // Wait for messages to broadcast + time.Sleep(time.Second * 15) + + // Check sent messages + maxMessageAgeAllowed := time.Second * 5 + checkMessages(t, nodes, maxMessageAgeAllowed) + + // Add new delayed node which requests messages + delayedNode := integrationTests.NewTestHeartbeatNode(3, 0, 0, p2pConfig) + nodes = append(nodes, delayedNode) + connectNodes(nodes, len(nodes)) + // Wait for messages to broadcast and requests to finish + time.Sleep(time.Second * 15) + + for i := 0; i < len(nodes); i++ { + nodes[i].Close() + } + + // Check sent messages again - now should have from all peers + maxMessageAgeAllowed = time.Second * 5 // should not have messages from first Send + checkMessages(t, nodes, maxMessageAgeAllowed) +} + +func connectNodes(nodes []*integrationTests.TestHeartbeatNode, interactingNodes int) { + for i := 0; i < interactingNodes-1; i++ { + for j := i + 1; j < interactingNodes; j++ { + src := nodes[i] + dst := nodes[j] + _ = src.ConnectTo(dst) + } + } +} + +func checkMessages(t *testing.T, nodes []*integrationTests.TestHeartbeatNode, maxMessageAgeAllowed time.Duration) { + numOfNodes := len(nodes) + for i := 0; i < numOfNodes; i++ { + paCache := nodes[i].DataPool.PeerAuthentications() + hbCache := nodes[i].DataPool.Heartbeats() + + assert.Equal(t, numOfNodes, paCache.Len()) + assert.Equal(t, numOfNodes, hbCache.Len()) + + // Check this node received messages from all peers + for _, node := range nodes { + assert.True(t, paCache.Has(node.Messenger.ID().Bytes())) + assert.True(t, hbCache.Has(node.Messenger.ID().Bytes())) + + // Also check message age + value, found := paCache.Get(node.Messenger.ID().Bytes()) + require.True(t, found) + msg := value.(*heartbeat.PeerAuthentication) + + marshaller := integrationTests.TestMarshaller + payload := &heartbeat.Payload{} + err := marshaller.Unmarshal(payload, msg.Payload) + assert.Nil(t, err) + + currentTimestamp := time.Now().Unix() + messageAge := 
time.Duration(currentTimestamp - payload.Timestamp) + assert.True(t, messageAge < maxMessageAgeAllowed) + } + } +} diff --git a/integrationTests/p2p/networkSharding-hbv2/networkSharding_test.go b/integrationTests/p2p/networkSharding-hbv2/networkSharding_test.go new file mode 100644 index 00000000000..6f3b08aeeee --- /dev/null +++ b/integrationTests/p2p/networkSharding-hbv2/networkSharding_test.go @@ -0,0 +1,219 @@ +package networkSharding + +import ( + "fmt" + "testing" + "time" + + "github.com/ElrondNetwork/elrond-go/config" + "github.com/ElrondNetwork/elrond-go/integrationTests" + "github.com/ElrondNetwork/elrond-go/p2p" + "github.com/stretchr/testify/assert" +) + +var p2pBootstrapStepDelay = 2 * time.Second + +func createDefaultConfig() config.P2PConfig { + return config.P2PConfig{ + Node: config.NodeConfig{ + Port: "0", + ConnectionWatcherType: "print", + }, + KadDhtPeerDiscovery: config.KadDhtPeerDiscoveryConfig{ + Enabled: true, + Type: "optimized", + RefreshIntervalInSec: 1, + RoutingTableRefreshIntervalInSec: 1, + ProtocolID: "/erd/kad/1.0.0", + InitialPeerList: nil, + BucketSize: 100, + }, + } +} + +func TestConnectionsInNetworkShardingWithShardingWithLists(t *testing.T) { + p2pConfig := createDefaultConfig() + p2pConfig.Sharding = config.ShardingConfig{ + TargetPeerCount: 12, + MaxIntraShardValidators: 6, + MaxCrossShardValidators: 1, + MaxIntraShardObservers: 1, + MaxCrossShardObservers: 1, + MaxSeeders: 1, + Type: p2p.ListsSharder, + AdditionalConnections: config.AdditionalConnectionsConfig{ + MaxFullHistoryObservers: 1, + }, + } + + testConnectionsInNetworkSharding(t, p2pConfig) +} + +func testConnectionsInNetworkSharding(t *testing.T, p2pConfig config.P2PConfig) { + if testing.Short() { + t.Skip("this is not a short test") + } + + nodesPerShard := 8 + numMetaNodes := 8 + numObserversOnShard := 2 + numShards := 2 + consensusGroupSize := 2 + + advertiser := integrationTests.CreateMessengerWithKadDht("") + _ = advertiser.Bootstrap() + seedAddress := integrationTests.GetConnectableAddress(advertiser) + + p2pConfig.KadDhtPeerDiscovery.InitialPeerList = []string{seedAddress} + + // create map of shard - testHeartbeatNodes for metachain and shard chain + nodesMap := integrationTests.CreateNodesWithTestHeartbeatNode( + nodesPerShard, + numMetaNodes, + numShards, + consensusGroupSize, + numMetaNodes, + numObserversOnShard, + p2pConfig, + ) + + defer func() { + stopNodes(advertiser, nodesMap) + }() + + createTestInterceptorForEachNode(nodesMap) + + time.Sleep(time.Second * 2) + + startNodes(nodesMap) + + fmt.Println("Delaying for node bootstrap and topic announcement...") + time.Sleep(p2pBootstrapStepDelay) + + for i := 0; i < 5; i++ { + fmt.Println("\n" + integrationTests.MakeDisplayTableForHeartbeatNodes(nodesMap)) + + time.Sleep(time.Second) + } + + fmt.Println("Initializing nodes components...") + initNodes(nodesMap) + + for i := 0; i < 5; i++ { + fmt.Println("\n" + integrationTests.MakeDisplayTableForHeartbeatNodes(nodesMap)) + + time.Sleep(time.Second) + } + + sendMessageOnGlobalTopic(nodesMap) + sendMessagesOnIntraShardTopic(nodesMap) + sendMessagesOnCrossShardTopic(nodesMap) + + for i := 0; i < 5; i++ { + fmt.Println("\n" + integrationTests.MakeDisplayTableForHeartbeatNodes(nodesMap)) + + time.Sleep(time.Second) + } + + testCounters(t, nodesMap, 1, 1, numShards*2) + testUnknownSeederPeers(t, nodesMap) +} + +func stopNodes(advertiser p2p.Messenger, nodesMap map[uint32][]*integrationTests.TestHeartbeatNode) { + _ = advertiser.Close() + for _, nodes := range nodesMap { + for _, n 
:= range nodes { + _ = n.Messenger.Close() + } + } +} + +func startNodes(nodesMap map[uint32][]*integrationTests.TestHeartbeatNode) { + for _, nodes := range nodesMap { + for _, n := range nodes { + _ = n.Messenger.Bootstrap() + } + } +} + +func initNodes(nodesMap map[uint32][]*integrationTests.TestHeartbeatNode) { + for _, nodes := range nodesMap { + for _, n := range nodes { + n.InitTestHeartbeatNode(0) + } + } +} + +func createTestInterceptorForEachNode(nodesMap map[uint32][]*integrationTests.TestHeartbeatNode) { + for _, nodes := range nodesMap { + for _, n := range nodes { + n.CreateTestInterceptors() + } + } +} + +func sendMessageOnGlobalTopic(nodesMap map[uint32][]*integrationTests.TestHeartbeatNode) { + fmt.Println("sending a message on global topic") + nodesMap[0][0].Messenger.Broadcast(integrationTests.GlobalTopic, []byte("global message")) + time.Sleep(time.Second) +} + +func sendMessagesOnIntraShardTopic(nodesMap map[uint32][]*integrationTests.TestHeartbeatNode) { + fmt.Println("sending a message on intra shard topic") + for _, nodes := range nodesMap { + n := nodes[0] + + identifier := integrationTests.ShardTopic + + n.ShardCoordinator.CommunicationIdentifier(n.ShardCoordinator.SelfId()) + nodes[0].Messenger.Broadcast(identifier, []byte("intra shard message")) + } + time.Sleep(time.Second) +} + +func sendMessagesOnCrossShardTopic(nodesMap map[uint32][]*integrationTests.TestHeartbeatNode) { + fmt.Println("sending messages on cross shard topics") + + for shardIdSrc, nodes := range nodesMap { + n := nodes[0] + + for shardIdDest := range nodesMap { + if shardIdDest == shardIdSrc { + continue + } + + identifier := integrationTests.ShardTopic + + n.ShardCoordinator.CommunicationIdentifier(shardIdDest) + nodes[0].Messenger.Broadcast(identifier, []byte("cross shard message")) + } + } + time.Sleep(time.Second) +} + +func testCounters( + t *testing.T, + nodesMap map[uint32][]*integrationTests.TestHeartbeatNode, + globalTopicMessagesCount int, + intraTopicMessagesCount int, + crossTopicMessagesCount int, +) { + + for _, nodes := range nodesMap { + for _, n := range nodes { + assert.Equal(t, globalTopicMessagesCount, n.CountGlobalMessages()) + assert.Equal(t, intraTopicMessagesCount, n.CountIntraShardMessages()) + assert.Equal(t, crossTopicMessagesCount, n.CountCrossShardMessages()) + } + } +} + +func testUnknownSeederPeers( + t *testing.T, + nodesMap map[uint32][]*integrationTests.TestHeartbeatNode, +) { + + for _, nodes := range nodesMap { + for _, n := range nodes { + assert.Equal(t, 1, len(n.Messenger.GetConnectedPeersInfo().Seeders)) + } + } +} diff --git a/integrationTests/p2p/peerDisconnecting/peerDisconnecting_test.go b/integrationTests/p2p/peerDisconnecting/peerDisconnecting_test.go index 6a113531f7d..752211d027d 100644 --- a/integrationTests/p2p/peerDisconnecting/peerDisconnecting_test.go +++ b/integrationTests/p2p/peerDisconnecting/peerDisconnecting_test.go @@ -69,6 +69,7 @@ func testPeerDisconnectionWithOneAdvertiser(t *testing.T, p2pConfig config.P2PCo NodeOperationMode: p2p.NormalOperation, Marshalizer: &testscommon.MarshalizerMock{}, SyncTimer: &testscommon.SyncTimerStub{}, + PeersRatingHandler: &p2pmocks.PeersRatingHandlerStub{}, } // Step 1. 
Create advertiser advertiser, err := libp2p.NewMockMessenger(argSeeder, netw) @@ -85,6 +86,7 @@ func testPeerDisconnectionWithOneAdvertiser(t *testing.T, p2pConfig config.P2PCo NodeOperationMode: p2p.NormalOperation, Marshalizer: &testscommon.MarshalizerMock{}, SyncTimer: &testscommon.SyncTimerStub{}, + PeersRatingHandler: &p2pmocks.PeersRatingHandlerStub{}, } node, errCreate := libp2p.NewMockMessenger(arg, netw) require.Nil(t, errCreate) diff --git a/integrationTests/p2p/peerDisconnecting/seedersDisconnecting_test.go b/integrationTests/p2p/peerDisconnecting/seedersDisconnecting_test.go index 897d6d97052..3b46cf39292 100644 --- a/integrationTests/p2p/peerDisconnecting/seedersDisconnecting_test.go +++ b/integrationTests/p2p/peerDisconnecting/seedersDisconnecting_test.go @@ -57,6 +57,7 @@ func TestSeedersDisconnectionWith2AdvertiserAnd3Peers(t *testing.T) { NodeOperationMode: p2p.NormalOperation, Marshalizer: &testscommon.MarshalizerMock{}, SyncTimer: &testscommon.SyncTimerStub{}, + PeersRatingHandler: &p2pmocks.PeersRatingHandlerStub{}, } node, err := libp2p.NewMockMessenger(arg, netw) require.Nil(t, err) @@ -129,6 +130,7 @@ func createBootstrappedSeeders(baseP2PConfig config.P2PConfig, numSeeders int, n NodeOperationMode: p2p.NormalOperation, Marshalizer: &testscommon.MarshalizerMock{}, SyncTimer: &testscommon.SyncTimerStub{}, + PeersRatingHandler: &p2pmocks.PeersRatingHandlerStub{}, } seeders[0], _ = libp2p.NewMockMessenger(argSeeder, netw) _ = seeders[0].Bootstrap() @@ -144,6 +146,7 @@ func createBootstrappedSeeders(baseP2PConfig config.P2PConfig, numSeeders int, n NodeOperationMode: p2p.NormalOperation, Marshalizer: &testscommon.MarshalizerMock{}, SyncTimer: &testscommon.SyncTimerStub{}, + PeersRatingHandler: &p2pmocks.PeersRatingHandlerStub{}, } seeders[i], _ = libp2p.NewMockMessenger(argSeeder, netw) _ = netw.LinkAll() diff --git a/integrationTests/singleShard/block/consensusNotAchieved/consensusNotAchieved_test.go b/integrationTests/singleShard/block/consensusNotAchieved/consensusNotAchieved_test.go index 09b6869f8de..841dcb94fb6 100644 --- a/integrationTests/singleShard/block/consensusNotAchieved/consensusNotAchieved_test.go +++ b/integrationTests/singleShard/block/consensusNotAchieved/consensusNotAchieved_test.go @@ -6,11 +6,11 @@ import ( "testing" "time" - logger "github.com/ElrondNetwork/elrond-go-logger" "github.com/ElrondNetwork/elrond-go-core/core/check" "github.com/ElrondNetwork/elrond-go-core/data" "github.com/ElrondNetwork/elrond-go-core/data/block" "github.com/ElrondNetwork/elrond-go-crypto" + logger "github.com/ElrondNetwork/elrond-go-logger" "github.com/ElrondNetwork/elrond-go/integrationTests" "github.com/ElrondNetwork/elrond-go/integrationTests/mock" testBlock "github.com/ElrondNetwork/elrond-go/integrationTests/singleShard/block" diff --git a/integrationTests/state/stateTrieClose/stateTrieClose_test.go b/integrationTests/state/stateTrieClose/stateTrieClose_test.go index c7034a9f344..2e307398df9 100644 --- a/integrationTests/state/stateTrieClose/stateTrieClose_test.go +++ b/integrationTests/state/stateTrieClose/stateTrieClose_test.go @@ -35,6 +35,7 @@ func TestPatriciaMerkleTrie_Close(t *testing.T) { rootHash, _ := tr.RootHash() leavesChannel1 := make(chan core.KeyValueHolder, common.TrieLeavesChannelDefaultCapacity) _ = tr.GetAllLeavesOnChannel(leavesChannel1, context.Background(), rootHash) + time.Sleep(time.Second) // allow the go routine to start idx, _ := gc.Snapshot() diff := gc.DiffGoRoutines(idxInitial, idx) assert.True(t, len(diff) <= 1) // can be 0 on a fast 
running host @@ -61,6 +62,7 @@ func TestPatriciaMerkleTrie_Close(t *testing.T) { rootHash, _ = tr.RootHash() leavesChannel2 := make(chan core.KeyValueHolder, common.TrieLeavesChannelDefaultCapacity) _ = tr.GetAllLeavesOnChannel(leavesChannel2, context.Background(), rootHash) + time.Sleep(time.Second) // allow the go routine to start idx, _ = gc.Snapshot() diff = gc.DiffGoRoutines(idxInitial, idx) assert.True(t, len(diff) <= 4) diff --git a/integrationTests/sync/basicSync/basicSync_test.go b/integrationTests/sync/basicSync/basicSync_test.go index 46aac2ba53c..157d513a162 100644 --- a/integrationTests/sync/basicSync/basicSync_test.go +++ b/integrationTests/sync/basicSync/basicSync_test.go @@ -5,10 +5,10 @@ import ( "testing" "time" - logger "github.com/ElrondNetwork/elrond-go-logger" "github.com/ElrondNetwork/elrond-go-core/core" "github.com/ElrondNetwork/elrond-go-core/core/check" "github.com/ElrondNetwork/elrond-go-core/data" + logger "github.com/ElrondNetwork/elrond-go-logger" "github.com/ElrondNetwork/elrond-go/integrationTests" "github.com/stretchr/testify/assert" ) diff --git a/integrationTests/testHeartbeatNode.go b/integrationTests/testHeartbeatNode.go new file mode 100644 index 00000000000..0351863377a --- /dev/null +++ b/integrationTests/testHeartbeatNode.go @@ -0,0 +1,764 @@ +package integrationTests + +import ( + "encoding/hex" + "fmt" + "strconv" + "time" + + "github.com/ElrondNetwork/elrond-go-core/core" + "github.com/ElrondNetwork/elrond-go-core/core/check" + "github.com/ElrondNetwork/elrond-go-core/core/partitioning" + "github.com/ElrondNetwork/elrond-go-core/core/random" + "github.com/ElrondNetwork/elrond-go-core/data/endProcess" + "github.com/ElrondNetwork/elrond-go-core/display" + "github.com/ElrondNetwork/elrond-go-core/marshal" + crypto "github.com/ElrondNetwork/elrond-go-crypto" + "github.com/ElrondNetwork/elrond-go-crypto/signing" + "github.com/ElrondNetwork/elrond-go-crypto/signing/mcl" + "github.com/ElrondNetwork/elrond-go-crypto/signing/mcl/singlesig" + "github.com/ElrondNetwork/elrond-go/common" + "github.com/ElrondNetwork/elrond-go/config" + "github.com/ElrondNetwork/elrond-go/dataRetriever" + "github.com/ElrondNetwork/elrond-go/dataRetriever/factory/containers" + "github.com/ElrondNetwork/elrond-go/dataRetriever/factory/resolverscontainer" + "github.com/ElrondNetwork/elrond-go/dataRetriever/requestHandlers" + "github.com/ElrondNetwork/elrond-go/epochStart/notifier" + "github.com/ElrondNetwork/elrond-go/heartbeat/processor" + "github.com/ElrondNetwork/elrond-go/heartbeat/sender" + "github.com/ElrondNetwork/elrond-go/integrationTests/mock" + "github.com/ElrondNetwork/elrond-go/p2p" + "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/process/interceptors" + interceptorFactory "github.com/ElrondNetwork/elrond-go/process/interceptors/factory" + interceptorsProcessor "github.com/ElrondNetwork/elrond-go/process/interceptors/processor" + processMock "github.com/ElrondNetwork/elrond-go/process/mock" + "github.com/ElrondNetwork/elrond-go/sharding" + "github.com/ElrondNetwork/elrond-go/sharding/networksharding" + "github.com/ElrondNetwork/elrond-go/sharding/nodesCoordinator" + "github.com/ElrondNetwork/elrond-go/storage/storageUnit" + "github.com/ElrondNetwork/elrond-go/storage/timecache" + "github.com/ElrondNetwork/elrond-go/testscommon" + "github.com/ElrondNetwork/elrond-go/testscommon/cryptoMocks" + dataRetrieverMock "github.com/ElrondNetwork/elrond-go/testscommon/dataRetriever" + 
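The new testHeartbeatNode.go (continued below) builds its `PeerSignatureHandlerStub` on top of a BLS key generator and `singlesig.NewBlsSigner()`, signing and verifying the raw peer ID bytes. In case that wiring is easier to read in isolation, here is a self-contained round trip using the same primitives; every call appears in the diff, while the peer ID literal is made up for illustration:

```go
package main

import (
	"fmt"

	"github.com/ElrondNetwork/elrond-go-crypto/signing"
	"github.com/ElrondNetwork/elrond-go-crypto/signing/mcl"
	"github.com/ElrondNetwork/elrond-go-crypto/signing/mcl/singlesig"
)

func main() {
	keygen := signing.NewKeyGenerator(mcl.NewSuiteBLS12())
	sk, pk := keygen.GeneratePair()
	signer := singlesig.NewBlsSigner()

	// GetPeerSignatureCalled in the stub signs the raw peer ID bytes
	pid := []byte("example-peer-id") // illustrative value only
	sig, err := signer.Sign(sk, pid)
	if err != nil {
		panic(err)
	}

	// VerifyPeerSignatureCalled rebuilds the public key from bytes first;
	// here the key object is reused directly for brevity
	fmt.Println("peer signature ok:", signer.Verify(pk, pid, sig) == nil)
}
```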
"github.com/ElrondNetwork/elrond-go/testscommon/nodeTypeProviderMock" + "github.com/ElrondNetwork/elrond-go/testscommon/p2pmocks" + "github.com/ElrondNetwork/elrond-go/testscommon/shardingMocks" + trieMock "github.com/ElrondNetwork/elrond-go/testscommon/trie" + "github.com/ElrondNetwork/elrond-go/update" +) + +const ( + defaultNodeName = "heartbeatNode" + timeBetweenPeerAuths = 10 * time.Second + timeBetweenHeartbeats = 5 * time.Second + timeBetweenSendsWhenError = time.Second + thresholdBetweenSends = 0.2 + timeBetweenHardforks = 2 * time.Second + + messagesInChunk = 10 + minPeersThreshold = 1.0 + delayBetweenRequests = time.Second + maxTimeout = time.Minute + maxMissingKeysInRequest = 1 + providedHardforkPubKey = "provided pub key" +) + +// TestMarshaller represents the main marshaller +var TestMarshaller = &marshal.GogoProtoMarshalizer{} + +// TestThrottler - +var TestThrottler = &processMock.InterceptorThrottlerStub{ + CanProcessCalled: func() bool { + return true + }, +} + +// TestHeartbeatNode represents a container type of class used in integration tests +// with all its fields exported +type TestHeartbeatNode struct { + ShardCoordinator sharding.Coordinator + NodesCoordinator nodesCoordinator.NodesCoordinator + PeerShardMapper process.NetworkShardingCollector + Messenger p2p.Messenger + NodeKeys TestKeyPair + DataPool dataRetriever.PoolsHolder + Sender update.Closer + PeerAuthInterceptor *interceptors.MultiDataInterceptor + HeartbeatInterceptor *interceptors.MultiDataInterceptor + ValidatorInfoInterceptor *interceptors.SingleDataInterceptor + PeerSigHandler crypto.PeerSignatureHandler + WhiteListHandler process.WhiteListHandler + Storage dataRetriever.StorageService + ResolversContainer dataRetriever.ResolversContainer + ResolverFinder dataRetriever.ResolversFinder + RequestHandler process.RequestHandler + RequestedItemsHandler dataRetriever.RequestedItemsHandler + RequestsProcessor update.Closer + DirectConnectionsProcessor update.Closer + Interceptor *CountInterceptor +} + +// NewTestHeartbeatNode returns a new TestHeartbeatNode instance with a libp2p messenger +func NewTestHeartbeatNode( + maxShards uint32, + nodeShardId uint32, + minPeersWaiting int, + p2pConfig config.P2PConfig, +) *TestHeartbeatNode { + keygen := signing.NewKeyGenerator(mcl.NewSuiteBLS12()) + sk, pk := keygen.GeneratePair() + + pksBytes := make(map[uint32][]byte, maxShards) + pksBytes[nodeShardId], _ = pk.ToByteArray() + + nodesCoordinatorInstance := &shardingMocks.NodesCoordinatorStub{ + GetAllValidatorsPublicKeysCalled: func() (map[uint32][][]byte, error) { + keys := make(map[uint32][][]byte) + for shardID := uint32(0); shardID < maxShards; shardID++ { + keys[shardID] = append(keys[shardID], pksBytes[shardID]) + } + + shardID := core.MetachainShardId + keys[shardID] = append(keys[shardID], pksBytes[shardID]) + + return keys, nil + }, + GetValidatorWithPublicKeyCalled: func(publicKey []byte) (nodesCoordinator.Validator, uint32, error) { + validator, _ := nodesCoordinator.NewValidator(publicKey, defaultChancesSelection, 1) + return validator, 0, nil + }, + } + singleSigner := singlesig.NewBlsSigner() + + peerSigHandler := &cryptoMocks.PeerSignatureHandlerStub{ + VerifyPeerSignatureCalled: func(pk []byte, pid core.PeerID, signature []byte) error { + senderPubKey, err := keygen.PublicKeyFromByteArray(pk) + if err != nil { + return err + } + return singleSigner.Verify(senderPubKey, pid.Bytes(), signature) + }, + GetPeerSignatureCalled: func(privateKey crypto.PrivateKey, pid []byte) ([]byte, error) { + return 
singleSigner.Sign(privateKey, pid) + }, + } + + shardCoordinator, _ := sharding.NewMultiShardCoordinator(maxShards, nodeShardId) + + messenger := CreateMessengerFromConfig(p2pConfig) + pidPk, _ := storageUnit.NewCache(storageUnit.CacheConfig{Type: storageUnit.LRUCache, Capacity: 1000}) + pkShardId, _ := storageUnit.NewCache(storageUnit.CacheConfig{Type: storageUnit.LRUCache, Capacity: 1000}) + pidShardId, _ := storageUnit.NewCache(storageUnit.CacheConfig{Type: storageUnit.LRUCache, Capacity: 1000}) + startInEpoch := uint32(0) + arg := networksharding.ArgPeerShardMapper{ + PeerIdPkCache: pidPk, + FallbackPkShardCache: pkShardId, + FallbackPidShardCache: pidShardId, + NodesCoordinator: nodesCoordinatorInstance, + PreferredPeersHolder: &p2pmocks.PeersHolderStub{}, + StartEpoch: startInEpoch, + } + peerShardMapper, err := networksharding.NewPeerShardMapper(arg) + if err != nil { + log.Error("error creating NewPeerShardMapper", "error", err) + } + err = messenger.SetPeerShardResolver(peerShardMapper) + if err != nil { + log.Error("error setting NewPeerShardMapper in p2p messenger", "error", err) + } + + thn := &TestHeartbeatNode{ + ShardCoordinator: shardCoordinator, + NodesCoordinator: nodesCoordinatorInstance, + Messenger: messenger, + PeerSigHandler: peerSigHandler, + PeerShardMapper: peerShardMapper, + } + + localId := thn.Messenger.ID() + pkBytes, _ := pk.ToByteArray() + thn.PeerShardMapper.UpdatePeerIDInfo(localId, pkBytes, shardCoordinator.SelfId()) + + thn.NodeKeys = TestKeyPair{ + Sk: sk, + Pk: pk, + } + + // start a go routine in order to allow peers to connect first + go thn.InitTestHeartbeatNode(minPeersWaiting) + + return thn +} + +// NewTestHeartbeatNodeWithCoordinator returns a new TestHeartbeatNode instance with a libp2p messenger +// using provided coordinator and keys +func NewTestHeartbeatNodeWithCoordinator( + maxShards uint32, + nodeShardId uint32, + p2pConfig config.P2PConfig, + coordinator nodesCoordinator.NodesCoordinator, + keys TestKeyPair, +) *TestHeartbeatNode { + keygen := signing.NewKeyGenerator(mcl.NewSuiteBLS12()) + singleSigner := singlesig.NewBlsSigner() + + peerSigHandler := &cryptoMocks.PeerSignatureHandlerStub{ + VerifyPeerSignatureCalled: func(pk []byte, pid core.PeerID, signature []byte) error { + senderPubKey, err := keygen.PublicKeyFromByteArray(pk) + if err != nil { + return err + } + return singleSigner.Verify(senderPubKey, pid.Bytes(), signature) + }, + GetPeerSignatureCalled: func(privateKey crypto.PrivateKey, pid []byte) ([]byte, error) { + return singleSigner.Sign(privateKey, pid) + }, + } + + shardCoordinator, _ := sharding.NewMultiShardCoordinator(maxShards, nodeShardId) + + messenger := CreateMessengerFromConfig(p2pConfig) + pidPk, _ := storageUnit.NewCache(storageUnit.CacheConfig{Type: storageUnit.LRUCache, Capacity: 1000}) + pkShardId, _ := storageUnit.NewCache(storageUnit.CacheConfig{Type: storageUnit.LRUCache, Capacity: 1000}) + pidShardId, _ := storageUnit.NewCache(storageUnit.CacheConfig{Type: storageUnit.LRUCache, Capacity: 1000}) + startInEpoch := uint32(0) + arg := networksharding.ArgPeerShardMapper{ + PeerIdPkCache: pidPk, + FallbackPkShardCache: pkShardId, + FallbackPidShardCache: pidShardId, + NodesCoordinator: coordinator, + PreferredPeersHolder: &p2pmocks.PeersHolderStub{}, + StartEpoch: startInEpoch, + } + peerShardMapper, err := networksharding.NewPeerShardMapper(arg) + if err != nil { + log.Error("error creating NewPeerShardMapper", "error", err) + } + err = messenger.SetPeerShardResolver(peerShardMapper) + if err != nil { + 
log.Error("error setting NewPeerShardMapper in p2p messenger", "error", err) + } + + thn := &TestHeartbeatNode{ + ShardCoordinator: shardCoordinator, + NodesCoordinator: coordinator, + Messenger: messenger, + PeerSigHandler: peerSigHandler, + PeerShardMapper: peerShardMapper, + Interceptor: NewCountInterceptor(), + } + + localId := thn.Messenger.ID() + thn.PeerShardMapper.UpdatePeerIDInfo(localId, []byte(""), shardCoordinator.SelfId()) + + thn.NodeKeys = keys + + return thn +} + +// CreateNodesWithTestHeartbeatNode returns a map with nodes per shard each using a real nodes coordinator +// and TestHeartbeatNode +func CreateNodesWithTestHeartbeatNode( + nodesPerShard int, + numMetaNodes int, + numShards int, + shardConsensusGroupSize int, + metaConsensusGroupSize int, + numObserversOnShard int, + p2pConfig config.P2PConfig, +) map[uint32][]*TestHeartbeatNode { + + cp := CreateCryptoParams(nodesPerShard, numMetaNodes, uint32(numShards)) + pubKeys := PubKeysMapFromKeysMap(cp.Keys) + validatorsMap := GenValidatorsFromPubKeys(pubKeys, uint32(numShards)) + validatorsForNodesCoordinator, _ := nodesCoordinator.NodesInfoToValidators(validatorsMap) + nodesMap := make(map[uint32][]*TestHeartbeatNode) + cacherCfg := storageUnit.CacheConfig{Capacity: 10000, Type: storageUnit.LRUCache, Shards: 1} + cache, _ := storageUnit.NewCache(cacherCfg) + for shardId, validatorList := range validatorsMap { + argumentsNodesCoordinator := nodesCoordinator.ArgNodesCoordinator{ + ShardConsensusGroupSize: shardConsensusGroupSize, + MetaConsensusGroupSize: metaConsensusGroupSize, + Marshalizer: TestMarshalizer, + Hasher: TestHasher, + ShardIDAsObserver: shardId, + NbShards: uint32(numShards), + EligibleNodes: validatorsForNodesCoordinator, + SelfPublicKey: []byte(strconv.Itoa(int(shardId))), + ConsensusGroupCache: cache, + Shuffler: &shardingMocks.NodeShufflerMock{}, + BootStorer: CreateMemUnit(), + WaitingNodes: make(map[uint32][]nodesCoordinator.Validator), + Epoch: 0, + EpochStartNotifier: notifier.NewEpochStartSubscriptionHandler(), + ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, + WaitingListFixEnabledEpoch: 0, + ChanStopNode: endProcess.GetDummyEndProcessChannel(), + NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, + IsFullArchive: false, + NodesCoordinatorRegistryFactory: &shardingMocks.NodesCoordinatorRegistryFactoryMock{}, + } + nodesCoordinatorInstance, err := nodesCoordinator.NewIndexHashedNodesCoordinator(argumentsNodesCoordinator) + log.LogIfError(err) + + nodesList := make([]*TestHeartbeatNode, len(validatorList)) + for i := range validatorList { + kp := cp.Keys[shardId][i] + nodesList[i] = NewTestHeartbeatNodeWithCoordinator( + uint32(numShards), + shardId, + p2pConfig, + nodesCoordinatorInstance, + *kp, + ) + } + nodesMap[shardId] = nodesList + } + + for counter := uint32(0); counter < uint32(numShards+1); counter++ { + for j := 0; j < numObserversOnShard; j++ { + shardId := counter + if shardId == uint32(numShards) { + shardId = core.MetachainShardId + } + + argumentsNodesCoordinator := nodesCoordinator.ArgNodesCoordinator{ + ShardConsensusGroupSize: shardConsensusGroupSize, + MetaConsensusGroupSize: metaConsensusGroupSize, + Marshalizer: TestMarshalizer, + Hasher: TestHasher, + ShardIDAsObserver: shardId, + NbShards: uint32(numShards), + EligibleNodes: validatorsForNodesCoordinator, + SelfPublicKey: []byte(strconv.Itoa(int(shardId))), + ConsensusGroupCache: cache, + Shuffler: &shardingMocks.NodeShufflerMock{}, + BootStorer: CreateMemUnit(), + WaitingNodes: 
make(map[uint32][]nodesCoordinator.Validator), + Epoch: 0, + EpochStartNotifier: notifier.NewEpochStartSubscriptionHandler(), + ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, + WaitingListFixEnabledEpoch: 0, + ChanStopNode: endProcess.GetDummyEndProcessChannel(), + NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, + IsFullArchive: false, + NodesCoordinatorRegistryFactory: &shardingMocks.NodesCoordinatorRegistryFactoryMock{}, + } + nodesCoordinatorInstance, err := nodesCoordinator.NewIndexHashedNodesCoordinator(argumentsNodesCoordinator) + log.LogIfError(err) + + n := NewTestHeartbeatNodeWithCoordinator( + uint32(numShards), + shardId, + p2pConfig, + nodesCoordinatorInstance, + createCryptoPair(), + ) + + nodesMap[shardId] = append(nodesMap[shardId], n) + } + } + + return nodesMap +} + +// InitTestHeartbeatNode initializes all the components and starts sender +func (thn *TestHeartbeatNode) InitTestHeartbeatNode(minPeersWaiting int) { + thn.initStorage() + thn.initDataPools() + thn.initRequestedItemsHandler() + thn.initResolvers() + thn.initInterceptors() + thn.initDirectConnectionsProcessor() + + for len(thn.Messenger.Peers()) < minPeersWaiting { + time.Sleep(time.Second) + } + + thn.initSender() + thn.initRequestsProcessor() +} + +func (thn *TestHeartbeatNode) initDataPools() { + thn.DataPool = dataRetrieverMock.CreatePoolsHolder(1, thn.ShardCoordinator.SelfId()) + + cacherCfg := storageUnit.CacheConfig{Capacity: 10000, Type: storageUnit.LRUCache, Shards: 1} + cache, _ := storageUnit.NewCache(cacherCfg) + thn.WhiteListHandler, _ = interceptors.NewWhiteListDataVerifier(cache) +} + +func (thn *TestHeartbeatNode) initStorage() { + thn.Storage = CreateStore(thn.ShardCoordinator.NumberOfShards()) +} + +func (thn *TestHeartbeatNode) initSender() { + identifierHeartbeat := common.HeartbeatV2Topic + thn.ShardCoordinator.CommunicationIdentifier(thn.ShardCoordinator.SelfId()) + argsSender := sender.ArgSender{ + Messenger: thn.Messenger, + Marshaller: TestMarshaller, + PeerAuthenticationTopic: common.PeerAuthenticationTopic, + HeartbeatTopic: identifierHeartbeat, + VersionNumber: "v01", + NodeDisplayName: defaultNodeName, + Identity: defaultNodeName + "_identity", + PeerSubType: core.RegularPeer, + CurrentBlockProvider: &testscommon.ChainHandlerStub{}, + PeerSignatureHandler: thn.PeerSigHandler, + PrivateKey: thn.NodeKeys.Sk, + RedundancyHandler: &mock.RedundancyHandlerStub{}, + NodesCoordinator: thn.NodesCoordinator, + HardforkTrigger: &testscommon.HardforkTriggerStub{}, + HardforkTriggerPubKey: []byte(providedHardforkPubKey), + + PeerAuthenticationTimeBetweenSends: timeBetweenPeerAuths, + PeerAuthenticationTimeBetweenSendsWhenError: timeBetweenSendsWhenError, + PeerAuthenticationThresholdBetweenSends: thresholdBetweenSends, + HeartbeatTimeBetweenSends: timeBetweenHeartbeats, + HeartbeatTimeBetweenSendsWhenError: timeBetweenSendsWhenError, + HeartbeatThresholdBetweenSends: thresholdBetweenSends, + HardforkTimeBetweenSends: timeBetweenHardforks, + } + + thn.Sender, _ = sender.NewSender(argsSender) +} + +func (thn *TestHeartbeatNode) initResolvers() { + dataPacker, _ := partitioning.NewSimpleDataPacker(TestMarshaller) + + _ = thn.Messenger.CreateTopic(common.ConsensusTopic+thn.ShardCoordinator.CommunicationIdentifier(thn.ShardCoordinator.SelfId()), true) + + resolverContainerFactory := resolverscontainer.FactoryArgs{ + ShardCoordinator: thn.ShardCoordinator, + Messenger: thn.Messenger, + Store: thn.Storage, + Marshalizer: TestMarshaller, + DataPools: thn.DataPool, + 
Uint64ByteSliceConverter: TestUint64Converter, + DataPacker: dataPacker, + TriesContainer: &mock.TriesHolderStub{ + GetCalled: func(bytes []byte) common.Trie { + return &trieMock.TrieStub{} + }, + }, + SizeCheckDelta: 100, + InputAntifloodHandler: &mock.NilAntifloodHandler{}, + OutputAntifloodHandler: &mock.NilAntifloodHandler{}, + NumConcurrentResolvingJobs: 10, + CurrentNetworkEpochProvider: &mock.CurrentNetworkEpochProviderStub{}, + PreferredPeersHolder: &p2pmocks.PeersHolderStub{}, + ResolverConfig: config.ResolverConfig{ + NumCrossShardPeers: 2, + NumTotalPeers: 3, + NumFullHistoryPeers: 3, + }, + NodesCoordinator: thn.NodesCoordinator, + MaxNumOfPeerAuthenticationInResponse: 5, + PeerShardMapper: thn.PeerShardMapper, + PeersRatingHandler: &p2pmocks.PeersRatingHandlerStub{}, + } + + if thn.ShardCoordinator.SelfId() == core.MetachainShardId { + thn.createMetaResolverContainer(resolverContainerFactory) + } else { + thn.createShardResolverContainer(resolverContainerFactory) + } +} + +func (thn *TestHeartbeatNode) createMetaResolverContainer(args resolverscontainer.FactoryArgs) { + resolversContainerFactory, _ := resolverscontainer.NewMetaResolversContainerFactory(args) + + var err error + thn.ResolversContainer, err = resolversContainerFactory.Create() + log.LogIfError(err) + + thn.createRequestHandler() +} + +func (thn *TestHeartbeatNode) createShardResolverContainer(args resolverscontainer.FactoryArgs) { + resolversContainerFactory, _ := resolverscontainer.NewShardResolversContainerFactory(args) + + var err error + thn.ResolversContainer, err = resolversContainerFactory.Create() + log.LogIfError(err) + + thn.createRequestHandler() +} + +func (thn *TestHeartbeatNode) createRequestHandler() { + thn.ResolverFinder, _ = containers.NewResolversFinder(thn.ResolversContainer, thn.ShardCoordinator) + thn.RequestHandler, _ = requestHandlers.NewResolverRequestHandler( + thn.ResolverFinder, + thn.RequestedItemsHandler, + thn.WhiteListHandler, + 100, + thn.ShardCoordinator.SelfId(), + time.Second, + ) +} + +func (thn *TestHeartbeatNode) initRequestedItemsHandler() { + thn.RequestedItemsHandler = timecache.NewTimeCache(roundDuration) +} + +func (thn *TestHeartbeatNode) initInterceptors() { + argsFactory := interceptorFactory.ArgInterceptedDataFactory{ + CoreComponents: &processMock.CoreComponentsMock{ + IntMarsh: TestMarshaller, + HardforkTriggerPubKeyField: []byte(providedHardforkPubKey), + }, + ShardCoordinator: thn.ShardCoordinator, + NodesCoordinator: thn.NodesCoordinator, + PeerSignatureHandler: thn.PeerSigHandler, + SignaturesHandler: &processMock.SignaturesHandlerStub{}, + HeartbeatExpiryTimespanInSec: 60, + PeerID: thn.Messenger.ID(), + } + + thn.createPeerAuthInterceptor(argsFactory) + thn.createHeartbeatInterceptor(argsFactory) + thn.createDirectConnectionInfoInterceptor(argsFactory) +} + +func (thn *TestHeartbeatNode) createPeerAuthInterceptor(argsFactory interceptorFactory.ArgInterceptedDataFactory) { + args := interceptorsProcessor.ArgPeerAuthenticationInterceptorProcessor{ + PeerAuthenticationCacher: thn.DataPool.PeerAuthentications(), + PeerShardMapper: thn.PeerShardMapper, + Marshaller: TestMarshaller, + HardforkTrigger: &testscommon.HardforkTriggerStub{}, + } + paProcessor, _ := interceptorsProcessor.NewPeerAuthenticationInterceptorProcessor(args) + paFactory, _ := interceptorFactory.NewInterceptedPeerAuthenticationDataFactory(argsFactory) + thn.PeerAuthInterceptor = thn.initMultiDataInterceptor(common.PeerAuthenticationTopic, paFactory, paProcessor) +} + +func (thn 
*TestHeartbeatNode) createHeartbeatInterceptor(argsFactory interceptorFactory.ArgInterceptedDataFactory) { + args := interceptorsProcessor.ArgHeartbeatInterceptorProcessor{ + HeartbeatCacher: thn.DataPool.Heartbeats(), + ShardCoordinator: thn.ShardCoordinator, + PeerShardMapper: thn.PeerShardMapper, + } + hbProcessor, _ := interceptorsProcessor.NewHeartbeatInterceptorProcessor(args) + hbFactory, _ := interceptorFactory.NewInterceptedHeartbeatDataFactory(argsFactory) + identifierHeartbeat := common.HeartbeatV2Topic + thn.ShardCoordinator.CommunicationIdentifier(thn.ShardCoordinator.SelfId()) + thn.HeartbeatInterceptor = thn.initMultiDataInterceptor(identifierHeartbeat, hbFactory, hbProcessor) +} + +func (thn *TestHeartbeatNode) createDirectConnectionInfoInterceptor(argsFactory interceptorFactory.ArgInterceptedDataFactory) { + args := interceptorsProcessor.ArgDirectConnectionInfoInterceptorProcessor{ + PeerShardMapper: thn.PeerShardMapper, + } + dciProcessor, _ := interceptorsProcessor.NewDirectConnectionInfoInterceptorProcessor(args) + dciFactory, _ := interceptorFactory.NewInterceptedDirectConnectionInfoFactory(argsFactory) + thn.ValidatorInfoInterceptor = thn.initSingleDataInterceptor(common.ConnectionTopic, dciFactory, dciProcessor) +} + +func (thn *TestHeartbeatNode) initMultiDataInterceptor(topic string, dataFactory process.InterceptedDataFactory, processor process.InterceptorProcessor) *interceptors.MultiDataInterceptor { + mdInterceptor, _ := interceptors.NewMultiDataInterceptor( + interceptors.ArgMultiDataInterceptor{ + Topic: topic, + Marshalizer: TestMarshalizer, + DataFactory: dataFactory, + Processor: processor, + Throttler: TestThrottler, + AntifloodHandler: &mock.NilAntifloodHandler{}, + WhiteListRequest: &testscommon.WhiteListHandlerStub{ + IsWhiteListedCalled: func(interceptedData process.InterceptedData) bool { + return true + }, + }, + PreferredPeersHolder: &p2pmocks.PeersHolderStub{}, + CurrentPeerId: thn.Messenger.ID(), + }, + ) + + thn.registerTopicValidator(topic, mdInterceptor) + + return mdInterceptor +} + +func (thn *TestHeartbeatNode) initSingleDataInterceptor(topic string, dataFactory process.InterceptedDataFactory, processor process.InterceptorProcessor) *interceptors.SingleDataInterceptor { + sdInterceptor, _ := interceptors.NewSingleDataInterceptor( + interceptors.ArgSingleDataInterceptor{ + Topic: topic, + DataFactory: dataFactory, + Processor: processor, + Throttler: TestThrottler, + AntifloodHandler: &mock.NilAntifloodHandler{}, + WhiteListRequest: &testscommon.WhiteListHandlerStub{ + IsWhiteListedCalled: func(interceptedData process.InterceptedData) bool { + return true + }, + }, + PreferredPeersHolder: &p2pmocks.PeersHolderStub{}, + CurrentPeerId: thn.Messenger.ID(), + }, + ) + + thn.registerTopicValidator(topic, sdInterceptor) + + return sdInterceptor +} + +func (thn *TestHeartbeatNode) initRequestsProcessor() { + args := processor.ArgPeerAuthenticationRequestsProcessor{ + RequestHandler: thn.RequestHandler, + NodesCoordinator: thn.NodesCoordinator, + PeerAuthenticationPool: thn.DataPool.PeerAuthentications(), + ShardId: thn.ShardCoordinator.SelfId(), + Epoch: 0, + MessagesInChunk: messagesInChunk, + MinPeersThreshold: minPeersThreshold, + DelayBetweenRequests: delayBetweenRequests, + MaxTimeout: maxTimeout, + MaxMissingKeysInRequest: maxMissingKeysInRequest, + Randomizer: &random.ConcurrentSafeIntRandomizer{}, + } + thn.RequestsProcessor, _ = processor.NewPeerAuthenticationRequestsProcessor(args) +} + +func (thn *TestHeartbeatNode) 
initDirectConnectionsProcessor() { + args := processor.ArgDirectConnectionsProcessor{ + Messenger: thn.Messenger, + Marshaller: TestMarshaller, + ShardCoordinator: thn.ShardCoordinator, + DelayBetweenNotifications: 5 * time.Second, + } + + thn.DirectConnectionsProcessor, _ = processor.NewDirectConnectionsProcessor(args) +} + +// ConnectTo will try to initiate a connection to the provided parameter +func (thn *TestHeartbeatNode) ConnectTo(connectable Connectable) error { + if check.IfNil(connectable) { + return fmt.Errorf("trying to connect to a nil Connectable parameter") + } + + return thn.Messenger.ConnectToPeer(connectable.GetConnectableAddress()) +} + +// GetConnectableAddress returns a non circuit, non windows default connectable p2p address +func (thn *TestHeartbeatNode) GetConnectableAddress() string { + if thn == nil { + return "nil" + } + + return GetConnectableAddress(thn.Messenger) +} + +// MakeDisplayTableForHeartbeatNodes returns a string containing counters for received messages for all provided test nodes +func MakeDisplayTableForHeartbeatNodes(nodes map[uint32][]*TestHeartbeatNode) string { + header := []string{"pk", "pid", "shard ID", "messages global", "messages intra", "messages cross", "conns Total/IntraVal/CrossVal/IntraObs/CrossObs/FullObs/Unk/Sed"} + dataLines := make([]*display.LineData, 0) + + for shardId, nodesList := range nodes { + for _, n := range nodesList { + buffPk, _ := n.NodeKeys.Pk.ToByteArray() + + peerInfo := n.Messenger.GetConnectedPeersInfo() + + pid := n.Messenger.ID().Pretty() + lineData := display.NewLineData( + false, + []string{ + core.GetTrimmedPk(hex.EncodeToString(buffPk)), + pid[len(pid)-6:], + fmt.Sprintf("%d", shardId), + fmt.Sprintf("%d", n.CountGlobalMessages()), + fmt.Sprintf("%d", n.CountIntraShardMessages()), + fmt.Sprintf("%d", n.CountCrossShardMessages()), + fmt.Sprintf("%d/%d/%d/%d/%d/%d/%d/%d", + len(n.Messenger.ConnectedPeers()), + peerInfo.NumIntraShardValidators, + peerInfo.NumCrossShardValidators, + peerInfo.NumIntraShardObservers, + peerInfo.NumCrossShardObservers, + peerInfo.NumFullHistoryObservers, + len(peerInfo.UnknownPeers), + len(peerInfo.Seeders), + ), + }, + ) + + dataLines = append(dataLines, lineData) + } + } + table, _ := display.CreateTableString(header, dataLines) + + return table +} + +// registerTopicValidator registers a message processor instance on the provided topic +func (thn *TestHeartbeatNode) registerTopicValidator(topic string, processor p2p.MessageProcessor) { + err := thn.Messenger.CreateTopic(topic, true) + if err != nil { + fmt.Printf("error while creating topic %s: %s\n", topic, err.Error()) + return + } + + err = thn.Messenger.RegisterMessageProcessor(topic, "test", processor) + if err != nil { + fmt.Printf("error while registering topic validator %s: %s\n", topic, err.Error()) + return + } +} + +// CreateTestInterceptors creates test interceptors that count the number of received messages +func (thn *TestHeartbeatNode) CreateTestInterceptors() { + thn.registerTopicValidator(GlobalTopic, thn.Interceptor) + + metaIdentifier := ShardTopic + thn.ShardCoordinator.CommunicationIdentifier(core.MetachainShardId) + thn.registerTopicValidator(metaIdentifier, thn.Interceptor) + + for i := uint32(0); i < thn.ShardCoordinator.NumberOfShards(); i++ { + identifier := ShardTopic + thn.ShardCoordinator.CommunicationIdentifier(i) + thn.registerTopicValidator(identifier, thn.Interceptor) + } +} + +// CountGlobalMessages returns the messages count on the global topic +func (thn *TestHeartbeatNode) 
CountGlobalMessages() int { + return thn.Interceptor.MessageCount(GlobalTopic) +} + +// CountIntraShardMessages returns the messages count on the intra-shard topic +func (thn *TestHeartbeatNode) CountIntraShardMessages() int { + identifier := ShardTopic + thn.ShardCoordinator.CommunicationIdentifier(thn.ShardCoordinator.SelfId()) + return thn.Interceptor.MessageCount(identifier) +} + +// CountCrossShardMessages returns the messages count on the cross-shard topics +func (thn *TestHeartbeatNode) CountCrossShardMessages() int { + messages := 0 + + if thn.ShardCoordinator.SelfId() != core.MetachainShardId { + metaIdentifier := ShardTopic + thn.ShardCoordinator.CommunicationIdentifier(core.MetachainShardId) + messages += thn.Interceptor.MessageCount(metaIdentifier) + } + + for i := uint32(0); i < thn.ShardCoordinator.NumberOfShards(); i++ { + if i == thn.ShardCoordinator.SelfId() { + continue + } + + metaIdentifier := ShardTopic + thn.ShardCoordinator.CommunicationIdentifier(i) + messages += thn.Interceptor.MessageCount(metaIdentifier) + } + + return messages +} + +// Close - +func (thn *TestHeartbeatNode) Close() { + _ = thn.Sender.Close() + _ = thn.PeerAuthInterceptor.Close() + _ = thn.RequestsProcessor.Close() + _ = thn.ResolversContainer.Close() + _ = thn.DirectConnectionsProcessor.Close() + _ = thn.Messenger.Close() +} + +// IsInterfaceNil returns true if there is no value under the interface +func (thn *TestHeartbeatNode) IsInterfaceNil() bool { + return thn == nil +} diff --git a/integrationTests/testInitializer.go b/integrationTests/testInitializer.go index 1aeeaef4760..9adbb247c3a 100644 --- a/integrationTests/testInitializer.go +++ b/integrationTests/testInitializer.go @@ -159,6 +159,7 @@ func CreateMessengerWithKadDht(initialAddr string) p2p.Messenger { SyncTimer: &libp2p.LocalSyncTimer{}, PreferredPeersHolder: &p2pmocks.PeersHolderStub{}, NodeOperationMode: p2p.NormalOperation, + PeersRatingHandler: &p2pmocks.PeersRatingHandlerStub{}, } libP2PMes, err := libp2p.NewNetworkMessenger(arg) @@ -182,6 +183,7 @@ func CreateMessengerWithKadDhtAndProtocolID(initialAddr string, protocolID strin SyncTimer: &libp2p.LocalSyncTimer{}, PreferredPeersHolder: &p2pmocks.PeersHolderStub{}, NodeOperationMode: p2p.NormalOperation, + PeersRatingHandler: &p2pmocks.PeersRatingHandlerStub{}, } libP2PMes, err := libp2p.NewNetworkMessenger(arg) @@ -199,6 +201,7 @@ func CreateMessengerFromConfig(p2pConfig config.P2PConfig) p2p.Messenger { SyncTimer: &libp2p.LocalSyncTimer{}, PreferredPeersHolder: &p2pmocks.PeersHolderStub{}, NodeOperationMode: p2p.NormalOperation, + PeersRatingHandler: &p2pmocks.PeersRatingHandlerStub{}, } if p2pConfig.Sharding.AdditionalConnections.MaxFullHistoryObservers > 0 { @@ -212,8 +215,55 @@ func CreateMessengerFromConfig(p2pConfig config.P2PConfig) p2p.Messenger { return libP2PMes } +// CreateMessengerFromConfigWithPeersRatingHandler creates a new libp2p messenger with provided configuration +func CreateMessengerFromConfigWithPeersRatingHandler(p2pConfig config.P2PConfig, peersRatingHandler p2p.PeersRatingHandler) p2p.Messenger { + arg := libp2p.ArgsNetworkMessenger{ + Marshalizer: TestMarshalizer, + ListenAddress: libp2p.ListenLocalhostAddrWithIp4AndTcp, + P2pConfig: p2pConfig, + SyncTimer: &libp2p.LocalSyncTimer{}, + PreferredPeersHolder: &p2pmocks.PeersHolderStub{}, + NodeOperationMode: p2p.NormalOperation, + PeersRatingHandler: peersRatingHandler, + } + + if p2pConfig.Sharding.AdditionalConnections.MaxFullHistoryObservers > 0 { + // we deliberately set this, automatically choose 
full archive node mode + arg.NodeOperationMode = p2p.FullArchiveMode + } + + libP2PMes, err := libp2p.NewNetworkMessenger(arg) + log.LogIfError(err) + + return libP2PMes +} + +// CreateP2PConfigWithNoDiscovery creates a new libp2p messenger with no peer discovery +func CreateP2PConfigWithNoDiscovery() config.P2PConfig { + return config.P2PConfig{ + Node: config.NodeConfig{ + Port: "0", + Seed: "", + ConnectionWatcherType: "print", + }, + KadDhtPeerDiscovery: config.KadDhtPeerDiscoveryConfig{ + Enabled: false, + }, + Sharding: config.ShardingConfig{ + Type: p2p.NilListSharder, + }, + } +} + // CreateMessengerWithNoDiscovery creates a new libp2p messenger with no peer discovery func CreateMessengerWithNoDiscovery() p2p.Messenger { + p2pConfig := CreateP2PConfigWithNoDiscovery() + + return CreateMessengerFromConfig(p2pConfig) +} + +// CreateMessengerWithNoDiscoveryAndPeersRatingHandler creates a new libp2p messenger with no peer discovery +func CreateMessengerWithNoDiscoveryAndPeersRatingHandler(peersRatingHanlder p2p.PeersRatingHandler) p2p.Messenger { p2pConfig := config.P2PConfig{ Node: config.NodeConfig{ Port: "0", @@ -228,7 +278,7 @@ func CreateMessengerWithNoDiscovery() p2p.Messenger { }, } - return CreateMessengerFromConfig(p2pConfig) + return CreateMessengerFromConfigWithPeersRatingHandler(p2pConfig, peersRatingHanlder) } // CreateFixedNetworkOf8Peers assembles a network as following: diff --git a/integrationTests/testP2PNode.go b/integrationTests/testP2PNode.go index 8c0ba72053f..d19fc6c5a78 100644 --- a/integrationTests/testP2PNode.go +++ b/integrationTests/testP2PNode.go @@ -180,6 +180,7 @@ func (tP2pNode *TestP2PNode) initNode() { processComponents.EpochNotifier = epochStartNotifier processComponents.EpochTrigger = &mock.EpochStartTriggerStub{} processComponents.PeerMapper = tP2pNode.NetworkShardingUpdater + processComponents.HardforkTriggerField = hardforkTrigger networkComponents := GetDefaultNetworkComponents() networkComponents.Messenger = tP2pNode.Messenger @@ -199,7 +200,6 @@ func (tP2pNode *TestP2PNode) initNode() { node.WithNetworkComponents(networkComponents), node.WithDataComponents(dataComponents), node.WithInitialNodesPubKeys(pubkeys), - node.WithHardforkTrigger(hardforkTrigger), node.WithPeerDenialEvaluator(&mock.PeerDenialEvaluatorStub{}), ) log.LogIfError(err) @@ -216,16 +216,16 @@ func (tP2pNode *TestP2PNode) initNode() { Config: config.Config{ Heartbeat: hbConfig, }, - Prefs: config.Preferences{}, - AppVersion: "test", - GenesisTime: time.Time{}, - HardforkTrigger: hardforkTrigger, - RedundancyHandler: redundancyHandler, - CoreComponents: coreComponents, - DataComponents: dataComponents, - NetworkComponents: networkComponents, - CryptoComponents: cryptoComponents, - ProcessComponents: processComponents, + HeartbeatDisableEpoch: 10, + Prefs: config.Preferences{}, + AppVersion: "test", + GenesisTime: time.Time{}, + RedundancyHandler: redundancyHandler, + CoreComponents: coreComponents, + DataComponents: dataComponents, + NetworkComponents: networkComponents, + CryptoComponents: cryptoComponents, + ProcessComponents: processComponents, } heartbeatComponentsFactory, _ := factory.NewHeartbeatComponentsFactory(hbCompArgs) managedHBComponents, err := factory.NewManagedHeartbeatComponents(heartbeatComponentsFactory) diff --git a/integrationTests/testProcessorNode.go b/integrationTests/testProcessorNode.go index 08db3b3e030..1f314173c16 100644 --- a/integrationTests/testProcessorNode.go +++ b/integrationTests/testProcessorNode.go @@ -42,6 +42,7 @@ import ( 
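The testInitializer.go hunks above introduce `CreateMessengerFromConfigWithPeersRatingHandler` and `CreateMessengerWithNoDiscoveryAndPeersRatingHandler`, and testProcessorNode.go (below) replaces the stub with a real `p2pRating.NewPeersRatingHandler`. Roughly how a test is expected to combine the two pieces, using only constructors that appear in this diff (error handling trimmed):

```go
package main

import (
	"github.com/ElrondNetwork/elrond-go/integrationTests"
	p2pRating "github.com/ElrondNetwork/elrond-go/p2p/rating"
	"github.com/ElrondNetwork/elrond-go/testscommon"
)

func main() {
	// identical construction to the one added in newBaseTestProcessorNode
	peersRatingHandler, _ := p2pRating.NewPeersRatingHandler(
		p2pRating.ArgPeersRatingHandler{
			TopRatedCache: testscommon.NewCacherMock(),
			BadRatedCache: testscommon.NewCacherMock(),
		})

	// the messenger is then created with the handler injected instead of a stub
	messenger := integrationTests.CreateMessengerWithNoDiscoveryAndPeersRatingHandler(peersRatingHandler)
	defer func() {
		_ = messenger.Close()
	}()
}
```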
"github.com/ElrondNetwork/elrond-go/dataRetriever/factory/resolverscontainer" "github.com/ElrondNetwork/elrond-go/dataRetriever/requestHandlers" "github.com/ElrondNetwork/elrond-go/dblookupext" + bootstrapDisabled "github.com/ElrondNetwork/elrond-go/epochStart/bootstrap/disabled" "github.com/ElrondNetwork/elrond-go/epochStart/metachain" "github.com/ElrondNetwork/elrond-go/epochStart/notifier" "github.com/ElrondNetwork/elrond-go/epochStart/shardchain" @@ -55,11 +56,13 @@ import ( "github.com/ElrondNetwork/elrond-go/node/external" "github.com/ElrondNetwork/elrond-go/node/nodeDebugFactory" "github.com/ElrondNetwork/elrond-go/p2p" + p2pRating "github.com/ElrondNetwork/elrond-go/p2p/rating" "github.com/ElrondNetwork/elrond-go/process" "github.com/ElrondNetwork/elrond-go/process/block" "github.com/ElrondNetwork/elrond-go/process/block/bootstrapStorage" "github.com/ElrondNetwork/elrond-go/process/block/postprocess" "github.com/ElrondNetwork/elrond-go/process/block/preprocess" + "github.com/ElrondNetwork/elrond-go/process/block/processedMb" "github.com/ElrondNetwork/elrond-go/process/coordinator" "github.com/ElrondNetwork/elrond-go/process/economics" "github.com/ElrondNetwork/elrond-go/process/factory" @@ -114,6 +117,8 @@ import ( var zero = big.NewInt(0) +var hardforkPubKey = "153dae6cb3963260f309959bf285537b77ae16d82e9933147be7827f7394de8dc97d9d9af41e970bc72aecb44b77e819621081658c37f7000d21e2d0e8963df83233407bde9f46369ba4fcd03b57f40b80b06c191a428cfb5c447ec510e79307" + // TestHasher represents a sha256 hasher var TestHasher = sha256.NewSha256() @@ -245,6 +250,7 @@ type Connectable interface { type TestProcessorNode struct { ShardCoordinator sharding.Coordinator NodesCoordinator nodesCoordinator.NodesCoordinator + PeerShardMapper process.PeerShardMapper NodesSetup sharding.GenesisNodesSetupHandler Messenger p2p.Messenger @@ -335,8 +341,9 @@ type TestProcessorNode struct { EnableEpochs config.EnableEpochs UseValidVmBlsSigVerifier bool - TransactionLogProcessor process.TransactionLogProcessor - ScheduledMiniBlocksEnableEpoch uint32 + TransactionLogProcessor process.TransactionLogProcessor + PeersRatingHandler p2p.PeersRatingHandler + HardforkTrigger node.HardforkTrigger } // CreatePkBytes creates 'numShards' public key-like byte slices @@ -389,7 +396,7 @@ func newBaseTestProcessorNode( return numNodes }, } - nodesCoordinator := &shardingMocks.NodesCoordinatorStub{ + nodesCoordinatorStub := &shardingMocks.NodesCoordinatorStub{ ComputeValidatorsGroupCalled: func(randomness []byte, round uint64, shardId uint32, epoch uint32) (validators []nodesCoordinator.Validator, err error) { v, _ := nodesCoordinator.NewValidator(pksBytes[shardId], 1, defaultChancesSelection) return []nodesCoordinator.Validator{v}, nil @@ -411,13 +418,19 @@ func newBaseTestProcessorNode( }, } - messenger := CreateMessengerWithNoDiscovery() + peersRatingHandler, _ := p2pRating.NewPeersRatingHandler( + p2pRating.ArgPeersRatingHandler{ + TopRatedCache: testscommon.NewCacherMock(), + BadRatedCache: testscommon.NewCacherMock(), + }) + + messenger := CreateMessengerWithNoDiscoveryAndPeersRatingHandler(peersRatingHandler) logsProcessor, _ := transactionLog.NewTxLogProcessor(transactionLog.ArgTxLogProcessor{Marshalizer: TestMarshalizer}) tpn := &TestProcessorNode{ ShardCoordinator: shardCoordinator, Messenger: messenger, - NodesCoordinator: nodesCoordinator, + NodesCoordinator: nodesCoordinatorStub, HeaderSigVerifier: &mock.HeaderSigVerifierStub{}, HeaderIntegrityVerifier: CreateHeaderIntegrityVerifier(), ChainID: ChainID, @@ -428,6 +441,8 
@@ func newBaseTestProcessorNode( ArwenChangeLocker: &sync.RWMutex{}, TransactionLogProcessor: logsProcessor, Bootstrapper: mock.NewTestBootstrapperMock(), + PeersRatingHandler: peersRatingHandler, + PeerShardMapper: mock.NewNetworkShardingCollectorMock(), EnableEpochs: config.EnableEpochs{ StakingV4InitEnableEpoch: StakingV4InitEpoch, StakingV4EnableEpoch: StakingV4Epoch, @@ -435,7 +450,6 @@ func newBaseTestProcessorNode( }, } - tpn.ScheduledMiniBlocksEnableEpoch = uint32(1000000) tpn.NodeKeys = &TestKeyPair{ Sk: sk, Pk: pk, @@ -447,7 +461,9 @@ func newBaseTestProcessorNode( tpn.initDataPools() tpn.EnableEpochs = config.EnableEpochs{ OptimizeGasUsedInCrossMiniBlocksEnableEpoch: 10, - StakingV4InitEnableEpoch: StakingV4Epoch - 1, + ScheduledMiniBlocksEnableEpoch: 1000000, + MiniBlockPartialExecutionEnableEpoch: 1000000, + StakingV4InitEnableEpoch: StakingV4InitEpoch, StakingV4EnableEpoch: StakingV4Epoch, } @@ -558,7 +574,7 @@ func NewTestProcessorNodeWithFullGenesis( smartContractParser, ) tpn.initBlockTracker() - tpn.initInterceptors() + tpn.initInterceptors(heartbeatPk) tpn.initInnerProcessors(arwenConfig.MakeGasMapForTests()) argsNewScQueryService := smartContract.ArgsNewSCQueryService{ VmContainer: tpn.VMContainer, @@ -586,7 +602,7 @@ func NewTestProcessorNodeWithFullGenesis( tpn.initNode() tpn.addHandlersForCounters() tpn.addGenesisBlocksIntoStorage() - tpn.createHeartbeatWithHardforkTrigger(heartbeatPk) + tpn.createHeartbeatWithHardforkTrigger() return tpn } @@ -595,9 +611,15 @@ func NewTestProcessorNodeWithFullGenesis( func NewTestProcessorNodeWithCustomDataPool(maxShards uint32, nodeShardId uint32, txSignPrivKeyShardId uint32, dPool dataRetriever.PoolsHolder) *TestProcessorNode { shardCoordinator, _ := sharding.NewMultiShardCoordinator(maxShards, nodeShardId) - messenger := CreateMessengerWithNoDiscovery() + peersRatingHandler, _ := p2pRating.NewPeersRatingHandler( + p2pRating.ArgPeersRatingHandler{ + TopRatedCache: testscommon.NewCacherMock(), + BadRatedCache: testscommon.NewCacherMock(), + }) + + messenger := CreateMessengerWithNoDiscoveryAndPeersRatingHandler(peersRatingHandler) _ = messenger.SetThresholdMinConnectedPeers(minConnectedPeers) - nodesCoordinator := &shardingMocks.NodesCoordinatorMock{} + nodesCoordinatorStub := &shardingMocks.NodesCoordinatorMock{} kg := &mock.KeyGenMock{} sk, pk := kg.GeneratePair() @@ -605,7 +627,7 @@ func NewTestProcessorNodeWithCustomDataPool(maxShards uint32, nodeShardId uint32 tpn := &TestProcessorNode{ ShardCoordinator: shardCoordinator, Messenger: messenger, - NodesCoordinator: nodesCoordinator, + NodesCoordinator: nodesCoordinatorStub, HeaderSigVerifier: &mock.HeaderSigVerifierStub{}, HeaderIntegrityVerifier: CreateHeaderIntegrityVerifier(), ChainID: ChainID, @@ -619,6 +641,8 @@ func NewTestProcessorNodeWithCustomDataPool(maxShards uint32, nodeShardId uint32 EpochNotifier: forking.NewGenericEpochNotifier(), ArwenChangeLocker: &sync.RWMutex{}, TransactionLogProcessor: logsProcessor, + PeersRatingHandler: peersRatingHandler, + PeerShardMapper: bootstrapDisabled.NewPeerShardMapper(), } tpn.NodeKeys = &TestKeyPair{ @@ -759,7 +783,7 @@ func (tpn *TestProcessorNode) initTestNode() { tpn.EconomicsData, ) tpn.initBlockTracker() - tpn.initInterceptors() + tpn.initInterceptors("") tpn.initInnerProcessors(arwenConfig.MakeGasMapForTests()) argsNewScQueryService := smartContract.ArgsNewSCQueryService{ VmContainer: tpn.VMContainer, @@ -818,7 +842,7 @@ func (tpn *TestProcessorNode) initTestNodeWithTrieDBAndGasModel(trieStore storag tpn.EconomicsData, ) 
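With the dedicated ScheduledMiniBlocksEnableEpoch field removed from TestProcessorNode, scheduled mini blocks and partial mini block execution are now toggled only through config.EnableEpochs, like every other activation flag. A minimal sketch of the convention used in this patch, where an activation epoch far in the future (1000000) keeps a feature off for the whole test and a low value enables it early; the helper name is hypothetical.

// disableScheduledAndPartialExecution mirrors the values used in this diff: epoch 1000000
// is never reached in integration tests, so both features stay inactive.
func disableScheduledAndPartialExecution(tpn *TestProcessorNode) {
    tpn.EnableEpochs.ScheduledMiniBlocksEnableEpoch = 1000000
    tpn.EnableEpochs.MiniBlockPartialExecutionEnableEpoch = 1000000
}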
tpn.initBlockTracker() - tpn.initInterceptors() + tpn.initInterceptors("") tpn.initInnerProcessors(gasMap) tpn.createFullSCQueryService() tpn.initBlockProcessor(stateCheckpointModulus) @@ -1193,7 +1217,7 @@ func CreateRatingsData() *rating.RatingsData { return ratingsData } -func (tpn *TestProcessorNode) initInterceptors() { +func (tpn *TestProcessorNode) initInterceptors(heartbeatPk string) { var err error tpn.BlockBlackListHandler = timecache.NewTimeCache(TimeSpanForBadHeaders) if check.IfNil(tpn.EpochStartNotifier) { @@ -1239,30 +1263,37 @@ func (tpn *TestProcessorNode) initInterceptors() { epochStartTrigger, _ := metachain.NewEpochStartTrigger(argsEpochStart) tpn.EpochStartTrigger = &metachain.TestTrigger{} tpn.EpochStartTrigger.SetTrigger(epochStartTrigger) + providedHardforkPk := tpn.createHardforkTrigger(heartbeatPk) + coreComponents.HardforkTriggerPubKeyField = providedHardforkPk metaInterceptorContainerFactoryArgs := interceptorscontainer.CommonInterceptorsContainerFactoryArgs{ - CoreComponents: coreComponents, - CryptoComponents: cryptoComponents, - ShardCoordinator: tpn.ShardCoordinator, - NodesCoordinator: tpn.NodesCoordinator, - Messenger: tpn.Messenger, - Store: tpn.Storage, - DataPool: tpn.DataPool, - Accounts: tpn.AccntState, - MaxTxNonceDeltaAllowed: maxTxNonceDeltaAllowed, - TxFeeHandler: tpn.EconomicsData, - BlockBlackList: tpn.BlockBlackListHandler, - HeaderSigVerifier: tpn.HeaderSigVerifier, - HeaderIntegrityVerifier: tpn.HeaderIntegrityVerifier, - SizeCheckDelta: sizeCheckDelta, - ValidityAttester: tpn.BlockTracker, - EpochStartTrigger: tpn.EpochStartTrigger, - WhiteListHandler: tpn.WhiteListHandler, - WhiteListerVerifiedTxs: tpn.WhiteListerVerifiedTxs, - AntifloodHandler: &mock.NilAntifloodHandler{}, - ArgumentsParser: smartContract.NewArgumentParser(), - PreferredPeersHolder: &p2pmocks.PeersHolderStub{}, - RequestHandler: tpn.RequestHandler, + CoreComponents: coreComponents, + CryptoComponents: cryptoComponents, + Accounts: tpn.AccntState, + ShardCoordinator: tpn.ShardCoordinator, + NodesCoordinator: tpn.NodesCoordinator, + Messenger: tpn.Messenger, + Store: tpn.Storage, + DataPool: tpn.DataPool, + MaxTxNonceDeltaAllowed: maxTxNonceDeltaAllowed, + TxFeeHandler: tpn.EconomicsData, + BlockBlackList: tpn.BlockBlackListHandler, + HeaderSigVerifier: tpn.HeaderSigVerifier, + HeaderIntegrityVerifier: tpn.HeaderIntegrityVerifier, + ValidityAttester: tpn.BlockTracker, + EpochStartTrigger: tpn.EpochStartTrigger, + WhiteListHandler: tpn.WhiteListHandler, + WhiteListerVerifiedTxs: tpn.WhiteListerVerifiedTxs, + AntifloodHandler: &mock.NilAntifloodHandler{}, + ArgumentsParser: smartContract.NewArgumentParser(), + PreferredPeersHolder: &p2pmocks.PeersHolderStub{}, + SizeCheckDelta: sizeCheckDelta, + RequestHandler: tpn.RequestHandler, + PeerSignatureHandler: &processMock.PeerSignatureHandlerStub{}, + SignaturesHandler: &processMock.SignaturesHandlerStub{}, + HeartbeatExpiryTimespanInSec: 30, + PeerShardMapper: tpn.PeerShardMapper, + HardforkTrigger: tpn.HardforkTrigger, } interceptorContainerFactory, _ := interceptorscontainer.NewMetaInterceptorsContainerFactory(metaInterceptorContainerFactoryArgs) @@ -1295,30 +1326,37 @@ func (tpn *TestProcessorNode) initInterceptors() { epochStartTrigger, _ := shardchain.NewEpochStartTrigger(argsShardEpochStart) tpn.EpochStartTrigger = &shardchain.TestTrigger{} tpn.EpochStartTrigger.SetTrigger(epochStartTrigger) + providedHardforkPk := tpn.createHardforkTrigger(heartbeatPk) + coreComponents.HardforkTriggerPubKeyField = providedHardforkPk 
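initInterceptors now receives the hardfork trigger public key as a hex string and forwards it to createHardforkTrigger: an empty string keeps the node's own BLS public key as the trigger key, while a non-empty value (the path used by NewTestProcessorNodeWithFullGenesis) is hex-decoded and installed as the only key allowed to trigger the hardfork. A small usage sketch; the wrapper name is hypothetical, and using the package-level hardforkPubKey variable as the dedicated key is an illustrative choice.

// initInterceptorsForTest shows the two call modes introduced by this patch.
func initInterceptorsForTest(tpn *TestProcessorNode, useDedicatedHardforkKey bool) {
    if useDedicatedHardforkKey {
        // The hex-encoded BLS public key becomes TriggerPubKeyBytes inside createHardforkTrigger.
        tpn.initInterceptors(hardforkPubKey)
        return
    }
    // An empty string falls back to the node's own public key as the trigger key.
    tpn.initInterceptors("")
}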
shardIntereptorContainerFactoryArgs := interceptorscontainer.CommonInterceptorsContainerFactoryArgs{ - CoreComponents: coreComponents, - CryptoComponents: cryptoComponents, - Accounts: tpn.AccntState, - ShardCoordinator: tpn.ShardCoordinator, - NodesCoordinator: tpn.NodesCoordinator, - Messenger: tpn.Messenger, - Store: tpn.Storage, - DataPool: tpn.DataPool, - MaxTxNonceDeltaAllowed: maxTxNonceDeltaAllowed, - TxFeeHandler: tpn.EconomicsData, - BlockBlackList: tpn.BlockBlackListHandler, - HeaderSigVerifier: tpn.HeaderSigVerifier, - HeaderIntegrityVerifier: tpn.HeaderIntegrityVerifier, - SizeCheckDelta: sizeCheckDelta, - ValidityAttester: tpn.BlockTracker, - EpochStartTrigger: tpn.EpochStartTrigger, - WhiteListHandler: tpn.WhiteListHandler, - WhiteListerVerifiedTxs: tpn.WhiteListerVerifiedTxs, - AntifloodHandler: &mock.NilAntifloodHandler{}, - ArgumentsParser: smartContract.NewArgumentParser(), - PreferredPeersHolder: &p2pmocks.PeersHolderStub{}, - RequestHandler: tpn.RequestHandler, + CoreComponents: coreComponents, + CryptoComponents: cryptoComponents, + Accounts: tpn.AccntState, + ShardCoordinator: tpn.ShardCoordinator, + NodesCoordinator: tpn.NodesCoordinator, + Messenger: tpn.Messenger, + Store: tpn.Storage, + DataPool: tpn.DataPool, + MaxTxNonceDeltaAllowed: maxTxNonceDeltaAllowed, + TxFeeHandler: tpn.EconomicsData, + BlockBlackList: tpn.BlockBlackListHandler, + HeaderSigVerifier: tpn.HeaderSigVerifier, + HeaderIntegrityVerifier: tpn.HeaderIntegrityVerifier, + ValidityAttester: tpn.BlockTracker, + EpochStartTrigger: tpn.EpochStartTrigger, + WhiteListHandler: tpn.WhiteListHandler, + WhiteListerVerifiedTxs: tpn.WhiteListerVerifiedTxs, + AntifloodHandler: &mock.NilAntifloodHandler{}, + ArgumentsParser: smartContract.NewArgumentParser(), + PreferredPeersHolder: &p2pmocks.PeersHolderStub{}, + SizeCheckDelta: sizeCheckDelta, + RequestHandler: tpn.RequestHandler, + PeerSignatureHandler: &processMock.PeerSignatureHandlerStub{}, + SignaturesHandler: &processMock.SignaturesHandlerStub{}, + HeartbeatExpiryTimespanInSec: 30, + PeerShardMapper: tpn.PeerShardMapper, + HardforkTrigger: tpn.HardforkTrigger, } interceptorContainerFactory, _ := interceptorscontainer.NewShardInterceptorsContainerFactory(shardIntereptorContainerFactoryArgs) @@ -1329,6 +1367,34 @@ func (tpn *TestProcessorNode) initInterceptors() { } } +func (tpn *TestProcessorNode) createHardforkTrigger(heartbeatPk string) []byte { + pkBytes, _ := tpn.NodeKeys.Pk.ToByteArray() + argHardforkTrigger := trigger.ArgHardforkTrigger{ + TriggerPubKeyBytes: pkBytes, + Enabled: true, + EnabledAuthenticated: true, + ArgumentParser: smartContract.NewArgumentParser(), + EpochProvider: tpn.EpochStartTrigger, + ExportFactoryHandler: &mock.ExportFactoryHandlerStub{}, + CloseAfterExportInMinutes: 5, + ChanStopNodeProcess: make(chan endProcess.ArgEndProcess), + EpochConfirmedNotifier: tpn.EpochStartNotifier, + SelfPubKeyBytes: pkBytes, + ImportStartHandler: &mock.ImportStartHandlerStub{}, + RoundHandler: &mock.RoundHandlerMock{}, + } + + var err error + if len(heartbeatPk) > 0 { + argHardforkTrigger.TriggerPubKeyBytes, err = hex.DecodeString(heartbeatPk) + log.LogIfError(err) + } + tpn.HardforkTrigger, err = trigger.NewTrigger(argHardforkTrigger) + log.LogIfError(err) + + return argHardforkTrigger.TriggerPubKeyBytes +} + func (tpn *TestProcessorNode) initResolvers() { dataPacker, _ := partitioning.NewSimpleDataPacker(TestMarshalizer) @@ -1351,9 +1417,13 @@ func (tpn *TestProcessorNode) initResolvers() { PreferredPeersHolder: &p2pmocks.PeersHolderStub{}, 
ResolverConfig: config.ResolverConfig{ NumCrossShardPeers: 2, - NumIntraShardPeers: 1, + NumTotalPeers: 3, NumFullHistoryPeers: 3, }, + PeersRatingHandler: tpn.PeersRatingHandler, + NodesCoordinator: tpn.NodesCoordinator, + MaxNumOfPeerAuthenticationInResponse: 5, + PeerShardMapper: tpn.PeerShardMapper, } var err error @@ -1572,6 +1642,7 @@ func (tpn *TestProcessorNode) initInnerProcessors(gasMap map[string]map[string]u TestHasher, tpn.ShardCoordinator, ) + processedMiniBlocksTracker := processedMb.NewProcessedMiniBlocksTracker() fact, _ := shard.NewPreProcessorsContainerFactory( tpn.ShardCoordinator, @@ -1594,33 +1665,36 @@ func (tpn *TestProcessorNode) initInnerProcessors(gasMap map[string]map[string]u tpn.EpochNotifier, tpn.EnableEpochs.OptimizeGasUsedInCrossMiniBlocksEnableEpoch, tpn.EnableEpochs.FrontRunningProtectionEnableEpoch, - tpn.ScheduledMiniBlocksEnableEpoch, + tpn.EnableEpochs.ScheduledMiniBlocksEnableEpoch, txTypeHandler, scheduledTxsExecutionHandler, + processedMiniBlocksTracker, ) tpn.PreProcessorsContainer, _ = fact.Create() argsTransactionCoordinator := coordinator.ArgTransactionCoordinator{ - Hasher: TestHasher, - Marshalizer: TestMarshalizer, - ShardCoordinator: tpn.ShardCoordinator, - Accounts: tpn.AccntState, - MiniBlockPool: tpn.DataPool.MiniBlocks(), - RequestHandler: tpn.RequestHandler, - PreProcessors: tpn.PreProcessorsContainer, - InterProcessors: tpn.InterimProcContainer, - GasHandler: tpn.GasHandler, - FeeHandler: tpn.FeeAccumulator, - BlockSizeComputation: TestBlockSizeComputationHandler, - BalanceComputation: TestBalanceComputationHandler, - EconomicsFee: tpn.EconomicsData, - TxTypeHandler: txTypeHandler, - BlockGasAndFeesReCheckEnableEpoch: tpn.EnableEpochs.BlockGasAndFeesReCheckEnableEpoch, - TransactionsLogProcessor: tpn.TransactionLogProcessor, - EpochNotifier: tpn.EpochNotifier, - ScheduledTxsExecutionHandler: scheduledTxsExecutionHandler, - ScheduledMiniBlocksEnableEpoch: tpn.ScheduledMiniBlocksEnableEpoch, - DoubleTransactionsDetector: &testscommon.PanicDoubleTransactionsDetector{}, + Hasher: TestHasher, + Marshalizer: TestMarshalizer, + ShardCoordinator: tpn.ShardCoordinator, + Accounts: tpn.AccntState, + MiniBlockPool: tpn.DataPool.MiniBlocks(), + RequestHandler: tpn.RequestHandler, + PreProcessors: tpn.PreProcessorsContainer, + InterProcessors: tpn.InterimProcContainer, + GasHandler: tpn.GasHandler, + FeeHandler: tpn.FeeAccumulator, + BlockSizeComputation: TestBlockSizeComputationHandler, + BalanceComputation: TestBalanceComputationHandler, + EconomicsFee: tpn.EconomicsData, + TxTypeHandler: txTypeHandler, + BlockGasAndFeesReCheckEnableEpoch: tpn.EnableEpochs.BlockGasAndFeesReCheckEnableEpoch, + TransactionsLogProcessor: tpn.TransactionLogProcessor, + EpochNotifier: tpn.EpochNotifier, + ScheduledTxsExecutionHandler: scheduledTxsExecutionHandler, + ScheduledMiniBlocksEnableEpoch: tpn.EnableEpochs.ScheduledMiniBlocksEnableEpoch, + DoubleTransactionsDetector: &testscommon.PanicDoubleTransactionsDetector{}, + MiniBlockPartialExecutionEnableEpoch: tpn.EnableEpochs.MiniBlockPartialExecutionEnableEpoch, + ProcessedMiniBlocksTracker: processedMiniBlocksTracker, } tpn.TxCoordinator, _ = coordinator.NewTransactionCoordinator(argsTransactionCoordinator) scheduledTxsExecutionHandler.SetTransactionCoordinator(tpn.TxCoordinator) @@ -1818,6 +1892,7 @@ func (tpn *TestProcessorNode) initMetaInnerProcessors() { TestMarshalizer, TestHasher, tpn.ShardCoordinator) + processedMiniBlocksTracker := processedMb.NewProcessedMiniBlocksTracker() fact, _ := 
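The resolvers' configuration no longer lists intra-shard peers explicitly: NumTotalPeers replaces NumIntraShardPeers and is meant to cover cross-shard plus intra-shard requests, so the intra-shard count is the difference between the two. A minimal sketch of that relation and the sanity check it implies; the function is illustrative only, the fmt and config imports are assumed, and the uint32 field types are an assumption based on the literal values used here.

// intraShardPeers derives the intra-shard request count from the new resolver config fields.
// Assumption: NumTotalPeers = NumCrossShardPeers + intra-shard peers, so it must be strictly larger.
func intraShardPeers(cfg config.ResolverConfig) (uint32, error) {
    if cfg.NumTotalPeers <= cfg.NumCrossShardPeers {
        return 0, fmt.Errorf("NumTotalPeers (%d) must be greater than NumCrossShardPeers (%d)",
            cfg.NumTotalPeers, cfg.NumCrossShardPeers)
    }

    // With the values in this diff: 3 total - 2 cross-shard = 1 intra-shard peer.
    return cfg.NumTotalPeers - cfg.NumCrossShardPeers, nil
}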
metaProcess.NewPreProcessorsContainerFactory( tpn.ShardCoordinator, @@ -1838,33 +1913,36 @@ func (tpn *TestProcessorNode) initMetaInnerProcessors() { tpn.EpochNotifier, tpn.EnableEpochs.OptimizeGasUsedInCrossMiniBlocksEnableEpoch, tpn.EnableEpochs.FrontRunningProtectionEnableEpoch, - tpn.ScheduledMiniBlocksEnableEpoch, + tpn.EnableEpochs.ScheduledMiniBlocksEnableEpoch, txTypeHandler, scheduledTxsExecutionHandler, + processedMiniBlocksTracker, ) tpn.PreProcessorsContainer, _ = fact.Create() argsTransactionCoordinator := coordinator.ArgTransactionCoordinator{ - Hasher: TestHasher, - Marshalizer: TestMarshalizer, - ShardCoordinator: tpn.ShardCoordinator, - Accounts: tpn.AccntState, - MiniBlockPool: tpn.DataPool.MiniBlocks(), - RequestHandler: tpn.RequestHandler, - PreProcessors: tpn.PreProcessorsContainer, - InterProcessors: tpn.InterimProcContainer, - GasHandler: tpn.GasHandler, - FeeHandler: tpn.FeeAccumulator, - BlockSizeComputation: TestBlockSizeComputationHandler, - BalanceComputation: TestBalanceComputationHandler, - EconomicsFee: tpn.EconomicsData, - TxTypeHandler: txTypeHandler, - BlockGasAndFeesReCheckEnableEpoch: tpn.EnableEpochs.BlockGasAndFeesReCheckEnableEpoch, - TransactionsLogProcessor: tpn.TransactionLogProcessor, - EpochNotifier: tpn.EpochNotifier, - ScheduledTxsExecutionHandler: scheduledTxsExecutionHandler, - ScheduledMiniBlocksEnableEpoch: tpn.ScheduledMiniBlocksEnableEpoch, - DoubleTransactionsDetector: &testscommon.PanicDoubleTransactionsDetector{}, + Hasher: TestHasher, + Marshalizer: TestMarshalizer, + ShardCoordinator: tpn.ShardCoordinator, + Accounts: tpn.AccntState, + MiniBlockPool: tpn.DataPool.MiniBlocks(), + RequestHandler: tpn.RequestHandler, + PreProcessors: tpn.PreProcessorsContainer, + InterProcessors: tpn.InterimProcContainer, + GasHandler: tpn.GasHandler, + FeeHandler: tpn.FeeAccumulator, + BlockSizeComputation: TestBlockSizeComputationHandler, + BalanceComputation: TestBalanceComputationHandler, + EconomicsFee: tpn.EconomicsData, + TxTypeHandler: txTypeHandler, + BlockGasAndFeesReCheckEnableEpoch: tpn.EnableEpochs.BlockGasAndFeesReCheckEnableEpoch, + TransactionsLogProcessor: tpn.TransactionLogProcessor, + EpochNotifier: tpn.EpochNotifier, + ScheduledTxsExecutionHandler: scheduledTxsExecutionHandler, + ScheduledMiniBlocksEnableEpoch: tpn.EnableEpochs.ScheduledMiniBlocksEnableEpoch, + DoubleTransactionsDetector: &testscommon.PanicDoubleTransactionsDetector{}, + MiniBlockPartialExecutionEnableEpoch: tpn.EnableEpochs.MiniBlockPartialExecutionEnableEpoch, + ProcessedMiniBlocksTracker: processedMiniBlocksTracker, } tpn.TxCoordinator, _ = coordinator.NewTransactionCoordinator(argsTransactionCoordinator) scheduledTxsExecutionHandler.SetTransactionCoordinator(tpn.TxCoordinator) @@ -2075,6 +2153,7 @@ func (tpn *TestProcessorNode) initBlockProcessor(stateCheckpointModulus uint) { GasHandler: tpn.GasHandler, ScheduledTxsExecutionHandler: &testscommon.ScheduledTxsExecutionStub{}, ScheduledMiniBlocksEnableEpoch: ScheduledMiniBlocksEnableEpoch, + ProcessedMiniBlocksTracker: &testscommon.ProcessedMiniBlocksTrackerStub{}, } if check.IfNil(tpn.EpochStartNotifier) { @@ -2082,22 +2161,24 @@ func (tpn *TestProcessorNode) initBlockProcessor(stateCheckpointModulus uint) { } if tpn.ShardCoordinator.SelfId() == core.MetachainShardId { - argsEpochStart := &metachain.ArgsNewMetaEpochStartTrigger{ - GenesisTime: argumentsBase.CoreComponents.RoundHandler().TimeStamp(), - Settings: &config.EpochStartConfig{ - MinRoundsBetweenEpochs: 1000, - RoundsPerEpoch: 10000, - }, - Epoch: 0, - 
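Note that the preprocessors container factory and the transaction coordinator receive the same processedMiniBlocksTracker instance, both in the shard and in the meta variant above; creating two separate trackers would let their views of partially executed mini blocks diverge. A minimal sketch of that wiring, using only the constructor shown in this patch; the helper name and the process.ProcessedMiniBlocksTracker return type are assumptions.

// newSharedProcessedMiniBlocksTracker returns the single tracker instance that is passed
// both to the preprocessors container factory and to ArgTransactionCoordinator above.
func newSharedProcessedMiniBlocksTracker() process.ProcessedMiniBlocksTracker {
    return processedMb.NewProcessedMiniBlocksTracker()
}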
EpochStartNotifier: tpn.EpochStartNotifier, - Storage: tpn.Storage, - Marshalizer: TestMarshalizer, - Hasher: TestHasher, - AppStatusHandler: &statusHandlerMock.AppStatusHandlerStub{}, + if check.IfNil(tpn.EpochStartTrigger) { + argsEpochStart := &metachain.ArgsNewMetaEpochStartTrigger{ + GenesisTime: argumentsBase.CoreComponents.RoundHandler().TimeStamp(), + Settings: &config.EpochStartConfig{ + MinRoundsBetweenEpochs: 1000, + RoundsPerEpoch: 10000, + }, + Epoch: 0, + EpochStartNotifier: tpn.EpochStartNotifier, + Storage: tpn.Storage, + Marshalizer: TestMarshalizer, + Hasher: TestHasher, + AppStatusHandler: &statusHandlerMock.AppStatusHandlerStub{}, + } + epochStartTrigger, _ := metachain.NewEpochStartTrigger(argsEpochStart) + tpn.EpochStartTrigger = &metachain.TestTrigger{} + tpn.EpochStartTrigger.SetTrigger(epochStartTrigger) } - epochStartTrigger, _ := metachain.NewEpochStartTrigger(argsEpochStart) - tpn.EpochStartTrigger = &metachain.TestTrigger{} - tpn.EpochStartTrigger.SetTrigger(epochStartTrigger) argumentsBase.EpochStartTrigger = tpn.EpochStartTrigger argumentsBase.TxCoordinator = tpn.TxCoordinator @@ -2111,7 +2192,7 @@ func (tpn *TestProcessorNode) initBlockProcessor(stateCheckpointModulus uint) { ArgParser: tpn.ArgsParser, CurrTxs: tpn.DataPool.CurrentBlockTxs(), RatingsData: tpn.RatingsData, - StakingV4InitEpoch: StakingV4Epoch - 1, + StakingV4InitEpoch: StakingV4InitEpoch, EpochNotifier: &epochNotifier.EpochNotifierStub{}, } scToProtocolInstance, _ := scToProtocol.NewStakingToPeer(argsStakingToPeer) @@ -2221,7 +2302,7 @@ func (tpn *TestProcessorNode) initBlockProcessor(stateCheckpointModulus uint) { EpochConfig: config.EpochConfig{ EnableEpochs: config.EnableEpochs{ StakingV2EnableEpoch: StakingV2Epoch, - StakingV4InitEnableEpoch: StakingV4Epoch - 1, + StakingV4InitEnableEpoch: StakingV4InitEpoch, StakingV4EnableEpoch: StakingV4Epoch, ESDTEnableEpoch: 0, }, @@ -2322,6 +2403,8 @@ func (tpn *TestProcessorNode) initNode() { coreComponents.SyncTimerField = &mock.SyncTimerMock{} coreComponents.EpochNotifierField = tpn.EpochNotifier coreComponents.ArwenChangeLockerInternal = tpn.ArwenChangeLocker + hardforkPubKeyBytes, _ := coreComponents.ValidatorPubKeyConverterField.Decode(hardforkPubKey) + coreComponents.HardforkTriggerPubKeyField = hardforkPubKeyBytes dataComponents := GetDefaultDataComponents() dataComponents.BlockChain = tpn.BlockChain @@ -2343,6 +2426,7 @@ func (tpn *TestProcessorNode) initNode() { processComponents.WhiteListHandlerInternal = tpn.WhiteListHandler processComponents.WhiteListerVerifiedTxsInternal = tpn.WhiteListerVerifiedTxs processComponents.TxsSenderHandlerField = createTxsSender(tpn.ShardCoordinator, tpn.Messenger) + processComponents.HardforkTriggerField = tpn.HardforkTrigger cryptoComponents := GetDefaultCryptoComponents() cryptoComponents.PrivKey = tpn.NodeKeys.Sk @@ -2371,7 +2455,6 @@ func (tpn *TestProcessorNode) initNode() { node.WithNetworkComponents(networkComponents), node.WithStateComponents(stateComponents), node.WithPeerDenialEvaluator(&mock.PeerDenialEvaluatorStub{}), - node.WithHardforkTrigger(&mock.HardforkTriggerStub{}), ) log.LogIfError(err) @@ -2866,31 +2949,7 @@ func (tpn *TestProcessorNode) initHeaderValidator() { tpn.HeaderValidator, _ = block.NewHeaderValidator(argsHeaderValidator) } -func (tpn *TestProcessorNode) createHeartbeatWithHardforkTrigger(heartbeatPk string) { - pkBytes, _ := tpn.NodeKeys.Pk.ToByteArray() - argHardforkTrigger := trigger.ArgHardforkTrigger{ - TriggerPubKeyBytes: pkBytes, - Enabled: true, - EnabledAuthenticated: 
true, - ArgumentParser: smartContract.NewArgumentParser(), - EpochProvider: tpn.EpochStartTrigger, - ExportFactoryHandler: &mock.ExportFactoryHandlerStub{}, - CloseAfterExportInMinutes: 5, - ChanStopNodeProcess: make(chan endProcess.ArgEndProcess), - EpochConfirmedNotifier: tpn.EpochStartNotifier, - SelfPubKeyBytes: pkBytes, - ImportStartHandler: &mock.ImportStartHandlerStub{}, - RoundHandler: &mock.RoundHandlerMock{}, - } - var err error - if len(heartbeatPk) > 0 { - argHardforkTrigger.TriggerPubKeyBytes, err = hex.DecodeString(heartbeatPk) - log.LogIfError(err) - } - - hardforkTrigger, err := trigger.NewTrigger(argHardforkTrigger) - log.LogIfError(err) - +func (tpn *TestProcessorNode) createHeartbeatWithHardforkTrigger() { cacher := testscommon.NewCacherMock() psh, err := peerSignatureHandler.NewPeerSignatureHandler( cacher, @@ -2937,16 +2996,18 @@ func (tpn *TestProcessorNode) createHeartbeatWithHardforkTrigger(heartbeatPk str processComponents.HistoryRepositoryInternal = tpn.HistoryRepository processComponents.TxsSenderHandlerField = createTxsSender(tpn.ShardCoordinator, tpn.Messenger) - redundancyHandler := &mock.RedundancyHandlerStub{} + processComponents.HardforkTriggerField = tpn.HardforkTrigger err = tpn.Node.ApplyOptions( - node.WithHardforkTrigger(hardforkTrigger), node.WithCryptoComponents(cryptoComponents), - node.WithNetworkComponents(networkComponents), node.WithProcessComponents(processComponents), ) log.LogIfError(err) + // TODO: remove it with heartbeat v1 cleanup + // =============== Heartbeat ============== // + redundancyHandler := &mock.RedundancyHandlerStub{} + hbConfig := config.HeartbeatConfig{ MinTimeToWaitBetweenBroadcastsInSec: 4, MaxTimeToWaitBetweenBroadcastsInSec: 6, @@ -2959,14 +3020,14 @@ func (tpn *TestProcessorNode) createHeartbeatWithHardforkTrigger(heartbeatPk str Config: config.Config{ Heartbeat: hbConfig, }, - Prefs: config.Preferences{}, - HardforkTrigger: hardforkTrigger, - RedundancyHandler: redundancyHandler, - CoreComponents: tpn.Node.GetCoreComponents(), - DataComponents: tpn.Node.GetDataComponents(), - NetworkComponents: tpn.Node.GetNetworkComponents(), - CryptoComponents: tpn.Node.GetCryptoComponents(), - ProcessComponents: tpn.Node.GetProcessComponents(), + HeartbeatDisableEpoch: 10, + Prefs: config.Preferences{}, + RedundancyHandler: redundancyHandler, + CoreComponents: tpn.Node.GetCoreComponents(), + DataComponents: tpn.Node.GetDataComponents(), + NetworkComponents: tpn.Node.GetNetworkComponents(), + CryptoComponents: tpn.Node.GetCryptoComponents(), + ProcessComponents: tpn.Node.GetProcessComponents(), } heartbeatFactory, err := mainFactory.NewHeartbeatComponentsFactory(hbFactoryArgs) @@ -2981,7 +3042,64 @@ func (tpn *TestProcessorNode) createHeartbeatWithHardforkTrigger(heartbeatPk str err = tpn.Node.ApplyOptions( node.WithHeartbeatComponents(managedHeartbeatComponents), ) + log.LogIfError(err) + + // ============== HeartbeatV2 ============= // + hbv2Config := config.HeartbeatV2Config{ + PeerAuthenticationTimeBetweenSendsInSec: 5, + PeerAuthenticationTimeBetweenSendsWhenErrorInSec: 1, + PeerAuthenticationThresholdBetweenSends: 0.1, + HeartbeatTimeBetweenSendsInSec: 2, + HeartbeatTimeBetweenSendsWhenErrorInSec: 1, + HeartbeatThresholdBetweenSends: 0.1, + MaxNumOfPeerAuthenticationInResponse: 5, + HeartbeatExpiryTimespanInSec: 300, + MinPeersThreshold: 0.8, + DelayBetweenRequestsInSec: 10, + MaxTimeoutInSec: 60, + DelayBetweenConnectionNotificationsInSec: 5, + MaxMissingKeysInRequest: 100, + MaxDurationPeerUnresponsiveInSec: 10, + 
HideInactiveValidatorIntervalInSec: 60, + HardforkTimeBetweenSendsInSec: 2, + PeerAuthenticationPool: config.PeerAuthenticationPoolConfig{ + DefaultSpanInSec: 30, + CacheExpiryInSec: 30, + }, + HeartbeatPool: config.CacheConfig{ + Type: "LRU", + Capacity: 1000, + Shards: 1, + }, + } + + hbv2FactoryArgs := mainFactory.ArgHeartbeatV2ComponentsFactory{ + Config: config.Config{ + HeartbeatV2: hbv2Config, + Hardfork: config.HardforkConfig{ + PublicKeyToListenFrom: hardforkPubKey, + }, + }, + BoostrapComponents: tpn.Node.GetBootstrapComponents(), + CoreComponents: tpn.Node.GetCoreComponents(), + DataComponents: tpn.Node.GetDataComponents(), + NetworkComponents: tpn.Node.GetNetworkComponents(), + CryptoComponents: tpn.Node.GetCryptoComponents(), + ProcessComponents: tpn.Node.GetProcessComponents(), + } + + heartbeatV2Factory, err := mainFactory.NewHeartbeatV2ComponentsFactory(hbv2FactoryArgs) + log.LogIfError(err) + managedHeartbeatV2Components, err := mainFactory.NewManagedHeartbeatV2Components(heartbeatV2Factory) + log.LogIfError(err) + + err = managedHeartbeatV2Components.Create() + log.LogIfError(err) + + err = tpn.Node.ApplyOptions( + node.WithHeartbeatV2Components(managedHeartbeatV2Components), + ) log.LogIfError(err) } @@ -3061,6 +3179,7 @@ func GetDefaultProcessComponents() *mock.ProcessComponentsStub { }, CurrentEpochProviderInternal: &testscommon.CurrentEpochProviderStub{}, HistoryRepositoryInternal: &dblookupextMock.HistoryRepositoryStub{}, + HardforkTriggerField: &testscommon.HardforkTriggerStub{}, } } @@ -3109,10 +3228,11 @@ func GetDefaultStateComponents() *testscommon.StateComponentsMock { // GetDefaultNetworkComponents - func GetDefaultNetworkComponents() *mock.NetworkComponentsStub { return &mock.NetworkComponentsStub{ - Messenger: &p2pmocks.MessengerStub{}, - InputAntiFlood: &mock.P2PAntifloodHandlerStub{}, - OutputAntiFlood: &mock.P2PAntifloodHandlerStub{}, - PeerBlackList: &mock.PeerBlackListCacherStub{}, + Messenger: &p2pmocks.MessengerStub{}, + InputAntiFlood: &mock.P2PAntifloodHandlerStub{}, + OutputAntiFlood: &mock.P2PAntifloodHandlerStub{}, + PeerBlackList: &mock.PeerBlackListCacherStub{}, + PeersRatingHandlerField: &p2pmocks.PeersRatingHandlerStub{}, } } diff --git a/integrationTests/testProcessorNodeWithCoordinator.go b/integrationTests/testProcessorNodeWithCoordinator.go index 58c09750000..114d815ebe0 100644 --- a/integrationTests/testProcessorNodeWithCoordinator.go +++ b/integrationTests/testProcessorNodeWithCoordinator.go @@ -16,9 +16,11 @@ import ( multisig2 "github.com/ElrondNetwork/elrond-go-crypto/signing/mcl/multisig" "github.com/ElrondNetwork/elrond-go-crypto/signing/multisig" "github.com/ElrondNetwork/elrond-go/integrationTests/mock" + p2pRating "github.com/ElrondNetwork/elrond-go/p2p/rating" "github.com/ElrondNetwork/elrond-go/sharding" "github.com/ElrondNetwork/elrond-go/sharding/nodesCoordinator" "github.com/ElrondNetwork/elrond-go/storage/lrucache" + "github.com/ElrondNetwork/elrond-go/testscommon" ) type nodeKeys struct { @@ -202,7 +204,13 @@ func newTestProcessorNodeWithCustomNodesCoordinator( shardCoordinator, _ := sharding.NewMultiShardCoordinator(maxShards, nodeShardId) - messenger := CreateMessengerWithNoDiscovery() + peersRatingHandler, _ := p2pRating.NewPeersRatingHandler( + p2pRating.ArgPeersRatingHandler{ + TopRatedCache: testscommon.NewCacherMock(), + BadRatedCache: testscommon.NewCacherMock(), + }) + + messenger := CreateMessengerWithNoDiscoveryAndPeersRatingHandler(peersRatingHandler) tpn := &TestProcessorNode{ ShardCoordinator: 
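The heartbeat v2 wiring above follows the same sequence as the other managed components: build the factory, wrap it in a managed component, call Create, then attach it to the node through ApplyOptions, with every error routed through log.LogIfError. The sketch below restates that sequence with explicit error propagation, using only the constructors that appear in this patch; returning errors instead of logging them is a local stylistic choice, not repository code.

// applyHeartbeatV2 condenses the factory, managed-component, Create and ApplyOptions steps.
func applyHeartbeatV2(tpn *TestProcessorNode, args mainFactory.ArgHeartbeatV2ComponentsFactory) error {
    heartbeatV2Factory, err := mainFactory.NewHeartbeatV2ComponentsFactory(args)
    if err != nil {
        return err
    }

    managedHeartbeatV2Components, err := mainFactory.NewManagedHeartbeatV2Components(heartbeatV2Factory)
    if err != nil {
        return err
    }

    // Create instantiates the inner heartbeat v2 components.
    err = managedHeartbeatV2Components.Create()
    if err != nil {
        return err
    }

    return tpn.Node.ApplyOptions(node.WithHeartbeatV2Components(managedHeartbeatV2Components))
}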
shardCoordinator, Messenger: messenger, @@ -212,6 +220,7 @@ func newTestProcessorNodeWithCustomNodesCoordinator( ChainID: ChainID, NodesSetup: nodesSetup, ArwenChangeLocker: &sync.RWMutex{}, + PeersRatingHandler: peersRatingHandler, } tpn.NodeKeys = &TestKeyPair{ diff --git a/integrationTests/testProcessorNodeWithMultisigner.go b/integrationTests/testProcessorNodeWithMultisigner.go index fbc1fa5727b..74e6a07041d 100644 --- a/integrationTests/testProcessorNodeWithMultisigner.go +++ b/integrationTests/testProcessorNodeWithMultisigner.go @@ -20,9 +20,11 @@ import ( "github.com/ElrondNetwork/elrond-go-crypto/signing/multisig" "github.com/ElrondNetwork/elrond-go/common/forking" "github.com/ElrondNetwork/elrond-go/config" + "github.com/ElrondNetwork/elrond-go/epochStart/bootstrap/disabled" "github.com/ElrondNetwork/elrond-go/epochStart/notifier" "github.com/ElrondNetwork/elrond-go/factory/peerSignatureHandler" "github.com/ElrondNetwork/elrond-go/integrationTests/mock" + p2pRating "github.com/ElrondNetwork/elrond-go/p2p/rating" "github.com/ElrondNetwork/elrond-go/process" "github.com/ElrondNetwork/elrond-go/process/headerCheck" "github.com/ElrondNetwork/elrond-go/process/rating" @@ -55,7 +57,12 @@ func NewTestProcessorNodeWithCustomNodesCoordinator( shardCoordinator, _ := sharding.NewMultiShardCoordinator(maxShards, nodeShardId) logsProcessor, _ := transactionLog.NewTxLogProcessor(transactionLog.ArgTxLogProcessor{Marshalizer: TestMarshalizer}) - messenger := CreateMessengerWithNoDiscovery() + peersRatingHandler, _ := p2pRating.NewPeersRatingHandler( + p2pRating.ArgPeersRatingHandler{ + TopRatedCache: testscommon.NewCacherMock(), + BadRatedCache: testscommon.NewCacherMock(), + }) + messenger := CreateMessengerWithNoDiscoveryAndPeersRatingHandler(peersRatingHandler) tpn := &TestProcessorNode{ ShardCoordinator: shardCoordinator, Messenger: messenger, @@ -71,6 +78,8 @@ func NewTestProcessorNodeWithCustomNodesCoordinator( ArwenChangeLocker: &sync.RWMutex{}, TransactionLogProcessor: logsProcessor, Bootstrapper: mock.NewTestBootstrapperMock(), + PeersRatingHandler: peersRatingHandler, + PeerShardMapper: mock.NewNetworkShardingCollectorMock(), EnableEpochs: config.EnableEpochs{ StakingV4InitEnableEpoch: StakingV4InitEpoch, StakingV4EnableEpoch: StakingV4Epoch, @@ -78,7 +87,8 @@ func NewTestProcessorNodeWithCustomNodesCoordinator( }, } - tpn.ScheduledMiniBlocksEnableEpoch = uint32(1000000) + tpn.EnableEpochs.ScheduledMiniBlocksEnableEpoch = uint32(1000000) + tpn.EnableEpochs.MiniBlockPartialExecutionEnableEpoch = uint32(1000000) tpn.NodeKeys = cp.Keys[nodeShardId][keyIndex] blsHasher, _ := blake2b.NewBlake2bWithSize(hashing.BlsHashSize) llsig := &mclmultisig.BlsMultiSigner{Hasher: blsHasher} @@ -242,16 +252,21 @@ func CreateNodeWithBLSAndTxKeys( consensusGroupCache: cache, bootStorer: bootStorer, } - nodesCoordinator := coordinatorFactory.CreateNodesCoordinator(argFactory) + nodesCoordinatorInstance := coordinatorFactory.CreateNodesCoordinator(argFactory) shardCoordinator, _ := sharding.NewMultiShardCoordinator(uint32(nbShards), shardId) logsProcessor, _ := transactionLog.NewTxLogProcessor(transactionLog.ArgTxLogProcessor{Marshalizer: TestMarshalizer}) - messenger := CreateMessengerWithNoDiscovery() + peersRatingHandler, _ := p2pRating.NewPeersRatingHandler( + p2pRating.ArgPeersRatingHandler{ + TopRatedCache: testscommon.NewCacherMock(), + BadRatedCache: testscommon.NewCacherMock(), + }) + messenger := CreateMessengerWithNoDiscoveryAndPeersRatingHandler(peersRatingHandler) tpn := &TestProcessorNode{ 
ShardCoordinator: shardCoordinator, Messenger: messenger, - NodesCoordinator: nodesCoordinator, + NodesCoordinator: nodesCoordinatorInstance, HeaderSigVerifier: &mock.HeaderSigVerifierStub{}, HeaderIntegrityVerifier: CreateHeaderIntegrityVerifier(), ChainID: ChainID, @@ -262,6 +277,8 @@ func CreateNodeWithBLSAndTxKeys( EpochNotifier: forking.NewGenericEpochNotifier(), ArwenChangeLocker: &sync.RWMutex{}, TransactionLogProcessor: logsProcessor, + PeersRatingHandler: peersRatingHandler, + PeerShardMapper: disabled.NewPeerShardMapper(), EnableEpochs: config.EnableEpochs{ StakingV4InitEnableEpoch: StakingV4InitEpoch, StakingV4EnableEpoch: StakingV4Epoch, @@ -269,7 +286,8 @@ func CreateNodeWithBLSAndTxKeys( }, } - tpn.ScheduledMiniBlocksEnableEpoch = uint32(1000000) + tpn.EnableEpochs.ScheduledMiniBlocksEnableEpoch = uint32(1000000) + tpn.EnableEpochs.MiniBlockPartialExecutionEnableEpoch = uint32(1000000) tpn.NodeKeys = cp.Keys[shardId][keyIndex] blsHasher, _ := blake2b.NewBlake2bWithSize(hashing.BlsHashSize) llsig := &mclmultisig.BlsMultiSigner{Hasher: blsHasher} @@ -443,13 +461,13 @@ func CreateNode( consensusGroupCache: cache, bootStorer: bootStorer, } - nodesCoordinator := coordinatorFactory.CreateNodesCoordinator(argFactory) + nodesCoordinatorInstance := coordinatorFactory.CreateNodesCoordinator(argFactory) return NewTestProcessorNodeWithCustomNodesCoordinator( uint32(nbShards), shardId, epochStartSubscriber, - nodesCoordinator, + nodesCoordinatorInstance, ratingsData, cp, keyIndex, @@ -531,7 +549,7 @@ func CreateNodesWithNodesCoordinatorAndHeaderSigVerifier( StakingV4EnableEpoch: StakingV4Epoch, NodesCoordinatorRegistryFactory: nodesCoordinatorRegistryFactory, } - nodesCoordinator, err := nodesCoordinator.NewIndexHashedNodesCoordinator(argumentsNodesCoordinator) + nodesCoordinatorInstance, err := nodesCoordinator.NewIndexHashedNodesCoordinator(argumentsNodesCoordinator) if err != nil { fmt.Println("Error creating node coordinator: " + err.Error()) @@ -541,7 +559,7 @@ func CreateNodesWithNodesCoordinatorAndHeaderSigVerifier( args := headerCheck.ArgsHeaderSigVerifier{ Marshalizer: TestMarshalizer, Hasher: TestHasher, - NodesCoordinator: nodesCoordinator, + NodesCoordinator: nodesCoordinatorInstance, MultiSigVerifier: TestMultiSig, SingleSigVerifier: signer, KeyGen: keyGen, @@ -554,7 +572,7 @@ func CreateNodesWithNodesCoordinatorAndHeaderSigVerifier( uint32(nbShards), shardId, epochStartSubscriber, - nodesCoordinator, + nodesCoordinatorInstance, nil, cp, i, @@ -696,9 +714,9 @@ func ProposeBlockWithConsensusSignature( randomness []byte, epoch uint32, ) (data.BodyHandler, data.HeaderHandler, [][]byte, []*TestProcessorNode) { - nodesCoordinator := nodesMap[shardId][0].NodesCoordinator + nodesCoordinatorInstance := nodesMap[shardId][0].NodesCoordinator - pubKeys, err := nodesCoordinator.GetConsensusValidatorsPublicKeys(randomness, round, shardId, epoch) + pubKeys, err := nodesCoordinatorInstance.GetConsensusValidatorsPublicKeys(randomness, round, shardId, epoch) if err != nil { log.Error("nodesCoordinator.GetConsensusValidatorsPublicKeys", "error", err) } diff --git a/integrationTests/testProcessorNodeWithStateCheckpointModulus.go b/integrationTests/testProcessorNodeWithStateCheckpointModulus.go index 28856f961e4..2d5109cb84f 100644 --- a/integrationTests/testProcessorNodeWithStateCheckpointModulus.go +++ b/integrationTests/testProcessorNodeWithStateCheckpointModulus.go @@ -8,8 +8,10 @@ import ( "github.com/ElrondNetwork/elrond-go/common/forking" "github.com/ElrondNetwork/elrond-go/config" 
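The renames from nodesCoordinator to nodesCoordinatorStub or nodesCoordinatorInstance are not cosmetic: a local variable named nodesCoordinator shadows the imported nodesCoordinator package, so package-level identifiers such as nodesCoordinator.NewValidator become unreachable in that scope. A minimal illustration, assuming the sharding/nodesCoordinator and testscommon/shardingMocks imports already used by these files; both functions are hypothetical.

func shadowed() {
    // From this line on, the identifier "nodesCoordinator" names the local variable,
    // so a call like nodesCoordinator.NewValidator(...) would no longer compile in this scope.
    nodesCoordinator := &shardingMocks.NodesCoordinatorMock{}
    _ = nodesCoordinator
}

func notShadowed() {
    // With a distinct variable name the package stays reachable.
    nodesCoordinatorInstance := &shardingMocks.NodesCoordinatorMock{}
    v, _ := nodesCoordinator.NewValidator([]byte("pk"), 1, 1)
    _, _ = nodesCoordinatorInstance, v
}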
"github.com/ElrondNetwork/elrond-go/consensus/spos/sposFactory" + "github.com/ElrondNetwork/elrond-go/epochStart/bootstrap/disabled" "github.com/ElrondNetwork/elrond-go/epochStart/notifier" "github.com/ElrondNetwork/elrond-go/integrationTests/mock" + p2pRating "github.com/ElrondNetwork/elrond-go/p2p/rating" "github.com/ElrondNetwork/elrond-go/process/smartContract" "github.com/ElrondNetwork/elrond-go/process/transactionLog" "github.com/ElrondNetwork/elrond-go/sharding" @@ -51,7 +53,7 @@ func NewTestProcessorNodeWithStateCheckpointModulus( }, } - nodesCoordinator := &shardingMocks.NodesCoordinatorStub{ + nodesCoordinatorInstance := &shardingMocks.NodesCoordinatorStub{ ComputeValidatorsGroupCalled: func(randomness []byte, round uint64, shardId uint32, epoch uint32) (validators []nodesCoordinator.Validator, err error) { v, _ := nodesCoordinator.NewValidator(pkBytes, defaultChancesSelection, 1) return []nodesCoordinator.Validator{v}, nil @@ -69,11 +71,17 @@ func NewTestProcessorNodeWithStateCheckpointModulus( } logsProcessor, _ := transactionLog.NewTxLogProcessor(transactionLog.ArgTxLogProcessor{Marshalizer: TestMarshalizer}) - messenger := CreateMessengerWithNoDiscovery() + peersRatingHandler, _ := p2pRating.NewPeersRatingHandler( + p2pRating.ArgPeersRatingHandler{ + TopRatedCache: testscommon.NewCacherMock(), + BadRatedCache: testscommon.NewCacherMock(), + }) + + messenger := CreateMessengerWithNoDiscoveryAndPeersRatingHandler(peersRatingHandler) tpn := &TestProcessorNode{ ShardCoordinator: shardCoordinator, Messenger: messenger, - NodesCoordinator: nodesCoordinator, + NodesCoordinator: nodesCoordinatorInstance, HeaderSigVerifier: &mock.HeaderSigVerifierStub{}, HeaderIntegrityVerifier: CreateHeaderIntegrityVerifier(), ChainID: ChainID, @@ -82,6 +90,8 @@ func NewTestProcessorNodeWithStateCheckpointModulus( EpochNotifier: forking.NewGenericEpochNotifier(), ArwenChangeLocker: &sync.RWMutex{}, TransactionLogProcessor: logsProcessor, + PeersRatingHandler: peersRatingHandler, + PeerShardMapper: disabled.NewPeerShardMapper(), EnableEpochs: config.EnableEpochs{ StakingV4InitEnableEpoch: StakingV4InitEpoch, StakingV4EnableEpoch: StakingV4Epoch, @@ -127,7 +137,7 @@ func NewTestProcessorNodeWithStateCheckpointModulus( tpn.EconomicsData, ) tpn.initBlockTracker() - tpn.initInterceptors() + tpn.initInterceptors("") tpn.initInnerProcessors(arwenConfig.MakeGasMapForTests()) argsNewScQueryService := smartContract.ArgsNewSCQueryService{ VmContainer: tpn.VMContainer, diff --git a/integrationTests/testSyncNode.go b/integrationTests/testSyncNode.go index 5102d48ab70..33c2c934eaa 100644 --- a/integrationTests/testSyncNode.go +++ b/integrationTests/testSyncNode.go @@ -12,8 +12,10 @@ import ( "github.com/ElrondNetwork/elrond-go/consensus/spos/sposFactory" "github.com/ElrondNetwork/elrond-go/dataRetriever" "github.com/ElrondNetwork/elrond-go/dataRetriever/provider" + "github.com/ElrondNetwork/elrond-go/epochStart/bootstrap/disabled" "github.com/ElrondNetwork/elrond-go/epochStart/notifier" "github.com/ElrondNetwork/elrond-go/integrationTests/mock" + "github.com/ElrondNetwork/elrond-go/p2p/rating" "github.com/ElrondNetwork/elrond-go/process/block" "github.com/ElrondNetwork/elrond-go/process/block/bootstrapStorage" "github.com/ElrondNetwork/elrond-go/process/smartContract" @@ -54,7 +56,7 @@ func NewTestSyncNode( }, } - nodesCoordinator := &shardingMocks.NodesCoordinatorStub{ + nodesCoordinatorInstance := &shardingMocks.NodesCoordinatorStub{ ComputeValidatorsGroupCalled: func(randomness []byte, round uint64, shardId 
uint32, epoch uint32) (validators []nodesCoordinator.Validator, err error) { v, _ := nodesCoordinator.NewValidator(pkBytes, 1, defaultChancesSelection) return []nodesCoordinator.Validator{v}, nil @@ -71,13 +73,19 @@ func NewTestSyncNode( }, } - messenger := CreateMessengerWithNoDiscovery() + peersRatingHandler, _ := rating.NewPeersRatingHandler( + rating.ArgPeersRatingHandler{ + TopRatedCache: testscommon.NewCacherMock(), + BadRatedCache: testscommon.NewCacherMock(), + }) + + messenger := CreateMessengerWithNoDiscoveryAndPeersRatingHandler(peersRatingHandler) logsProcessor, _ := transactionLog.NewTxLogProcessor(transactionLog.ArgTxLogProcessor{Marshalizer: TestMarshalizer}) tpn := &TestProcessorNode{ ShardCoordinator: shardCoordinator, Messenger: messenger, - NodesCoordinator: nodesCoordinator, + NodesCoordinator: nodesCoordinatorInstance, BootstrapStorer: &mock.BoostrapStorerMock{ PutCalled: func(round int64, bootData bootstrapStorage.BootstrapData) error { return nil @@ -94,6 +102,8 @@ func NewTestSyncNode( EpochNotifier: forking.NewGenericEpochNotifier(), ArwenChangeLocker: &syncGo.RWMutex{}, TransactionLogProcessor: logsProcessor, + PeersRatingHandler: peersRatingHandler, + PeerShardMapper: disabled.NewPeerShardMapper(), } kg := &mock.KeyGenMock{} @@ -125,7 +135,7 @@ func (tpn *TestProcessorNode) initTestNodeWithSync() { tpn.initRequestedItemsHandler() tpn.initResolvers() tpn.initBlockTracker() - tpn.initInterceptors() + tpn.initInterceptors("") tpn.initInnerProcessors(arwenConfig.MakeGasMapForTests()) tpn.initBlockProcessorWithSync() tpn.BroadcastMessenger, _ = sposFactory.GetBroadcastMessenger( @@ -226,6 +236,7 @@ func (tpn *TestProcessorNode) initBlockProcessorWithSync() { GasHandler: tpn.GasHandler, ScheduledTxsExecutionHandler: &testscommon.ScheduledTxsExecutionStub{}, ScheduledMiniBlocksEnableEpoch: ScheduledMiniBlocksEnableEpoch, + ProcessedMiniBlocksTracker: &testscommon.ProcessedMiniBlocksTrackerStub{}, } if tpn.ShardCoordinator.SelfId() == core.MetachainShardId { diff --git a/integrationTests/vm/esdt/common.go b/integrationTests/vm/esdt/common.go index c292e1d965c..d424d121441 100644 --- a/integrationTests/vm/esdt/common.go +++ b/integrationTests/vm/esdt/common.go @@ -145,6 +145,8 @@ func CreateNodesAndPrepareBalances(numOfShards int) ([]*integrationTests.TestPro enableEpochs := config.EnableEpochs{ OptimizeGasUsedInCrossMiniBlocksEnableEpoch: 10, + ScheduledMiniBlocksEnableEpoch: 10, + MiniBlockPartialExecutionEnableEpoch: 10, } nodes := integrationTests.CreateNodesWithEnableEpochs( diff --git a/integrationTests/vm/esdt/multi-transfer/esdtMultiTransferThroughForwarder_test.go b/integrationTests/vm/esdt/multi-transfer/esdtMultiTransferThroughForwarder/esdtMultiTransferThroughForwarder_test.go similarity index 75% rename from integrationTests/vm/esdt/multi-transfer/esdtMultiTransferThroughForwarder_test.go rename to integrationTests/vm/esdt/multi-transfer/esdtMultiTransferThroughForwarder/esdtMultiTransferThroughForwarder_test.go index 9f207559228..6d971059fc3 100644 --- a/integrationTests/vm/esdt/multi-transfer/esdtMultiTransferThroughForwarder_test.go +++ b/integrationTests/vm/esdt/multi-transfer/esdtMultiTransferThroughForwarder/esdtMultiTransferThroughForwarder_test.go @@ -1,4 +1,4 @@ -package multitransfer +package esdtMultiTransferThroughForwarder import ( "testing" @@ -6,6 +6,7 @@ import ( "github.com/ElrondNetwork/elrond-go-core/core" "github.com/ElrondNetwork/elrond-go/integrationTests" "github.com/ElrondNetwork/elrond-go/integrationTests/vm/esdt" + multitransfer 
"github.com/ElrondNetwork/elrond-go/integrationTests/vm/esdt/multi-transfer" "github.com/ElrondNetwork/elrond-go/testscommon/txDataBuilder" ) @@ -24,17 +25,17 @@ func TestESDTMultiTransferThroughForwarder(t *testing.T) { senderNode := net.NodesSharded[0][0] owner := senderNode.OwnAccount - forwarder := net.DeployPayableSC(owner, "../testdata/forwarder.wasm") - vault := net.DeployNonpayableSC(owner, "../testdata/vaultV2.wasm") - vaultOtherShard := net.DeployNonpayableSC(net.NodesSharded[1][0].OwnAccount, "../testdata/vaultV2.wasm") + forwarder := net.DeployPayableSC(owner, "../../testdata/forwarder.wasm") + vault := net.DeployNonpayableSC(owner, "../../testdata/vaultV2.wasm") + vaultOtherShard := net.DeployNonpayableSC(net.NodesSharded[1][0].OwnAccount, "../../testdata/vaultV2.wasm") // Create the fungible token supply := int64(1000) - tokenID := issueFungibleToken(t, net, senderNode, "FUNG1", supply) + tokenID := multitransfer.IssueFungibleToken(t, net, senderNode, "FUNG1", supply) // Issue and create an SFT - sftID := issueNft(net, senderNode, "SFT1", true) - createSFT(t, net, senderNode, sftID, 1, supply) + sftID := multitransfer.IssueNft(net, senderNode, "SFT1", true) + multitransfer.CreateSFT(t, net, senderNode, sftID, 1, supply) // Send the tokens to the forwarder SC txData := txDataBuilder.NewBuilder() @@ -52,11 +53,11 @@ func TestESDTMultiTransferThroughForwarder(t *testing.T) { esdt.CheckAddressHasTokens(t, forwarder, net.Nodes, []byte(tokenID), 0, supply) // transfer to a user from another shard - transfers := []*esdtTransfer{ + transfers := []*multitransfer.EsdtTransfer{ { - tokenIdentifier: tokenID, - nonce: 0, - amount: 100, + TokenIdentifier: tokenID, + Nonce: 0, + Amount: 100, }} destAddress := net.NodesSharded[1][0].OwnAccount.Address multiTransferThroughForwarder( @@ -84,16 +85,16 @@ func TestESDTMultiTransferThroughForwarder(t *testing.T) { // transfer fungible and non-fungible // transfer to vault, same shard - transfers = []*esdtTransfer{ + transfers = []*multitransfer.EsdtTransfer{ { - tokenIdentifier: tokenID, - nonce: 0, - amount: 100, + TokenIdentifier: tokenID, + Nonce: 0, + Amount: 100, }, { - tokenIdentifier: sftID, - nonce: 1, - amount: 100, + TokenIdentifier: sftID, + Nonce: 1, + Amount: 100, }, } multiTransferThroughForwarder( @@ -112,16 +113,16 @@ func TestESDTMultiTransferThroughForwarder(t *testing.T) { // transfer fungible and non-fungible // transfer to vault, cross shard via transfer and execute - transfers = []*esdtTransfer{ + transfers = []*multitransfer.EsdtTransfer{ { - tokenIdentifier: tokenID, - nonce: 0, - amount: 100, + TokenIdentifier: tokenID, + Nonce: 0, + Amount: 100, }, { - tokenIdentifier: sftID, - nonce: 1, - amount: 100, + TokenIdentifier: sftID, + Nonce: 1, + Amount: 100, }, } multiTransferThroughForwarder( @@ -139,16 +140,16 @@ func TestESDTMultiTransferThroughForwarder(t *testing.T) { esdt.CheckAddressHasTokens(t, vaultOtherShard, net.Nodes, []byte(sftID), 1, 100) // transfer to vault, cross shard, via async call - transfers = []*esdtTransfer{ + transfers = []*multitransfer.EsdtTransfer{ { - tokenIdentifier: tokenID, - nonce: 0, - amount: 100, + TokenIdentifier: tokenID, + Nonce: 0, + Amount: 100, }, { - tokenIdentifier: sftID, - nonce: 1, - amount: 100, + TokenIdentifier: sftID, + Nonce: 1, + Amount: 100, }, } multiTransferThroughForwarder( @@ -171,14 +172,14 @@ func multiTransferThroughForwarder( ownerWallet *integrationTests.TestWalletAccount, forwarderAddress []byte, function string, - transfers []*esdtTransfer, + transfers 
[]*multitransfer.EsdtTransfer, destAddress []byte) { txData := txDataBuilder.NewBuilder() txData.Func(function).Bytes(destAddress) for _, transfer := range transfers { - txData.Str(transfer.tokenIdentifier).Int64(transfer.nonce).Int64(transfer.amount) + txData.Str(transfer.TokenIdentifier).Int64(transfer.Nonce).Int64(transfer.Amount) } tx := net.CreateTxUint64(ownerWallet, forwarderAddress, 0, txData.ToBytes()) diff --git a/integrationTests/vm/esdt/multi-transfer/esdtMultiTransferToVaultCrossShard/esdtMultiTransferToVaultCrossShard_test.go b/integrationTests/vm/esdt/multi-transfer/esdtMultiTransferToVaultCrossShard/esdtMultiTransferToVaultCrossShard_test.go new file mode 100644 index 00000000000..e3647bc878f --- /dev/null +++ b/integrationTests/vm/esdt/multi-transfer/esdtMultiTransferToVaultCrossShard/esdtMultiTransferToVaultCrossShard_test.go @@ -0,0 +1,11 @@ +package esdtMultiTransferToVaultCrossShard + +import ( + "testing" + + multitransfer "github.com/ElrondNetwork/elrond-go/integrationTests/vm/esdt/multi-transfer" +) + +func TestESDTMultiTransferToVaultCrossShard(t *testing.T) { + multitransfer.EsdtMultiTransferToVault(t, true, "../../testdata/vaultV2.wasm") +} diff --git a/integrationTests/vm/esdt/multi-transfer/esdtMultiTransferToVaultSameShard/esdtMultiTransferToVaultSameShard_test.go b/integrationTests/vm/esdt/multi-transfer/esdtMultiTransferToVaultSameShard/esdtMultiTransferToVaultSameShard_test.go new file mode 100644 index 00000000000..aab16166338 --- /dev/null +++ b/integrationTests/vm/esdt/multi-transfer/esdtMultiTransferToVaultSameShard/esdtMultiTransferToVaultSameShard_test.go @@ -0,0 +1,11 @@ +package esdtMultiTransferToVaultSameShard + +import ( + "testing" + + multitransfer "github.com/ElrondNetwork/elrond-go/integrationTests/vm/esdt/multi-transfer" +) + +func TestESDTMultiTransferToVaultSameShard(t *testing.T) { + multitransfer.EsdtMultiTransferToVault(t, false, "../../testdata/vaultV2.wasm") +} diff --git a/integrationTests/vm/esdt/multi-transfer/esdtMultiTransferToVault_test.go b/integrationTests/vm/esdt/multi-transfer/esdtMultiTransferToVault_test.go deleted file mode 100644 index a32985add98..00000000000 --- a/integrationTests/vm/esdt/multi-transfer/esdtMultiTransferToVault_test.go +++ /dev/null @@ -1,327 +0,0 @@ -package multitransfer - -import ( - "testing" - - "github.com/ElrondNetwork/elrond-go/integrationTests" -) - -func TestESDTMultiTransferToVaultSameShard(t *testing.T) { - esdtMultiTransferToVault(t, false) -} - -func TestESDTMultiTransferToVaultCrossShard(t *testing.T) { - esdtMultiTransferToVault(t, true) -} - -func esdtMultiTransferToVault(t *testing.T, crossShard bool) { - if testing.Short() { - t.Skip("this is not a short test") - } - - // For cross shard, we use 2 nodes, with node[1] being the SC deployer, and node[0] being the caller - numShards := 1 - nrRoundsToWait := numRoundsSameShard - - if crossShard { - numShards = 2 - nrRoundsToWait = numRoundsCrossShard - } - - net := integrationTests.NewTestNetworkSized(t, numShards, 1, 1) - net.Start() - defer net.Close() - - net.MintNodeAccountsUint64(10000000000) - net.Step() - - senderNode := net.NodesSharded[0][0] - if crossShard { - senderNode = net.NodesSharded[1][0] - } - - expectedIssuerBalance := make(map[string]map[int64]int64) - expectedVaultBalance := make(map[string]map[int64]int64) - - // deploy vault SC - vaultScAddress := deployNonPayableSmartContract(t, net, net.NodesSharded[0][0], "../testdata/vaultV2.wasm") - - // issue two fungible tokens - fungibleTokenIdentifier1 := 
issueFungibleToken(t, net, senderNode, "FUNG1", 1000) - fungibleTokenIdentifier2 := issueFungibleToken(t, net, senderNode, "FUNG2", 1000) - - expectedIssuerBalance[fungibleTokenIdentifier1] = make(map[int64]int64) - expectedIssuerBalance[fungibleTokenIdentifier2] = make(map[int64]int64) - expectedVaultBalance[fungibleTokenIdentifier1] = make(map[int64]int64) - expectedVaultBalance[fungibleTokenIdentifier2] = make(map[int64]int64) - - expectedIssuerBalance[fungibleTokenIdentifier1][0] = 1000 - expectedIssuerBalance[fungibleTokenIdentifier2][0] = 1000 - - // issue two NFT, with multiple NFTCreate - nonFungibleTokenIdentifier1 := issueNft(net, senderNode, "NFT1", false) - nonFungibleTokenIdentifier2 := issueNft(net, senderNode, "NFT2", false) - - expectedIssuerBalance[nonFungibleTokenIdentifier1] = make(map[int64]int64) - expectedIssuerBalance[nonFungibleTokenIdentifier2] = make(map[int64]int64) - - expectedVaultBalance[nonFungibleTokenIdentifier1] = make(map[int64]int64) - expectedVaultBalance[nonFungibleTokenIdentifier2] = make(map[int64]int64) - - for i := int64(1); i <= 10; i++ { - createNFT(t, net, senderNode, nonFungibleTokenIdentifier1, i) - createNFT(t, net, senderNode, nonFungibleTokenIdentifier2, i) - - expectedIssuerBalance[nonFungibleTokenIdentifier1][i] = 1 - expectedIssuerBalance[nonFungibleTokenIdentifier2][i] = 1 - } - - // issue two SFTs, with two NFTCreate for each - semiFungibleTokenIdentifier1 := issueNft(net, senderNode, "SFT1", true) - semiFungibleTokenIdentifier2 := issueNft(net, senderNode, "SFT2", true) - - expectedIssuerBalance[semiFungibleTokenIdentifier1] = make(map[int64]int64) - expectedIssuerBalance[semiFungibleTokenIdentifier2] = make(map[int64]int64) - - expectedVaultBalance[semiFungibleTokenIdentifier1] = make(map[int64]int64) - expectedVaultBalance[semiFungibleTokenIdentifier2] = make(map[int64]int64) - - for i := int64(1); i <= 2; i++ { - createSFT(t, net, senderNode, semiFungibleTokenIdentifier1, i, 1000) - createSFT(t, net, senderNode, semiFungibleTokenIdentifier2, i, 1000) - - expectedIssuerBalance[semiFungibleTokenIdentifier1][i] = 1000 - expectedIssuerBalance[semiFungibleTokenIdentifier2][i] = 1000 - } - - // send a single ESDT with multi-transfer - transfers := []*esdtTransfer{ - { - tokenIdentifier: fungibleTokenIdentifier1, - nonce: 0, - amount: 100, - }} - multiTransferToVault(t, net, senderNode, - vaultScAddress, transfers, nrRoundsToWait, - expectedIssuerBalance, expectedVaultBalance, - ) - - // send two identical transfers with multi-transfer - transfers = []*esdtTransfer{ - { - tokenIdentifier: fungibleTokenIdentifier1, - nonce: 0, - amount: 50, - }, - { - tokenIdentifier: fungibleTokenIdentifier1, - nonce: 0, - amount: 50, - }} - multiTransferToVault(t, net, senderNode, - vaultScAddress, transfers, nrRoundsToWait, - expectedIssuerBalance, expectedVaultBalance, - ) - - // send two different transfers amounts, same token - transfers = []*esdtTransfer{ - { - tokenIdentifier: fungibleTokenIdentifier1, - nonce: 0, - amount: 50, - }, - { - tokenIdentifier: fungibleTokenIdentifier1, - nonce: 0, - amount: 100, - }} - multiTransferToVault(t, net, senderNode, - vaultScAddress, transfers, nrRoundsToWait, - expectedIssuerBalance, expectedVaultBalance, - ) - - // send two different tokens, same amount - transfers = []*esdtTransfer{ - { - tokenIdentifier: fungibleTokenIdentifier1, - nonce: 0, - amount: 100, - }, - { - tokenIdentifier: fungibleTokenIdentifier2, - nonce: 0, - amount: 100, - }} - multiTransferToVault(t, net, senderNode, - vaultScAddress, 
transfers, nrRoundsToWait, - expectedIssuerBalance, expectedVaultBalance, - ) - - // send single NFT - transfers = []*esdtTransfer{ - { - tokenIdentifier: nonFungibleTokenIdentifier1, - nonce: 1, - amount: 1, - }} - multiTransferToVault(t, net, senderNode, - vaultScAddress, transfers, nrRoundsToWait, - expectedIssuerBalance, expectedVaultBalance, - ) - - // send two NFTs, same token ID - transfers = []*esdtTransfer{ - { - tokenIdentifier: nonFungibleTokenIdentifier1, - nonce: 2, - amount: 1, - }, - { - tokenIdentifier: nonFungibleTokenIdentifier1, - nonce: 3, - amount: 1, - }} - multiTransferToVault(t, net, senderNode, - vaultScAddress, transfers, nrRoundsToWait, - expectedIssuerBalance, expectedVaultBalance, - ) - - // send two NFTs, different token ID - transfers = []*esdtTransfer{ - { - tokenIdentifier: nonFungibleTokenIdentifier1, - nonce: 4, - amount: 1, - }, - { - tokenIdentifier: nonFungibleTokenIdentifier2, - nonce: 1, - amount: 1, - }} - multiTransferToVault(t, net, senderNode, - vaultScAddress, transfers, nrRoundsToWait, - expectedIssuerBalance, expectedVaultBalance, - ) - - // send fours NFTs, two of each different token ID - transfers = []*esdtTransfer{ - { - tokenIdentifier: nonFungibleTokenIdentifier1, - nonce: 5, - amount: 1, - }, - { - tokenIdentifier: nonFungibleTokenIdentifier2, - nonce: 2, - amount: 1, - }, - { - tokenIdentifier: nonFungibleTokenIdentifier1, - nonce: 6, - amount: 1, - }, - { - tokenIdentifier: nonFungibleTokenIdentifier2, - nonce: 3, - amount: 1, - }} - multiTransferToVault(t, net, senderNode, - vaultScAddress, transfers, nrRoundsToWait, - expectedIssuerBalance, expectedVaultBalance, - ) - - // send single SFT - transfers = []*esdtTransfer{ - { - tokenIdentifier: semiFungibleTokenIdentifier1, - nonce: 1, - amount: 100, - }} - multiTransferToVault(t, net, senderNode, - vaultScAddress, transfers, nrRoundsToWait, - expectedIssuerBalance, expectedVaultBalance, - ) - - // send two SFTs, same token ID - transfers = []*esdtTransfer{ - { - tokenIdentifier: semiFungibleTokenIdentifier1, - nonce: 1, - amount: 100, - }, - { - tokenIdentifier: semiFungibleTokenIdentifier1, - nonce: 2, - amount: 100, - }} - multiTransferToVault(t, net, senderNode, - vaultScAddress, transfers, nrRoundsToWait, - expectedIssuerBalance, expectedVaultBalance, - ) - - // send two SFTs, different token ID - transfers = []*esdtTransfer{ - { - tokenIdentifier: semiFungibleTokenIdentifier1, - nonce: 1, - amount: 100, - }, - { - tokenIdentifier: semiFungibleTokenIdentifier2, - nonce: 1, - amount: 100, - }} - multiTransferToVault(t, net, senderNode, - vaultScAddress, transfers, nrRoundsToWait, - expectedIssuerBalance, expectedVaultBalance, - ) - - // send fours SFTs, two of each different token ID - transfers = []*esdtTransfer{ - { - tokenIdentifier: semiFungibleTokenIdentifier1, - nonce: 1, - amount: 100, - }, - { - tokenIdentifier: semiFungibleTokenIdentifier2, - nonce: 2, - amount: 100, - }, - { - tokenIdentifier: semiFungibleTokenIdentifier1, - nonce: 2, - amount: 50, - }, - { - tokenIdentifier: semiFungibleTokenIdentifier2, - nonce: 1, - amount: 200, - }} - multiTransferToVault(t, net, senderNode, - vaultScAddress, transfers, nrRoundsToWait, - expectedIssuerBalance, expectedVaultBalance, - ) - - // transfer all 3 types - transfers = []*esdtTransfer{ - { - tokenIdentifier: fungibleTokenIdentifier1, - nonce: 0, - amount: 100, - }, - { - tokenIdentifier: semiFungibleTokenIdentifier2, - nonce: 2, - amount: 100, - }, - { - tokenIdentifier: nonFungibleTokenIdentifier1, - nonce: 7, - amount: 1, - 
}} - multiTransferToVault(t, net, senderNode, - vaultScAddress, transfers, nrRoundsToWait, - expectedIssuerBalance, expectedVaultBalance, - ) -} diff --git a/integrationTests/vm/esdt/multi-transfer/multiTransferCommon.go b/integrationTests/vm/esdt/multi-transfer/multiTransferCommon.go index f3ca482752e..ca4d62f5419 100644 --- a/integrationTests/vm/esdt/multi-transfer/multiTransferCommon.go +++ b/integrationTests/vm/esdt/multi-transfer/multiTransferCommon.go @@ -20,13 +20,15 @@ import ( const numRoundsCrossShard = 15 const numRoundsSameShard = 1 -type esdtTransfer struct { - tokenIdentifier string - nonce int64 - amount int64 +// EsdtTransfer - +type EsdtTransfer struct { + TokenIdentifier string + Nonce int64 + Amount int64 } -func issueFungibleToken( +// IssueFungibleToken - +func IssueFungibleToken( t *testing.T, net *integrationTests.TestNetwork, issuerNode *integrationTests.TestProcessorNode, @@ -48,7 +50,7 @@ func issueFungibleToken( issuePrice, vm.ESDTSCAddress, txData.ToString(), core.MinMetaTxExtraGasCost) - waitForOperationCompletion(net, numRoundsCrossShard) + WaitForOperationCompletion(net, numRoundsCrossShard) tokenIdentifier := integrationTests.GetTokenIdentifier(net.Nodes, []byte(ticker)) @@ -58,7 +60,8 @@ func issueFungibleToken( return string(tokenIdentifier) } -func issueNft( +// IssueNft - +func IssueNft( net *integrationTests.TestNetwork, issuerNode *integrationTests.TestProcessorNode, ticker string, @@ -85,7 +88,7 @@ func issueNft( vm.ESDTSCAddress, txData.ToString(), core.MinMetaTxExtraGasCost) - waitForOperationCompletion(net, numRoundsCrossShard) + WaitForOperationCompletion(net, numRoundsCrossShard) issuerAddress := issuerNode.OwnAccount.Address tokenIdentifier := string(integrationTests.GetTokenIdentifier(net.Nodes, []byte(ticker))) @@ -97,12 +100,13 @@ func issueNft( roles = append(roles, []byte(core.ESDTRoleNFTAddQuantity)) } - setLocalRoles(net, issuerNode, issuerAddress, tokenIdentifier, roles) + SetLocalRoles(net, issuerNode, issuerAddress, tokenIdentifier, roles) return tokenIdentifier } -func setLocalRoles( +// SetLocalRoles - +func SetLocalRoles( net *integrationTests.TestNetwork, issuerNode *integrationTests.TestProcessorNode, addrForRole []byte, @@ -124,10 +128,11 @@ func setLocalRoles( vm.ESDTSCAddress, txData, core.MinMetaTxExtraGasCost) - waitForOperationCompletion(net, numRoundsCrossShard) + WaitForOperationCompletion(net, numRoundsCrossShard) } -func createSFT( +// CreateSFT - +func CreateSFT( t *testing.T, net *integrationTests.TestNetwork, issuerNode *integrationTests.TestProcessorNode, @@ -161,13 +166,14 @@ func createSFT( issuerAddress, txData.ToString(), integrationTests.AdditionalGasLimit) - waitForOperationCompletion(net, numRoundsSameShard) + WaitForOperationCompletion(net, numRoundsSameShard) esdt.CheckAddressHasTokens(t, issuerAddress, net.Nodes, []byte(tokenIdentifier), createdTokenNonce, initialSupply) } -func createNFT( +// CreateNFT - +func CreateNFT( t *testing.T, net *integrationTests.TestNetwork, issuerNode *integrationTests.TestProcessorNode, @@ -175,12 +181,13 @@ func createNFT( createdTokenNonce int64, ) { - createSFT(t, net, issuerNode, tokenIdentifier, createdTokenNonce, 1) + CreateSFT(t, net, issuerNode, tokenIdentifier, createdTokenNonce, 1) } -func buildEsdtMultiTransferTxData( +// BuildEsdtMultiTransferTxData - +func BuildEsdtMultiTransferTxData( receiverAddress []byte, - transfers []*esdtTransfer, + transfers []*EsdtTransfer, endpointName string, arguments ...[]byte, ) string { @@ -193,9 +200,9 @@ func 
buildEsdtMultiTransferTxData( txData.Int(nrTransfers) for _, transfer := range transfers { - txData.Str(transfer.tokenIdentifier) - txData.Int64(transfer.nonce) - txData.Int64(transfer.amount) + txData.Str(transfer.TokenIdentifier) + txData.Int64(transfer.Nonce) + txData.Int64(transfer.Amount) } if len(endpointName) > 0 { @@ -209,17 +216,19 @@ func buildEsdtMultiTransferTxData( return txData.ToString() } -func waitForOperationCompletion(net *integrationTests.TestNetwork, roundsToWait int) { +// WaitForOperationCompletion - +func WaitForOperationCompletion(net *integrationTests.TestNetwork, roundsToWait int) { time.Sleep(time.Second) net.Steps(roundsToWait) } -func multiTransferToVault( +// MultiTransferToVault - +func MultiTransferToVault( t *testing.T, net *integrationTests.TestNetwork, senderNode *integrationTests.TestProcessorNode, vaultScAddress []byte, - transfers []*esdtTransfer, + transfers []*EsdtTransfer, nrRoundsToWait int, userBalances map[string]map[int64]int64, scBalances map[string]map[int64]int64, @@ -228,7 +237,7 @@ func multiTransferToVault( acceptMultiTransferEndpointName := "accept_funds_multi_transfer" senderAddress := senderNode.OwnAccount.Address - txData := buildEsdtMultiTransferTxData(vaultScAddress, + txData := BuildEsdtMultiTransferTxData(vaultScAddress, transfers, acceptMultiTransferEndpointName, ) @@ -241,27 +250,28 @@ func multiTransferToVault( txData, integrationTests.AdditionalGasLimit, ) - waitForOperationCompletion(net, nrRoundsToWait) + WaitForOperationCompletion(net, nrRoundsToWait) // update expected balances after transfers for _, transfer := range transfers { - userBalances[transfer.tokenIdentifier][transfer.nonce] -= transfer.amount - scBalances[transfer.tokenIdentifier][transfer.nonce] += transfer.amount + userBalances[transfer.TokenIdentifier][transfer.Nonce] -= transfer.Amount + scBalances[transfer.TokenIdentifier][transfer.Nonce] += transfer.Amount } // check expected vs actual values for _, transfer := range transfers { - expectedUserBalance := userBalances[transfer.tokenIdentifier][transfer.nonce] - expectedScBalance := scBalances[transfer.tokenIdentifier][transfer.nonce] + expectedUserBalance := userBalances[transfer.TokenIdentifier][transfer.Nonce] + expectedScBalance := scBalances[transfer.TokenIdentifier][transfer.Nonce] esdt.CheckAddressHasTokens(t, senderAddress, net.Nodes, - []byte(transfer.tokenIdentifier), transfer.nonce, expectedUserBalance) + []byte(transfer.TokenIdentifier), transfer.Nonce, expectedUserBalance) esdt.CheckAddressHasTokens(t, vaultScAddress, net.Nodes, - []byte(transfer.tokenIdentifier), transfer.nonce, expectedScBalance) + []byte(transfer.TokenIdentifier), transfer.Nonce, expectedScBalance) } } -func deployNonPayableSmartContract( +// DeployNonPayableSmartContract - +func DeployNonPayableSmartContract( t *testing.T, net *integrationTests.TestNetwork, deployerNode *integrationTests.TestProcessorNode, @@ -281,10 +291,323 @@ func deployNonPayableSmartContract( arwen.CreateDeployTxDataNonPayable(scCode), integrationTests.AdditionalGasLimit, ) - waitForOperationCompletion(net, 4) + WaitForOperationCompletion(net, 4) _, err := deployerNode.AccntState.GetExistingAccount(scAddress) require.Nil(t, err) return scAddress } + +// EsdtMultiTransferToVault - +func EsdtMultiTransferToVault(t *testing.T, crossShard bool, scCodeFilename string) { + if testing.Short() { + t.Skip("this is not a short test") + } + + // For cross shard, we use 2 nodes, with node[0] being the SC deployer, and node[1] being the caller + numShards := 1 + 
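The hunks above export the multi-transfer test helpers (EsdtTransfer, BuildEsdtMultiTransferTxData, MultiTransferToVault and the whole EsdtMultiTransferToVault scenario) so other integration-test packages can reuse them. A minimal usage sketch follows; the consumer package name and the relative path to the vault contract bytecode are assumptions for illustration only.

package myesdttests // hypothetical package reusing the exported helpers

import (
	"testing"

	multitransfer "github.com/ElrondNetwork/elrond-go/integrationTests/vm/esdt/multi-transfer"
)

func TestMultiTransferToVaultCrossShard(t *testing.T) {
	// crossShard=true runs the scenario on a two-shard network;
	// the wasm path below is illustrative, the caller supplies the real one
	multitransfer.EsdtMultiTransferToVault(t, true, "../testdata/vault.wasm")
}

As the loop in BuildEsdtMultiTransferTxData shows, the composed call data carries the number of transfers followed by a token identifier, nonce and amount triplet per transfer, with the optional endpoint name and its arguments appended at the end.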
nrRoundsToWait := numRoundsSameShard + + if crossShard { + numShards = 2 + nrRoundsToWait = numRoundsCrossShard + } + + net := integrationTests.NewTestNetworkSized(t, numShards, 1, 1) + net.Start() + defer net.Close() + + net.MintNodeAccountsUint64(10000000000) + net.Step() + + senderNode := net.NodesSharded[0][0] + if crossShard { + senderNode = net.NodesSharded[1][0] + } + + expectedIssuerBalance := make(map[string]map[int64]int64) + expectedVaultBalance := make(map[string]map[int64]int64) + + // deploy vault SC + vaultScAddress := DeployNonPayableSmartContract(t, net, net.NodesSharded[0][0], scCodeFilename) + + // issue two fungible tokens + fungibleTokenIdentifier1 := IssueFungibleToken(t, net, senderNode, "FUNG1", 1000) + fungibleTokenIdentifier2 := IssueFungibleToken(t, net, senderNode, "FUNG2", 1000) + + expectedIssuerBalance[fungibleTokenIdentifier1] = make(map[int64]int64) + expectedIssuerBalance[fungibleTokenIdentifier2] = make(map[int64]int64) + expectedVaultBalance[fungibleTokenIdentifier1] = make(map[int64]int64) + expectedVaultBalance[fungibleTokenIdentifier2] = make(map[int64]int64) + + expectedIssuerBalance[fungibleTokenIdentifier1][0] = 1000 + expectedIssuerBalance[fungibleTokenIdentifier2][0] = 1000 + + // issue two NFTs, with multiple NFTCreate calls + nonFungibleTokenIdentifier1 := IssueNft(net, senderNode, "NFT1", false) + nonFungibleTokenIdentifier2 := IssueNft(net, senderNode, "NFT2", false) + + expectedIssuerBalance[nonFungibleTokenIdentifier1] = make(map[int64]int64) + expectedIssuerBalance[nonFungibleTokenIdentifier2] = make(map[int64]int64) + + expectedVaultBalance[nonFungibleTokenIdentifier1] = make(map[int64]int64) + expectedVaultBalance[nonFungibleTokenIdentifier2] = make(map[int64]int64) + + for i := int64(1); i <= 10; i++ { + CreateNFT(t, net, senderNode, nonFungibleTokenIdentifier1, i) + CreateNFT(t, net, senderNode, nonFungibleTokenIdentifier2, i) + + expectedIssuerBalance[nonFungibleTokenIdentifier1][i] = 1 + expectedIssuerBalance[nonFungibleTokenIdentifier2][i] = 1 + } + + // issue two SFTs, with two NFTCreate calls for each + semiFungibleTokenIdentifier1 := IssueNft(net, senderNode, "SFT1", true) + semiFungibleTokenIdentifier2 := IssueNft(net, senderNode, "SFT2", true) + + expectedIssuerBalance[semiFungibleTokenIdentifier1] = make(map[int64]int64) + expectedIssuerBalance[semiFungibleTokenIdentifier2] = make(map[int64]int64) + + expectedVaultBalance[semiFungibleTokenIdentifier1] = make(map[int64]int64) + expectedVaultBalance[semiFungibleTokenIdentifier2] = make(map[int64]int64) + + for i := int64(1); i <= 2; i++ { + CreateSFT(t, net, senderNode, semiFungibleTokenIdentifier1, i, 1000) + CreateSFT(t, net, senderNode, semiFungibleTokenIdentifier2, i, 1000) + + expectedIssuerBalance[semiFungibleTokenIdentifier1][i] = 1000 + expectedIssuerBalance[semiFungibleTokenIdentifier2][i] = 1000 + } + + // send a single ESDT with multi-transfer + transfers := []*EsdtTransfer{ + { + TokenIdentifier: fungibleTokenIdentifier1, + Nonce: 0, + Amount: 100, + }} + MultiTransferToVault(t, net, senderNode, + vaultScAddress, transfers, nrRoundsToWait, + expectedIssuerBalance, expectedVaultBalance, + ) + + // send two identical transfers with multi-transfer + transfers = []*EsdtTransfer{ + { + TokenIdentifier: fungibleTokenIdentifier1, + Nonce: 0, + Amount: 50, + }, + { + TokenIdentifier: fungibleTokenIdentifier1, + Nonce: 0, + Amount: 50, + }} + MultiTransferToVault(t, net, senderNode, + vaultScAddress, transfers, nrRoundsToWait, + expectedIssuerBalance, expectedVaultBalance, + ) + + // send 
two transfers with different amounts, same token + transfers = []*EsdtTransfer{ + { + TokenIdentifier: fungibleTokenIdentifier1, + Nonce: 0, + Amount: 50, + }, + { + TokenIdentifier: fungibleTokenIdentifier1, + Nonce: 0, + Amount: 100, + }} + MultiTransferToVault(t, net, senderNode, + vaultScAddress, transfers, nrRoundsToWait, + expectedIssuerBalance, expectedVaultBalance, + ) + + // send two different tokens, same amount + transfers = []*EsdtTransfer{ + { + TokenIdentifier: fungibleTokenIdentifier1, + Nonce: 0, + Amount: 100, + }, + { + TokenIdentifier: fungibleTokenIdentifier2, + Nonce: 0, + Amount: 100, + }} + MultiTransferToVault(t, net, senderNode, + vaultScAddress, transfers, nrRoundsToWait, + expectedIssuerBalance, expectedVaultBalance, + ) + + // send single NFT + transfers = []*EsdtTransfer{ + { + TokenIdentifier: nonFungibleTokenIdentifier1, + Nonce: 1, + Amount: 1, + }} + MultiTransferToVault(t, net, senderNode, + vaultScAddress, transfers, nrRoundsToWait, + expectedIssuerBalance, expectedVaultBalance, + ) + + // send two NFTs, same token ID + transfers = []*EsdtTransfer{ + { + TokenIdentifier: nonFungibleTokenIdentifier1, + Nonce: 2, + Amount: 1, + }, + { + TokenIdentifier: nonFungibleTokenIdentifier1, + Nonce: 3, + Amount: 1, + }} + MultiTransferToVault(t, net, senderNode, + vaultScAddress, transfers, nrRoundsToWait, + expectedIssuerBalance, expectedVaultBalance, + ) + + // send two NFTs, different token ID + transfers = []*EsdtTransfer{ + { + TokenIdentifier: nonFungibleTokenIdentifier1, + Nonce: 4, + Amount: 1, + }, + { + TokenIdentifier: nonFungibleTokenIdentifier2, + Nonce: 1, + Amount: 1, + }} + MultiTransferToVault(t, net, senderNode, + vaultScAddress, transfers, nrRoundsToWait, + expectedIssuerBalance, expectedVaultBalance, + ) + + // send four NFTs, two of each token ID + transfers = []*EsdtTransfer{ + { + TokenIdentifier: nonFungibleTokenIdentifier1, + Nonce: 5, + Amount: 1, + }, + { + TokenIdentifier: nonFungibleTokenIdentifier2, + Nonce: 2, + Amount: 1, + }, + { + TokenIdentifier: nonFungibleTokenIdentifier1, + Nonce: 6, + Amount: 1, + }, + { + TokenIdentifier: nonFungibleTokenIdentifier2, + Nonce: 3, + Amount: 1, + }} + MultiTransferToVault(t, net, senderNode, + vaultScAddress, transfers, nrRoundsToWait, + expectedIssuerBalance, expectedVaultBalance, + ) + + // send single SFT + transfers = []*EsdtTransfer{ + { + TokenIdentifier: semiFungibleTokenIdentifier1, + Nonce: 1, + Amount: 100, + }} + MultiTransferToVault(t, net, senderNode, + vaultScAddress, transfers, nrRoundsToWait, + expectedIssuerBalance, expectedVaultBalance, + ) + + // send two SFTs, same token ID + transfers = []*EsdtTransfer{ + { + TokenIdentifier: semiFungibleTokenIdentifier1, + Nonce: 1, + Amount: 100, + }, + { + TokenIdentifier: semiFungibleTokenIdentifier1, + Nonce: 2, + Amount: 100, + }} + MultiTransferToVault(t, net, senderNode, + vaultScAddress, transfers, nrRoundsToWait, + expectedIssuerBalance, expectedVaultBalance, + ) + + // send two SFTs, different token ID + transfers = []*EsdtTransfer{ + { + TokenIdentifier: semiFungibleTokenIdentifier1, + Nonce: 1, + Amount: 100, + }, + { + TokenIdentifier: semiFungibleTokenIdentifier2, + Nonce: 1, + Amount: 100, + }} + MultiTransferToVault(t, net, senderNode, + vaultScAddress, transfers, nrRoundsToWait, + expectedIssuerBalance, expectedVaultBalance, + ) + + // send four SFTs, two of each token ID + transfers = []*EsdtTransfer{ + { + TokenIdentifier: semiFungibleTokenIdentifier1, + Nonce: 1, + Amount: 100, + }, + { + TokenIdentifier: 
semiFungibleTokenIdentifier2, + Nonce: 2, + Amount: 100, + }, + { + TokenIdentifier: semiFungibleTokenIdentifier1, + Nonce: 2, + Amount: 50, + }, + { + TokenIdentifier: semiFungibleTokenIdentifier2, + Nonce: 1, + Amount: 200, + }} + MultiTransferToVault(t, net, senderNode, + vaultScAddress, transfers, nrRoundsToWait, + expectedIssuerBalance, expectedVaultBalance, + ) + + // transfer all 3 types + transfers = []*EsdtTransfer{ + { + TokenIdentifier: fungibleTokenIdentifier1, + Nonce: 0, + Amount: 100, + }, + { + TokenIdentifier: semiFungibleTokenIdentifier2, + Nonce: 2, + Amount: 100, + }, + { + TokenIdentifier: nonFungibleTokenIdentifier1, + Nonce: 7, + Amount: 1, + }} + MultiTransferToVault(t, net, senderNode, + vaultScAddress, transfers, nrRoundsToWait, + expectedIssuerBalance, expectedVaultBalance, + ) +} diff --git a/integrationTests/vm/esdt/nft/common.go b/integrationTests/vm/esdt/nft/common.go new file mode 100644 index 00000000000..b1762095f34 --- /dev/null +++ b/integrationTests/vm/esdt/nft/common.go @@ -0,0 +1,122 @@ +package nft + +import ( + "encoding/hex" + "fmt" + "math/big" + "testing" + "time" + + "github.com/ElrondNetwork/elrond-go-core/core" + "github.com/ElrondNetwork/elrond-go/integrationTests" + "github.com/ElrondNetwork/elrond-go/integrationTests/vm/esdt" + "github.com/stretchr/testify/require" +) + +// NftArguments - +type NftArguments struct { + Name []byte + Quantity int64 + Royalties int64 + Hash []byte + Attributes []byte + URI [][]byte +} + +// CreateNFT - +func CreateNFT(tokenIdentifier []byte, issuer *integrationTests.TestProcessorNode, nodes []*integrationTests.TestProcessorNode, args *NftArguments) { + txData := fmt.Sprintf("%s@%s@%s@%s@%s@%s@%s@%s@", + core.BuiltInFunctionESDTNFTCreate, + hex.EncodeToString(tokenIdentifier), + hex.EncodeToString(big.NewInt(args.Quantity).Bytes()), + hex.EncodeToString(args.Name), + hex.EncodeToString(big.NewInt(args.Royalties).Bytes()), + hex.EncodeToString(args.Hash), + hex.EncodeToString(args.Attributes), + hex.EncodeToString(args.URI[0]), + ) + + integrationTests.CreateAndSendTransaction(issuer, nodes, big.NewInt(0), issuer.OwnAccount.Address, txData, integrationTests.AdditionalGasLimit) +} + +// CheckNftData - +func CheckNftData( + t *testing.T, + creator []byte, + address []byte, + nodes []*integrationTests.TestProcessorNode, + tickerID []byte, + args *NftArguments, + nonce uint64, +) { + esdtData := esdt.GetESDTTokenData(t, address, nodes, tickerID, nonce) + + if args.Quantity == 0 { + require.Nil(t, esdtData.TokenMetaData) + return + } + + require.NotNil(t, esdtData.TokenMetaData) + require.Equal(t, creator, esdtData.TokenMetaData.Creator) + require.Equal(t, args.URI[0], esdtData.TokenMetaData.URIs[0]) + require.Equal(t, args.Attributes, esdtData.TokenMetaData.Attributes) + require.Equal(t, args.Name, esdtData.TokenMetaData.Name) + require.Equal(t, args.Hash, esdtData.TokenMetaData.Hash) + require.Equal(t, uint32(args.Royalties), esdtData.TokenMetaData.Royalties) + require.Equal(t, big.NewInt(args.Quantity).Bytes(), esdtData.Value.Bytes()) +} + +// PrepareNFTWithRoles - +func PrepareNFTWithRoles( + t *testing.T, + nodes []*integrationTests.TestProcessorNode, + idxProposers []int, + nftCreator *integrationTests.TestProcessorNode, + round *uint64, + nonce *uint64, + esdtType string, + quantity int64, + roles [][]byte, +) (string, *NftArguments) { + esdt.IssueNFT(nodes, esdtType, "SFT") + + time.Sleep(time.Second) + nrRoundsToPropagateMultiShard := 10 + *nonce, *round = integrationTests.WaitOperationToBeDone(t, nodes, 
nrRoundsToPropagateMultiShard, *nonce, *round, idxProposers) + time.Sleep(time.Second) + + tokenIdentifier := string(integrationTests.GetTokenIdentifier(nodes, []byte("SFT"))) + + // ----- set special roles + esdt.SetRoles(nodes, nftCreator.OwnAccount.Address, []byte(tokenIdentifier), roles) + + time.Sleep(time.Second) + *nonce, *round = integrationTests.WaitOperationToBeDone(t, nodes, nrRoundsToPropagateMultiShard, *nonce, *round, idxProposers) + time.Sleep(time.Second) + + nftMetaData := NftArguments{ + Name: []byte("nft name"), + Quantity: quantity, + Royalties: 9000, + Hash: []byte("hash"), + Attributes: []byte("attr"), + URI: [][]byte{[]byte("uri")}, + } + CreateNFT([]byte(tokenIdentifier), nftCreator, nodes, &nftMetaData) + + time.Sleep(time.Second) + *nonce, *round = integrationTests.WaitOperationToBeDone(t, nodes, 3, *nonce, *round, idxProposers) + time.Sleep(time.Second) + + CheckNftData( + t, + nftCreator.OwnAccount.Address, + nftCreator.OwnAccount.Address, + nodes, + []byte(tokenIdentifier), + &nftMetaData, + 1, + ) + + return tokenIdentifier, &nftMetaData +} diff --git a/integrationTests/vm/esdt/nft/esdtNft_test.go b/integrationTests/vm/esdt/nft/esdtNFT/esdtNft_test.go similarity index 76% rename from integrationTests/vm/esdt/nft/esdtNft_test.go rename to integrationTests/vm/esdt/nft/esdtNFT/esdtNft_test.go index 903927fe2cc..6faf465aff6 100644 --- a/integrationTests/vm/esdt/nft/esdtNft_test.go +++ b/integrationTests/vm/esdt/nft/esdtNFT/esdtNft_test.go @@ -1,12 +1,11 @@ //go:build !race // +build !race -package nft +package esdtNFT import ( "bytes" "encoding/hex" - "fmt" "math/big" "testing" "time" @@ -14,8 +13,8 @@ import ( "github.com/ElrondNetwork/elrond-go-core/core" "github.com/ElrondNetwork/elrond-go/integrationTests" "github.com/ElrondNetwork/elrond-go/integrationTests/vm/esdt" + "github.com/ElrondNetwork/elrond-go/integrationTests/vm/esdt/nft" "github.com/ElrondNetwork/elrond-go/vm" - "github.com/stretchr/testify/require" ) func TestESDTNonFungibleTokenCreateAndBurn(t *testing.T) { @@ -60,7 +59,7 @@ func TestESDTNonFungibleTokenCreateAndBurn(t *testing.T) { []byte(core.ESDTRoleNFTBurn), } - tokenIdentifier, nftMetaData := prepareNFTWithRoles( + tokenIdentifier, nftMetaData := nft.PrepareNFTWithRoles( t, nodes, idxProposers, @@ -93,8 +92,8 @@ func TestESDTNonFungibleTokenCreateAndBurn(t *testing.T) { time.Sleep(time.Second) // the token data is removed from trie if the quantity is 0, so we should not find it - nftMetaData.quantity = 0 - checkNftData( + nftMetaData.Quantity = 0 + nft.CheckNftData( t, nodes[1].OwnAccount.Address, nodes[1].OwnAccount.Address, @@ -149,7 +148,7 @@ func TestESDTSemiFungibleTokenCreateAddAndBurn(t *testing.T) { } initialQuantity := int64(5) - tokenIdentifier, nftMetaData := prepareNFTWithRoles( + tokenIdentifier, nftMetaData := nft.PrepareNFTWithRoles( t, nodes, idxProposers, @@ -181,8 +180,8 @@ func TestESDTSemiFungibleTokenCreateAddAndBurn(t *testing.T) { nonce, round = integrationTests.WaitOperationToBeDone(t, nodes, nrRoundsToPropagateMultiShard, nonce, round, idxProposers) time.Sleep(time.Second) - nftMetaData.quantity += quantityToAdd - checkNftData( + nftMetaData.Quantity += quantityToAdd + nft.CheckNftData( t, nodes[1].OwnAccount.Address, nodes[1].OwnAccount.Address, @@ -197,7 +196,7 @@ func TestESDTSemiFungibleTokenCreateAddAndBurn(t *testing.T) { nonce, round = integrationTests.WaitOperationToBeDone(t, nodes, nrRoundsToPropagateMultiShard, nonce, round, idxProposers) time.Sleep(time.Second) - checkNftData( + nft.CheckNftData( t, 
nodes[1].OwnAccount.Address, nodes[1].OwnAccount.Address, @@ -226,8 +225,8 @@ func TestESDTSemiFungibleTokenCreateAddAndBurn(t *testing.T) { nonce, round = integrationTests.WaitOperationToBeDone(t, nodes, nrRoundsToPropagateMultiShard, nonce, round, idxProposers) time.Sleep(time.Second) - nftMetaData.quantity -= quantityToBurn - checkNftData( + nftMetaData.Quantity -= quantityToBurn + nft.CheckNftData( t, nodes[1].OwnAccount.Address, nodes[1].OwnAccount.Address, @@ -279,7 +278,7 @@ func TestESDTNonFungibleTokenTransferSelfShard(t *testing.T) { []byte(core.ESDTRoleNFTCreate), []byte(core.ESDTRoleNFTBurn), } - tokenIdentifier, nftMetaData := prepareNFTWithRoles( + tokenIdentifier, nftMetaData := nft.PrepareNFTWithRoles( t, nodes, idxProposers, @@ -323,7 +322,7 @@ func TestESDTNonFungibleTokenTransferSelfShard(t *testing.T) { time.Sleep(time.Second) // check that the new address owns the NFT - checkNftData( + nft.CheckNftData( t, nodes[1].OwnAccount.Address, nodeInSameShard.OwnAccount.Address, @@ -334,8 +333,8 @@ func TestESDTNonFungibleTokenTransferSelfShard(t *testing.T) { ) // check that the creator doesn't has the token data in trie anymore - nftMetaData.quantity = 0 - checkNftData( + nftMetaData.Quantity = 0 + nft.CheckNftData( t, nodes[1].OwnAccount.Address, nodes[1].OwnAccount.Address, @@ -399,7 +398,7 @@ func TestESDTSemiFungibleTokenTransferCrossShard(t *testing.T) { } initialQuantity := int64(5) - tokenIdentifier, nftMetaData := prepareNFTWithRoles( + tokenIdentifier, nftMetaData := nft.PrepareNFTWithRoles( t, nodes, idxProposers, @@ -431,8 +430,8 @@ func TestESDTSemiFungibleTokenTransferCrossShard(t *testing.T) { nonce, round = integrationTests.WaitOperationToBeDone(t, nodes, nrRoundsToPropagateMultiShard, nonce, round, idxProposers) time.Sleep(time.Second) - nftMetaData.quantity += quantityToAdd - checkNftData( + nftMetaData.Quantity += quantityToAdd + nft.CheckNftData( t, nodeInDifferentShard.OwnAccount.Address, nodeInDifferentShard.OwnAccount.Address, @@ -447,7 +446,7 @@ func TestESDTSemiFungibleTokenTransferCrossShard(t *testing.T) { nonce, round = integrationTests.WaitOperationToBeDone(t, nodes, nrRoundsToPropagateMultiShard, nonce, round, idxProposers) time.Sleep(time.Second) - checkNftData( + nft.CheckNftData( t, nodeInDifferentShard.OwnAccount.Address, nodeInDifferentShard.OwnAccount.Address, @@ -476,8 +475,8 @@ func TestESDTSemiFungibleTokenTransferCrossShard(t *testing.T) { nonce, round = integrationTests.WaitOperationToBeDone(t, nodes, nrRoundsToPropagateMultiShard, nonce, round, idxProposers) time.Sleep(time.Second) - nftMetaData.quantity = initialQuantity + quantityToAdd - quantityToTransfer - checkNftData( + nftMetaData.Quantity = initialQuantity + quantityToAdd - quantityToTransfer + nft.CheckNftData( t, nodeInDifferentShard.OwnAccount.Address, nodeInDifferentShard.OwnAccount.Address, @@ -487,8 +486,8 @@ func TestESDTSemiFungibleTokenTransferCrossShard(t *testing.T) { 1, ) - nftMetaData.quantity = quantityToTransfer - checkNftData( + nftMetaData.Quantity = quantityToTransfer + nft.CheckNftData( t, nodeInDifferentShard.OwnAccount.Address, nodes[0].OwnAccount.Address, @@ -543,7 +542,7 @@ func TestESDTSemiFungibleTokenTransferToSystemScAddressShouldReceiveBack(t *test } initialQuantity := int64(5) - tokenIdentifier, nftMetaData := prepareNFTWithRoles( + tokenIdentifier, nftMetaData := nft.PrepareNFTWithRoles( t, nodes, idxProposers, @@ -575,8 +574,8 @@ func TestESDTSemiFungibleTokenTransferToSystemScAddressShouldReceiveBack(t *test nonce, round = 
integrationTests.WaitOperationToBeDone(t, nodes, nrRoundsToPropagateMultiShard, nonce, round, idxProposers) time.Sleep(time.Second) - nftMetaData.quantity += quantityToAdd - checkNftData( + nftMetaData.Quantity += quantityToAdd + nft.CheckNftData( t, nodes[0].OwnAccount.Address, nodes[0].OwnAccount.Address, @@ -591,7 +590,7 @@ func TestESDTSemiFungibleTokenTransferToSystemScAddressShouldReceiveBack(t *test nonce, round = integrationTests.WaitOperationToBeDone(t, nodes, nrRoundsToPropagateMultiShard, nonce, round, idxProposers) time.Sleep(time.Second) - checkNftData( + nft.CheckNftData( t, nodes[0].OwnAccount.Address, nodes[0].OwnAccount.Address, @@ -620,8 +619,8 @@ func TestESDTSemiFungibleTokenTransferToSystemScAddressShouldReceiveBack(t *test nonce, round = integrationTests.WaitOperationToBeDone(t, nodes, nrRoundsToPropagateMultiShard, nonce, round, idxProposers) time.Sleep(time.Second) - nftMetaData.quantity = 0 // make sure that the ESDT SC address didn't receive the token - checkNftData( + nftMetaData.Quantity = 0 // make sure that the ESDT SC address didn't receive the token + nft.CheckNftData( t, nodes[0].OwnAccount.Address, vm.ESDTSCAddress, @@ -631,8 +630,8 @@ func TestESDTSemiFungibleTokenTransferToSystemScAddressShouldReceiveBack(t *test 1, ) - nftMetaData.quantity = initialQuantity + quantityToAdd // should have the same quantity like before transferring - checkNftData( + nftMetaData.Quantity = initialQuantity + quantityToAdd // should have the same quantity like before transferring + nft.CheckNftData( t, nodes[0].OwnAccount.Address, nodes[0].OwnAccount.Address, @@ -666,7 +665,7 @@ func testNFTSendCreateRole(t *testing.T, numOfShards int) { nftCreator := nodes[0] initialQuantity := int64(1) - tokenIdentifier, nftMetaData := prepareNFTWithRoles( + tokenIdentifier, nftMetaData := nft.PrepareNFTWithRoles( t, nodes, idxProposers, @@ -705,7 +704,7 @@ func testNFTSendCreateRole(t *testing.T, numOfShards int) { nonce, round = integrationTests.WaitOperationToBeDone(t, nodes, nrRoundsToPropagateMultiShard, nonce, round, idxProposers) time.Sleep(time.Second) - createNFT( + nft.CreateNFT( []byte(tokenIdentifier), nextNftCreator, nodes, @@ -717,7 +716,7 @@ func testNFTSendCreateRole(t *testing.T, numOfShards int) { nonce, round = integrationTests.WaitOperationToBeDone(t, nodes, nrRoundsToPropagateMultiShard, nonce, round, idxProposers) time.Sleep(time.Second) - checkNftData( + nft.CheckNftData( t, nextNftCreator.OwnAccount.Address, nextNftCreator.OwnAccount.Address, @@ -744,106 +743,167 @@ func TestESDTNFTSendCreateRoleInCrossShard(t *testing.T) { testNFTSendCreateRole(t, 2) } -func prepareNFTWithRoles( - t *testing.T, - nodes []*integrationTests.TestProcessorNode, - idxProposers []int, - nftCreator *integrationTests.TestProcessorNode, - round *uint64, - nonce *uint64, - esdtType string, - quantity int64, - roles [][]byte, -) (string, *nftArguments) { - esdt.IssueNFT(nodes, esdtType, "SFT") +func TestESDTSemiFungibleWithTransferRoleIntraShard(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } - time.Sleep(time.Second) - nrRoundsToPropagateMultiShard := 10 - *nonce, *round = integrationTests.WaitOperationToBeDone(t, nodes, nrRoundsToPropagateMultiShard, *nonce, *round, idxProposers) - time.Sleep(time.Second) + testESDTSemiFungibleTokenTransferRole(t, 1) +} - tokenIdentifier := string(integrationTests.GetTokenIdentifier(nodes, []byte("SFT"))) +func TestESDTSemiFungibleWithTransferRoleCrossShard(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short 
test") + } - // ----- set special roles - esdt.SetRoles(nodes, nftCreator.OwnAccount.Address, []byte(tokenIdentifier), roles) + testESDTSemiFungibleTokenTransferRole(t, 2) +} - time.Sleep(time.Second) - *nonce, *round = integrationTests.WaitOperationToBeDone(t, nodes, nrRoundsToPropagateMultiShard, *nonce, *round, idxProposers) - time.Sleep(time.Second) +func testESDTSemiFungibleTokenTransferRole(t *testing.T, numOfShards int) { + nodesPerShard := 2 + numMetachainNodes := 2 + + nodes := integrationTests.CreateNodes( + numOfShards, + nodesPerShard, + numMetachainNodes, + ) - nftMetaData := nftArguments{ - name: []byte("nft name"), - quantity: quantity, - royalties: 9000, - hash: []byte("hash"), - attributes: []byte("attr"), - uri: [][]byte{[]byte("uri")}, + idxProposers := make([]int, numOfShards+1) + for i := 0; i < numOfShards; i++ { + idxProposers[i] = i * nodesPerShard } - createNFT([]byte(tokenIdentifier), nftCreator, nodes, &nftMetaData) + idxProposers[numOfShards] = numOfShards * nodesPerShard + + integrationTests.DisplayAndStartNodes(nodes) + + defer func() { + for _, n := range nodes { + _ = n.Messenger.Close() + } + }() + + initialVal := big.NewInt(10000000000) + integrationTests.MintAllNodes(nodes, initialVal) + + round := uint64(0) + nonce := uint64(0) + round = integrationTests.IncrementAndPrintRound(round) + nonce++ + + // get a node from a different shard + var nodeInDifferentShard = nodes[0] + for _, node := range nodes { + if node.ShardCoordinator.SelfId() != nodes[0].ShardCoordinator.SelfId() { + nodeInDifferentShard = node + break + } + } + + roles := [][]byte{ + []byte(core.ESDTRoleNFTCreate), + []byte(core.ESDTRoleNFTAddQuantity), + []byte(core.ESDTRoleNFTBurn), + []byte(core.ESDTRoleTransfer), + } + + initialQuantity := int64(5) + tokenIdentifier, nftMetaData := nft.PrepareNFTWithRoles( + t, + nodes, + idxProposers, + nodeInDifferentShard, + &round, + &nonce, + core.SemiFungibleESDT, + initialQuantity, + roles, + ) + + // increase quantity + nonceArg := hex.EncodeToString(big.NewInt(0).SetUint64(1).Bytes()) + quantityToAdd := int64(4) + quantityToAddArg := hex.EncodeToString(big.NewInt(quantityToAdd).Bytes()) + txData := []byte(core.BuiltInFunctionESDTNFTAddQuantity + "@" + hex.EncodeToString([]byte(tokenIdentifier)) + + "@" + nonceArg + "@" + quantityToAddArg) + integrationTests.CreateAndSendTransaction( + nodeInDifferentShard, + nodes, + big.NewInt(0), + nodeInDifferentShard.OwnAccount.Address, + string(txData), + integrationTests.AdditionalGasLimit, + ) time.Sleep(time.Second) - *nonce, *round = integrationTests.WaitOperationToBeDone(t, nodes, 3, *nonce, *round, idxProposers) + nrRoundsToPropagateMultiShard := 5 + nonce, round = integrationTests.WaitOperationToBeDone(t, nodes, nrRoundsToPropagateMultiShard, nonce, round, idxProposers) time.Sleep(time.Second) - checkNftData( + nftMetaData.Quantity += quantityToAdd + nft.CheckNftData( t, - nftCreator.OwnAccount.Address, - nftCreator.OwnAccount.Address, + nodeInDifferentShard.OwnAccount.Address, + nodeInDifferentShard.OwnAccount.Address, nodes, []byte(tokenIdentifier), - &nftMetaData, + nftMetaData, 1, ) - return tokenIdentifier, &nftMetaData -} + time.Sleep(time.Second) + nrRoundsToPropagateMultiShard = 5 + nonce, round = integrationTests.WaitOperationToBeDone(t, nodes, nrRoundsToPropagateMultiShard, nonce, round, idxProposers) + time.Sleep(time.Second) -type nftArguments struct { - name []byte - quantity int64 - royalties int64 - hash []byte - attributes []byte - uri [][]byte -} + nft.CheckNftData( + t, + 
nodeInDifferentShard.OwnAccount.Address, + nodeInDifferentShard.OwnAccount.Address, + nodes, + []byte(tokenIdentifier), + nftMetaData, + 1, + ) -func createNFT(tokenIdentifier []byte, issuer *integrationTests.TestProcessorNode, nodes []*integrationTests.TestProcessorNode, args *nftArguments) { - txData := fmt.Sprintf("%s@%s@%s@%s@%s@%s@%s@%s@", - core.BuiltInFunctionESDTNFTCreate, - hex.EncodeToString(tokenIdentifier), - hex.EncodeToString(big.NewInt(args.quantity).Bytes()), - hex.EncodeToString(args.name), - hex.EncodeToString(big.NewInt(args.royalties).Bytes()), - hex.EncodeToString(args.hash), - hex.EncodeToString(args.attributes), - hex.EncodeToString(args.uri[0]), + // transfer + quantityToTransfer := int64(4) + quantityToTransferArg := hex.EncodeToString(big.NewInt(quantityToTransfer).Bytes()) + txData = []byte(core.BuiltInFunctionESDTNFTTransfer + "@" + hex.EncodeToString([]byte(tokenIdentifier)) + + "@" + nonceArg + "@" + quantityToTransferArg + "@" + hex.EncodeToString(nodes[0].OwnAccount.Address)) + integrationTests.CreateAndSendTransaction( + nodeInDifferentShard, + nodes, + big.NewInt(0), + nodeInDifferentShard.OwnAccount.Address, + string(txData), + integrationTests.AdditionalGasLimit, ) - integrationTests.CreateAndSendTransaction(issuer, nodes, big.NewInt(0), issuer.OwnAccount.Address, txData, integrationTests.AdditionalGasLimit) -} + time.Sleep(time.Second) + nrRoundsToPropagateMultiShard = 11 + nonce, round = integrationTests.WaitOperationToBeDone(t, nodes, nrRoundsToPropagateMultiShard, nonce, round, idxProposers) + time.Sleep(time.Second) -func checkNftData( - t *testing.T, - creator []byte, - address []byte, - nodes []*integrationTests.TestProcessorNode, - tickerID []byte, - args *nftArguments, - nonce uint64, -) { - esdtData := esdt.GetESDTTokenData(t, address, nodes, tickerID, nonce) - - if args.quantity == 0 { - require.Nil(t, esdtData.TokenMetaData) - return - } + nftMetaData.Quantity = initialQuantity + quantityToAdd - quantityToTransfer + nft.CheckNftData( + t, + nodeInDifferentShard.OwnAccount.Address, + nodeInDifferentShard.OwnAccount.Address, + nodes, + []byte(tokenIdentifier), + nftMetaData, + 1, + ) - require.NotNil(t, esdtData.TokenMetaData) - require.Equal(t, creator, esdtData.TokenMetaData.Creator) - require.Equal(t, args.uri[0], esdtData.TokenMetaData.URIs[0]) - require.Equal(t, args.attributes, esdtData.TokenMetaData.Attributes) - require.Equal(t, args.name, esdtData.TokenMetaData.Name) - require.Equal(t, args.hash, esdtData.TokenMetaData.Hash) - require.Equal(t, uint32(args.royalties), esdtData.TokenMetaData.Royalties) - require.Equal(t, big.NewInt(args.quantity).Bytes(), esdtData.Value.Bytes()) + nftMetaData.Quantity = quantityToTransfer + nft.CheckNftData( + t, + nodeInDifferentShard.OwnAccount.Address, + nodes[0].OwnAccount.Address, + nodes, + []byte(tokenIdentifier), + nftMetaData, + 1, + ) } diff --git a/integrationTests/vm/esdt/nft/esdtNFTSCs_test.go b/integrationTests/vm/esdt/nft/esdtNFTSCs/esdtNFTSCs_test.go similarity index 97% rename from integrationTests/vm/esdt/nft/esdtNFTSCs_test.go rename to integrationTests/vm/esdt/nft/esdtNFTSCs/esdtNFTSCs_test.go index 8c4bb23db85..c5d1a6e7a5b 100644 --- a/integrationTests/vm/esdt/nft/esdtNFTSCs_test.go +++ b/integrationTests/vm/esdt/nft/esdtNFTSCs/esdtNFTSCs_test.go @@ -1,6 +1,7 @@ +//go:build !race // +build !race -package nft +package esdtNFTSCs import ( "encoding/hex" @@ -11,6 +12,7 @@ import ( "github.com/ElrondNetwork/elrond-go-core/core" "github.com/ElrondNetwork/elrond-go/integrationTests" 
"github.com/ElrondNetwork/elrond-go/integrationTests/vm/esdt" + "github.com/ElrondNetwork/elrond-go/integrationTests/vm/esdt/nft" "github.com/stretchr/testify/require" ) @@ -292,7 +294,7 @@ func TestESDTTransferNFTBetweenContractsAcceptAndNotAcceptWithRevert(t *testing. checkAddressHasNft(t, scAddress, scAddress, nodes, []byte(tokenIdentifier), 2, big.NewInt(1)) checkAddressHasNft(t, scAddress, scAddress, nodes, []byte(tokenIdentifier), 1, big.NewInt(1)) - destinationSCAddress := esdt.DeployNonPayableSmartContract(t, nodes, idxProposers, &nonce, &round, "../testdata/nft-receiver.wasm") + destinationSCAddress := esdt.DeployNonPayableSmartContract(t, nodes, idxProposers, &nonce, &round, "../../testdata/nft-receiver.wasm") txData = []byte("transferNftViaAsyncCall" + "@" + hex.EncodeToString(destinationSCAddress) + "@" + hex.EncodeToString([]byte(tokenIdentifier)) + "@" + hex.EncodeToString(big.NewInt(1).Bytes()) + "@" + hex.EncodeToString(big.NewInt(1).Bytes()) + "@" + hex.EncodeToString([]byte("wrongFunctionToCall"))) @@ -382,7 +384,7 @@ func TestESDTTransferNFTToSCIntraShard(t *testing.T) { []byte(core.ESDTRoleNFTCreate), []byte(core.ESDTRoleNFTBurn), } - tokenIdentifier, _ := prepareNFTWithRoles( + tokenIdentifier, _ := nft.PrepareNFTWithRoles( t, nodes, idxProposers, @@ -396,7 +398,7 @@ func TestESDTTransferNFTToSCIntraShard(t *testing.T) { nonceArg := hex.EncodeToString(big.NewInt(0).SetUint64(1).Bytes()) quantityToTransfer := hex.EncodeToString(big.NewInt(1).Bytes()) - destinationSCAddress := esdt.DeployNonPayableSmartContract(t, nodes, idxProposers, &nonce, &round, "../testdata/nft-receiver.wasm") + destinationSCAddress := esdt.DeployNonPayableSmartContract(t, nodes, idxProposers, &nonce, &round, "../../testdata/nft-receiver.wasm") txData := core.BuiltInFunctionESDTNFTTransfer + "@" + hex.EncodeToString([]byte(tokenIdentifier)) + "@" + nonceArg + "@" + quantityToTransfer + "@" + hex.EncodeToString(destinationSCAddress) + "@" + hex.EncodeToString([]byte("acceptAndReturnCallData")) integrationTests.CreateAndSendTransaction( @@ -435,7 +437,7 @@ func TestESDTTransferNFTToSCCrossShard(t *testing.T) { round = integrationTests.IncrementAndPrintRound(round) nonce++ - destinationSCAddress := esdt.DeployNonPayableSmartContract(t, nodes, idxProposers, &nonce, &round, "../testdata/nft-receiver.wasm") + destinationSCAddress := esdt.DeployNonPayableSmartContract(t, nodes, idxProposers, &nonce, &round, "../../testdata/nft-receiver.wasm") destinationSCShardID := nodes[0].ShardCoordinator.ComputeId(destinationSCAddress) @@ -452,7 +454,7 @@ func TestESDTTransferNFTToSCCrossShard(t *testing.T) { []byte(core.ESDTRoleNFTCreate), []byte(core.ESDTRoleNFTBurn), } - tokenIdentifier, _ := prepareNFTWithRoles( + tokenIdentifier, _ := nft.PrepareNFTWithRoles( t, nodes, idxProposers, @@ -528,7 +530,7 @@ func deployAndIssueNFTSFTThroughSC( issueFunc string, rolesEncoded string, ) ([]byte, string) { - scAddress := esdt.DeployNonPayableSmartContract(t, nodes, idxProposers, nonce, round, "../testdata/local-esdt-and-nft.wasm") + scAddress := esdt.DeployNonPayableSmartContract(t, nodes, idxProposers, nonce, round, "../../testdata/local-esdt-and-nft.wasm") issuePrice := big.NewInt(1000) txData := []byte(issueFunc + "@" + hex.EncodeToString([]byte("TOKEN")) + diff --git a/integrationTests/vm/esdt/process/esdtProcess_test.go b/integrationTests/vm/esdt/process/esdtProcess_test.go index ac5247226d5..9be5039b4fb 100644 --- a/integrationTests/vm/esdt/process/esdtProcess_test.go +++ 
b/integrationTests/vm/esdt/process/esdtProcess_test.go @@ -45,6 +45,8 @@ func TestESDTIssueAndTransactionsOnMultiShardEnvironment(t *testing.T) { GlobalMintBurnDisableEpoch: 10, BuiltInFunctionOnMetaEnableEpoch: 10, OptimizeGasUsedInCrossMiniBlocksEnableEpoch: 10, + ScheduledMiniBlocksEnableEpoch: 10, + MiniBlockPartialExecutionEnableEpoch: 10, } nodes := integrationTests.CreateNodesWithEnableEpochs( numOfShards, diff --git a/integrationTests/vm/staking/metaBlockProcessorCreator.go b/integrationTests/vm/staking/metaBlockProcessorCreator.go index 126d5a90c13..0c41a7f60b7 100644 --- a/integrationTests/vm/staking/metaBlockProcessorCreator.go +++ b/integrationTests/vm/staking/metaBlockProcessorCreator.go @@ -15,6 +15,7 @@ import ( blproc "github.com/ElrondNetwork/elrond-go/process/block" "github.com/ElrondNetwork/elrond-go/process/block/bootstrapStorage" "github.com/ElrondNetwork/elrond-go/process/block/postprocess" + "github.com/ElrondNetwork/elrond-go/process/block/processedMb" "github.com/ElrondNetwork/elrond-go/process/mock" "github.com/ElrondNetwork/elrond-go/process/scToProtocol" "github.com/ElrondNetwork/elrond-go/process/smartContract" @@ -91,6 +92,7 @@ func createMetaBlockProcessor( ScheduledMiniBlocksEnableEpoch: 10000, VMContainersFactory: metaVMFactory, VmContainer: vmContainer, + ProcessedMiniBlocksTracker: processedMb.NewProcessedMiniBlocksTracker(), }, SCToProtocol: stakingToPeer, PendingMiniBlocksHandler: &mock.PendingMiniBlocksHandlerStub{}, diff --git a/integrationTests/vm/testIndexer.go b/integrationTests/vm/testIndexer.go index 92e0970e750..aa450f76dca 100644 --- a/integrationTests/vm/testIndexer.go +++ b/integrationTests/vm/testIndexer.go @@ -16,6 +16,7 @@ import ( blockProc "github.com/ElrondNetwork/elastic-indexer-go/process/block" "github.com/ElrondNetwork/elastic-indexer-go/process/logsevents" "github.com/ElrondNetwork/elastic-indexer-go/process/miniblocks" + "github.com/ElrondNetwork/elastic-indexer-go/process/operations" "github.com/ElrondNetwork/elastic-indexer-go/process/statistics" "github.com/ElrondNetwork/elastic-indexer-go/process/transactions" "github.com/ElrondNetwork/elastic-indexer-go/process/validators" @@ -124,7 +125,8 @@ func (ti *testIndexer) createElasticProcessor( bp, _ := blockProc.NewBlockProcessor(testHasher, testMarshalizer) mp, _ := miniblocks.NewMiniblocksProcessor(shardCoordinator.SelfId(), testHasher, testMarshalizer, false) sp := statistics.NewStatisticsProcessor() - vp, _ := validators.NewValidatorsProcessor(pubkeyConv) + vp, _ := validators.NewValidatorsProcessor(pubkeyConv, 0) + opp, _ := operations.NewOperationsProcessor(false, shardCoordinator) args := &logsevents.ArgsLogsAndEventsProcessor{ ShardCoordinator: shardCoordinator, PubKeyConverter: pubkeyConv, @@ -149,6 +151,7 @@ func (ti *testIndexer) createElasticProcessor( ValidatorsProc: vp, LogsAndEventsProc: lp, DBClient: databaseClient, + OperationsProc: opp, } esProcessor, _ := elasticProcessor.NewElasticProcessor(esIndexerArgs) @@ -254,6 +257,7 @@ func (ti *testIndexer) createDatabaseClient(hasResults bool) elasticProcessor.Da ti.indexerData[index] = buff if !done { done = true + ti.saveDoneChan <- struct{}{} return nil } ti.saveDoneChan <- struct{}{} @@ -270,7 +274,7 @@ func (ti *testIndexer) createDatabaseClient(hasResults bool) elasticProcessor.Da // GetIndexerPreparedTransaction - func (ti *testIndexer) GetIndexerPreparedTransaction(t *testing.T) *indexerTypes.Transaction { ti.mutex.RLock() - txData, ok := ti.indexerData["transactions"] + txData, ok := ti.indexerData[""] 
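The esdtProcess_test.go hunk above pins several epoch-gated switches, including the newly added ScheduledMiniBlocksEnableEpoch and MiniBlockPartialExecutionEnableEpoch, to epoch 10, past the epochs such a test actually reaches. A minimal sketch of that pattern, assuming config.EnableEpochs and the CreateNodesWithEnableEpochs signature implied by the call visible in the hunk; the package name and shard/node counts are placeholders.

package esdtprocesssketch // hypothetical package for the sketch

import (
	"testing"

	"github.com/ElrondNetwork/elrond-go/config"
	"github.com/ElrondNetwork/elrond-go/integrationTests"
)

func TestWithPostponedFeatures(t *testing.T) {
	numOfShards := 2
	nodesPerShard := 2
	numMetachainNodes := 2

	// keep the listed epoch-gated changes out of the test's epoch range
	enableEpochs := config.EnableEpochs{
		GlobalMintBurnDisableEpoch:                  10,
		BuiltInFunctionOnMetaEnableEpoch:            10,
		OptimizeGasUsedInCrossMiniBlocksEnableEpoch: 10,
		ScheduledMiniBlocksEnableEpoch:              10,
		MiniBlockPartialExecutionEnableEpoch:        10,
	}

	// assumption: same argument order as the call in the hunk above
	nodes := integrationTests.CreateNodesWithEnableEpochs(
		numOfShards,
		nodesPerShard,
		numMetachainNodes,
		enableEpochs,
	)
	_ = nodes // the real tests continue with minting accounts, proposing blocks, etc.
}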
ti.mutex.RUnlock() require.True(t, ok) @@ -300,7 +304,7 @@ func (ti *testIndexer) GetIndexerPreparedTransaction(t *testing.T) *indexerTypes func (ti *testIndexer) printReceipt() { ti.mutex.RLock() - receipts, ok := ti.indexerData["receipts"] + receipts, ok := ti.indexerData[""] ti.mutex.RUnlock() if !ok { @@ -319,7 +323,7 @@ func (ti *testIndexer) printReceipt() { func (ti *testIndexer) putSCRSInTx(tx *indexerTypes.Transaction) { ti.mutex.RLock() - scrData, ok := ti.indexerData["scresults"] + scrData, ok := ti.indexerData[""] ti.mutex.RUnlock() if !ok { @@ -330,6 +334,10 @@ func (ti *testIndexer) putSCRSInTx(tx *indexerTypes.Transaction) { require.True(ti.t, len(split) > 2) for idx := 1; idx < len(split); idx += 2 { + if !bytes.Contains(split[idx], []byte("scresults")) { + continue + } + newSCR := &indexerTypes.ScResult{} err := json.Unmarshal(split[idx], newSCR) require.Nil(ti.t, err) diff --git a/node/interface.go b/node/interface.go index 66b9cfef158..b9c4b5200c9 100644 --- a/node/interface.go +++ b/node/interface.go @@ -31,7 +31,7 @@ type P2PMessenger interface { // The interface assures that the collected data will be used by the p2p network sharding components type NetworkShardingCollector interface { UpdatePeerIDInfo(pid core.PeerID, pk []byte, shardID uint32) - UpdatePeerIdSubType(pid core.PeerID, peerSubType core.P2PPeerSubType) + PutPeerIdSubType(pid core.PeerID, peerSubType core.P2PPeerSubType) GetPeerInfo(pid core.PeerID) core.P2PPeerInfo IsInterfaceNil() bool } @@ -50,12 +50,14 @@ type P2PAntifloodHandler interface { // HardforkTrigger defines the behavior of a hardfork trigger type HardforkTrigger interface { + SetExportFactoryHandler(exportFactoryHandler update.ExportFactoryHandler) error TriggerReceived(payload []byte, data []byte, pkBytes []byte) (bool, error) RecordedTriggerMessage() ([]byte, bool) Trigger(epoch uint32, withEarlyEndOfEpoch bool) error CreateData() []byte AddCloser(closer update.Closer) error NotifyTriggerReceived() <-chan struct{} + NotifyTriggerReceivedV2() <-chan struct{} IsSelfTrigger() bool IsInterfaceNil() bool } diff --git a/node/metrics/metrics.go b/node/metrics/metrics.go index b655e40914f..b370fe322d3 100644 --- a/node/metrics/metrics.go +++ b/node/metrics/metrics.go @@ -45,8 +45,6 @@ func InitBaseMetrics(statusHandlerUtils StatusHandlersUtils) error { appStatusHandler.SetUInt64Value(common.MetricNumTimesInForkChoice, initUint) appStatusHandler.SetUInt64Value(common.MetricHighestFinalBlock, initUint) appStatusHandler.SetUInt64Value(common.MetricCountConsensusAcceptedBlocks, initUint) - appStatusHandler.SetUInt64Value(common.MetricRoundAtEpochStart, initUint) - appStatusHandler.SetUInt64Value(common.MetricNonceAtEpochStart, initUint) appStatusHandler.SetUInt64Value(common.MetricRoundsPassedInCurrentEpoch, initUint) appStatusHandler.SetUInt64Value(common.MetricNoncesPassedInCurrentEpoch, initUint) appStatusHandler.SetUInt64Value(common.MetricNumConnectedPeers, initUint) @@ -121,6 +119,7 @@ func InitConfigMetrics( appStatusHandler.SetUInt64Value(common.MetricGlobalMintBurnDisableEpoch, uint64(enableEpochs.GlobalMintBurnDisableEpoch)) appStatusHandler.SetUInt64Value(common.MetricESDTTransferRoleEnableEpoch, uint64(enableEpochs.ESDTTransferRoleEnableEpoch)) appStatusHandler.SetUInt64Value(common.MetricBuiltInFunctionOnMetaEnableEpoch, uint64(enableEpochs.BuiltInFunctionOnMetaEnableEpoch)) + appStatusHandler.SetUInt64Value(common.MetricHeartbeatDisableEpoch, uint64(enableEpochs.HeartbeatDisableEpoch)) 
appStatusHandler.SetStringValue(common.MetricTotalSupply, economicsConfig.GlobalSettings.GenesisTotalSupply) appStatusHandler.SetUInt64Value(common.MetricWaitingListFixEnableEpoch, uint64(enableEpochs.WaitingListFixEnableEpoch)) diff --git a/node/metrics/metrics_test.go b/node/metrics/metrics_test.go index 42cc66747fb..4c66a56789f 100644 --- a/node/metrics/metrics_test.go +++ b/node/metrics/metrics_test.go @@ -32,8 +32,6 @@ func TestInitBaseMetrics(t *testing.T) { common.MetricNumTimesInForkChoice, common.MetricHighestFinalBlock, common.MetricCountConsensusAcceptedBlocks, - common.MetricRoundAtEpochStart, - common.MetricNonceAtEpochStart, common.MetricRoundsPassedInCurrentEpoch, common.MetricNoncesPassedInCurrentEpoch, common.MetricNumConnectedPeers, @@ -127,6 +125,7 @@ func TestInitConfigMetrics(t *testing.T) { ESDTTransferRoleEnableEpoch: 33, BuiltInFunctionOnMetaEnableEpoch: 34, WaitingListFixEnableEpoch: 35, + HeartbeatDisableEpoch: 35, }, } @@ -167,6 +166,7 @@ func TestInitConfigMetrics(t *testing.T) { "erd_builtin_function_on_meta_enable_epoch": uint32(34), "erd_waiting_list_fix_enable_epoch": uint32(35), "erd_max_nodes_change_enable_epoch": nil, + "erd_heartbeat_disable_epoch": uint32(35), "erd_total_supply": "12345", "erd_hysteresis": "0.100000", "erd_adaptivity": "true", diff --git a/node/mock/blockProcessorStub.go b/node/mock/blockProcessorStub.go index aada60e09f5..39ed9b1c67b 100644 --- a/node/mock/blockProcessorStub.go +++ b/node/mock/blockProcessorStub.go @@ -5,7 +5,6 @@ import ( "time" "github.com/ElrondNetwork/elrond-go-core/data" - "github.com/ElrondNetwork/elrond-go/process/block/processedMb" ) // BlockProcessorStub mocks the implementation for a blockProcessor @@ -109,10 +108,6 @@ func (bps *BlockProcessorStub) CreateNewHeader(round uint64, nonce uint64) (data return bps.CreateNewHeaderCalled(round, nonce) } -// ApplyProcessedMiniBlocks - -func (bps *BlockProcessorStub) ApplyProcessedMiniBlocks(_ *processedMb.ProcessedMiniBlockTracker) { -} - // Close - func (bps *BlockProcessorStub) Close() error { return nil diff --git a/node/mock/factory/coreComponentsStub.go b/node/mock/factory/coreComponentsStub.go index 80e056e37e4..6f25166608b 100644 --- a/node/mock/factory/coreComponentsStub.go +++ b/node/mock/factory/coreComponentsStub.go @@ -53,6 +53,7 @@ type CoreComponentsMock struct { NodeTypeProviderField core.NodeTypeProviderHandler ArwenChangeLockerInternal common.Locker ProcessStatusHandlerInternal common.ProcessStatusHandler + HardforkTriggerPubKeyField []byte } // Create - @@ -253,6 +254,11 @@ func (ccm *CoreComponentsMock) String() string { return "CoreComponentsMock" } +// HardforkTriggerPubKey - +func (ccm *CoreComponentsMock) HardforkTriggerPubKey() []byte { + return ccm.HardforkTriggerPubKeyField +} + // IsInterfaceNil - func (ccm *CoreComponentsMock) IsInterfaceNil() bool { return ccm == nil diff --git a/node/mock/factory/networkComponentsMock.go b/node/mock/factory/networkComponentsMock.go index 4e45382b28e..2ea64e69bd0 100644 --- a/node/mock/factory/networkComponentsMock.go +++ b/node/mock/factory/networkComponentsMock.go @@ -8,11 +8,12 @@ import ( // NetworkComponentsMock - type NetworkComponentsMock struct { - Messenger p2p.Messenger - InputAntiFlood factory.P2PAntifloodHandler - OutputAntiFlood factory.P2PAntifloodHandler - PeerBlackList process.PeerBlackListCacher - PreferredPeersHolder factory.PreferredPeersHolderHandler + Messenger p2p.Messenger + InputAntiFlood factory.P2PAntifloodHandler + OutputAntiFlood factory.P2PAntifloodHandler + PeerBlackList 
process.PeerBlackListCacher + PreferredPeersHolder factory.PreferredPeersHolderHandler + PeersRatingHandlerField p2p.PeersRatingHandler } // PubKeyCacher - @@ -65,6 +66,11 @@ func (ncm *NetworkComponentsMock) PreferredPeersHolderHandler() factory.Preferre return ncm.PreferredPeersHolder } +// PeersRatingHandler - +func (ncm *NetworkComponentsMock) PeersRatingHandler() p2p.PeersRatingHandler { + return ncm.PeersRatingHandlerField +} + // String - func (ncm *NetworkComponentsMock) String() string { return "NetworkComponentsMock" diff --git a/node/mock/hardforkTriggerStub.go b/node/mock/hardforkTriggerStub.go deleted file mode 100644 index 6858c666c16..00000000000 --- a/node/mock/hardforkTriggerStub.go +++ /dev/null @@ -1,82 +0,0 @@ -package mock - -import "github.com/ElrondNetwork/elrond-go/update" - -// HardforkTriggerStub - -type HardforkTriggerStub struct { - TriggerCalled func(epoch uint32, withEarlyEndOfEpoch bool) error - IsSelfTriggerCalled func() bool - TriggerReceivedCalled func(payload []byte, data []byte, pkBytes []byte) (bool, error) - RecordedTriggerMessageCalled func() ([]byte, bool) - CreateDataCalled func() []byte - AddCloserCalled func(closer update.Closer) error - NotifyTriggerReceivedCalled func() <-chan struct{} -} - -// Trigger - -func (hts *HardforkTriggerStub) Trigger(epoch uint32, withEarlyEndOfEpoch bool) error { - if hts.TriggerCalled != nil { - return hts.TriggerCalled(epoch, withEarlyEndOfEpoch) - } - - return nil -} - -// IsSelfTrigger - -func (hts *HardforkTriggerStub) IsSelfTrigger() bool { - if hts.IsSelfTriggerCalled != nil { - return hts.IsSelfTriggerCalled() - } - - return false -} - -// TriggerReceived - -func (hts *HardforkTriggerStub) TriggerReceived(payload []byte, data []byte, pkBytes []byte) (bool, error) { - if hts.TriggerReceivedCalled != nil { - return hts.TriggerReceivedCalled(payload, data, pkBytes) - } - - return false, nil -} - -// RecordedTriggerMessage - -func (hts *HardforkTriggerStub) RecordedTriggerMessage() ([]byte, bool) { - if hts.RecordedTriggerMessageCalled != nil { - return hts.RecordedTriggerMessageCalled() - } - - return nil, false -} - -// CreateData - -func (hts *HardforkTriggerStub) CreateData() []byte { - if hts.CreateDataCalled != nil { - return hts.CreateDataCalled() - } - - return make([]byte, 0) -} - -// AddCloser - -func (hts *HardforkTriggerStub) AddCloser(closer update.Closer) error { - if hts.AddCloserCalled != nil { - return hts.AddCloserCalled(closer) - } - - return nil -} - -// NotifyTriggerReceived - -func (hts *HardforkTriggerStub) NotifyTriggerReceived() <-chan struct{} { - if hts.NotifyTriggerReceivedCalled != nil { - return hts.NotifyTriggerReceivedCalled() - } - - return make(chan struct{}) -} - -// IsInterfaceNil - -func (hts *HardforkTriggerStub) IsInterfaceNil() bool { - return hts == nil -} diff --git a/node/node.go b/node/node.go index 7c7520a79c1..52caa9224f8 100644 --- a/node/node.go +++ b/node/node.go @@ -62,7 +62,6 @@ type Node struct { consensusGroupSize int genesisTime time.Time peerDenialEvaluator p2p.PeerDenialEvaluator - hardforkTrigger HardforkTrigger esdtStorageHandler vmcommon.ESDTNFTStorageHandler consensusType string @@ -77,18 +76,19 @@ type Node struct { chanStopNodeProcess chan endProcess.ArgEndProcess - mutQueryHandlers syncGo.RWMutex - queryHandlers map[string]debug.QueryHandler - bootstrapComponents mainFactory.BootstrapComponentsHolder - consensusComponents mainFactory.ConsensusComponentsHolder - coreComponents mainFactory.CoreComponentsHolder - cryptoComponents 
mainFactory.CryptoComponentsHolder - dataComponents mainFactory.DataComponentsHolder - heartbeatComponents mainFactory.HeartbeatComponentsHolder - networkComponents mainFactory.NetworkComponentsHolder - processComponents mainFactory.ProcessComponentsHolder - stateComponents mainFactory.StateComponentsHolder - statusComponents mainFactory.StatusComponentsHolder + mutQueryHandlers syncGo.RWMutex + queryHandlers map[string]debug.QueryHandler + bootstrapComponents mainFactory.BootstrapComponentsHolder + consensusComponents mainFactory.ConsensusComponentsHolder + coreComponents mainFactory.CoreComponentsHolder + cryptoComponents mainFactory.CryptoComponentsHolder + dataComponents mainFactory.DataComponentsHolder + heartbeatComponents mainFactory.HeartbeatComponentsHolder + heartbeatV2Components mainFactory.HeartbeatV2ComponentsHolder + networkComponents mainFactory.NetworkComponentsHolder + processComponents mainFactory.ProcessComponentsHolder + stateComponents mainFactory.StateComponentsHolder + statusComponents mainFactory.StatusComponentsHolder closableComponents []mainFactory.Closer enableSignTxWithHashEpoch uint32 @@ -848,15 +848,38 @@ func (n *Node) GetCode(codeHash []byte) []byte { // GetHeartbeats returns the heartbeat status for each public key defined in genesis.json func (n *Node) GetHeartbeats() []heartbeatData.PubKeyHeartbeat { - if check.IfNil(n.heartbeatComponents) { - return make([]heartbeatData.PubKeyHeartbeat, 0) + dataMap := make(map[string]heartbeatData.PubKeyHeartbeat) + + if !check.IfNil(n.heartbeatComponents) { + v1Monitor := n.heartbeatComponents.Monitor() + if !check.IfNil(v1Monitor) { + n.addHeartbeatDataToMap(v1Monitor.GetHeartbeats(), dataMap) + } + } + + if !check.IfNil(n.heartbeatV2Components) { + v2Monitor := n.heartbeatV2Components.Monitor() + if !check.IfNil(v2Monitor) { + n.addHeartbeatDataToMap(v2Monitor.GetHeartbeats(), dataMap) + } } - mon := n.heartbeatComponents.Monitor() - if check.IfNil(mon) { - return make([]heartbeatData.PubKeyHeartbeat, 0) + + dataSlice := make([]heartbeatData.PubKeyHeartbeat, 0) + for _, hb := range dataMap { + dataSlice = append(dataSlice, hb) } - return mon.GetHeartbeats() + sort.Slice(dataSlice, func(i, j int) bool { + return strings.Compare(dataSlice[i].PublicKey, dataSlice[j].PublicKey) < 0 + }) + + return dataSlice +} + +func (n *Node) addHeartbeatDataToMap(data []heartbeatData.PubKeyHeartbeat, dataMap map[string]heartbeatData.PubKeyHeartbeat) { + for _, hb := range data { + dataMap[hb.PublicKey] = hb + } } // ValidatorStatisticsApi will return the statistics for all the validators from the initial nodes pub keys @@ -866,12 +889,12 @@ func (n *Node) ValidatorStatisticsApi() (map[string]*state.ValidatorApiResponse, // DirectTrigger will start the hardfork trigger func (n *Node) DirectTrigger(epoch uint32, withEarlyEndOfEpoch bool) error { - return n.hardforkTrigger.Trigger(epoch, withEarlyEndOfEpoch) + return n.processComponents.HardforkTrigger().Trigger(epoch, withEarlyEndOfEpoch) } // IsSelfTrigger returns true if the trigger's registered public key matches the self public key func (n *Node) IsSelfTrigger() bool { - return n.hardforkTrigger.IsSelfTrigger() + return n.processComponents.HardforkTrigger().IsSelfTrigger() } // EncodeAddressPubkey will encode the provided address public key bytes to string @@ -954,11 +977,6 @@ func (n *Node) GetPeerInfo(pid string) ([]core.QueryP2PPeerInfo, error) { return peerInfoSlice, nil } -// GetHardforkTrigger returns the hardfork trigger -func (n *Node) GetHardforkTrigger() HardforkTrigger { 
- return n.hardforkTrigger -} - // GetCoreComponents returns the core components func (n *Node) GetCoreComponents() mainFactory.CoreComponentsHolder { return n.coreComponents @@ -989,6 +1007,11 @@ func (n *Node) GetHeartbeatComponents() mainFactory.HeartbeatComponentsHolder { return n.heartbeatComponents } +// GetHeartbeatV2Components returns the heartbeatV2 components +func (n *Node) GetHeartbeatV2Components() mainFactory.HeartbeatV2ComponentsHolder { + return n.heartbeatV2Components +} + // GetNetworkComponents returns the network components func (n *Node) GetNetworkComponents() mainFactory.NetworkComponentsHolder { return n.networkComponents diff --git a/node/nodeHelper.go b/node/nodeHelper.go index ca5325539db..fd4f4f721cf 100644 --- a/node/nodeHelper.go +++ b/node/nodeHelper.go @@ -2,8 +2,6 @@ package node import ( "errors" - "fmt" - "path/filepath" "time" "github.com/ElrondNetwork/elrond-go-core/core" @@ -13,112 +11,11 @@ import ( nodeDisabled "github.com/ElrondNetwork/elrond-go/node/disabled" "github.com/ElrondNetwork/elrond-go/node/nodeDebugFactory" procFactory "github.com/ElrondNetwork/elrond-go/process/factory" - "github.com/ElrondNetwork/elrond-go/process/smartContract" "github.com/ElrondNetwork/elrond-go/process/throttle/antiflood/blackList" "github.com/ElrondNetwork/elrond-go/sharding" - "github.com/ElrondNetwork/elrond-go/sharding/nodesCoordinator" - "github.com/ElrondNetwork/elrond-go/state" - "github.com/ElrondNetwork/elrond-go/update" - updateFactory "github.com/ElrondNetwork/elrond-go/update/factory" - "github.com/ElrondNetwork/elrond-go/update/trigger" "github.com/ElrondNetwork/elrond-vm-common/builtInFunctions" ) -// CreateHardForkTrigger is the hard fork trigger factory -// TODO: move this to process components -func CreateHardForkTrigger( - config *config.Config, - epochConfig *config.EpochConfig, - shardCoordinator sharding.Coordinator, - nodesCoordinator nodesCoordinator.NodesCoordinator, - nodesShuffledOut update.Closer, - coreData factory.CoreComponentsHolder, - stateComponents factory.StateComponentsHolder, - data factory.DataComponentsHolder, - crypto factory.CryptoComponentsHolder, - process factory.ProcessComponentsHolder, - network factory.NetworkComponentsHolder, - epochStartNotifier factory.EpochStartNotifierWithConfirm, - importStartHandler update.ImportStartHandler, - workingDir string, -) (HardforkTrigger, error) { - - selfPubKeyBytes := crypto.PublicKeyBytes() - triggerPubKeyBytes, err := coreData.ValidatorPubKeyConverter().Decode(config.Hardfork.PublicKeyToListenFrom) - if err != nil { - return nil, fmt.Errorf("%w while decoding HardforkConfig.PublicKeyToListenFrom", err) - } - - accountsDBs := make(map[state.AccountsDbIdentifier]state.AccountsAdapter) - accountsDBs[state.UserAccountsState] = stateComponents.AccountsAdapter() - accountsDBs[state.PeerAccountsState] = stateComponents.PeerAccounts() - hardForkConfig := config.Hardfork - exportFolder := filepath.Join(workingDir, hardForkConfig.ImportFolder) - argsExporter := updateFactory.ArgsExporter{ - CoreComponents: coreData, - CryptoComponents: crypto, - HeaderValidator: process.HeaderConstructionValidator(), - DataPool: data.Datapool(), - StorageService: data.StorageService(), - RequestHandler: process.RequestHandler(), - ShardCoordinator: shardCoordinator, - Messenger: network.NetworkMessenger(), - ActiveAccountsDBs: accountsDBs, - ExistingResolvers: process.ResolversFinder(), - ExportFolder: exportFolder, - ExportTriesStorageConfig: hardForkConfig.ExportTriesStorageConfig, - 
ExportStateStorageConfig: hardForkConfig.ExportStateStorageConfig, - ExportStateKeysConfig: hardForkConfig.ExportKeysStorageConfig, - MaxTrieLevelInMemory: config.StateTriesConfig.MaxStateTrieLevelInMemory, - WhiteListHandler: process.WhiteListHandler(), - WhiteListerVerifiedTxs: process.WhiteListerVerifiedTxs(), - InterceptorsContainer: process.InterceptorsContainer(), - NodesCoordinator: nodesCoordinator, - HeaderSigVerifier: process.HeaderSigVerifier(), - HeaderIntegrityVerifier: process.HeaderIntegrityVerifier(), - ValidityAttester: process.BlockTracker(), - InputAntifloodHandler: network.InputAntiFloodHandler(), - OutputAntifloodHandler: network.OutputAntiFloodHandler(), - RoundHandler: process.RoundHandler(), - InterceptorDebugConfig: config.Debug.InterceptorResolver, - EnableSignTxWithHashEpoch: epochConfig.EnableEpochs.TransactionSignedWithTxHashEnableEpoch, - MaxHardCapForMissingNodes: config.TrieSync.MaxHardCapForMissingNodes, - NumConcurrentTrieSyncers: config.TrieSync.NumConcurrentTrieSyncers, - TrieSyncerVersion: config.TrieSync.TrieSyncerVersion, - } - hardForkExportFactory, err := updateFactory.NewExportHandlerFactory(argsExporter) - if err != nil { - return nil, err - } - - atArgumentParser := smartContract.NewArgumentParser() - argTrigger := trigger.ArgHardforkTrigger{ - TriggerPubKeyBytes: triggerPubKeyBytes, - SelfPubKeyBytes: selfPubKeyBytes, - Enabled: config.Hardfork.EnableTrigger, - EnabledAuthenticated: config.Hardfork.EnableTriggerFromP2P, - ArgumentParser: atArgumentParser, - EpochProvider: process.EpochStartTrigger(), - ExportFactoryHandler: hardForkExportFactory, - ChanStopNodeProcess: coreData.ChanStopNodeProcess(), - EpochConfirmedNotifier: epochStartNotifier, - CloseAfterExportInMinutes: config.Hardfork.CloseAfterExportInMinutes, - ImportStartHandler: importStartHandler, - RoundHandler: process.RoundHandler(), - } - hardforkTrigger, err := trigger.NewTrigger(argTrigger) - if err != nil { - return nil, err - } - - err = hardforkTrigger.AddCloser(nodesShuffledOut) - if err != nil { - return nil, fmt.Errorf("%w when adding nodeShufflerOut in hardForkTrigger", err) - } - - return hardforkTrigger, nil -} - // prepareOpenTopics will set to the anti flood handler the topics for which // the node can receive messages from others than validators func prepareOpenTopics( @@ -126,13 +23,14 @@ func prepareOpenTopics( shardCoordinator sharding.Coordinator, ) { selfID := shardCoordinator.SelfId() + selfShardHeartbeatV2Topic := common.HeartbeatV2Topic + core.CommunicationIdentifierBetweenShards(selfID, selfID) if selfID == core.MetachainShardId { - antiflood.SetTopicsForAll(common.HeartbeatTopic) + antiflood.SetTopicsForAll(common.HeartbeatTopic, common.PeerAuthenticationTopic, selfShardHeartbeatV2Topic, common.ConnectionTopic) return } selfShardTxTopic := procFactory.TransactionTopic + core.CommunicationIdentifierBetweenShards(selfID, selfID) - antiflood.SetTopicsForAll(common.HeartbeatTopic, selfShardTxTopic) + antiflood.SetTopicsForAll(common.HeartbeatTopic, common.PeerAuthenticationTopic, selfShardHeartbeatV2Topic, common.ConnectionTopic, selfShardTxTopic) } // CreateNode is the node factory @@ -147,6 +45,7 @@ func CreateNode( stateComponents factory.StateComponentsHandler, statusComponents factory.StatusComponentsHandler, heartbeatComponents factory.HeartbeatComponentsHandler, + heartbeatV2Components factory.HeartbeatV2ComponentsHandler, consensusComponents factory.ConsensusComponentsHandler, epochConfig config.EpochConfig, bootstrapRoundIndex uint64, @@ -197,6 +96,7 @@ 
func CreateNode( WithStatusComponents(statusComponents), WithProcessComponents(processComponents), WithHeartbeatComponents(heartbeatComponents), + WithHeartbeatV2Components(heartbeatV2Components), WithConsensusComponents(consensusComponents), WithNetworkComponents(networkComponents), WithInitialNodesPubKeys(coreComponents.GenesisNodesSetup().InitialNodesPubKeys()), @@ -207,7 +107,6 @@ func CreateNode( WithBootstrapRoundIndex(bootstrapRoundIndex), WithPeerDenialEvaluator(peerDenialEvaluator), WithRequestedItemsHandler(processComponents.RequestedItemsHandler()), - WithHardforkTrigger(consensusComponents.HardforkTrigger()), WithAddressSignatureSize(config.AddressPubkeyConverter.SignatureLength), WithValidatorSignatureSize(config.ValidatorPubkeyConverter.SignatureLength), WithPublicKeySize(config.ValidatorPubkeyConverter.Length), diff --git a/node/nodeRunner.go b/node/nodeRunner.go index 22cff159711..cf85041524e 100644 --- a/node/nodeRunner.go +++ b/node/nodeRunner.go @@ -43,7 +43,6 @@ import ( storageFactory "github.com/ElrondNetwork/elrond-go/storage/factory" "github.com/ElrondNetwork/elrond-go/storage/storageUnit" "github.com/ElrondNetwork/elrond-go/storage/timecache" - "github.com/ElrondNetwork/elrond-go/update" "github.com/ElrondNetwork/elrond-go/update/trigger" "github.com/google/gops/agent" ) @@ -175,12 +174,16 @@ func printEnableEpochs(configs *config.Configs) { log.Debug(readEpochFor("correct jailed not unstaked if empty queue"), "epoch", enableEpochs.CorrectJailedNotUnstakedEmptyQueueEpoch) log.Debug(readEpochFor("do not return old block in blockchain hook"), "epoch", enableEpochs.DoNotReturnOldBlockInBlockchainHookEnableEpoch) log.Debug(readEpochFor("scr size invariant check on built in"), "epoch", enableEpochs.SCRSizeInvariantOnBuiltInResultEnableEpoch) + log.Debug(readEpochFor("correct check on tokenID for transfer role"), "epoch", enableEpochs.CheckCorrectTokenIDForTransferRoleEnableEpoch) log.Debug(readEpochFor("fail execution on every wrong API call"), "epoch", enableEpochs.FailExecutionOnEveryAPIErrorEnableEpoch) + log.Debug(readEpochFor("disable heartbeat v1"), "epoch", enableEpochs.HeartbeatDisableEpoch) log.Debug(readEpochFor("limit validators"), "epoch", enableEpochs.StakeLimitsEnableEpoch) log.Debug(readEpochFor("staking v4 init"), "epoch", enableEpochs.StakingV4InitEnableEpoch) log.Debug(readEpochFor("staking v4 enable"), "epoch", enableEpochs.StakingV4EnableEpoch) log.Debug(readEpochFor("staking v4 distribute auction to waiting"), "epoch", enableEpochs.StakingV4DistributeAuctionToWaitingEpoch) + + log.Debug(readEpochFor("mini block partial execution"), "epoch", enableEpochs.MiniBlockPartialExecutionEnableEpoch) gasSchedule := configs.EpochConfig.GasSchedule log.Debug(readEpochFor("gas schedule directories paths"), "epoch", gasSchedule.GasScheduleByEpochs) @@ -381,6 +384,12 @@ func (nr *nodeRunner) executeOneComponentCreationCycle( return true, err } + hardforkTrigger := managedProcessComponents.HardforkTrigger() + err = hardforkTrigger.AddCloser(nodesShufflerOut) + if err != nil { + return true, fmt.Errorf("%w when adding nodeShufflerOut in hardForkTrigger", err) + } + managedStatusComponents.SetForkDetector(managedProcessComponents.ForkDetector()) err = managedStatusComponents.StartPolling() if err != nil { @@ -393,13 +402,10 @@ func (nr *nodeRunner) executeOneComponentCreationCycle( managedCoreComponents, managedNetworkComponents, managedCryptoComponents, - managedBootstrapComponents, managedDataComponents, managedStateComponents, managedStatusComponents, 
managedProcessComponents, - nodesCoordinator, - nodesShufflerOut, ) if err != nil { return true, err @@ -411,7 +417,6 @@ func (nr *nodeRunner) executeOneComponentCreationCycle( managedCryptoComponents, managedDataComponents, managedProcessComponents, - managedConsensusComponents.HardforkTrigger(), managedProcessComponents.NodeRedundancyHandler(), ) @@ -419,6 +424,19 @@ func (nr *nodeRunner) executeOneComponentCreationCycle( return true, err } + managedHeartbeatV2Components, err := nr.CreateManagedHeartbeatV2Components( + managedBootstrapComponents, + managedCoreComponents, + managedNetworkComponents, + managedCryptoComponents, + managedDataComponents, + managedProcessComponents, + ) + + if err != nil { + return true, err + } + log.Debug("creating node structure") currentNode, err := CreateNode( configs.GeneralConfig, @@ -431,6 +449,7 @@ func (nr *nodeRunner) executeOneComponentCreationCycle( managedStateComponents, managedStatusComponents, managedHeartbeatComponents, + managedHeartbeatV2Components, managedConsensusComponents, *configs.EpochConfig, flagsConfig.BootstrapRoundIndex, @@ -624,34 +643,11 @@ func (nr *nodeRunner) CreateManagedConsensusComponents( coreComponents mainFactory.CoreComponentsHolder, networkComponents mainFactory.NetworkComponentsHolder, cryptoComponents mainFactory.CryptoComponentsHolder, - bootstrapComponents mainFactory.BootstrapComponentsHolder, dataComponents mainFactory.DataComponentsHolder, stateComponents mainFactory.StateComponentsHolder, statusComponents mainFactory.StatusComponentsHolder, processComponents mainFactory.ProcessComponentsHolder, - nodesCoordinator nodesCoordinator.NodesCoordinator, - nodesShuffledOut update.Closer, ) (mainFactory.ConsensusComponentsHandler, error) { - hardForkTrigger, err := CreateHardForkTrigger( - nr.configs.GeneralConfig, - nr.configs.EpochConfig, - bootstrapComponents.ShardCoordinator(), - nodesCoordinator, - nodesShuffledOut, - coreComponents, - stateComponents, - dataComponents, - cryptoComponents, - processComponents, - networkComponents, - coreComponents.EpochStartNotifierWithConfirm(), - processComponents.ImportStartHandler(), - nr.configs.FlagsConfig.WorkingDir, - ) - if err != nil { - return nil, err - } - scheduledProcessorArgs := spos.ScheduledProcessorWrapperArgs{ SyncTimer: coreComponents.SyncTimer(), Processor: processComponents.BlockProcessor(), @@ -666,7 +662,6 @@ func (nr *nodeRunner) CreateManagedConsensusComponents( consensusArgs := mainFactory.ConsensusComponentsFactoryArgs{ Config: *nr.configs.GeneralConfig, BootstrapRoundIndex: nr.configs.FlagsConfig.BootstrapRoundIndex, - HardforkTrigger: hardForkTrigger, CoreComponents: coreComponents, NetworkComponents: networkComponents, CryptoComponents: cryptoComponents, @@ -702,23 +697,22 @@ func (nr *nodeRunner) CreateManagedHeartbeatComponents( cryptoComponents mainFactory.CryptoComponentsHolder, dataComponents mainFactory.DataComponentsHolder, processComponents mainFactory.ProcessComponentsHolder, - hardforkTrigger HardforkTrigger, redundancyHandler consensus.NodeRedundancyHandler, ) (mainFactory.HeartbeatComponentsHandler, error) { genesisTime := time.Unix(coreComponents.GenesisNodesSetup().GetStartTime(), 0) heartbeatArgs := mainFactory.HeartbeatComponentsFactoryArgs{ - Config: *nr.configs.GeneralConfig, - Prefs: *nr.configs.PreferencesConfig, - AppVersion: nr.configs.FlagsConfig.Version, - GenesisTime: genesisTime, - HardforkTrigger: hardforkTrigger, - RedundancyHandler: redundancyHandler, - CoreComponents: coreComponents, - DataComponents: dataComponents, - 
NetworkComponents: networkComponents, - CryptoComponents: cryptoComponents, - ProcessComponents: processComponents, + Config: *nr.configs.GeneralConfig, + Prefs: *nr.configs.PreferencesConfig, + AppVersion: nr.configs.FlagsConfig.Version, + GenesisTime: genesisTime, + RedundancyHandler: redundancyHandler, + CoreComponents: coreComponents, + DataComponents: dataComponents, + NetworkComponents: networkComponents, + CryptoComponents: cryptoComponents, + ProcessComponents: processComponents, + HeartbeatDisableEpoch: nr.configs.EpochConfig.EnableEpochs.HeartbeatDisableEpoch, } heartbeatComponentsFactory, err := mainFactory.NewHeartbeatComponentsFactory(heartbeatArgs) @@ -738,6 +732,44 @@ func (nr *nodeRunner) CreateManagedHeartbeatComponents( return managedHeartbeatComponents, nil } +// CreateManagedHeartbeatV2Components is the managed heartbeatV2 components factory +func (nr *nodeRunner) CreateManagedHeartbeatV2Components( + bootstrapComponents mainFactory.BootstrapComponentsHolder, + coreComponents mainFactory.CoreComponentsHolder, + networkComponents mainFactory.NetworkComponentsHolder, + cryptoComponents mainFactory.CryptoComponentsHolder, + dataComponents mainFactory.DataComponentsHolder, + processComponents mainFactory.ProcessComponentsHolder, +) (mainFactory.HeartbeatV2ComponentsHandler, error) { + heartbeatV2Args := mainFactory.ArgHeartbeatV2ComponentsFactory{ + Config: *nr.configs.GeneralConfig, + Prefs: *nr.configs.PreferencesConfig, + AppVersion: nr.configs.FlagsConfig.Version, + BoostrapComponents: bootstrapComponents, + CoreComponents: coreComponents, + DataComponents: dataComponents, + NetworkComponents: networkComponents, + CryptoComponents: cryptoComponents, + ProcessComponents: processComponents, + } + + heartbeatV2ComponentsFactory, err := mainFactory.NewHeartbeatV2ComponentsFactory(heartbeatV2Args) + if err != nil { + return nil, fmt.Errorf("NewHeartbeatV2ComponentsFactory failed: %w", err) + } + + managedHeartbeatV2Components, err := mainFactory.NewManagedHeartbeatV2Components(heartbeatV2ComponentsFactory) + if err != nil { + return nil, err + } + + err = managedHeartbeatV2Components.Create() + if err != nil { + return nil, err + } + return managedHeartbeatV2Components, nil +} + func waitForSignal( sigs chan os.Signal, chanStopNodeProcess chan endProcess.ArgEndProcess, @@ -1172,21 +1204,21 @@ func (nr *nodeRunner) CreateManagedBootstrapComponents( func (nr *nodeRunner) CreateManagedNetworkComponents( coreComponents mainFactory.CoreComponentsHolder, ) (mainFactory.NetworkComponentsHandler, error) { - decodedPreferredPubKeys, err := decodeValidatorPubKeys(*nr.configs.PreferencesConfig, coreComponents.ValidatorPubKeyConverter()) + decodedPreferredPeers, err := decodePreferredPeers(*nr.configs.PreferencesConfig, coreComponents.ValidatorPubKeyConverter()) if err != nil { return nil, err } networkComponentsFactoryArgs := mainFactory.NetworkComponentsFactoryArgs{ - P2pConfig: *nr.configs.P2pConfig, - MainConfig: *nr.configs.GeneralConfig, - RatingsConfig: *nr.configs.RatingsConfig, - StatusHandler: coreComponents.StatusHandler(), - Marshalizer: coreComponents.InternalMarshalizer(), - Syncer: coreComponents.SyncTimer(), - PreferredPublicKeys: decodedPreferredPubKeys, - BootstrapWaitTime: common.TimeToWaitForP2PBootstrap, - NodeOperationMode: p2p.NormalOperation, + P2pConfig: *nr.configs.P2pConfig, + MainConfig: *nr.configs.GeneralConfig, + RatingsConfig: *nr.configs.RatingsConfig, + StatusHandler: coreComponents.StatusHandler(), + Marshalizer: coreComponents.InternalMarshalizer(), + 
Syncer: coreComponents.SyncTimer(), + PreferredPeersSlices: decodedPreferredPeers, + BootstrapWaitTime: common.TimeToWaitForP2PBootstrap, + NodeOperationMode: p2p.NormalOperation, } if nr.configs.ImportDbConfig.IsImportDBMode { networkComponentsFactoryArgs.BootstrapWaitTime = 0 @@ -1451,18 +1483,18 @@ func enableGopsIfNeeded(gopsEnabled bool) { log.Trace("gops", "enabled", gopsEnabled) } -func decodeValidatorPubKeys(prefConfig config.Preferences, validatorPubKeyConverter core.PubkeyConverter) ([][]byte, error) { - decodedPublicKeys := make([][]byte, 0) - for _, pubKey := range prefConfig.Preferences.PreferredConnections { - pubKeyBytes, err := validatorPubKeyConverter.Decode(pubKey) +func decodePreferredPeers(prefConfig config.Preferences, validatorPubKeyConverter core.PubkeyConverter) ([]string, error) { + decodedPeers := make([]string, 0) + for _, connectionSlice := range prefConfig.Preferences.PreferredConnections { + peerBytes, err := validatorPubKeyConverter.Decode(connectionSlice) if err != nil { - return nil, fmt.Errorf("cannot decode preferred public key(%s) : %w", pubKey, err) + return nil, fmt.Errorf("cannot decode preferred peer(%s) : %w", connectionSlice, err) } - decodedPublicKeys = append(decodedPublicKeys, pubKeyBytes) + decodedPeers = append(decodedPeers, string(peerBytes)) } - return decodedPublicKeys, nil + return decodedPeers, nil } func createWhiteListerVerifiedTxs(generalConfig *config.Config) (process.WhiteListHandler, error) { diff --git a/node/node_test.go b/node/node_test.go index 723937fb408..e60138b81be 100644 --- a/node/node_test.go +++ b/node/node_test.go @@ -8,6 +8,7 @@ import ( "errors" "fmt" "math/big" + "sort" "strings" "sync" "sync/atomic" @@ -27,12 +28,14 @@ import ( "github.com/ElrondNetwork/elrond-go-core/hashing" "github.com/ElrondNetwork/elrond-go-core/hashing/sha256" "github.com/ElrondNetwork/elrond-go-core/marshal" - crypto "github.com/ElrondNetwork/elrond-go-crypto" + "github.com/ElrondNetwork/elrond-go-crypto" "github.com/ElrondNetwork/elrond-go/common" "github.com/ElrondNetwork/elrond-go/dataRetriever" "github.com/ElrondNetwork/elrond-go/dblookupext/esdtSupply" "github.com/ElrondNetwork/elrond-go/factory" factoryMock "github.com/ElrondNetwork/elrond-go/factory/mock" + heartbeatData "github.com/ElrondNetwork/elrond-go/heartbeat/data" + integrationTestsMock "github.com/ElrondNetwork/elrond-go/integrationTests/mock" "github.com/ElrondNetwork/elrond-go/node" "github.com/ElrondNetwork/elrond-go/node/mock" nodeMockFactory "github.com/ElrondNetwork/elrond-go/node/mock/factory" @@ -48,7 +51,6 @@ import ( "github.com/ElrondNetwork/elrond-go/testscommon/p2pmocks" "github.com/ElrondNetwork/elrond-go/testscommon/shardingMocks" stateMock "github.com/ElrondNetwork/elrond-go/testscommon/state" - "github.com/ElrondNetwork/elrond-go/testscommon/statusHandler" statusHandlerMock "github.com/ElrondNetwork/elrond-go/testscommon/statusHandler" trieMock "github.com/ElrondNetwork/elrond-go/testscommon/trie" "github.com/ElrondNetwork/elrond-go/testscommon/txsSenderMock" @@ -2956,7 +2958,7 @@ func TestNode_DirectTrigger(t *testing.T) { epoch := uint32(47839) recoveredEpoch := uint32(0) recoveredWithEarlyEndOfEpoch := atomicCore.Flag{} - hardforkTrigger := &mock.HardforkTriggerStub{ + hardforkTrigger := &testscommon.HardforkTriggerStub{ TriggerCalled: func(epoch uint32, withEarlyEndOfEpoch bool) error { wasCalled = true atomic.StoreUint32(&recoveredEpoch, epoch) @@ -2965,8 +2967,13 @@ func TestNode_DirectTrigger(t *testing.T) { return nil }, } + + processComponents := 
&integrationTestsMock.ProcessComponentsStub{ + HardforkTriggerField: hardforkTrigger, + } + n, _ := node.NewNode( - node.WithHardforkTrigger(hardforkTrigger), + node.WithProcessComponents(processComponents), ) err := n.DirectTrigger(epoch, true) @@ -2981,15 +2988,20 @@ func TestNode_IsSelfTrigger(t *testing.T) { t.Parallel() wasCalled := false - hardforkTrigger := &mock.HardforkTriggerStub{ + hardforkTrigger := &testscommon.HardforkTriggerStub{ IsSelfTriggerCalled: func() bool { wasCalled = true return true }, } + + processComponents := &integrationTestsMock.ProcessComponentsStub{ + HardforkTriggerField: hardforkTrigger, + } + n, _ := node.NewNode( - node.WithHardforkTrigger(hardforkTrigger), + node.WithProcessComponents(processComponents), ) isSelf := n.IsSelfTrigger() @@ -3625,6 +3637,245 @@ func TestNode_SendBulkTransactions(t *testing.T) { require.Nil(t, err) } +func TestNode_GetHeartbeats(t *testing.T) { + t.Parallel() + + t.Run("only heartbeat v1", func(t *testing.T) { + t.Parallel() + + numMessages := 5 + providedMessages := make([]heartbeatData.PubKeyHeartbeat, numMessages) + for i := 0; i < numMessages; i++ { + providedMessages[i] = createHeartbeatMessage("v1", i, true) + } + + heartbeatComponents := createMockHeartbeatV1Components(providedMessages) + + t.Run("should work - nil heartbeatV2Components", func(t *testing.T) { + n, err := node.NewNode(node.WithHeartbeatComponents(heartbeatComponents)) + require.Nil(t, err) + + receivedMessages := n.GetHeartbeats() + assert.True(t, sameMessages(providedMessages, receivedMessages)) + }) + t.Run("should work - nil heartbeatV2Components monitor", func(t *testing.T) { + n, err := node.NewNode(node.WithHeartbeatComponents(heartbeatComponents), + node.WithHeartbeatV2Components(&factoryMock.HeartbeatV2ComponentsStub{})) + require.Nil(t, err) + + receivedMessages := n.GetHeartbeats() + assert.True(t, sameMessages(providedMessages, receivedMessages)) + }) + t.Run("should work - heartbeatV2Components no messages", func(t *testing.T) { + heartbeatV2Components := createMockHeartbeatV2Components(nil) + n, err := node.NewNode(node.WithHeartbeatComponents(heartbeatComponents), + node.WithHeartbeatV2Components(heartbeatV2Components)) + require.Nil(t, err) + + receivedMessages := n.GetHeartbeats() + assert.True(t, sameMessages(providedMessages, receivedMessages)) + }) + }) + + t.Run("only heartbeat v2", func(t *testing.T) { + t.Parallel() + + numMessages := 5 + providedMessages := make([]heartbeatData.PubKeyHeartbeat, numMessages) + for i := 0; i < numMessages; i++ { + providedMessages[i] = createHeartbeatMessage("v2", i, true) + } + + heartbeatV2Components := createMockHeartbeatV2Components(providedMessages) + + t.Run("should work - nil heartbeatComponents", func(t *testing.T) { + n, err := node.NewNode(node.WithHeartbeatV2Components(heartbeatV2Components)) + require.Nil(t, err) + + receivedMessages := n.GetHeartbeats() + assert.True(t, sameMessages(providedMessages, receivedMessages)) + }) + t.Run("should work - nil heartbeatComponents monitor", func(t *testing.T) { + n, err := node.NewNode(node.WithHeartbeatV2Components(heartbeatV2Components), + node.WithHeartbeatComponents(&factoryMock.HeartbeatComponentsStub{})) + require.Nil(t, err) + + receivedMessages := n.GetHeartbeats() + assert.True(t, sameMessages(providedMessages, receivedMessages)) + }) + t.Run("should work - heartbeatComponents no messages", func(t *testing.T) { + heartbeatComponents := createMockHeartbeatV1Components(nil) + n, err := 
node.NewNode(node.WithHeartbeatV2Components(heartbeatV2Components), + node.WithHeartbeatComponents(heartbeatComponents)) + require.Nil(t, err) + + receivedMessages := n.GetHeartbeats() + assert.True(t, sameMessages(providedMessages, receivedMessages)) + }) + }) + t.Run("mixed messages", func(t *testing.T) { + t.Parallel() + + t.Run("same public keys in both versions should work", func(t *testing.T) { + t.Parallel() + + numV1Messages := 3 + providedV1Messages := make([]heartbeatData.PubKeyHeartbeat, numV1Messages) + for i := 0; i < numV1Messages; i++ { + providedV1Messages[i] = createHeartbeatMessage("same_prefix", i, false) + } + heartbeatV1Components := createMockHeartbeatV1Components(providedV1Messages) + + numV2Messages := 5 + providedV2Messages := make([]heartbeatData.PubKeyHeartbeat, numV2Messages) + for i := 0; i < numV2Messages; i++ { + providedV2Messages[i] = createHeartbeatMessage("same_prefix", i, true) + } + heartbeatV2Components := createMockHeartbeatV2Components(providedV2Messages) + + n, err := node.NewNode(node.WithHeartbeatComponents(heartbeatV1Components), + node.WithHeartbeatV2Components(heartbeatV2Components)) + require.Nil(t, err) + + receivedMessages := n.GetHeartbeats() + // should be the same messages from V2 + assert.True(t, sameMessages(providedV2Messages, receivedMessages)) + }) + t.Run("different public keys should work", func(t *testing.T) { + t.Parallel() + + numV1Messages := 3 + providedV1Messages := make([]heartbeatData.PubKeyHeartbeat, numV1Messages) + for i := 0; i < numV1Messages; i++ { + providedV1Messages[i] = createHeartbeatMessage("v1", i, false) + } + heartbeatV1Components := createMockHeartbeatV1Components(providedV1Messages) + + numV2Messages := 5 + providedV2Messages := make([]heartbeatData.PubKeyHeartbeat, numV2Messages) + for i := 0; i < numV2Messages; i++ { + providedV2Messages[i] = createHeartbeatMessage("v2", i, true) + } + heartbeatV2Components := createMockHeartbeatV2Components(providedV2Messages) + + n, err := node.NewNode(node.WithHeartbeatComponents(heartbeatV1Components), + node.WithHeartbeatV2Components(heartbeatV2Components)) + require.Nil(t, err) + + // result should be the merged lists, sorted + providedMessages := providedV1Messages + providedMessages = append(providedMessages, providedV2Messages...) 
+ sort.Slice(providedMessages, func(i, j int) bool { + return strings.Compare(providedMessages[i].PublicKey, providedMessages[j].PublicKey) < 0 + }) + + receivedMessages := n.GetHeartbeats() + // should be all messages, merged + assert.True(t, sameMessages(providedMessages, receivedMessages)) + }) + t.Run("common public keys should work", func(t *testing.T) { + t.Parallel() + + providedV1Messages := make([]heartbeatData.PubKeyHeartbeat, 0) + v1Message := createHeartbeatMessage("v1", 0, false) + providedV1Messages = append(providedV1Messages, v1Message) + + providedV2Messages := make([]heartbeatData.PubKeyHeartbeat, 0) + v2Message := createHeartbeatMessage("v2", 0, true) + providedV2Messages = append(providedV2Messages, v2Message) + + commonMessage := createHeartbeatMessage("common", 0, true) + providedV1Messages = append(providedV1Messages, commonMessage) + providedV2Messages = append(providedV2Messages, commonMessage) + + heartbeatV1Components := createMockHeartbeatV1Components(providedV1Messages) + heartbeatV2Components := createMockHeartbeatV2Components(providedV2Messages) + + n, err := node.NewNode(node.WithHeartbeatComponents(heartbeatV1Components), + node.WithHeartbeatV2Components(heartbeatV2Components)) + require.Nil(t, err) + + // Result should be of len 3: one common message plus 1 different in each one + providedMessages := []heartbeatData.PubKeyHeartbeat{commonMessage, v1Message, v2Message} + + receivedMessages := n.GetHeartbeats() + assert.True(t, sameMessages(providedMessages, receivedMessages)) + }) + }) +} + +func createMockHeartbeatV1Components(providedMessages []heartbeatData.PubKeyHeartbeat) *factoryMock.HeartbeatComponentsStub { + heartbeatComponents := &factoryMock.HeartbeatComponentsStub{} + heartbeatComponents.MonitorField = &integrationTestsMock.HeartbeatMonitorStub{ + GetHeartbeatsCalled: func() []heartbeatData.PubKeyHeartbeat { + return providedMessages + }, + } + + return heartbeatComponents +} + +func createMockHeartbeatV2Components(providedMessages []heartbeatData.PubKeyHeartbeat) *factoryMock.HeartbeatV2ComponentsStub { + heartbeatV2Components := &factoryMock.HeartbeatV2ComponentsStub{} + heartbeatV2Components.MonitorField = &integrationTestsMock.HeartbeatMonitorStub{ + GetHeartbeatsCalled: func() []heartbeatData.PubKeyHeartbeat { + return providedMessages + }, + } + + return heartbeatV2Components +} + +func sameMessages(provided, received []heartbeatData.PubKeyHeartbeat) bool { + providedLen, receivedLen := len(provided), len(received) + if receivedLen != providedLen { + return false + } + + areEqual := true + for i := 0; i < providedLen; i++ { + p := provided[i] + r := received[i] + areEqual = areEqual && + (p.PublicKey == r.PublicKey) && + (p.TimeStamp == r.TimeStamp) && + (p.IsActive == r.IsActive) && + (p.ReceivedShardID == r.ReceivedShardID) && + (p.ComputedShardID == r.ComputedShardID) && + (p.VersionNumber == r.VersionNumber) && + (p.Identity == r.Identity) && + (p.PeerType == r.PeerType) && + (p.Nonce == r.Nonce) && + (p.NumInstances == r.NumInstances) && + (p.PeerSubType == r.PeerSubType) && + (p.PidString == r.PidString) + + if !areEqual { + return false + } + } + + return true +} + +func createHeartbeatMessage(prefix string, idx int, isActive bool) heartbeatData.PubKeyHeartbeat { + return heartbeatData.PubKeyHeartbeat{ + PublicKey: fmt.Sprintf("%d%spk", idx, prefix), + TimeStamp: time.Now(), + IsActive: isActive, + ReceivedShardID: 0, + ComputedShardID: 0, + VersionNumber: "v01", + NodeDisplayName: fmt.Sprintf("%d%s", idx, "node"), + Identity: 
"identity", + PeerType: core.ValidatorPeer.String(), + Nonce: 10, + NumInstances: 1, + PeerSubType: 1, + PidString: fmt.Sprintf("%d%spid", idx, prefix), + } +} + func getDefaultCoreComponents() *nodeMockFactory.CoreComponentsMock { return &nodeMockFactory.CoreComponentsMock{ IntMarsh: &testscommon.MarshalizerMock{}, @@ -3642,7 +3893,7 @@ func getDefaultCoreComponents() *nodeMockFactory.CoreComponentsMock { MinTransactionVersionCalled: func() uint32 { return 1 }, - AppStatusHdl: &statusHandler.AppStatusHandlerStub{}, + AppStatusHdl: &statusHandlerMock.AppStatusHandlerStub{}, WDTimer: &testscommon.WatchdogMock{}, Alarm: &testscommon.AlarmSchedulerStub{}, NtpTimer: &testscommon.SyncTimerStub{}, diff --git a/node/options.go b/node/options.go index 630c7530a4b..cd9ca396e22 100644 --- a/node/options.go +++ b/node/options.go @@ -159,6 +159,22 @@ func WithHeartbeatComponents(heartbeatComponents factory.HeartbeatComponentsHand } } +// WithHeartbeatV2Components sets up the Node heartbeatV2 components +func WithHeartbeatV2Components(heartbeatV2Components factory.HeartbeatV2ComponentsHandler) Option { + return func(n *Node) error { + if check.IfNil(heartbeatV2Components) { + return ErrNilStatusComponents + } + err := heartbeatV2Components.CheckSubcomponents() + if err != nil { + return err + } + n.heartbeatV2Components = heartbeatV2Components + n.closableComponents = append(n.closableComponents, heartbeatV2Components) + return nil + } +} + // WithConsensusComponents sets up the Node consensus components func WithConsensusComponents(consensusComponents factory.ConsensusComponentsHandler) Option { return func(n *Node) error { @@ -252,19 +268,6 @@ func WithRequestedItemsHandler(requestedItemsHandler dataRetriever.RequestedItem } } -// WithHardforkTrigger sets up a hardfork trigger -func WithHardforkTrigger(hardforkTrigger HardforkTrigger) Option { - return func(n *Node) error { - if check.IfNil(hardforkTrigger) { - return ErrNilHardforkTrigger - } - - n.hardforkTrigger = hardforkTrigger - - return nil - } -} - // WithAddressSignatureSize sets up an addressSignatureSize option for the Node func WithAddressSignatureSize(signatureSize int) Option { return func(n *Node) error { diff --git a/node/options_test.go b/node/options_test.go index 7f034c5a7c0..a3e9002d8d5 100644 --- a/node/options_test.go +++ b/node/options_test.go @@ -183,30 +183,6 @@ func TestWithPeerDenialEvaluator_OkHandlerShouldWork(t *testing.T) { assert.Nil(t, err) } -func TestWithHardforkTrigger_NilHardforkTriggerShouldErr(t *testing.T) { - t.Parallel() - - node, _ := NewNode() - - opt := WithHardforkTrigger(nil) - err := opt(node) - - assert.Equal(t, ErrNilHardforkTrigger, err) -} - -func TestWithHardforkTrigger_ShouldWork(t *testing.T) { - t.Parallel() - - node, _ := NewNode() - - hardforkTrigger := &mock.HardforkTriggerStub{} - opt := WithHardforkTrigger(hardforkTrigger) - err := opt(node) - - assert.Nil(t, err) - assert.True(t, node.hardforkTrigger == hardforkTrigger) -} - func TestWithAddressSignatureSize(t *testing.T) { t.Parallel() diff --git a/node/trieIterators/directStakedListProcessor_test.go b/node/trieIterators/directStakedListProcessor_test.go index 2340860973c..330b5bbe478 100644 --- a/node/trieIterators/directStakedListProcessor_test.go +++ b/node/trieIterators/directStakedListProcessor_test.go @@ -87,7 +87,7 @@ func TestDirectStakedListProc_GetDelegatorsListContextShouldTimeout(t *testing.T defer cancel() directStakedList, err := dslp.GetDirectStakedList(ctxWithTimeout) - require.Equal(t,ErrTrieOperationsTimeout, err) + 
require.Equal(t, ErrTrieOperationsTimeout, err) require.Nil(t, directStakedList) } diff --git a/outport/errors.go b/outport/errors.go index bfdb5d2bed6..eb44d2c671f 100644 --- a/outport/errors.go +++ b/outport/errors.go @@ -10,3 +10,6 @@ var ErrNilArgsOutportFactory = errors.New("nil args outport driver factory") // ErrInvalidRetrialInterval signals that an invalid retrial interval was provided var ErrInvalidRetrialInterval = errors.New("invalid retrial interval") + +// ErrNilPubKeyConverter signals that a nil pubkey converter has been provided +var ErrNilPubKeyConverter = errors.New("nil pub key converter") diff --git a/outport/factory/notifierFactory.go b/outport/factory/notifierFactory.go new file mode 100644 index 00000000000..60d21da7967 --- /dev/null +++ b/outport/factory/notifierFactory.go @@ -0,0 +1,59 @@ +package factory + +import ( + "github.com/ElrondNetwork/elrond-go-core/core" + "github.com/ElrondNetwork/elrond-go-core/core/check" + "github.com/ElrondNetwork/elrond-go-core/hashing" + "github.com/ElrondNetwork/elrond-go-core/marshal" + "github.com/ElrondNetwork/elrond-go/outport" + "github.com/ElrondNetwork/elrond-go/outport/notifier" +) + +// EventNotifierFactoryArgs defines the args needed for event notifier creation +type EventNotifierFactoryArgs struct { + Enabled bool + UseAuthorization bool + ProxyUrl string + Username string + Password string + Marshaller marshal.Marshalizer + Hasher hashing.Hasher + PubKeyConverter core.PubkeyConverter +} + +// CreateEventNotifier will create a new event notifier client instance +func CreateEventNotifier(args *EventNotifierFactoryArgs) (outport.Driver, error) { + if err := checkInputArgs(args); err != nil { + return nil, err + } + + httpClient := notifier.NewHttpClient(notifier.HttpClientArgs{ + UseAuthorization: args.UseAuthorization, + Username: args.Username, + Password: args.Password, + BaseUrl: args.ProxyUrl, + }) + + notifierArgs := notifier.ArgsEventNotifier{ + HttpClient: httpClient, + Marshalizer: args.Marshaller, + Hasher: args.Hasher, + PubKeyConverter: args.PubKeyConverter, + } + + return notifier.NewEventNotifier(notifierArgs) +} + +func checkInputArgs(args *EventNotifierFactoryArgs) error { + if check.IfNil(args.Marshaller) { + return core.ErrNilMarshalizer + } + if check.IfNil(args.Hasher) { + return core.ErrNilHasher + } + if check.IfNil(args.PubKeyConverter) { + return outport.ErrNilPubKeyConverter + } + + return nil +} diff --git a/outport/factory/notifierFactory_test.go b/outport/factory/notifierFactory_test.go new file mode 100644 index 00000000000..1c673aac63d --- /dev/null +++ b/outport/factory/notifierFactory_test.go @@ -0,0 +1,70 @@ +package factory_test + +import ( + "testing" + + "github.com/ElrondNetwork/elrond-go-core/core" + "github.com/ElrondNetwork/elrond-go/outport" + "github.com/ElrondNetwork/elrond-go/outport/factory" + "github.com/ElrondNetwork/elrond-go/testscommon" + "github.com/ElrondNetwork/elrond-go/testscommon/hashingMocks" + "github.com/stretchr/testify/require" +) + +func createMockNotifierFactoryArgs() *factory.EventNotifierFactoryArgs { + return &factory.EventNotifierFactoryArgs{ + Enabled: true, + UseAuthorization: true, + ProxyUrl: "http://localhost:5000", + Username: "", + Password: "", + Marshaller: &testscommon.MarshalizerMock{}, + Hasher: &hashingMocks.HasherMock{}, + PubKeyConverter: &testscommon.PubkeyConverterMock{}, + } +} + +func TestCreateEventNotifier(t *testing.T) { + t.Parallel() + + t.Run("nil marshaller", func(t *testing.T) { + t.Parallel() + + args := 
createMockNotifierFactoryArgs() + args.Marshaller = nil + + en, err := factory.CreateEventNotifier(args) + require.Nil(t, en) + require.Equal(t, core.ErrNilMarshalizer, err) + }) + + t.Run("nil hasher", func(t *testing.T) { + t.Parallel() + + args := createMockNotifierFactoryArgs() + args.Hasher = nil + + en, err := factory.CreateEventNotifier(args) + require.Nil(t, en) + require.Equal(t, core.ErrNilHasher, err) + }) + + t.Run("nil pub key converter", func(t *testing.T) { + t.Parallel() + + args := createMockNotifierFactoryArgs() + args.PubKeyConverter = nil + + en, err := factory.CreateEventNotifier(args) + require.Nil(t, en) + require.Equal(t, outport.ErrNilPubKeyConverter, err) + }) + + t.Run("should work", func(t *testing.T) { + t.Parallel() + + en, err := factory.CreateEventNotifier(createMockNotifierFactoryArgs()) + require.Nil(t, err) + require.NotNil(t, en) + }) +} diff --git a/outport/factory/outportFactory.go b/outport/factory/outportFactory.go index 59d72682a4f..bd2de0fe67d 100644 --- a/outport/factory/outportFactory.go +++ b/outport/factory/outportFactory.go @@ -6,14 +6,13 @@ import ( covalentFactory "github.com/ElrondNetwork/covalent-indexer-go/factory" indexerFactory "github.com/ElrondNetwork/elastic-indexer-go/factory" "github.com/ElrondNetwork/elrond-go/outport" - notifierFactory "github.com/ElrondNetwork/notifier-go/factory" ) // OutportFactoryArgs holds the factory arguments of different outport drivers type OutportFactoryArgs struct { RetrialInterval time.Duration ElasticIndexerFactoryArgs *indexerFactory.ArgsIndexerFactory - EventNotifierFactoryArgs *notifierFactory.EventNotifierFactoryArgs + EventNotifierFactoryArgs *EventNotifierFactoryArgs CovalentIndexerFactoryArgs *covalentFactory.ArgsCovalentIndexerFactory } @@ -90,13 +89,13 @@ func createAndSubscribeElasticDriverIfNeeded( func createAndSubscribeEventNotifierIfNeeded( outport outport.OutportHandler, - args *notifierFactory.EventNotifierFactoryArgs, + args *EventNotifierFactoryArgs, ) error { if !args.Enabled { return nil } - eventNotifier, err := notifierFactory.CreateEventNotifier(args) + eventNotifier, err := CreateEventNotifier(args) if err != nil { return err } diff --git a/outport/factory/outportFactory_test.go b/outport/factory/outportFactory_test.go index 7c68732c01d..09aab09216b 100644 --- a/outport/factory/outportFactory_test.go +++ b/outport/factory/outportFactory_test.go @@ -9,10 +9,10 @@ import ( indexerFactory "github.com/ElrondNetwork/elastic-indexer-go/factory" "github.com/ElrondNetwork/elrond-go/outport" "github.com/ElrondNetwork/elrond-go/outport/factory" + notifierFactory "github.com/ElrondNetwork/elrond-go/outport/factory" "github.com/ElrondNetwork/elrond-go/process/mock" "github.com/ElrondNetwork/elrond-go/testscommon/hashingMocks" stateMock "github.com/ElrondNetwork/elrond-go/testscommon/state" - notifierFactory "github.com/ElrondNetwork/notifier-go/factory" "github.com/stretchr/testify/require" ) @@ -124,8 +124,9 @@ func TestCreateOutport_SubscribeCovalentDriver(t *testing.T) { func TestCreateOutport_SubscribeNotifierDriver(t *testing.T) { args := createMockArgsOutportHandler(false, true, false) - args.EventNotifierFactoryArgs.Marshalizer = &mock.MarshalizerMock{} + args.EventNotifierFactoryArgs.Marshaller = &mock.MarshalizerMock{} args.EventNotifierFactoryArgs.Hasher = &hashingMocks.HasherMock{} + args.EventNotifierFactoryArgs.PubKeyConverter = &mock.PubkeyConverterMock{} outPort, err := factory.CreateOutport(args) defer func(c outport.OutportHandler) { diff --git 
a/outport/mock/httpClientStub.go b/outport/mock/httpClientStub.go new file mode 100644 index 00000000000..f93eb04854b --- /dev/null +++ b/outport/mock/httpClientStub.go @@ -0,0 +1,20 @@ +package mock + +// HTTPClientStub - +type HTTPClientStub struct { + PostCalled func(route string, payload interface{}, response interface{}) error +} + +// Post - +func (stub *HTTPClientStub) Post(route string, payload interface{}, response interface{}) error { + if stub.PostCalled != nil { + return stub.PostCalled(route, payload, response) + } + + return nil +} + +// IsInterfaceNil - +func (stub *HTTPClientStub) IsInterfaceNil() bool { + return stub == nil +} diff --git a/outport/notifier/errors.go b/outport/notifier/errors.go new file mode 100644 index 00000000000..40467bb1842 --- /dev/null +++ b/outport/notifier/errors.go @@ -0,0 +1,8 @@ +package notifier + +import ( + "errors" +) + +// ErrNilTransactionsPool signals that a nil transactions pool was provided +var ErrNilTransactionsPool = errors.New("nil transactions pool") diff --git a/outport/notifier/eventNotifier.go b/outport/notifier/eventNotifier.go new file mode 100644 index 00000000000..3cf6713db98 --- /dev/null +++ b/outport/notifier/eventNotifier.go @@ -0,0 +1,211 @@ +package notifier + +import ( + "encoding/hex" + "fmt" + + "github.com/ElrondNetwork/elrond-go-core/core" + "github.com/ElrondNetwork/elrond-go-core/core/check" + nodeData "github.com/ElrondNetwork/elrond-go-core/data" + "github.com/ElrondNetwork/elrond-go-core/data/indexer" + "github.com/ElrondNetwork/elrond-go-core/hashing" + "github.com/ElrondNetwork/elrond-go-core/marshal" + logger "github.com/ElrondNetwork/elrond-go-logger" +) + +var log = logger.GetOrCreate("outport/eventNotifier") + +const ( + pushEventEndpoint = "/events/push" + revertEventsEndpoint = "/events/revert" + finalizedEventsEndpoint = "/events/finalized" +) + +// SaveBlockData holds the data that will be sent to notifier instance +type SaveBlockData struct { + Hash string `json:"hash"` + Txs map[string]nodeData.TransactionHandler `json:"txs"` + Scrs map[string]nodeData.TransactionHandler `json:"scrs"` + LogEvents []Event `json:"events"` +} + +// Event holds event data +type Event struct { + Address string `json:"address"` + Identifier string `json:"identifier"` + Topics [][]byte `json:"topics"` + Data []byte `json:"data"` +} + +// RevertBlock holds revert event data +type RevertBlock struct { + Hash string `json:"hash"` + Nonce uint64 `json:"nonce"` + Round uint64 `json:"round"` + Epoch uint32 `json:"epoch"` +} + +// FinalizedBlock holds finalized block data +type FinalizedBlock struct { + Hash string `json:"hash"` +} + +type eventNotifier struct { + httpClient httpClientHandler + marshalizer marshal.Marshalizer + hasher hashing.Hasher + pubKeyConverter core.PubkeyConverter +} + +// ArgsEventNotifier defines the arguments needed for event notifier creation +type ArgsEventNotifier struct { + HttpClient httpClientHandler + Marshalizer marshal.Marshalizer + Hasher hashing.Hasher + PubKeyConverter core.PubkeyConverter +} + +// NewEventNotifier creates a new instance of the eventNotifier +// It implements all methods of process.Indexer +func NewEventNotifier(args ArgsEventNotifier) (*eventNotifier, error) { + return &eventNotifier{ + httpClient: args.HttpClient, + marshalizer: args.Marshalizer, + hasher: args.Hasher, + pubKeyConverter: args.PubKeyConverter, + }, nil +} + +// SaveBlock converts block data in order to be pushed to subscribers +func (en *eventNotifier) SaveBlock(args *indexer.ArgsSaveBlockData) error { + 
log.Debug("eventNotifier: SaveBlock called at block", "block hash", args.HeaderHash) + if args.TransactionsPool == nil { + return ErrNilTransactionsPool + } + + log.Debug("eventNotifier: checking if block has logs", "num logs", len(args.TransactionsPool.Logs)) + log.Debug("eventNotifier: checking if block has txs", "num txs", len(args.TransactionsPool.Txs)) + + events := en.getLogEventsFromTransactionsPool(args.TransactionsPool.Logs) + log.Debug("eventNotifier: extracted events from block logs", "num events", len(events)) + + blockData := SaveBlockData{ + Hash: hex.EncodeToString(args.HeaderHash), + Txs: args.TransactionsPool.Txs, + Scrs: args.TransactionsPool.Scrs, + LogEvents: events, + } + + err := en.httpClient.Post(pushEventEndpoint, blockData, nil) + if err != nil { + return fmt.Errorf("%w in eventNotifier.SaveBlock while posting block data", err) + } + + return nil +} + +func (en *eventNotifier) getLogEventsFromTransactionsPool(logs []*nodeData.LogData) []Event { + var logEvents []nodeData.EventHandler + for _, logData := range logs { + if logData == nil { + continue + } + if check.IfNil(logData.LogHandler) { + continue + } + + logEvents = append(logEvents, logData.LogHandler.GetLogEvents()...) + } + + if len(logEvents) == 0 { + return nil + } + + var events []Event + for _, eventHandler := range logEvents { + if !eventHandler.IsInterfaceNil() { + bech32Address := en.pubKeyConverter.Encode(eventHandler.GetAddress()) + eventIdentifier := string(eventHandler.GetIdentifier()) + + log.Debug("eventNotifier: received event from address", + "address", bech32Address, + "identifier", eventIdentifier, + ) + + events = append(events, Event{ + Address: bech32Address, + Identifier: eventIdentifier, + Topics: eventHandler.GetTopics(), + Data: eventHandler.GetData(), + }) + } + } + + return events +} + +// RevertIndexedBlock converts revert data in order to be pushed to subscribers +func (en *eventNotifier) RevertIndexedBlock(header nodeData.HeaderHandler, _ nodeData.BodyHandler) error { + blockHash, err := core.CalculateHash(en.marshalizer, en.hasher, header) + if err != nil { + return fmt.Errorf("%w in eventNotifier.RevertIndexedBlock while computing the block hash", err) + } + + revertBlock := RevertBlock{ + Hash: hex.EncodeToString(blockHash), + Nonce: header.GetNonce(), + Round: header.GetRound(), + Epoch: header.GetEpoch(), + } + + err = en.httpClient.Post(revertEventsEndpoint, revertBlock, nil) + if err != nil { + return fmt.Errorf("%w in eventNotifier.RevertIndexedBlock while posting event data", err) + } + + return nil +} + +// FinalizedBlock converts finalized block data in order to push it to subscribers +func (en *eventNotifier) FinalizedBlock(headerHash []byte) error { + finalizedBlock := FinalizedBlock{ + Hash: hex.EncodeToString(headerHash), + } + + err := en.httpClient.Post(finalizedEventsEndpoint, finalizedBlock, nil) + if err != nil { + return fmt.Errorf("%w in eventNotifier.FinalizedBlock while posting event data", err) + } + + return nil +} + +// SaveRoundsInfo returns nil +func (en *eventNotifier) SaveRoundsInfo(_ []*indexer.RoundInfo) error { + return nil +} + +// SaveValidatorsRating returns nil +func (en *eventNotifier) SaveValidatorsRating(_ string, _ []*indexer.ValidatorRatingInfo) error { + return nil +} + +// SaveValidatorsPubKeys returns nil +func (en *eventNotifier) SaveValidatorsPubKeys(_ map[uint32][][]byte, _ uint32) error { + return nil +} + +// SaveAccounts does nothing +func (en *eventNotifier) SaveAccounts(_ uint64, _ []nodeData.UserAccountHandler) error { + 
return nil +} + +// IsInterfaceNil returns whether the interface is nil +func (en *eventNotifier) IsInterfaceNil() bool { + return en == nil +} + +// Close returns nil +func (en *eventNotifier) Close() error { + return nil +} diff --git a/outport/notifier/eventNotifier_test.go b/outport/notifier/eventNotifier_test.go new file mode 100644 index 00000000000..7a074962800 --- /dev/null +++ b/outport/notifier/eventNotifier_test.go @@ -0,0 +1,145 @@ +package notifier_test + +import ( + "fmt" + "testing" + + "github.com/ElrondNetwork/elrond-go-core/data" + "github.com/ElrondNetwork/elrond-go-core/data/block" + "github.com/ElrondNetwork/elrond-go-core/data/indexer" + "github.com/ElrondNetwork/elrond-go/outport/mock" + "github.com/ElrondNetwork/elrond-go/outport/notifier" + "github.com/ElrondNetwork/elrond-go/testscommon" + "github.com/ElrondNetwork/elrond-go/testscommon/hashingMocks" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func createMockEventNotifierArgs() notifier.ArgsEventNotifier { + return notifier.ArgsEventNotifier{ + HttpClient: &mock.HTTPClientStub{}, + Marshalizer: &testscommon.MarshalizerMock{}, + Hasher: &hashingMocks.HasherMock{}, + PubKeyConverter: &testscommon.PubkeyConverterMock{}, + } +} + +func TestNewEventNotifier(t *testing.T) { + t.Parallel() + + en, err := notifier.NewEventNotifier(createMockEventNotifierArgs()) + require.Nil(t, err) + require.NotNil(t, en) +} + +func TestSaveBlock(t *testing.T) { + t.Parallel() + + args := createMockEventNotifierArgs() + + wasCalled := false + args.HttpClient = &mock.HTTPClientStub{ + PostCalled: func(route string, payload, response interface{}) error { + wasCalled = true + return nil + }, + } + + en, _ := notifier.NewEventNotifier(args) + + saveBlockData := &indexer.ArgsSaveBlockData{ + HeaderHash: []byte{}, + TransactionsPool: &indexer.Pool{ + Txs: map[string]data.TransactionHandler{ + "txhash1": nil, + }, + Scrs: map[string]data.TransactionHandler{ + "scrHash1": nil, + }, + Logs: []*data.LogData{}, + }, + } + + err := en.SaveBlock(saveBlockData) + require.Nil(t, err) + + require.True(t, wasCalled) +} + +func TestRevertIndexedBlock(t *testing.T) { + t.Parallel() + + args := createMockEventNotifierArgs() + + wasCalled := false + args.HttpClient = &mock.HTTPClientStub{ + PostCalled: func(route string, payload, response interface{}) error { + wasCalled = true + return nil + }, + } + + en, _ := notifier.NewEventNotifier(args) + + header := &block.Header{ + Nonce: 1, + Round: 2, + Epoch: 3, + } + err := en.RevertIndexedBlock(header, &block.Body{}) + require.Nil(t, err) + + require.True(t, wasCalled) +} + +func TestFinalizedBlock(t *testing.T) { + t.Parallel() + + args := createMockEventNotifierArgs() + + wasCalled := false + args.HttpClient = &mock.HTTPClientStub{ + PostCalled: func(route string, payload, response interface{}) error { + wasCalled = true + return nil + }, + } + + en, _ := notifier.NewEventNotifier(args) + + hash := []byte("headerHash") + err := en.FinalizedBlock(hash) + require.Nil(t, err) + + require.True(t, wasCalled) +} + +func TestMockFunctions(t *testing.T) { + t.Parallel() + + defer func() { + r := recover() + if r != nil { + assert.Fail(t, fmt.Sprintf("should have not panicked: %v", r)) + } + }() + + en, err := notifier.NewEventNotifier(createMockEventNotifierArgs()) + require.Nil(t, err) + require.False(t, en.IsInterfaceNil()) + + err = en.SaveRoundsInfo(nil) + require.Nil(t, err) + + err = en.SaveValidatorsRating("", nil) + require.Nil(t, err) + + err = 
en.SaveValidatorsPubKeys(nil, 0) + require.Nil(t, err) + + err = en.SaveAccounts(0, nil) + require.Nil(t, err) + + err = en.Close() + require.Nil(t, err) +} diff --git a/outport/notifier/httpClient.go b/outport/notifier/httpClient.go new file mode 100644 index 00000000000..2cb11295759 --- /dev/null +++ b/outport/notifier/httpClient.go @@ -0,0 +1,88 @@ +package notifier + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" +) + +const ( + contentTypeKey = "Content-Type" + contentTypeValue = "application/json" +) + +type httpClientHandler interface { + Post(route string, payload interface{}, response interface{}) error +} + +type httpClient struct { + useAuthorization bool + username string + password string + baseUrl string +} + +// HttpClientArgs defines the arguments needed for http client creation +type HttpClientArgs struct { + UseAuthorization bool + Username string + Password string + BaseUrl string +} + +// NewHttpClient creates an instance of httpClient which is a wrapper for http.Client +func NewHttpClient(args HttpClientArgs) *httpClient { + return &httpClient{ + useAuthorization: args.UseAuthorization, + username: args.Username, + password: args.Password, + baseUrl: args.BaseUrl, + } +} + +// Post can be used to send POST requests. It handles marshalling to/from json +func (h *httpClient) Post( + route string, + payload interface{}, + response interface{}, +) error { + jsonData, err := json.Marshal(payload) + if err != nil { + return err + } + + client := &http.Client{} + url := fmt.Sprintf("%s%s", h.baseUrl, route) + req, err := http.NewRequest(http.MethodPost, url, bytes.NewReader(jsonData)) + if err != nil { + return err + } + + req.Header.Set(contentTypeKey, contentTypeValue) + + if h.useAuthorization { + req.SetBasicAuth(h.username, h.password) + } + + resp, err := client.Do(req) + if err != nil { + return err + } + defer func() { + if resp != nil && resp.Body != nil { + bodyCloseErr := resp.Body.Close() + if bodyCloseErr != nil { + log.Warn("error while trying to close response body", "err", bodyCloseErr.Error()) + } + } + }() + + resBody, err := ioutil.ReadAll(resp.Body) + if err != nil { + return err + } + + return json.Unmarshal(resBody, &response) +} diff --git a/outport/notifier/httpClient_test.go b/outport/notifier/httpClient_test.go new file mode 100644 index 00000000000..62b986601e0 --- /dev/null +++ b/outport/notifier/httpClient_test.go @@ -0,0 +1,60 @@ +package notifier_test + +import ( + "encoding/json" + "net/http" + "net/http/httptest" + "testing" + + "github.com/ElrondNetwork/elrond-go/outport/notifier" + "github.com/stretchr/testify/require" +) + +type testStruct struct { + Hash string `json:"hash"` +} + +func createMockHTTPClientArgs() notifier.HttpClientArgs { + return notifier.HttpClientArgs{ + UseAuthorization: false, + Username: "user", + Password: "pass", + BaseUrl: "http://localhost:8080", + } +} + +func TestNewHTTPClient(t *testing.T) { + t.Parallel() + + args := createMockHTTPClientArgs() + client := notifier.NewHttpClient(args) + require.NotNil(t, client) +} + +func TestPOST(t *testing.T) { + t.Parallel() + + wasCalled := false + ws := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + wasCalled = true + + dataBytes, _ := json.Marshal(&testStruct{}) + _, err := w.Write(dataBytes) + require.Nil(t, err) + })) + + args := createMockHTTPClientArgs() + args.BaseUrl = ws.URL + + client := notifier.NewHttpClient(args) + require.NotNil(t, client) + + testPayload := testStruct{ + Hash: "hash1", + } + + 
err := client.Post("/events/push", testPayload, nil) + require.Nil(t, err) + + require.True(t, wasCalled) +} diff --git a/p2p/errors.go b/p2p/errors.go index 5bda39b304f..fba838283db 100644 --- a/p2p/errors.go +++ b/p2p/errors.go @@ -97,12 +97,6 @@ var ErrNilSharder = errors.New("nil sharder") // ErrNilPeerShardResolver signals that the peer shard resolver provided is nil var ErrNilPeerShardResolver = errors.New("nil PeerShardResolver") -// ErrNilNetworkShardingCollector signals that the network sharding collector provided is nil -var ErrNilNetworkShardingCollector = errors.New("nil network sharding collector") - -// ErrNilSignerVerifier signals that the signer-verifier instance provided is nil -var ErrNilSignerVerifier = errors.New("nil signer-verifier") - // ErrNilMarshalizer signals that an operation has been attempted to or with a nil marshalizer implementation var ErrNilMarshalizer = errors.New("nil marshalizer") @@ -158,3 +152,9 @@ var ErrWrongTypeAssertions = errors.New("wrong type assertion") // ErrNilConnectionsWatcher signals that a nil connections watcher has been provided var ErrNilConnectionsWatcher = errors.New("nil connections watcher") + +// ErrNilPeersRatingHandler signals that a nil peers rating handler has been provided +var ErrNilPeersRatingHandler = errors.New("nil peers rating handler") + +// ErrNilCacher signals that a nil cacher has been provided +var ErrNilCacher = errors.New("nil cacher") diff --git a/p2p/libp2p/connectionMonitor/libp2pConnectionMonitorSimple.go b/p2p/libp2p/connectionMonitor/libp2pConnectionMonitorSimple.go index 4f1fd291022..e67359400fd 100644 --- a/p2p/libp2p/connectionMonitor/libp2pConnectionMonitorSimple.go +++ b/p2p/libp2p/connectionMonitor/libp2pConnectionMonitorSimple.go @@ -87,7 +87,11 @@ func (lcms *libp2pConnectionMonitorSimple) doReconn() { func (lcms *libp2pConnectionMonitorSimple) Connected(netw network.Network, conn network.Conn) { allPeers := netw.Peers() - lcms.connectionsWatcher.NewKnownConnection(core.PeerID(conn.RemotePeer()), conn.RemoteMultiaddr().String()) + peerId := core.PeerID(conn.RemotePeer()) + connectionStr := conn.RemoteMultiaddr().String() + lcms.connectionsWatcher.NewKnownConnection(peerId, connectionStr) + lcms.preferredPeersHolder.PutConnectionAddress(peerId, connectionStr) + evicted := lcms.sharder.ComputeEvictionList(allPeers) for _, pid := range evicted { _ = netw.ClosePeer(pid) diff --git a/p2p/libp2p/connectionMonitor/libp2pConnectionMonitorSimple_test.go b/p2p/libp2p/connectionMonitor/libp2pConnectionMonitorSimple_test.go index 8e14dc8ed5f..74183699c1e 100644 --- a/p2p/libp2p/connectionMonitor/libp2pConnectionMonitorSimple_test.go +++ b/p2p/libp2p/connectionMonitor/libp2pConnectionMonitorSimple_test.go @@ -132,6 +132,12 @@ func TestLibp2pConnectionMonitorSimple_ConnectedWithSharderShouldCallEvictAndClo knownConnectionCalled = true }, } + putConnectionAddressCalled := false + args.PreferredPeersHolder = &p2pmocks.PeersHolderStub{ + PutConnectionAddressCalled: func(peerID core.PeerID, addressSlice string) { + putConnectionAddressCalled = true + }, + } lcms, _ := NewLibp2pConnectionMonitorSimple(args) lcms.Connected( @@ -146,7 +152,7 @@ func TestLibp2pConnectionMonitorSimple_ConnectedWithSharderShouldCallEvictAndClo }, &mock.ConnStub{ RemotePeerCalled: func() peer.ID { - return "" + return evictedPid[0] }, }, ) @@ -154,6 +160,7 @@ func TestLibp2pConnectionMonitorSimple_ConnectedWithSharderShouldCallEvictAndClo assert.Equal(t, 1, numClosedWasCalled) assert.Equal(t, 1, numComputeWasCalled) assert.True(t, 
knownConnectionCalled)
+	assert.True(t, putConnectionAddressCalled)
 }
 
 func TestNewLibp2pConnectionMonitorSimple_DisconnectedShouldRemovePeerFromPreferredPeers(t *testing.T) {
diff --git a/p2p/libp2p/disabled/nilPeerDenialEvaluator.go b/p2p/libp2p/disabled/nilPeerDenialEvaluator.go
deleted file mode 100644
index 95fa2f907c5..00000000000
--- a/p2p/libp2p/disabled/nilPeerDenialEvaluator.go
+++ /dev/null
@@ -1,27 +0,0 @@
-package disabled
-
-import (
-	"time"
-
-	"github.com/ElrondNetwork/elrond-go-core/core"
-)
-
-// NilPeerDenialEvaluator is a mock implementation of PeerDenialEvaluator that does not manage black listed keys
-// (all keys [peers] are whitelisted)
-type NilPeerDenialEvaluator struct {
-}
-
-// IsDenied outputs false (all peers are white listed)
-func (npde *NilPeerDenialEvaluator) IsDenied(_ core.PeerID) bool {
-	return false
-}
-
-// UpsertPeerID returns nil and does nothing
-func (npde *NilPeerDenialEvaluator) UpsertPeerID(_ core.PeerID, _ time.Duration) error {
-	return nil
-}
-
-// IsInterfaceNil returns true if there is no value under the interface
-func (npde *NilPeerDenialEvaluator) IsInterfaceNil() bool {
-	return npde == nil
-}
diff --git a/p2p/libp2p/disabled/nilPeerDenialEvaluator_test.go b/p2p/libp2p/disabled/nilPeerDenialEvaluator_test.go
deleted file mode 100644
index c723a0eb2c3..00000000000
--- a/p2p/libp2p/disabled/nilPeerDenialEvaluator_test.go
+++ /dev/null
@@ -1,17 +0,0 @@
-package disabled
-
-import (
-	"testing"
-	"time"
-
-	"github.com/ElrondNetwork/elrond-go-core/core/check"
-	"github.com/stretchr/testify/assert"
-)
-
-func TestNilPeerDenialEvaluator_ShouldWork(t *testing.T) {
-	nbh := &NilPeerDenialEvaluator{}
-
-	assert.False(t, check.IfNil(nbh))
-	assert.Nil(t, nbh.UpsertPeerID("", time.Second))
-	assert.False(t, nbh.IsDenied(""))
-}
diff --git a/p2p/libp2p/disabled/peerDenialEvaluator.go b/p2p/libp2p/disabled/peerDenialEvaluator.go
new file mode 100644
index 00000000000..e4203127e66
--- /dev/null
+++ b/p2p/libp2p/disabled/peerDenialEvaluator.go
@@ -0,0 +1,27 @@
+package disabled
+
+import (
+	"time"
+
+	"github.com/ElrondNetwork/elrond-go-core/core"
+)
+
+// PeerDenialEvaluator is a disabled implementation of PeerDenialEvaluator that does not manage black listed keys
+// (all keys [peers] are whitelisted)
+type PeerDenialEvaluator struct {
+}
+
+// IsDenied outputs false (all peers are white listed)
+func (pde *PeerDenialEvaluator) IsDenied(_ core.PeerID) bool {
+	return false
+}
+
+// UpsertPeerID returns nil and does nothing
+func (pde *PeerDenialEvaluator) UpsertPeerID(_ core.PeerID, _ time.Duration) error {
+	return nil
+}
+
+// IsInterfaceNil returns true if there is no value under the interface
+func (pde *PeerDenialEvaluator) IsInterfaceNil() bool {
+	return pde == nil
+}
diff --git a/p2p/libp2p/disabled/peerDenialEvaluator_test.go b/p2p/libp2p/disabled/peerDenialEvaluator_test.go
new file mode 100644
index 00000000000..7e2964be69e
--- /dev/null
+++ b/p2p/libp2p/disabled/peerDenialEvaluator_test.go
@@ -0,0 +1,19 @@
+package disabled
+
+import (
+	"testing"
+	"time"
+
+	"github.com/ElrondNetwork/elrond-go-core/core/check"
+	"github.com/stretchr/testify/assert"
+)
+
+func TestPeerDenialEvaluator_ShouldWork(t *testing.T) {
+	t.Parallel()
+
+	pde := &PeerDenialEvaluator{}
+
+	assert.False(t, check.IfNil(pde))
+	assert.Nil(t, pde.UpsertPeerID("", time.Second))
+	assert.False(t, pde.IsDenied(""))
+}
diff --git a/p2p/libp2p/export_test.go b/p2p/libp2p/export_test.go
index 2be46cb2aa4..e560484893c 100644
--- a/p2p/libp2p/export_test.go
+++ b/p2p/libp2p/export_test.go
@@ -52,6 +52,27 @@ func (netMes *networkMessenger) MapHistogram(input map[uint32]int) string {
 	return netMes.mapHistogram(input)
 }
 
+// PubsubHasTopic -
+func (netMes *networkMessenger) PubsubHasTopic(expectedTopic string) bool {
+	netMes.mutTopics.RLock()
+	topics := netMes.pb.GetTopics()
+	netMes.mutTopics.RUnlock()
+
+	for _, topic := range topics {
+		if topic == expectedTopic {
+			return true
+		}
+	}
+	return false
+}
+
+// HasProcessorForTopic -
+func (netMes *networkMessenger) HasProcessorForTopic(expectedTopic string) bool {
+	processor, found := netMes.processors[expectedTopic]
+
+	return found && processor != nil
+}
+
 // ProcessReceivedDirectMessage -
 func (ds *directSender) ProcessReceivedDirectMessage(message *pubsubPb.Message, fromConnectedPeer peer.ID) error {
 	return ds.processReceivedDirectMessage(message, fromConnectedPeer)
diff --git a/p2p/libp2p/issues_test.go b/p2p/libp2p/issues_test.go
index d7eda3e170d..1afe91e0fbb 100644
--- a/p2p/libp2p/issues_test.go
+++ b/p2p/libp2p/issues_test.go
@@ -36,6 +36,7 @@ func createMessenger() p2p.Messenger {
 		SyncTimer:            &libp2p.LocalSyncTimer{},
 		PreferredPeersHolder: &p2pmocks.PeersHolderStub{},
 		NodeOperationMode:    p2p.NormalOperation,
+		PeersRatingHandler:   &p2pmocks.PeersRatingHandlerStub{},
 	}
 
 	libP2PMes, err := libp2p.NewNetworkMessenger(args)
diff --git a/p2p/libp2p/mockMessenger.go b/p2p/libp2p/mockMessenger.go
index 58b1c535402..6ffc87fe047 100644
--- a/p2p/libp2p/mockMessenger.go
+++ b/p2p/libp2p/mockMessenger.go
@@ -26,11 +26,12 @@ func NewMockMessenger(
 	ctx, cancelFunc := context.WithCancel(context.Background())
 
 	p2pNode := &networkMessenger{
+		p2pSigner:  &p2pSigner{},
 		p2pHost:    NewConnectableHost(h),
 		ctx:        ctx,
 		cancelFunc: cancelFunc,
 	}
-	p2pNode.connectionsWatcher, err = factory.NewConnectionsWatcher(args.P2pConfig.Node.ConnectionWatcherType, ttlConnectionsWatcher)
+	p2pNode.printConnectionsWatcher, err = factory.NewConnectionsWatcher(args.P2pConfig.Node.ConnectionWatcherType, ttlConnectionsWatcher)
 	if err != nil {
 		return nil, err
 	}
diff --git a/p2p/libp2p/netMessenger.go b/p2p/libp2p/netMessenger.go
index a5916bdad54..977e1fee34d 100644
--- a/p2p/libp2p/netMessenger.go
+++ b/p2p/libp2p/netMessenger.go
@@ -101,6 +101,7 @@ func init() {
 
 // TODO refactor this struct to have be a wrapper (with logic) over a glue code
 type networkMessenger struct {
+	*p2pSigner
 	ctx        context.Context
 	cancelFunc context.CancelFunc
 	p2pHost    ConnectableHost
@@ -126,7 +127,8 @@ type networkMessenger struct {
 	marshalizer          p2p.Marshalizer
 	syncTimer            p2p.SyncTimer
 	preferredPeersHolder p2p.PreferredPeersHolderHandler
-	connectionsWatcher   p2p.ConnectionsWatcher
+	printConnectionsWatcher p2p.ConnectionsWatcher
+	peersRatingHandler      p2p.PeersRatingHandler
 }
 
 // ArgsNetworkMessenger defines the options used to create a p2p wrapper
@@ -137,6 +139,7 @@ type ArgsNetworkMessenger struct {
 	SyncTimer            p2p.SyncTimer
 	PreferredPeersHolder p2p.PreferredPeersHolderHandler
 	NodeOperationMode    p2p.NodeOperation
+	PeersRatingHandler   p2p.PeersRatingHandler
 }
 
 // NewNetworkMessenger creates a libP2P messenger by opening a port on the current machine
@@ -154,6 +157,9 @@ func newNetworkMessenger(args ArgsNetworkMessenger, messageSigning messageSignin
 	if check.IfNil(args.PreferredPeersHolder) {
 		return nil, fmt.Errorf("%w when creating a new network messenger", p2p.ErrNilPreferredPeersHolder)
 	}
+	if check.IfNil(args.PeersRatingHandler) {
+		return nil, fmt.Errorf("%w when creating a new network messenger", p2p.ErrNilPeersRatingHandler)
+	}
 
 	p2pPrivKey, err := createP2PPrivKey(args.P2pConfig.Node.Seed)
 	if err != nil {
@@ -222,11 +228,15 @@ func constructNode(
 	}
 
 	p2pNode := &networkMessenger{
-		ctx:                ctx,
-		cancelFunc:         cancelFunc,
-		p2pHost:            NewConnectableHost(h),
-		port:               port,
-		connectionsWatcher: connWatcher,
+		p2pSigner: &p2pSigner{
+			privateKey: p2pPrivKey,
+		},
+		ctx:                     ctx,
+		cancelFunc:              cancelFunc,
+		p2pHost:                 NewConnectableHost(h),
+		port:                    port,
+		printConnectionsWatcher: connWatcher,
+		peersRatingHandler:      args.PeersRatingHandler,
 	}
 
 	return p2pNode, nil
@@ -295,6 +305,7 @@ func addComponentsToNode(
 	p2pNode.syncTimer = args.SyncTimer
 	p2pNode.preferredPeersHolder = args.PreferredPeersHolder
 	p2pNode.debugger = p2pDebug.NewP2PDebugger(core.PeerID(p2pNode.p2pHost.ID()))
+	p2pNode.peersRatingHandler = args.PeersRatingHandler
 
 	err = p2pNode.createPubSub(messageSigning)
 	if err != nil {
@@ -347,6 +358,7 @@ func (netMes *networkMessenger) createPubSub(messageSigning messageSigningConfig
 	}
 
 	netMes.poc, err = newPeersOnChannel(
+		netMes.peersRatingHandler,
 		netMes.pb.ListPeers,
 		refreshPeersOnTopic,
 		ttlPeersOnTopic)
@@ -434,7 +446,7 @@ func (netMes *networkMessenger) createDiscoverer(p2pConfig config.P2PConfig) err
 		Host:               netMes.p2pHost,
 		Sharder:            netMes.sharder,
 		P2pConfig:          p2pConfig,
-		ConnectionsWatcher: netMes.connectionsWatcher,
+		ConnectionsWatcher: netMes.printConnectionsWatcher,
 	}
 
 	netMes.peerDiscoverer, err = discoveryFactory.NewPeerDiscoverer(args)
@@ -458,7 +470,7 @@ func (netMes *networkMessenger) createConnectionMonitor(p2pConfig config.P2PConf
 		Sharder:                    sharder,
 		ThresholdMinConnectedPeers: p2pConfig.Node.ThresholdMinConnectedPeers,
 		PreferredPeersHolder:       netMes.preferredPeersHolder,
-		ConnectionsWatcher:         netMes.connectionsWatcher,
+		ConnectionsWatcher:         netMes.printConnectionsWatcher,
 	}
 	var err error
 	netMes.connMonitor, err = connectionMonitor.NewLibp2pConnectionMonitorSimple(args)
@@ -469,7 +481,7 @@ func (netMes *networkMessenger) createConnectionMonitor(p2pConfig config.P2PConf
 	cmw := newConnectionMonitorWrapper(
 		netMes.p2pHost.Network(),
 		netMes.connMonitor,
-		&disabled.NilPeerDenialEvaluator{},
+		&disabled.PeerDenialEvaluator{},
 	)
 	netMes.p2pHost.Network().Notify(cmw)
 	netMes.connMonitorWrapper = cmw
@@ -598,8 +610,8 @@ func (netMes *networkMessenger) Close() error {
 			"error", err)
 	}
 
-	log.Debug("closing network messenger's connection watcher...")
-	errConnWatcher := netMes.connectionsWatcher.Close()
+	log.Debug("closing network messenger's print connection watcher...")
+	errConnWatcher := netMes.printConnectionsWatcher.Close()
 	if errConnWatcher != nil {
 		err = errConnWatcher
 		log.Warn("networkMessenger.Close",
@@ -838,6 +850,10 @@ func (netMes *networkMessenger) CreateTopic(name string, createChannelForTopic b
 		return nil
 	}
 
+	if name == common.ConnectionTopic {
+		return nil
+	}
+
 	topic, err := netMes.pb.Join(name)
 	if err != nil {
 		return fmt.Errorf("%w for topic %s", err, name)
@@ -946,7 +962,7 @@ func (netMes *networkMessenger) RegisterMessageProcessor(topic string, identifie
 		topicProcs = newTopicProcessors()
 		netMes.processors[topic] = topicProcs
 
-		err := netMes.pb.RegisterTopicValidator(topic, netMes.pubsubCallback(topicProcs, topic))
+		err := netMes.registerOnPubSub(topic, topicProcs)
 		if err != nil {
 			return err
 		}
@@ -960,6 +976,15 @@ func (netMes *networkMessenger) RegisterMessageProcessor(topic string, identifie
 	return nil
 }
 
+func (netMes *networkMessenger) registerOnPubSub(topic string, topicProcs *topicProcessors) error {
+	if topic == common.ConnectionTopic {
+		// do not allow broadcasts on this connection topic
+		return nil
+	}
+
+	return netMes.pb.RegisterTopicValidator(topic, netMes.pubsubCallback(topicProcs, topic))
+}
+
 func (netMes *networkMessenger) pubsubCallback(topicProcs *topicProcessors, topic string) func(ctx context.Context, pid peer.ID, message *pubsub.Message) bool {
 	return func(ctx context.Context, pid peer.ID, message *pubsub.Message) bool {
 		fromConnectedPeer := core.PeerID(pid)
@@ -987,6 +1012,10 @@ func (netMes *networkMessenger) pubsubCallback(topicProcs *topicProcessors, topi
 		}
 		netMes.processDebugMessage(topic, fromConnectedPeer, uint64(len(message.Data)), !messageOk)
 
+		if messageOk {
+			netMes.peersRatingHandler.IncreaseRating(fromConnectedPeer)
+		}
+
 		return messageOk
 	}
 }
@@ -1076,6 +1105,11 @@ func (netMes *networkMessenger) UnregisterAllMessageProcessors() error {
 	defer netMes.mutTopics.Unlock()
 
 	for topic := range netMes.processors {
+		if topic == common.ConnectionTopic {
+			delete(netMes.processors, topic)
+			continue
+		}
+
 		err := netMes.pb.UnregisterTopicValidator(topic)
 		if err != nil {
 			return err
@@ -1132,7 +1166,9 @@ func (netMes *networkMessenger) UnregisterMessageProcessor(topic string, identif
 	if len(identifiers) == 0 {
 		netMes.processors[topic] = nil
 
-		return netMes.pb.UnregisterTopicValidator(topic)
+		if topic != common.ConnectionTopic { // no validator registered for this topic
+			return netMes.pb.UnregisterTopicValidator(topic)
+		}
 	}
 
 	return nil
@@ -1214,6 +1250,10 @@ func (netMes *networkMessenger) directMessageHandler(message *pubsub.Message, fr
 		}
 
 		netMes.debugger.AddIncomingMessage(msg.Topic(), uint64(len(msg.Data())), !messageOk)
+
+		if messageOk {
+			netMes.peersRatingHandler.IncreaseRating(fromConnectedPeer)
+		}
 	}(msg)
 
 	return nil
diff --git a/p2p/libp2p/netMessenger_test.go b/p2p/libp2p/netMessenger_test.go
index d61b0731de0..f3c91de8599 100644
--- a/p2p/libp2p/netMessenger_test.go
+++ b/p2p/libp2p/netMessenger_test.go
@@ -16,6 +16,7 @@ import (
 	"github.com/ElrondNetwork/elrond-go-core/core/check"
 	"github.com/ElrondNetwork/elrond-go-core/marshal"
 	logger "github.com/ElrondNetwork/elrond-go-logger"
+	"github.com/ElrondNetwork/elrond-go/common"
 	"github.com/ElrondNetwork/elrond-go/config"
 	"github.com/ElrondNetwork/elrond-go/p2p"
 	"github.com/ElrondNetwork/elrond-go/p2p/data"
@@ -46,14 +47,14 @@ func waitDoneWithTimeout(t *testing.T, chanDone chan bool, timeout time.Duration
 	}
 }
 
-func prepareMessengerForMatchDataReceive(mes p2p.Messenger, matchData []byte, wg *sync.WaitGroup) {
-	_ = mes.CreateTopic("test", false)
+func prepareMessengerForMatchDataReceive(messenger p2p.Messenger, matchData []byte, wg *sync.WaitGroup) {
+	_ = messenger.CreateTopic("test", false)
 
-	_ = mes.RegisterMessageProcessor("test", "identifier",
+	_ = messenger.RegisterMessageProcessor("test", "identifier",
 		&mock.MessageProcessorStub{
 			ProcessMessageCalled: func(message p2p.MessageP2P, _ core.PeerID) error {
 				if bytes.Equal(matchData, message.Data()) {
-					fmt.Printf("%s got the message\n", mes.ID().Pretty())
+					fmt.Printf("%s got the message\n", messenger.ID().Pretty())
 					wg.Done()
 				}
 
@@ -62,8 +63,8 @@ func prepareMessengerForMatchDataReceive(mes p2p.Messenger, matchData []byte, wg
 		})
 }
 
-func getConnectableAddress(mes p2p.Messenger) string {
-	for _, addr := range mes.Addresses() {
+func getConnectableAddress(messenger p2p.Messenger) string {
+	for _, addr := range messenger.Addresses() {
 		if strings.Contains(addr, "circuit") || strings.Contains(addr, "169.254") {
 			continue
 		}
@@ -92,56 +93,57 @@ func createMockNetworkArgs() libp2p.ArgsNetworkMessenger {
 		},
 		SyncTimer:            &libp2p.LocalSyncTimer{},
 		PreferredPeersHolder: 
&p2pmocks.PeersHolderStub{}, + PeersRatingHandler: &p2pmocks.PeersRatingHandlerStub{}, } } func createMockNetworkOf2() (mocknet.Mocknet, p2p.Messenger, p2p.Messenger) { netw := mocknet.New(context.Background()) - mes1, _ := libp2p.NewMockMessenger(createMockNetworkArgs(), netw) - mes2, _ := libp2p.NewMockMessenger(createMockNetworkArgs(), netw) + messenger1, _ := libp2p.NewMockMessenger(createMockNetworkArgs(), netw) + messenger2, _ := libp2p.NewMockMessenger(createMockNetworkArgs(), netw) _ = netw.LinkAll() - return netw, mes1, mes2 + return netw, messenger1, messenger2 } func createMockNetworkOf3() (p2p.Messenger, p2p.Messenger, p2p.Messenger) { netw := mocknet.New(context.Background()) - mes1, _ := libp2p.NewMockMessenger(createMockNetworkArgs(), netw) - mes2, _ := libp2p.NewMockMessenger(createMockNetworkArgs(), netw) - mes3, _ := libp2p.NewMockMessenger(createMockNetworkArgs(), netw) + messenger1, _ := libp2p.NewMockMessenger(createMockNetworkArgs(), netw) + messenger2, _ := libp2p.NewMockMessenger(createMockNetworkArgs(), netw) + messenger3, _ := libp2p.NewMockMessenger(createMockNetworkArgs(), netw) _ = netw.LinkAll() nscm1 := mock.NewNetworkShardingCollectorMock() - nscm1.UpdatePeerIdSubType(mes1.ID(), core.FullHistoryObserver) - nscm1.UpdatePeerIdSubType(mes2.ID(), core.FullHistoryObserver) - nscm1.UpdatePeerIdSubType(mes3.ID(), core.RegularPeer) - _ = mes1.SetPeerShardResolver(nscm1) + nscm1.PutPeerIdSubType(messenger1.ID(), core.FullHistoryObserver) + nscm1.PutPeerIdSubType(messenger2.ID(), core.FullHistoryObserver) + nscm1.PutPeerIdSubType(messenger3.ID(), core.RegularPeer) + _ = messenger1.SetPeerShardResolver(nscm1) nscm2 := mock.NewNetworkShardingCollectorMock() - nscm2.UpdatePeerIdSubType(mes1.ID(), core.FullHistoryObserver) - nscm2.UpdatePeerIdSubType(mes2.ID(), core.FullHistoryObserver) - nscm2.UpdatePeerIdSubType(mes3.ID(), core.RegularPeer) - _ = mes2.SetPeerShardResolver(nscm2) + nscm2.PutPeerIdSubType(messenger1.ID(), core.FullHistoryObserver) + nscm2.PutPeerIdSubType(messenger2.ID(), core.FullHistoryObserver) + nscm2.PutPeerIdSubType(messenger3.ID(), core.RegularPeer) + _ = messenger2.SetPeerShardResolver(nscm2) nscm3 := mock.NewNetworkShardingCollectorMock() - nscm3.UpdatePeerIdSubType(mes1.ID(), core.FullHistoryObserver) - nscm3.UpdatePeerIdSubType(mes2.ID(), core.FullHistoryObserver) - nscm3.UpdatePeerIdSubType(mes3.ID(), core.RegularPeer) - _ = mes3.SetPeerShardResolver(nscm3) + nscm3.PutPeerIdSubType(messenger1.ID(), core.FullHistoryObserver) + nscm3.PutPeerIdSubType(messenger2.ID(), core.FullHistoryObserver) + nscm3.PutPeerIdSubType(messenger3.ID(), core.RegularPeer) + _ = messenger3.SetPeerShardResolver(nscm3) - return mes1, mes2, mes3 + return messenger1, messenger2, messenger3 } func createMockMessenger() p2p.Messenger { netw := mocknet.New(context.Background()) - mes, _ := libp2p.NewMockMessenger(createMockNetworkArgs(), netw) + messenger, _ := libp2p.NewMockMessenger(createMockNetworkArgs(), netw) - return mes + return messenger } func containsPeerID(list []core.PeerID, searchFor core.PeerID) bool { @@ -157,21 +159,21 @@ func containsPeerID(list []core.PeerID, searchFor core.PeerID) bool { func TestNewMemoryLibp2pMessenger_NilMockNetShouldErr(t *testing.T) { args := createMockNetworkArgs() - mes, err := libp2p.NewMockMessenger(args, nil) + messenger, err := libp2p.NewMockMessenger(args, nil) - assert.Nil(t, mes) + assert.Nil(t, messenger) assert.Equal(t, p2p.ErrNilMockNet, err) } func TestNewMemoryLibp2pMessenger_OkValsWithoutDiscoveryShouldWork(t 
*testing.T) { netw := mocknet.New(context.Background()) - mes, err := libp2p.NewMockMessenger(createMockNetworkArgs(), netw) + messenger, err := libp2p.NewMockMessenger(createMockNetworkArgs(), netw) assert.Nil(t, err) - assert.False(t, check.IfNil(mes)) + assert.False(t, check.IfNil(messenger)) - _ = mes.Close() + _ = messenger.Close() } // ------- NewNetworkMessenger @@ -179,38 +181,47 @@ func TestNewMemoryLibp2pMessenger_OkValsWithoutDiscoveryShouldWork(t *testing.T) func TestNewNetworkMessenger_NilMessengerShouldErr(t *testing.T) { arg := createMockNetworkArgs() arg.Marshalizer = nil - mes, err := libp2p.NewNetworkMessenger(arg) + messenger, err := libp2p.NewNetworkMessenger(arg) - assert.True(t, check.IfNil(mes)) + assert.True(t, check.IfNil(messenger)) assert.True(t, errors.Is(err, p2p.ErrNilMarshalizer)) } func TestNewNetworkMessenger_NilPreferredPeersHolderShouldErr(t *testing.T) { arg := createMockNetworkArgs() arg.PreferredPeersHolder = nil + messenger, err := libp2p.NewNetworkMessenger(arg) + + assert.True(t, check.IfNil(messenger)) + assert.True(t, errors.Is(err, p2p.ErrNilPreferredPeersHolder)) +} + +func TestNewNetworkMessenger_NilPeersRatingHandlerShouldErr(t *testing.T) { + arg := createMockNetworkArgs() + arg.PeersRatingHandler = nil mes, err := libp2p.NewNetworkMessenger(arg) assert.True(t, check.IfNil(mes)) - assert.True(t, errors.Is(err, p2p.ErrNilPreferredPeersHolder)) + assert.True(t, errors.Is(err, p2p.ErrNilPeersRatingHandler)) } func TestNewNetworkMessenger_NilSyncTimerShouldErr(t *testing.T) { arg := createMockNetworkArgs() arg.SyncTimer = nil - mes, err := libp2p.NewNetworkMessenger(arg) + messenger, err := libp2p.NewNetworkMessenger(arg) - assert.True(t, check.IfNil(mes)) + assert.True(t, check.IfNil(messenger)) assert.True(t, errors.Is(err, p2p.ErrNilSyncTimer)) } func TestNewNetworkMessenger_WithDeactivatedKadDiscovererShouldWork(t *testing.T) { arg := createMockNetworkArgs() - mes, err := libp2p.NewNetworkMessenger(arg) + messenger, err := libp2p.NewNetworkMessenger(arg) - assert.NotNil(t, mes) + assert.NotNil(t, messenger) assert.Nil(t, err) - _ = mes.Close() + _ = messenger.Close() } func TestNewNetworkMessenger_WithKadDiscovererListsSharderInvalidTargetConnShouldErr(t *testing.T) { @@ -225,9 +236,9 @@ func TestNewNetworkMessenger_WithKadDiscovererListsSharderInvalidTargetConnShoul RoutingTableRefreshIntervalInSec: 10, } arg.P2pConfig.Sharding.Type = p2p.ListsSharder - mes, err := libp2p.NewNetworkMessenger(arg) + messenger, err := libp2p.NewNetworkMessenger(arg) - assert.True(t, check.IfNil(mes)) + assert.True(t, check.IfNil(messenger)) assert.True(t, errors.Is(err, p2p.ErrInvalidValue)) } @@ -246,12 +257,12 @@ func TestNewNetworkMessenger_WithKadDiscovererListSharderShouldWork(t *testing.T Type: p2p.NilListSharder, TargetPeerCount: 10, } - mes, err := libp2p.NewNetworkMessenger(arg) + messenger, err := libp2p.NewNetworkMessenger(arg) - assert.False(t, check.IfNil(mes)) + assert.False(t, check.IfNil(messenger)) assert.Nil(t, err) - _ = mes.Close() + _ = messenger.Close() } // ------- Messenger functionality @@ -259,8 +270,8 @@ func TestNewNetworkMessenger_WithKadDiscovererListSharderShouldWork(t *testing.T func TestLibp2pMessenger_ConnectToPeerShouldCallUpgradedHost(t *testing.T) { netw := mocknet.New(context.Background()) - mes, _ := libp2p.NewMockMessenger(createMockNetworkArgs(), netw) - _ = mes.Close() + messenger, _ := libp2p.NewMockMessenger(createMockNetworkArgs(), netw) + _ = messenger.Close() wasCalled := false @@ -275,156 +286,156 @@ func 
TestLibp2pMessenger_ConnectToPeerShouldCallUpgradedHost(t *testing.T) { }, } - mes.SetHost(uhs) - _ = mes.ConnectToPeer(p) + messenger.SetHost(uhs) + _ = messenger.ConnectToPeer(p) assert.True(t, wasCalled) } func TestLibp2pMessenger_IsConnectedShouldWork(t *testing.T) { - _, mes1, mes2 := createMockNetworkOf2() + _, messenger1, messenger2 := createMockNetworkOf2() - adr2 := mes2.Addresses()[0] + adr2 := messenger2.Addresses()[0] fmt.Printf("Connecting to %s...\n", adr2) - _ = mes1.ConnectToPeer(adr2) + _ = messenger1.ConnectToPeer(adr2) - assert.True(t, mes1.IsConnected(mes2.ID())) - assert.True(t, mes2.IsConnected(mes1.ID())) + assert.True(t, messenger1.IsConnected(messenger2.ID())) + assert.True(t, messenger2.IsConnected(messenger1.ID())) - _ = mes1.Close() - _ = mes2.Close() + _ = messenger1.Close() + _ = messenger2.Close() } func TestLibp2pMessenger_CreateTopicOkValsShouldWork(t *testing.T) { - mes := createMockMessenger() + messenger := createMockMessenger() - err := mes.CreateTopic("test", true) + err := messenger.CreateTopic("test", true) assert.Nil(t, err) - _ = mes.Close() + _ = messenger.Close() } func TestLibp2pMessenger_CreateTopicTwiceShouldNotErr(t *testing.T) { - mes := createMockMessenger() + messenger := createMockMessenger() - _ = mes.CreateTopic("test", false) - err := mes.CreateTopic("test", false) + _ = messenger.CreateTopic("test", false) + err := messenger.CreateTopic("test", false) assert.Nil(t, err) - _ = mes.Close() + _ = messenger.Close() } func TestLibp2pMessenger_HasTopicIfHaveTopicShouldReturnTrue(t *testing.T) { - mes := createMockMessenger() + messenger := createMockMessenger() - _ = mes.CreateTopic("test", false) + _ = messenger.CreateTopic("test", false) - assert.True(t, mes.HasTopic("test")) + assert.True(t, messenger.HasTopic("test")) - _ = mes.Close() + _ = messenger.Close() } func TestLibp2pMessenger_HasTopicIfDoNotHaveTopicShouldReturnFalse(t *testing.T) { - mes := createMockMessenger() + messenger := createMockMessenger() - _ = mes.CreateTopic("test", false) + _ = messenger.CreateTopic("test", false) - assert.False(t, mes.HasTopic("one topic")) + assert.False(t, messenger.HasTopic("one topic")) - _ = mes.Close() + _ = messenger.Close() } func TestLibp2pMessenger_RegisterTopicValidatorOnInexistentTopicShouldWork(t *testing.T) { - mes := createMockMessenger() + messenger := createMockMessenger() - err := mes.RegisterMessageProcessor("test", "identifier", &mock.MessageProcessorStub{}) + err := messenger.RegisterMessageProcessor("test", "identifier", &mock.MessageProcessorStub{}) assert.Nil(t, err) - _ = mes.Close() + _ = messenger.Close() } func TestLibp2pMessenger_RegisterTopicValidatorWithNilHandlerShouldErr(t *testing.T) { - mes := createMockMessenger() + messenger := createMockMessenger() - _ = mes.CreateTopic("test", false) + _ = messenger.CreateTopic("test", false) - err := mes.RegisterMessageProcessor("test", "identifier", nil) + err := messenger.RegisterMessageProcessor("test", "identifier", nil) assert.True(t, errors.Is(err, p2p.ErrNilValidator)) - _ = mes.Close() + _ = messenger.Close() } func TestLibp2pMessenger_RegisterTopicValidatorOkValsShouldWork(t *testing.T) { - mes := createMockMessenger() + messenger := createMockMessenger() - _ = mes.CreateTopic("test", false) + _ = messenger.CreateTopic("test", false) - err := mes.RegisterMessageProcessor("test", "identifier", &mock.MessageProcessorStub{}) + err := messenger.RegisterMessageProcessor("test", "identifier", &mock.MessageProcessorStub{}) assert.Nil(t, err) - _ = mes.Close() + _ = 
messenger.Close() } func TestLibp2pMessenger_RegisterTopicValidatorReregistrationShouldErr(t *testing.T) { - mes := createMockMessenger() - _ = mes.CreateTopic("test", false) + messenger := createMockMessenger() + _ = messenger.CreateTopic("test", false) // registration - _ = mes.RegisterMessageProcessor("test", "identifier", &mock.MessageProcessorStub{}) + _ = messenger.RegisterMessageProcessor("test", "identifier", &mock.MessageProcessorStub{}) // re-registration - err := mes.RegisterMessageProcessor("test", "identifier", &mock.MessageProcessorStub{}) + err := messenger.RegisterMessageProcessor("test", "identifier", &mock.MessageProcessorStub{}) assert.True(t, errors.Is(err, p2p.ErrMessageProcessorAlreadyDefined)) - _ = mes.Close() + _ = messenger.Close() } func TestLibp2pMessenger_UnegisterTopicValidatorOnANotRegisteredTopicShouldNotErr(t *testing.T) { - mes := createMockMessenger() + messenger := createMockMessenger() - _ = mes.CreateTopic("test", false) - err := mes.UnregisterMessageProcessor("test", "identifier") + _ = messenger.CreateTopic("test", false) + err := messenger.UnregisterMessageProcessor("test", "identifier") assert.Nil(t, err) - _ = mes.Close() + _ = messenger.Close() } func TestLibp2pMessenger_UnregisterTopicValidatorShouldWork(t *testing.T) { - mes := createMockMessenger() + messenger := createMockMessenger() - _ = mes.CreateTopic("test", false) + _ = messenger.CreateTopic("test", false) // registration - _ = mes.RegisterMessageProcessor("test", "identifier", &mock.MessageProcessorStub{}) + _ = messenger.RegisterMessageProcessor("test", "identifier", &mock.MessageProcessorStub{}) // unregistration - err := mes.UnregisterMessageProcessor("test", "identifier") + err := messenger.UnregisterMessageProcessor("test", "identifier") assert.Nil(t, err) - _ = mes.Close() + _ = messenger.Close() } func TestLibp2pMessenger_UnregisterAllTopicValidatorShouldWork(t *testing.T) { - mes := createMockMessenger() - _ = mes.CreateTopic("test", false) + messenger := createMockMessenger() + _ = messenger.CreateTopic("test", false) // registration - _ = mes.CreateTopic("test1", false) - _ = mes.RegisterMessageProcessor("test1", "identifier", &mock.MessageProcessorStub{}) - _ = mes.CreateTopic("test2", false) - _ = mes.RegisterMessageProcessor("test2", "identifier", &mock.MessageProcessorStub{}) + _ = messenger.CreateTopic("test1", false) + _ = messenger.RegisterMessageProcessor("test1", "identifier", &mock.MessageProcessorStub{}) + _ = messenger.CreateTopic("test2", false) + _ = messenger.RegisterMessageProcessor("test2", "identifier", &mock.MessageProcessorStub{}) // unregistration - err := mes.UnregisterAllMessageProcessors() + err := messenger.UnregisterAllMessageProcessors() assert.Nil(t, err) - err = mes.RegisterMessageProcessor("test1", "identifier", &mock.MessageProcessorStub{}) + err = messenger.RegisterMessageProcessor("test1", "identifier", &mock.MessageProcessorStub{}) assert.Nil(t, err) - err = mes.RegisterMessageProcessor("test2", "identifier", &mock.MessageProcessorStub{}) + err = messenger.RegisterMessageProcessor("test2", "identifier", &mock.MessageProcessorStub{}) assert.Nil(t, err) - _ = mes.Close() + _ = messenger.Close() } func TestLibp2pMessenger_RegisterUnregisterConcurrentlyShouldNotPanic(t *testing.T) { @@ -435,9 +446,9 @@ func TestLibp2pMessenger_RegisterUnregisterConcurrentlyShouldNotPanic(t *testing } }() - mes := createMockMessenger() + messenger := createMockMessenger() topic := "test topic" - _ = mes.CreateTopic(topic, false) + _ = messenger.CreateTopic(topic, 
false) numIdentifiers := 100 identifiers := make([]string, 0, numIdentifiers) @@ -449,29 +460,29 @@ func TestLibp2pMessenger_RegisterUnregisterConcurrentlyShouldNotPanic(t *testing wg.Add(numIdentifiers * 3) for i := 0; i < numIdentifiers; i++ { go func(index int) { - _ = mes.RegisterMessageProcessor(topic, identifiers[index], &mock.MessageProcessorStub{}) + _ = messenger.RegisterMessageProcessor(topic, identifiers[index], &mock.MessageProcessorStub{}) wg.Done() }(i) go func(index int) { - _ = mes.UnregisterMessageProcessor(topic, identifiers[index]) + _ = messenger.UnregisterMessageProcessor(topic, identifiers[index]) wg.Done() }(i) go func() { - mes.Broadcast(topic, []byte("buff")) + messenger.Broadcast(topic, []byte("buff")) wg.Done() }() } wg.Wait() - _ = mes.Close() + _ = messenger.Close() } func TestLibp2pMessenger_BroadcastDataLargeMessageShouldNotCallSend(t *testing.T) { msg := make([]byte, libp2p.MaxSendBuffSize+1) - mes, _ := libp2p.NewNetworkMessenger(createMockNetworkArgs()) - mes.SetLoadBalancer(&mock.ChannelLoadBalancerStub{ + messenger, _ := libp2p.NewNetworkMessenger(createMockNetworkArgs()) + messenger.SetLoadBalancer(&mock.ChannelLoadBalancerStub{ GetChannelOrDefaultCalled: func(pipe string) chan *p2p.SendableData { assert.Fail(t, "should have not got to this line") @@ -482,21 +493,21 @@ func TestLibp2pMessenger_BroadcastDataLargeMessageShouldNotCallSend(t *testing.T }, }) - mes.Broadcast("topic", msg) + messenger.Broadcast("topic", msg) - _ = mes.Close() + _ = messenger.Close() } func TestLibp2pMessenger_BroadcastDataBetween2PeersShouldWork(t *testing.T) { msg := []byte("test message") - _, mes1, mes2 := createMockNetworkOf2() + _, messenger1, messenger2 := createMockNetworkOf2() - adr2 := mes2.Addresses()[0] + adr2 := messenger2.Addresses()[0] fmt.Printf("Connecting to %s...\n", adr2) - _ = mes1.ConnectToPeer(adr2) + _ = messenger1.ConnectToPeer(adr2) wg := &sync.WaitGroup{} chanDone := make(chan bool) @@ -507,20 +518,20 @@ func TestLibp2pMessenger_BroadcastDataBetween2PeersShouldWork(t *testing.T) { chanDone <- true }() - prepareMessengerForMatchDataReceive(mes1, msg, wg) - prepareMessengerForMatchDataReceive(mes2, msg, wg) + prepareMessengerForMatchDataReceive(messenger1, msg, wg) + prepareMessengerForMatchDataReceive(messenger2, msg, wg) fmt.Println("Delaying as to allow peers to announce themselves on the opened topic...") time.Sleep(time.Second) - fmt.Printf("sending message from %s...\n", mes1.ID().Pretty()) + fmt.Printf("sending message from %s...\n", messenger1.ID().Pretty()) - mes1.Broadcast("test", msg) + messenger1.Broadcast("test", msg) waitDoneWithTimeout(t, chanDone, timeoutWaitResponses) - _ = mes1.Close() - _ = mes2.Close() + _ = messenger1.Close() + _ = messenger2.Close() } func TestLibp2pMessenger_BroadcastOnChannelBlockingShouldLimitNumberOfGoRoutines(t *testing.T) { @@ -536,8 +547,8 @@ func TestLibp2pMessenger_BroadcastOnChannelBlockingShouldLimitNumberOfGoRoutines wg := sync.WaitGroup{} wg.Add(numBroadcasts) - mes, _ := libp2p.NewNetworkMessenger(createMockNetworkArgs()) - mes.SetLoadBalancer(&mock.ChannelLoadBalancerStub{ + messenger, _ := libp2p.NewNetworkMessenger(createMockNetworkArgs()) + messenger.SetLoadBalancer(&mock.ChannelLoadBalancerStub{ CollectOneElementFromChannelsCalled: func() *p2p.SendableData { return nil }, @@ -551,7 +562,7 @@ func TestLibp2pMessenger_BroadcastOnChannelBlockingShouldLimitNumberOfGoRoutines for i := 0; i < numBroadcasts; i++ { go func() { - err := mes.BroadcastOnChannelBlocking("test", "test", msg) + err := 
messenger.BroadcastOnChannelBlocking("test", "test", msg) if err == p2p.ErrTooManyGoroutines { atomic.AddUint32(&numErrors, 1) wg.Done() @@ -571,19 +582,19 @@ func TestLibp2pMessenger_BroadcastOnChannelBlockingShouldLimitNumberOfGoRoutines assert.True(t, atomic.LoadUint32(&numErrors) > 0) - _ = mes.Close() + _ = messenger.Close() } func TestLibp2pMessenger_BroadcastDataBetween2PeersWithLargeMsgShouldWork(t *testing.T) { msg := bytes.Repeat([]byte{'A'}, libp2p.MaxSendBuffSize) - _, mes1, mes2 := createMockNetworkOf2() + _, messenger1, messenger2 := createMockNetworkOf2() - adr2 := mes2.Addresses()[0] + adr2 := messenger2.Addresses()[0] fmt.Printf("Connecting to %s...\n", adr2) - _ = mes1.ConnectToPeer(adr2) + _ = messenger1.ConnectToPeer(adr2) wg := &sync.WaitGroup{} chanDone := make(chan bool) @@ -594,104 +605,104 @@ func TestLibp2pMessenger_BroadcastDataBetween2PeersWithLargeMsgShouldWork(t *tes chanDone <- true }() - prepareMessengerForMatchDataReceive(mes1, msg, wg) - prepareMessengerForMatchDataReceive(mes2, msg, wg) + prepareMessengerForMatchDataReceive(messenger1, msg, wg) + prepareMessengerForMatchDataReceive(messenger2, msg, wg) fmt.Println("Delaying as to allow peers to announce themselves on the opened topic...") time.Sleep(time.Second) - fmt.Printf("sending message from %s...\n", mes1.ID().Pretty()) + fmt.Printf("sending message from %s...\n", messenger1.ID().Pretty()) - mes1.Broadcast("test", msg) + messenger1.Broadcast("test", msg) waitDoneWithTimeout(t, chanDone, timeoutWaitResponses) - _ = mes1.Close() - _ = mes2.Close() + _ = messenger1.Close() + _ = messenger2.Close() } func TestLibp2pMessenger_Peers(t *testing.T) { - _, mes1, mes2 := createMockNetworkOf2() + _, messenger1, messenger2 := createMockNetworkOf2() - adr2 := mes2.Addresses()[0] + adr2 := messenger2.Addresses()[0] fmt.Printf("Connecting to %s...\n", adr2) - _ = mes1.ConnectToPeer(adr2) + _ = messenger1.ConnectToPeer(adr2) // should know both peers foundCurrent := false foundConnected := false - for _, p := range mes1.Peers() { + for _, p := range messenger1.Peers() { fmt.Println(p.Pretty()) - if p.Pretty() == mes1.ID().Pretty() { + if p.Pretty() == messenger1.ID().Pretty() { foundCurrent = true } - if p.Pretty() == mes2.ID().Pretty() { + if p.Pretty() == messenger2.ID().Pretty() { foundConnected = true } } assert.True(t, foundCurrent && foundConnected) - _ = mes1.Close() - _ = mes2.Close() + _ = messenger1.Close() + _ = messenger2.Close() } func TestLibp2pMessenger_ConnectedPeers(t *testing.T) { - netw, mes1, mes2 := createMockNetworkOf2() - mes3, _ := libp2p.NewMockMessenger(createMockNetworkArgs(), netw) + netw, messenger1, messenger2 := createMockNetworkOf2() + messenger3, _ := libp2p.NewMockMessenger(createMockNetworkArgs(), netw) _ = netw.LinkAll() - adr2 := mes2.Addresses()[0] + adr2 := messenger2.Addresses()[0] fmt.Printf("Connecting to %s...\n", adr2) - _ = mes1.ConnectToPeer(adr2) - _ = mes3.ConnectToPeer(adr2) + _ = messenger1.ConnectToPeer(adr2) + _ = messenger3.ConnectToPeer(adr2) // connected peers: 1 ----- 2 ----- 3 - assert.Equal(t, []core.PeerID{mes2.ID()}, mes1.ConnectedPeers()) - assert.Equal(t, []core.PeerID{mes2.ID()}, mes3.ConnectedPeers()) - assert.Equal(t, 2, len(mes2.ConnectedPeers())) - // no need to further test that mes2 is connected to mes1 and mes3 s this was tested in first 2 asserts + assert.Equal(t, []core.PeerID{messenger2.ID()}, messenger1.ConnectedPeers()) + assert.Equal(t, []core.PeerID{messenger2.ID()}, messenger3.ConnectedPeers()) + assert.Equal(t, 2, 
len(messenger2.ConnectedPeers())) + // no need to further test that messenger2 is connected to messenger1 and messenger3 as this was tested in first 2 asserts - _ = mes1.Close() - _ = mes2.Close() - _ = mes3.Close() + _ = messenger1.Close() + _ = messenger2.Close() + _ = messenger3.Close() } func TestLibp2pMessenger_ConnectedAddresses(t *testing.T) { - netw, mes1, mes2 := createMockNetworkOf2() - mes3, _ := libp2p.NewMockMessenger(createMockNetworkArgs(), netw) + netw, messenger1, messenger2 := createMockNetworkOf2() + messenger3, _ := libp2p.NewMockMessenger(createMockNetworkArgs(), netw) _ = netw.LinkAll() - adr2 := mes2.Addresses()[0] + adr2 := messenger2.Addresses()[0] fmt.Printf("Connecting to %s...\n", adr2) - _ = mes1.ConnectToPeer(adr2) - _ = mes3.ConnectToPeer(adr2) + _ = messenger1.ConnectToPeer(adr2) + _ = messenger3.ConnectToPeer(adr2) // connected peers: 1 ----- 2 ----- 3 foundAddr1 := false foundAddr3 := false - for _, addr := range mes2.ConnectedAddresses() { - for _, addrMes1 := range mes1.Addresses() { - if addr == addrMes1 { + for _, addr := range messenger2.ConnectedAddresses() { + for _, address := range messenger1.Addresses() { + if addr == address { foundAddr1 = true } } - for _, addrMes3 := range mes3.Addresses() { - if addr == addrMes3 { + for _, address := range messenger3.Addresses() { + if addr == address { foundAddr3 = true } } @@ -699,37 +710,37 @@ func TestLibp2pMessenger_ConnectedAddresses(t *testing.T) { assert.True(t, foundAddr1) assert.True(t, foundAddr3) - assert.Equal(t, 2, len(mes2.ConnectedAddresses())) - // no need to further test that mes2 is connected to mes1 and mes3 s this was tested in first 2 asserts + assert.Equal(t, 2, len(messenger2.ConnectedAddresses())) + // no need to further test that messenger2 is connected to messenger1 and messenger3 as this was tested in first 2 asserts - _ = mes1.Close() - _ = mes2.Close() - _ = mes3.Close() + _ = messenger1.Close() + _ = messenger2.Close() + _ = messenger3.Close() } func TestLibp2pMessenger_PeerAddressConnectedPeerShouldWork(t *testing.T) { - netw, mes1, mes2 := createMockNetworkOf2() - mes3, _ := libp2p.NewMockMessenger(createMockNetworkArgs(), netw) + netw, messenger1, messenger2 := createMockNetworkOf2() + messenger3, _ := libp2p.NewMockMessenger(createMockNetworkArgs(), netw) _ = netw.LinkAll() - adr2 := mes2.Addresses()[0] + adr2 := messenger2.Addresses()[0] fmt.Printf("Connecting to %s...\n", adr2) - _ = mes1.ConnectToPeer(adr2) - _ = mes3.ConnectToPeer(adr2) + _ = messenger1.ConnectToPeer(adr2) + _ = messenger3.ConnectToPeer(adr2) // connected peers: 1 ----- 2 ----- 3 defer func() { - _ = mes1.Close() - _ = mes2.Close() - _ = mes3.Close() + _ = messenger1.Close() + _ = messenger2.Close() + _ = messenger3.Close() }() - addressesRecov := mes2.PeerAddresses(mes1.ID()) - for _, addr := range mes1.Addresses() { + addressesRecov := messenger2.PeerAddresses(messenger1.ID()) + for _, addr := range messenger1.Addresses() { for _, addrRecov := range addressesRecov { if strings.Contains(addr, addrRecov) { // address returned is valid, test is successful @@ -743,7 +754,7 @@ func TestLibp2pMessenger_PeerAddressConnectedPeerShouldWork(t *testing.T) { func TestLibp2pMessenger_PeerAddressNotConnectedShouldReturnFromPeerstore(t *testing.T) { netw := mocknet.New(context.Background()) - mes, _ := libp2p.NewMockMessenger(createMockNetworkArgs(), netw) + messenger, _ := libp2p.NewMockMessenger(createMockNetworkArgs(), netw) networkHandler := &mock.NetworkStub{ ConnsCalled: func() []network.Conn { @@ -768,7 +779,7 
@@ func TestLibp2pMessenger_PeerAddressNotConnectedShouldReturnFromPeerstore(t *tes }, } - mes.SetHost(&mock.ConnectableHostStub{ + messenger.SetHost(&mock.ConnectableHostStub{ NetworkCalled: func() network.Network { return networkHandler }, @@ -777,225 +788,225 @@ func TestLibp2pMessenger_PeerAddressNotConnectedShouldReturnFromPeerstore(t *tes }, }) - addresses := mes.PeerAddresses("pid") + addresses := messenger.PeerAddresses("pid") require.Equal(t, 2, len(addresses)) assert.Equal(t, addresses[0], "multiaddress 1") assert.Equal(t, addresses[1], "multiaddress 2") } func TestLibp2pMessenger_PeerAddressDisconnectedPeerShouldWork(t *testing.T) { - netw, mes1, mes2 := createMockNetworkOf2() - mes3, _ := libp2p.NewMockMessenger(createMockNetworkArgs(), netw) + netw, messenger1, messenger2 := createMockNetworkOf2() + messenger3, _ := libp2p.NewMockMessenger(createMockNetworkArgs(), netw) _ = netw.LinkAll() - adr2 := mes2.Addresses()[0] + adr2 := messenger2.Addresses()[0] fmt.Printf("Connecting to %s...\n", adr2) - _ = mes1.ConnectToPeer(adr2) - _ = mes3.ConnectToPeer(adr2) + _ = messenger1.ConnectToPeer(adr2) + _ = messenger3.ConnectToPeer(adr2) defer func() { - _ = mes1.Close() - _ = mes2.Close() - _ = mes3.Close() + _ = messenger1.Close() + _ = messenger2.Close() + _ = messenger3.Close() }() - _ = netw.UnlinkPeers(peer.ID(mes1.ID().Bytes()), peer.ID(mes2.ID().Bytes())) - _ = netw.DisconnectPeers(peer.ID(mes1.ID().Bytes()), peer.ID(mes2.ID().Bytes())) - _ = netw.DisconnectPeers(peer.ID(mes2.ID().Bytes()), peer.ID(mes1.ID().Bytes())) + _ = netw.UnlinkPeers(peer.ID(messenger1.ID().Bytes()), peer.ID(messenger2.ID().Bytes())) + _ = netw.DisconnectPeers(peer.ID(messenger1.ID().Bytes()), peer.ID(messenger2.ID().Bytes())) + _ = netw.DisconnectPeers(peer.ID(messenger2.ID().Bytes()), peer.ID(messenger1.ID().Bytes())) // connected peers: 1 --x-- 2 ----- 3 - assert.False(t, mes2.IsConnected(mes1.ID())) + assert.False(t, messenger2.IsConnected(messenger1.ID())) } func TestLibp2pMessenger_PeerAddressUnknownPeerShouldReturnEmpty(t *testing.T) { - _, mes1, _ := createMockNetworkOf2() + _, messenger1, _ := createMockNetworkOf2() defer func() { - _ = mes1.Close() + _ = messenger1.Close() }() - adr1Recov := mes1.PeerAddresses("unknown peer") + adr1Recov := messenger1.PeerAddresses("unknown peer") assert.Equal(t, 0, len(adr1Recov)) } // ------- ConnectedPeersOnTopic func TestLibp2pMessenger_ConnectedPeersOnTopicInvalidTopicShouldRetEmptyList(t *testing.T) { - netw, mes1, mes2 := createMockNetworkOf2() - mes3, _ := libp2p.NewMockMessenger(createMockNetworkArgs(), netw) + netw, messenger1, messenger2 := createMockNetworkOf2() + messenger3, _ := libp2p.NewMockMessenger(createMockNetworkArgs(), netw) _ = netw.LinkAll() - adr2 := mes2.Addresses()[0] + adr2 := messenger2.Addresses()[0] fmt.Printf("Connecting to %s...\n", adr2) - _ = mes1.ConnectToPeer(adr2) - _ = mes3.ConnectToPeer(adr2) + _ = messenger1.ConnectToPeer(adr2) + _ = messenger3.ConnectToPeer(adr2) // connected peers: 1 ----- 2 ----- 3 - connPeers := mes1.ConnectedPeersOnTopic("non-existent topic") + connPeers := messenger1.ConnectedPeersOnTopic("non-existent topic") assert.Equal(t, 0, len(connPeers)) - _ = mes1.Close() - _ = mes2.Close() - _ = mes3.Close() + _ = messenger1.Close() + _ = messenger2.Close() + _ = messenger3.Close() } func TestLibp2pMessenger_ConnectedPeersOnTopicOneTopicShouldWork(t *testing.T) { - netw, mes1, mes2 := createMockNetworkOf2() - mes3, _ := libp2p.NewMockMessenger(createMockNetworkArgs(), netw) - mes4, _ := 
libp2p.NewMockMessenger(createMockNetworkArgs(), netw) + netw, messenger1, messenger2 := createMockNetworkOf2() + messenger3, _ := libp2p.NewMockMessenger(createMockNetworkArgs(), netw) + messenger4, _ := libp2p.NewMockMessenger(createMockNetworkArgs(), netw) _ = netw.LinkAll() - adr2 := mes2.Addresses()[0] + adr2 := messenger2.Addresses()[0] fmt.Printf("Connecting to %s...\n", adr2) - _ = mes1.ConnectToPeer(adr2) - _ = mes3.ConnectToPeer(adr2) - _ = mes4.ConnectToPeer(adr2) + _ = messenger1.ConnectToPeer(adr2) + _ = messenger3.ConnectToPeer(adr2) + _ = messenger4.ConnectToPeer(adr2) // connected peers: 1 ----- 2 ----- 3 // | // 4 // 1, 2, 3 should be on topic "topic123" - _ = mes1.CreateTopic("topic123", false) - _ = mes2.CreateTopic("topic123", false) - _ = mes3.CreateTopic("topic123", false) + _ = messenger1.CreateTopic("topic123", false) + _ = messenger2.CreateTopic("topic123", false) + _ = messenger3.CreateTopic("topic123", false) // wait a bit for topic announcements time.Sleep(time.Second) - peersOnTopic123 := mes2.ConnectedPeersOnTopic("topic123") + peersOnTopic123 := messenger2.ConnectedPeersOnTopic("topic123") assert.Equal(t, 2, len(peersOnTopic123)) - assert.True(t, containsPeerID(peersOnTopic123, mes1.ID())) - assert.True(t, containsPeerID(peersOnTopic123, mes3.ID())) + assert.True(t, containsPeerID(peersOnTopic123, messenger1.ID())) + assert.True(t, containsPeerID(peersOnTopic123, messenger3.ID())) - _ = mes1.Close() - _ = mes2.Close() - _ = mes3.Close() - _ = mes4.Close() + _ = messenger1.Close() + _ = messenger2.Close() + _ = messenger3.Close() + _ = messenger4.Close() } func TestLibp2pMessenger_ConnectedPeersOnTopicOneTopicDifferentViewsShouldWork(t *testing.T) { - netw, mes1, mes2 := createMockNetworkOf2() - mes3, _ := libp2p.NewMockMessenger(createMockNetworkArgs(), netw) - mes4, _ := libp2p.NewMockMessenger(createMockNetworkArgs(), netw) + netw, messenger1, messenger2 := createMockNetworkOf2() + messenger3, _ := libp2p.NewMockMessenger(createMockNetworkArgs(), netw) + messenger4, _ := libp2p.NewMockMessenger(createMockNetworkArgs(), netw) _ = netw.LinkAll() - adr2 := mes2.Addresses()[0] + adr2 := messenger2.Addresses()[0] fmt.Printf("Connecting to %s...\n", adr2) - _ = mes1.ConnectToPeer(adr2) - _ = mes3.ConnectToPeer(adr2) - _ = mes4.ConnectToPeer(adr2) + _ = messenger1.ConnectToPeer(adr2) + _ = messenger3.ConnectToPeer(adr2) + _ = messenger4.ConnectToPeer(adr2) // connected peers: 1 ----- 2 ----- 3 // | // 4 // 1, 2, 3 should be on topic "topic123" - _ = mes1.CreateTopic("topic123", false) - _ = mes2.CreateTopic("topic123", false) - _ = mes3.CreateTopic("topic123", false) + _ = messenger1.CreateTopic("topic123", false) + _ = messenger2.CreateTopic("topic123", false) + _ = messenger3.CreateTopic("topic123", false) // wait a bit for topic announcements time.Sleep(time.Second) - peersOnTopic123FromMes2 := mes2.ConnectedPeersOnTopic("topic123") - peersOnTopic123FromMes4 := mes4.ConnectedPeersOnTopic("topic123") + peersOnTopic123FromMessenger2 := messenger2.ConnectedPeersOnTopic("topic123") + peersOnTopic123FromMessenger4 := messenger4.ConnectedPeersOnTopic("topic123") // keep the same checks as the test above as to be 100% that the returned list are correct - assert.Equal(t, 2, len(peersOnTopic123FromMes2)) - assert.True(t, containsPeerID(peersOnTopic123FromMes2, mes1.ID())) - assert.True(t, containsPeerID(peersOnTopic123FromMes2, mes3.ID())) + assert.Equal(t, 2, len(peersOnTopic123FromMessenger2)) + assert.True(t, containsPeerID(peersOnTopic123FromMessenger2, 
messenger1.ID())) + assert.True(t, containsPeerID(peersOnTopic123FromMessenger2, messenger3.ID())) - assert.Equal(t, 1, len(peersOnTopic123FromMes4)) - assert.True(t, containsPeerID(peersOnTopic123FromMes4, mes2.ID())) + assert.Equal(t, 1, len(peersOnTopic123FromMessenger4)) + assert.True(t, containsPeerID(peersOnTopic123FromMessenger4, messenger2.ID())) - _ = mes1.Close() - _ = mes2.Close() - _ = mes3.Close() - _ = mes4.Close() + _ = messenger1.Close() + _ = messenger2.Close() + _ = messenger3.Close() + _ = messenger4.Close() } func TestLibp2pMessenger_ConnectedPeersOnTopicTwoTopicsShouldWork(t *testing.T) { - netw, mes1, mes2 := createMockNetworkOf2() - mes3, _ := libp2p.NewMockMessenger(createMockNetworkArgs(), netw) - mes4, _ := libp2p.NewMockMessenger(createMockNetworkArgs(), netw) + netw, messenger1, messenger2 := createMockNetworkOf2() + messenger3, _ := libp2p.NewMockMessenger(createMockNetworkArgs(), netw) + messenger4, _ := libp2p.NewMockMessenger(createMockNetworkArgs(), netw) _ = netw.LinkAll() - adr2 := mes2.Addresses()[0] + adr2 := messenger2.Addresses()[0] fmt.Printf("Connecting to %s...\n", adr2) - _ = mes1.ConnectToPeer(adr2) - _ = mes3.ConnectToPeer(adr2) - _ = mes4.ConnectToPeer(adr2) + _ = messenger1.ConnectToPeer(adr2) + _ = messenger3.ConnectToPeer(adr2) + _ = messenger4.ConnectToPeer(adr2) // connected peers: 1 ----- 2 ----- 3 // | // 4 // 1, 2, 3 should be on topic "topic123" // 2, 4 should be on topic "topic24" - _ = mes1.CreateTopic("topic123", false) - _ = mes2.CreateTopic("topic123", false) - _ = mes2.CreateTopic("topic24", false) - _ = mes3.CreateTopic("topic123", false) - _ = mes4.CreateTopic("topic24", false) + _ = messenger1.CreateTopic("topic123", false) + _ = messenger2.CreateTopic("topic123", false) + _ = messenger2.CreateTopic("topic24", false) + _ = messenger3.CreateTopic("topic123", false) + _ = messenger4.CreateTopic("topic24", false) // wait a bit for topic announcements time.Sleep(time.Second) - peersOnTopic123 := mes2.ConnectedPeersOnTopic("topic123") - peersOnTopic24 := mes2.ConnectedPeersOnTopic("topic24") + peersOnTopic123 := messenger2.ConnectedPeersOnTopic("topic123") + peersOnTopic24 := messenger2.ConnectedPeersOnTopic("topic24") // keep the same checks as the test above as to be 100% that the returned list are correct assert.Equal(t, 2, len(peersOnTopic123)) - assert.True(t, containsPeerID(peersOnTopic123, mes1.ID())) - assert.True(t, containsPeerID(peersOnTopic123, mes3.ID())) + assert.True(t, containsPeerID(peersOnTopic123, messenger1.ID())) + assert.True(t, containsPeerID(peersOnTopic123, messenger3.ID())) assert.Equal(t, 1, len(peersOnTopic24)) - assert.True(t, containsPeerID(peersOnTopic24, mes4.ID())) + assert.True(t, containsPeerID(peersOnTopic24, messenger4.ID())) - _ = mes1.Close() - _ = mes2.Close() - _ = mes3.Close() - _ = mes4.Close() + _ = messenger1.Close() + _ = messenger2.Close() + _ = messenger3.Close() + _ = messenger4.Close() } // ------- ConnectedFullHistoryPeersOnTopic func TestLibp2pMessenger_ConnectedFullHistoryPeersOnTopicShouldWork(t *testing.T) { - mes1, mes2, mes3 := createMockNetworkOf3() + messenger1, messenger2, messenger3 := createMockNetworkOf3() - adr2 := mes2.Addresses()[0] - adr3 := mes3.Addresses()[0] + adr2 := messenger2.Addresses()[0] + adr3 := messenger3.Addresses()[0] fmt.Println("Connecting ...") - _ = mes1.ConnectToPeer(adr2) - _ = mes3.ConnectToPeer(adr2) - _ = mes1.ConnectToPeer(adr3) + _ = messenger1.ConnectToPeer(adr2) + _ = messenger3.ConnectToPeer(adr2) + _ = messenger1.ConnectToPeer(adr3) // 
connected peers: 1 ----- 2 // | | // 3 ------+ - _ = mes1.CreateTopic("topic123", false) - _ = mes2.CreateTopic("topic123", false) - _ = mes3.CreateTopic("topic123", false) + _ = messenger1.CreateTopic("topic123", false) + _ = messenger2.CreateTopic("topic123", false) + _ = messenger3.CreateTopic("topic123", false) // wait a bit for topic announcements time.Sleep(time.Second) - assert.Equal(t, 2, len(mes1.ConnectedPeersOnTopic("topic123"))) - assert.Equal(t, 1, len(mes1.ConnectedFullHistoryPeersOnTopic("topic123"))) + assert.Equal(t, 2, len(messenger1.ConnectedPeersOnTopic("topic123"))) + assert.Equal(t, 1, len(messenger1.ConnectedFullHistoryPeersOnTopic("topic123"))) - assert.Equal(t, 2, len(mes2.ConnectedPeersOnTopic("topic123"))) - assert.Equal(t, 1, len(mes2.ConnectedFullHistoryPeersOnTopic("topic123"))) + assert.Equal(t, 2, len(messenger2.ConnectedPeersOnTopic("topic123"))) + assert.Equal(t, 1, len(messenger2.ConnectedFullHistoryPeersOnTopic("topic123"))) - assert.Equal(t, 2, len(mes3.ConnectedPeersOnTopic("topic123"))) - assert.Equal(t, 2, len(mes3.ConnectedFullHistoryPeersOnTopic("topic123"))) + assert.Equal(t, 2, len(messenger3.ConnectedPeersOnTopic("topic123"))) + assert.Equal(t, 2, len(messenger3.ConnectedFullHistoryPeersOnTopic("topic123"))) - _ = mes1.Close() - _ = mes2.Close() - _ = mes3.Close() + _ = messenger1.Close() + _ = messenger2.Close() + _ = messenger3.Close() } func TestLibp2pMessenger_ConnectedPeersShouldReturnUniquePeers(t *testing.T) { @@ -1068,13 +1079,13 @@ func generateConnWithRemotePeer(pid core.PeerID) network.Conn { func TestLibp2pMessenger_SendDirectWithMockNetToConnectedPeerShouldWork(t *testing.T) { msg := []byte("test message") - _, mes1, mes2 := createMockNetworkOf2() + _, messenger1, messenger2 := createMockNetworkOf2() - adr2 := mes2.Addresses()[0] + adr2 := messenger2.Addresses()[0] fmt.Printf("Connecting to %s...\n", adr2) - _ = mes1.ConnectToPeer(adr2) + _ = messenger1.ConnectToPeer(adr2) wg := &sync.WaitGroup{} chanDone := make(chan bool) @@ -1085,33 +1096,33 @@ func TestLibp2pMessenger_SendDirectWithMockNetToConnectedPeerShouldWork(t *testi chanDone <- true }() - prepareMessengerForMatchDataReceive(mes2, msg, wg) + prepareMessengerForMatchDataReceive(messenger2, msg, wg) fmt.Println("Delaying as to allow peers to announce themselves on the opened topic...") time.Sleep(time.Second) - fmt.Printf("sending message from %s...\n", mes1.ID().Pretty()) + fmt.Printf("sending message from %s...\n", messenger1.ID().Pretty()) - err := mes1.SendToConnectedPeer("test", msg, mes2.ID()) + err := messenger1.SendToConnectedPeer("test", msg, messenger2.ID()) assert.Nil(t, err) waitDoneWithTimeout(t, chanDone, timeoutWaitResponses) - _ = mes1.Close() - _ = mes2.Close() + _ = messenger1.Close() + _ = messenger2.Close() } func TestLibp2pMessenger_SendDirectWithRealNetToConnectedPeerShouldWork(t *testing.T) { msg := []byte("test message") fmt.Println("Messenger 1:") - mes1, _ := libp2p.NewNetworkMessenger(createMockNetworkArgs()) + messenger1, _ := libp2p.NewNetworkMessenger(createMockNetworkArgs()) fmt.Println("Messenger 2:") - mes2, _ := libp2p.NewNetworkMessenger(createMockNetworkArgs()) + messenger2, _ := libp2p.NewNetworkMessenger(createMockNetworkArgs()) - err := mes1.ConnectToPeer(getConnectableAddress(mes2)) + err := messenger1.ConnectToPeer(getConnectableAddress(messenger2)) assert.Nil(t, err) wg := &sync.WaitGroup{} @@ -1123,25 +1134,25 @@ func TestLibp2pMessenger_SendDirectWithRealNetToConnectedPeerShouldWork(t *testi chanDone <- true }() - 
prepareMessengerForMatchDataReceive(mes1, msg, wg) - prepareMessengerForMatchDataReceive(mes2, msg, wg) + prepareMessengerForMatchDataReceive(messenger1, msg, wg) + prepareMessengerForMatchDataReceive(messenger2, msg, wg) fmt.Println("Delaying as to allow peers to announce themselves on the opened topic...") time.Sleep(time.Second) - fmt.Printf("Messenger 1 is sending message from %s...\n", mes1.ID().Pretty()) - err = mes1.SendToConnectedPeer("test", msg, mes2.ID()) + fmt.Printf("Messenger 1 is sending message from %s...\n", messenger1.ID().Pretty()) + err = messenger1.SendToConnectedPeer("test", msg, messenger2.ID()) assert.Nil(t, err) time.Sleep(time.Second) - fmt.Printf("Messenger 2 is sending message from %s...\n", mes2.ID().Pretty()) - err = mes2.SendToConnectedPeer("test", msg, mes1.ID()) + fmt.Printf("Messenger 2 is sending message from %s...\n", messenger2.ID().Pretty()) + err = messenger2.SendToConnectedPeer("test", msg, messenger1.ID()) assert.Nil(t, err) waitDoneWithTimeout(t, chanDone, timeoutWaitResponses) - _ = mes1.Close() - _ = mes2.Close() + _ = messenger1.Close() + _ = messenger2.Close() } func TestLibp2pMessenger_SendDirectWithRealNetToSelfShouldWork(t *testing.T) { @@ -1200,88 +1211,88 @@ func TestNetworkMessenger_BootstrapPeerDiscoveryShouldCallPeerBootstrapper(t *te // ------- SetThresholdMinConnectedPeers func TestNetworkMessenger_SetThresholdMinConnectedPeersInvalidValueShouldErr(t *testing.T) { - mes := createMockMessenger() + messenger := createMockMessenger() defer func() { - _ = mes.Close() + _ = messenger.Close() }() - err := mes.SetThresholdMinConnectedPeers(-1) + err := messenger.SetThresholdMinConnectedPeers(-1) assert.Equal(t, p2p.ErrInvalidValue, err) } func TestNetworkMessenger_SetThresholdMinConnectedPeersShouldWork(t *testing.T) { - mes := createMockMessenger() + messenger := createMockMessenger() defer func() { - _ = mes.Close() + _ = messenger.Close() }() minConnectedPeers := 56 - err := mes.SetThresholdMinConnectedPeers(minConnectedPeers) + err := messenger.SetThresholdMinConnectedPeers(minConnectedPeers) assert.Nil(t, err) - assert.Equal(t, minConnectedPeers, mes.ThresholdMinConnectedPeers()) + assert.Equal(t, minConnectedPeers, messenger.ThresholdMinConnectedPeers()) } // ------- IsConnectedToTheNetwork func TestNetworkMessenger_IsConnectedToTheNetworkRetFalse(t *testing.T) { - mes := createMockMessenger() + messenger := createMockMessenger() defer func() { - _ = mes.Close() + _ = messenger.Close() }() minConnectedPeers := 56 - _ = mes.SetThresholdMinConnectedPeers(minConnectedPeers) + _ = messenger.SetThresholdMinConnectedPeers(minConnectedPeers) - assert.False(t, mes.IsConnectedToTheNetwork()) + assert.False(t, messenger.IsConnectedToTheNetwork()) } func TestNetworkMessenger_IsConnectedToTheNetworkWithZeroRetTrue(t *testing.T) { - mes := createMockMessenger() + messenger := createMockMessenger() defer func() { - _ = mes.Close() + _ = messenger.Close() }() minConnectedPeers := 0 - _ = mes.SetThresholdMinConnectedPeers(minConnectedPeers) + _ = messenger.SetThresholdMinConnectedPeers(minConnectedPeers) - assert.True(t, mes.IsConnectedToTheNetwork()) + assert.True(t, messenger.IsConnectedToTheNetwork()) } // ------- SetPeerShardResolver func TestNetworkMessenger_SetPeerShardResolverNilShouldErr(t *testing.T) { - mes := createMockMessenger() + messenger := createMockMessenger() defer func() { - _ = mes.Close() + _ = messenger.Close() }() - err := mes.SetPeerShardResolver(nil) + err := messenger.SetPeerShardResolver(nil) assert.Equal(t, 
p2p.ErrNilPeerShardResolver, err) } func TestNetworkMessenger_SetPeerShardResolver(t *testing.T) { - mes := createMockMessenger() + messenger := createMockMessenger() defer func() { - _ = mes.Close() + _ = messenger.Close() }() - err := mes.SetPeerShardResolver(&mock.PeerShardResolverStub{}) + err := messenger.SetPeerShardResolver(&mock.PeerShardResolverStub{}) assert.Nil(t, err) } func TestNetworkMessenger_DoubleCloseShouldWork(t *testing.T) { - mes := createMessenger() + messenger := createMessenger() time.Sleep(time.Second) - err := mes.Close() + err := messenger.Close() assert.Nil(t, err) - err = mes.Close() + err = messenger.Close() assert.Nil(t, err) } @@ -1303,6 +1314,7 @@ func TestNetworkMessenger_PreventReprocessingShouldWork(t *testing.T) { }, SyncTimer: &libp2p.LocalSyncTimer{}, PreferredPeersHolder: &p2pmocks.PeersHolderStub{}, + PeersRatingHandler: &p2pmocks.PeersRatingHandlerStub{}, } mes, _ := libp2p.NewNetworkMessenger(args) @@ -1368,6 +1380,7 @@ func TestNetworkMessenger_PubsubCallbackNotMessageNotValidShouldNotCallHandler(t }, SyncTimer: &libp2p.LocalSyncTimer{}, PreferredPeersHolder: &p2pmocks.PeersHolderStub{}, + PeersRatingHandler: &p2pmocks.PeersRatingHandlerStub{}, } mes, _ := libp2p.NewNetworkMessenger(args) @@ -1440,6 +1453,7 @@ func TestNetworkMessenger_PubsubCallbackReturnsFalseIfHandlerErrors(t *testing.T }, SyncTimer: &libp2p.LocalSyncTimer{}, PreferredPeersHolder: &p2pmocks.PeersHolderStub{}, + PeersRatingHandler: &p2pmocks.PeersRatingHandlerStub{}, } mes, _ := libp2p.NewNetworkMessenger(args) @@ -1503,6 +1517,7 @@ func TestNetworkMessenger_UnjoinAllTopicsShouldWork(t *testing.T) { }, SyncTimer: &libp2p.LocalSyncTimer{}, PreferredPeersHolder: &p2pmocks.PeersHolderStub{}, + PeersRatingHandler: &p2pmocks.PeersRatingHandlerStub{}, } mes, _ := libp2p.NewNetworkMessenger(args) @@ -1703,18 +1718,18 @@ func TestNetworkMessenger_ChooseAnotherPortIfBindFails(t *testing.T) { time.Sleep(time.Second) mutMessengers.Lock() - for index1, mes1 := range messengers { - for index2, mes2 := range messengers { + for index1, messenger1 := range messengers { + for index2, messenger2 := range messengers { if index1 == index2 { continue } - assert.NotEqual(t, mes1.Port(), mes2.Port()) + assert.NotEqual(t, messenger1.Port(), messenger2.Port()) } } - for _, mes := range messengers { - _ = mes.Close() + for _, messenger := range messengers { + _ = messenger.Close() } mutMessengers.Unlock() } @@ -1757,7 +1772,9 @@ func TestNetworkMessenger_Bootstrap(t *testing.T) { Type: "NilListSharder", }, }, - SyncTimer: &mock.SyncTimerStub{}, + SyncTimer: &mock.SyncTimerStub{}, + PeersRatingHandler: &p2pmocks.PeersRatingHandlerStub{}, + PreferredPeersHolder: &p2pmocks.PeersHolderStub{}, } netMes, err := libp2p.NewNetworkMessenger(args) @@ -1844,3 +1861,121 @@ func TestNetworkMessenger_WaitForConnections(t *testing.T) { assert.True(t, timeToWait < time.Since(startTime)) }) } + +func TestLibp2pMessenger_SignVerifyPayloadShouldWork(t *testing.T) { + fmt.Println("Messenger 1:") + messenger1, _ := libp2p.NewNetworkMessenger(createMockNetworkArgs()) + + fmt.Println("Messenger 2:") + messenger2, _ := libp2p.NewNetworkMessenger(createMockNetworkArgs()) + + err := messenger1.ConnectToPeer(getConnectableAddress(messenger2)) + assert.Nil(t, err) + + defer func() { + _ = messenger1.Close() + _ = messenger2.Close() + }() + + payload := []byte("payload") + sig, err := messenger1.Sign(payload) + assert.Nil(t, err) + + err = messenger2.Verify(payload, messenger1.ID(), sig) + assert.Nil(t, err) + + err = 
messenger1.Verify(payload, messenger1.ID(), sig) + assert.Nil(t, err) +} + +func TestLibp2pMessenger_ConnectionTopic(t *testing.T) { + t.Parallel() + + t.Run("create topic should work", func(t *testing.T) { + t.Parallel() + + netMes, _ := libp2p.NewNetworkMessenger(createMockNetworkArgs()) + + topic := common.ConnectionTopic + err := netMes.CreateTopic(topic, true) + assert.Nil(t, err) + assert.False(t, netMes.HasTopic(topic)) + assert.False(t, netMes.PubsubHasTopic(topic)) + + testTopic := "test topic" + err = netMes.CreateTopic(testTopic, true) + assert.Nil(t, err) + assert.True(t, netMes.HasTopic(testTopic)) + assert.True(t, netMes.PubsubHasTopic(testTopic)) + + err = netMes.UnjoinAllTopics() + assert.Nil(t, err) + assert.False(t, netMes.HasTopic(topic)) + assert.False(t, netMes.PubsubHasTopic(topic)) + assert.False(t, netMes.HasTopic(testTopic)) + assert.False(t, netMes.PubsubHasTopic(testTopic)) + + _ = netMes.Close() + }) + t.Run("register-unregister message processor should work", func(t *testing.T) { + t.Parallel() + + netMes, _ := libp2p.NewNetworkMessenger(createMockNetworkArgs()) + + identifier := "identifier" + topic := common.ConnectionTopic + err := netMes.RegisterMessageProcessor(topic, identifier, &mock.MessageProcessorStub{}) + assert.Nil(t, err) + assert.True(t, netMes.HasProcessorForTopic(topic)) + + err = netMes.UnregisterMessageProcessor(topic, identifier) + assert.Nil(t, err) + assert.False(t, netMes.HasProcessorForTopic(topic)) + + _ = netMes.Close() + }) + t.Run("unregister all processors should work", func(t *testing.T) { + t.Parallel() + + netMes, _ := libp2p.NewNetworkMessenger(createMockNetworkArgs()) + + topic := common.ConnectionTopic + err := netMes.RegisterMessageProcessor(topic, "identifier", &mock.MessageProcessorStub{}) + assert.Nil(t, err) + assert.True(t, netMes.HasProcessorForTopic(topic)) + + testTopic := "test topic" + err = netMes.RegisterMessageProcessor(testTopic, "identifier", &mock.MessageProcessorStub{}) + assert.Nil(t, err) + assert.True(t, netMes.HasProcessorForTopic(testTopic)) + + err = netMes.UnregisterAllMessageProcessors() + assert.Nil(t, err) + assert.False(t, netMes.HasProcessorForTopic(topic)) + assert.False(t, netMes.HasProcessorForTopic(testTopic)) + + _ = netMes.Close() + }) + t.Run("unregister all processors should work", func(t *testing.T) { + t.Parallel() + + netMes, _ := libp2p.NewNetworkMessenger(createMockNetworkArgs()) + + topic := common.ConnectionTopic + err := netMes.RegisterMessageProcessor(topic, "identifier", &mock.MessageProcessorStub{}) + assert.Nil(t, err) + assert.True(t, netMes.HasProcessorForTopic(topic)) + + testTopic := "test topic" + err = netMes.RegisterMessageProcessor(testTopic, "identifier", &mock.MessageProcessorStub{}) + assert.Nil(t, err) + assert.True(t, netMes.HasProcessorForTopic(testTopic)) + + err = netMes.UnregisterAllMessageProcessors() + assert.Nil(t, err) + assert.False(t, netMes.HasProcessorForTopic(topic)) + assert.False(t, netMes.HasProcessorForTopic(testTopic)) + + _ = netMes.Close() + }) +} diff --git a/p2p/libp2p/networksharding/listsSharder_test.go b/p2p/libp2p/networksharding/listsSharder_test.go index a27026c8f33..0470db2fadf 100644 --- a/p2p/libp2p/networksharding/listsSharder_test.go +++ b/p2p/libp2p/networksharding/listsSharder_test.go @@ -9,10 +9,10 @@ import ( "github.com/ElrondNetwork/elrond-go-core/core" "github.com/ElrondNetwork/elrond-go-core/core/check" - "github.com/ElrondNetwork/elrond-go-core/core/peersholder" "github.com/ElrondNetwork/elrond-go/config" 
"github.com/ElrondNetwork/elrond-go/p2p" "github.com/ElrondNetwork/elrond-go/p2p/mock" + "github.com/ElrondNetwork/elrond-go/p2p/peersHolder" "github.com/ElrondNetwork/elrond-go/testscommon/p2pmocks" "github.com/libp2p/go-libp2p-core/peer" "github.com/stretchr/testify/assert" @@ -422,33 +422,31 @@ func TestListsSharder_ComputeEvictionListShouldNotContainPreferredPeers(t *testi func TestListsSharder_ComputeEvictionListWithRealPreferredPeersHandler(t *testing.T) { arg := createMockListSharderArguments() - prefP0 := hex.EncodeToString([]byte("preferredPeer0")) - prefP1 := hex.EncodeToString([]byte("preferredPeer1")) - prefP2 := hex.EncodeToString([]byte("preferredPeer2")) - preferredHexPrefix := hex.EncodeToString([]byte("preferred")) + preferredHexPrefix := "preferred" + prefP0 := preferredHexPrefix + "preferredPeer0" + prefP1 := preferredHexPrefix + "preferredPeer1" + prefP2 := preferredHexPrefix + "preferredPeer2" pubKeyHexSuffix := hex.EncodeToString([]byte("pubKey")) pids := []peer.ID{ - peer.ID(prefP0), + peer.ID(core.PeerID(prefP0).Pretty()), "peer0", "peer1", - peer.ID(prefP1), + peer.ID(core.PeerID(prefP1).Pretty()), "peer2", - peer.ID(prefP2), + peer.ID(core.PeerID(prefP2).Pretty()), } - prefP0PkBytes, _ := hex.DecodeString(prefP0 + pubKeyHexSuffix) - prefP1PkBytes, _ := hex.DecodeString(prefP1 + pubKeyHexSuffix) - prefP2PkBytes, _ := hex.DecodeString(prefP2 + pubKeyHexSuffix) - prefPeers := [][]byte{ - prefP0PkBytes, - prefP1PkBytes, - prefP2PkBytes, + prefPeers := []string{ + core.PeerID(prefP0).Pretty(), + core.PeerID(prefP1).Pretty(), + core.PeerID(prefP2).Pretty(), } - arg.PreferredPeersHolder = peersholder.NewPeersHolder(prefPeers) - for _, prefPk := range prefPeers { - pid := strings.Replace(hex.EncodeToString(prefPk), pubKeyHexSuffix, "", 1) - arg.PreferredPeersHolder.Put(prefPk, core.PeerID(pid), 0) + arg.PreferredPeersHolder, _ = peersHolder.NewPeersHolder(prefPeers) + for _, prefPid := range prefPeers { + peerId := core.PeerID(prefPid) + arg.PreferredPeersHolder.PutConnectionAddress(peerId, prefPid) + arg.PreferredPeersHolder.PutShardID(peerId, 0) } arg.PeerResolver = &mock.PeerShardResolverStub{ @@ -476,21 +474,21 @@ func TestListsSharder_ComputeEvictionListWithRealPreferredPeersHandler(t *testin require.False(t, strings.HasPrefix(string(peerID), preferredHexPrefix)) } - found := arg.PreferredPeersHolder.Contains(core.PeerID(prefP0)) + found := arg.PreferredPeersHolder.Contains(core.PeerID(peer.ID(prefP0).Pretty())) require.True(t, found) - found = arg.PreferredPeersHolder.Contains(core.PeerID(prefP1)) + found = arg.PreferredPeersHolder.Contains(core.PeerID(peer.ID(prefP1).Pretty())) require.True(t, found) - found = arg.PreferredPeersHolder.Contains(core.PeerID(prefP2)) + found = arg.PreferredPeersHolder.Contains(core.PeerID(peer.ID(prefP2).Pretty())) require.True(t, found) peers := arg.PreferredPeersHolder.Get() expectedMap := map[uint32][]core.PeerID{ 0: { - core.PeerID(prefP0), - core.PeerID(prefP1), - core.PeerID(prefP2), + core.PeerID(peer.ID(prefP0).Pretty()), + core.PeerID(peer.ID(prefP1).Pretty()), + core.PeerID(peer.ID(prefP2).Pretty()), }, } require.Equal(t, expectedMap, peers) diff --git a/p2p/libp2p/p2pSigner.go b/p2p/libp2p/p2pSigner.go new file mode 100644 index 00000000000..3be693c95fb --- /dev/null +++ b/p2p/libp2p/p2pSigner.go @@ -0,0 +1,42 @@ +package libp2p + +import ( + "fmt" + + "github.com/ElrondNetwork/elrond-go-core/core" + crypto "github.com/ElrondNetwork/elrond-go-crypto" + libp2pCrypto "github.com/libp2p/go-libp2p-core/crypto" + 
"github.com/libp2p/go-libp2p-core/peer" +) + +type p2pSigner struct { + privateKey *libp2pCrypto.Secp256k1PrivateKey +} + +// Sign will sign a payload with the internal private key +func (signer *p2pSigner) Sign(payload []byte) ([]byte, error) { + return signer.privateKey.Sign(payload) +} + +// Verify will check that the (payload, peer ID, signature) tuple is valid or not +func (signer *p2pSigner) Verify(payload []byte, pid core.PeerID, signature []byte) error { + libp2pPid, err := peer.IDFromBytes(pid.Bytes()) + if err != nil { + return err + } + + pubk, err := libp2pPid.ExtractPublicKey() + if err != nil { + return fmt.Errorf("cannot extract signing key: %s", err.Error()) + } + + sigOk, err := pubk.Verify(payload, signature) + if err != nil { + return err + } + if !sigOk { + return crypto.ErrInvalidSignature + } + + return nil +} diff --git a/p2p/libp2p/p2pSigner_test.go b/p2p/libp2p/p2pSigner_test.go new file mode 100644 index 00000000000..e373c00a082 --- /dev/null +++ b/p2p/libp2p/p2pSigner_test.go @@ -0,0 +1,128 @@ +package libp2p + +import ( + "crypto/ecdsa" + cryptoRand "crypto/rand" + "sync" + "testing" + "time" + + "github.com/ElrondNetwork/elrond-go-core/core" + crypto "github.com/ElrondNetwork/elrond-go-crypto" + "github.com/btcsuite/btcd/btcec" + libp2pCrypto "github.com/libp2p/go-libp2p-core/crypto" + "github.com/libp2p/go-libp2p-core/peer" + "github.com/stretchr/testify/assert" +) + +func generatePrivateKey() *libp2pCrypto.Secp256k1PrivateKey { + prvKey, _ := ecdsa.GenerateKey(btcec.S256(), cryptoRand.Reader) + + return (*libp2pCrypto.Secp256k1PrivateKey)(prvKey) +} + +func TestP2pSigner_Sign(t *testing.T) { + t.Parallel() + + signer := &p2pSigner{ + privateKey: generatePrivateKey(), + } + + sig, err := signer.Sign([]byte("payload")) + assert.Nil(t, err) + assert.NotNil(t, sig) +} + +func TestP2pSigner_Verify(t *testing.T) { + t.Parallel() + + sk := generatePrivateKey() + pk := sk.GetPublic() + payload := []byte("payload") + signer := &p2pSigner{ + privateKey: sk, + } + libp2pPid, _ := peer.IDFromPublicKey(pk) + + t.Run("invalid public key should error", func(t *testing.T) { + t.Parallel() + + sig, err := signer.Sign(payload) + assert.Nil(t, err) + + err = signer.Verify(payload, "invalid PK", sig) + assert.NotNil(t, err) + assert.Equal(t, "length greater than remaining number of bytes in buffer", err.Error()) + }) + t.Run("malformed signature header should error", func(t *testing.T) { + t.Parallel() + + sig, err := signer.Sign(payload) + assert.Nil(t, err) + + sig[0] = sig[0] ^ sig[1] ^ sig[2] + + err = signer.Verify(payload, core.PeerID(libp2pPid), sig) + assert.NotNil(t, err) + assert.Equal(t, "malformed signature: no header magic", err.Error()) + }) + t.Run("altered signature should error", func(t *testing.T) { + t.Parallel() + + sig, err := signer.Sign(payload) + assert.Nil(t, err) + + sig[len(sig)-1] = sig[0] ^ sig[1] ^ sig[2] + + err = signer.Verify(payload, core.PeerID(libp2pPid), sig) + assert.Equal(t, crypto.ErrInvalidSignature, err) + }) + t.Run("sign and verify should work", func(t *testing.T) { + t.Parallel() + + sig, err := signer.Sign(payload) + assert.Nil(t, err) + + err = signer.Verify(payload, core.PeerID(libp2pPid), sig) + assert.Nil(t, err) + }) +} + +func TestP2pSigner_ConcurrentOperations(t *testing.T) { + t.Parallel() + + numOps := 1000 + wg := sync.WaitGroup{} + wg.Add(numOps) + + sk := generatePrivateKey() + pk := sk.GetPublic() + payload1 := []byte("payload1") + payload2 := []byte("payload2") + signer := &p2pSigner{ + privateKey: sk, + } + libp2pPid, 
_ := peer.IDFromPublicKey(pk) + pid := core.PeerID(libp2pPid) + + sig1, _ := signer.Sign(payload1) + + for i := 0; i < numOps; i++ { + go func(idx int) { + time.Sleep(time.Millisecond * 10) + + switch idx { + case 0: + _, errSign := signer.Sign(payload2) + assert.Nil(t, errSign) + case 1: + errVerify := signer.Verify(payload1, pid, sig1) + assert.Nil(t, errVerify) + } + + wg.Done() + }(i % 2) + } + + wg.Wait() +} diff --git a/p2p/libp2p/peersOnChannel.go b/p2p/libp2p/peersOnChannel.go index 0ecc03287a4..01ae7be96b3 100644 --- a/p2p/libp2p/peersOnChannel.go +++ b/p2p/libp2p/peersOnChannel.go @@ -6,6 +6,7 @@ import ( "time" "github.com/ElrondNetwork/elrond-go-core/core" + "github.com/ElrondNetwork/elrond-go-core/core/check" "github.com/ElrondNetwork/elrond-go/p2p" "github.com/libp2p/go-libp2p-core/peer" ) @@ -13,9 +14,10 @@ import ( // peersOnChannel manages peers on topics // it buffers the data and refresh the peers list continuously (in refreshInterval intervals) type peersOnChannel struct { - mutPeers sync.RWMutex - peers map[string][]core.PeerID - lastUpdated map[string]time.Time + mutPeers sync.RWMutex + peersRatingHandler p2p.PeersRatingHandler + peers map[string][]core.PeerID + lastUpdated map[string]time.Time refreshInterval time.Duration ttlInterval time.Duration @@ -26,11 +28,15 @@ type peersOnChannel struct { // newPeersOnChannel returns a new peersOnChannel object func newPeersOnChannel( + peersRatingHandler p2p.PeersRatingHandler, fetchPeersHandler func(topic string) []peer.ID, refreshInterval time.Duration, ttlInterval time.Duration, ) (*peersOnChannel, error) { + if check.IfNil(peersRatingHandler) { + return nil, p2p.ErrNilPeersRatingHandler + } if fetchPeersHandler == nil { return nil, p2p.ErrNilFetchPeersOnTopicHandler } @@ -44,12 +50,13 @@ func newPeersOnChannel( ctx, cancelFunc := context.WithCancel(context.Background()) poc := &peersOnChannel{ - peers: make(map[string][]core.PeerID), - lastUpdated: make(map[string]time.Time), - refreshInterval: refreshInterval, - ttlInterval: ttlInterval, - fetchPeersHandler: fetchPeersHandler, - cancelFunc: cancelFunc, + peersRatingHandler: peersRatingHandler, + peers: make(map[string][]core.PeerID), + lastUpdated: make(map[string]time.Time), + refreshInterval: refreshInterval, + ttlInterval: ttlInterval, + fetchPeersHandler: fetchPeersHandler, + cancelFunc: cancelFunc, } poc.getTimeHandler = poc.clockTime @@ -118,7 +125,9 @@ func (poc *peersOnChannel) refreshPeersOnTopic(topic string) []core.PeerID { list := poc.fetchPeersHandler(topic) connectedPeers := make([]core.PeerID, len(list)) for i, pid := range list { - connectedPeers[i] = core.PeerID(pid) + peerID := core.PeerID(pid) + connectedPeers[i] = peerID + poc.peersRatingHandler.AddPeer(peerID) } poc.updateConnectedPeersOnTopic(topic, connectedPeers) diff --git a/p2p/libp2p/peersOnChannel_test.go b/p2p/libp2p/peersOnChannel_test.go index 412121d13ea..43a363ac2aa 100644 --- a/p2p/libp2p/peersOnChannel_test.go +++ b/p2p/libp2p/peersOnChannel_test.go @@ -7,14 +7,24 @@ import ( "github.com/ElrondNetwork/elrond-go-core/core" "github.com/ElrondNetwork/elrond-go/p2p" + "github.com/ElrondNetwork/elrond-go/testscommon/p2pmocks" "github.com/libp2p/go-libp2p-core/peer" "github.com/stretchr/testify/assert" ) +func TestNewPeersOnChannel_NilPeersRatingHandlerShouldErr(t *testing.T) { + t.Parallel() + + poc, err := newPeersOnChannel(nil, nil, 1, 1) + + assert.Nil(t, poc) + assert.Equal(t, p2p.ErrNilPeersRatingHandler, err) +} + func TestNewPeersOnChannel_NilFetchPeersHandlerShouldErr(t *testing.T) 
{ t.Parallel() - poc, err := newPeersOnChannel(nil, 1, 1) + poc, err := newPeersOnChannel(&p2pmocks.PeersRatingHandlerStub{}, nil, 1, 1) assert.Nil(t, poc) assert.Equal(t, p2p.ErrNilFetchPeersOnTopicHandler, err) @@ -24,6 +34,7 @@ func TestNewPeersOnChannel_InvalidRefreshIntervalShouldErr(t *testing.T) { t.Parallel() poc, err := newPeersOnChannel( + &p2pmocks.PeersRatingHandlerStub{}, func(topic string) []peer.ID { return nil }, @@ -38,6 +49,7 @@ func TestNewPeersOnChannel_InvalidTTLIntervalShouldErr(t *testing.T) { t.Parallel() poc, err := newPeersOnChannel( + &p2pmocks.PeersRatingHandlerStub{}, func(topic string) []peer.ID { return nil }, @@ -52,6 +64,7 @@ func TestNewPeersOnChannel_OkValsShouldWork(t *testing.T) { t.Parallel() poc, err := newPeersOnChannel( + &p2pmocks.PeersRatingHandlerStub{}, func(topic string) []peer.ID { return nil }, @@ -71,6 +84,7 @@ func TestPeersOnChannel_ConnectedPeersOnChannelMissingTopicShouldTriggerFetchAnd wasFetchCalled.Store(false) poc, _ := newPeersOnChannel( + &p2pmocks.PeersRatingHandlerStub{}, func(topic string) []peer.ID { if topic == testTopic { wasFetchCalled.Store(true) @@ -99,6 +113,7 @@ func TestPeersOnChannel_ConnectedPeersOnChannelFindTopicShouldReturn(t *testing. wasFetchCalled.Store(false) poc, _ := newPeersOnChannel( + &p2pmocks.PeersRatingHandlerStub{}, func(topic string) []peer.ID { wasFetchCalled.Store(true) return nil @@ -131,6 +146,7 @@ func TestPeersOnChannel_RefreshShouldBeDone(t *testing.T) { ttlInterval := time.Duration(2) poc, _ := newPeersOnChannel( + &p2pmocks.PeersRatingHandlerStub{}, func(topic string) []peer.ID { wasFetchCalled.Store(true) return nil diff --git a/p2p/message/directConnectionMessage.pb.go b/p2p/message/directConnectionMessage.pb.go new file mode 100644 index 00000000000..9a2a6bb0aa9 --- /dev/null +++ b/p2p/message/directConnectionMessage.pb.go @@ -0,0 +1,379 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: directConnectionMessage.proto + +package message + +import ( + fmt "fmt" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// DirectConnectionInfo represents the data regarding a new direct connection`s info +type DirectConnectionInfo struct { + ShardId string `protobuf:"bytes,1,opt,name=ShardId,proto3" json:"shardId"` +} + +func (m *DirectConnectionInfo) Reset() { *m = DirectConnectionInfo{} } +func (*DirectConnectionInfo) ProtoMessage() {} +func (*DirectConnectionInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_f237562c19ebfede, []int{0} +} +func (m *DirectConnectionInfo) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DirectConnectionInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DirectConnectionInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_DirectConnectionInfo.Merge(m, src) +} +func (m *DirectConnectionInfo) XXX_Size() int { + return m.Size() +} +func (m *DirectConnectionInfo) XXX_DiscardUnknown() { + xxx_messageInfo_DirectConnectionInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_DirectConnectionInfo proto.InternalMessageInfo + +func (m *DirectConnectionInfo) GetShardId() string { + if m != nil { + return m.ShardId + } + return "" +} + +func init() { + proto.RegisterType((*DirectConnectionInfo)(nil), "proto.DirectConnectionInfo") +} + +func init() { proto.RegisterFile("directConnectionMessage.proto", fileDescriptor_f237562c19ebfede) } + +var fileDescriptor_f237562c19ebfede = []byte{ + // 201 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4d, 0xc9, 0x2c, 0x4a, + 0x4d, 0x2e, 0x71, 0xce, 0xcf, 0xcb, 0x4b, 0x4d, 0x2e, 0xc9, 0xcc, 0xcf, 0xf3, 0x4d, 0x2d, 0x2e, + 0x4e, 0x4c, 0x4f, 0xd5, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x05, 0x53, 0x52, 0xba, 0xe9, + 0x99, 0x25, 0x19, 0xa5, 0x49, 0x7a, 0xc9, 0xf9, 0xb9, 0xfa, 0xe9, 0xf9, 0xe9, 0xf9, 0xfa, 0x60, + 0xe1, 0xa4, 0xd2, 0x34, 0x30, 0x0f, 0xcc, 0x01, 0xb3, 0x20, 0xba, 0x94, 0x6c, 0xb9, 0x44, 0x5c, + 0xd0, 0x8c, 0xf5, 0xcc, 0x4b, 0xcb, 0x17, 0x52, 0xe5, 0x62, 0x0f, 0xce, 0x48, 0x2c, 0x4a, 0xf1, + 0x4c, 0x91, 0x60, 0x54, 0x60, 0xd4, 0xe0, 0x74, 0xe2, 0x7e, 0x75, 0x4f, 0x9e, 0xbd, 0x18, 0x22, + 0x14, 0x04, 0x93, 0x73, 0x72, 0xbc, 0xf0, 0x50, 0x8e, 0xe1, 0xc6, 0x43, 0x39, 0x86, 0x0f, 0x0f, + 0xe5, 0x18, 0x1b, 0x1e, 0xc9, 0x31, 0xae, 0x78, 0x24, 0xc7, 0x78, 0xe2, 0x91, 0x1c, 0xe3, 0x85, + 0x47, 0x72, 0x8c, 0x37, 0x1e, 0xc9, 0x31, 0x3e, 0x78, 0x24, 0xc7, 0xf8, 0xe2, 0x91, 0x1c, 0xc3, + 0x87, 0x47, 0x72, 0x8c, 0x13, 0x1e, 0xcb, 0x31, 0x5c, 0x78, 0x2c, 0xc7, 0x70, 0xe3, 0xb1, 0x1c, + 0x43, 0x14, 0x7b, 0x2e, 0xc4, 0xf5, 0x49, 0x6c, 0x60, 0x87, 0x18, 0x03, 0x02, 0x00, 0x00, 0xff, + 0xff, 0x70, 0x6f, 0x2c, 0x03, 0xdf, 0x00, 0x00, 0x00, +} + +func (this *DirectConnectionInfo) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*DirectConnectionInfo) + if !ok { + that2, ok := that.(DirectConnectionInfo) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.ShardId != that1.ShardId { + return false + } + return true +} +func (this *DirectConnectionInfo) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&message.DirectConnectionInfo{") + s = append(s, "ShardId: "+fmt.Sprintf("%#v", this.ShardId)+",\n") + s = append(s, "}") + return 
strings.Join(s, "") +} +func valueToGoStringDirectConnectionMessage(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *DirectConnectionInfo) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DirectConnectionInfo) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DirectConnectionInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ShardId) > 0 { + i -= len(m.ShardId) + copy(dAtA[i:], m.ShardId) + i = encodeVarintDirectConnectionMessage(dAtA, i, uint64(len(m.ShardId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintDirectConnectionMessage(dAtA []byte, offset int, v uint64) int { + offset -= sovDirectConnectionMessage(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *DirectConnectionInfo) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ShardId) + if l > 0 { + n += 1 + l + sovDirectConnectionMessage(uint64(l)) + } + return n +} + +func sovDirectConnectionMessage(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozDirectConnectionMessage(x uint64) (n int) { + return sovDirectConnectionMessage(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *DirectConnectionInfo) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DirectConnectionInfo{`, + `ShardId:` + fmt.Sprintf("%v", this.ShardId) + `,`, + `}`, + }, "") + return s +} +func valueToStringDirectConnectionMessage(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *DirectConnectionInfo) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDirectConnectionMessage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DirectConnectionInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DirectConnectionInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ShardId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDirectConnectionMessage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDirectConnectionMessage + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthDirectConnectionMessage + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ShardId = 
string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipDirectConnectionMessage(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthDirectConnectionMessage + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthDirectConnectionMessage + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipDirectConnectionMessage(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowDirectConnectionMessage + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowDirectConnectionMessage + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowDirectConnectionMessage + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthDirectConnectionMessage + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupDirectConnectionMessage + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthDirectConnectionMessage + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthDirectConnectionMessage = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowDirectConnectionMessage = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupDirectConnectionMessage = fmt.Errorf("proto: unexpected end of group") +) diff --git a/p2p/message/directConnectionMessage.proto b/p2p/message/directConnectionMessage.proto new file mode 100644 index 00000000000..26eeec0be32 --- /dev/null +++ b/p2p/message/directConnectionMessage.proto @@ -0,0 +1,13 @@ +syntax = "proto3"; + +package proto; + +option go_package = "message"; +option (gogoproto.stable_marshaler_all) = true; + +import "github.com/gogo/protobuf/gogoproto/gogo.proto"; + +// DirectConnectionInfo represents the data regarding a new direct connection`s info +message DirectConnectionInfo { + string ShardId = 1 [(gogoproto.jsontag) = "shardId"]; +} diff --git a/p2p/message/generate.go b/p2p/message/generate.go new file mode 100644 index 00000000000..d0b9445a167 --- /dev/null +++ b/p2p/message/generate.go @@ -0,0 +1,3 @@ +//go:generate protoc -I=. -I=$GOPATH/src -I=$GOPATH/src/github.com/ElrondNetwork/protobuf/protobuf --gogoslick_out=. 
directConnectionMessage.proto + +package message diff --git a/p2p/mock/networkShardingCollectorMock.go b/p2p/mock/networkShardingCollectorMock.go index ab5e83f5bbb..750f3dbffb6 100644 --- a/p2p/mock/networkShardingCollectorMock.go +++ b/p2p/mock/networkShardingCollectorMock.go @@ -49,8 +49,8 @@ func (nscm *networkShardingCollectorMock) UpdatePeerIDInfo(pid core.PeerID, pk [ nscm.mutFallbackPidShardMap.Unlock() } -// UpdatePeerIdSubType - -func (nscm *networkShardingCollectorMock) UpdatePeerIdSubType(pid core.PeerID, peerSubType core.P2PPeerSubType) { +// PutPeerIdSubType - +func (nscm *networkShardingCollectorMock) PutPeerIdSubType(pid core.PeerID, peerSubType core.P2PPeerSubType) { nscm.mutPeerIdSubType.Lock() nscm.peerIdSubType[pid] = uint32(peerSubType) nscm.mutPeerIdSubType.Unlock() diff --git a/p2p/p2p.go b/p2p/p2p.go index 5fd4a3db0fd..d8b5a8fa26f 100644 --- a/p2p/p2p.go +++ b/p2p/p2p.go @@ -154,6 +154,8 @@ type Messenger interface { UnjoinAllTopics() error Port() int WaitForConnections(maxWaitingTime time.Duration, minNumOfPeers uint32) + Sign(payload []byte) ([]byte, error) + Verify(payload []byte, pid core.PeerID, signature []byte) error // IsInterfaceNil returns true if there is no value under the interface IsInterfaceNil() bool @@ -270,7 +272,8 @@ type Marshalizer interface { // PreferredPeersHolderHandler defines the behavior of a component able to handle preferred peers operations type PreferredPeersHolderHandler interface { - Put(publicKey []byte, peerID core.PeerID, shardID uint32) + PutConnectionAddress(peerID core.PeerID, address string) + PutShardID(peerID core.PeerID, shardID uint32) Get() map[uint32][]core.PeerID Contains(peerID core.PeerID) bool Remove(peerID core.PeerID) @@ -331,3 +334,12 @@ type ConnectionsWatcher interface { Close() error IsInterfaceNil() bool } + +// PeersRatingHandler represent an entity able to handle peers ratings +type PeersRatingHandler interface { + AddPeer(pid core.PeerID) + IncreaseRating(pid core.PeerID) + DecreaseRating(pid core.PeerID) + GetTopRatedPeersFromList(peers []core.PeerID, minNumOfPeersExpected int) []core.PeerID + IsInterfaceNil() bool +} diff --git a/p2p/peersHolder/connectionStringValidator/connectionStringValidator.go b/p2p/peersHolder/connectionStringValidator/connectionStringValidator.go new file mode 100644 index 00000000000..ce9e90c5616 --- /dev/null +++ b/p2p/peersHolder/connectionStringValidator/connectionStringValidator.go @@ -0,0 +1,29 @@ +package connectionStringValidator + +import ( + "net" + + "github.com/ElrondNetwork/elrond-go-core/core" +) + +type connectionStringValidator struct { +} + +// NewConnectionStringValidator returns a new connection string validator +func NewConnectionStringValidator() *connectionStringValidator { + return &connectionStringValidator{} +} + +// IsValid checks either a connection string is a valid ip or peer id +func (csv *connectionStringValidator) IsValid(connStr string) bool { + return csv.isValidIP(connStr) || csv.isValidPeerID(connStr) +} + +func (csv *connectionStringValidator) isValidIP(connStr string) bool { + return net.ParseIP(connStr) != nil +} + +func (csv *connectionStringValidator) isValidPeerID(connStr string) bool { + _, err := core.NewPeerID(connStr) + return err == nil +} diff --git a/p2p/peersHolder/connectionStringValidator/connectionStringValidator_test.go b/p2p/peersHolder/connectionStringValidator/connectionStringValidator_test.go new file mode 100644 index 00000000000..ad9052dfa6b --- /dev/null +++ 
b/p2p/peersHolder/connectionStringValidator/connectionStringValidator_test.go @@ -0,0 +1,55 @@ +package connectionStringValidator + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestConnectionStringValidator_IsValid(t *testing.T) { + t.Parallel() + + csv := NewConnectionStringValidator() + assert.False(t, csv.IsValid("invalid string")) + assert.False(t, csv.IsValid("")) + + assert.True(t, csv.IsValid("5.22.219.242")) + assert.True(t, csv.IsValid("2031:0:130F:0:0:9C0:876A:130B")) + assert.True(t, csv.IsValid("16Uiu2HAm6yvbp1oZ6zjnWsn9FdRqBSaQkbhELyaThuq48ybdojvJ")) +} +func TestConnectionStringValidator_isValidIP(t *testing.T) { + t.Parallel() + + csv := NewConnectionStringValidator() + assert.False(t, csv.isValidIP("invalid ip")) + assert.False(t, csv.isValidIP("")) + assert.False(t, csv.isValidIP("a.b.c.d")) + assert.False(t, csv.isValidIP("10.0.0")) + assert.False(t, csv.isValidIP("10.0")) + assert.False(t, csv.isValidIP("10")) + assert.False(t, csv.isValidIP("2031:0:130F:0:0:9C0:876A")) + assert.False(t, csv.isValidIP("2031:0:130F:0:0:9C0")) + assert.False(t, csv.isValidIP("2031:0:130F:0:0")) + assert.False(t, csv.isValidIP("2031:0:130F:0")) + assert.False(t, csv.isValidIP("2031:0:130F")) + assert.False(t, csv.isValidIP("2031:0")) + assert.False(t, csv.isValidIP("16Uiu2HAm6yvbp1oZ6zjnWsn9FdRqBSaQkbhELyaThuq48ybdojvJ")) + + assert.True(t, csv.isValidIP("127.0.0.1")) + assert.True(t, csv.isValidIP("5.22.219.242")) + assert.True(t, csv.isValidIP("2031:0:130F:0:0:9C0:876A:130B")) +} + +func TestConnectionStringValidator_isValidPeerID(t *testing.T) { + t.Parallel() + + csv := NewConnectionStringValidator() + assert.False(t, csv.isValidPeerID("invalid peer id")) + assert.False(t, csv.isValidPeerID("")) + assert.False(t, csv.isValidPeerID("blaiu2HAm6yvbp1oZ6zjnWsn9FdRqBSaQkbhELyaThuq48ybdojvJ")) // first 3 chars altered + assert.False(t, csv.isValidPeerID("16Uiu2HAm6yvbp1oZ6zjnWsn9FdRqBSaQkbhELyaThuq48ybdobla")) // last 3 chars altered + assert.False(t, csv.isValidPeerID("16Uiu2HAm6yvbp1oZ6zjnWsn9FblaBSaQkbhELyaThuq48ybdojvJ")) // middle chars altered + assert.False(t, csv.isValidPeerID("5.22.219.242")) + + assert.True(t, csv.isValidPeerID("16Uiu2HAm6yvbp1oZ6zjnWsn9FdRqBSaQkbhELyaThuq48ybdojvJ")) +} diff --git a/p2p/peersHolder/peersHolder.go b/p2p/peersHolder/peersHolder.go new file mode 100644 index 00000000000..938f63610a7 --- /dev/null +++ b/p2p/peersHolder/peersHolder.go @@ -0,0 +1,253 @@ +package peersHolder + +import ( + "fmt" + "strings" + "sync" + + "github.com/ElrondNetwork/elrond-go-core/core" + "github.com/ElrondNetwork/elrond-go/p2p" + "github.com/ElrondNetwork/elrond-go/p2p/peersHolder/connectionStringValidator" +) + +type peerInfo struct { + pid core.PeerID + shardID uint32 +} + +type peerIDData struct { + connectionAddress string + shardID uint32 + index int +} + +type peersHolder struct { + preferredConnAddresses []string + connAddrToPeersInfo map[string][]*peerInfo + tempPeerIDsWaitingForShard map[core.PeerID]string + peerIDsPerShard map[uint32][]core.PeerID + peerIDs map[core.PeerID]*peerIDData + mut sync.RWMutex +} + +// NewPeersHolder returns a new instance of peersHolder +func NewPeersHolder(preferredConnectionAddresses []string) (*peersHolder, error) { + preferredConnections := make([]string, 0) + connAddrToPeerIDs := make(map[string][]*peerInfo) + + connectionValidator := connectionStringValidator.NewConnectionStringValidator() + + for _, connAddr := range preferredConnectionAddresses { + if !connectionValidator.IsValid(connAddr) { + return 
nil, fmt.Errorf("%w for preferred connection address %s", p2p.ErrInvalidValue, connAddr) + } + + preferredConnections = append(preferredConnections, connAddr) + connAddrToPeerIDs[connAddr] = nil + } + + return &peersHolder{ + preferredConnAddresses: preferredConnections, + connAddrToPeersInfo: connAddrToPeerIDs, + tempPeerIDsWaitingForShard: make(map[core.PeerID]string), + peerIDsPerShard: make(map[uint32][]core.PeerID), + peerIDs: make(map[core.PeerID]*peerIDData), + }, nil +} + +// PutConnectionAddress will perform the insert or the upgrade operation if the provided peerID is inside the preferred peers list +func (ph *peersHolder) PutConnectionAddress(peerID core.PeerID, connectionAddress string) { + ph.mut.Lock() + defer ph.mut.Unlock() + + knownConnection := ph.getKnownConnection(connectionAddress) + if len(knownConnection) == 0 { + return + } + + peersInfo := ph.connAddrToPeersInfo[knownConnection] + if peersInfo == nil { + ph.addNewPeerInfoToMaps(peerID, knownConnection) + return + } + + // if we have new peer for same connection, add it to maps + pInfo := ph.getPeerInfoForPeerID(peerID, peersInfo) + if pInfo == nil { + ph.addNewPeerInfoToMaps(peerID, knownConnection) + } +} + +func (ph *peersHolder) addNewPeerInfoToMaps(peerID core.PeerID, knownConnection string) { + ph.tempPeerIDsWaitingForShard[peerID] = knownConnection + + newPeerInfo := &peerInfo{ + pid: peerID, + shardID: core.AllShardId, // this will be overwritten once shard is available + } + + ph.connAddrToPeersInfo[knownConnection] = append(ph.connAddrToPeersInfo[knownConnection], newPeerInfo) +} + +func (ph *peersHolder) getPeerInfoForPeerID(peerID core.PeerID, peersInfo []*peerInfo) *peerInfo { + for _, pInfo := range peersInfo { + if peerID == pInfo.pid { + return pInfo + } + } + + return nil +} + +// PutShardID will perform the insert or the upgrade operation if the provided peerID is inside the preferred peers list +func (ph *peersHolder) PutShardID(peerID core.PeerID, shardID uint32) { + ph.mut.Lock() + defer ph.mut.Unlock() + + knownConnection, isWaitingForShardID := ph.tempPeerIDsWaitingForShard[peerID] + if !isWaitingForShardID { + return + } + + peersInfo, ok := ph.connAddrToPeersInfo[knownConnection] + if !ok || peersInfo == nil { + return + } + + pInfo := ph.getPeerInfoForPeerID(peerID, peersInfo) + if pInfo == nil { + return + } + + pInfo.shardID = shardID + + ph.peerIDsPerShard[shardID] = append(ph.peerIDsPerShard[shardID], peerID) + + ph.peerIDs[peerID] = &peerIDData{ + connectionAddress: knownConnection, + shardID: shardID, + index: len(ph.peerIDsPerShard[shardID]) - 1, + } + + delete(ph.tempPeerIDsWaitingForShard, peerID) +} + +// Get will return a map containing the preferred peer IDs, split by shard ID +func (ph *peersHolder) Get() map[uint32][]core.PeerID { + var peerIDsPerShardCopy map[uint32][]core.PeerID + + ph.mut.RLock() + peerIDsPerShardCopy = ph.peerIDsPerShard + ph.mut.RUnlock() + + return peerIDsPerShardCopy +} + +// Contains returns true if the provided peer id is a preferred connection +func (ph *peersHolder) Contains(peerID core.PeerID) bool { + ph.mut.RLock() + defer ph.mut.RUnlock() + + _, found := ph.peerIDs[peerID] + return found +} + +// Remove will remove the provided peer ID from the inner members +func (ph *peersHolder) Remove(peerID core.PeerID) { + ph.mut.Lock() + defer ph.mut.Unlock() + + pidData, found := ph.peerIDs[peerID] + if !found { + return + } + + shard, index, _ := ph.getShardAndIndexForPeer(peerID) + ph.removePeerFromMapAtIndex(shard, index) + + connAddress := 
pidData.connectionAddress + + delete(ph.peerIDs, peerID) + + ph.removePeerInfoAtConnectionAddress(peerID, connAddress) + + _, isWaitingForShardID := ph.tempPeerIDsWaitingForShard[peerID] + if isWaitingForShardID { + delete(ph.tempPeerIDsWaitingForShard, peerID) + } +} + +// removePeerInfoAtConnectionAddress removes the entry associated with the provided pid from connAddrToPeersInfo map +// it never removes the map key as it may be reused on a further reconnection +func (ph *peersHolder) removePeerInfoAtConnectionAddress(peerID core.PeerID, connAddr string) { + peersInfo := ph.connAddrToPeersInfo[connAddr] + if peersInfo == nil { + return + } + + var index int + var pInfo *peerInfo + for index, pInfo = range peersInfo { + if peerID == pInfo.pid { + ph.removePeerFromPeersInfoAtIndex(peersInfo, index, connAddr) + return + } + } + +} + +func (ph *peersHolder) removePeerFromPeersInfoAtIndex(peersInfo []*peerInfo, index int, connAddr string) { + peersInfo = append(peersInfo[:index], peersInfo[index+1:]...) + if len(peersInfo) == 0 { + peersInfo = nil + } + + ph.connAddrToPeersInfo[connAddr] = peersInfo +} + +// getKnownConnection checks if the connection address string contains any of the initial preferred connection address +// if true, it returns it +// this function must be called under mutex protection +func (ph *peersHolder) getKnownConnection(connectionAddressStr string) string { + for _, preferredConnAddr := range ph.preferredConnAddresses { + if strings.Contains(connectionAddressStr, preferredConnAddr) { + return preferredConnAddr + } + } + + return "" +} + +// this function must be called under mutex protection +func (ph *peersHolder) removePeerFromMapAtIndex(shardID uint32, index int) { + ph.peerIDsPerShard[shardID] = append(ph.peerIDsPerShard[shardID][:index], ph.peerIDsPerShard[shardID][index+1:]...) 
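+ // the entry at the given index has been sliced out of this shard's peer ID list; if the list is now empty, the shard key is dropped below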
+ if len(ph.peerIDsPerShard[shardID]) == 0 { + delete(ph.peerIDsPerShard, shardID) + } +} + +// this function must be called under mutex protection +func (ph *peersHolder) getShardAndIndexForPeer(peerID core.PeerID) (uint32, int, bool) { + pidData, ok := ph.peerIDs[peerID] + if !ok { + return 0, 0, false + } + + return pidData.shardID, pidData.index, true +} + +// Clear will delete all the entries from the inner map +func (ph *peersHolder) Clear() { + ph.mut.Lock() + defer ph.mut.Unlock() + + ph.tempPeerIDsWaitingForShard = make(map[core.PeerID]string) + ph.peerIDsPerShard = make(map[uint32][]core.PeerID) + ph.peerIDs = make(map[core.PeerID]*peerIDData) + ph.connAddrToPeersInfo = make(map[string][]*peerInfo) +} + +// IsInterfaceNil returns true if there is no value under the interface +func (ph *peersHolder) IsInterfaceNil() bool { + return ph == nil +} diff --git a/p2p/peersHolder/peersHolder_test.go b/p2p/peersHolder/peersHolder_test.go new file mode 100644 index 00000000000..ca48fd5d35f --- /dev/null +++ b/p2p/peersHolder/peersHolder_test.go @@ -0,0 +1,227 @@ +package peersHolder + +import ( + "errors" + "testing" + + "github.com/ElrondNetwork/elrond-go-core/core" + "github.com/ElrondNetwork/elrond-go-core/core/check" + "github.com/ElrondNetwork/elrond-go/p2p" + "github.com/stretchr/testify/assert" +) + +func TestNewPeersHolder(t *testing.T) { + t.Parallel() + + t.Run("invalid addresses should error", func(t *testing.T) { + t.Parallel() + + preferredPeers := []string{"10.100.100", "invalid string"} + ph, err := NewPeersHolder(preferredPeers) + assert.True(t, check.IfNil(ph)) + assert.True(t, errors.Is(err, p2p.ErrInvalidValue)) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + ph, _ := NewPeersHolder([]string{"10.100.100.100"}) + assert.False(t, check.IfNil(ph)) + }) +} + +func TestPeersHolder_PutConnectionAddress(t *testing.T) { + t.Parallel() + + t.Run("not preferred should not add", func(t *testing.T) { + t.Parallel() + + preferredPeers := []string{"10.100.100.100"} + ph, _ := NewPeersHolder(preferredPeers) + assert.False(t, check.IfNil(ph)) + + unknownConnection := "/ip4/20.200.200.200/tcp/8080/p2p/some-random-pid" // preferredPeers[0] + providedPid := core.PeerID("provided pid") + ph.PutConnectionAddress(providedPid, unknownConnection) + + _, found := ph.tempPeerIDsWaitingForShard[providedPid] + assert.False(t, found) + + peers := ph.Get() + assert.Equal(t, 0, len(peers)) + }) + t.Run("new connection should add to intermediate maps", func(t *testing.T) { + t.Parallel() + + preferredPeers := []string{"10.100.100.100", "10.100.100.101"} + ph, _ := NewPeersHolder(preferredPeers) + assert.False(t, check.IfNil(ph)) + + newConnection := "/ip4/10.100.100.100/tcp/38191/p2p/some-random-pid" // preferredPeers[0] + providedPid := core.PeerID("provided pid") + ph.PutConnectionAddress(providedPid, newConnection) + + knownConnection, found := ph.tempPeerIDsWaitingForShard[providedPid] + assert.True(t, found) + assert.Equal(t, preferredPeers[0], knownConnection) + + peersInfo := ph.connAddrToPeersInfo[knownConnection] + assert.Equal(t, 1, len(peersInfo)) + assert.Equal(t, providedPid, peersInfo[0].pid) + assert.Equal(t, core.AllShardId, peersInfo[0].shardID) + + // not in the final map yet + peers := ph.Get() + assert.Equal(t, 0, len(peers)) + }) + t.Run("should save second pid on same address", func(t *testing.T) { + t.Parallel() + + preferredPeers := []string{"10.100.100.100", "10.100.100.101", "16Uiu2HAm6yvbp1oZ6zjnWsn9FdRqBSaQkbhELyaThuq48ybdojvJ"} + ph, _ := 
NewPeersHolder(preferredPeers) + assert.False(t, check.IfNil(ph)) + + newConnection := "/ip4/10.100.100.102/tcp/38191/p2p/16Uiu2HAm6yvbp1oZ6zjnWsn9FdRqBSaQkbhELyaThuq48ybdojvJ" // preferredPeers[2] + providedPid := core.PeerID("provided pid") + ph.PutConnectionAddress(providedPid, newConnection) + + knownConnection, found := ph.tempPeerIDsWaitingForShard[providedPid] + assert.True(t, found) + assert.Equal(t, preferredPeers[2], knownConnection) + + peersInfo := ph.connAddrToPeersInfo[knownConnection] + assert.Equal(t, 1, len(peersInfo)) + assert.Equal(t, providedPid, peersInfo[0].pid) + assert.Equal(t, core.AllShardId, peersInfo[0].shardID) + + ph.PutConnectionAddress(providedPid, newConnection) // try to update with same connection for coverage + + newPid := core.PeerID("new pid") + ph.PutConnectionAddress(newPid, newConnection) + knownConnection, found = ph.tempPeerIDsWaitingForShard[providedPid] + assert.True(t, found) + assert.Equal(t, preferredPeers[2], knownConnection) + + peersInfo = ph.connAddrToPeersInfo[knownConnection] + assert.Equal(t, 2, len(peersInfo)) + assert.Equal(t, newPid, peersInfo[1].pid) + assert.Equal(t, core.AllShardId, peersInfo[1].shardID) + + // not in the final map yet + peers := ph.Get() + assert.Equal(t, 0, len(peers)) + }) +} + +func TestPeersHolder_PutShardID(t *testing.T) { + t.Parallel() + + t.Run("peer not added in the waiting list should be skipped", func(t *testing.T) { + t.Parallel() + + preferredPeers := []string{"10.100.100.100"} + ph, _ := NewPeersHolder(preferredPeers) + assert.False(t, check.IfNil(ph)) + + providedPid := core.PeerID("provided pid") + providedShardID := uint32(123) + ph.PutShardID(providedPid, providedShardID) + + peers := ph.Get() + assert.Equal(t, 0, len(peers)) + }) + t.Run("peer not added in map should be skipped", func(t *testing.T) { + t.Parallel() + + preferredPeers := []string{"10.100.100.100"} + ph, _ := NewPeersHolder(preferredPeers) + assert.False(t, check.IfNil(ph)) + + providedPid := core.PeerID("provided pid") + providedShardID := uint32(123) + ph.tempPeerIDsWaitingForShard[providedPid] = preferredPeers[0] + ph.PutShardID(providedPid, providedShardID) + + peers := ph.Get() + assert.Equal(t, 0, len(peers)) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + preferredPeers := []string{"10.100.100.100", "10.100.100.101", "16Uiu2HAm6yvbp1oZ6zjnWsn9FdRqBSaQkbhELyaThuq48ybdojvJ"} + ph, _ := NewPeersHolder(preferredPeers) + assert.False(t, check.IfNil(ph)) + + newConnection := "/ip4/10.100.100.101/tcp/38191/p2p/some-random-pid" // preferredPeers[1] + providedPid := core.PeerID("provided pid") + ph.PutConnectionAddress(providedPid, newConnection) + + providedShardID := uint32(123) + ph.PutShardID(providedPid, providedShardID) + + peers := ph.Get() + assert.Equal(t, 1, len(peers)) + peersInShard, found := peers[providedShardID] + assert.True(t, found) + assert.Equal(t, providedPid, peersInShard[0]) + + pidData := ph.peerIDs[providedPid] + assert.Equal(t, preferredPeers[1], pidData.connectionAddress) + assert.Equal(t, providedShardID, pidData.shardID) + assert.Equal(t, 0, pidData.index) + + _, found = ph.tempPeerIDsWaitingForShard[providedPid] + assert.False(t, found) + }) +} + +func TestPeersHolder_Contains(t *testing.T) { + t.Parallel() + + preferredPeers := []string{"10.100.100.100", "10.100.100.101"} + ph, _ := NewPeersHolder(preferredPeers) + assert.False(t, check.IfNil(ph)) + + newConnection := "/ip4/10.100.100.101/tcp/38191/p2p/some-random-pid" // preferredPeers[1] + providedPid := core.PeerID("provided 
pid") + ph.PutConnectionAddress(providedPid, newConnection) + + providedShardID := uint32(123) + ph.PutShardID(providedPid, providedShardID) + + assert.True(t, ph.Contains(providedPid)) + + ph.Remove(providedPid) + assert.False(t, ph.Contains(providedPid)) + + unknownPid := core.PeerID("unknown pid") + ph.Remove(unknownPid) // for code coverage +} + +func TestPeersHolder_Clear(t *testing.T) { + t.Parallel() + + preferredPeers := []string{"10.100.100.100", "16Uiu2HAm6yvbp1oZ6zjnWsn9FdRqBSaQkbhELyaThuq48ybdojvJ"} + ph, _ := NewPeersHolder(preferredPeers) + assert.False(t, check.IfNil(ph)) + + newConnection1 := "/ip4/10.100.100.100/tcp/38191/p2p/some-random-pid" // preferredPeers[0] + providedPid1 := core.PeerID("provided pid 1") + ph.PutConnectionAddress(providedPid1, newConnection1) + providedShardID := uint32(123) + ph.PutShardID(providedPid1, providedShardID) + assert.True(t, ph.Contains(providedPid1)) + + newConnection2 := "/ip4/10.100.100.102/tcp/38191/p2p/16Uiu2HAm6yvbp1oZ6zjnWsn9FdRqBSaQkbhELyaThuq48ybdojvJ" // preferredPeers[1] + providedPid2 := core.PeerID("provided pid 2") + ph.PutConnectionAddress(providedPid2, newConnection2) + ph.PutShardID(providedPid2, providedShardID) + assert.True(t, ph.Contains(providedPid2)) + + peers := ph.Get() + assert.Equal(t, 1, len(peers)) + assert.Equal(t, 2, len(peers[providedShardID])) + + ph.Clear() + peers = ph.Get() + assert.Equal(t, 0, len(peers)) +} diff --git a/p2p/rating/peersRatingHandler.go b/p2p/rating/peersRatingHandler.go new file mode 100644 index 00000000000..be7935ef2d3 --- /dev/null +++ b/p2p/rating/peersRatingHandler.go @@ -0,0 +1,238 @@ +package rating + +import ( + "fmt" + "sync" + + "github.com/ElrondNetwork/elrond-go-core/core" + "github.com/ElrondNetwork/elrond-go-core/core/check" + logger "github.com/ElrondNetwork/elrond-go-logger" + "github.com/ElrondNetwork/elrond-go/p2p" + "github.com/ElrondNetwork/elrond-go/storage" +) + +const ( + topRatedTier = "top rated tier" + badRatedTier = "bad rated tier" + defaultRating = int32(0) + minRating = -100 + maxRating = 100 + increaseFactor = 2 + decreaseFactor = -1 + minNumOfPeers = 1 + int32Size = 4 +) + +var log = logger.GetOrCreate("p2p/peersRatingHandler") + +// ArgPeersRatingHandler is the DTO used to create a new peers rating handler +type ArgPeersRatingHandler struct { + TopRatedCache storage.Cacher + BadRatedCache storage.Cacher +} + +type peersRatingHandler struct { + topRatedCache storage.Cacher + badRatedCache storage.Cacher + mut sync.Mutex +} + +// NewPeersRatingHandler returns a new peers rating handler +func NewPeersRatingHandler(args ArgPeersRatingHandler) (*peersRatingHandler, error) { + err := checkArgs(args) + if err != nil { + return nil, err + } + + prh := &peersRatingHandler{ + topRatedCache: args.TopRatedCache, + badRatedCache: args.BadRatedCache, + } + + return prh, nil +} + +func checkArgs(args ArgPeersRatingHandler) error { + if check.IfNil(args.TopRatedCache) { + return fmt.Errorf("%w for TopRatedCache", p2p.ErrNilCacher) + } + if check.IfNil(args.BadRatedCache) { + return fmt.Errorf("%w for BadRatedCache", p2p.ErrNilCacher) + } + + return nil +} + +// AddPeer adds a new peer to the cache with rating 0 +// this is called when a new peer is detected +func (prh *peersRatingHandler) AddPeer(pid core.PeerID) { + prh.mut.Lock() + defer prh.mut.Unlock() + + _, found := prh.getOldRating(pid) + if found { + return + } + + prh.topRatedCache.Put(pid.Bytes(), defaultRating, int32Size) +} + +// IncreaseRating increases the rating of a peer with the increase factor +func 
(prh *peersRatingHandler) IncreaseRating(pid core.PeerID) { + prh.mut.Lock() + defer prh.mut.Unlock() + + prh.updateRatingIfNeeded(pid, increaseFactor) +} + +// DecreaseRating decreases the rating of a peer with the decrease factor +func (prh *peersRatingHandler) DecreaseRating(pid core.PeerID) { + prh.mut.Lock() + defer prh.mut.Unlock() + + prh.updateRatingIfNeeded(pid, decreaseFactor) +} + +func (prh *peersRatingHandler) getOldRating(pid core.PeerID) (int32, bool) { + oldRating, found := prh.topRatedCache.Get(pid.Bytes()) + if found { + oldRatingInt, _ := oldRating.(int32) + return oldRatingInt, found + } + + oldRating, found = prh.badRatedCache.Get(pid.Bytes()) + if found { + oldRatingInt, _ := oldRating.(int32) + return oldRatingInt, found + } + + return defaultRating, found +} + +func (prh *peersRatingHandler) updateRatingIfNeeded(pid core.PeerID, updateFactor int32) { + oldRating, found := prh.getOldRating(pid) + if !found { + // new pid, add it with default rating + prh.topRatedCache.Put(pid.Bytes(), defaultRating, int32Size) + return + } + + decreasingUnderMin := oldRating == minRating && updateFactor == decreaseFactor + increasingOverMax := oldRating == maxRating && updateFactor == increaseFactor + shouldSkipUpdate := decreasingUnderMin || increasingOverMax + if shouldSkipUpdate { + return + } + + newRating := oldRating + updateFactor + if newRating > maxRating { + newRating = maxRating + } + + if newRating < minRating { + newRating = minRating + } + + prh.updateRating(pid, oldRating, newRating) +} + +func (prh *peersRatingHandler) updateRating(pid core.PeerID, oldRating, newRating int32) { + oldTier := computeRatingTier(oldRating) + newTier := computeRatingTier(newRating) + if newTier == oldTier { + if newTier == topRatedTier { + prh.topRatedCache.Put(pid.Bytes(), newRating, int32Size) + } else { + prh.badRatedCache.Put(pid.Bytes(), newRating, int32Size) + } + + return + } + + prh.movePeerToNewTier(newRating, pid) +} + +func computeRatingTier(peerRating int32) string { + if peerRating >= defaultRating { + return topRatedTier + } + + return badRatedTier +} + +func (prh *peersRatingHandler) movePeerToNewTier(newRating int32, pid core.PeerID) { + newTier := computeRatingTier(newRating) + if newTier == topRatedTier { + prh.badRatedCache.Remove(pid.Bytes()) + prh.topRatedCache.Put(pid.Bytes(), newRating, int32Size) + } else { + prh.topRatedCache.Remove(pid.Bytes()) + prh.badRatedCache.Put(pid.Bytes(), newRating, int32Size) + } +} + +// GetTopRatedPeersFromList returns a list of peers, searching them in the order of rating tiers +func (prh *peersRatingHandler) GetTopRatedPeersFromList(peers []core.PeerID, minNumOfPeersExpected int) []core.PeerID { + prh.mut.Lock() + defer prh.mut.Unlock() + + peersTopRated := make([]core.PeerID, 0) + defer prh.displayPeersRating(&peersTopRated, minNumOfPeersExpected) + + isListEmpty := len(peers) == 0 + if minNumOfPeersExpected < minNumOfPeers || isListEmpty { + return make([]core.PeerID, 0) + } + + peersTopRated, peersBadRated := prh.splitPeersByTiers(peers) + if len(peersTopRated) < minNumOfPeersExpected { + peersTopRated = append(peersTopRated, peersBadRated...) 
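+ // too few top rated peers were found, so the bad rated ones are appended above as a fallback towards the expected minimum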
+ } + + return peersTopRated +} + +func (prh *peersRatingHandler) displayPeersRating(peers *[]core.PeerID, minNumOfPeersExpected int) { + if log.GetLevel() != logger.LogTrace { + return + } + + strPeersRatings := "" + for _, peer := range *peers { + rating, ok := prh.topRatedCache.Get(peer.Bytes()) + if !ok { + rating, _ = prh.badRatedCache.Get(peer.Bytes()) + } + + ratingInt, ok := rating.(int32) + if ok { + strPeersRatings += fmt.Sprintf("\n peerID: %s, rating: %d", peer.Pretty(), ratingInt) + } else { + strPeersRatings += fmt.Sprintf("\n peerID: %s, rating: invalid", peer.Pretty()) + } + } + + log.Trace("Best peers to request from", "min requested", minNumOfPeersExpected, "peers ratings", strPeersRatings) +} + +func (prh *peersRatingHandler) splitPeersByTiers(peers []core.PeerID) ([]core.PeerID, []core.PeerID) { + topRated := make([]core.PeerID, 0) + badRated := make([]core.PeerID, 0) + + for _, peer := range peers { + if prh.topRatedCache.Has(peer.Bytes()) { + topRated = append(topRated, peer) + } + + if prh.badRatedCache.Has(peer.Bytes()) { + badRated = append(badRated, peer) + } + } + + return topRated, badRated +} + +// IsInterfaceNil returns true if there is no value under the interface +func (prh *peersRatingHandler) IsInterfaceNil() bool { + return prh == nil +} diff --git a/p2p/rating/peersRatingHandler_test.go b/p2p/rating/peersRatingHandler_test.go new file mode 100644 index 00000000000..5070634847e --- /dev/null +++ b/p2p/rating/peersRatingHandler_test.go @@ -0,0 +1,426 @@ +package rating + +import ( + "bytes" + "errors" + "strings" + "testing" + + "github.com/ElrondNetwork/elrond-go-core/core" + "github.com/ElrondNetwork/elrond-go-core/core/check" + "github.com/ElrondNetwork/elrond-go/p2p" + "github.com/ElrondNetwork/elrond-go/testscommon" + "github.com/stretchr/testify/assert" +) + +func createMockArgs() ArgPeersRatingHandler { + return ArgPeersRatingHandler{ + TopRatedCache: &testscommon.CacherStub{}, + BadRatedCache: &testscommon.CacherStub{}, + } +} + +func TestNewPeersRatingHandler(t *testing.T) { + t.Parallel() + + t.Run("nil top rated cache should error", func(t *testing.T) { + t.Parallel() + + args := createMockArgs() + args.TopRatedCache = nil + + prh, err := NewPeersRatingHandler(args) + assert.True(t, errors.Is(err, p2p.ErrNilCacher)) + assert.True(t, strings.Contains(err.Error(), "TopRatedCache")) + assert.True(t, check.IfNil(prh)) + }) + t.Run("nil bad rated cache should error", func(t *testing.T) { + t.Parallel() + + args := createMockArgs() + args.BadRatedCache = nil + + prh, err := NewPeersRatingHandler(args) + assert.True(t, errors.Is(err, p2p.ErrNilCacher)) + assert.True(t, strings.Contains(err.Error(), "BadRatedCache")) + assert.True(t, check.IfNil(prh)) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + prh, err := NewPeersRatingHandler(createMockArgs()) + assert.Nil(t, err) + assert.False(t, check.IfNil(prh)) + }) +} + +func TestPeersRatingHandler_AddPeer(t *testing.T) { + t.Parallel() + + t.Run("new peer should add", func(t *testing.T) { + t.Parallel() + + wasCalled := false + providedPid := core.PeerID("provided pid") + args := createMockArgs() + args.TopRatedCache = &testscommon.CacherStub{ + GetCalled: func(key []byte) (value interface{}, ok bool) { + return nil, false + }, + PutCalled: func(key []byte, value interface{}, sizeInBytes int) (evicted bool) { + assert.True(t, bytes.Equal(providedPid.Bytes(), key)) + + wasCalled = true + return false + }, + } + args.BadRatedCache = &testscommon.CacherStub{ + GetCalled: func(key []byte) 
(value interface{}, ok bool) { + return nil, false + }, + } + + prh, _ := NewPeersRatingHandler(args) + assert.False(t, check.IfNil(prh)) + + prh.AddPeer(providedPid) + assert.True(t, wasCalled) + }) + t.Run("peer in top rated should not add", func(t *testing.T) { + t.Parallel() + + wasCalled := false + providedPid := core.PeerID("provided pid") + args := createMockArgs() + args.TopRatedCache = &testscommon.CacherStub{ + GetCalled: func(key []byte) (value interface{}, ok bool) { + return nil, true + }, + PutCalled: func(key []byte, value interface{}, sizeInBytes int) (evicted bool) { + wasCalled = true + return false + }, + } + args.BadRatedCache = &testscommon.CacherStub{ + GetCalled: func(key []byte) (value interface{}, ok bool) { + return nil, false + }, + } + + prh, _ := NewPeersRatingHandler(args) + assert.False(t, check.IfNil(prh)) + + prh.AddPeer(providedPid) + assert.False(t, wasCalled) + }) + t.Run("peer in bad rated should not add", func(t *testing.T) { + t.Parallel() + + wasCalled := false + providedPid := core.PeerID("provided pid") + args := createMockArgs() + args.TopRatedCache = &testscommon.CacherStub{ + GetCalled: func(key []byte) (value interface{}, ok bool) { + return nil, false + }, + PutCalled: func(key []byte, value interface{}, sizeInBytes int) (evicted bool) { + wasCalled = true + return false + }, + } + args.BadRatedCache = &testscommon.CacherStub{ + GetCalled: func(key []byte) (value interface{}, ok bool) { + return nil, true + }, + } + + prh, _ := NewPeersRatingHandler(args) + assert.False(t, check.IfNil(prh)) + + prh.AddPeer(providedPid) + assert.False(t, wasCalled) + }) +} + +func TestPeersRatingHandler_IncreaseRating(t *testing.T) { + t.Parallel() + + t.Run("new peer should add to cache", func(t *testing.T) { + t.Parallel() + + wasCalled := false + providedPid := core.PeerID("provided pid") + args := createMockArgs() + args.TopRatedCache = &testscommon.CacherStub{ + GetCalled: func(key []byte) (value interface{}, ok bool) { + return nil, false + }, + PutCalled: func(key []byte, value interface{}, sizeInBytes int) (evicted bool) { + assert.True(t, bytes.Equal(providedPid.Bytes(), key)) + + wasCalled = true + return false + }, + } + args.BadRatedCache = &testscommon.CacherStub{ + GetCalled: func(key []byte) (value interface{}, ok bool) { + return nil, false + }, + } + prh, _ := NewPeersRatingHandler(args) + assert.False(t, check.IfNil(prh)) + + prh.IncreaseRating(providedPid) + assert.True(t, wasCalled) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + cacheMap := make(map[string]interface{}) + providedPid := core.PeerID("provided pid") + args := createMockArgs() + args.TopRatedCache = &testscommon.CacherStub{ + GetCalled: func(key []byte) (value interface{}, ok bool) { + val, found := cacheMap[string(key)] + return val, found + }, + PutCalled: func(key []byte, value interface{}, sizeInBytes int) (evicted bool) { + cacheMap[string(key)] = value + return false + }, + } + + prh, _ := NewPeersRatingHandler(args) + assert.False(t, check.IfNil(prh)) + + prh.IncreaseRating(providedPid) + val, found := cacheMap[string(providedPid.Bytes())] + assert.True(t, found) + assert.Equal(t, defaultRating, val) + + // exceed the limit + numOfCalls := 100 + for i := 0; i < numOfCalls; i++ { + prh.IncreaseRating(providedPid) + } + val, found = cacheMap[string(providedPid.Bytes())] + assert.True(t, found) + assert.Equal(t, int32(maxRating), val) + }) +} + +func TestPeersRatingHandler_DecreaseRating(t *testing.T) { + t.Parallel() + + t.Run("new peer should add to 
cache", func(t *testing.T) { + t.Parallel() + + wasCalled := false + providedPid := core.PeerID("provided pid") + args := createMockArgs() + args.TopRatedCache = &testscommon.CacherStub{ + GetCalled: func(key []byte) (value interface{}, ok bool) { + return nil, false + }, + PutCalled: func(key []byte, value interface{}, sizeInBytes int) (evicted bool) { + assert.True(t, bytes.Equal(providedPid.Bytes(), key)) + + wasCalled = true + return false + }, + } + args.BadRatedCache = &testscommon.CacherStub{ + GetCalled: func(key []byte) (value interface{}, ok bool) { + return nil, false + }, + } + prh, _ := NewPeersRatingHandler(args) + assert.False(t, check.IfNil(prh)) + + prh.DecreaseRating(providedPid) + assert.True(t, wasCalled) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + topRatedCacheMap := make(map[string]interface{}) + badRatedCacheMap := make(map[string]interface{}) + providedPid := core.PeerID("provided pid") + args := createMockArgs() + args.TopRatedCache = &testscommon.CacherStub{ + GetCalled: func(key []byte) (value interface{}, ok bool) { + val, found := topRatedCacheMap[string(key)] + return val, found + }, + PutCalled: func(key []byte, value interface{}, sizeInBytes int) (evicted bool) { + topRatedCacheMap[string(key)] = value + return false + }, + RemoveCalled: func(key []byte) { + delete(topRatedCacheMap, string(key)) + }, + } + args.BadRatedCache = &testscommon.CacherStub{ + GetCalled: func(key []byte) (value interface{}, ok bool) { + val, found := badRatedCacheMap[string(key)] + return val, found + }, + PutCalled: func(key []byte, value interface{}, sizeInBytes int) (evicted bool) { + badRatedCacheMap[string(key)] = value + return false + }, + RemoveCalled: func(key []byte) { + delete(badRatedCacheMap, string(key)) + }, + } + + prh, _ := NewPeersRatingHandler(args) + assert.False(t, check.IfNil(prh)) + + // first call just adds it with default rating + prh.DecreaseRating(providedPid) + val, found := topRatedCacheMap[string(providedPid.Bytes())] + assert.True(t, found) + assert.Equal(t, defaultRating, val) + + // exceed the limit + numOfCalls := 200 + for i := 0; i < numOfCalls; i++ { + prh.DecreaseRating(providedPid) + } + val, found = badRatedCacheMap[string(providedPid.Bytes())] + assert.True(t, found) + assert.Equal(t, int32(minRating), val) + + // move back to top tier + for i := 0; i < numOfCalls; i++ { + prh.IncreaseRating(providedPid) + } + _, found = badRatedCacheMap[string(providedPid.Bytes())] + assert.False(t, found) + + val, found = topRatedCacheMap[string(providedPid.Bytes())] + assert.True(t, found) + assert.Equal(t, int32(maxRating), val) + }) +} + +func TestPeersRatingHandler_GetTopRatedPeersFromList(t *testing.T) { + t.Parallel() + + t.Run("asking for 0 peers should return empty list", func(t *testing.T) { + t.Parallel() + + prh, _ := NewPeersRatingHandler(createMockArgs()) + assert.False(t, check.IfNil(prh)) + + res := prh.GetTopRatedPeersFromList([]core.PeerID{"pid"}, 0) + assert.Equal(t, 0, len(res)) + }) + t.Run("nil provided list should return empty list", func(t *testing.T) { + t.Parallel() + + prh, _ := NewPeersRatingHandler(createMockArgs()) + assert.False(t, check.IfNil(prh)) + + res := prh.GetTopRatedPeersFromList(nil, 1) + assert.Equal(t, 0, len(res)) + }) + t.Run("no peers in maps should return empty list", func(t *testing.T) { + t.Parallel() + + prh, _ := NewPeersRatingHandler(createMockArgs()) + assert.False(t, check.IfNil(prh)) + + providedListOfPeers := []core.PeerID{"pid 1", "pid 2"} + res := 
prh.GetTopRatedPeersFromList(providedListOfPeers, 5) + assert.Equal(t, 0, len(res)) + }) + t.Run("one peer in top rated, asking for one should work", func(t *testing.T) { + t.Parallel() + + providedPid := core.PeerID("provided pid") + args := createMockArgs() + args.TopRatedCache = &testscommon.CacherStub{ + LenCalled: func() int { + return 1 + }, + KeysCalled: func() [][]byte { + return [][]byte{providedPid.Bytes()} + }, + HasCalled: func(key []byte) bool { + return bytes.Equal(key, providedPid.Bytes()) + }, + } + prh, _ := NewPeersRatingHandler(args) + assert.False(t, check.IfNil(prh)) + + providedListOfPeers := []core.PeerID{providedPid, "another pid"} + res := prh.GetTopRatedPeersFromList(providedListOfPeers, 1) + assert.Equal(t, 1, len(res)) + assert.Equal(t, providedPid, res[0]) + }) + t.Run("one peer in each, asking for two should work", func(t *testing.T) { + t.Parallel() + + providedTopPid := core.PeerID("provided top pid") + providedBadPid := core.PeerID("provided bad pid") + args := createMockArgs() + args.TopRatedCache = &testscommon.CacherStub{ + LenCalled: func() int { + return 1 + }, + KeysCalled: func() [][]byte { + return [][]byte{providedTopPid.Bytes()} + }, + HasCalled: func(key []byte) bool { + return bytes.Equal(key, providedTopPid.Bytes()) + }, + } + args.BadRatedCache = &testscommon.CacherStub{ + LenCalled: func() int { + return 1 + }, + KeysCalled: func() [][]byte { + return [][]byte{providedBadPid.Bytes()} + }, + HasCalled: func(key []byte) bool { + return bytes.Equal(key, providedBadPid.Bytes()) + }, + } + prh, _ := NewPeersRatingHandler(args) + assert.False(t, check.IfNil(prh)) + + providedListOfPeers := []core.PeerID{providedTopPid, providedBadPid, "another pid"} + expectedListOfPeers := []core.PeerID{providedTopPid, providedBadPid} + res := prh.GetTopRatedPeersFromList(providedListOfPeers, 2) + assert.Equal(t, expectedListOfPeers, res) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + providedPid1, providedPid2, providedPid3 := core.PeerID("provided pid 1"), core.PeerID("provided pid 2"), core.PeerID("provided pid 3") + args := createMockArgs() + args.TopRatedCache = &testscommon.CacherStub{ + LenCalled: func() int { + return 3 + }, + KeysCalled: func() [][]byte { + return [][]byte{providedPid1.Bytes(), providedPid2.Bytes(), providedPid3.Bytes()} + }, + HasCalled: func(key []byte) bool { + has := bytes.Equal(key, providedPid1.Bytes()) || + bytes.Equal(key, providedPid2.Bytes()) || + bytes.Equal(key, providedPid3.Bytes()) + return has + }, + } + prh, _ := NewPeersRatingHandler(args) + assert.False(t, check.IfNil(prh)) + + providedListOfPeers := []core.PeerID{providedPid1, providedPid2, providedPid3, "another pid 1", "another pid 2"} + expectedListOfPeers := []core.PeerID{providedPid1, providedPid2, providedPid3} + res := prh.GetTopRatedPeersFromList(providedListOfPeers, 2) + assert.Equal(t, expectedListOfPeers, res) + }) +} diff --git a/process/block/argProcessor.go b/process/block/argProcessor.go index f9b3d1e5328..b7e1e747c7a 100644 --- a/process/block/argProcessor.go +++ b/process/block/argProcessor.go @@ -79,6 +79,7 @@ type ArgBaseProcessor struct { GasHandler gasConsumedProvider ScheduledTxsExecutionHandler process.ScheduledTxsExecutionHandler ScheduledMiniBlocksEnableEpoch uint32 + ProcessedMiniBlocksTracker process.ProcessedMiniBlocksTracker } // ArgShardProcessor holds all dependencies required by the process data factory in order to create diff --git a/process/block/baseProcess.go b/process/block/baseProcess.go index 
37bd922599f..e6a0e6b2aa5 100644 --- a/process/block/baseProcess.go +++ b/process/block/baseProcess.go @@ -28,6 +28,7 @@ import ( "github.com/ElrondNetwork/elrond-go/outport" "github.com/ElrondNetwork/elrond-go/process" "github.com/ElrondNetwork/elrond-go/process/block/bootstrapStorage" + "github.com/ElrondNetwork/elrond-go/process/block/processedMb" "github.com/ElrondNetwork/elrond-go/sharding" "github.com/ElrondNetwork/elrond-go/sharding/nodesCoordinator" "github.com/ElrondNetwork/elrond-go/state" @@ -97,6 +98,7 @@ type baseProcessor struct { processDataTriesOnCommitEpoch bool scheduledMiniBlocksEnableEpoch uint32 flagScheduledMiniBlocks atomic.Flag + processedMiniBlocksTracker process.ProcessedMiniBlocksTracker } type bootStorerDataArgs struct { @@ -504,6 +506,9 @@ func checkProcessorNilParameters(arguments ArgBaseProcessor) error { if check.IfNil(arguments.BootstrapComponents.VersionedHeaderFactory()) { return process.ErrNilVersionedHeaderFactory } + if check.IfNil(arguments.ProcessedMiniBlocksTracker) { + return process.ErrNilProcessedMiniBlocksTracker + } return nil } @@ -588,7 +593,10 @@ func (bp *baseProcessor) sortHeaderHashesForCurrentBlockByNonce(usedInBlock bool return hdrsHashesForCurrentBlock } -func (bp *baseProcessor) createMiniBlockHeaderHandlers(body *block.Body) (int, []data.MiniBlockHeaderHandler, error) { +func (bp *baseProcessor) createMiniBlockHeaderHandlers( + body *block.Body, + processedMiniBlocksDestMeInfo map[string]*processedMb.ProcessedMiniBlockInfo, +) (int, []data.MiniBlockHeaderHandler, error) { if len(body.MiniBlocks) == 0 { return 0, nil, nil } @@ -613,7 +621,7 @@ func (bp *baseProcessor) createMiniBlockHeaderHandlers(body *block.Body) (int, [ Type: body.MiniBlocks[i].Type, } - err = bp.setMiniBlockHeaderReservedField(body.MiniBlocks[i], miniBlockHash, miniBlockHeaderHandlers[i]) + err = bp.setMiniBlockHeaderReservedField(body.MiniBlocks[i], miniBlockHeaderHandlers[i], processedMiniBlocksDestMeInfo) if err != nil { return 0, nil, err } @@ -624,24 +632,52 @@ func (bp *baseProcessor) createMiniBlockHeaderHandlers(body *block.Body) (int, [ func (bp *baseProcessor) setMiniBlockHeaderReservedField( miniBlock *block.MiniBlock, - miniBlockHash []byte, miniBlockHeaderHandler data.MiniBlockHeaderHandler, + processedMiniBlocksDestMeInfo map[string]*processedMb.ProcessedMiniBlockInfo, ) error { if !bp.flagScheduledMiniBlocks.IsSet() { return nil } + err := bp.setIndexOfFirstTxProcessed(miniBlockHeaderHandler) + if err != nil { + return err + } + + err = bp.setIndexOfLastTxProcessed(miniBlockHeaderHandler, processedMiniBlocksDestMeInfo) + if err != nil { + return err + } + notEmpty := len(miniBlock.TxHashes) > 0 isScheduledMiniBlock := notEmpty && bp.scheduledTxsExecutionHandler.IsScheduledTx(miniBlock.TxHashes[0]) if isScheduledMiniBlock { - return bp.setProcessingTypeAndConstructionStateForScheduledMb(miniBlockHeaderHandler) + return bp.setProcessingTypeAndConstructionStateForScheduledMb(miniBlockHeaderHandler, processedMiniBlocksDestMeInfo) } - return bp.setProcessingTypeAndConstructionStateForNormalMb(miniBlockHeaderHandler, miniBlockHash) + return bp.setProcessingTypeAndConstructionStateForNormalMb(miniBlockHeaderHandler, processedMiniBlocksDestMeInfo) +} + +func (bp *baseProcessor) setIndexOfFirstTxProcessed(miniBlockHeaderHandler data.MiniBlockHeaderHandler) error { + processedMiniBlockInfo, _ := bp.processedMiniBlocksTracker.GetProcessedMiniBlockInfo(miniBlockHeaderHandler.GetHash()) + return 
miniBlockHeaderHandler.SetIndexOfFirstTxProcessed(processedMiniBlockInfo.IndexOfLastTxProcessed + 1) +} + +func (bp *baseProcessor) setIndexOfLastTxProcessed( + miniBlockHeaderHandler data.MiniBlockHeaderHandler, + processedMiniBlocksDestMeInfo map[string]*processedMb.ProcessedMiniBlockInfo, +) error { + processedMiniBlockInfo := processedMiniBlocksDestMeInfo[string(miniBlockHeaderHandler.GetHash())] + if processedMiniBlockInfo != nil { + return miniBlockHeaderHandler.SetIndexOfLastTxProcessed(processedMiniBlockInfo.IndexOfLastTxProcessed) + } + + return miniBlockHeaderHandler.SetIndexOfLastTxProcessed(int32(miniBlockHeaderHandler.GetTxCount()) - 1) } func (bp *baseProcessor) setProcessingTypeAndConstructionStateForScheduledMb( miniBlockHeaderHandler data.MiniBlockHeaderHandler, + processedMiniBlocksDestMeInfo map[string]*processedMb.ProcessedMiniBlockInfo, ) error { err := miniBlockHeaderHandler.SetProcessingType(int32(block.Scheduled)) if err != nil { @@ -654,7 +690,8 @@ func (bp *baseProcessor) setProcessingTypeAndConstructionStateForScheduledMb( return err } } else { - err = miniBlockHeaderHandler.SetConstructionState(int32(block.Final)) + constructionState := getConstructionState(miniBlockHeaderHandler, processedMiniBlocksDestMeInfo) + err = miniBlockHeaderHandler.SetConstructionState(constructionState) if err != nil { return err } @@ -664,9 +701,9 @@ func (bp *baseProcessor) setProcessingTypeAndConstructionStateForScheduledMb( func (bp *baseProcessor) setProcessingTypeAndConstructionStateForNormalMb( miniBlockHeaderHandler data.MiniBlockHeaderHandler, - miniBlockHash []byte, + processedMiniBlocksDestMeInfo map[string]*processedMb.ProcessedMiniBlockInfo, ) error { - if bp.scheduledTxsExecutionHandler.IsMiniBlockExecuted(miniBlockHash) { + if bp.scheduledTxsExecutionHandler.IsMiniBlockExecuted(miniBlockHeaderHandler.GetHash()) { err := miniBlockHeaderHandler.SetProcessingType(int32(block.Processed)) if err != nil { return err @@ -678,7 +715,8 @@ func (bp *baseProcessor) setProcessingTypeAndConstructionStateForNormalMb( } } - err := miniBlockHeaderHandler.SetConstructionState(int32(block.Final)) + constructionState := getConstructionState(miniBlockHeaderHandler, processedMiniBlocksDestMeInfo) + err := miniBlockHeaderHandler.SetConstructionState(constructionState) if err != nil { return err } @@ -686,6 +724,27 @@ func (bp *baseProcessor) setProcessingTypeAndConstructionStateForNormalMb( return nil } +func getConstructionState( + miniBlockHeaderHandler data.MiniBlockHeaderHandler, + processedMiniBlocksDestMeInfo map[string]*processedMb.ProcessedMiniBlockInfo, +) int32 { + constructionState := int32(block.Final) + if isPartiallyExecuted(miniBlockHeaderHandler, processedMiniBlocksDestMeInfo) { + constructionState = int32(block.PartialExecuted) + } + + return constructionState +} + +func isPartiallyExecuted( + miniBlockHeaderHandler data.MiniBlockHeaderHandler, + processedMiniBlocksDestMeInfo map[string]*processedMb.ProcessedMiniBlockInfo, +) bool { + processedMiniBlockInfo := processedMiniBlocksDestMeInfo[string(miniBlockHeaderHandler.GetHash())] + return processedMiniBlockInfo != nil && !processedMiniBlockInfo.FullyProcessed + +} + // check if header has the same miniblocks as presented in body func (bp *baseProcessor) checkHeaderBodyCorrelation(miniBlockHeaders []data.MiniBlockHeaderHandler, body *block.Body) error { mbHashesFromHdr := make(map[string]data.MiniBlockHeaderHandler, len(miniBlockHeaders)) @@ -724,6 +783,28 @@ func (bp *baseProcessor) 
checkHeaderBodyCorrelation(miniBlockHeaders []data.Mini if mbHdr.GetSenderShardID() != miniBlock.SenderShardID { return process.ErrHeaderBodyMismatch } + + err = process.CheckIfIndexesAreOutOfBound(mbHdr.GetIndexOfFirstTxProcessed(), mbHdr.GetIndexOfLastTxProcessed(), miniBlock) + if err != nil { + return err + } + + err = checkConstructionStateAndIndexesCorrectness(mbHdr) + if err != nil { + return err + } + } + + return nil +} + +func checkConstructionStateAndIndexesCorrectness(mbh data.MiniBlockHeaderHandler) error { + if mbh.GetConstructionState() == int32(block.PartialExecuted) && mbh.GetIndexOfLastTxProcessed() == int32(mbh.GetTxCount())-1 { + return process.ErrIndexDoesNotMatchWithPartialExecutedMiniBlock + + } + if mbh.GetConstructionState() != int32(block.PartialExecuted) && mbh.GetIndexOfLastTxProcessed() != int32(mbh.GetTxCount())-1 { + return process.ErrIndexDoesNotMatchWithFullyExecutedMiniBlock } return nil @@ -836,10 +917,10 @@ func (bp *baseProcessor) cleanupPoolsForCrossShard( ) { crossNotarizedHeader, _, err := bp.blockTracker.GetCrossNotarizedHeader(shardID, noncesToPrevFinal) if err != nil { - log.Warn("cleanupPoolsForCrossShard", - "shard", shardID, - "nonces to previous final", noncesToPrevFinal, - "error", err.Error()) + displayCleanupErrorMessage("cleanupPoolsForCrossShard", + shardID, + noncesToPrevFinal, + err) return } @@ -966,10 +1047,10 @@ func (bp *baseProcessor) cleanupBlockTrackerPools(noncesToPrevFinal uint64) { func (bp *baseProcessor) cleanupBlockTrackerPoolsForShard(shardID uint32, noncesToPrevFinal uint64) { selfNotarizedHeader, _, errSelfNotarized := bp.blockTracker.GetSelfNotarizedHeader(shardID, noncesToPrevFinal) if errSelfNotarized != nil { - log.Warn("cleanupBlockTrackerPoolsForShard.GetSelfNotarizedHeader", - "shard", shardID, - "nonces to previous final", noncesToPrevFinal, - "error", errSelfNotarized.Error()) + displayCleanupErrorMessage("cleanupBlockTrackerPoolsForShard.GetSelfNotarizedHeader", + shardID, + noncesToPrevFinal, + errSelfNotarized) return } @@ -979,10 +1060,10 @@ func (bp *baseProcessor) cleanupBlockTrackerPoolsForShard(shardID uint32, nonces if shardID != bp.shardCoordinator.SelfId() { crossNotarizedHeader, _, errCrossNotarized := bp.blockTracker.GetCrossNotarizedHeader(shardID, noncesToPrevFinal) if errCrossNotarized != nil { - log.Warn("cleanupBlockTrackerPoolsForShard.GetCrossNotarizedHeader", - "shard", shardID, - "nonces to previous final", noncesToPrevFinal, - "error", errCrossNotarized.Error()) + displayCleanupErrorMessage("cleanupBlockTrackerPoolsForShard.GetCrossNotarizedHeader", + shardID, + noncesToPrevFinal, + errCrossNotarized) return } @@ -1868,3 +1949,17 @@ func (bp *baseProcessor) EpochConfirmed(epoch uint32, _ uint64) { bp.flagScheduledMiniBlocks.SetValue(epoch >= bp.scheduledMiniBlocksEnableEpoch) log.Debug("baseProcessor: scheduled mini blocks", "enabled", bp.flagScheduledMiniBlocks.IsSet()) } + +func displayCleanupErrorMessage(message string, shardID uint32, noncesToPrevFinal uint64, err error) { + // 2 blocks on shard + 2 blocks on meta + 1 block to previous final + maxNoncesToPrevFinalWithoutWarn := uint64(process.BlockFinality+1)*2 + 1 + level := logger.LogWarning + if noncesToPrevFinal <= maxNoncesToPrevFinalWithoutWarn { + level = logger.LogDebug + } + + log.Log(level, message, + "shard", shardID, + "nonces to previous final", noncesToPrevFinal, + "error", err.Error()) +} diff --git a/process/block/baseProcess_test.go b/process/block/baseProcess_test.go index 8319f0758ae..ffc1fbb9c60 100644 --- 
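checkHeaderBodyCorrelation now also validates that a mini block header's construction state agrees with its processed-transaction range: a PartialExecuted header must stop before its last transaction, while any other state must report exactly TxCount-1 as the last processed index. A small sketch of that rule with simplified types; the real code works on data.MiniBlockHeaderHandler and returns the process package errors.

package main

import (
    "errors"
    "fmt"
)

var (
    errPartialExecutedMismatch = errors.New("last processed index does not match a partially executed mini block")
    errFullyExecutedMismatch   = errors.New("last processed index does not match a fully executed mini block")
)

// miniBlockHeader is a simplified stand-in for data.MiniBlockHeaderHandler.
type miniBlockHeader struct {
    txCount                int32
    indexOfLastTxProcessed int32
    partiallyExecuted      bool
}

// checkStateAndIndexes mirrors checkConstructionStateAndIndexesCorrectness:
// partial execution and "stopped at the last tx" are mutually exclusive.
func checkStateAndIndexes(mbh miniBlockHeader) error {
    lastTxIndex := mbh.txCount - 1
    if mbh.partiallyExecuted && mbh.indexOfLastTxProcessed == lastTxIndex {
        return errPartialExecutedMismatch
    }
    if !mbh.partiallyExecuted && mbh.indexOfLastTxProcessed != lastTxIndex {
        return errFullyExecutedMismatch
    }
    return nil
}

func main() {
    fmt.Println(checkStateAndIndexes(miniBlockHeader{txCount: 5, indexOfLastTxProcessed: 4}))                          // <nil>: fully executed
    fmt.Println(checkStateAndIndexes(miniBlockHeader{txCount: 5, indexOfLastTxProcessed: 2, partiallyExecuted: true})) // <nil>: partial
    fmt.Println(checkStateAndIndexes(miniBlockHeader{txCount: 5, indexOfLastTxProcessed: 4, partiallyExecuted: true})) // error
}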
a/process/block/baseProcess_test.go +++ b/process/block/baseProcess_test.go @@ -30,6 +30,7 @@ import ( "github.com/ElrondNetwork/elrond-go/process" blproc "github.com/ElrondNetwork/elrond-go/process/block" "github.com/ElrondNetwork/elrond-go/process/block/bootstrapStorage" + "github.com/ElrondNetwork/elrond-go/process/block/processedMb" "github.com/ElrondNetwork/elrond-go/process/coordinator" "github.com/ElrondNetwork/elrond-go/process/mock" "github.com/ElrondNetwork/elrond-go/state" @@ -105,6 +106,7 @@ func createArgBaseProcessor( GasHandler: &mock.GasHandlerMock{}, ScheduledTxsExecutionHandler: &testscommon.ScheduledTxsExecutionStub{}, ScheduledMiniBlocksEnableEpoch: 2, + ProcessedMiniBlocksTracker: &testscommon.ProcessedMiniBlocksTrackerStub{}, } } @@ -417,18 +419,20 @@ func createMockTransactionCoordinatorArguments( return []block.Type{block.SmartContractResultBlock} }, }, - GasHandler: &testscommon.GasHandlerStub{}, - FeeHandler: &mock.FeeAccumulatorStub{}, - BlockSizeComputation: &testscommon.BlockSizeComputationStub{}, - BalanceComputation: &testscommon.BalanceComputationStub{}, - EconomicsFee: &mock.FeeHandlerStub{}, - TxTypeHandler: &testscommon.TxTypeHandlerMock{}, - BlockGasAndFeesReCheckEnableEpoch: 0, - TransactionsLogProcessor: &mock.TxLogsProcessorStub{}, - EpochNotifier: &epochNotifier.EpochNotifierStub{}, - ScheduledTxsExecutionHandler: &testscommon.ScheduledTxsExecutionStub{}, - ScheduledMiniBlocksEnableEpoch: 2, - DoubleTransactionsDetector: &testscommon.PanicDoubleTransactionsDetector{}, + GasHandler: &testscommon.GasHandlerStub{}, + FeeHandler: &mock.FeeAccumulatorStub{}, + BlockSizeComputation: &testscommon.BlockSizeComputationStub{}, + BalanceComputation: &testscommon.BalanceComputationStub{}, + EconomicsFee: &mock.FeeHandlerStub{}, + TxTypeHandler: &testscommon.TxTypeHandlerMock{}, + BlockGasAndFeesReCheckEnableEpoch: 0, + TransactionsLogProcessor: &mock.TxLogsProcessorStub{}, + EpochNotifier: &epochNotifier.EpochNotifierStub{}, + ScheduledTxsExecutionHandler: &testscommon.ScheduledTxsExecutionStub{}, + ScheduledMiniBlocksEnableEpoch: 2, + DoubleTransactionsDetector: &testscommon.PanicDoubleTransactionsDetector{}, + MiniBlockPartialExecutionEnableEpoch: 2, + ProcessedMiniBlocksTracker: &testscommon.ProcessedMiniBlocksTrackerStub{}, } return argsTransactionCoordinator @@ -691,6 +695,14 @@ func TestCheckProcessorNilParameters(t *testing.T) { }, expectedErr: process.ErrNilScheduledTxsExecutionHandler, }, + { + args: func() blproc.ArgBaseProcessor { + args := createArgBaseProcessor(coreComponents, dataComponents, bootstrapComponents, statusComponents) + args.ProcessedMiniBlocksTracker = nil + return args + }, + expectedErr: process.ErrNilProcessedMiniBlocksTracker, + }, { args: func() blproc.ArgBaseProcessor { bootstrapCopy := *bootstrapComponents @@ -788,6 +800,162 @@ func TestVerifyStateRoot_ShouldWork(t *testing.T) { assert.True(t, bp.VerifyStateRoot(rootHash)) } +func TestBaseProcessor_SetIndexOfFirstTxProcessed(t *testing.T) { + t.Parallel() + + arguments := CreateMockArguments(createComponentHolderMocks()) + processedMiniBlocksTracker := processedMb.NewProcessedMiniBlocksTracker() + arguments.ProcessedMiniBlocksTracker = processedMiniBlocksTracker + bp, _ := blproc.NewShardProcessor(arguments) + + metaHash := []byte("meta_hash") + mbHash := []byte("mb_hash") + miniBlockHeader := &block.MiniBlockHeader{ + Hash: mbHash, + } + + processedMbInfo := &processedMb.ProcessedMiniBlockInfo{ + FullyProcessed: false, + IndexOfLastTxProcessed: 8, + } + 
processedMiniBlocksTracker.SetProcessedMiniBlockInfo(metaHash, mbHash, processedMbInfo) + err := bp.SetIndexOfFirstTxProcessed(miniBlockHeader) + assert.Nil(t, err) + assert.Equal(t, int32(9), miniBlockHeader.GetIndexOfFirstTxProcessed()) +} + +func TestBaseProcessor_SetIndexOfLastTxProcessed(t *testing.T) { + t.Parallel() + + arguments := CreateMockArguments(createComponentHolderMocks()) + bp, _ := blproc.NewShardProcessor(arguments) + + mbHash := []byte("mb_hash") + processedMiniBlocksDestMeInfo := make(map[string]*processedMb.ProcessedMiniBlockInfo) + miniBlockHeader := &block.MiniBlockHeader{ + Hash: mbHash, + TxCount: 100, + } + + err := bp.SetIndexOfLastTxProcessed(miniBlockHeader, processedMiniBlocksDestMeInfo) + assert.Nil(t, err) + assert.Equal(t, int32(99), miniBlockHeader.GetIndexOfLastTxProcessed()) + + processedMbInfo := &processedMb.ProcessedMiniBlockInfo{ + FullyProcessed: false, + IndexOfLastTxProcessed: 8, + } + processedMiniBlocksDestMeInfo[string(mbHash)] = processedMbInfo + + err = bp.SetIndexOfLastTxProcessed(miniBlockHeader, processedMiniBlocksDestMeInfo) + assert.Nil(t, err) + assert.Equal(t, int32(8), miniBlockHeader.GetIndexOfLastTxProcessed()) +} + +func TestBaseProcessor_SetProcessingTypeAndConstructionStateForScheduledMb(t *testing.T) { + t.Parallel() + + arguments := CreateMockArguments(createComponentHolderMocks()) + bp, _ := blproc.NewShardProcessor(arguments) + + mbHash := []byte("mb_hash") + processedMiniBlocksDestMeInfo := make(map[string]*processedMb.ProcessedMiniBlockInfo) + miniBlockHeader := &block.MiniBlockHeader{ + Hash: mbHash, + } + + processedMbInfo := &processedMb.ProcessedMiniBlockInfo{ + FullyProcessed: false, + } + + miniBlockHeader.SenderShardID = 0 + err := bp.SetProcessingTypeAndConstructionStateForScheduledMb(miniBlockHeader, processedMiniBlocksDestMeInfo) + assert.Nil(t, err) + assert.Equal(t, int32(block.Proposed), miniBlockHeader.GetConstructionState()) + assert.Equal(t, int32(block.Scheduled), miniBlockHeader.GetProcessingType()) + + miniBlockHeader.SenderShardID = 1 + + err = bp.SetProcessingTypeAndConstructionStateForScheduledMb(miniBlockHeader, processedMiniBlocksDestMeInfo) + assert.Nil(t, err) + assert.Equal(t, int32(block.Final), miniBlockHeader.GetConstructionState()) + assert.Equal(t, int32(block.Scheduled), miniBlockHeader.GetProcessingType()) + + processedMiniBlocksDestMeInfo[string(mbHash)] = processedMbInfo + + err = bp.SetProcessingTypeAndConstructionStateForScheduledMb(miniBlockHeader, processedMiniBlocksDestMeInfo) + assert.Nil(t, err) + assert.Equal(t, int32(block.PartialExecuted), miniBlockHeader.GetConstructionState()) + assert.Equal(t, int32(block.Scheduled), miniBlockHeader.GetProcessingType()) +} + +func TestBaseProcessor_SetProcessingTypeAndConstructionStateForNormalMb(t *testing.T) { + t.Parallel() + + t.Run("set processing/construction for normal mini blocks not processed, should work", func(t *testing.T) { + t.Parallel() + + arguments := CreateMockArguments(createComponentHolderMocks()) + bp, _ := blproc.NewShardProcessor(arguments) + + mbHash := []byte("mb_hash") + processedMiniBlocksDestMeInfo := make(map[string]*processedMb.ProcessedMiniBlockInfo) + miniBlockHeader := &block.MiniBlockHeader{ + Hash: mbHash, + } + + processedMbInfo := &processedMb.ProcessedMiniBlockInfo{ + FullyProcessed: false, + } + + err := bp.SetProcessingTypeAndConstructionStateForNormalMb(miniBlockHeader, processedMiniBlocksDestMeInfo) + assert.Nil(t, err) + assert.Equal(t, int32(block.Final), miniBlockHeader.GetConstructionState()) + 
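These tests document how the processor derives the processed-transaction window for a mini block header: the first index is one past the last index already recorded by the processed-mini-blocks tracker, and the last index comes from the per-header map built while creating the block, falling back to TxCount-1 when the map has no entry for that mini block. A hedged sketch of that selection logic, with plain types replacing the tracker and handler interfaces.

package main

import "fmt"

// processedMiniBlockInfo is a simplified stand-in for processedMb.ProcessedMiniBlockInfo.
type processedMiniBlockInfo struct {
    fullyProcessed         bool
    indexOfLastTxProcessed int32
}

// firstTxIndex resumes right after whatever the tracker already recorded,
// e.g. a tracker value of 8 means processing restarts at index 9.
func firstTxIndex(trackerInfo processedMiniBlockInfo) int32 {
    return trackerInfo.indexOfLastTxProcessed + 1
}

// lastTxIndex prefers the value collected while building the block body,
// otherwise it assumes the mini block was consumed completely.
func lastTxIndex(mbHash string, txCount int32, destMeInfo map[string]*processedMiniBlockInfo) int32 {
    if info := destMeInfo[mbHash]; info != nil {
        return info.indexOfLastTxProcessed
    }
    return txCount - 1
}

func main() {
    destMeInfo := map[string]*processedMiniBlockInfo{}

    fmt.Println(firstTxIndex(processedMiniBlockInfo{indexOfLastTxProcessed: 8})) // 9
    fmt.Println(lastTxIndex("mb_hash", 100, destMeInfo))                         // 99

    destMeInfo["mb_hash"] = &processedMiniBlockInfo{indexOfLastTxProcessed: 8}
    fmt.Println(lastTxIndex("mb_hash", 100, destMeInfo)) // 8
}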
assert.Equal(t, int32(block.Normal), miniBlockHeader.GetProcessingType()) + + processedMiniBlocksDestMeInfo[string(mbHash)] = processedMbInfo + + err = bp.SetProcessingTypeAndConstructionStateForNormalMb(miniBlockHeader, processedMiniBlocksDestMeInfo) + assert.Nil(t, err) + assert.Equal(t, int32(block.PartialExecuted), miniBlockHeader.GetConstructionState()) + assert.Equal(t, int32(block.Normal), miniBlockHeader.GetProcessingType()) + }) + + t.Run("set processing/construction for normal mini blocks already processed, should work", func(t *testing.T) { + t.Parallel() + + arguments := CreateMockArguments(createComponentHolderMocks()) + arguments.ScheduledTxsExecutionHandler = &testscommon.ScheduledTxsExecutionStub{ + IsMiniBlockExecutedCalled: func(i []byte) bool { + return true + }, + } + bp, _ := blproc.NewShardProcessor(arguments) + + mbHash := []byte("mb_hash") + processedMiniBlocksDestMeInfo := make(map[string]*processedMb.ProcessedMiniBlockInfo) + miniBlockHeader := &block.MiniBlockHeader{ + Hash: mbHash, + } + + processedMbInfo := &processedMb.ProcessedMiniBlockInfo{ + FullyProcessed: false, + } + + err := bp.SetProcessingTypeAndConstructionStateForNormalMb(miniBlockHeader, processedMiniBlocksDestMeInfo) + assert.Nil(t, err) + assert.Equal(t, int32(block.Final), miniBlockHeader.GetConstructionState()) + assert.Equal(t, int32(block.Processed), miniBlockHeader.GetProcessingType()) + + processedMiniBlocksDestMeInfo[string(mbHash)] = processedMbInfo + + err = bp.SetProcessingTypeAndConstructionStateForNormalMb(miniBlockHeader, processedMiniBlocksDestMeInfo) + assert.Nil(t, err) + assert.Equal(t, int32(block.PartialExecuted), miniBlockHeader.GetConstructionState()) + assert.Equal(t, int32(block.Processed), miniBlockHeader.GetProcessingType()) + }) +} + // ------- RevertState func TestBaseProcessor_RevertStateRecreateTrieFailsShouldErr(t *testing.T) { t.Parallel() @@ -2468,7 +2636,7 @@ func TestBaseProcessor_setMiniBlockHeaderReservedField(t *testing.T) { arguments := CreateMockArguments(createComponentHolderMocks()) bp, _ := blproc.NewShardProcessor(arguments) - err := bp.SetMiniBlockHeaderReservedField(&block.MiniBlock{}, []byte{}, &block.MiniBlockHeader{}) + err := bp.SetMiniBlockHeaderReservedField(&block.MiniBlock{}, &block.MiniBlockHeader{Hash: []byte{}}, make(map[string]*processedMb.ProcessedMiniBlockInfo)) assert.Nil(t, err) }) @@ -2489,9 +2657,11 @@ func TestBaseProcessor_setMiniBlockHeaderReservedField(t *testing.T) { bp, _ := blproc.NewShardProcessor(arguments) bp.EpochConfirmed(4, 0) - mbHandler := &block.MiniBlockHeader{} + mbHandler := &block.MiniBlockHeader{ + Hash: miniBlockHash, + } - err := bp.SetMiniBlockHeaderReservedField(&block.MiniBlock{}, miniBlockHash, mbHandler) + err := bp.SetMiniBlockHeaderReservedField(&block.MiniBlock{}, mbHandler, make(map[string]*processedMb.ProcessedMiniBlockInfo)) assert.Nil(t, err) assert.Equal(t, int32(block.Normal), mbHandler.GetProcessingType()) assert.Equal(t, int32(block.Final), mbHandler.GetConstructionState()) @@ -2515,9 +2685,11 @@ func TestBaseProcessor_setMiniBlockHeaderReservedField(t *testing.T) { bp, _ := blproc.NewShardProcessor(arguments) bp.EpochConfirmed(4, 0) - mbHandler := &block.MiniBlockHeader{} + mbHandler := &block.MiniBlockHeader{ + Hash: miniBlockHash, + } - err := bp.SetMiniBlockHeaderReservedField(&block.MiniBlock{}, miniBlockHash, mbHandler) + err := bp.SetMiniBlockHeaderReservedField(&block.MiniBlock{}, mbHandler, make(map[string]*processedMb.ProcessedMiniBlockInfo)) assert.Nil(t, err) assert.Equal(t, 
int32(block.Processed), mbHandler.GetProcessingType()) assert.Equal(t, int32(block.Final), mbHandler.GetConstructionState()) @@ -2548,10 +2720,11 @@ func TestBaseProcessor_setMiniBlockHeaderReservedField(t *testing.T) { } mbHandler := &block.MiniBlockHeader{ + Hash: miniBlockHash, SenderShardID: 2, } - err := bp.SetMiniBlockHeaderReservedField(mb, miniBlockHash, mbHandler) + err := bp.SetMiniBlockHeaderReservedField(mb, mbHandler, make(map[string]*processedMb.ProcessedMiniBlockInfo)) assert.Nil(t, err) assert.Equal(t, int32(block.Scheduled), mbHandler.GetProcessingType()) assert.Equal(t, int32(block.Final), mbHandler.GetConstructionState()) @@ -2584,10 +2757,11 @@ func TestBaseProcessor_setMiniBlockHeaderReservedField(t *testing.T) { } mbHandler := &block.MiniBlockHeader{ + Hash: miniBlockHash, SenderShardID: shardId, } - err := bp.SetMiniBlockHeaderReservedField(mb, miniBlockHash, mbHandler) + err := bp.SetMiniBlockHeaderReservedField(mb, mbHandler, make(map[string]*processedMb.ProcessedMiniBlockInfo)) assert.Nil(t, err) assert.Equal(t, int32(block.Scheduled), mbHandler.GetProcessingType()) assert.Equal(t, int32(block.Proposed), mbHandler.GetConstructionState()) @@ -2641,3 +2815,42 @@ func TestMetaProcessor_RestoreBlockBodyIntoPoolsShouldWork(t *testing.T) { err := mp.RestoreBlockBodyIntoPools(&block.Body{}) assert.Nil(t, err) } + +func TestBaseProcessor_checkConstructionStateAndIndexesCorrectness(t *testing.T) { + t.Parallel() + + arguments := CreateMockArguments(createComponentHolderMocks()) + bp, _ := blproc.NewShardProcessor(arguments) + + mbh := &block.MiniBlockHeader{ + TxCount: 5, + } + + _ = mbh.SetConstructionState(int32(block.PartialExecuted)) + + _ = mbh.SetIndexOfLastTxProcessed(int32(mbh.TxCount)) + err := bp.CheckConstructionStateAndIndexesCorrectness(mbh) + assert.Nil(t, err) + + _ = mbh.SetIndexOfLastTxProcessed(int32(mbh.TxCount) - 2) + err = bp.CheckConstructionStateAndIndexesCorrectness(mbh) + assert.Nil(t, err) + + _ = mbh.SetIndexOfLastTxProcessed(int32(mbh.TxCount) - 1) + err = bp.CheckConstructionStateAndIndexesCorrectness(mbh) + assert.Equal(t, process.ErrIndexDoesNotMatchWithPartialExecutedMiniBlock, err) + + _ = mbh.SetConstructionState(int32(block.Final)) + + _ = mbh.SetIndexOfLastTxProcessed(int32(mbh.TxCount)) + err = bp.CheckConstructionStateAndIndexesCorrectness(mbh) + assert.Equal(t, process.ErrIndexDoesNotMatchWithFullyExecutedMiniBlock, err) + + _ = mbh.SetIndexOfLastTxProcessed(int32(mbh.TxCount) - 2) + err = bp.CheckConstructionStateAndIndexesCorrectness(mbh) + assert.Equal(t, process.ErrIndexDoesNotMatchWithFullyExecutedMiniBlock, err) + + _ = mbh.SetIndexOfLastTxProcessed(int32(mbh.TxCount) - 1) + err = bp.CheckConstructionStateAndIndexesCorrectness(mbh) + assert.Nil(t, err) +} diff --git a/process/block/bootstrapStorage/boostrapData.go b/process/block/bootstrapStorage/boostrapData.go new file mode 100644 index 00000000000..6a46e555133 --- /dev/null +++ b/process/block/bootstrapStorage/boostrapData.go @@ -0,0 +1,23 @@ +package bootstrapStorage + +import "github.com/ElrondNetwork/elrond-go/common" + +// IsFullyProcessed returns if the mini block at the given index is fully processed or not +func (m *MiniBlocksInMeta) IsFullyProcessed(index int) bool { + fullyProcessed := true + if m.FullyProcessed != nil && index < len(m.FullyProcessed) { + fullyProcessed = m.FullyProcessed[index] + } + + return fullyProcessed +} + +// GetIndexOfLastTxProcessedInMiniBlock returns index of the last transaction processed in the mini block with the given index +func (m 
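The new accessors on MiniBlocksInMeta are written defensively, so records that are missing the new slices (for example, bootstrap data persisted before this change) still read as fully processed. The same pattern is shown below on a stand-alone struct; the placeholder constant for common.MaxIndexOfTxInMiniBlock is an assumption, only the "treat missing data as fully processed" fallback matters here.

package main

import "fmt"

// maxIndexOfTxInMiniBlock stands in for common.MaxIndexOfTxInMiniBlock;
// the concrete value used by the repository is not shown in this diff.
const maxIndexOfTxInMiniBlock = int32(1<<31 - 1)

// miniBlocksInMeta mirrors the shape of the extended bootstrap record.
type miniBlocksInMeta struct {
    miniBlocksHashes       [][]byte
    fullyProcessed         []bool
    indexOfLastTxProcessed []int32
}

// isFullyProcessed defaults to true whenever the parallel slice is missing
// or too short, so older records keep their previous meaning.
func (m miniBlocksInMeta) isFullyProcessed(index int) bool {
    if m.fullyProcessed != nil && index < len(m.fullyProcessed) {
        return m.fullyProcessed[index]
    }
    return true
}

// lastTxProcessed falls back to the permissive maximum for missing entries.
func (m miniBlocksInMeta) lastTxProcessed(index int) int32 {
    if m.indexOfLastTxProcessed != nil && index < len(m.indexOfLastTxProcessed) {
        return m.indexOfLastTxProcessed[index]
    }
    return maxIndexOfTxInMiniBlock
}

func main() {
    old := miniBlocksInMeta{miniBlocksHashes: [][]byte{[]byte("mb")}}
    fmt.Println(old.isFullyProcessed(0), old.lastTxProcessed(0)) // true, maximum

    upgraded := miniBlocksInMeta{
        miniBlocksHashes:       [][]byte{[]byte("mb")},
        fullyProcessed:         []bool{false},
        indexOfLastTxProcessed: []int32{8},
    }
    fmt.Println(upgraded.isFullyProcessed(0), upgraded.lastTxProcessed(0)) // false, 8
}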
*MiniBlocksInMeta) GetIndexOfLastTxProcessedInMiniBlock(index int) int32 { + indexOfLastTxProcessed := common.MaxIndexOfTxInMiniBlock + if m.IndexOfLastTxProcessed != nil && index < len(m.IndexOfLastTxProcessed) { + indexOfLastTxProcessed = m.IndexOfLastTxProcessed[index] + } + + return indexOfLastTxProcessed +} diff --git a/process/block/bootstrapStorage/bootstrapData.pb.go b/process/block/bootstrapStorage/bootstrapData.pb.go index 1d39d1b2f1d..b27029a205e 100644 --- a/process/block/bootstrapStorage/bootstrapData.pb.go +++ b/process/block/bootstrapStorage/bootstrapData.pb.go @@ -28,8 +28,10 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package //MiniBlocksInMeta is used to store all mini blocks hashes for a metablock hash type MiniBlocksInMeta struct { - MetaHash []byte `protobuf:"bytes,1,opt,name=MetaHash,proto3" json:"MetaHash,omitempty"` - MiniBlocksHashes [][]byte `protobuf:"bytes,2,rep,name=MiniBlocksHashes,proto3" json:"MiniBlocksHashes,omitempty"` + MetaHash []byte `protobuf:"bytes,1,opt,name=MetaHash,proto3" json:"MetaHash,omitempty"` + MiniBlocksHashes [][]byte `protobuf:"bytes,2,rep,name=MiniBlocksHashes,proto3" json:"MiniBlocksHashes,omitempty"` + FullyProcessed []bool `protobuf:"varint,3,rep,packed,name=FullyProcessed,proto3" json:"FullyProcessed,omitempty"` + IndexOfLastTxProcessed []int32 `protobuf:"varint,4,rep,packed,name=IndexOfLastTxProcessed,proto3" json:"IndexOfLastTxProcessed,omitempty"` } func (m *MiniBlocksInMeta) Reset() { *m = MiniBlocksInMeta{} } @@ -74,6 +76,20 @@ func (m *MiniBlocksInMeta) GetMiniBlocksHashes() [][]byte { return nil } +func (m *MiniBlocksInMeta) GetFullyProcessed() []bool { + if m != nil { + return m.FullyProcessed + } + return nil +} + +func (m *MiniBlocksInMeta) GetIndexOfLastTxProcessed() []int32 { + if m != nil { + return m.IndexOfLastTxProcessed + } + return nil +} + //BootstrapHeaderInfo is used to store information about a header type BootstrapHeaderInfo struct { ShardId uint32 `protobuf:"varint,1,opt,name=ShardId,proto3" json:"ShardId,omitempty"` @@ -340,41 +356,45 @@ func init() { func init() { proto.RegisterFile("bootstrapData.proto", fileDescriptor_cd9e3de0f7706101) } var fileDescriptor_cd9e3de0f7706101 = []byte{ - // 544 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x94, 0xcd, 0x8e, 0x12, 0x41, - 0x10, 0xc7, 0xa7, 0x17, 0xd8, 0x65, 0xcb, 0xdd, 0x04, 0x1b, 0x3f, 0x66, 0x09, 0x69, 0x09, 0x27, - 0x62, 0x22, 0x9b, 0x68, 0xe2, 0xc9, 0x18, 0x03, 0x68, 0x40, 0x5d, 0x24, 0x83, 0xa7, 0x8d, 0x9a, - 0x34, 0x4c, 0x33, 0x4c, 0x84, 0x29, 0xd2, 0xdd, 0x1c, 0xf4, 0xe4, 0xc9, 0xb3, 0x8f, 0xe1, 0xa3, - 0xec, 0x91, 0x23, 0x27, 0x23, 0xc3, 0xc5, 0xe3, 0x3e, 0x82, 0x99, 0x1e, 0x76, 0x77, 0x14, 0x30, - 0xeb, 0x69, 0xea, 0x5f, 0x5d, 0xf5, 0xeb, 0xaa, 0xea, 0xca, 0x40, 0xbe, 0x87, 0xa8, 0x95, 0x96, - 0x7c, 0xd2, 0xe0, 0x9a, 0x57, 0x27, 0x12, 0x35, 0xd2, 0x8c, 0xf9, 0x14, 0x1e, 0x78, 0xbe, 0x1e, - 0x4e, 0x7b, 0xd5, 0x3e, 0x8e, 0x8f, 0x3d, 0xf4, 0xf0, 0xd8, 0xb8, 0x7b, 0xd3, 0x81, 0x51, 0x46, - 0x18, 0x2b, 0xce, 0x2a, 0x9f, 0x42, 0xee, 0xc4, 0x0f, 0xfc, 0xda, 0x08, 0xfb, 0x1f, 0x55, 0x2b, - 0x38, 0x11, 0x9a, 0xd3, 0x02, 0x64, 0xa3, 0x6f, 0x93, 0xab, 0xa1, 0x4d, 0x4a, 0xa4, 0x72, 0xe0, - 0x5c, 0x6a, 0x7a, 0x3f, 0x19, 0x1f, 0x79, 0x84, 0xb2, 0x77, 0x4a, 0xa9, 0xca, 0x81, 0xb3, 0xe6, - 0x2f, 0x23, 0xe4, 0x6b, 0x17, 0x85, 0x36, 0x05, 0x77, 0x85, 0x6c, 0x05, 0x03, 0xa4, 0x36, 0xec, - 0x75, 0x87, 0x5c, 0xba, 0x2d, 0xd7, 0xd0, 0x0f, 0x9d, 0x0b, 0x49, 0x6f, 0x41, 0xe6, 0xf9, 
0x04, - 0xfb, 0x43, 0x7b, 0xc7, 0xf8, 0x63, 0x11, 0x79, 0xdb, 0x18, 0xf4, 0x85, 0x9d, 0x2a, 0x91, 0x4a, - 0xda, 0x89, 0x05, 0xa5, 0x90, 0x36, 0x05, 0xa6, 0x4d, 0x81, 0xc6, 0x2e, 0xbf, 0x87, 0xdb, 0x1d, - 0x11, 0xb8, 0x7e, 0xe0, 0x25, 0x7b, 0x4a, 0x5e, 0xd9, 0xf8, 0xf3, 0xca, 0xc6, 0x7f, 0xf5, 0xf3, - 0x35, 0x03, 0x87, 0xb5, 0xe4, 0xe4, 0xe9, 0x33, 0x80, 0xd7, 0x5c, 0xe9, 0xb8, 0x39, 0x83, 0xbe, - 0xf1, 0xb0, 0x10, 0x4f, 0xb6, 0xba, 0xa1, 0xf5, 0x5a, 0xfa, 0xec, 0xc7, 0x3d, 0xcb, 0x49, 0xe4, - 0xd0, 0x0f, 0x70, 0x14, 0xa9, 0xba, 0x44, 0xa5, 0xda, 0xa8, 0xb9, 0xf4, 0x3f, 0x0b, 0x37, 0x3e, - 0x8b, 0x0b, 0xb9, 0x0e, 0x70, 0x3b, 0x82, 0xbe, 0x03, 0x3b, 0x3a, 0xec, 0x8a, 0xd1, 0x60, 0x0d, - 0xbf, 0x77, 0x4d, 0xfc, 0x56, 0x02, 0x7d, 0x03, 0xf9, 0x8e, 0xc4, 0xbe, 0x50, 0x4a, 0xb8, 0x57, - 0xe3, 0xb2, 0xd3, 0x06, 0x7c, 0x77, 0x05, 0xfe, 0x7b, 0xbf, 0x56, 0xd4, 0x4d, 0x99, 0xb4, 0x03, - 0x37, 0xd7, 0x5e, 0xd0, 0xce, 0x1a, 0x5c, 0x71, 0x85, 0xdb, 0xf8, 0xc2, 0x2b, 0xe6, 0x7a, 0x32, - 0x7d, 0x02, 0x47, 0x6d, 0x74, 0x85, 0xaa, 0x23, 0x4a, 0xd7, 0x0f, 0xb8, 0x46, 0x59, 0xc7, 0x60, - 0xe0, 0x7b, 0xaf, 0xc4, 0x27, 0x7b, 0xdf, 0x2c, 0xcf, 0xf6, 0x00, 0xfa, 0x14, 0x0a, 0x66, 0x09, - 0xbb, 0x9a, 0x4b, 0xfd, 0x56, 0xfa, 0x9e, 0x27, 0x12, 0xe9, 0x60, 0xd2, 0xff, 0x11, 0x41, 0x1f, - 0xc3, 0x9d, 0xa6, 0xef, 0x0d, 0x85, 0xd2, 0x2f, 0xfc, 0x80, 0x8f, 0x4c, 0x4d, 0xf1, 0x32, 0x67, - 0xcc, 0x32, 0x6f, 0x39, 0xa5, 0x45, 0xd8, 0x8f, 0x86, 0xee, 0xe0, 0x34, 0x70, 0xed, 0xdd, 0x12, - 0xa9, 0xa4, 0x9c, 0x2b, 0x47, 0xb9, 0x08, 0x59, 0x63, 0xb4, 0xa7, 0x63, 0x9a, 0x83, 0x54, 0x7b, - 0x3a, 0x36, 0xbb, 0x97, 0x72, 0x22, 0xb3, 0xf6, 0x72, 0xb6, 0x60, 0xd6, 0x7c, 0xc1, 0xac, 0xf3, - 0x05, 0x23, 0x5f, 0x42, 0x46, 0xbe, 0x87, 0x8c, 0x9c, 0x85, 0x8c, 0xcc, 0x42, 0x46, 0xe6, 0x21, - 0x23, 0x3f, 0x43, 0x46, 0x7e, 0x85, 0xcc, 0x3a, 0x0f, 0x19, 0xf9, 0xb6, 0x64, 0xd6, 0x6c, 0xc9, - 0xac, 0xf9, 0x92, 0x59, 0xa7, 0xb9, 0xcb, 0xff, 0x4a, 0x57, 0xa3, 0xe4, 0x9e, 0xe8, 0xed, 0x9a, - 0x99, 0x3f, 0xfa, 0x1d, 0x00, 0x00, 0xff, 0xff, 0xe3, 0xe3, 0xf9, 0x05, 0x72, 0x04, 0x00, 0x00, + // 593 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0xcf, 0x6e, 0xda, 0x4e, + 0x10, 0xf6, 0xc6, 0x90, 0x90, 0x49, 0xf2, 0x53, 0x7e, 0x9b, 0xfe, 0x71, 0x50, 0xb4, 0xb5, 0x38, + 0x54, 0x56, 0xa5, 0x12, 0x29, 0x95, 0x7a, 0xaa, 0xaa, 0x0a, 0x68, 0x04, 0x6d, 0x43, 0x90, 0xc9, + 0xa9, 0x6a, 0x2b, 0x2d, 0x78, 0x31, 0x56, 0xc1, 0x8b, 0xd6, 0x6b, 0x29, 0xe9, 0xa9, 0xa7, 0x9e, + 0xfb, 0x18, 0x7d, 0x86, 0x3e, 0x41, 0x8e, 0x1c, 0x39, 0x55, 0xc5, 0x5c, 0x7a, 0xcc, 0x23, 0x54, + 0x5e, 0x13, 0x70, 0x03, 0x54, 0xe9, 0xc9, 0x33, 0xdf, 0xcc, 0x7c, 0xbb, 0xfb, 0xcd, 0x07, 0xb0, + 0xd7, 0xe2, 0x5c, 0x06, 0x52, 0xd0, 0x41, 0x85, 0x4a, 0x5a, 0x1c, 0x08, 0x2e, 0x39, 0xce, 0xaa, + 0x4f, 0xfe, 0xb1, 0xeb, 0xc9, 0x6e, 0xd8, 0x2a, 0xb6, 0x79, 0xff, 0xd0, 0xe5, 0x2e, 0x3f, 0x54, + 0x70, 0x2b, 0xec, 0xa8, 0x4c, 0x25, 0x2a, 0x4a, 0xa6, 0x0a, 0xdf, 0x11, 0xec, 0x9e, 0x78, 0xbe, + 0x57, 0xea, 0xf1, 0xf6, 0xc7, 0xa0, 0xe6, 0x9f, 0x30, 0x49, 0x71, 0x1e, 0x72, 0xf1, 0xb7, 0x4a, + 0x83, 0xae, 0x81, 0x4c, 0x64, 0x6d, 0xdb, 0xb3, 0x1c, 0x3f, 0x4a, 0xf7, 0xc7, 0x08, 0x0b, 0x8c, + 0x35, 0x53, 0xb7, 0xb6, 0xed, 0x05, 0x1c, 0x3f, 0x84, 0xff, 0x8e, 0xc3, 0x5e, 0xef, 0xa2, 0x21, + 0x78, 0x9b, 0x05, 0x01, 0x73, 0x0c, 0xdd, 0xd4, 0xad, 0x9c, 0x7d, 0x03, 0xc5, 0x4f, 0xe1, 0x5e, + 0xcd, 0x77, 0xd8, 0xf9, 0x69, 0xe7, 0x0d, 0x0d, 0xe4, 0xd9, 0xf9, 0xbc, 0x3f, 0x63, 0xea, 0x56, + 0xd6, 0x5e, 0x51, 0x2d, 0x70, 0xd8, 0x2b, 0x5d, 0x2b, 0x51, 0x65, 
0xd4, 0x61, 0xa2, 0xe6, 0x77, + 0x38, 0x36, 0x60, 0xa3, 0xd9, 0xa5, 0xc2, 0xa9, 0x39, 0xea, 0xf6, 0x3b, 0xf6, 0x75, 0x8a, 0xef, + 0x40, 0xf6, 0xe5, 0x80, 0xb7, 0xbb, 0xc6, 0x9a, 0xc2, 0x93, 0x24, 0x46, 0xeb, 0xdc, 0x6f, 0x33, + 0x43, 0x37, 0x91, 0x95, 0xb1, 0x93, 0x04, 0x63, 0xc8, 0x28, 0x01, 0x32, 0x4a, 0x00, 0x15, 0x17, + 0xde, 0xc3, 0xdd, 0x06, 0xf3, 0x1d, 0xcf, 0x77, 0xd3, 0x9a, 0xa5, 0x8f, 0xac, 0xfc, 0x79, 0x64, + 0xe5, 0x5f, 0xf4, 0x2a, 0x7c, 0xc9, 0xc2, 0x4e, 0x29, 0xbd, 0x5a, 0xfc, 0x02, 0x20, 0x7e, 0x74, + 0xf2, 0x38, 0x45, 0xbd, 0x75, 0x94, 0x4f, 0x56, 0x57, 0x5c, 0xf2, 0xf4, 0x52, 0xe6, 0xf2, 0xc7, + 0x03, 0xcd, 0x4e, 0xcd, 0xe0, 0x0f, 0xb0, 0x1f, 0x67, 0x65, 0xc1, 0x83, 0xa0, 0xce, 0x25, 0x15, + 0xde, 0x27, 0xe6, 0x24, 0xb5, 0xe4, 0x22, 0xb7, 0x21, 0x5c, 0x4d, 0x81, 0xdf, 0x81, 0x11, 0x17, + 0x9b, 0xac, 0xd7, 0x59, 0xa0, 0xdf, 0xb8, 0x25, 0xfd, 0x4a, 0x06, 0x7c, 0x0a, 0x7b, 0xb3, 0x75, + 0xcf, 0xe5, 0x52, 0xb6, 0xd8, 0x3a, 0xba, 0x3f, 0x25, 0xbe, 0xe9, 0xdf, 0x29, 0xeb, 0xb2, 0x49, + 0xdc, 0x80, 0xff, 0x17, 0x36, 0x68, 0xe4, 0x14, 0xdd, 0xc1, 0x94, 0x6e, 0xe9, 0x86, 0xa7, 0x9c, + 0x8b, 0xc3, 0xf8, 0x19, 0xec, 0xd7, 0xb9, 0xc3, 0x82, 0x32, 0xe7, 0xc2, 0xf1, 0x7c, 0x2a, 0xb9, + 0x28, 0x73, 0xbf, 0xe3, 0xb9, 0xaf, 0xd9, 0x85, 0xb1, 0xa9, 0xcc, 0xb3, 0xba, 0x01, 0x3f, 0x87, + 0xbc, 0x32, 0x61, 0x53, 0x52, 0x21, 0xcf, 0x84, 0xe7, 0xba, 0x2c, 0x35, 0x0e, 0x6a, 0xfc, 0x2f, + 0x1d, 0xf1, 0x4f, 0xa7, 0xea, 0xb9, 0x5d, 0x16, 0xc8, 0x63, 0xcf, 0xa7, 0x3d, 0x75, 0xa7, 0xc4, + 0xcc, 0x59, 0x65, 0xe6, 0x15, 0x55, 0x7c, 0x00, 0x9b, 0xb1, 0xe8, 0x36, 0x0f, 0x7d, 0xc7, 0x58, + 0x37, 0x91, 0xa5, 0xdb, 0x73, 0xa0, 0x70, 0x00, 0x39, 0x15, 0xd4, 0xc3, 0x3e, 0xde, 0x05, 0xbd, + 0x1e, 0xf6, 0x95, 0xf7, 0x74, 0x3b, 0x0e, 0x4b, 0xaf, 0x86, 0x63, 0xa2, 0x8d, 0xc6, 0x44, 0xbb, + 0x1a, 0x13, 0xf4, 0x39, 0x22, 0xe8, 0x5b, 0x44, 0xd0, 0x65, 0x44, 0xd0, 0x30, 0x22, 0x68, 0x14, + 0x11, 0xf4, 0x33, 0x22, 0xe8, 0x57, 0x44, 0xb4, 0xab, 0x88, 0xa0, 0xaf, 0x13, 0xa2, 0x0d, 0x27, + 0x44, 0x1b, 0x4d, 0x88, 0xf6, 0x76, 0x77, 0xf6, 0xc7, 0xd5, 0x94, 0x5c, 0x50, 0x97, 0xb5, 0xd6, + 0x95, 0xe6, 0x4f, 0x7e, 0x07, 0x00, 0x00, 0xff, 0xff, 0x30, 0x3e, 0xc8, 0x88, 0xd3, 0x04, 0x00, + 0x00, } func (this *MiniBlocksInMeta) Equal(that interface{}) bool { @@ -407,6 +427,22 @@ func (this *MiniBlocksInMeta) Equal(that interface{}) bool { return false } } + if len(this.FullyProcessed) != len(that1.FullyProcessed) { + return false + } + for i := range this.FullyProcessed { + if this.FullyProcessed[i] != that1.FullyProcessed[i] { + return false + } + } + if len(this.IndexOfLastTxProcessed) != len(that1.IndexOfLastTxProcessed) { + return false + } + for i := range this.IndexOfLastTxProcessed { + if this.IndexOfLastTxProcessed[i] != that1.IndexOfLastTxProcessed[i] { + return false + } + } return true } func (this *BootstrapHeaderInfo) Equal(that interface{}) bool { @@ -570,10 +606,12 @@ func (this *MiniBlocksInMeta) GoString() string { if this == nil { return "nil" } - s := make([]string, 0, 6) + s := make([]string, 0, 8) s = append(s, "&bootstrapStorage.MiniBlocksInMeta{") s = append(s, "MetaHash: "+fmt.Sprintf("%#v", this.MetaHash)+",\n") s = append(s, "MiniBlocksHashes: "+fmt.Sprintf("%#v", this.MiniBlocksHashes)+",\n") + s = append(s, "FullyProcessed: "+fmt.Sprintf("%#v", this.FullyProcessed)+",\n") + s = append(s, "IndexOfLastTxProcessed: "+fmt.Sprintf("%#v", this.IndexOfLastTxProcessed)+",\n") s = append(s, "}") return strings.Join(s, "") } @@ -681,6 +719,38 @@ func (m *MiniBlocksInMeta) MarshalToSizedBuffer(dAtA []byte) (int, 
error) { _ = i var l int _ = l + if len(m.IndexOfLastTxProcessed) > 0 { + dAtA2 := make([]byte, len(m.IndexOfLastTxProcessed)*10) + var j1 int + for _, num1 := range m.IndexOfLastTxProcessed { + num := uint64(num1) + for num >= 1<<7 { + dAtA2[j1] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j1++ + } + dAtA2[j1] = uint8(num) + j1++ + } + i -= j1 + copy(dAtA[i:], dAtA2[:j1]) + i = encodeVarintBootstrapData(dAtA, i, uint64(j1)) + i-- + dAtA[i] = 0x22 + } + if len(m.FullyProcessed) > 0 { + for iNdEx := len(m.FullyProcessed) - 1; iNdEx >= 0; iNdEx-- { + i-- + if m.FullyProcessed[iNdEx] { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + } + i = encodeVarintBootstrapData(dAtA, i, uint64(len(m.FullyProcessed))) + i-- + dAtA[i] = 0x1a + } if len(m.MiniBlocksHashes) > 0 { for iNdEx := len(m.MiniBlocksHashes) - 1; iNdEx >= 0; iNdEx-- { i -= len(m.MiniBlocksHashes[iNdEx]) @@ -950,6 +1020,16 @@ func (m *MiniBlocksInMeta) Size() (n int) { n += 1 + l + sovBootstrapData(uint64(l)) } } + if len(m.FullyProcessed) > 0 { + n += 1 + sovBootstrapData(uint64(len(m.FullyProcessed))) + len(m.FullyProcessed)*1 + } + if len(m.IndexOfLastTxProcessed) > 0 { + l = 0 + for _, e := range m.IndexOfLastTxProcessed { + l += sovBootstrapData(uint64(e)) + } + n += 1 + sovBootstrapData(uint64(l)) + l + } return n } @@ -1067,6 +1147,8 @@ func (this *MiniBlocksInMeta) String() string { s := strings.Join([]string{`&MiniBlocksInMeta{`, `MetaHash:` + fmt.Sprintf("%v", this.MetaHash) + `,`, `MiniBlocksHashes:` + fmt.Sprintf("%v", this.MiniBlocksHashes) + `,`, + `FullyProcessed:` + fmt.Sprintf("%v", this.FullyProcessed) + `,`, + `IndexOfLastTxProcessed:` + fmt.Sprintf("%v", this.IndexOfLastTxProcessed) + `,`, `}`, }, "") return s @@ -1246,6 +1328,152 @@ func (m *MiniBlocksInMeta) Unmarshal(dAtA []byte) error { m.MiniBlocksHashes = append(m.MiniBlocksHashes, make([]byte, postIndex-iNdEx)) copy(m.MiniBlocksHashes[len(m.MiniBlocksHashes)-1], dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 3: + if wireType == 0 { + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBootstrapData + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.FullyProcessed = append(m.FullyProcessed, bool(v != 0)) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBootstrapData + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthBootstrapData + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthBootstrapData + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + elementCount = packedLen + if elementCount != 0 && len(m.FullyProcessed) == 0 { + m.FullyProcessed = make([]bool, 0, elementCount) + } + for iNdEx < postIndex { + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBootstrapData + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.FullyProcessed = append(m.FullyProcessed, bool(v != 0)) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field FullyProcessed", wireType) + } + case 4: + if wireType == 0 { + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
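The regenerated marshaller writes the new repeated int32 field as a standard protobuf packed field: the values are varint-encoded back to back and prefixed with a single key and length (the key 0x22 above is field number 4 with wire type 2). A tiny stand-alone illustration of that varint step, independent of the generated code.

package main

import "fmt"

// putUvarint appends the protobuf base-128 varint encoding of v.
func putUvarint(dst []byte, v uint64) []byte {
    for v >= 0x80 {
        dst = append(dst, byte(v)|0x80)
        v >>= 7
    }
    return append(dst, byte(v))
}

// readUvarint decodes one varint and returns the value and bytes consumed.
func readUvarint(src []byte) (uint64, int) {
    var v uint64
    var shift uint
    for i, b := range src {
        v |= uint64(b&0x7f) << shift
        if b < 0x80 {
            return v, i + 1
        }
        shift += 7
    }
    return 0, 0
}

func main() {
    // pack the payload of a repeated int32 field such as IndexOfLastTxProcessed
    values := []int32{8, 99, 300}
    var payload []byte
    for _, v := range values {
        payload = putUvarint(payload, uint64(v))
    }
    // on the wire this payload is preceded by the field key and its length
    fmt.Printf("% x\n", payload)

    // decode it back
    for len(payload) > 0 {
        v, n := readUvarint(payload)
        fmt.Println(int32(v))
        payload = payload[n:]
    }
}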
ErrIntOverflowBootstrapData + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.IndexOfLastTxProcessed = append(m.IndexOfLastTxProcessed, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBootstrapData + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthBootstrapData + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthBootstrapData + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + var count int + for _, integer := range dAtA[iNdEx:postIndex] { + if integer < 128 { + count++ + } + } + elementCount = count + if elementCount != 0 && len(m.IndexOfLastTxProcessed) == 0 { + m.IndexOfLastTxProcessed = make([]int32, 0, elementCount) + } + for iNdEx < postIndex { + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBootstrapData + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.IndexOfLastTxProcessed = append(m.IndexOfLastTxProcessed, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field IndexOfLastTxProcessed", wireType) + } default: iNdEx = preIndex skippy, err := skipBootstrapData(dAtA[iNdEx:]) diff --git a/process/block/bootstrapStorage/bootstrapData.proto b/process/block/bootstrapStorage/bootstrapData.proto index 1e4ca779965..78a62e7aabc 100644 --- a/process/block/bootstrapStorage/bootstrapData.proto +++ b/process/block/bootstrapStorage/bootstrapData.proto @@ -9,8 +9,10 @@ import "github.com/gogo/protobuf/gogoproto/gogo.proto"; //MiniBlocksInMeta is used to store all mini blocks hashes for a metablock hash message MiniBlocksInMeta { - bytes MetaHash = 1; - repeated bytes MiniBlocksHashes = 2; + bytes MetaHash = 1; + repeated bytes MiniBlocksHashes = 2; + repeated bool FullyProcessed = 3; + repeated int32 IndexOfLastTxProcessed = 4; } //BootstrapHeaderInfo is used to store information about a header diff --git a/process/block/bootstrapStorage/bootstrapData_test.go b/process/block/bootstrapStorage/bootstrapData_test.go new file mode 100644 index 00000000000..083216123c9 --- /dev/null +++ b/process/block/bootstrapStorage/bootstrapData_test.go @@ -0,0 +1,52 @@ +package bootstrapStorage + +import ( + "testing" + + "github.com/ElrondNetwork/elrond-go/common" + "github.com/stretchr/testify/assert" +) + +func TestMiniBlocksInMeta_IsFullyProcessedShouldWork(t *testing.T) { + t.Parallel() + + mbim := MiniBlocksInMeta{} + + isFullyProcessed := mbim.IsFullyProcessed(0) + assert.True(t, isFullyProcessed) + + mbim.FullyProcessed = make([]bool, 0) + isFullyProcessed = mbim.IsFullyProcessed(0) + assert.True(t, isFullyProcessed) + + mbim.FullyProcessed = append(mbim.FullyProcessed, true) + isFullyProcessed = mbim.IsFullyProcessed(0) + assert.True(t, isFullyProcessed) + + mbim.FullyProcessed = append(mbim.FullyProcessed, false) + isFullyProcessed = mbim.IsFullyProcessed(1) + assert.False(t, isFullyProcessed) + + isFullyProcessed = mbim.IsFullyProcessed(2) + assert.True(t, isFullyProcessed) +} + +func TestMiniBlocksInMeta_GetIndexOfLastTxProcessedInMiniBlock(t *testing.T) { + t.Parallel() + + mbim := MiniBlocksInMeta{} + + index := 
mbim.GetIndexOfLastTxProcessedInMiniBlock(0) + assert.Equal(t, common.MaxIndexOfTxInMiniBlock, index) + + mbim.FullyProcessed = make([]bool, 0) + index = mbim.GetIndexOfLastTxProcessedInMiniBlock(0) + assert.Equal(t, common.MaxIndexOfTxInMiniBlock, index) + + mbim.IndexOfLastTxProcessed = append(mbim.IndexOfLastTxProcessed, 1) + index = mbim.GetIndexOfLastTxProcessedInMiniBlock(0) + assert.Equal(t, int32(1), index) + + index = mbim.GetIndexOfLastTxProcessedInMiniBlock(1) + assert.Equal(t, common.MaxIndexOfTxInMiniBlock, index) +} diff --git a/process/block/displayBlock.go b/process/block/displayBlock.go index f523763cbe5..6e4978c62f9 100644 --- a/process/block/displayBlock.go +++ b/process/block/displayBlock.go @@ -198,13 +198,19 @@ func (txc *transactionCounter) displayTxBlockBody( processingTypeInMiniBlockHeaderStr = getProcessingTypeAsString(miniBlockHeaders[i]) } + constructionStateInMiniBlockHeaderStr := "" + if len(miniBlockHeaders) > i { + constructionStateInMiniBlockHeaderStr = getConstructionStateAsString(miniBlockHeaders[i]) + } + processingTypeInMiniBlockStr := "" if miniBlock.IsScheduledMiniBlock() { processingTypeInMiniBlockStr = "S_" } - part := fmt.Sprintf("%s%s_MiniBlock_%s%d->%d", + part := fmt.Sprintf("%s%s%s_MiniBlock_%s%d->%d", processingTypeInMiniBlockHeaderStr, + constructionStateInMiniBlockHeaderStr, miniBlock.Type.String(), processingTypeInMiniBlockStr, miniBlock.SenderShardID, @@ -217,6 +223,8 @@ func (txc *transactionCounter) displayTxBlockBody( if len(miniBlockHeaders) > i { lines = append(lines, display.NewLineData(false, []string{"", "MbHash", logger.DisplayByteSlice(miniBlockHeaders[i].GetHash())})) + strProcessedRange := fmt.Sprintf("%d-%d", miniBlockHeaders[i].GetIndexOfFirstTxProcessed(), miniBlockHeaders[i].GetIndexOfLastTxProcessed()) + lines = append(lines, display.NewLineData(false, []string{"", "TxsProcessedRange", strProcessedRange})) } currentBlockTxs += len(miniBlock.TxHashes) @@ -263,6 +271,18 @@ func getProcessingTypeAsString(miniBlockHeader data.MiniBlockHeaderHandler) stri return "" } +func getConstructionStateAsString(miniBlockHeader data.MiniBlockHeaderHandler) string { + constructionState := block.MiniBlockState(miniBlockHeader.GetConstructionState()) + switch constructionState { + case block.Proposed: + return "Proposed_" + case block.PartialExecuted: + return "Partial_" + } + + return "" +} + // DisplayLastNotarized will display information about last notarized block func DisplayLastNotarized( marshalizer marshal.Marshalizer, diff --git a/process/block/displayBlock_test.go b/process/block/displayBlock_test.go index c9fa72ae655..8df32ef8408 100644 --- a/process/block/displayBlock_test.go +++ b/process/block/displayBlock_test.go @@ -89,3 +89,19 @@ func TestDisplayBlock_DisplayTxBlockBody(t *testing.T) { assert.NotNil(t, lines) assert.Equal(t, len(miniblock.TxHashes), len(lines)) } + +func TestDisplayBlock_GetConstructionStateAsString(t *testing.T) { + miniBlockHeader := &block.MiniBlockHeader{} + + _ = miniBlockHeader.SetConstructionState(int32(block.Proposed)) + str := getConstructionStateAsString(miniBlockHeader) + assert.Equal(t, "Proposed_", str) + + _ = miniBlockHeader.SetConstructionState(int32(block.PartialExecuted)) + str = getConstructionStateAsString(miniBlockHeader) + assert.Equal(t, "Partial_", str) + + _ = miniBlockHeader.SetConstructionState(int32(block.Final)) + str = getConstructionStateAsString(miniBlockHeader) + assert.Equal(t, "", str) +} diff --git a/process/block/export_test.go b/process/block/export_test.go index 
39425bec2fb..1cdcab9aead 100644 --- a/process/block/export_test.go +++ b/process/block/export_test.go @@ -1,6 +1,7 @@ package block import ( + "github.com/ElrondNetwork/elrond-go/process/block/processedMb" "sync" "time" @@ -56,7 +57,7 @@ func (sp *shardProcessor) ReceivedMetaBlock(header data.HeaderHandler, metaBlock sp.receivedMetaBlock(header, metaBlockHash) } -func (sp *shardProcessor) CreateMiniBlocks(haveTime func() bool) (*block.Body, error) { +func (sp *shardProcessor) CreateMiniBlocks(haveTime func() bool) (*block.Body, map[string]*processedMb.ProcessedMiniBlockInfo, error) { return sp.createMiniBlocks(haveTime, []byte("random")) } @@ -144,6 +145,7 @@ func NewShardProcessorEmptyWith3shards( GasHandler: &mock.GasHandlerMock{}, ScheduledTxsExecutionHandler: &testscommon.ScheduledTxsExecutionStub{}, ScheduledMiniBlocksEnableEpoch: 2, + ProcessedMiniBlocksTracker: &testscommon.ProcessedMiniBlocksTrackerStub{}, }, } shardProc, err := NewShardProcessor(arguments) @@ -310,7 +312,8 @@ func (sp *shardProcessor) CheckMetaHeadersValidityAndFinality() error { func (sp *shardProcessor) CreateAndProcessMiniBlocksDstMe( haveTime func() bool, ) (block.MiniBlockSlice, uint32, uint32, error) { - return sp.createAndProcessMiniBlocksDstMe(haveTime) + createAndProcessInfo, err := sp.createAndProcessMiniBlocksDstMe(haveTime) + return createAndProcessInfo.miniBlocks, createAndProcessInfo.numHdrsAdded, createAndProcessInfo.numTxsAdded, err } func (sp *shardProcessor) DisplayLogInfo( @@ -333,8 +336,9 @@ func (sp *shardProcessor) GetHighestHdrForOwnShardFromMetachain(processedHdrs [] func (sp *shardProcessor) RestoreMetaBlockIntoPool( miniBlockHashes map[string]uint32, metaBlockHashes [][]byte, + headerHandler data.HeaderHandler, ) error { - return sp.restoreMetaBlockIntoPool(miniBlockHashes, metaBlockHashes) + return sp.restoreMetaBlockIntoPool(headerHandler, miniBlockHashes, metaBlockHashes) } func (sp *shardProcessor) GetAllMiniBlockDstMeFromMeta( @@ -371,15 +375,15 @@ func (mp *metaProcessor) ApplyBodyToHeader(metaHdr data.MetaHeaderHandler, body return mp.applyBodyToHeader(metaHdr, body) } -func (sp *shardProcessor) ApplyBodyToHeader(shardHdr data.ShardHeaderHandler, body *block.Body) (*block.Body, error) { - return sp.applyBodyToHeader(shardHdr, body) +func (sp *shardProcessor) ApplyBodyToHeader(shardHdr data.ShardHeaderHandler, body *block.Body, processedMiniBlocksDestMeInfo map[string]*processedMb.ProcessedMiniBlockInfo) (*block.Body, error) { + return sp.applyBodyToHeader(shardHdr, body, processedMiniBlocksDestMeInfo) } func (mp *metaProcessor) CreateBlockBody(metaBlock data.HeaderHandler, haveTime func() bool) (data.BodyHandler, error) { return mp.createBlockBody(metaBlock, haveTime) } -func (sp *shardProcessor) CreateBlockBody(shardHdr data.HeaderHandler, haveTime func() bool) (data.BodyHandler, error) { +func (sp *shardProcessor) CreateBlockBody(shardHdr data.HeaderHandler, haveTime func() bool) (data.BodyHandler, map[string]*processedMb.ProcessedMiniBlockInfo, error) { return sp.createBlockBody(shardHdr, haveTime) } @@ -461,10 +465,10 @@ func (bp *baseProcessor) CheckScheduledMiniBlocksValidity(headerHandler data.Hea func (bp *baseProcessor) SetMiniBlockHeaderReservedField( miniBlock *block.MiniBlock, - miniBlockHash []byte, miniBlockHeaderHandler data.MiniBlockHeaderHandler, + processedMiniBlocksDestMeInfo map[string]*processedMb.ProcessedMiniBlockInfo, ) error { - return bp.setMiniBlockHeaderReservedField(miniBlock, miniBlockHash, miniBlockHeaderHandler) + return 
bp.setMiniBlockHeaderReservedField(miniBlock, miniBlockHeaderHandler, processedMiniBlocksDestMeInfo) } func (mp *metaProcessor) GetFinalMiniBlockHeaders(miniBlockHeaderHandlers []data.MiniBlockHeaderHandler) []data.MiniBlockHeaderHandler { @@ -474,3 +478,52 @@ func (mp *metaProcessor) GetFinalMiniBlockHeaders(miniBlockHeaderHandlers []data func CheckProcessorNilParameters(arguments ArgBaseProcessor) error { return checkProcessorNilParameters(arguments) } + +func (bp *baseProcessor) SetIndexOfFirstTxProcessed(miniBlockHeaderHandler data.MiniBlockHeaderHandler) error { + return bp.setIndexOfFirstTxProcessed(miniBlockHeaderHandler) +} + +func (bp *baseProcessor) SetIndexOfLastTxProcessed( + miniBlockHeaderHandler data.MiniBlockHeaderHandler, + processedMiniBlocksDestMeInfo map[string]*processedMb.ProcessedMiniBlockInfo, +) error { + return bp.setIndexOfLastTxProcessed(miniBlockHeaderHandler, processedMiniBlocksDestMeInfo) +} + +func (bp *baseProcessor) GetProcessedMiniBlocksTracker() process.ProcessedMiniBlocksTracker { + return bp.processedMiniBlocksTracker +} + +func (bp *baseProcessor) SetProcessingTypeAndConstructionStateForScheduledMb( + miniBlockHeaderHandler data.MiniBlockHeaderHandler, + processedMiniBlocksDestMeInfo map[string]*processedMb.ProcessedMiniBlockInfo, +) error { + return bp.setProcessingTypeAndConstructionStateForScheduledMb(miniBlockHeaderHandler, processedMiniBlocksDestMeInfo) +} + +func (bp *baseProcessor) SetProcessingTypeAndConstructionStateForNormalMb( + miniBlockHeaderHandler data.MiniBlockHeaderHandler, + processedMiniBlocksDestMeInfo map[string]*processedMb.ProcessedMiniBlockInfo, +) error { + return bp.setProcessingTypeAndConstructionStateForNormalMb(miniBlockHeaderHandler, processedMiniBlocksDestMeInfo) +} + +func (sp *shardProcessor) RollBackProcessedMiniBlockInfo(miniBlockHeader data.MiniBlockHeaderHandler, miniBlockHash []byte) { + sp.rollBackProcessedMiniBlockInfo(miniBlockHeader, miniBlockHash) +} + +func (sp *shardProcessor) SetProcessedMiniBlocksInfo(miniBlockHashes [][]byte, metaBlockHash string, metaBlock *block.MetaBlock) { + sp.setProcessedMiniBlocksInfo(miniBlockHashes, metaBlockHash, metaBlock) +} + +func (sp *shardProcessor) GetIndexOfLastTxProcessedInMiniBlock(miniBlockHash []byte, metaBlock *block.MetaBlock) int32 { + return getIndexOfLastTxProcessedInMiniBlock(miniBlockHash, metaBlock) +} + +func (sp *shardProcessor) RollBackProcessedMiniBlocksInfo(headerHandler data.HeaderHandler, mapMiniBlockHashes map[string]uint32) { + sp.rollBackProcessedMiniBlocksInfo(headerHandler, mapMiniBlockHashes) +} + +func (bp *baseProcessor) CheckConstructionStateAndIndexesCorrectness(mbh data.MiniBlockHeaderHandler) error { + return checkConstructionStateAndIndexesCorrectness(mbh) +} diff --git a/process/block/interceptedBlocks/common.go b/process/block/interceptedBlocks/common.go index fefdc91533f..9dbf9cf341c 100644 --- a/process/block/interceptedBlocks/common.go +++ b/process/block/interceptedBlocks/common.go @@ -9,7 +9,7 @@ import ( ) const maxLenMiniBlockReservedField = 10 -const maxLenMiniBlockHeaderReservedField = 16 +const maxLenMiniBlockHeaderReservedField = 32 func checkBlockHeaderArgument(arg *ArgInterceptedBlockHeader) error { if arg == nil { @@ -122,20 +122,20 @@ func checkShardData(sd data.ShardDataHandler, coordinator sharding.Coordinator) return nil } -func checkMiniblocks(miniblocks []data.MiniBlockHeaderHandler, coordinator sharding.Coordinator) error { - for _, miniblock := range miniblocks { - isWrongSenderShardId := 
miniblock.GetSenderShardID() >= coordinator.NumberOfShards() && - miniblock.GetSenderShardID() != core.MetachainShardId && - miniblock.GetSenderShardID() != core.AllShardId - isWrongDestinationShardId := miniblock.GetReceiverShardID() >= coordinator.NumberOfShards() && - miniblock.GetReceiverShardID() != core.MetachainShardId && - miniblock.GetReceiverShardID() != core.AllShardId +func checkMiniBlocksHeaders(mbHeaders []data.MiniBlockHeaderHandler, coordinator sharding.Coordinator) error { + for _, mbHeader := range mbHeaders { + isWrongSenderShardId := mbHeader.GetSenderShardID() >= coordinator.NumberOfShards() && + mbHeader.GetSenderShardID() != core.MetachainShardId && + mbHeader.GetSenderShardID() != core.AllShardId + isWrongDestinationShardId := mbHeader.GetReceiverShardID() >= coordinator.NumberOfShards() && + mbHeader.GetReceiverShardID() != core.MetachainShardId && + mbHeader.GetReceiverShardID() != core.AllShardId isWrongShardId := isWrongSenderShardId || isWrongDestinationShardId if isWrongShardId { return process.ErrInvalidShardId } - if len(miniblock.GetReserved()) > maxLenMiniBlockReservedField { + if len(mbHeader.GetReserved()) > maxLenMiniBlockHeaderReservedField { return process.ErrReservedFieldInvalid } } diff --git a/process/block/interceptedBlocks/common_test.go b/process/block/interceptedBlocks/common_test.go index 5860cf0b50e..5a997453627 100644 --- a/process/block/interceptedBlocks/common_test.go +++ b/process/block/interceptedBlocks/common_test.go @@ -389,7 +389,7 @@ func TestCheckMetaShardInfo_ReservedPopulatedShouldErr(t *testing.T) { ReceiverShardID: shardCoordinator.SelfId(), SenderShardID: shardCoordinator.SelfId(), TxCount: 0, - Reserved: []byte("rrrrrrrrrrrrrrrrr"), + Reserved: []byte("rrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrr"), } sd := block.ShardData{ @@ -431,21 +431,21 @@ func TestCheckMetaShardInfo_OkValsShouldWork(t *testing.T) { assert.Nil(t, err) } -//------- checkMiniblocks +//------- checkMiniBlocksHeaders -func TestCheckMiniblocks_WithNilOrEmptyShouldReturnNil(t *testing.T) { +func TestCheckMiniBlocksHeaders_WithNilOrEmptyShouldReturnNil(t *testing.T) { t.Parallel() shardCoordinator := mock.NewOneShardCoordinatorMock() - err1 := checkMiniblocks(nil, shardCoordinator) - err2 := checkMiniblocks(make([]data.MiniBlockHeaderHandler, 0), shardCoordinator) + err1 := checkMiniBlocksHeaders(nil, shardCoordinator) + err2 := checkMiniBlocksHeaders(make([]data.MiniBlockHeaderHandler, 0), shardCoordinator) assert.Nil(t, err1) assert.Nil(t, err2) } -func TestCheckMiniblocks_WrongMiniblockSenderShardIdShouldErr(t *testing.T) { +func TestCheckMiniBlocksHeaders_WrongMiniblockSenderShardIdShouldErr(t *testing.T) { t.Parallel() shardCoordinator := mock.NewOneShardCoordinatorMock() @@ -458,12 +458,12 @@ func TestCheckMiniblocks_WrongMiniblockSenderShardIdShouldErr(t *testing.T) { Type: 0, } - err := checkMiniblocks([]data.MiniBlockHeaderHandler{&miniblockHeader}, shardCoordinator) + err := checkMiniBlocksHeaders([]data.MiniBlockHeaderHandler{&miniblockHeader}, shardCoordinator) assert.Equal(t, process.ErrInvalidShardId, err) } -func TestCheckMiniblocks_WrongMiniblockReceiverShardIdShouldErr(t *testing.T) { +func TestCheckMiniBlocksHeaders_WrongMiniblockReceiverShardIdShouldErr(t *testing.T) { t.Parallel() shardCoordinator := mock.NewOneShardCoordinatorMock() @@ -476,12 +476,12 @@ func TestCheckMiniblocks_WrongMiniblockReceiverShardIdShouldErr(t *testing.T) { Type: 0, } - err := checkMiniblocks([]data.MiniBlockHeaderHandler{&miniblockHeader}, shardCoordinator) + err := 
checkMiniBlocksHeaders([]data.MiniBlockHeaderHandler{&miniblockHeader}, shardCoordinator) assert.Equal(t, process.ErrInvalidShardId, err) } -func TestCheckMiniblocks_ReservedPopulatedShouldErr(t *testing.T) { +func TestCheckMiniBlocksHeaders_ReservedPopulatedShouldErr(t *testing.T) { t.Parallel() shardCoordinator := mock.NewOneShardCoordinatorMock() @@ -491,15 +491,15 @@ func TestCheckMiniblocks_ReservedPopulatedShouldErr(t *testing.T) { ReceiverShardID: shardCoordinator.SelfId(), TxCount: 0, Type: 0, - Reserved: []byte("rrrrrrrrrrrr"), + Reserved: []byte("rrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrr"), } - err := checkMiniblocks([]data.MiniBlockHeaderHandler{&miniblockHeader}, shardCoordinator) + err := checkMiniBlocksHeaders([]data.MiniBlockHeaderHandler{&miniblockHeader}, shardCoordinator) assert.Equal(t, process.ErrReservedFieldInvalid, err) } -func TestCheckMiniblocks_ReservedPopulatedCorrectly(t *testing.T) { +func TestCheckMiniBlocksHeaders_ReservedPopulatedCorrectly(t *testing.T) { t.Parallel() shardCoordinator := mock.NewOneShardCoordinatorMock() @@ -512,12 +512,12 @@ func TestCheckMiniblocks_ReservedPopulatedCorrectly(t *testing.T) { Reserved: []byte("r"), } - err := checkMiniblocks([]data.MiniBlockHeaderHandler{&miniblockHeader}, shardCoordinator) + err := checkMiniBlocksHeaders([]data.MiniBlockHeaderHandler{&miniblockHeader}, shardCoordinator) assert.Nil(t, err) } -func TestCheckMiniblocks_OkValsShouldWork(t *testing.T) { +func TestCheckMiniBlocksHeaders_OkValsShouldWork(t *testing.T) { t.Parallel() shardCoordinator := mock.NewOneShardCoordinatorMock() @@ -529,7 +529,7 @@ func TestCheckMiniblocks_OkValsShouldWork(t *testing.T) { Type: 0, } - err := checkMiniblocks([]data.MiniBlockHeaderHandler{&miniblockHeader}, shardCoordinator) + err := checkMiniBlocksHeaders([]data.MiniBlockHeaderHandler{&miniblockHeader}, shardCoordinator) assert.Nil(t, err) } diff --git a/process/block/interceptedBlocks/export_test.go b/process/block/interceptedBlocks/export_test.go new file mode 100644 index 00000000000..fca0028c459 --- /dev/null +++ b/process/block/interceptedBlocks/export_test.go @@ -0,0 +1,6 @@ +package interceptedBlocks + +// IsMetaHeaderOutOfRange - +func (imh *InterceptedMetaHeader) IsMetaHeaderOutOfRange() bool { + return imh.isMetaHeaderEpochOutOfRange() +} diff --git a/process/block/interceptedBlocks/interceptedBlockHeader.go b/process/block/interceptedBlocks/interceptedBlockHeader.go index ad9445beb49..c71e84dab89 100644 --- a/process/block/interceptedBlocks/interceptedBlockHeader.go +++ b/process/block/interceptedBlocks/interceptedBlockHeader.go @@ -138,7 +138,7 @@ func (inHdr *InterceptedHeader) integrity() error { return err } - err = checkMiniblocks(inHdr.hdr.GetMiniBlockHeaderHandlers(), inHdr.shardCoordinator) + err = checkMiniBlocksHeaders(inHdr.hdr.GetMiniBlockHeaderHandlers(), inHdr.shardCoordinator) if err != nil { return err } diff --git a/process/block/interceptedBlocks/interceptedMetaBlockHeader.go b/process/block/interceptedBlocks/interceptedMetaBlockHeader.go index ad5a78adceb..3e176af98f6 100644 --- a/process/block/interceptedBlocks/interceptedMetaBlockHeader.go +++ b/process/block/interceptedBlocks/interceptedMetaBlockHeader.go @@ -8,6 +8,7 @@ import ( "github.com/ElrondNetwork/elrond-go-core/data/block" "github.com/ElrondNetwork/elrond-go-core/hashing" "github.com/ElrondNetwork/elrond-go-core/marshal" + logger "github.com/ElrondNetwork/elrond-go-logger" "github.com/ElrondNetwork/elrond-go/process" "github.com/ElrondNetwork/elrond-go/sharding" ) @@ -15,6 +16,8 @@ import ( 
var _ process.HdrValidatorHandler = (*InterceptedMetaHeader)(nil) var _ process.InterceptedData = (*InterceptedMetaHeader)(nil) +var log = logger.GetOrCreate("process/block/interceptedBlocks") + // InterceptedMetaHeader represents the wrapper over the meta block header struct type InterceptedMetaHeader struct { hdr data.MetaHeaderHandler @@ -91,6 +94,15 @@ func (imh *InterceptedMetaHeader) CheckValidity() error { if err != nil { return err } + + if imh.isMetaHeaderEpochOutOfRange() { + log.Trace("InterceptedMetaHeader.CheckValidity", + "trigger epoch", imh.epochStartTrigger.Epoch(), + "metaBlock epoch", imh.hdr.GetEpoch(), + "error", process.ErrMetaHeaderEpochOutOfRange) + + return process.ErrMetaHeaderEpochOutOfRange + } } err = imh.validityAttester.CheckBlockAgainstRoundHandler(imh.HeaderHandler()) @@ -111,6 +123,18 @@ func (imh *InterceptedMetaHeader) CheckValidity() error { return imh.integrityVerifier.Verify(imh.hdr) } +func (imh *InterceptedMetaHeader) isMetaHeaderEpochOutOfRange() bool { + if imh.shardCoordinator.SelfId() == core.MetachainShardId { + return false + } + + if imh.hdr.GetEpoch() > imh.epochStartTrigger.Epoch()+1 { + return true + } + + return false +} + // integrity checks the integrity of the meta header block wrapper func (imh *InterceptedMetaHeader) integrity() error { err := checkHeaderHandler(imh.HeaderHandler()) diff --git a/process/block/interceptedBlocks/interceptedMetaBlockHeader_test.go b/process/block/interceptedBlocks/interceptedMetaBlockHeader_test.go index e60c46b2bd7..576728d7555 100644 --- a/process/block/interceptedBlocks/interceptedMetaBlockHeader_test.go +++ b/process/block/interceptedBlocks/interceptedMetaBlockHeader_test.go @@ -11,6 +11,7 @@ import ( "github.com/ElrondNetwork/elrond-go/process/block/interceptedBlocks" "github.com/ElrondNetwork/elrond-go/process/mock" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func createDefaultMetaArgument() *interceptedBlocks.ArgInterceptedBlockHeader { @@ -21,7 +22,11 @@ func createDefaultMetaArgument() *interceptedBlocks.ArgInterceptedBlockHeader { HeaderSigVerifier: &mock.HeaderSigVerifierStub{}, HeaderIntegrityVerifier: &mock.HeaderIntegrityVerifierStub{}, ValidityAttester: &mock.ValidityAttesterStub{}, - EpochStartTrigger: &mock.EpochStartTriggerStub{}, + EpochStartTrigger: &mock.EpochStartTriggerStub{ + EpochCalled: func() uint32 { + return hdrEpoch + }, + }, } hdr := createMockMetaHeader() @@ -116,7 +121,7 @@ func TestInterceptedMetaHeader_ErrorInMiniBlockShouldErr(t *testing.T) { } buff, _ := testMarshalizer.Marshal(hdr) - arg := createDefaultShardArgument() + arg := createDefaultMetaArgument() arg.HdrBuff = buff inHdr, _ := interceptedBlocks.NewInterceptedMetaHeader(arg) @@ -187,11 +192,11 @@ func TestInterceptedMetaHeader_Getters(t *testing.T) { func TestInterceptedMetaHeader_CheckValidityLeaderSignatureNotCorrectShouldErr(t *testing.T) { t.Parallel() - hdr := createMockShardHeader() + hdr := createMockMetaHeader() expectedErr := errors.New("expected err") buff, _ := testMarshalizer.Marshal(hdr) - arg := createDefaultShardArgument() + arg := createDefaultMetaArgument() arg.HeaderSigVerifier = &mock.HeaderSigVerifierStub{ VerifyRandSeedAndLeaderSignatureCalled: func(header data.HeaderHandler) error { return expectedErr @@ -207,12 +212,12 @@ func TestInterceptedMetaHeader_CheckValidityLeaderSignatureNotCorrectShouldErr(t func TestInterceptedMetaHeader_CheckValidityLeaderSignatureOkShouldWork(t *testing.T) { t.Parallel() - hdr := createMockShardHeader() + hdr := 
createMockMetaHeader() expectedSignature := []byte("ran") hdr.LeaderSignature = expectedSignature buff, _ := testMarshalizer.Marshal(hdr) - arg := createDefaultShardArgument() + arg := createDefaultMetaArgument() arg.HdrBuff = buff inHdr, _ := interceptedBlocks.NewInterceptedMetaHeader(arg) @@ -220,6 +225,57 @@ func TestInterceptedMetaHeader_CheckValidityLeaderSignatureOkShouldWork(t *testi assert.Nil(t, err) } +func TestInterceptedMetaHeader_isMetaHeaderEpochOutOfRange(t *testing.T) { + epochStartTrigger := &mock.EpochStartTriggerStub{ + EpochCalled: func() uint32 { + return 10 + }, + } + t.Run("old epoch header accepted", func(t *testing.T) { + arg := createDefaultMetaArgument() + arg.EpochStartTrigger = epochStartTrigger + hdr := createMockMetaHeader() + hdr.Epoch = 8 + arg.HdrBuff, _ = testMarshalizer.Marshal(hdr) + + inHdr, _ := interceptedBlocks.NewInterceptedMetaHeader(arg) + require.False(t, inHdr.IsMetaHeaderOutOfRange()) + }) + + t.Run("current epoch header accepted", func(t *testing.T) { + arg := createDefaultMetaArgument() + arg.EpochStartTrigger = epochStartTrigger + hdr := createMockMetaHeader() + hdr.Epoch = 10 + arg.HdrBuff, _ = testMarshalizer.Marshal(hdr) + + inHdr, _ := interceptedBlocks.NewInterceptedMetaHeader(arg) + require.False(t, inHdr.IsMetaHeaderOutOfRange()) + }) + + t.Run("next epoch header accepted", func(t *testing.T) { + arg := createDefaultMetaArgument() + arg.EpochStartTrigger = epochStartTrigger + hdr := createMockMetaHeader() + hdr.Epoch = 11 + arg.HdrBuff, _ = testMarshalizer.Marshal(hdr) + + inHdr, _ := interceptedBlocks.NewInterceptedMetaHeader(arg) + require.False(t, inHdr.IsMetaHeaderOutOfRange()) + }) + + t.Run("larger epoch difference header rejected", func(t *testing.T) { + arg := createDefaultMetaArgument() + arg.EpochStartTrigger = epochStartTrigger + hdr := createMockMetaHeader() + hdr.Epoch = 12 + arg.HdrBuff, _ = testMarshalizer.Marshal(hdr) + + inHdr, _ := interceptedBlocks.NewInterceptedMetaHeader(arg) + require.True(t, inHdr.IsMetaHeaderOutOfRange()) + }) +} + //------- IsInterfaceNil func TestInterceptedMetaHeader_IsInterfaceNil(t *testing.T) { diff --git a/process/block/metablock.go b/process/block/metablock.go index 3d240555ae7..e2330e49e1e 100644 --- a/process/block/metablock.go +++ b/process/block/metablock.go @@ -124,6 +124,7 @@ func NewMetaProcessor(arguments ArgMetaProcessor) (*metaProcessor, error) { economicsData: arguments.CoreComponents.EconomicsData(), scheduledTxsExecutionHandler: arguments.ScheduledTxsExecutionHandler, scheduledMiniBlocksEnableEpoch: arguments.ScheduledMiniBlocksEnableEpoch, + processedMiniBlocksTracker: arguments.ProcessedMiniBlocksTracker, } mp := metaProcessor{ @@ -1559,10 +1560,6 @@ func (mp *metaProcessor) getLastSelfNotarizedHeaderByShard( return lastNotarizedMetaHeader, lastNotarizedMetaHeaderHash } -// ApplyProcessedMiniBlocks will do nothing on meta processor -func (mp *metaProcessor) ApplyProcessedMiniBlocks(_ *processedMb.ProcessedMiniBlockTracker) { -} - // getRewardsTxs must be called before method commitEpoch start because when commit is done rewards txs are removed from pool and saved in storage func (mp *metaProcessor) getRewardsTxs(header *block.MetaBlock, body *block.Body) (rewardsTx map[string]data.TransactionHandler) { if !mp.outportHandler.HasDrivers() { @@ -2203,7 +2200,7 @@ func (mp *metaProcessor) applyBodyToHeader(metaHdr data.MetaHeaderHandler, bodyH return nil, err } - totalTxCount, miniBlockHeaderHandlers, err := mp.createMiniBlockHeaderHandlers(body) + totalTxCount, 
miniBlockHeaderHandlers, err := mp.createMiniBlockHeaderHandlers(body, make(map[string]*processedMb.ProcessedMiniBlockInfo)) if err != nil { return nil, err } diff --git a/process/block/metablock_test.go b/process/block/metablock_test.go index 6b98fef2ab6..79b33147a97 100644 --- a/process/block/metablock_test.go +++ b/process/block/metablock_test.go @@ -18,6 +18,7 @@ import ( "github.com/ElrondNetwork/elrond-go/process" blproc "github.com/ElrondNetwork/elrond-go/process/block" "github.com/ElrondNetwork/elrond-go/process/block/bootstrapStorage" + "github.com/ElrondNetwork/elrond-go/process/block/processedMb" "github.com/ElrondNetwork/elrond-go/process/mock" "github.com/ElrondNetwork/elrond-go/sharding" "github.com/ElrondNetwork/elrond-go/state" @@ -134,6 +135,7 @@ func createMockMetaArguments( RoundNotifier: &mock.RoundNotifierStub{}, ScheduledTxsExecutionHandler: &testscommon.ScheduledTxsExecutionStub{}, ScheduledMiniBlocksEnableEpoch: 2, + ProcessedMiniBlocksTracker: &testscommon.ProcessedMiniBlocksTrackerStub{}, }, SCToProtocol: &mock.SCToProtocolStub{}, PendingMiniBlocksHandler: &mock.PendingMiniBlocksHandlerStub{}, @@ -2481,7 +2483,7 @@ func TestMetaProcessor_CreateMiniBlocksDestMe(t *testing.T) { } txCoordinator := &mock.TransactionCoordinatorMock{ - CreateMbsAndProcessCrossShardTransactionsDstMeCalled: func(header data.HeaderHandler, processedMiniBlocksHashes map[string]struct{}, haveTime func() bool, haveAdditionalTime func() bool, scheduledMode bool) (slices block.MiniBlockSlice, u uint32, b bool, err error) { + CreateMbsAndProcessCrossShardTransactionsDstMeCalled: func(header data.HeaderHandler, processedMiniBlocksInfo map[string]*processedMb.ProcessedMiniBlockInfo, haveTime func() bool, haveAdditionalTime func() bool, scheduledMode bool) (slices block.MiniBlockSlice, u uint32, b bool, err error) { return block.MiniBlockSlice{expectedMiniBlock1}, 0, true, nil }, CreateMbsAndProcessTransactionsFromMeCalled: func(haveTime func() bool) block.MiniBlockSlice { @@ -2648,7 +2650,7 @@ func TestMetaProcessor_VerifyCrossShardMiniBlocksDstMe(t *testing.T) { } txCoordinator := &mock.TransactionCoordinatorMock{ - CreateMbsAndProcessCrossShardTransactionsDstMeCalled: func(header data.HeaderHandler, processedMiniBlocksHashes map[string]struct{}, haveTime func() bool, haveAdditionalTime func() bool, scheduledMode bool) (slices block.MiniBlockSlice, u uint32, b bool, err error) { + CreateMbsAndProcessCrossShardTransactionsDstMeCalled: func(header data.HeaderHandler, processedMiniBlocksInfo map[string]*processedMb.ProcessedMiniBlockInfo, haveTime func() bool, haveAdditionalTime func() bool, scheduledMode bool) (slices block.MiniBlockSlice, u uint32, b bool, err error) { return block.MiniBlockSlice{miniBlock1}, 0, true, nil }, CreateMbsAndProcessTransactionsFromMeCalled: func(haveTime func() bool) block.MiniBlockSlice { @@ -2771,7 +2773,7 @@ func TestMetaProcessor_CreateBlockCreateHeaderProcessBlock(t *testing.T) { } txCoordinator := &mock.TransactionCoordinatorMock{ - CreateMbsAndProcessCrossShardTransactionsDstMeCalled: func(header data.HeaderHandler, processedMiniBlocksHashes map[string]struct{}, haveTime func() bool, haveAdditionalTime func() bool, scheduledMode bool) (slices block.MiniBlockSlice, u uint32, b bool, err error) { + CreateMbsAndProcessCrossShardTransactionsDstMeCalled: func(header data.HeaderHandler, processedMiniBlocksInfo map[string]*processedMb.ProcessedMiniBlockInfo, haveTime func() bool, haveAdditionalTime func() bool, scheduledMode bool) (slices block.MiniBlockSlice, u 
uint32, b bool, err error) { return block.MiniBlockSlice{miniBlock1}, 0, true, nil }, } @@ -2917,7 +2919,7 @@ func TestMetaProcessor_CreateAndProcessBlockCallsProcessAfterFirstEpoch(t *testi } txCoordinator := &mock.TransactionCoordinatorMock{ - CreateMbsAndProcessCrossShardTransactionsDstMeCalled: func(header data.HeaderHandler, processedMiniBlocksHashes map[string]struct{}, haveTime func() bool, haveAdditionalTime func() bool, scheduledMode bool) (slices block.MiniBlockSlice, u uint32, b bool, err error) { + CreateMbsAndProcessCrossShardTransactionsDstMeCalled: func(header data.HeaderHandler, processedMiniBlocksInfo map[string]*processedMb.ProcessedMiniBlockInfo, haveTime func() bool, haveAdditionalTime func() bool, scheduledMode bool) (slices block.MiniBlockSlice, u uint32, b bool, err error) { return block.MiniBlockSlice{miniBlock1}, 0, true, nil }, } diff --git a/process/block/postprocess/basePostProcess.go b/process/block/postprocess/basePostProcess.go index c2e290b4455..9c5ff8fd2a0 100644 --- a/process/block/postprocess/basePostProcess.go +++ b/process/block/postprocess/basePostProcess.go @@ -38,7 +38,7 @@ type basePostProcessor struct { mutInterResultsForBlock sync.Mutex interResultsForBlock map[string]*txInfo - mapProcessedResult map[string]struct{} + mapProcessedResult map[string][][]byte intraShardMiniBlock *block.MiniBlock economicsFee process.FeeHandler } @@ -76,7 +76,7 @@ func (bpp *basePostProcessor) CreateBlockStarted() { bpp.mutInterResultsForBlock.Lock() bpp.interResultsForBlock = make(map[string]*txInfo) bpp.intraShardMiniBlock = nil - bpp.mapProcessedResult = make(map[string]struct{}) + bpp.mapProcessedResult = make(map[string][][]byte) bpp.mutInterResultsForBlock.Unlock() } @@ -163,24 +163,28 @@ func (bpp *basePostProcessor) GetCreatedInShardMiniBlock() *block.MiniBlock { } // RemoveProcessedResults will remove the processed results since the last init -func (bpp *basePostProcessor) RemoveProcessedResults() [][]byte { +func (bpp *basePostProcessor) RemoveProcessedResults(key []byte) [][]byte { bpp.mutInterResultsForBlock.Lock() defer bpp.mutInterResultsForBlock.Unlock() - listHashes := make([][]byte, 0, len(bpp.mapProcessedResult)) - for txHash := range bpp.mapProcessedResult { - listHashes = append(listHashes, []byte(txHash)) - delete(bpp.interResultsForBlock, txHash) + txHashes, ok := bpp.mapProcessedResult[string(key)] + if !ok { + return nil + } + + for _, txHash := range txHashes { + delete(bpp.interResultsForBlock, string(txHash)) } - return listHashes + + return txHashes } // InitProcessedResults will initialize the processed results -func (bpp *basePostProcessor) InitProcessedResults() { +func (bpp *basePostProcessor) InitProcessedResults(key []byte) { bpp.mutInterResultsForBlock.Lock() defer bpp.mutInterResultsForBlock.Unlock() - bpp.mapProcessedResult = make(map[string]struct{}) + bpp.mapProcessedResult[string(key)] = make([][]byte, 0) } func (bpp *basePostProcessor) splitMiniBlocksIfNeeded(miniBlocks []*block.MiniBlock) []*block.MiniBlock { @@ -262,3 +266,18 @@ func createMiniBlocksMap(scrMbs []*block.MiniBlock) map[uint32][]*block.MiniBloc return createdMapMbs } + +func (bpp *basePostProcessor) addIntermediateTxToResultsForBlock( + txHandler data.TransactionHandler, + txHash []byte, + sndShardID uint32, + rcvShardID uint32, +) { + addScrShardInfo := &txShardInfo{receiverShardID: rcvShardID, senderShardID: sndShardID} + scrInfo := &txInfo{tx: txHandler, txShardInfo: addScrShardInfo} + bpp.interResultsForBlock[string(txHash)] = scrInfo + + for key := range 
bpp.mapProcessedResult { + bpp.mapProcessedResult[key] = append(bpp.mapProcessedResult[key], txHash) + } +} diff --git a/process/block/postprocess/intermediateResults.go b/process/block/postprocess/intermediateResults.go index 0a42d3cbf5b..e52db7f3e82 100644 --- a/process/block/postprocess/intermediateResults.go +++ b/process/block/postprocess/intermediateResults.go @@ -67,7 +67,7 @@ func NewIntermediateResultsProcessor( shardCoordinator: coordinator, store: store, storageType: dataRetriever.UnsignedTransactionUnit, - mapProcessedResult: make(map[string]struct{}), + mapProcessedResult: make(map[string][][]byte), economicsFee: economicsFee, } @@ -235,11 +235,7 @@ func (irp *intermediateResultsProcessor) AddIntermediateTransactions(txs []data. } sndShId, dstShId := irp.getShardIdsFromAddresses(addScr.SndAddr, addScr.RcvAddr) - - addScrShardInfo := &txShardInfo{receiverShardID: dstShId, senderShardID: sndShId} - scrInfo := &txInfo{tx: addScr, txShardInfo: addScrShardInfo} - irp.interResultsForBlock[string(scrHash)] = scrInfo - irp.mapProcessedResult[string(scrHash)] = struct{}{} + irp.addIntermediateTxToResultsForBlock(addScr, scrHash, sndShId, dstShId) } return nil diff --git a/process/block/postprocess/intermediateResults_test.go b/process/block/postprocess/intermediateResults_test.go index 2271f47c30e..1d4f26b92fd 100644 --- a/process/block/postprocess/intermediateResults_test.go +++ b/process/block/postprocess/intermediateResults_test.go @@ -17,6 +17,7 @@ import ( "github.com/ElrondNetwork/elrond-go/process/mock" "github.com/ElrondNetwork/elrond-go/testscommon/hashingMocks" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) const maxGasLimitPerBlock = uint64(1500000000) @@ -448,21 +449,24 @@ func TestIntermediateResultsProcessor_AddIntermediateTransactionsAddAndRevert(t txs = append(txs, &smartContractResult.SmartContractResult{RcvAddr: []byte("rcv"), SndAddr: []byte("snd"), Value: big.NewInt(0), PrevTxHash: txHash, Nonce: 3}) txs = append(txs, &smartContractResult.SmartContractResult{RcvAddr: []byte("rcv"), SndAddr: []byte("snd"), Value: big.NewInt(0), PrevTxHash: txHash, Nonce: 4}) + key := []byte("key") + irp.InitProcessedResults(key) + err = irp.AddIntermediateTransactions(txs) assert.Nil(t, err) irp.mutInterResultsForBlock.Lock() - assert.Equal(t, len(irp.mapProcessedResult), len(txs)) + assert.Equal(t, len(irp.mapProcessedResult[string(key)]), len(txs)) irp.mutInterResultsForBlock.Unlock() - irp.RemoveProcessedResults() + irp.RemoveProcessedResults(key) irp.mutInterResultsForBlock.Lock() assert.Equal(t, len(irp.interResultsForBlock), 0) - assert.Equal(t, len(irp.mapProcessedResult), len(txs)) + assert.Equal(t, len(irp.mapProcessedResult[string(key)]), len(txs)) irp.mutInterResultsForBlock.Unlock() - irp.InitProcessedResults() + irp.InitProcessedResults(key) irp.mutInterResultsForBlock.Lock() - assert.Equal(t, len(irp.mapProcessedResult), 0) + assert.Equal(t, len(irp.mapProcessedResult[string(key)]), 0) irp.mutInterResultsForBlock.Unlock() } @@ -1075,3 +1079,41 @@ func TestIntermediateResultsProcessor_SplitMiniBlocksIfNeededShouldWork(t *testi splitMiniBlocks = irp.splitMiniBlocksIfNeeded(miniBlocks) assert.Equal(t, 5, len(splitMiniBlocks)) } + +func TestIntermediateResultsProcessor_addIntermediateTxToResultsForBlock(t *testing.T) { + t.Parallel() + + irp, _ := NewIntermediateResultsProcessor( + &hashingMocks.HasherMock{}, + &mock.MarshalizerMock{}, + mock.NewMultiShardsCoordinatorMock(5), + createMockPubkeyConverter(), + &mock.ChainStorerMock{}, + 
block.TxBlock, + &mock.TxForCurrentBlockStub{}, + &mock.FeeHandlerStub{}, + ) + + key := []byte("key") + irp.InitProcessedResults(key) + + tx := &transaction.Transaction{} + txHash := []byte("txHash") + sndShardID := uint32(1) + rcvShardID := uint32(2) + irp.addIntermediateTxToResultsForBlock(tx, txHash, sndShardID, rcvShardID) + + require.Equal(t, 1, len(irp.interResultsForBlock)) + require.Equal(t, 1, len(irp.mapProcessedResult)) + + scrInfo, ok := irp.interResultsForBlock[string(txHash)] + require.True(t, ok) + assert.Equal(t, tx, scrInfo.tx) + assert.Equal(t, sndShardID, scrInfo.senderShardID) + assert.Equal(t, rcvShardID, scrInfo.receiverShardID) + + intermediateResultsHashes, ok := irp.mapProcessedResult[string(key)] + require.True(t, ok) + require.Equal(t, 1, len(intermediateResultsHashes)) + assert.Equal(t, txHash, intermediateResultsHashes[0]) +} diff --git a/process/block/postprocess/oneMBPostProcessor.go b/process/block/postprocess/oneMBPostProcessor.go index 2fe8a76638c..4542fbde7c1 100644 --- a/process/block/postprocess/oneMBPostProcessor.go +++ b/process/block/postprocess/oneMBPostProcessor.go @@ -54,7 +54,7 @@ func NewOneMiniBlockPostProcessor( shardCoordinator: coordinator, store: store, storageType: storageType, - mapProcessedResult: make(map[string]struct{}), + mapProcessedResult: make(map[string][][]byte), economicsFee: economicsFee, } @@ -155,10 +155,7 @@ func (opp *oneMBPostProcessor) AddIntermediateTransactions(txs []data.Transactio return err } - addReceiptShardInfo := &txShardInfo{receiverShardID: selfId, senderShardID: selfId} - scrInfo := &txInfo{tx: txs[i], txShardInfo: addReceiptShardInfo} - opp.interResultsForBlock[string(txHash)] = scrInfo - opp.mapProcessedResult[string(txHash)] = struct{}{} + opp.addIntermediateTxToResultsForBlock(txs[i], txHash, selfId, selfId) } return nil diff --git a/process/block/preprocess/basePreProcess.go b/process/block/preprocess/basePreProcess.go index 4be1674b427..a928ab7e23b 100644 --- a/process/block/preprocess/basePreProcess.go +++ b/process/block/preprocess/basePreProcess.go @@ -1,6 +1,7 @@ package preprocess import ( + "bytes" "math/big" "sync" "time" @@ -105,6 +106,11 @@ type txsForBlock struct { txHashAndInfo map[string]*txInfo } +type processedIndexes struct { + indexOfLastTxProcessed int32 + indexOfLastTxProcessedByProposer int32 +} + // basePreProcess is the base struct for all pre-processors // beware of calling basePreProcess.epochConfirmed in all extensions of this struct if the flags from the basePreProcess are // used in those extensions instances @@ -120,6 +126,7 @@ type basePreProcess struct { flagOptimizeGasUsedInCrossMiniBlocks atomic.Flag frontRunningProtectionEnableEpoch uint32 flagFrontRunningProtection atomic.Flag + processedMiniBlocksTracker process.ProcessedMiniBlocksTracker } func (bpp *basePreProcess) removeBlockDataFromPools( @@ -481,6 +488,34 @@ func (bpp *basePreProcess) updateGasConsumedWithGasRefundedAndGasPenalized( gasInfo.totalGasConsumedInSelfShard -= gasToBeSubtracted } +func (bpp *basePreProcess) handleProcessTransactionInit(preProcessorExecutionInfoHandler process.PreProcessorExecutionInfoHandler, txHash []byte) int { + snapshot := bpp.accounts.JournalLen() + preProcessorExecutionInfoHandler.InitProcessedTxsResults(txHash) + bpp.gasHandler.Reset(txHash) + return snapshot +} + +func (bpp *basePreProcess) handleProcessTransactionError(preProcessorExecutionInfoHandler process.PreProcessorExecutionInfoHandler, snapshot int, txHash []byte) { + bpp.gasHandler.RestoreGasSinceLastReset(txHash) + + 
errRevert := bpp.accounts.RevertToSnapshot(snapshot) + if errRevert != nil { + log.Debug("basePreProcess.handleProcessError: RevertToSnapshot", "error", errRevert.Error()) + } + + preProcessorExecutionInfoHandler.RevertProcessedTxsResults([][]byte{txHash}, txHash) +} + +func getMiniBlockHeaderOfMiniBlock(headerHandler data.HeaderHandler, miniBlockHash []byte) (data.MiniBlockHeaderHandler, error) { + for _, miniBlockHeader := range headerHandler.GetMiniBlockHeaderHandlers() { + if bytes.Equal(miniBlockHeader.GetHash(), miniBlockHash) { + return miniBlockHeader, nil + } + } + + return nil, process.ErrMissingMiniBlockHeader +} + // epochConfirmed is called whenever a new epoch is confirmed from the structs that extend this instance func (bpp *basePreProcess) epochConfirmed(epoch uint32, _ uint64) { bpp.flagOptimizeGasUsedInCrossMiniBlocks.SetValue(epoch >= bpp.optimizeGasUsedInCrossMiniBlocksEnableEpoch) @@ -488,3 +523,28 @@ func (bpp *basePreProcess) epochConfirmed(epoch uint32, _ uint64) { bpp.flagFrontRunningProtection.SetValue(epoch >= bpp.frontRunningProtectionEnableEpoch) log.Debug("basePreProcess: front running protection", "enabled", bpp.flagFrontRunningProtection.IsSet()) } + +func (bpp *basePreProcess) getIndexesOfLastTxProcessed( + miniBlock *block.MiniBlock, + headerHandler data.HeaderHandler, +) (*processedIndexes, error) { + + miniBlockHash, err := core.CalculateHash(bpp.marshalizer, bpp.hasher, miniBlock) + if err != nil { + return nil, err + } + + pi := &processedIndexes{} + + processedMiniBlockInfo, _ := bpp.processedMiniBlocksTracker.GetProcessedMiniBlockInfo(miniBlockHash) + pi.indexOfLastTxProcessed = processedMiniBlockInfo.IndexOfLastTxProcessed + + miniBlockHeader, err := getMiniBlockHeaderOfMiniBlock(headerHandler, miniBlockHash) + if err != nil { + return nil, err + } + + pi.indexOfLastTxProcessedByProposer = miniBlockHeader.GetIndexOfLastTxProcessed() + + return pi, nil +} diff --git a/process/block/preprocess/export_test.go b/process/block/preprocess/export_test.go index e111d7a6db5..a14df7be4db 100644 --- a/process/block/preprocess/export_test.go +++ b/process/block/preprocess/export_test.go @@ -149,20 +149,20 @@ func (bc *balanceComputation) GetBalanceOfAddress(address []byte) *big.Int { return big.NewInt(0).Set(currValue) } -func (gc *gasComputation) GetTxHashesWithGasProvidedSinceLastReset() [][]byte { - return gc.getTxHashesWithGasProvidedSinceLastReset() +func (gc *gasComputation) GetTxHashesWithGasProvidedSinceLastReset(key []byte) [][]byte { + return gc.getTxHashesWithGasProvidedSinceLastReset(key) } -func (gc *gasComputation) GetTxHashesWithGasProvidedAsScheduledSinceLastReset() [][]byte { - return gc.getTxHashesWithGasProvidedAsScheduledSinceLastReset() +func (gc *gasComputation) GetTxHashesWithGasProvidedAsScheduledSinceLastReset(key []byte) [][]byte { + return gc.getTxHashesWithGasProvidedAsScheduledSinceLastReset(key) } -func (gc *gasComputation) GetTxHashesWithGasRefundedSinceLastReset() [][]byte { - return gc.getTxHashesWithGasRefundedSinceLastReset() +func (gc *gasComputation) GetTxHashesWithGasRefundedSinceLastReset(key []byte) [][]byte { + return gc.getTxHashesWithGasRefundedSinceLastReset(key) } -func (gc *gasComputation) GetTxHashesWithGasPenalizedSinceLastReset() [][]byte { - return gc.getTxHashesWithGasPenalizedSinceLastReset() +func (gc *gasComputation) GetTxHashesWithGasPenalizedSinceLastReset(key []byte) [][]byte { + return gc.getTxHashesWithGasPenalizedSinceLastReset(key) } func (ste *scheduledTxsExecution) ComputeScheduledIntermediateTxs( 
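Editorial note: the preceding basePostProcess/basePreProcess hunks and the gasComputation.go hunks that follow rework the "since last reset" bookkeeping from flat slices into per-key maps, so that work recorded while processing a single transaction (or miniblock) can be reverted on its own. The standalone sketch below only illustrates that pattern; keyedTracker, newKeyedTracker, reset, record and restore are simplified stand-in names invented for this example and are not part of the PR's API (the real counterparts are Reset/RestoreGasSinceLastReset, InitProcessedResults/RemoveProcessedResults and addIntermediateTxToResultsForBlock).

package main

import "fmt"

// keyedTracker is a simplified stand-in for the per-key bookkeeping introduced
// in gasComputation and basePostProcessor: a caller first opens a key, hashes
// are then recorded under every open key, and later exactly the hashes
// recorded since a given key was opened can be retrieved and undone.
type keyedTracker struct {
	hashesSinceReset map[string][][]byte
}

func newKeyedTracker() *keyedTracker {
	return &keyedTracker{hashesSinceReset: make(map[string][][]byte)}
}

// reset opens (or clears) the bucket for the given key.
func (kt *keyedTracker) reset(key []byte) {
	kt.hashesSinceReset[string(key)] = make([][]byte, 0)
}

// record appends the hash to every currently open bucket, mirroring how
// SetGasProvided and addIntermediateTxToResultsForBlock fan out to all keys.
func (kt *keyedTracker) record(hash []byte) {
	for key := range kt.hashesSinceReset {
		kt.hashesSinceReset[key] = append(kt.hashesSinceReset[key], hash)
	}
}

// restore returns the hashes recorded under one key so the caller can undo
// only that slice of work, leaving the other keys untouched.
func (kt *keyedTracker) restore(key []byte) [][]byte {
	return kt.hashesSinceReset[string(key)]
}

func main() {
	kt := newKeyedTracker()
	kt.reset([]byte("miniblock-1"))   // outer, miniblock-level scope
	kt.record([]byte("tx-hash-1"))
	kt.reset([]byte("tx-hash-2"))     // inner, per-transaction scope
	kt.record([]byte("tx-hash-2"))

	// Reverting the inner scope only touches what was recorded after its reset.
	fmt.Printf("inner scope: %d hash(es)\n", len(kt.restore([]byte("tx-hash-2"))))   // 1
	fmt.Printf("outer scope: %d hash(es)\n", len(kt.restore([]byte("miniblock-1")))) // 2
}

Recording under every open key is what lets an outer key see all hashes while an inner key sees only its own; in the hunks above this is how handleProcessTransactionInit (which calls Reset and InitProcessedTxsResults with the tx hash as key) and handleProcessTransactionError (which restores and reverts under that same key) can undo a single failed transaction without disturbing the rest of the miniblock's accumulated results.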
diff --git a/process/block/preprocess/gasComputation.go b/process/block/preprocess/gasComputation.go index c4158e43a1c..56c1a4dbee4 100644 --- a/process/block/preprocess/gasComputation.go +++ b/process/block/preprocess/gasComputation.go @@ -18,15 +18,15 @@ type gasComputation struct { txTypeHandler process.TxTypeHandler //TODO: Refactor these mutexes and maps in separated structures that handle the locking and unlocking for each operation required gasProvided map[string]uint64 - txHashesWithGasProvidedSinceLastReset [][]byte + txHashesWithGasProvidedSinceLastReset map[string][][]byte gasProvidedAsScheduled map[string]uint64 - txHashesWithGasProvidedAsScheduledSinceLastReset [][]byte + txHashesWithGasProvidedAsScheduledSinceLastReset map[string][][]byte mutGasProvided sync.RWMutex gasRefunded map[string]uint64 - txHashesWithGasRefundedSinceLastReset [][]byte + txHashesWithGasRefundedSinceLastReset map[string][][]byte mutGasRefunded sync.RWMutex gasPenalized map[string]uint64 - txHashesWithGasPenalizedSinceLastReset [][]byte + txHashesWithGasPenalizedSinceLastReset map[string][][]byte mutGasPenalized sync.RWMutex flagGasComputeV2 atomic.Flag @@ -54,13 +54,13 @@ func NewGasComputation( txTypeHandler: txTypeHandler, economicsFee: economicsFee, gasProvided: make(map[string]uint64), - txHashesWithGasProvidedSinceLastReset: make([][]byte, 0), + txHashesWithGasProvidedSinceLastReset: make(map[string][][]byte), gasProvidedAsScheduled: make(map[string]uint64), - txHashesWithGasProvidedAsScheduledSinceLastReset: make([][]byte, 0), + txHashesWithGasProvidedAsScheduledSinceLastReset: make(map[string][][]byte, 0), gasRefunded: make(map[string]uint64), - txHashesWithGasRefundedSinceLastReset: make([][]byte, 0), + txHashesWithGasRefundedSinceLastReset: make(map[string][][]byte, 0), gasPenalized: make(map[string]uint64), - txHashesWithGasPenalizedSinceLastReset: make([][]byte, 0), + txHashesWithGasPenalizedSinceLastReset: make(map[string][][]byte, 0), gasComputeV2EnableEpoch: gasComputeV2EnableEpoch, } log.Debug("gasComputation: enable epoch for sc deploy", "epoch", g.gasComputeV2EnableEpoch) @@ -74,35 +74,35 @@ func NewGasComputation( func (gc *gasComputation) Init() { gc.mutGasProvided.Lock() gc.gasProvided = make(map[string]uint64) - gc.txHashesWithGasProvidedSinceLastReset = make([][]byte, 0) + gc.txHashesWithGasProvidedSinceLastReset = make(map[string][][]byte) gc.gasProvidedAsScheduled = make(map[string]uint64) - gc.txHashesWithGasProvidedAsScheduledSinceLastReset = make([][]byte, 0) + gc.txHashesWithGasProvidedAsScheduledSinceLastReset = make(map[string][][]byte) gc.mutGasProvided.Unlock() gc.mutGasRefunded.Lock() gc.gasRefunded = make(map[string]uint64) - gc.txHashesWithGasRefundedSinceLastReset = make([][]byte, 0) + gc.txHashesWithGasRefundedSinceLastReset = make(map[string][][]byte) gc.mutGasRefunded.Unlock() gc.mutGasPenalized.Lock() gc.gasPenalized = make(map[string]uint64) - gc.txHashesWithGasPenalizedSinceLastReset = make([][]byte, 0) + gc.txHashesWithGasPenalizedSinceLastReset = make(map[string][][]byte) gc.mutGasPenalized.Unlock() } // Reset method resets tx hashes with gas provided, refunded and penalized since last reset -func (gc *gasComputation) Reset() { +func (gc *gasComputation) Reset(key []byte) { gc.mutGasProvided.Lock() - gc.txHashesWithGasProvidedSinceLastReset = make([][]byte, 0) - gc.txHashesWithGasProvidedAsScheduledSinceLastReset = make([][]byte, 0) + gc.txHashesWithGasProvidedSinceLastReset[string(key)] = make([][]byte, 0) + 
gc.txHashesWithGasProvidedAsScheduledSinceLastReset[string(key)] = make([][]byte, 0) gc.mutGasProvided.Unlock() gc.mutGasRefunded.Lock() - gc.txHashesWithGasRefundedSinceLastReset = make([][]byte, 0) + gc.txHashesWithGasRefundedSinceLastReset[string(key)] = make([][]byte, 0) gc.mutGasRefunded.Unlock() gc.mutGasPenalized.Lock() - gc.txHashesWithGasPenalizedSinceLastReset = make([][]byte, 0) + gc.txHashesWithGasPenalizedSinceLastReset[string(key)] = make([][]byte, 0) gc.mutGasPenalized.Unlock() } @@ -110,7 +110,9 @@ func (gc *gasComputation) Reset() { func (gc *gasComputation) SetGasProvided(gasProvided uint64, hash []byte) { gc.mutGasProvided.Lock() gc.gasProvided[string(hash)] = gasProvided - gc.txHashesWithGasProvidedSinceLastReset = append(gc.txHashesWithGasProvidedSinceLastReset, hash) + for key := range gc.txHashesWithGasProvidedSinceLastReset { + gc.txHashesWithGasProvidedSinceLastReset[key] = append(gc.txHashesWithGasProvidedSinceLastReset[key], hash) + } gc.mutGasProvided.Unlock() } @@ -118,7 +120,9 @@ func (gc *gasComputation) SetGasProvided(gasProvided uint64, hash []byte) { func (gc *gasComputation) SetGasProvidedAsScheduled(gasProvided uint64, hash []byte) { gc.mutGasProvided.Lock() gc.gasProvidedAsScheduled[string(hash)] = gasProvided - gc.txHashesWithGasProvidedAsScheduledSinceLastReset = append(gc.txHashesWithGasProvidedAsScheduledSinceLastReset, hash) + for key := range gc.txHashesWithGasProvidedAsScheduledSinceLastReset { + gc.txHashesWithGasProvidedAsScheduledSinceLastReset[key] = append(gc.txHashesWithGasProvidedAsScheduledSinceLastReset[key], hash) + } gc.mutGasProvided.Unlock() } @@ -126,7 +130,9 @@ func (gc *gasComputation) SetGasProvidedAsScheduled(gasProvided uint64, hash []b func (gc *gasComputation) SetGasRefunded(gasRefunded uint64, hash []byte) { gc.mutGasRefunded.Lock() gc.gasRefunded[string(hash)] = gasRefunded - gc.txHashesWithGasRefundedSinceLastReset = append(gc.txHashesWithGasRefundedSinceLastReset, hash) + for key := range gc.txHashesWithGasRefundedSinceLastReset { + gc.txHashesWithGasRefundedSinceLastReset[key] = append(gc.txHashesWithGasRefundedSinceLastReset[key], hash) + } gc.mutGasRefunded.Unlock() log.Trace("gasComputation.SetGasRefunded", "tx hash", hash, "gas refunded", gasRefunded) @@ -136,7 +142,9 @@ func (gc *gasComputation) SetGasRefunded(gasRefunded uint64, hash []byte) { func (gc *gasComputation) SetGasPenalized(gasPenalized uint64, hash []byte) { gc.mutGasPenalized.Lock() gc.gasPenalized[string(hash)] = gasPenalized - gc.txHashesWithGasPenalizedSinceLastReset = append(gc.txHashesWithGasPenalizedSinceLastReset, hash) + for key := range gc.txHashesWithGasPenalizedSinceLastReset { + gc.txHashesWithGasPenalizedSinceLastReset[key] = append(gc.txHashesWithGasPenalizedSinceLastReset[key], hash) + } gc.mutGasPenalized.Unlock() log.Trace("gasComputation.SetGasPenalized", "tx hash", hash, "gas penalized", gasPenalized) @@ -275,35 +283,35 @@ func (gc *gasComputation) RemoveGasPenalized(hashes [][]byte) { } // RestoreGasSinceLastReset method restores gas provided, refunded and penalized since last reset -func (gc *gasComputation) RestoreGasSinceLastReset() { - gc.RemoveGasProvided(gc.getTxHashesWithGasProvidedSinceLastReset()) - gc.RemoveGasProvidedAsScheduled(gc.getTxHashesWithGasProvidedAsScheduledSinceLastReset()) - gc.RemoveGasRefunded(gc.getTxHashesWithGasRefundedSinceLastReset()) - gc.RemoveGasPenalized(gc.getTxHashesWithGasPenalizedSinceLastReset()) +func (gc *gasComputation) RestoreGasSinceLastReset(key []byte) { + 
gc.RemoveGasProvided(gc.getTxHashesWithGasProvidedSinceLastReset(key)) + gc.RemoveGasProvidedAsScheduled(gc.getTxHashesWithGasProvidedAsScheduledSinceLastReset(key)) + gc.RemoveGasRefunded(gc.getTxHashesWithGasRefundedSinceLastReset(key)) + gc.RemoveGasPenalized(gc.getTxHashesWithGasPenalizedSinceLastReset(key)) } -func (gc *gasComputation) getTxHashesWithGasProvidedSinceLastReset() [][]byte { +func (gc *gasComputation) getTxHashesWithGasProvidedSinceLastReset(key []byte) [][]byte { gc.mutGasProvided.RLock() defer gc.mutGasProvided.RUnlock() - return gc.txHashesWithGasProvidedSinceLastReset + return gc.txHashesWithGasProvidedSinceLastReset[string(key)] } -func (gc *gasComputation) getTxHashesWithGasProvidedAsScheduledSinceLastReset() [][]byte { +func (gc *gasComputation) getTxHashesWithGasProvidedAsScheduledSinceLastReset(key []byte) [][]byte { gc.mutGasProvided.RLock() defer gc.mutGasProvided.RUnlock() - return gc.txHashesWithGasProvidedAsScheduledSinceLastReset + return gc.txHashesWithGasProvidedAsScheduledSinceLastReset[string(key)] } -func (gc *gasComputation) getTxHashesWithGasRefundedSinceLastReset() [][]byte { +func (gc *gasComputation) getTxHashesWithGasRefundedSinceLastReset(key []byte) [][]byte { gc.mutGasRefunded.RLock() defer gc.mutGasRefunded.RUnlock() - return gc.txHashesWithGasRefundedSinceLastReset + return gc.txHashesWithGasRefundedSinceLastReset[string(key)] } -func (gc *gasComputation) getTxHashesWithGasPenalizedSinceLastReset() [][]byte { +func (gc *gasComputation) getTxHashesWithGasPenalizedSinceLastReset(key []byte) [][]byte { gc.mutGasPenalized.RLock() defer gc.mutGasPenalized.RUnlock() - return gc.txHashesWithGasPenalizedSinceLastReset + return gc.txHashesWithGasPenalizedSinceLastReset[string(key)] } // ComputeGasProvidedByMiniBlock computes gas provided by the given miniblock in sender and receiver shard diff --git a/process/block/preprocess/gasComputation_test.go b/process/block/preprocess/gasComputation_test.go index 47984b17abe..60d95a395c8 100644 --- a/process/block/preprocess/gasComputation_test.go +++ b/process/block/preprocess/gasComputation_test.go @@ -55,16 +55,19 @@ func TestGasProvided_ShouldWork(t *testing.T) { 0, ) + key := []byte("key") + gc.Reset(key) + gc.SetGasProvided(2, []byte("hash1")) assert.Equal(t, uint64(2), gc.GasProvided([]byte("hash1"))) - require.Equal(t, 1, len(gc.GetTxHashesWithGasProvidedSinceLastReset())) - assert.Equal(t, []byte("hash1"), gc.GetTxHashesWithGasProvidedSinceLastReset()[0]) + require.Equal(t, 1, len(gc.GetTxHashesWithGasProvidedSinceLastReset(key))) + assert.Equal(t, []byte("hash1"), gc.GetTxHashesWithGasProvidedSinceLastReset(key)[0]) gc.SetGasProvided(3, []byte("hash2")) assert.Equal(t, uint64(3), gc.GasProvided([]byte("hash2"))) - require.Equal(t, 2, len(gc.GetTxHashesWithGasProvidedSinceLastReset())) - assert.Equal(t, []byte("hash1"), gc.GetTxHashesWithGasProvidedSinceLastReset()[0]) - assert.Equal(t, []byte("hash2"), gc.GetTxHashesWithGasProvidedSinceLastReset()[1]) + require.Equal(t, 2, len(gc.GetTxHashesWithGasProvidedSinceLastReset(key))) + assert.Equal(t, []byte("hash1"), gc.GetTxHashesWithGasProvidedSinceLastReset(key)[0]) + assert.Equal(t, []byte("hash2"), gc.GetTxHashesWithGasProvidedSinceLastReset(key)[1]) assert.Equal(t, uint64(5), gc.TotalGasProvided()) @@ -85,16 +88,19 @@ func TestGasRefunded_ShouldWork(t *testing.T) { 0, ) + key := []byte("key") + gc.Reset(key) + gc.SetGasRefunded(2, []byte("hash1")) assert.Equal(t, uint64(2), gc.GasRefunded([]byte("hash1"))) - require.Equal(t, 1, 
len(gc.GetTxHashesWithGasRefundedSinceLastReset())) - assert.Equal(t, []byte("hash1"), gc.GetTxHashesWithGasRefundedSinceLastReset()[0]) + require.Equal(t, 1, len(gc.GetTxHashesWithGasRefundedSinceLastReset(key))) + assert.Equal(t, []byte("hash1"), gc.GetTxHashesWithGasRefundedSinceLastReset(key)[0]) gc.SetGasRefunded(3, []byte("hash2")) assert.Equal(t, uint64(3), gc.GasRefunded([]byte("hash2"))) - require.Equal(t, 2, len(gc.GetTxHashesWithGasRefundedSinceLastReset())) - assert.Equal(t, []byte("hash1"), gc.GetTxHashesWithGasRefundedSinceLastReset()[0]) - assert.Equal(t, []byte("hash2"), gc.GetTxHashesWithGasRefundedSinceLastReset()[1]) + require.Equal(t, 2, len(gc.GetTxHashesWithGasRefundedSinceLastReset(key))) + assert.Equal(t, []byte("hash1"), gc.GetTxHashesWithGasRefundedSinceLastReset(key)[0]) + assert.Equal(t, []byte("hash2"), gc.GetTxHashesWithGasRefundedSinceLastReset(key)[1]) assert.Equal(t, uint64(5), gc.TotalGasRefunded()) @@ -115,16 +121,19 @@ func TestGasPenalized_ShouldWork(t *testing.T) { 0, ) + key := []byte("key") + gc.Reset(key) + gc.SetGasPenalized(2, []byte("hash1")) assert.Equal(t, uint64(2), gc.GasPenalized([]byte("hash1"))) - require.Equal(t, 1, len(gc.GetTxHashesWithGasPenalizedSinceLastReset())) - assert.Equal(t, []byte("hash1"), gc.GetTxHashesWithGasPenalizedSinceLastReset()[0]) + require.Equal(t, 1, len(gc.GetTxHashesWithGasPenalizedSinceLastReset(key))) + assert.Equal(t, []byte("hash1"), gc.GetTxHashesWithGasPenalizedSinceLastReset(key)[0]) gc.SetGasPenalized(3, []byte("hash2")) assert.Equal(t, uint64(3), gc.GasPenalized([]byte("hash2"))) - require.Equal(t, 2, len(gc.GetTxHashesWithGasPenalizedSinceLastReset())) - assert.Equal(t, []byte("hash1"), gc.GetTxHashesWithGasPenalizedSinceLastReset()[0]) - assert.Equal(t, []byte("hash2"), gc.GetTxHashesWithGasPenalizedSinceLastReset()[1]) + require.Equal(t, 2, len(gc.GetTxHashesWithGasPenalizedSinceLastReset(key))) + assert.Equal(t, []byte("hash1"), gc.GetTxHashesWithGasPenalizedSinceLastReset(key)[0]) + assert.Equal(t, []byte("hash2"), gc.GetTxHashesWithGasPenalizedSinceLastReset(key)[1]) assert.Equal(t, uint64(5), gc.TotalGasPenalized()) @@ -538,29 +547,32 @@ func TestReset_ShouldWork(t *testing.T) { 0, ) + key := []byte("key") + gc.Reset(key) + gc.SetGasProvided(5, []byte("hash1")) gc.SetGasProvidedAsScheduled(7, []byte("hash2")) gc.SetGasRefunded(2, []byte("hash1")) gc.SetGasPenalized(1, []byte("hash2")) - require.Equal(t, 1, len(gc.GetTxHashesWithGasProvidedSinceLastReset())) - assert.Equal(t, []byte("hash1"), gc.GetTxHashesWithGasProvidedSinceLastReset()[0]) + require.Equal(t, 1, len(gc.GetTxHashesWithGasProvidedSinceLastReset(key))) + assert.Equal(t, []byte("hash1"), gc.GetTxHashesWithGasProvidedSinceLastReset(key)[0]) - require.Equal(t, 1, len(gc.GetTxHashesWithGasProvidedAsScheduledSinceLastReset())) - assert.Equal(t, []byte("hash2"), gc.GetTxHashesWithGasProvidedAsScheduledSinceLastReset()[0]) + require.Equal(t, 1, len(gc.GetTxHashesWithGasProvidedAsScheduledSinceLastReset(key))) + assert.Equal(t, []byte("hash2"), gc.GetTxHashesWithGasProvidedAsScheduledSinceLastReset(key)[0]) - require.Equal(t, 1, len(gc.GetTxHashesWithGasRefundedSinceLastReset())) - assert.Equal(t, []byte("hash1"), gc.GetTxHashesWithGasRefundedSinceLastReset()[0]) + require.Equal(t, 1, len(gc.GetTxHashesWithGasRefundedSinceLastReset(key))) + assert.Equal(t, []byte("hash1"), gc.GetTxHashesWithGasRefundedSinceLastReset(key)[0]) - require.Equal(t, 1, len(gc.GetTxHashesWithGasPenalizedSinceLastReset())) - assert.Equal(t, []byte("hash2"), 
gc.GetTxHashesWithGasPenalizedSinceLastReset()[0]) + require.Equal(t, 1, len(gc.GetTxHashesWithGasPenalizedSinceLastReset(key))) + assert.Equal(t, []byte("hash2"), gc.GetTxHashesWithGasPenalizedSinceLastReset(key)[0]) - gc.Reset() + gc.Reset(key) - require.Equal(t, 0, len(gc.GetTxHashesWithGasProvidedSinceLastReset())) - require.Equal(t, 0, len(gc.GetTxHashesWithGasProvidedAsScheduledSinceLastReset())) - require.Equal(t, 0, len(gc.GetTxHashesWithGasRefundedSinceLastReset())) - require.Equal(t, 0, len(gc.GetTxHashesWithGasPenalizedSinceLastReset())) + require.Equal(t, 0, len(gc.GetTxHashesWithGasProvidedSinceLastReset(key))) + require.Equal(t, 0, len(gc.GetTxHashesWithGasProvidedAsScheduledSinceLastReset(key))) + require.Equal(t, 0, len(gc.GetTxHashesWithGasRefundedSinceLastReset(key))) + require.Equal(t, 0, len(gc.GetTxHashesWithGasPenalizedSinceLastReset(key))) } func TestRestoreGasSinceLastReset_ShouldWork(t *testing.T) { @@ -583,7 +595,7 @@ func TestRestoreGasSinceLastReset_ShouldWork(t *testing.T) { assert.Equal(t, uint64(2), gc.TotalGasRefunded()) assert.Equal(t, uint64(1), gc.TotalGasPenalized()) - gc.Reset() + gc.Reset([]byte("key")) gc.SetGasProvided(5, []byte("hash3")) gc.SetGasProvidedAsScheduled(7, []byte("hash4")) @@ -595,7 +607,7 @@ func TestRestoreGasSinceLastReset_ShouldWork(t *testing.T) { assert.Equal(t, uint64(4), gc.TotalGasRefunded()) assert.Equal(t, uint64(2), gc.TotalGasPenalized()) - gc.RestoreGasSinceLastReset() + gc.RestoreGasSinceLastReset([]byte("key")) assert.Equal(t, uint64(5), gc.TotalGasProvided()) assert.Equal(t, uint64(7), gc.TotalGasProvidedAsScheduled()) diff --git a/process/block/preprocess/rewardTxPreProcessor.go b/process/block/preprocess/rewardTxPreProcessor.go index 571cbe97f07..8f3da4d0449 100644 --- a/process/block/preprocess/rewardTxPreProcessor.go +++ b/process/block/preprocess/rewardTxPreProcessor.go @@ -45,6 +45,7 @@ func NewRewardTxPreprocessor( pubkeyConverter core.PubkeyConverter, blockSizeComputation BlockSizeComputationHandler, balanceComputation BalanceComputationHandler, + processedMiniBlocksTracker process.ProcessedMiniBlocksTracker, ) (*rewardTxPreprocessor, error) { if check.IfNil(hasher) { @@ -83,6 +84,9 @@ func NewRewardTxPreprocessor( if check.IfNil(balanceComputation) { return nil, process.ErrNilBalanceComputationHandler } + if check.IfNil(processedMiniBlocksTracker) { + return nil, process.ErrNilProcessedMiniBlocksTracker + } bpp := &basePreProcess{ hasher: hasher, @@ -92,10 +96,11 @@ func NewRewardTxPreprocessor( gasHandler: gasHandler, economicsFee: nil, }, - blockSizeComputation: blockSizeComputation, - balanceComputation: balanceComputation, - accounts: accounts, - pubkeyConverter: pubkeyConverter, + blockSizeComputation: blockSizeComputation, + balanceComputation: balanceComputation, + accounts: accounts, + pubkeyConverter: pubkeyConverter, + processedMiniBlocksTracker: processedMiniBlocksTracker, } rtp := &rewardTxPreprocessor{ @@ -208,7 +213,7 @@ func (rtp *rewardTxPreprocessor) RestoreBlockDataIntoPools( // ProcessBlockTransactions processes all the reward transactions from the block.Body, updates the state func (rtp *rewardTxPreprocessor) ProcessBlockTransactions( - _ data.HeaderHandler, + headerHandler data.HeaderHandler, body *block.Body, haveTime func() bool, ) error { @@ -222,7 +227,18 @@ func (rtp *rewardTxPreprocessor) ProcessBlockTransactions( continue } - for j := 0; j < len(miniBlock.TxHashes); j++ { + pi, err := rtp.getIndexesOfLastTxProcessed(miniBlock, headerHandler) + if err != nil { + return err + } + + 
indexOfFirstTxToBeProcessed := pi.indexOfLastTxProcessed + 1 + err = process.CheckIfIndexesAreOutOfBound(indexOfFirstTxToBeProcessed, pi.indexOfLastTxProcessedByProposer, miniBlock) + if err != nil { + return err + } + + for j := indexOfFirstTxToBeProcessed; j <= pi.indexOfLastTxProcessedByProposer; j++ { if !haveTime() { return process.ErrTimeIsOut } @@ -249,6 +265,7 @@ func (rtp *rewardTxPreprocessor) ProcessBlockTransactions( } } } + return nil } @@ -424,38 +441,62 @@ func (rtp *rewardTxPreprocessor) CreateAndProcessMiniBlocks( // ProcessMiniBlock processes all the reward transactions from a miniblock and saves the processed reward transactions // in local cache -func (rtp *rewardTxPreprocessor) ProcessMiniBlock(miniBlock *block.MiniBlock, haveTime func() bool, _ func() bool, _ func() (int, int), _ bool) ([][]byte, int, error) { +func (rtp *rewardTxPreprocessor) ProcessMiniBlock( + miniBlock *block.MiniBlock, + haveTime func() bool, + _ func() bool, + _ bool, + partialMbExecutionMode bool, + indexOfLastTxProcessed int, + preProcessorExecutionInfoHandler process.PreProcessorExecutionInfoHandler, +) ([][]byte, int, bool, error) { + + var err error + var txIndex int if miniBlock.Type != block.RewardsBlock { - return nil, 0, process.ErrWrongTypeInMiniBlock + return nil, indexOfLastTxProcessed, false, process.ErrWrongTypeInMiniBlock } if miniBlock.SenderShardID != core.MetachainShardId { - return nil, 0, process.ErrRewardMiniBlockNotFromMeta + return nil, indexOfLastTxProcessed, false, process.ErrRewardMiniBlockNotFromMeta + } + + indexOfFirstTxToBeProcessed := indexOfLastTxProcessed + 1 + err = process.CheckIfIndexesAreOutOfBound(int32(indexOfFirstTxToBeProcessed), int32(len(miniBlock.TxHashes))-1, miniBlock) + if err != nil { + return nil, indexOfLastTxProcessed, false, err } miniBlockRewardTxs, miniBlockTxHashes, err := rtp.getAllRewardTxsFromMiniBlock(miniBlock, haveTime) if err != nil { - return nil, 0, err + return nil, indexOfLastTxProcessed, false, err } - if rtp.blockSizeComputation.IsMaxBlockSizeWithoutThrottleReached(1, len(miniBlockRewardTxs)) { - return nil, 0, process.ErrMaxBlockSizeReached + if rtp.blockSizeComputation.IsMaxBlockSizeWithoutThrottleReached(1, len(miniBlock.TxHashes)) { + return nil, indexOfLastTxProcessed, false, process.ErrMaxBlockSizeReached } processedTxHashes := make([][]byte, 0) - for index := range miniBlockRewardTxs { + for txIndex = indexOfFirstTxToBeProcessed; txIndex < len(miniBlockRewardTxs); txIndex++ { if !haveTime() { - return processedTxHashes, index, process.ErrTimeIsOut + err = process.ErrTimeIsOut + break } - processedTxHashes = append(processedTxHashes, miniBlockTxHashes[index]) - - rtp.saveAccountBalanceForAddress(miniBlockRewardTxs[index].GetRcvAddr()) + rtp.saveAccountBalanceForAddress(miniBlockRewardTxs[txIndex].GetRcvAddr()) - err = rtp.rewardsProcessor.ProcessRewardTransaction(miniBlockRewardTxs[index]) + snapshot := rtp.handleProcessTransactionInit(preProcessorExecutionInfoHandler, miniBlockTxHashes[txIndex]) + err = rtp.rewardsProcessor.ProcessRewardTransaction(miniBlockRewardTxs[txIndex]) if err != nil { - return processedTxHashes, index, err + rtp.handleProcessTransactionError(preProcessorExecutionInfoHandler, snapshot, miniBlockTxHashes[txIndex]) + break } + + processedTxHashes = append(processedTxHashes, miniBlockTxHashes[txIndex]) + } + + if err != nil && !partialMbExecutionMode { + return processedTxHashes, txIndex - 1, true, err } txShardData := &txShardInfo{senderShardID: miniBlock.SenderShardID, receiverShardID: 
miniBlock.ReceiverShardID} @@ -467,9 +508,9 @@ func (rtp *rewardTxPreprocessor) ProcessMiniBlock(miniBlock *block.MiniBlock, ha rtp.rewardTxsForBlock.mutTxsForBlock.Unlock() rtp.blockSizeComputation.AddNumMiniBlocks(1) - rtp.blockSizeComputation.AddNumTxs(len(miniBlockRewardTxs)) + rtp.blockSizeComputation.AddNumTxs(len(miniBlock.TxHashes)) - return nil, len(processedTxHashes), nil + return nil, txIndex - 1, false, err } // CreateMarshalizedData marshalizes reward transaction hashes and and saves them into a new structure diff --git a/process/block/preprocess/rewardTxPreProcessor_test.go b/process/block/preprocess/rewardTxPreProcessor_test.go index 9f4e79ffbc7..7d0e6a1d53c 100644 --- a/process/block/preprocess/rewardTxPreProcessor_test.go +++ b/process/block/preprocess/rewardTxPreProcessor_test.go @@ -37,6 +37,7 @@ func TestNewRewardTxPreprocessor_NilRewardTxDataPoolShouldErr(t *testing.T) { createMockPubkeyConverter(), &testscommon.BlockSizeComputationStub{}, &testscommon.BalanceComputationStub{}, + &testscommon.ProcessedMiniBlocksTrackerStub{}, ) assert.Nil(t, rtp) @@ -60,6 +61,7 @@ func TestNewRewardTxPreprocessor_NilStoreShouldErr(t *testing.T) { createMockPubkeyConverter(), &testscommon.BlockSizeComputationStub{}, &testscommon.BalanceComputationStub{}, + &testscommon.ProcessedMiniBlocksTrackerStub{}, ) assert.Nil(t, rtp) @@ -83,6 +85,7 @@ func TestNewRewardTxPreprocessor_NilHasherShouldErr(t *testing.T) { createMockPubkeyConverter(), &testscommon.BlockSizeComputationStub{}, &testscommon.BalanceComputationStub{}, + &testscommon.ProcessedMiniBlocksTrackerStub{}, ) assert.Nil(t, rtp) @@ -106,6 +109,7 @@ func TestNewRewardTxPreprocessor_NilMarshalizerShouldErr(t *testing.T) { createMockPubkeyConverter(), &testscommon.BlockSizeComputationStub{}, &testscommon.BalanceComputationStub{}, + &testscommon.ProcessedMiniBlocksTrackerStub{}, ) assert.Nil(t, rtp) @@ -129,6 +133,7 @@ func TestNewRewardTxPreprocessor_NilRewardTxProcessorShouldErr(t *testing.T) { createMockPubkeyConverter(), &testscommon.BlockSizeComputationStub{}, &testscommon.BalanceComputationStub{}, + &testscommon.ProcessedMiniBlocksTrackerStub{}, ) assert.Nil(t, rtp) @@ -152,6 +157,7 @@ func TestNewRewardTxPreprocessor_NilShardCoordinatorShouldErr(t *testing.T) { createMockPubkeyConverter(), &testscommon.BlockSizeComputationStub{}, &testscommon.BalanceComputationStub{}, + &testscommon.ProcessedMiniBlocksTrackerStub{}, ) assert.Nil(t, rtp) @@ -175,6 +181,7 @@ func TestNewRewardTxPreprocessor_NilAccountsShouldErr(t *testing.T) { createMockPubkeyConverter(), &testscommon.BlockSizeComputationStub{}, &testscommon.BalanceComputationStub{}, + &testscommon.ProcessedMiniBlocksTrackerStub{}, ) assert.Nil(t, rtp) @@ -198,6 +205,7 @@ func TestNewRewardTxPreprocessor_NilRequestHandlerShouldErr(t *testing.T) { createMockPubkeyConverter(), &testscommon.BlockSizeComputationStub{}, &testscommon.BalanceComputationStub{}, + &testscommon.ProcessedMiniBlocksTrackerStub{}, ) assert.Nil(t, rtp) @@ -221,6 +229,7 @@ func TestNewRewardTxPreprocessor_NilGasHandlerShouldErr(t *testing.T) { createMockPubkeyConverter(), &testscommon.BlockSizeComputationStub{}, &testscommon.BalanceComputationStub{}, + &testscommon.ProcessedMiniBlocksTrackerStub{}, ) assert.Nil(t, rtp) @@ -244,6 +253,7 @@ func TestNewRewardTxPreprocessor_NilPubkeyConverterShouldErr(t *testing.T) { nil, &testscommon.BlockSizeComputationStub{}, &testscommon.BalanceComputationStub{}, + &testscommon.ProcessedMiniBlocksTrackerStub{}, ) assert.Nil(t, rtp) @@ -267,6 +277,7 @@ func 
TestNewRewardTxPreprocessor_NilBlockSizeComputationHandlerShouldErr(t *test createMockPubkeyConverter(), nil, &testscommon.BalanceComputationStub{}, + &testscommon.ProcessedMiniBlocksTrackerStub{}, ) assert.Nil(t, rtp) @@ -290,12 +301,37 @@ func TestNewRewardTxPreprocessor_NilBalanceComputationHandlerShouldErr(t *testin createMockPubkeyConverter(), &testscommon.BlockSizeComputationStub{}, nil, + &testscommon.ProcessedMiniBlocksTrackerStub{}, ) assert.Nil(t, rtp) assert.Equal(t, process.ErrNilBalanceComputationHandler, err) } +func TestNewRewardTxPreprocessor_NilProcessedMiniBlocksTrackerShouldErr(t *testing.T) { + t.Parallel() + + tdp := initDataPool() + rtp, err := NewRewardTxPreprocessor( + tdp.RewardTransactions(), + &mock.ChainStorerMock{}, + &hashingMocks.HasherMock{}, + &mock.MarshalizerMock{}, + &testscommon.RewardTxProcessorMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &stateMock.AccountsStub{}, + func(shardID uint32, txHashes [][]byte) {}, + &testscommon.GasHandlerStub{}, + createMockPubkeyConverter(), + &testscommon.BlockSizeComputationStub{}, + &testscommon.BalanceComputationStub{}, + nil, + ) + + assert.Nil(t, rtp) + assert.Equal(t, process.ErrNilProcessedMiniBlocksTracker, err) +} + func TestNewRewardTxPreprocessor_OkValsShouldWork(t *testing.T) { t.Parallel() @@ -313,6 +349,7 @@ func TestNewRewardTxPreprocessor_OkValsShouldWork(t *testing.T) { createMockPubkeyConverter(), &testscommon.BlockSizeComputationStub{}, &testscommon.BalanceComputationStub{}, + &testscommon.ProcessedMiniBlocksTrackerStub{}, ) assert.Nil(t, err) assert.NotNil(t, rtp) @@ -336,6 +373,7 @@ func TestRewardTxPreprocessor_CreateMarshalizedDataShouldWork(t *testing.T) { createMockPubkeyConverter(), &testscommon.BlockSizeComputationStub{}, &testscommon.BalanceComputationStub{}, + &testscommon.ProcessedMiniBlocksTrackerStub{}, ) txHashes := [][]byte{[]byte(txHash)} @@ -366,6 +404,7 @@ func TestRewardTxPreprocessor_ProcessMiniBlockInvalidMiniBlockTypeShouldErr(t *t createMockPubkeyConverter(), &testscommon.BlockSizeComputationStub{}, &testscommon.BalanceComputationStub{}, + &testscommon.ProcessedMiniBlocksTrackerStub{}, ) txHashes := [][]byte{[]byte(txHash)} @@ -376,7 +415,11 @@ func TestRewardTxPreprocessor_ProcessMiniBlockInvalidMiniBlockTypeShouldErr(t *t Type: 0, } - _, _, err := rtp.ProcessMiniBlock(&mb1, haveTimeTrue, haveAdditionalTimeFalse, getNumOfCrossInterMbsAndTxsZero, false) + preProcessorExecutionInfoHandlerMock := &testscommon.PreProcessorExecutionInfoHandlerMock{ + GetNumOfCrossInterMbsAndTxsCalled: getNumOfCrossInterMbsAndTxsZero, + } + + _, _, _, err := rtp.ProcessMiniBlock(&mb1, haveTimeTrue, haveAdditionalTimeFalse, false, false, -1, preProcessorExecutionInfoHandlerMock) assert.Equal(t, process.ErrWrongTypeInMiniBlock, err) } @@ -398,6 +441,7 @@ func TestRewardTxPreprocessor_ProcessMiniBlockShouldWork(t *testing.T) { createMockPubkeyConverter(), &testscommon.BlockSizeComputationStub{}, &testscommon.BalanceComputationStub{}, + &testscommon.ProcessedMiniBlocksTrackerStub{}, ) txHashes := [][]byte{[]byte(txHash)} @@ -411,7 +455,11 @@ func TestRewardTxPreprocessor_ProcessMiniBlockShouldWork(t *testing.T) { txs := []data.TransactionHandler{&rewardTx.RewardTx{}} rtp.AddTxs(txHashes, txs) - _, _, err := rtp.ProcessMiniBlock(&mb1, haveTimeTrue, haveAdditionalTimeFalse, getNumOfCrossInterMbsAndTxsZero, false) + preProcessorExecutionInfoHandlerMock := &testscommon.PreProcessorExecutionInfoHandlerMock{ + GetNumOfCrossInterMbsAndTxsCalled: getNumOfCrossInterMbsAndTxsZero, + } + + _, _, _, err := 
rtp.ProcessMiniBlock(&mb1, haveTimeTrue, haveAdditionalTimeFalse, false, false, -1, preProcessorExecutionInfoHandlerMock) assert.Nil(t, err) txsMap := rtp.GetAllCurrentUsedTxs() @@ -438,6 +486,7 @@ func TestRewardTxPreprocessor_ProcessMiniBlockNotFromMeta(t *testing.T) { createMockPubkeyConverter(), &testscommon.BlockSizeComputationStub{}, &testscommon.BalanceComputationStub{}, + &testscommon.ProcessedMiniBlocksTrackerStub{}, ) txHashes := [][]byte{[]byte(txHash)} @@ -451,7 +500,11 @@ func TestRewardTxPreprocessor_ProcessMiniBlockNotFromMeta(t *testing.T) { txs := []data.TransactionHandler{&rewardTx.RewardTx{}} rtp.AddTxs(txHashes, txs) - _, _, err := rtp.ProcessMiniBlock(&mb1, haveTimeTrue, haveAdditionalTimeFalse, getNumOfCrossInterMbsAndTxsZero, false) + preProcessorExecutionInfoHandlerMock := &testscommon.PreProcessorExecutionInfoHandlerMock{ + GetNumOfCrossInterMbsAndTxsCalled: getNumOfCrossInterMbsAndTxsZero, + } + + _, _, _, err := rtp.ProcessMiniBlock(&mb1, haveTimeTrue, haveAdditionalTimeFalse, false, false, -1, preProcessorExecutionInfoHandlerMock) assert.Equal(t, process.ErrRewardMiniBlockNotFromMeta, err) } @@ -473,6 +526,7 @@ func TestRewardTxPreprocessor_SaveTxsToStorageShouldWork(t *testing.T) { createMockPubkeyConverter(), &testscommon.BlockSizeComputationStub{}, &testscommon.BalanceComputationStub{}, + &testscommon.ProcessedMiniBlocksTrackerStub{}, ) txHashes := [][]byte{[]byte(txHash)} @@ -517,6 +571,7 @@ func TestRewardTxPreprocessor_RequestBlockTransactionsNoMissingTxsShouldWork(t * createMockPubkeyConverter(), &testscommon.BlockSizeComputationStub{}, &testscommon.BalanceComputationStub{}, + &testscommon.ProcessedMiniBlocksTrackerStub{}, ) txHashes := [][]byte{[]byte(txHash)} @@ -560,6 +615,7 @@ func TestRewardTxPreprocessor_RequestTransactionsForMiniBlockShouldWork(t *testi createMockPubkeyConverter(), &testscommon.BlockSizeComputationStub{}, &testscommon.BalanceComputationStub{}, + &testscommon.ProcessedMiniBlocksTrackerStub{}, ) txHashes := [][]byte{[]byte(txHash)} @@ -592,6 +648,7 @@ func TestRewardTxPreprocessor_ProcessBlockTransactions(t *testing.T) { createMockPubkeyConverter(), &testscommon.BlockSizeComputationStub{}, &testscommon.BalanceComputationStub{}, + &testscommon.ProcessedMiniBlocksTrackerStub{}, ) txHashes := [][]byte{[]byte(txHash)} @@ -611,10 +668,13 @@ func TestRewardTxPreprocessor_ProcessBlockTransactions(t *testing.T) { Type: block.RewardsBlock, } + mbHash1, _ := core.CalculateHash(rtp.marshalizer, rtp.hasher, &mb1) + mbHash2, _ := core.CalculateHash(rtp.marshalizer, rtp.hasher, &mb2) + var blockBody block.Body blockBody.MiniBlocks = append(blockBody.MiniBlocks, &mb1, &mb2) - err := rtp.ProcessBlockTransactions(&block.Header{}, &blockBody, haveTimeTrue) + err := rtp.ProcessBlockTransactions(&block.Header{MiniBlockHeaders: []block.MiniBlockHeader{{TxCount: 1, Hash: mbHash1}, {TxCount: 1, Hash: mbHash2}}}, &blockBody, haveTimeTrue) assert.Nil(t, err) } @@ -635,6 +695,7 @@ func TestRewardTxPreprocessor_IsDataPreparedShouldErr(t *testing.T) { createMockPubkeyConverter(), &testscommon.BlockSizeComputationStub{}, &testscommon.BalanceComputationStub{}, + &testscommon.ProcessedMiniBlocksTrackerStub{}, ) err := rtp.IsDataPrepared(1, haveTime) @@ -659,6 +720,7 @@ func TestRewardTxPreprocessor_IsDataPrepared(t *testing.T) { createMockPubkeyConverter(), &testscommon.BlockSizeComputationStub{}, &testscommon.BalanceComputationStub{}, + &testscommon.ProcessedMiniBlocksTrackerStub{}, ) go func() { @@ -704,6 +766,7 @@ func 
TestRewardTxPreprocessor_RestoreBlockDataIntoPools(t *testing.T) { createMockPubkeyConverter(), &testscommon.BlockSizeComputationStub{}, &testscommon.BalanceComputationStub{}, + &testscommon.ProcessedMiniBlocksTrackerStub{}, ) txHashes := [][]byte{[]byte("tx_hash1")} @@ -748,6 +811,7 @@ func TestRewardTxPreprocessor_CreateAndProcessMiniBlocksShouldWork(t *testing.T) createMockPubkeyConverter(), &testscommon.BlockSizeComputationStub{}, &testscommon.BalanceComputationStub{}, + &testscommon.ProcessedMiniBlocksTrackerStub{}, ) mBlocksSlice, err := rtp.CreateAndProcessMiniBlocks(haveTimeTrue, []byte("randomness")) @@ -772,6 +836,7 @@ func TestRewardTxPreprocessor_CreateBlockStartedShouldCleanMap(t *testing.T) { createMockPubkeyConverter(), &testscommon.BlockSizeComputationStub{}, &testscommon.BalanceComputationStub{}, + &testscommon.ProcessedMiniBlocksTrackerStub{}, ) rtp.CreateBlockStarted() diff --git a/process/block/preprocess/smartContractResults.go b/process/block/preprocess/smartContractResults.go index 02f83b8646b..c9706a26687 100644 --- a/process/block/preprocess/smartContractResults.go +++ b/process/block/preprocess/smartContractResults.go @@ -48,6 +48,7 @@ func NewSmartContractResultPreprocessor( balanceComputation BalanceComputationHandler, epochNotifier process.EpochNotifier, optimizeGasUsedInCrossMiniBlocksEnableEpoch uint32, + processedMiniBlocksTracker process.ProcessedMiniBlocksTracker, ) (*smartContractResults, error) { if check.IfNil(hasher) { @@ -92,6 +93,9 @@ func NewSmartContractResultPreprocessor( if check.IfNil(epochNotifier) { return nil, process.ErrNilEpochNotifier } + if check.IfNil(processedMiniBlocksTracker) { + return nil, process.ErrNilProcessedMiniBlocksTracker + } bpp := &basePreProcess{ hasher: hasher, @@ -107,6 +111,7 @@ func NewSmartContractResultPreprocessor( pubkeyConverter: pubkeyConverter, optimizeGasUsedInCrossMiniBlocksEnableEpoch: optimizeGasUsedInCrossMiniBlocksEnableEpoch, + processedMiniBlocksTracker: processedMiniBlocksTracker, } scr := &smartContractResults{ @@ -226,7 +231,7 @@ func (scr *smartContractResults) RestoreBlockDataIntoPools( // ProcessBlockTransactions processes all the smartContractResult from the block.Body, updates the state func (scr *smartContractResults) ProcessBlockTransactions( - _ data.HeaderHandler, + headerHandler data.HeaderHandler, body *block.Body, haveTime func() bool, ) error { @@ -274,7 +279,18 @@ func (scr *smartContractResults) ProcessBlockTransactions( continue } - for j := 0; j < len(miniBlock.TxHashes); j++ { + pi, err := scr.getIndexesOfLastTxProcessed(miniBlock, headerHandler) + if err != nil { + return err + } + + indexOfFirstTxToBeProcessed := pi.indexOfLastTxProcessed + 1 + err = process.CheckIfIndexesAreOutOfBound(indexOfFirstTxToBeProcessed, pi.indexOfLastTxProcessedByProposer, miniBlock) + if err != nil { + return err + } + + for j := indexOfFirstTxToBeProcessed; j <= pi.indexOfLastTxProcessedByProposer; j++ { if !haveTime() { return process.ErrTimeIsOut } @@ -494,35 +510,36 @@ func (scr *smartContractResults) ProcessMiniBlock( miniBlock *block.MiniBlock, haveTime func() bool, _ func() bool, - _ func() (int, int), _ bool, -) (processedTxHashes [][]byte, numProcessedSCRs int, err error) { + partialMbExecutionMode bool, + indexOfLastTxProcessed int, + preProcessorExecutionInfoHandler process.PreProcessorExecutionInfoHandler, +) ([][]byte, int, bool, error) { if miniBlock.Type != block.SmartContractResultBlock { - return nil, 0, process.ErrWrongTypeInMiniBlock + return nil, indexOfLastTxProcessed, false, 
process.ErrWrongTypeInMiniBlock } numSCRsProcessed := 0 var gasProvidedByTxInSelfShard uint64 - processedTxHashes = make([][]byte, 0) - miniBlockScrs, miniBlockTxHashes, err := scr.getAllScrsFromMiniBlock(miniBlock, haveTime) + var err error + var txIndex int + processedTxHashes := make([][]byte, 0) + + indexOfFirstTxToBeProcessed := indexOfLastTxProcessed + 1 + err = process.CheckIfIndexesAreOutOfBound(int32(indexOfFirstTxToBeProcessed), int32(len(miniBlock.TxHashes))-1, miniBlock) if err != nil { - return nil, 0, err + return nil, indexOfLastTxProcessed, false, err } - if scr.blockSizeComputation.IsMaxBlockSizeWithoutThrottleReached(1, len(miniBlockScrs)) { - return nil, 0, process.ErrMaxBlockSizeReached + miniBlockScrs, miniBlockTxHashes, err := scr.getAllScrsFromMiniBlock(miniBlock, haveTime) + if err != nil { + return nil, indexOfLastTxProcessed, false, err } - defer func() { - if err != nil { - for _, hash := range processedTxHashes { - log.Trace("smartContractResults.ProcessMiniBlock: defer func()", "tx hash", hash) - } - - scr.gasHandler.RestoreGasSinceLastReset() - } - }() + if scr.blockSizeComputation.IsMaxBlockSizeWithoutThrottleReached(1, len(miniBlock.TxHashes)) { + return nil, indexOfLastTxProcessed, false, process.ErrMaxBlockSizeReached + } gasInfo := gasConsumedInfo{ gasConsumedByMiniBlockInReceiverShard: uint64(0), @@ -557,42 +574,49 @@ func (scr *smartContractResults) ProcessMiniBlock( ) }() - for index := range miniBlockScrs { + for txIndex = indexOfFirstTxToBeProcessed; txIndex < len(miniBlockScrs); txIndex++ { if !haveTime() { - return processedTxHashes, index, process.ErrTimeIsOut + err = process.ErrTimeIsOut + break } gasProvidedByTxInSelfShard, err = scr.computeGasProvided( miniBlock.SenderShardID, miniBlock.ReceiverShardID, - miniBlockScrs[index], - miniBlockTxHashes[index], + miniBlockScrs[txIndex], + miniBlockTxHashes[txIndex], &gasInfo) if err != nil { - return processedTxHashes, index, err + break } - scr.gasHandler.SetGasProvided(gasProvidedByTxInSelfShard, miniBlockTxHashes[index]) - processedTxHashes = append(processedTxHashes, miniBlockTxHashes[index]) - if scr.flagOptimizeGasUsedInCrossMiniBlocks.IsSet() { if gasInfo.totalGasConsumedInSelfShard > maxGasLimitUsedForDestMeTxs { - return processedTxHashes, index, process.ErrMaxGasLimitUsedForDestMeTxsIsReached + err = process.ErrMaxGasLimitUsedForDestMeTxsIsReached + break } } - scr.saveAccountBalanceForAddress(miniBlockScrs[index].GetRcvAddr()) + scr.saveAccountBalanceForAddress(miniBlockScrs[txIndex].GetRcvAddr()) - _, err = scr.scrProcessor.ProcessSmartContractResult(miniBlockScrs[index]) + snapshot := scr.handleProcessTransactionInit(preProcessorExecutionInfoHandler, miniBlockTxHashes[txIndex]) + _, err = scr.scrProcessor.ProcessSmartContractResult(miniBlockScrs[txIndex]) if err != nil { - return processedTxHashes, index, err + scr.handleProcessTransactionError(preProcessorExecutionInfoHandler, snapshot, miniBlockTxHashes[txIndex]) + break } - scr.updateGasConsumedWithGasRefundedAndGasPenalized(miniBlockTxHashes[index], &gasInfo) + scr.updateGasConsumedWithGasRefundedAndGasPenalized(miniBlockTxHashes[txIndex], &gasInfo) + scr.gasHandler.SetGasProvided(gasProvidedByTxInSelfShard, miniBlockTxHashes[txIndex]) + processedTxHashes = append(processedTxHashes, miniBlockTxHashes[txIndex]) numSCRsProcessed++ } + if err != nil && !partialMbExecutionMode { + return processedTxHashes, txIndex - 1, true, err + } + txShardInfoToSet := &txShardInfo{senderShardID: miniBlock.SenderShardID, receiverShardID: 
miniBlock.ReceiverShardID} scr.scrForBlock.mutTxsForBlock.Lock() @@ -602,9 +626,9 @@ func (scr *smartContractResults) ProcessMiniBlock( scr.scrForBlock.mutTxsForBlock.Unlock() scr.blockSizeComputation.AddNumMiniBlocks(1) - scr.blockSizeComputation.AddNumTxs(len(miniBlockScrs)) + scr.blockSizeComputation.AddNumTxs(len(miniBlock.TxHashes)) - return nil, len(processedTxHashes), nil + return nil, txIndex - 1, false, err } // CreateMarshalizedData marshalizes smartContractResults and creates and saves them into a new structure diff --git a/process/block/preprocess/smartContractResults_test.go b/process/block/preprocess/smartContractResults_test.go index bdfd7525000..8e397f07958 100644 --- a/process/block/preprocess/smartContractResults_test.go +++ b/process/block/preprocess/smartContractResults_test.go @@ -6,6 +6,7 @@ import ( "testing" "time" + "github.com/ElrondNetwork/elrond-go-core/core" "github.com/ElrondNetwork/elrond-go-core/core/check" "github.com/ElrondNetwork/elrond-go-core/data" "github.com/ElrondNetwork/elrond-go-core/data/block" @@ -71,6 +72,7 @@ func TestScrsPreprocessor_NewSmartContractResultPreprocessorNilPool(t *testing.T &testscommon.BalanceComputationStub{}, &epochNotifier.EpochNotifierStub{}, 2, + &testscommon.ProcessedMiniBlocksTrackerStub{}, ) assert.Nil(t, txs) @@ -98,6 +100,7 @@ func TestScrsPreprocessor_NewSmartContractResultPreprocessorNilStore(t *testing. &testscommon.BalanceComputationStub{}, &epochNotifier.EpochNotifierStub{}, 2, + &testscommon.ProcessedMiniBlocksTrackerStub{}, ) assert.Nil(t, txs) @@ -125,6 +128,7 @@ func TestScrsPreprocessor_NewSmartContractResultPreprocessorNilHasher(t *testing &testscommon.BalanceComputationStub{}, &epochNotifier.EpochNotifierStub{}, 2, + &testscommon.ProcessedMiniBlocksTrackerStub{}, ) assert.Nil(t, txs) @@ -152,6 +156,7 @@ func TestScrsPreprocessor_NewSmartContractResultPreprocessorNilMarsalizer(t *tes &testscommon.BalanceComputationStub{}, &epochNotifier.EpochNotifierStub{}, 2, + &testscommon.ProcessedMiniBlocksTrackerStub{}, ) assert.Nil(t, txs) @@ -179,6 +184,7 @@ func TestScrsPreprocessor_NewSmartContractResultPreprocessorNilTxProce(t *testin &testscommon.BalanceComputationStub{}, &epochNotifier.EpochNotifierStub{}, 2, + &testscommon.ProcessedMiniBlocksTrackerStub{}, ) assert.Nil(t, txs) @@ -206,6 +212,7 @@ func TestScrsPreprocessor_NewSmartContractResultPreprocessorNilShardCoord(t *tes &testscommon.BalanceComputationStub{}, &epochNotifier.EpochNotifierStub{}, 2, + &testscommon.ProcessedMiniBlocksTrackerStub{}, ) assert.Nil(t, txs) @@ -233,6 +240,7 @@ func TestScrsPreprocessor_NewSmartContractResultPreprocessorNilAccounts(t *testi &testscommon.BalanceComputationStub{}, &epochNotifier.EpochNotifierStub{}, 2, + &testscommon.ProcessedMiniBlocksTrackerStub{}, ) assert.Nil(t, txs) @@ -259,6 +267,7 @@ func TestScrsPreprocessor_NewSmartContractResultPreprocessorNilRequestFunc(t *te &testscommon.BalanceComputationStub{}, &epochNotifier.EpochNotifierStub{}, 2, + &testscommon.ProcessedMiniBlocksTrackerStub{}, ) assert.Nil(t, txs) @@ -286,6 +295,7 @@ func TestScrsPreprocessor_NewSmartContractResultPreprocessorNilGasHandler(t *tes &testscommon.BalanceComputationStub{}, &epochNotifier.EpochNotifierStub{}, 2, + &testscommon.ProcessedMiniBlocksTrackerStub{}, ) assert.Nil(t, txs) @@ -313,6 +323,7 @@ func TestScrsPreprocessor_NewSmartContractResultPreprocessorShouldWork(t *testin &testscommon.BalanceComputationStub{}, &epochNotifier.EpochNotifierStub{}, 2, + &testscommon.ProcessedMiniBlocksTrackerStub{}, ) assert.Nil(t, err) @@ -340,6 
+351,7 @@ func TestScrsPreprocessor_NewSmartContractResultPreprocessorNilPubkeyConverter(t &testscommon.BalanceComputationStub{}, &epochNotifier.EpochNotifierStub{}, 2, + &testscommon.ProcessedMiniBlocksTrackerStub{}, ) assert.Nil(t, txs) @@ -367,6 +379,7 @@ func TestScrsPreprocessor_NewSmartContractResultPreprocessorNilBlockSizeComputat &testscommon.BalanceComputationStub{}, &epochNotifier.EpochNotifierStub{}, 2, + &testscommon.ProcessedMiniBlocksTrackerStub{}, ) assert.Nil(t, txs) @@ -394,6 +407,7 @@ func TestScrsPreprocessor_NewSmartContractResultPreprocessorNilBalanceComputatio nil, &epochNotifier.EpochNotifierStub{}, 2, + &testscommon.ProcessedMiniBlocksTrackerStub{}, ) assert.Nil(t, txs) @@ -421,12 +435,41 @@ func TestScrsPreprocessor_NewSmartContractResultPreprocessorNilEpochNotifier(t * &testscommon.BalanceComputationStub{}, nil, 2, + &testscommon.ProcessedMiniBlocksTrackerStub{}, ) assert.Nil(t, txs) assert.Equal(t, process.ErrNilEpochNotifier, err) } +func TestScrsPreprocessor_NewSmartContractResultPreprocessorNilProcessedMiniBlocksTracker(t *testing.T) { + t.Parallel() + + tdp := initDataPool() + requestTransaction := func(shardID uint32, txHashes [][]byte) {} + txs, err := NewSmartContractResultPreprocessor( + tdp.UnsignedTransactions(), + &mock.ChainStorerMock{}, + &hashingMocks.HasherMock{}, + &mock.MarshalizerMock{}, + &testscommon.TxProcessorMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &stateMock.AccountsStub{}, + requestTransaction, + &mock.GasHandlerMock{}, + feeHandlerMock(), + createMockPubkeyConverter(), + &testscommon.BlockSizeComputationStub{}, + &testscommon.BalanceComputationStub{}, + &epochNotifier.EpochNotifierStub{}, + 2, + nil, + ) + + assert.Nil(t, txs) + assert.Equal(t, process.ErrNilProcessedMiniBlocksTracker, err) +} + func TestScrsPreProcessor_GetTransactionFromPool(t *testing.T) { t.Parallel() @@ -448,6 +491,7 @@ func TestScrsPreProcessor_GetTransactionFromPool(t *testing.T) { &testscommon.BalanceComputationStub{}, &epochNotifier.EpochNotifierStub{}, 2, + &testscommon.ProcessedMiniBlocksTrackerStub{}, ) txHash := []byte("tx1_hash") @@ -479,6 +523,7 @@ func TestScrsPreprocessor_RequestTransactionNothingToRequestAsGeneratedAtProcess &testscommon.BalanceComputationStub{}, &epochNotifier.EpochNotifierStub{}, 2, + &testscommon.ProcessedMiniBlocksTrackerStub{}, ) shardId := uint32(1) @@ -518,6 +563,7 @@ func TestScrsPreprocessor_RequestTransactionFromNetwork(t *testing.T) { &testscommon.BalanceComputationStub{}, &epochNotifier.EpochNotifierStub{}, 2, + &testscommon.ProcessedMiniBlocksTrackerStub{}, ) shardId := uint32(1) @@ -556,6 +602,7 @@ func TestScrsPreprocessor_RequestBlockTransactionFromMiniBlockFromNetwork(t *tes &testscommon.BalanceComputationStub{}, &epochNotifier.EpochNotifierStub{}, 2, + &testscommon.ProcessedMiniBlocksTrackerStub{}, ) shardId := uint32(1) @@ -605,6 +652,7 @@ func TestScrsPreprocessor_ReceivedTransactionShouldEraseRequested(t *testing.T) &testscommon.BalanceComputationStub{}, &epochNotifier.EpochNotifierStub{}, 2, + &testscommon.ProcessedMiniBlocksTrackerStub{}, ) // add 3 tx hashes on requested list @@ -680,6 +728,7 @@ func TestScrsPreprocessor_GetAllTxsFromMiniBlockShouldWork(t *testing.T) { &testscommon.BalanceComputationStub{}, &epochNotifier.EpochNotifierStub{}, 2, + &testscommon.ProcessedMiniBlocksTrackerStub{}, ) mb := &block.MiniBlock{ @@ -724,6 +773,7 @@ func TestScrsPreprocessor_RemoveBlockDataFromPoolsNilBlockShouldErr(t *testing.T &testscommon.BalanceComputationStub{}, &epochNotifier.EpochNotifierStub{}, 2, + 
&testscommon.ProcessedMiniBlocksTrackerStub{}, ) err := txs.RemoveBlockDataFromPools(nil, tdp.MiniBlocks()) @@ -753,6 +803,7 @@ func TestScrsPreprocessor_RemoveBlockDataFromPoolsOK(t *testing.T) { &testscommon.BalanceComputationStub{}, &epochNotifier.EpochNotifierStub{}, 2, + &testscommon.ProcessedMiniBlocksTrackerStub{}, ) body := &block.Body{} @@ -795,6 +846,7 @@ func TestScrsPreprocessor_IsDataPreparedErr(t *testing.T) { &testscommon.BalanceComputationStub{}, &epochNotifier.EpochNotifierStub{}, 2, + &testscommon.ProcessedMiniBlocksTrackerStub{}, ) err := txs.IsDataPrepared(1, haveTime) @@ -824,6 +876,7 @@ func TestScrsPreprocessor_IsDataPrepared(t *testing.T) { &testscommon.BalanceComputationStub{}, &epochNotifier.EpochNotifierStub{}, 2, + &testscommon.ProcessedMiniBlocksTrackerStub{}, ) go func() { @@ -858,6 +911,7 @@ func TestScrsPreprocessor_SaveTxsToStorage(t *testing.T) { &testscommon.BalanceComputationStub{}, &epochNotifier.EpochNotifierStub{}, 2, + &testscommon.ProcessedMiniBlocksTrackerStub{}, ) body := &block.Body{} @@ -915,6 +969,7 @@ func TestScrsPreprocessor_SaveTxsToStorageShouldSaveCorrectly(t *testing.T) { &testscommon.BalanceComputationStub{}, &epochNotifier.EpochNotifierStub{}, 2, + &testscommon.ProcessedMiniBlocksTrackerStub{}, ) body := &block.Body{} @@ -994,6 +1049,7 @@ func TestScrsPreprocessor_SaveTxsToStorageMissingTransactionsShouldNotErr(t *tes &testscommon.BalanceComputationStub{}, &epochNotifier.EpochNotifierStub{}, 2, + &testscommon.ProcessedMiniBlocksTrackerStub{}, ) body := &block.Body{} @@ -1041,6 +1097,7 @@ func TestScrsPreprocessor_ProcessBlockTransactionsShouldWork(t *testing.T) { &testscommon.BalanceComputationStub{}, &epochNotifier.EpochNotifierStub{}, 2, + &testscommon.ProcessedMiniBlocksTrackerStub{}, ) body := &block.Body{} @@ -1056,6 +1113,8 @@ func TestScrsPreprocessor_ProcessBlockTransactionsShouldWork(t *testing.T) { Type: block.SmartContractResultBlock, } + miniblockHash, _ := core.CalculateHash(scrPreproc.marshalizer, scrPreproc.hasher, &miniblock) + body.MiniBlocks = append(body.MiniBlocks, &miniblock) scrPreproc.AddScrHashToRequestedList([]byte("txHash")) @@ -1067,7 +1126,7 @@ func TestScrsPreprocessor_ProcessBlockTransactionsShouldWork(t *testing.T) { scrPreproc.scrForBlock.txHashAndInfo["txHash"] = &txInfo{&scr, &txshardInfo} - err := scrPreproc.ProcessBlockTransactions(&block.Header{}, body, haveTimeTrue) + err := scrPreproc.ProcessBlockTransactions(&block.Header{MiniBlockHeaders: []block.MiniBlockHeader{{TxCount: 1, Hash: miniblockHash}}}, body, haveTimeTrue) assert.Nil(t, err) } @@ -1101,6 +1160,7 @@ func TestScrsPreprocessor_ProcessBlockTransactionsShouldErrMaxGasLimitPerBlockIn &testscommon.BalanceComputationStub{}, &epochNotifier.EpochNotifierStub{}, 2, + &testscommon.ProcessedMiniBlocksTrackerStub{}, ) body := &block.Body{} @@ -1115,6 +1175,7 @@ func TestScrsPreprocessor_ProcessBlockTransactionsShouldErrMaxGasLimitPerBlockIn TxHashes: txHashes, Type: block.SmartContractResultBlock, } + miniblockHash, _ := core.CalculateHash(scrPreproc.marshalizer, scrPreproc.hasher, &miniblock) body.MiniBlocks = append(body.MiniBlocks, &miniblock) @@ -1127,12 +1188,12 @@ func TestScrsPreprocessor_ProcessBlockTransactionsShouldErrMaxGasLimitPerBlockIn scrPreproc.scrForBlock.txHashAndInfo["txHash"] = &txInfo{&scr, &txshardInfo} - err := scrPreproc.ProcessBlockTransactions(&block.Header{}, body, haveTimeTrue) + err := scrPreproc.ProcessBlockTransactions(&block.Header{MiniBlockHeaders: []block.MiniBlockHeader{{Hash: miniblockHash, TxCount: 1}}}, body, 
haveTimeTrue) assert.Nil(t, err) scrPreproc.EpochConfirmed(2, 0) - err = scrPreproc.ProcessBlockTransactions(&block.Header{}, body, haveTimeTrue) + err = scrPreproc.ProcessBlockTransactions(&block.Header{MiniBlockHeaders: []block.MiniBlockHeader{{Hash: miniblockHash, TxCount: 1}}}, body, haveTimeTrue) assert.Equal(t, process.ErrMaxGasLimitPerBlockInSelfShardIsReached, err) } @@ -1178,6 +1239,7 @@ func TestScrsPreprocessor_ProcessMiniBlock(t *testing.T) { &testscommon.BalanceComputationStub{}, &epochNotifier.EpochNotifierStub{}, 2, + &testscommon.ProcessedMiniBlocksTrackerStub{}, ) txHash := []byte("tx1_hash") @@ -1191,7 +1253,11 @@ func TestScrsPreprocessor_ProcessMiniBlock(t *testing.T) { Type: block.SmartContractResultBlock, } - _, _, err := scr.ProcessMiniBlock(&miniblock, haveTimeTrue, haveAdditionalTimeFalse, getNumOfCrossInterMbsAndTxsZero, false) + preProcessorExecutionInfoHandlerMock := &testscommon.PreProcessorExecutionInfoHandlerMock{ + GetNumOfCrossInterMbsAndTxsCalled: getNumOfCrossInterMbsAndTxsZero, + } + + _, _, _, err := scr.ProcessMiniBlock(&miniblock, haveTimeTrue, haveAdditionalTimeFalse, false, false, -1, preProcessorExecutionInfoHandlerMock) assert.Nil(t, err) } @@ -1218,6 +1284,7 @@ func TestScrsPreprocessor_ProcessMiniBlockWrongTypeMiniblockShouldErr(t *testing &testscommon.BalanceComputationStub{}, &epochNotifier.EpochNotifierStub{}, 2, + &testscommon.ProcessedMiniBlocksTrackerStub{}, ) miniblock := block.MiniBlock{ @@ -1225,7 +1292,11 @@ func TestScrsPreprocessor_ProcessMiniBlockWrongTypeMiniblockShouldErr(t *testing SenderShardID: 0, } - _, _, err := scr.ProcessMiniBlock(&miniblock, haveTimeTrue, haveAdditionalTimeFalse, getNumOfCrossInterMbsAndTxsZero, false) + preProcessorExecutionInfoHandlerMock := &testscommon.PreProcessorExecutionInfoHandlerMock{ + GetNumOfCrossInterMbsAndTxsCalled: getNumOfCrossInterMbsAndTxsZero, + } + + _, _, _, err := scr.ProcessMiniBlock(&miniblock, haveTimeTrue, haveAdditionalTimeFalse, false, false, -1, preProcessorExecutionInfoHandlerMock) assert.NotNil(t, err) assert.Equal(t, err, process.ErrWrongTypeInMiniBlock) @@ -1277,6 +1348,7 @@ func TestScrsPreprocessor_RestoreBlockDataIntoPools(t *testing.T) { &testscommon.BalanceComputationStub{}, &epochNotifier.EpochNotifierStub{}, 2, + &testscommon.ProcessedMiniBlocksTrackerStub{}, ) body := &block.Body{} @@ -1321,6 +1393,7 @@ func TestScrsPreprocessor_RestoreBlockDataIntoPoolsNilMiniblockPoolShouldErr(t * &testscommon.BalanceComputationStub{}, &epochNotifier.EpochNotifierStub{}, 2, + &testscommon.ProcessedMiniBlocksTrackerStub{}, ) body := &block.Body{} @@ -1355,6 +1428,7 @@ func TestSmartContractResults_CreateBlockStartedShouldEmptyTxHashAndInfo(t *test &testscommon.BalanceComputationStub{}, &epochNotifier.EpochNotifierStub{}, 2, + &testscommon.ProcessedMiniBlocksTrackerStub{}, ) scr.CreateBlockStarted() @@ -1383,6 +1457,7 @@ func TestSmartContractResults_GetAllCurrentUsedTxs(t *testing.T) { &testscommon.BalanceComputationStub{}, &epochNotifier.EpochNotifierStub{}, 2, + &testscommon.ProcessedMiniBlocksTrackerStub{}, ) txshardInfo := txShardInfo{0, 3} diff --git a/process/block/preprocess/transactions.go b/process/block/preprocess/transactions.go index 7b2cf070982..67a882862ee 100644 --- a/process/block/preprocess/transactions.go +++ b/process/block/preprocess/transactions.go @@ -89,6 +89,7 @@ type ArgsTransactionPreProcessor struct { ScheduledMiniBlocksEnableEpoch uint32 TxTypeHandler process.TxTypeHandler ScheduledTxsExecutionHandler process.ScheduledTxsExecutionHandler + 
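
Note on the ProcessBlockTransactions tests above: a bare &block.Header{} is no longer enough, because the preprocessor now derives the per mini block processing window from the header, so the header has to carry a MiniBlockHeader whose Hash matches the mini block in the body and whose TxCount is set. The fragment below mirrors what the updated tests do; it reuses scrPreproc, miniblock, body and haveTimeTrue from the surrounding test and is only an illustration.

miniblockHash, _ := core.CalculateHash(scrPreproc.marshalizer, scrPreproc.hasher, &miniblock)
header := &block.Header{
	MiniBlockHeaders: []block.MiniBlockHeader{
		// Hash ties the header entry to the mini block, TxCount bounds the processing window
		{Hash: miniblockHash, TxCount: uint32(len(miniblock.TxHashes))},
	},
}
err := scrPreproc.ProcessBlockTransactions(header, body, haveTimeTrue)
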
ProcessedMiniBlocksTracker process.ProcessedMiniBlocksTracker } // NewTransactionPreprocessor creates a new transaction preprocessor object @@ -146,6 +147,9 @@ func NewTransactionPreprocessor( if check.IfNil(args.ScheduledTxsExecutionHandler) { return nil, process.ErrNilScheduledTxsExecutionHandler } + if check.IfNil(args.ProcessedMiniBlocksTracker) { + return nil, process.ErrNilProcessedMiniBlocksTracker + } bpp := basePreProcess{ hasher: args.Hasher, @@ -162,6 +166,7 @@ func NewTransactionPreprocessor( optimizeGasUsedInCrossMiniBlocksEnableEpoch: args.OptimizeGasUsedInCrossMiniBlocksEnableEpoch, frontRunningProtectionEnableEpoch: args.FrontRunningProtectionEnableEpoch, + processedMiniBlocksTracker: args.ProcessedMiniBlocksTracker, } txs := &transactions{ @@ -325,7 +330,10 @@ func (txs *transactions) ProcessBlockTransactions( return process.ErrInvalidBody } -func (txs *transactions) computeTxsToMe(body *block.Body) ([]*txcache.WrappedTransaction, error) { +func (txs *transactions) computeTxsToMe( + headerHandler data.HeaderHandler, + body *block.Body, +) ([]*txcache.WrappedTransaction, error) { if check.IfNil(body) { return nil, process.ErrNilBlockBody } @@ -344,7 +352,12 @@ func (txs *transactions) computeTxsToMe(body *block.Body) ([]*txcache.WrappedTra miniBlock.ReceiverShardID) } - txsFromMiniBlock, err := txs.computeTxsFromMiniBlock(miniBlock) + pi, err := txs.getIndexesOfLastTxProcessed(miniBlock, headerHandler) + if err != nil { + return nil, err + } + + txsFromMiniBlock, err := txs.computeTxsFromMiniBlock(miniBlock, pi) if err != nil { return nil, err } @@ -369,7 +382,12 @@ func (txs *transactions) computeTxsFromMe(body *block.Body) ([]*txcache.WrappedT continue } - txsFromMiniBlock, err := txs.computeTxsFromMiniBlock(miniBlock) + pi := &processedIndexes{ + indexOfLastTxProcessed: -1, + indexOfLastTxProcessedByProposer: int32(len(miniBlock.TxHashes)) - 1, + } + + txsFromMiniBlock, err := txs.computeTxsFromMiniBlock(miniBlock, pi) if err != nil { return nil, err } @@ -394,7 +412,12 @@ func (txs *transactions) computeScheduledTxsFromMe(body *block.Body) ([]*txcache continue } - txsFromScheduledMiniBlock, err := txs.computeTxsFromMiniBlock(miniBlock) + pi := &processedIndexes{ + indexOfLastTxProcessed: -1, + indexOfLastTxProcessedByProposer: int32(len(miniBlock.TxHashes)) - 1, + } + + txsFromScheduledMiniBlock, err := txs.computeTxsFromMiniBlock(miniBlock, pi) if err != nil { return nil, err } @@ -405,10 +428,20 @@ func (txs *transactions) computeScheduledTxsFromMe(body *block.Body) ([]*txcache return allScheduledTxs, nil } -func (txs *transactions) computeTxsFromMiniBlock(miniBlock *block.MiniBlock) ([]*txcache.WrappedTransaction, error) { +func (txs *transactions) computeTxsFromMiniBlock( + miniBlock *block.MiniBlock, + pi *processedIndexes, +) ([]*txcache.WrappedTransaction, error) { + txsFromMiniBlock := make([]*txcache.WrappedTransaction, 0, len(miniBlock.TxHashes)) - for i := 0; i < len(miniBlock.TxHashes); i++ { + indexOfFirstTxToBeProcessed := pi.indexOfLastTxProcessed + 1 + err := process.CheckIfIndexesAreOutOfBound(indexOfFirstTxToBeProcessed, pi.indexOfLastTxProcessedByProposer, miniBlock) + if err != nil { + return nil, err + } + + for i := indexOfFirstTxToBeProcessed; i <= pi.indexOfLastTxProcessedByProposer; i++ { txHash := miniBlock.TxHashes[i] txs.txsForCurrBlock.mutTxsForBlock.RLock() txInfoFromMap, ok := txs.txsForCurrBlock.txHashAndInfo[string(txHash)] @@ -470,7 +503,7 @@ func (txs *transactions) processTxsToMe( } } - txsToMe, err := txs.computeTxsToMe(body) + 
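
A word on the processedIndexes plumbing above: for mini blocks originating in the own shard (computeTxsFromMe, computeScheduledTxsFromMe) the window is simply the whole mini block, expressed as indexOfLastTxProcessed = -1 and indexOfLastTxProcessedByProposer = len(TxHashes) - 1, while for destination-me mini blocks the window is read from the header through getIndexesOfLastTxProcessed. The sketch below shows how such a full range window is built and validated before iterating; the helper name fullRangeIndexes is an assumption made only for this example.

func fullRangeIndexes(miniBlock *block.MiniBlock) (*processedIndexes, error) {
	pi := &processedIndexes{
		indexOfLastTxProcessed:           -1,
		indexOfLastTxProcessedByProposer: int32(len(miniBlock.TxHashes)) - 1,
	}

	// processing resumes right after the last processed transaction; the bounds check
	// rejects malformed windows and mini blocks that are already fully processed
	indexOfFirstTxToBeProcessed := pi.indexOfLastTxProcessed + 1
	err := process.CheckIfIndexesAreOutOfBound(indexOfFirstTxToBeProcessed, pi.indexOfLastTxProcessedByProposer, miniBlock)
	if err != nil {
		return nil, err
	}

	return pi, nil
}
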
txsToMe, err := txs.computeTxsToMe(header, body) if err != nil { return err } @@ -502,7 +535,7 @@ func (txs *transactions) processTxsToMe( "scheduled mode", scheduledMode, "totalGasConsumedInSelfShard", gasInfo.totalGasConsumedInSelfShard, "gasConsumedByMiniBlockInReceiverShard", gasInfo.gasConsumedByMiniBlockInReceiverShard, - "num scrs processed", numTXsProcessed, + "num txs processed", numTXsProcessed, "total gas provided", txs.gasHandler.TotalGasProvided(), "total gas provided as scheduled", txs.gasHandler.TotalGasProvidedAsScheduled(), "total gas refunded", txs.gasHandler.TotalGasRefunded(), @@ -780,7 +813,7 @@ func (txs *transactions) AddTxsFromMiniBlocks(miniBlocks block.MiniBlockSlice) { searchFirst, ) if err != nil { - log.Warn("transactions.AddTxsFromMiniBlocks: GetTransactionHandler", "tx hash", txHash, "error", err.Error()) + log.Debug("transactions.AddTxsFromMiniBlocks: GetTransactionHandler", "tx hash", txHash, "error", err.Error()) continue } @@ -1410,35 +1443,36 @@ func (txs *transactions) ProcessMiniBlock( miniBlock *block.MiniBlock, haveTime func() bool, haveAdditionalTime func() bool, - getNumOfCrossInterMbsAndTxs func() (int, int), scheduledMode bool, -) (processedTxHashes [][]byte, numProcessedTxs int, err error) { + partialMbExecutionMode bool, + indexOfLastTxProcessed int, + preProcessorExecutionInfoHandler process.PreProcessorExecutionInfoHandler, +) ([][]byte, int, bool, error) { if miniBlock.Type != block.TxBlock { - return nil, 0, process.ErrWrongTypeInMiniBlock + return nil, indexOfLastTxProcessed, false, process.ErrWrongTypeInMiniBlock } numTXsProcessed := 0 var gasProvidedByTxInSelfShard uint64 - processedTxHashes = make([][]byte, 0) - miniBlockTxs, miniBlockTxHashes, err := txs.getAllTxsFromMiniBlock(miniBlock, haveTime, haveAdditionalTime) + var err error + var txIndex int + processedTxHashes := make([][]byte, 0) + + indexOfFirstTxToBeProcessed := indexOfLastTxProcessed + 1 + err = process.CheckIfIndexesAreOutOfBound(int32(indexOfFirstTxToBeProcessed), int32(len(miniBlock.TxHashes))-1, miniBlock) if err != nil { - return nil, 0, err + return nil, indexOfLastTxProcessed, false, err } - if txs.blockSizeComputation.IsMaxBlockSizeWithoutThrottleReached(1, len(miniBlockTxs)) { - return nil, 0, process.ErrMaxBlockSizeReached + miniBlockTxs, miniBlockTxHashes, err := txs.getAllTxsFromMiniBlock(miniBlock, haveTime, haveAdditionalTime) + if err != nil { + return nil, indexOfLastTxProcessed, false, err } - defer func() { - if err != nil { - for _, hash := range processedTxHashes { - log.Trace("transactions.ProcessMiniBlock: defer func()", "tx hash", hash) - } - - txs.gasHandler.RestoreGasSinceLastReset() - } - }() + if txs.blockSizeComputation.IsMaxBlockSizeWithoutThrottleReached(1, len(miniBlock.TxHashes)) { + return nil, indexOfLastTxProcessed, false, process.ErrMaxBlockSizeReached + } var totalGasConsumed uint64 if scheduledMode { @@ -1482,53 +1516,57 @@ func (txs *transactions) ProcessMiniBlock( ) }() - numOfOldCrossInterMbs, numOfOldCrossInterTxs := getNumOfCrossInterMbsAndTxs() + numOfOldCrossInterMbs, numOfOldCrossInterTxs := preProcessorExecutionInfoHandler.GetNumOfCrossInterMbsAndTxs() - for index := range miniBlockTxs { + for txIndex = indexOfFirstTxToBeProcessed; txIndex < len(miniBlockTxs); txIndex++ { if !haveTime() && !haveAdditionalTime() { - return processedTxHashes, index, process.ErrTimeIsOut + err = process.ErrTimeIsOut + break } gasProvidedByTxInSelfShard, err = txs.computeGasProvided( miniBlock.SenderShardID, miniBlock.ReceiverShardID, - 
miniBlockTxs[index], - miniBlockTxHashes[index], + miniBlockTxs[txIndex], + miniBlockTxHashes[txIndex], &gasInfo) if err != nil { - return processedTxHashes, index, err - } - - if scheduledMode { - txs.gasHandler.SetGasProvidedAsScheduled(gasProvidedByTxInSelfShard, miniBlockTxHashes[index]) - } else { - txs.gasHandler.SetGasProvided(gasProvidedByTxInSelfShard, miniBlockTxHashes[index]) + break } - processedTxHashes = append(processedTxHashes, miniBlockTxHashes[index]) - if txs.flagOptimizeGasUsedInCrossMiniBlocks.IsSet() { if gasInfo.totalGasConsumedInSelfShard > maxGasLimitUsedForDestMeTxs { - return processedTxHashes, index, process.ErrMaxGasLimitUsedForDestMeTxsIsReached + err = process.ErrMaxGasLimitUsedForDestMeTxsIsReached + break } } - txs.saveAccountBalanceForAddress(miniBlockTxs[index].GetRcvAddr()) + txs.saveAccountBalanceForAddress(miniBlockTxs[txIndex].GetRcvAddr()) if !scheduledMode { - _, err = txs.txProcessor.ProcessTransaction(miniBlockTxs[index]) + err = txs.processInNormalMode( + preProcessorExecutionInfoHandler, + miniBlockTxs[txIndex], + miniBlockTxHashes[txIndex], + &gasInfo, + gasProvidedByTxInSelfShard) if err != nil { - return processedTxHashes, index, err + break } - - txs.updateGasConsumedWithGasRefundedAndGasPenalized(miniBlockTxHashes[index], &gasInfo) + } else { + txs.gasHandler.SetGasProvidedAsScheduled(gasProvidedByTxInSelfShard, miniBlockTxHashes[txIndex]) } + processedTxHashes = append(processedTxHashes, miniBlockTxHashes[txIndex]) numTXsProcessed++ } - numOfCrtCrossInterMbs, numOfCrtCrossInterTxs := getNumOfCrossInterMbsAndTxs() + if err != nil && !partialMbExecutionMode { + return processedTxHashes, txIndex - 1, true, err + } + + numOfCrtCrossInterMbs, numOfCrtCrossInterTxs := preProcessorExecutionInfoHandler.GetNumOfCrossInterMbsAndTxs() numOfNewCrossInterMbs := numOfCrtCrossInterMbs - numOfOldCrossInterMbs numOfNewCrossInterTxs := numOfCrtCrossInterTxs - numOfOldCrossInterTxs @@ -1540,9 +1578,9 @@ func (txs *transactions) ProcessMiniBlock( ) numMiniBlocks := 1 + numOfNewCrossInterMbs - numTxs := len(miniBlockTxs) + numOfNewCrossInterTxs + numTxs := len(miniBlock.TxHashes) + numOfNewCrossInterTxs if txs.blockSizeComputation.IsMaxBlockSizeWithoutThrottleReached(numMiniBlocks, numTxs) { - return processedTxHashes, len(processedTxHashes), process.ErrMaxBlockSizeReached + return processedTxHashes, txIndex - 1, true, process.ErrMaxBlockSizeReached } txShardInfoToSet := &txShardInfo{senderShardID: miniBlock.SenderShardID, receiverShardID: miniBlock.ReceiverShardID} @@ -1557,12 +1595,34 @@ func (txs *transactions) ProcessMiniBlock( txs.blockSizeComputation.AddNumTxs(numTxs) if scheduledMode { - for index := range miniBlockTxs { + for index := indexOfFirstTxToBeProcessed; index <= txIndex-1; index++ { txs.scheduledTxsExecutionHandler.AddScheduledTx(miniBlockTxHashes[index], miniBlockTxs[index]) } } - return nil, len(processedTxHashes), nil + return nil, txIndex - 1, false, err +} + +func (txs *transactions) processInNormalMode( + preProcessorExecutionInfoHandler process.PreProcessorExecutionInfoHandler, + tx *transaction.Transaction, + txHash []byte, + gasInfo *gasConsumedInfo, + gasProvidedByTxInSelfShard uint64, +) error { + + snapshot := txs.handleProcessTransactionInit(preProcessorExecutionInfoHandler, txHash) + + _, err := txs.txProcessor.ProcessTransaction(tx) + if err != nil { + txs.handleProcessTransactionError(preProcessorExecutionInfoHandler, snapshot, txHash) + return err + } + + txs.updateGasConsumedWithGasRefundedAndGasPenalized(txHash, gasInfo) + 
txs.gasHandler.SetGasProvided(gasProvidedByTxInSelfShard, txHash) + + return nil } // CreateMarshalizedData marshalizes transactions and creates and saves them into a new structure diff --git a/process/block/preprocess/transactionsV2_test.go b/process/block/preprocess/transactionsV2_test.go index 58420e0730e..d3e707df6af 100644 --- a/process/block/preprocess/transactionsV2_test.go +++ b/process/block/preprocess/transactionsV2_test.go @@ -73,6 +73,7 @@ func createTransactionPreprocessor() *transactions { }, }, ScheduledTxsExecutionHandler: &testscommon.ScheduledTxsExecutionStub{}, + ProcessedMiniBlocksTracker: &testscommon.ProcessedMiniBlocksTrackerStub{}, } preprocessor, _ := NewTransactionPreprocessor(txPreProcArgs) diff --git a/process/block/preprocess/transactions_test.go b/process/block/preprocess/transactions_test.go index ca33eb3f1c7..0f0cc3c5396 100644 --- a/process/block/preprocess/transactions_test.go +++ b/process/block/preprocess/transactions_test.go @@ -232,6 +232,7 @@ func createDefaultTransactionsProcessorArgs() ArgsTransactionPreProcessor { ScheduledMiniBlocksEnableEpoch: 2, TxTypeHandler: &testscommon.TxTypeHandlerMock{}, ScheduledTxsExecutionHandler: &testscommon.ScheduledTxsExecutionStub{}, + ProcessedMiniBlocksTracker: &testscommon.ProcessedMiniBlocksTrackerStub{}, } } @@ -419,6 +420,16 @@ func TestTxsPreprocessor_NewTransactionPreprocessorNilScheduledTxsExecutionHandl assert.Equal(t, process.ErrNilScheduledTxsExecutionHandler, err) } +func TestTxsPreprocessor_NewTransactionPreprocessorNilProcessedMiniBlocksTracker(t *testing.T) { + t.Parallel() + + args := createDefaultTransactionsProcessorArgs() + args.ProcessedMiniBlocksTracker = nil + txs, err := NewTransactionPreprocessor(args) + assert.Nil(t, txs) + assert.Equal(t, process.ErrNilProcessedMiniBlocksTracker, err) +} + func TestTxsPreprocessor_NewTransactionPreprocessorOkValsShouldWork(t *testing.T) { t.Parallel() @@ -1113,15 +1124,15 @@ func TestTransactionPreprocessor_ProcessTxsToMeShouldUseCorrectSenderAndReceiver tx := transaction.Transaction{SndAddr: []byte("2"), RcvAddr: []byte("0")} txHash, _ := core.CalculateHash(preprocessor.marshalizer, preprocessor.hasher, tx) + miniBlock := &block.MiniBlock{ + TxHashes: [][]byte{txHash}, + SenderShardID: 1, + ReceiverShardID: 0, + Type: block.TxBlock, + } + miniBlockHash, _ := core.CalculateHash(preprocessor.marshalizer, preprocessor.hasher, miniBlock) body := block.Body{ - MiniBlocks: []*block.MiniBlock{ - { - TxHashes: [][]byte{txHash}, - SenderShardID: 1, - ReceiverShardID: 0, - Type: block.TxBlock, - }, - }, + MiniBlocks: []*block.MiniBlock{miniBlock}, } preprocessor.AddTxForCurrentBlock(txHash, &tx, 1, 0) @@ -1130,7 +1141,7 @@ func TestTransactionPreprocessor_ProcessTxsToMeShouldUseCorrectSenderAndReceiver assert.Equal(t, uint32(1), senderShardID) assert.Equal(t, uint32(0), receiverShardID) - _ = preprocessor.ProcessTxsToMe(&block.Header{}, &body, haveTimeTrue) + _ = preprocessor.ProcessTxsToMe(&block.Header{MiniBlockHeaders: []block.MiniBlockHeader{{Hash: miniBlockHash, TxCount: 1}}}, &body, haveTimeTrue) _, senderShardID, receiverShardID = preprocessor.GetTxInfoForCurrentBlock(txHash) assert.Equal(t, uint32(2), senderShardID) @@ -1198,11 +1209,14 @@ func TestTransactionsPreprocessor_ProcessMiniBlockShouldWork(t *testing.T) { } return nbTxsProcessed + 1, nbTxsProcessed * common.AdditionalScrForEachScCallOrSpecialTx } - txsToBeReverted, numTxsProcessed, err := txs.ProcessMiniBlock(miniBlock, haveTimeTrue, haveAdditionalTimeFalse, f, false) + 
preProcessorExecutionInfoHandlerMock := &testscommon.PreProcessorExecutionInfoHandlerMock{ + GetNumOfCrossInterMbsAndTxsCalled: f, + } + txsToBeReverted, indexOfLastTxProcessed, _, err := txs.ProcessMiniBlock(miniBlock, haveTimeTrue, haveAdditionalTimeFalse, false, false, -1, preProcessorExecutionInfoHandlerMock) assert.Equal(t, process.ErrMaxBlockSizeReached, err) assert.Equal(t, 3, len(txsToBeReverted)) - assert.Equal(t, 3, numTxsProcessed) + assert.Equal(t, 2, indexOfLastTxProcessed) f = func() (int, int) { if nbTxsProcessed == 0 { @@ -1210,11 +1224,14 @@ func TestTransactionsPreprocessor_ProcessMiniBlockShouldWork(t *testing.T) { } return nbTxsProcessed, nbTxsProcessed * common.AdditionalScrForEachScCallOrSpecialTx } - txsToBeReverted, numTxsProcessed, err = txs.ProcessMiniBlock(miniBlock, haveTimeTrue, haveAdditionalTimeFalse, f, false) + preProcessorExecutionInfoHandlerMock = &testscommon.PreProcessorExecutionInfoHandlerMock{ + GetNumOfCrossInterMbsAndTxsCalled: f, + } + txsToBeReverted, indexOfLastTxProcessed, _, err = txs.ProcessMiniBlock(miniBlock, haveTimeTrue, haveAdditionalTimeFalse, false, false, -1, preProcessorExecutionInfoHandlerMock) assert.Nil(t, err) assert.Equal(t, 0, len(txsToBeReverted)) - assert.Equal(t, 3, numTxsProcessed) + assert.Equal(t, 2, indexOfLastTxProcessed) } func TestTransactionsPreprocessor_ProcessMiniBlockShouldErrMaxGasLimitUsedForDestMeTxsIsReached(t *testing.T) { @@ -1261,19 +1278,23 @@ func TestTransactionsPreprocessor_ProcessMiniBlockShouldErrMaxGasLimitUsedForDes Type: block.TxBlock, } - txsToBeReverted, numTxsProcessed, err := txs.ProcessMiniBlock(miniBlock, haveTimeTrue, haveAdditionalTimeFalse, getNumOfCrossInterMbsAndTxsZero, false) + preProcessorExecutionInfoHandlerMock := &testscommon.PreProcessorExecutionInfoHandlerMock{ + GetNumOfCrossInterMbsAndTxsCalled: getNumOfCrossInterMbsAndTxsZero, + } + + txsToBeReverted, indexOfLastTxProcessed, _, err := txs.ProcessMiniBlock(miniBlock, haveTimeTrue, haveAdditionalTimeFalse, false, false, -1, preProcessorExecutionInfoHandlerMock) assert.Nil(t, err) assert.Equal(t, 0, len(txsToBeReverted)) - assert.Equal(t, 1, numTxsProcessed) + assert.Equal(t, 0, indexOfLastTxProcessed) txs.EpochConfirmed(2, 0) - txsToBeReverted, numTxsProcessed, err = txs.ProcessMiniBlock(miniBlock, haveTimeTrue, haveAdditionalTimeFalse, getNumOfCrossInterMbsAndTxsZero, false) + txsToBeReverted, indexOfLastTxProcessed, _, err = txs.ProcessMiniBlock(miniBlock, haveTimeTrue, haveAdditionalTimeFalse, false, false, -1, preProcessorExecutionInfoHandlerMock) assert.Equal(t, process.ErrMaxGasLimitUsedForDestMeTxsIsReached, err) - assert.Equal(t, 1, len(txsToBeReverted)) - assert.Equal(t, 0, numTxsProcessed) + assert.Equal(t, 0, len(txsToBeReverted)) + assert.Equal(t, -1, indexOfLastTxProcessed) } func TestTransactionsPreprocessor_ComputeGasProvidedShouldWork(t *testing.T) { @@ -2020,6 +2041,26 @@ func TestTransactions_RestoreBlockDataIntoPools(t *testing.T) { }) } +func TestTransactions_getMiniBlockHeaderOfMiniBlock(t *testing.T) { + t.Parallel() + + mbHash := []byte("mb_hash") + mbHeader := block.MiniBlockHeader{ + Hash: mbHash, + } + header := &block.Header{ + MiniBlockHeaders: []block.MiniBlockHeader{mbHeader}, + } + + miniBlockHeader, err := getMiniBlockHeaderOfMiniBlock(header, []byte("mb_hash_missing")) + assert.Nil(t, miniBlockHeader) + assert.Equal(t, process.ErrMissingMiniBlockHeader, err) + + miniBlockHeader, err = getMiniBlockHeaderOfMiniBlock(header, mbHash) + assert.Nil(t, err) + assert.Equal(t, &mbHeader, miniBlockHeader) 
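
Note on the assertions above: ProcessMiniBlock no longer reports a count of processed transactions but the index of the last one it executed, which is why fully processing a 3 transaction mini block is asserted as indexOfLastTxProcessed == 2 and "nothing processed" as -1. A hedged sketch of how a caller can act on the new return values; revertPartialExecution is a hypothetical callback standing in for the caller specific rollback, and the remaining helpers come from this test file.

func executeMiniBlock(
	preproc process.PreProcessor,
	miniBlock *block.MiniBlock,
	handler process.PreProcessorExecutionInfoHandler,
	revertPartialExecution func(hashes [][]byte),
) (int, error) {
	processedHashes, lastIndex, shouldRevert, err := preproc.ProcessMiniBlock(
		miniBlock, haveTimeTrue, haveAdditionalTimeFalse, false, false, -1, handler)
	if err != nil {
		if shouldRevert {
			// roll back the partially executed transactions identified by processedHashes
			revertPartialExecution(processedHashes)
		}
		return lastIndex, err
	}

	// on success lastIndex equals len(miniBlock.TxHashes) - 1
	return lastIndex, nil
}
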
+} + func createMockBlockBody() (*block.Body, []*txInfoHolder) { txsShard1 := createMockTransactions(2, 1, 1, 1000) txsShard2to1 := createMockTransactions(2, 2, 1, 2000) @@ -2096,3 +2137,68 @@ func createMockTransactions(numTxs int, sndShId byte, rcvShId byte, startNonce u return txs } + +func TestTransactions_getIndexesOfLastTxProcessed(t *testing.T) { + t.Parallel() + + t.Run("calculating hash error should not get indexes", func(t *testing.T) { + t.Parallel() + + args := createDefaultTransactionsProcessorArgs() + args.Marshalizer = &testscommon.MarshalizerMock{ + Fail: true, + } + txs, _ := NewTransactionPreprocessor(args) + + miniBlock := &block.MiniBlock{} + headerHandler := &block.Header{} + + pi, err := txs.getIndexesOfLastTxProcessed(miniBlock, headerHandler) + assert.Nil(t, pi) + assert.Equal(t, testscommon.ErrMockMarshalizer, err) + }) + + t.Run("missing mini block header should not get indexes", func(t *testing.T) { + t.Parallel() + + args := createDefaultTransactionsProcessorArgs() + args.Marshalizer = &testscommon.MarshalizerMock{ + Fail: false, + } + txs, _ := NewTransactionPreprocessor(args) + + miniBlock := &block.MiniBlock{} + headerHandler := &block.Header{} + + pi, err := txs.getIndexesOfLastTxProcessed(miniBlock, headerHandler) + assert.Nil(t, pi) + assert.Equal(t, process.ErrMissingMiniBlockHeader, err) + }) + + t.Run("should get indexes", func(t *testing.T) { + t.Parallel() + + args := createDefaultTransactionsProcessorArgs() + args.Marshalizer = &testscommon.MarshalizerMock{ + Fail: false, + } + txs, _ := NewTransactionPreprocessor(args) + + miniBlock := &block.MiniBlock{} + miniBlockHash, _ := core.CalculateHash(txs.marshalizer, txs.hasher, miniBlock) + mbh := block.MiniBlockHeader{ + Hash: miniBlockHash, + TxCount: 6, + } + _ = mbh.SetIndexOfFirstTxProcessed(2) + _ = mbh.SetIndexOfLastTxProcessed(4) + headerHandler := &block.Header{ + MiniBlockHeaders: []block.MiniBlockHeader{mbh}, + } + + pi, err := txs.getIndexesOfLastTxProcessed(miniBlock, headerHandler) + assert.Nil(t, err) + assert.Equal(t, int32(-1), pi.indexOfLastTxProcessed) + assert.Equal(t, mbh.GetIndexOfLastTxProcessed(), pi.indexOfLastTxProcessedByProposer) + }) +} diff --git a/process/block/preprocess/validatorInfoPreProcessor.go b/process/block/preprocess/validatorInfoPreProcessor.go index ef77c2f34f1..70d29f876fe 100644 --- a/process/block/preprocess/validatorInfoPreProcessor.go +++ b/process/block/preprocess/validatorInfoPreProcessor.go @@ -149,24 +149,32 @@ func (vip *validatorInfoPreprocessor) CreateAndProcessMiniBlocks(_ func() bool, } // ProcessMiniBlock does nothing -func (vip *validatorInfoPreprocessor) ProcessMiniBlock(miniBlock *block.MiniBlock, _ func() bool, _ func() bool, _ func() (int, int), _ bool) ([][]byte, int, error) { +func (vip *validatorInfoPreprocessor) ProcessMiniBlock( + miniBlock *block.MiniBlock, + _ func() bool, + _ func() bool, + _ bool, + _ bool, + indexOfLastTxProcessed int, + _ process.PreProcessorExecutionInfoHandler, +) ([][]byte, int, bool, error) { if miniBlock.Type != block.PeerBlock { - return nil, 0, process.ErrWrongTypeInMiniBlock + return nil, indexOfLastTxProcessed, false, process.ErrWrongTypeInMiniBlock } if miniBlock.SenderShardID != core.MetachainShardId { - return nil, 0, process.ErrValidatorInfoMiniBlockNotFromMeta + return nil, indexOfLastTxProcessed, false, process.ErrValidatorInfoMiniBlockNotFromMeta } //TODO: We need another function in the BlockSizeComputationHandler implementation that will better handle //the PeerBlock miniblocks as those are not 
hashes if vip.blockSizeComputation.IsMaxBlockSizeWithoutThrottleReached(1, len(miniBlock.TxHashes)) { - return nil, 0, process.ErrMaxBlockSizeReached + return nil, indexOfLastTxProcessed, false, process.ErrMaxBlockSizeReached } vip.blockSizeComputation.AddNumMiniBlocks(1) vip.blockSizeComputation.AddNumTxs(len(miniBlock.TxHashes)) - return nil, len(miniBlock.TxHashes), nil + return nil, len(miniBlock.TxHashes) - 1, false, nil } // CreateMarshalizedData does nothing diff --git a/process/block/preprocess/validatorInfoPreProcessor_test.go b/process/block/preprocess/validatorInfoPreProcessor_test.go index 43e961a2bba..88d71c708a8 100644 --- a/process/block/preprocess/validatorInfoPreProcessor_test.go +++ b/process/block/preprocess/validatorInfoPreProcessor_test.go @@ -95,7 +95,11 @@ func TestNewValidatorInfoPreprocessor_ProcessMiniBlockInvalidMiniBlockTypeShould Type: 0, } - _, _, err := rtp.ProcessMiniBlock(&mb1, haveTimeTrue, haveAdditionalTimeFalse, getNumOfCrossInterMbsAndTxsZero, false) + preProcessorExecutionInfoHandlerMock := &testscommon.PreProcessorExecutionInfoHandlerMock{ + GetNumOfCrossInterMbsAndTxsCalled: getNumOfCrossInterMbsAndTxsZero, + } + + _, _, _, err := rtp.ProcessMiniBlock(&mb1, haveTimeTrue, haveAdditionalTimeFalse, false, false, -1, preProcessorExecutionInfoHandlerMock) assert.Equal(t, process.ErrWrongTypeInMiniBlock, err) } @@ -116,7 +120,11 @@ func TestNewValidatorInfoPreprocessor_ProcessMiniBlockShouldWork(t *testing.T) { Type: block.PeerBlock, } - _, _, err := rtp.ProcessMiniBlock(&mb1, haveTimeTrue, haveAdditionalTimeFalse, getNumOfCrossInterMbsAndTxsZero, false) + preProcessorExecutionInfoHandlerMock := &testscommon.PreProcessorExecutionInfoHandlerMock{ + GetNumOfCrossInterMbsAndTxsCalled: getNumOfCrossInterMbsAndTxsZero, + } + + _, _, _, err := rtp.ProcessMiniBlock(&mb1, haveTimeTrue, haveAdditionalTimeFalse, false, false, -1, preProcessorExecutionInfoHandlerMock) assert.Nil(t, err) } @@ -137,7 +145,11 @@ func TestNewValidatorInfoPreprocessor_ProcessMiniBlockNotFromMeta(t *testing.T) Type: block.PeerBlock, } - _, _, err := rtp.ProcessMiniBlock(&mb1, haveTimeTrue, haveAdditionalTimeFalse, getNumOfCrossInterMbsAndTxsZero, false) + preProcessorExecutionInfoHandlerMock := &testscommon.PreProcessorExecutionInfoHandlerMock{ + GetNumOfCrossInterMbsAndTxsCalled: getNumOfCrossInterMbsAndTxsZero, + } + + _, _, _, err := rtp.ProcessMiniBlock(&mb1, haveTimeTrue, haveAdditionalTimeFalse, false, false, -1, preProcessorExecutionInfoHandlerMock) assert.Equal(t, process.ErrValidatorInfoMiniBlockNotFromMeta, err) } diff --git a/process/block/processedMb/processedMiniBlocks.go b/process/block/processedMb/processedMiniBlocks.go index ced47b8f27a..c7552c3124a 100644 --- a/process/block/processedMb/processedMiniBlocks.go +++ b/process/block/processedMb/processedMiniBlocks.go @@ -9,104 +9,147 @@ import ( var log = logger.GetOrCreate("process/processedMb") -// MiniBlockHashes will keep a list of miniblock hashes as keys in a map for easy access -type MiniBlockHashes map[string]struct{} +// ProcessedMiniBlockInfo will keep the info about a processed mini block +type ProcessedMiniBlockInfo struct { + FullyProcessed bool + IndexOfLastTxProcessed int32 +} + +// miniBlocksInfo will keep a list of mini blocks hashes as keys, with mini blocks info as value +type miniBlocksInfo map[string]*ProcessedMiniBlockInfo -// ProcessedMiniBlockTracker is used to store all processed mini blocks hashes grouped by a metahash -type ProcessedMiniBlockTracker struct { - processedMiniBlocks 
map[string]MiniBlockHashes +// processedMiniBlocksTracker is used to store all processed mini blocks hashes grouped by a meta hash +type processedMiniBlocksTracker struct { + processedMiniBlocks map[string]miniBlocksInfo mutProcessedMiniBlocks sync.RWMutex } -// NewProcessedMiniBlocks will create a complex type of processedMb -func NewProcessedMiniBlocks() *ProcessedMiniBlockTracker { - return &ProcessedMiniBlockTracker{ - processedMiniBlocks: make(map[string]MiniBlockHashes), +// NewProcessedMiniBlocksTracker will create a processed mini blocks tracker object +func NewProcessedMiniBlocksTracker() *processedMiniBlocksTracker { + return &processedMiniBlocksTracker{ + processedMiniBlocks: make(map[string]miniBlocksInfo), } } -// AddMiniBlockHash will add a miniblock hash -func (pmb *ProcessedMiniBlockTracker) AddMiniBlockHash(metaBlockHash string, miniBlockHash string) { - pmb.mutProcessedMiniBlocks.Lock() - defer pmb.mutProcessedMiniBlocks.Unlock() +// SetProcessedMiniBlockInfo will set a processed mini block info for the given meta block hash and mini block hash +func (pmbt *processedMiniBlocksTracker) SetProcessedMiniBlockInfo(metaBlockHash []byte, miniBlockHash []byte, processedMbInfo *ProcessedMiniBlockInfo) { + pmbt.mutProcessedMiniBlocks.Lock() + defer pmbt.mutProcessedMiniBlocks.Unlock() - miniBlocksProcessed, ok := pmb.processedMiniBlocks[metaBlockHash] + miniBlocksProcessed, ok := pmbt.processedMiniBlocks[string(metaBlockHash)] if !ok { - miniBlocksProcessed = make(MiniBlockHashes) - miniBlocksProcessed[miniBlockHash] = struct{}{} - pmb.processedMiniBlocks[metaBlockHash] = miniBlocksProcessed - - return + miniBlocksProcessed = make(miniBlocksInfo) + pmbt.processedMiniBlocks[string(metaBlockHash)] = miniBlocksProcessed } - miniBlocksProcessed[miniBlockHash] = struct{}{} + miniBlocksProcessed[string(miniBlockHash)] = &ProcessedMiniBlockInfo{ + FullyProcessed: processedMbInfo.FullyProcessed, + IndexOfLastTxProcessed: processedMbInfo.IndexOfLastTxProcessed, + } } // RemoveMetaBlockHash will remove a meta block hash -func (pmb *ProcessedMiniBlockTracker) RemoveMetaBlockHash(metaBlockHash string) { - pmb.mutProcessedMiniBlocks.Lock() - delete(pmb.processedMiniBlocks, metaBlockHash) - pmb.mutProcessedMiniBlocks.Unlock() +func (pmbt *processedMiniBlocksTracker) RemoveMetaBlockHash(metaBlockHash []byte) { + pmbt.mutProcessedMiniBlocks.Lock() + defer pmbt.mutProcessedMiniBlocks.Unlock() + + delete(pmbt.processedMiniBlocks, string(metaBlockHash)) } // RemoveMiniBlockHash will remove a mini block hash -func (pmb *ProcessedMiniBlockTracker) RemoveMiniBlockHash(miniBlockHash string) { - pmb.mutProcessedMiniBlocks.Lock() - for metaHash, miniBlocksProcessed := range pmb.processedMiniBlocks { - delete(miniBlocksProcessed, miniBlockHash) +func (pmbt *processedMiniBlocksTracker) RemoveMiniBlockHash(miniBlockHash []byte) { + pmbt.mutProcessedMiniBlocks.Lock() + defer pmbt.mutProcessedMiniBlocks.Unlock() + + for metaHash, miniBlocksProcessed := range pmbt.processedMiniBlocks { + delete(miniBlocksProcessed, string(miniBlockHash)) if len(miniBlocksProcessed) == 0 { - delete(pmb.processedMiniBlocks, metaHash) + delete(pmbt.processedMiniBlocks, metaHash) + } + } +} + +// GetProcessedMiniBlocksInfo will return all processed mini blocks info for a meta block hash +func (pmbt *processedMiniBlocksTracker) GetProcessedMiniBlocksInfo(metaBlockHash []byte) map[string]*ProcessedMiniBlockInfo { + pmbt.mutProcessedMiniBlocks.RLock() + defer pmbt.mutProcessedMiniBlocks.RUnlock() + + processedMiniBlocksInfo := 
make(map[string]*ProcessedMiniBlockInfo) + for miniBlockHash, processedMiniBlockInfo := range pmbt.processedMiniBlocks[string(metaBlockHash)] { + processedMiniBlocksInfo[miniBlockHash] = &ProcessedMiniBlockInfo{ + FullyProcessed: processedMiniBlockInfo.FullyProcessed, + IndexOfLastTxProcessed: processedMiniBlockInfo.IndexOfLastTxProcessed, } } - pmb.mutProcessedMiniBlocks.Unlock() + + return processedMiniBlocksInfo } -// GetProcessedMiniBlocksHashes will return all processed miniblocks for a metablock -func (pmb *ProcessedMiniBlockTracker) GetProcessedMiniBlocksHashes(metaBlockHash string) map[string]struct{} { - pmb.mutProcessedMiniBlocks.RLock() - processedMiniBlocksHashes := make(map[string]struct{}) - for hash, value := range pmb.processedMiniBlocks[metaBlockHash] { - processedMiniBlocksHashes[hash] = value +// GetProcessedMiniBlockInfo will return processed mini block info for a mini block hash +func (pmbt *processedMiniBlocksTracker) GetProcessedMiniBlockInfo(miniBlockHash []byte) (*ProcessedMiniBlockInfo, []byte) { + pmbt.mutProcessedMiniBlocks.RLock() + defer pmbt.mutProcessedMiniBlocks.RUnlock() + + for metaBlockHash, miniBlocksInfo := range pmbt.processedMiniBlocks { + processedMiniBlockInfo, hashExists := miniBlocksInfo[string(miniBlockHash)] + if !hashExists { + continue + } + + return &ProcessedMiniBlockInfo{ + FullyProcessed: processedMiniBlockInfo.FullyProcessed, + IndexOfLastTxProcessed: processedMiniBlockInfo.IndexOfLastTxProcessed, + }, []byte(metaBlockHash) } - pmb.mutProcessedMiniBlocks.RUnlock() - return processedMiniBlocksHashes + return &ProcessedMiniBlockInfo{ + FullyProcessed: false, + IndexOfLastTxProcessed: -1, + }, nil } -// IsMiniBlockProcessed will return true if a mini block is processed -func (pmb *ProcessedMiniBlockTracker) IsMiniBlockProcessed(metaBlockHash string, miniBlockHash string) bool { - pmb.mutProcessedMiniBlocks.RLock() - defer pmb.mutProcessedMiniBlocks.RUnlock() +// IsMiniBlockFullyProcessed will return true if a mini block is fully processed +func (pmbt *processedMiniBlocksTracker) IsMiniBlockFullyProcessed(metaBlockHash []byte, miniBlockHash []byte) bool { + pmbt.mutProcessedMiniBlocks.RLock() + defer pmbt.mutProcessedMiniBlocks.RUnlock() - miniBlocksProcessed, ok := pmb.processedMiniBlocks[metaBlockHash] + miniBlocksProcessed, ok := pmbt.processedMiniBlocks[string(metaBlockHash)] if !ok { return false } - _, isProcessed := miniBlocksProcessed[miniBlockHash] - return isProcessed + processedMbInfo, hashExists := miniBlocksProcessed[string(miniBlockHash)] + if !hashExists { + return false + } + + return processedMbInfo.FullyProcessed } // ConvertProcessedMiniBlocksMapToSlice will convert a map[string]map[string]struct{} in a slice of MiniBlocksInMeta -func (pmb *ProcessedMiniBlockTracker) ConvertProcessedMiniBlocksMapToSlice() []bootstrapStorage.MiniBlocksInMeta { - pmb.mutProcessedMiniBlocks.RLock() - defer pmb.mutProcessedMiniBlocks.RUnlock() +func (pmbt *processedMiniBlocksTracker) ConvertProcessedMiniBlocksMapToSlice() []bootstrapStorage.MiniBlocksInMeta { + pmbt.mutProcessedMiniBlocks.RLock() + defer pmbt.mutProcessedMiniBlocks.RUnlock() - if len(pmb.processedMiniBlocks) == 0 { + if len(pmbt.processedMiniBlocks) == 0 { return nil } - miniBlocksInMetaBlocks := make([]bootstrapStorage.MiniBlocksInMeta, 0, len(pmb.processedMiniBlocks)) + miniBlocksInMetaBlocks := make([]bootstrapStorage.MiniBlocksInMeta, 0, len(pmbt.processedMiniBlocks)) - for metaHash, miniBlocksHashes := range pmb.processedMiniBlocks { + for metaHash, miniBlocksInfo := 
range pmbt.processedMiniBlocks { miniBlocksInMeta := bootstrapStorage.MiniBlocksInMeta{ - MetaHash: []byte(metaHash), - MiniBlocksHashes: make([][]byte, 0, len(miniBlocksHashes)), + MetaHash: []byte(metaHash), + MiniBlocksHashes: make([][]byte, 0, len(miniBlocksInfo)), + FullyProcessed: make([]bool, 0, len(miniBlocksInfo)), + IndexOfLastTxProcessed: make([]int32, 0, len(miniBlocksInfo)), } - for miniBlockHash := range miniBlocksHashes { + for miniBlockHash, processedMiniBlockInfo := range miniBlocksInfo { miniBlocksInMeta.MiniBlocksHashes = append(miniBlocksInMeta.MiniBlocksHashes, []byte(miniBlockHash)) + miniBlocksInMeta.FullyProcessed = append(miniBlocksInMeta.FullyProcessed, processedMiniBlockInfo.FullyProcessed) + miniBlocksInMeta.IndexOfLastTxProcessed = append(miniBlocksInMeta.IndexOfLastTxProcessed, processedMiniBlockInfo.IndexOfLastTxProcessed) } miniBlocksInMetaBlocks = append(miniBlocksInMetaBlocks, miniBlocksInMeta) @@ -115,32 +158,52 @@ func (pmb *ProcessedMiniBlockTracker) ConvertProcessedMiniBlocksMapToSlice() []b return miniBlocksInMetaBlocks } -// ConvertSliceToProcessedMiniBlocksMap will convert a slice of MiniBlocksInMeta in an map[string]MiniBlockHashes -func (pmb *ProcessedMiniBlockTracker) ConvertSliceToProcessedMiniBlocksMap(miniBlocksInMetaBlocks []bootstrapStorage.MiniBlocksInMeta) { - pmb.mutProcessedMiniBlocks.Lock() - defer pmb.mutProcessedMiniBlocks.Unlock() +// ConvertSliceToProcessedMiniBlocksMap will convert a slice of MiniBlocksInMeta in a map[string]MiniBlockHashes +func (pmbt *processedMiniBlocksTracker) ConvertSliceToProcessedMiniBlocksMap(miniBlocksInMetaBlocks []bootstrapStorage.MiniBlocksInMeta) { + pmbt.mutProcessedMiniBlocks.Lock() + defer pmbt.mutProcessedMiniBlocks.Unlock() for _, miniBlocksInMeta := range miniBlocksInMetaBlocks { - miniBlocksHashes := make(MiniBlockHashes) - for _, miniBlockHash := range miniBlocksInMeta.MiniBlocksHashes { - miniBlocksHashes[string(miniBlockHash)] = struct{}{} + pmbt.processedMiniBlocks[string(miniBlocksInMeta.MetaHash)] = getMiniBlocksInfo(miniBlocksInMeta) + } +} + +func getMiniBlocksInfo(miniBlocksInMeta bootstrapStorage.MiniBlocksInMeta) miniBlocksInfo { + mbsInfo := make(miniBlocksInfo) + + for index, miniBlockHash := range miniBlocksInMeta.MiniBlocksHashes { + fullyProcessed := miniBlocksInMeta.IsFullyProcessed(index) + indexOfLastTxProcessed := miniBlocksInMeta.GetIndexOfLastTxProcessedInMiniBlock(index) + + mbsInfo[string(miniBlockHash)] = &ProcessedMiniBlockInfo{ + FullyProcessed: fullyProcessed, + IndexOfLastTxProcessed: indexOfLastTxProcessed, } - pmb.processedMiniBlocks[string(miniBlocksInMeta.MetaHash)] = miniBlocksHashes } + + return mbsInfo } -// DisplayProcessedMiniBlocks will display all miniblocks hashes and meta block hash from the map -func (pmb *ProcessedMiniBlockTracker) DisplayProcessedMiniBlocks() { - log.Debug("processed mini blocks applied") +// DisplayProcessedMiniBlocks will display all mini blocks hashes and meta block hash from the map +func (pmbt *processedMiniBlocksTracker) DisplayProcessedMiniBlocks() { + pmbt.mutProcessedMiniBlocks.RLock() + defer pmbt.mutProcessedMiniBlocks.RUnlock() - pmb.mutProcessedMiniBlocks.RLock() - for metaBlockHash, miniBlocksHashes := range pmb.processedMiniBlocks { + log.Debug("processed mini blocks applied") + for metaBlockHash, miniBlocksInfo := range pmbt.processedMiniBlocks { log.Debug("processed", "meta hash", []byte(metaBlockHash)) - for miniBlockHash := range miniBlocksHashes { + for miniBlockHash, processedMiniBlockInfo := range miniBlocksInfo 
{ log.Debug("processed", - "mini block hash", []byte(miniBlockHash)) + "mini block hash", []byte(miniBlockHash), + "index of last tx processed", processedMiniBlockInfo.IndexOfLastTxProcessed, + "fully processed", processedMiniBlockInfo.FullyProcessed, + ) } } - pmb.mutProcessedMiniBlocks.RUnlock() +} + +// IsInterfaceNil returns true if there is no value under the interface +func (pmbt *processedMiniBlocksTracker) IsInterfaceNil() bool { + return pmbt == nil } diff --git a/process/block/processedMb/processedMiniBlocks_test.go b/process/block/processedMb/processedMiniBlocks_test.go index afc1cec7937..67abfb5bb07 100644 --- a/process/block/processedMb/processedMiniBlocks_test.go +++ b/process/block/processedMb/processedMiniBlocks_test.go @@ -8,74 +8,99 @@ import ( "github.com/stretchr/testify/assert" ) -func TestProcessedMiniBlocks_AddMiniBlockHashShouldWork(t *testing.T) { +func TestProcessedMiniBlocks_SetProcessedMiniBlockInfoShouldWork(t *testing.T) { t.Parallel() - pmb := processedMb.NewProcessedMiniBlocks() + pmbt := processedMb.NewProcessedMiniBlocksTracker() - mbHash1 := "hash1" - mbHash2 := "hash2" - mtbHash1 := "meta1" - mtbHash2 := "meta2" + mbHash1 := []byte("hash1") + mbHash2 := []byte("hash2") + mtbHash1 := []byte("meta1") + mtbHash2 := []byte("meta2") - pmb.AddMiniBlockHash(mtbHash1, mbHash1) - assert.True(t, pmb.IsMiniBlockProcessed(mtbHash1, mbHash1)) + pmbt.SetProcessedMiniBlockInfo(mtbHash1, mbHash1, &processedMb.ProcessedMiniBlockInfo{FullyProcessed: true}) + assert.True(t, pmbt.IsMiniBlockFullyProcessed(mtbHash1, mbHash1)) - pmb.AddMiniBlockHash(mtbHash2, mbHash1) - assert.True(t, pmb.IsMiniBlockProcessed(mtbHash2, mbHash1)) + pmbt.SetProcessedMiniBlockInfo(mtbHash2, mbHash1, &processedMb.ProcessedMiniBlockInfo{FullyProcessed: true}) + assert.True(t, pmbt.IsMiniBlockFullyProcessed(mtbHash2, mbHash1)) - pmb.AddMiniBlockHash(mtbHash1, mbHash2) - assert.True(t, pmb.IsMiniBlockProcessed(mtbHash1, mbHash2)) + pmbt.SetProcessedMiniBlockInfo(mtbHash1, mbHash2, &processedMb.ProcessedMiniBlockInfo{FullyProcessed: true}) + assert.True(t, pmbt.IsMiniBlockFullyProcessed(mtbHash1, mbHash2)) - pmb.RemoveMiniBlockHash(mbHash1) - assert.False(t, pmb.IsMiniBlockProcessed(mtbHash1, mbHash1)) + pmbt.RemoveMiniBlockHash(mbHash1) + assert.False(t, pmbt.IsMiniBlockFullyProcessed(mtbHash1, mbHash1)) - pmb.RemoveMiniBlockHash(mbHash1) - assert.False(t, pmb.IsMiniBlockProcessed(mtbHash1, mbHash1)) + pmbt.RemoveMiniBlockHash(mbHash1) + assert.False(t, pmbt.IsMiniBlockFullyProcessed(mtbHash1, mbHash1)) - pmb.RemoveMetaBlockHash(mtbHash2) - assert.False(t, pmb.IsMiniBlockProcessed(mtbHash2, mbHash1)) + pmbt.RemoveMetaBlockHash(mtbHash2) + assert.False(t, pmbt.IsMiniBlockFullyProcessed(mtbHash2, mbHash1)) } -func TestProcessedMiniBlocks_GetProcessedMiniBlocksHashes(t *testing.T) { +func TestProcessedMiniBlocks_GetProcessedMiniBlocksInfo(t *testing.T) { t.Parallel() - pmb := processedMb.NewProcessedMiniBlocks() + pmbt := processedMb.NewProcessedMiniBlocksTracker() - mbHash1 := "hash1" - mbHash2 := "hash2" - mtbHash1 := "meta1" - mtbHash2 := "meta2" + mbHash1 := []byte("hash1") + mbHash2 := []byte("hash2") + mtbHash1 := []byte("meta1") + mtbHash2 := []byte("meta2") - pmb.AddMiniBlockHash(mtbHash1, mbHash1) - pmb.AddMiniBlockHash(mtbHash1, mbHash2) - pmb.AddMiniBlockHash(mtbHash2, mbHash2) + pmbt.SetProcessedMiniBlockInfo(mtbHash1, mbHash1, &processedMb.ProcessedMiniBlockInfo{FullyProcessed: true}) + pmbt.SetProcessedMiniBlockInfo(mtbHash1, mbHash2, &processedMb.ProcessedMiniBlockInfo{FullyProcessed: 
true}) + pmbt.SetProcessedMiniBlockInfo(mtbHash2, mbHash2, &processedMb.ProcessedMiniBlockInfo{FullyProcessed: true}) - mapData := pmb.GetProcessedMiniBlocksHashes(mtbHash1) - assert.NotNil(t, mapData[mbHash1]) - assert.NotNil(t, mapData[mbHash2]) + mapData := pmbt.GetProcessedMiniBlocksInfo(mtbHash1) + assert.NotNil(t, mapData[string(mbHash1)]) + assert.NotNil(t, mapData[string(mbHash2)]) - mapData = pmb.GetProcessedMiniBlocksHashes(mtbHash2) - assert.NotNil(t, mapData[mbHash1]) + mapData = pmbt.GetProcessedMiniBlocksInfo(mtbHash2) + assert.NotNil(t, mapData[string(mbHash2)]) } func TestProcessedMiniBlocks_ConvertSliceToProcessedMiniBlocksMap(t *testing.T) { t.Parallel() - pmb := processedMb.NewProcessedMiniBlocks() + pmbt := processedMb.NewProcessedMiniBlocksTracker() - mbHash1 := "hash1" - mtbHash1 := "meta1" + mbHash1 := []byte("hash1") + mtbHash1 := []byte("meta1") data1 := bootstrapStorage.MiniBlocksInMeta{ - MetaHash: []byte(mtbHash1), - MiniBlocksHashes: [][]byte{[]byte(mbHash1)}, + MetaHash: mtbHash1, + MiniBlocksHashes: [][]byte{mbHash1}, + FullyProcessed: []bool{true}, + IndexOfLastTxProcessed: []int32{69}, } miniBlocksInMeta := []bootstrapStorage.MiniBlocksInMeta{data1} - pmb.ConvertSliceToProcessedMiniBlocksMap(miniBlocksInMeta) - assert.True(t, pmb.IsMiniBlockProcessed(mtbHash1, mbHash1)) + pmbt.ConvertSliceToProcessedMiniBlocksMap(miniBlocksInMeta) + assert.True(t, pmbt.IsMiniBlockFullyProcessed(mtbHash1, mbHash1)) - convertedData := pmb.ConvertProcessedMiniBlocksMapToSlice() + convertedData := pmbt.ConvertProcessedMiniBlocksMapToSlice() assert.Equal(t, miniBlocksInMeta, convertedData) } + +func TestProcessedMiniBlocks_GetProcessedMiniBlockInfo(t *testing.T) { + t.Parallel() + + mbHash := []byte("mb_hash") + metaHash := []byte("meta_hash") + processedMbInfo := &processedMb.ProcessedMiniBlockInfo{ + FullyProcessed: true, + IndexOfLastTxProcessed: 69, + } + pmbt := processedMb.NewProcessedMiniBlocksTracker() + pmbt.SetProcessedMiniBlockInfo(metaHash, mbHash, processedMbInfo) + + processedMiniBlockInfo, processedMetaHash := pmbt.GetProcessedMiniBlockInfo(nil) + assert.Nil(t, processedMetaHash) + assert.False(t, processedMiniBlockInfo.FullyProcessed) + assert.Equal(t, int32(-1), processedMiniBlockInfo.IndexOfLastTxProcessed) + + processedMiniBlockInfo, processedMetaHash = pmbt.GetProcessedMiniBlockInfo(mbHash) + assert.Equal(t, metaHash, processedMetaHash) + assert.Equal(t, processedMbInfo.FullyProcessed, processedMiniBlockInfo.FullyProcessed) + assert.Equal(t, processedMbInfo.IndexOfLastTxProcessed, processedMiniBlockInfo.IndexOfLastTxProcessed) +} diff --git a/process/block/shardblock.go b/process/block/shardblock.go index 67b01a2089c..2c116a9bf92 100644 --- a/process/block/shardblock.go +++ b/process/block/shardblock.go @@ -26,17 +26,18 @@ var _ process.BlockProcessor = (*shardProcessor)(nil) const timeBetweenCheckForEpochStart = 100 * time.Millisecond -type createMbsAndProcessTxsDestMeInfo struct { - currMetaHdr data.HeaderHandler - currMetaHdrHash []byte - processedMiniBlocksHashes map[string]struct{} - haveTime func() bool - haveAdditionalTime func() bool - miniBlocks block.MiniBlockSlice - hdrAdded bool - numTxsAdded uint32 - numHdrsAdded uint32 - scheduledMode bool +type createAndProcessMiniBlocksDestMeInfo struct { + currMetaHdr data.HeaderHandler + currMetaHdrHash []byte + currProcessedMiniBlocksInfo map[string]*processedMb.ProcessedMiniBlockInfo + allProcessedMiniBlocksInfo map[string]*processedMb.ProcessedMiniBlockInfo + haveTime func() bool + haveAdditionalTime 
func() bool + miniBlocks block.MiniBlockSlice + hdrAdded bool + numTxsAdded uint32 + numHdrsAdded uint32 + scheduledMode bool } // shardProcessor implements shardProcessor interface and actually it tries to execute block @@ -45,7 +46,6 @@ type shardProcessor struct { metaBlockFinality uint32 chRcvAllMetaHdrs chan bool - processedMiniBlocks *processedMb.ProcessedMiniBlockTracker userStatePruningQueue core.Queue processStatusHandler common.ProcessStatusHandler } @@ -108,6 +108,7 @@ func NewShardProcessor(arguments ArgShardProcessor) (*shardProcessor, error) { economicsData: arguments.CoreComponents.EconomicsData(), scheduledTxsExecutionHandler: arguments.ScheduledTxsExecutionHandler, scheduledMiniBlocksEnableEpoch: arguments.ScheduledMiniBlocksEnableEpoch, + processedMiniBlocksTracker: arguments.ProcessedMiniBlocksTracker, } sp := shardProcessor{ @@ -126,7 +127,6 @@ func NewShardProcessor(arguments ArgShardProcessor) (*shardProcessor, error) { sp.chRcvAllMetaHdrs = make(chan bool) sp.hdrsForCurrBlock = newHdrForBlock() - sp.processedMiniBlocks = processedMb.NewProcessedMiniBlocks() headersPool := sp.dataPool.Headers() headersPool.RegisterHandler(sp.receivedMetaBlock) @@ -688,7 +688,7 @@ func (sp *shardProcessor) RestoreBlockIntoPools(headerHandler data.HeaderHandler } miniBlockHashes := header.MapMiniBlockHashesToShards() - err := sp.restoreMetaBlockIntoPool(miniBlockHashes, header.GetMetaBlockHashes()) + err := sp.restoreMetaBlockIntoPool(headerHandler, miniBlockHashes, header.GetMetaBlockHashes()) if err != nil { return err } @@ -700,10 +700,15 @@ func (sp *shardProcessor) RestoreBlockIntoPools(headerHandler data.HeaderHandler return nil } -func (sp *shardProcessor) restoreMetaBlockIntoPool(mapMiniBlockHashes map[string]uint32, metaBlockHashes [][]byte) error { +func (sp *shardProcessor) restoreMetaBlockIntoPool( + headerHandler data.HeaderHandler, + mapMiniBlockHashes map[string]uint32, + metaBlockHashes [][]byte, +) error { headersPool := sp.dataPool.Headers() - mapMetaHashMiniBlockHashes := make(map[string][][]byte, len(metaBlockHashes)) + mapMetaHashMiniBlockHashes := make(map[string][][]byte) + mapMetaHashMetaBlock := make(map[string]*block.MetaBlock) for _, metaBlockHash := range metaBlockHashes { metaBlock, errNotCritical := process.GetMetaHeaderFromStorage(metaBlockHash, sp.marshalizer, sp.store) @@ -713,6 +718,7 @@ func (sp *shardProcessor) restoreMetaBlockIntoPool(mapMiniBlockHashes map[string continue } + mapMetaHashMetaBlock[string(metaBlockHash)] = metaBlock processedMiniBlocks := metaBlock.GetMiniBlockHeadersWithDst(sp.shardCoordinator.SelfId()) for mbHash := range processedMiniBlocks { mapMetaHashMiniBlockHashes[string(metaBlockHash)] = append(mapMetaHashMiniBlockHashes[string(metaBlockHash)], []byte(mbHash)) @@ -741,16 +747,87 @@ func (sp *shardProcessor) restoreMetaBlockIntoPool(mapMiniBlockHashes map[string } for metaBlockHash, miniBlockHashes := range mapMetaHashMiniBlockHashes { - for _, miniBlockHash := range miniBlockHashes { - sp.processedMiniBlocks.AddMiniBlockHash(metaBlockHash, string(miniBlockHash)) + sp.setProcessedMiniBlocksInfo(miniBlockHashes, metaBlockHash, mapMetaHashMetaBlock[metaBlockHash]) + } + + sp.rollBackProcessedMiniBlocksInfo(headerHandler, mapMiniBlockHashes) + + return nil +} + +func (sp *shardProcessor) setProcessedMiniBlocksInfo(miniBlockHashes [][]byte, metaBlockHash string, metaBlock *block.MetaBlock) { + for _, miniBlockHash := range miniBlockHashes { + indexOfLastTxProcessed := getIndexOfLastTxProcessedInMiniBlock(miniBlockHash, metaBlock) 
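
The restore path above and the helpers below wire the new ProcessedMiniBlocksTracker into the shard processor, and the tests earlier in this diff exercise the same API directly. For orientation, here is a minimal, self-contained sketch of how the tracker is used, assuming only the API visible in this diff (NewProcessedMiniBlocksTracker, SetProcessedMiniBlockInfo, IsMiniBlockFullyProcessed, GetProcessedMiniBlockInfo, GetProcessedMiniBlocksInfo); the hash values and the index 41 are illustrative, not taken from the PR.

```go
package main

import (
	"fmt"

	"github.com/ElrondNetwork/elrond-go/process/block/processedMb"
)

func main() {
	// Illustrative hashes and index, not values from the PR.
	metaHash := []byte("meta_hash")
	mbHash := []byte("mb_hash")

	tracker := processedMb.NewProcessedMiniBlocksTracker()

	// Record a partially executed mini block: only txs 0..41 are done.
	tracker.SetProcessedMiniBlockInfo(metaHash, mbHash, &processedMb.ProcessedMiniBlockInfo{
		FullyProcessed:         false,
		IndexOfLastTxProcessed: 41,
	})

	fmt.Println(tracker.IsMiniBlockFullyProcessed(metaHash, mbHash)) // false

	info, owningMetaHash := tracker.GetProcessedMiniBlockInfo(mbHash)
	fmt.Println(string(owningMetaHash), info.IndexOfLastTxProcessed) // meta_hash 41

	// Per-metablock view, as snapshotted by createAndProcessMiniBlocksDstMe.
	for hash, mbInfo := range tracker.GetProcessedMiniBlocksInfo(metaHash) {
		fmt.Printf("%s -> fully processed: %v\n", hash, mbInfo.FullyProcessed)
	}
}
```

An IndexOfLastTxProcessed of -1 means no transaction of the mini block has been executed yet, while TxCount-1 together with FullyProcessed set to true marks it as completely executed, which is the convention the restore code above follows.
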
+ sp.processedMiniBlocksTracker.SetProcessedMiniBlockInfo([]byte(metaBlockHash), miniBlockHash, &processedMb.ProcessedMiniBlockInfo{ + FullyProcessed: true, + IndexOfLastTxProcessed: indexOfLastTxProcessed, + }) + } +} + +func getIndexOfLastTxProcessedInMiniBlock(miniBlockHash []byte, metaBlock *block.MetaBlock) int32 { + for _, mbh := range metaBlock.MiniBlockHeaders { + if bytes.Equal(mbh.Hash, miniBlockHash) { + return int32(mbh.TxCount) - 1 + } + } + + for _, shardData := range metaBlock.ShardInfo { + for _, mbh := range shardData.ShardMiniBlockHeaders { + if bytes.Equal(mbh.Hash, miniBlockHash) { + return int32(mbh.TxCount) - 1 + } } } + log.Warn("shardProcessor.getIndexOfLastTxProcessedInMiniBlock", + "miniBlock hash", miniBlockHash, + "metaBlock round", metaBlock.Round, + "metaBlock nonce", metaBlock.Nonce, + "error", process.ErrMissingMiniBlock) + + return common.MaxIndexOfTxInMiniBlock +} + +func (sp *shardProcessor) rollBackProcessedMiniBlocksInfo(headerHandler data.HeaderHandler, mapMiniBlockHashes map[string]uint32) { for miniBlockHash := range mapMiniBlockHashes { - sp.processedMiniBlocks.RemoveMiniBlockHash(miniBlockHash) + miniBlockHeader := process.GetMiniBlockHeaderWithHash(headerHandler, []byte(miniBlockHash)) + if miniBlockHeader == nil { + log.Warn("shardProcessor.rollBackProcessedMiniBlocksInfo: GetMiniBlockHeaderWithHash", + "mb hash", miniBlockHash, + "error", process.ErrMissingMiniBlockHeader) + continue + } + + if miniBlockHeader.GetSenderShardID() == sp.shardCoordinator.SelfId() { + continue + } + + sp.rollBackProcessedMiniBlockInfo(miniBlockHeader, []byte(miniBlockHash)) } +} - return nil +func (sp *shardProcessor) rollBackProcessedMiniBlockInfo(miniBlockHeader data.MiniBlockHeaderHandler, miniBlockHash []byte) { + indexOfFirstTxProcessed := miniBlockHeader.GetIndexOfFirstTxProcessed() + if indexOfFirstTxProcessed == 0 { + sp.processedMiniBlocksTracker.RemoveMiniBlockHash(miniBlockHash) + return + } + + _, metaBlockHash := sp.processedMiniBlocksTracker.GetProcessedMiniBlockInfo(miniBlockHash) + if metaBlockHash == nil { + log.Warn("shardProcessor.rollBackProcessedMiniBlockInfo: mini block was not found in ProcessedMiniBlockTracker component", + "sender shard", miniBlockHeader.GetSenderShardID(), + "receiver shard", miniBlockHeader.GetReceiverShardID(), + "tx count", miniBlockHeader.GetTxCount(), + "mb hash", miniBlockHash) + return + } + + sp.processedMiniBlocksTracker.SetProcessedMiniBlockInfo(metaBlockHash, miniBlockHash, &processedMb.ProcessedMiniBlockInfo{ + FullyProcessed: false, + IndexOfLastTxProcessed: indexOfFirstTxProcessed - 1, + }) } // CreateBlock creates the final block and header for the current round @@ -789,12 +866,12 @@ func (sp *shardProcessor) CreateBlock( sp.epochNotifier.CheckEpoch(shardHdr) sp.blockChainHook.SetCurrentHeader(shardHdr) - body, err := sp.createBlockBody(shardHdr, haveTime) + body, processedMiniBlocksDestMeInfo, err := sp.createBlockBody(shardHdr, haveTime) if err != nil { return nil, nil, err } - finalBody, err := sp.applyBodyToHeader(shardHdr, body) + finalBody, err := sp.applyBodyToHeader(shardHdr, body, processedMiniBlocksDestMeInfo) if err != nil { return nil, nil, err } @@ -812,7 +889,7 @@ func (sp *shardProcessor) CreateBlock( // createBlockBody creates a a list of miniblocks by filling them with transactions out of the transactions pools // as long as the transactions limit for the block has not been reached and there is still time to add transactions -func (sp *shardProcessor) createBlockBody(shardHdr 
data.HeaderHandler, haveTime func() bool) (*block.Body, error) { +func (sp *shardProcessor) createBlockBody(shardHdr data.HeaderHandler, haveTime func() bool) (*block.Body, map[string]*processedMb.ProcessedMiniBlockInfo, error) { sp.blockSizeThrottler.ComputeCurrentMaxSize() log.Debug("started creating block body", @@ -821,14 +898,14 @@ func (sp *shardProcessor) createBlockBody(shardHdr data.HeaderHandler, haveTime "nonce", shardHdr.GetNonce(), ) - miniBlocks, err := sp.createMiniBlocks(haveTime, shardHdr.GetPrevRandSeed()) + miniBlocks, processedMiniBlocksDestMeInfo, err := sp.createMiniBlocks(haveTime, shardHdr.GetPrevRandSeed()) if err != nil { - return nil, err + return nil, nil, err } sp.requestHandler.SetEpoch(shardHdr.GetEpoch()) - return miniBlocks, nil + return miniBlocks, processedMiniBlocksDestMeInfo, nil } // CommitBlock commits the block in the blockchain if everything was checked successfully @@ -1000,7 +1077,7 @@ func (sp *shardProcessor) CommitBlock( round: header.GetRound(), lastSelfNotarizedHeaders: sp.getBootstrapHeadersInfo(selfNotarizedHeaders, selfNotarizedHeadersHashes), highestFinalBlockNonce: sp.forkDetector.GetHighestFinalBlockNonce(), - processedMiniBlocks: sp.processedMiniBlocks.ConvertProcessedMiniBlocksMapToSlice(), + processedMiniBlocks: sp.processedMiniBlocksTracker.ConvertProcessedMiniBlocksMapToSlice(), nodesCoordinatorConfigKey: nodesCoordinatorKey, epochStartTriggerConfigKey: epochStartKey, } @@ -1307,11 +1384,6 @@ func (sp *shardProcessor) saveLastNotarizedHeader(shardId uint32, processedHdrs return nil } -// ApplyProcessedMiniBlocks will apply processed mini blocks -func (sp *shardProcessor) ApplyProcessedMiniBlocks(processedMiniBlocks *processedMb.ProcessedMiniBlockTracker) { - sp.processedMiniBlocks = processedMiniBlocks -} - // CreateNewHeader creates a new header func (sp *shardProcessor) CreateNewHeader(round uint64, nonce uint64) (data.HeaderHandler, error) { sp.roundNotifier.CheckRound(round) @@ -1443,7 +1515,7 @@ func (sp *shardProcessor) getOrderedProcessedMetaBlocksFromHeader(header data.He "num miniblocks", len(miniBlockHashes), ) - processedMetaBlocks, err := sp.getOrderedProcessedMetaBlocksFromMiniBlockHashes(miniBlockHashes) + processedMetaBlocks, err := sp.getOrderedProcessedMetaBlocksFromMiniBlockHashes(miniBlockHeaders, miniBlockHashes) if err != nil { return nil, err } @@ -1451,18 +1523,18 @@ func (sp *shardProcessor) getOrderedProcessedMetaBlocksFromHeader(header data.He return processedMetaBlocks, nil } -func (sp *shardProcessor) addProcessedCrossMiniBlocksFromHeader(header data.HeaderHandler) error { - if check.IfNil(header) { +func (sp *shardProcessor) addProcessedCrossMiniBlocksFromHeader(headerHandler data.HeaderHandler) error { + if check.IfNil(headerHandler) { return process.ErrNilBlockHeader } - shardHeader, ok := header.(data.ShardHeaderHandler) + shardHeader, ok := headerHandler.(data.ShardHeaderHandler) if !ok { return process.ErrWrongTypeAssertion } - miniBlockHashes := make(map[int][]byte, len(header.GetMiniBlockHeaderHandlers())) - for i := 0; i < len(header.GetMiniBlockHeaderHandlers()); i++ { - miniBlockHashes[i] = header.GetMiniBlockHeaderHandlers()[i].GetHash() + miniBlockHashes := make(map[int][]byte, len(headerHandler.GetMiniBlockHeaderHandlers())) + for i := 0; i < len(headerHandler.GetMiniBlockHeaderHandlers()); i++ { + miniBlockHashes[i] = headerHandler.GetMiniBlockHeaderHandlers()[i].GetHash() } sp.hdrsForCurrBlock.mutHdrsForBlock.RLock() @@ -1487,7 +1559,16 @@ func (sp *shardProcessor) 
addProcessedCrossMiniBlocksFromHeader(header data.Head continue } - sp.processedMiniBlocks.AddMiniBlockHash(string(metaBlockHash), string(miniBlockHash)) + miniBlockHeader := process.GetMiniBlockHeaderWithHash(headerHandler, miniBlockHash) + if miniBlockHeader == nil { + log.Warn("shardProcessor.addProcessedCrossMiniBlocksFromHeader: GetMiniBlockHeaderWithHash", "mb hash", miniBlockHash, "error", process.ErrMissingMiniBlockHeader) + continue + } + + sp.processedMiniBlocksTracker.SetProcessedMiniBlockInfo(metaBlockHash, miniBlockHash, &processedMb.ProcessedMiniBlockInfo{ + FullyProcessed: miniBlockHeader.IsFinal(), + IndexOfLastTxProcessed: miniBlockHeader.GetIndexOfLastTxProcessed(), + }) delete(miniBlockHashes, key) } @@ -1498,6 +1579,7 @@ func (sp *shardProcessor) addProcessedCrossMiniBlocksFromHeader(header data.Head } func (sp *shardProcessor) getOrderedProcessedMetaBlocksFromMiniBlockHashes( + miniBlockHeaders []data.MiniBlockHeaderHandler, miniBlockHashes map[int][]byte, ) ([]data.HeaderHandler, error) { @@ -1522,7 +1604,7 @@ func (sp *shardProcessor) getOrderedProcessedMetaBlocksFromMiniBlockHashes( crossMiniBlockHashes := metaBlock.GetMiniBlockHeadersWithDst(sp.shardCoordinator.SelfId()) for hash := range crossMiniBlockHashes { - processedCrossMiniBlocksHashes[hash] = sp.processedMiniBlocks.IsMiniBlockProcessed(metaBlockHash, hash) + processedCrossMiniBlocksHashes[hash] = sp.processedMiniBlocksTracker.IsMiniBlockFullyProcessed([]byte(metaBlockHash), []byte(hash)) } for key, miniBlockHash := range miniBlockHashes { @@ -1531,7 +1613,7 @@ func (sp *shardProcessor) getOrderedProcessedMetaBlocksFromMiniBlockHashes( continue } - processedCrossMiniBlocksHashes[string(miniBlockHash)] = true + processedCrossMiniBlocksHashes[string(miniBlockHash)] = miniBlockHeaders[key].IsFinal() delete(miniBlockHashes, key) } @@ -1585,7 +1667,7 @@ func (sp *shardProcessor) updateCrossShardInfo(processedMetaHdrs []data.HeaderHa sp.saveMetaHeader(hdr, headerHash, marshalizedHeader) - sp.processedMiniBlocks.RemoveMetaBlockHash(string(headerHash)) + sp.processedMiniBlocksTracker.RemoveMetaBlockHash(headerHash) } return nil @@ -1753,9 +1835,7 @@ func (sp *shardProcessor) getAllMiniBlockDstMeFromMeta(header data.ShardHeaderHa } // full verification through metachain header -func (sp *shardProcessor) createAndProcessMiniBlocksDstMe( - haveTime func() bool, -) (block.MiniBlockSlice, uint32, uint32, error) { +func (sp *shardProcessor) createAndProcessMiniBlocksDstMe(haveTime func() bool) (*createAndProcessMiniBlocksDestMeInfo, error) { log.Debug("createAndProcessMiniBlocksDstMe has been started") sw := core.NewStopWatch() @@ -1764,7 +1844,7 @@ func (sp *shardProcessor) createAndProcessMiniBlocksDstMe( sw.Stop("ComputeLongestMetaChainFromLastNotarized") log.Debug("measurements", sw.GetMeasurements()...) 
if err != nil { - return nil, 0, 0, err + return nil, err } log.Debug("metablocks ordered", @@ -1773,20 +1853,21 @@ func (sp *shardProcessor) createAndProcessMiniBlocksDstMe( lastMetaHdr, _, err := sp.blockTracker.GetLastCrossNotarizedHeader(core.MetachainShardId) if err != nil { - return nil, 0, 0, err + return nil, err } haveAdditionalTimeFalse := func() bool { return false } - createAndProcessInfo := &createMbsAndProcessTxsDestMeInfo{ - haveTime: haveTime, - haveAdditionalTime: haveAdditionalTimeFalse, - miniBlocks: make(block.MiniBlockSlice, 0), - numTxsAdded: uint32(0), - numHdrsAdded: uint32(0), - scheduledMode: false, + createAndProcessInfo := &createAndProcessMiniBlocksDestMeInfo{ + haveTime: haveTime, + haveAdditionalTime: haveAdditionalTimeFalse, + miniBlocks: make(block.MiniBlockSlice, 0), + allProcessedMiniBlocksInfo: make(map[string]*processedMb.ProcessedMiniBlockInfo), + numTxsAdded: uint32(0), + numHdrsAdded: uint32(0), + scheduledMode: false, } // do processing in order @@ -1825,12 +1906,12 @@ func (sp *shardProcessor) createAndProcessMiniBlocksDstMe( continue } - createAndProcessInfo.processedMiniBlocksHashes = sp.processedMiniBlocks.GetProcessedMiniBlocksHashes(string(createAndProcessInfo.currMetaHdrHash)) + createAndProcessInfo.currProcessedMiniBlocksInfo = sp.processedMiniBlocksTracker.GetProcessedMiniBlocksInfo(createAndProcessInfo.currMetaHdrHash) createAndProcessInfo.hdrAdded = false shouldContinue, errCreated := sp.createMbsAndProcessCrossShardTransactionsDstMe(createAndProcessInfo) if errCreated != nil { - return nil, 0, 0, errCreated + return nil, errCreated } if !shouldContinue { break @@ -1854,15 +1935,15 @@ func (sp *shardProcessor) createAndProcessMiniBlocksDstMe( "num txs added", createAndProcessInfo.numTxsAdded, "num hdrs added", createAndProcessInfo.numHdrsAdded) - return createAndProcessInfo.miniBlocks, createAndProcessInfo.numTxsAdded, createAndProcessInfo.numHdrsAdded, nil + return createAndProcessInfo, nil } func (sp *shardProcessor) createMbsAndProcessCrossShardTransactionsDstMe( - createAndProcessInfo *createMbsAndProcessTxsDestMeInfo, + createAndProcessInfo *createAndProcessMiniBlocksDestMeInfo, ) (bool, error) { currMiniBlocksAdded, currNumTxsAdded, hdrProcessFinished, errCreated := sp.txCoordinator.CreateMbsAndProcessCrossShardTransactionsDstMe( createAndProcessInfo.currMetaHdr, - createAndProcessInfo.processedMiniBlocksHashes, + createAndProcessInfo.currProcessedMiniBlocksInfo, createAndProcessInfo.haveTime, createAndProcessInfo.haveAdditionalTime, createAndProcessInfo.scheduledMode) @@ -1870,6 +1951,13 @@ func (sp *shardProcessor) createMbsAndProcessCrossShardTransactionsDstMe( return false, errCreated } + for miniBlockHash, processedMiniBlockInfo := range createAndProcessInfo.currProcessedMiniBlocksInfo { + createAndProcessInfo.allProcessedMiniBlocksInfo[miniBlockHash] = &processedMb.ProcessedMiniBlockInfo{ + FullyProcessed: processedMiniBlockInfo.FullyProcessed, + IndexOfLastTxProcessed: processedMiniBlockInfo.IndexOfLastTxProcessed, + } + } + // all txs processed, add to processed miniblocks createAndProcessInfo.miniBlocks = append(createAndProcessInfo.miniBlocks, currMiniBlocksAdded...) 
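
The loop just above copies each ProcessedMiniBlockInfo field by field into allProcessedMiniBlocksInfo instead of storing the pointer taken from currProcessedMiniBlocksInfo. A plausible reason is to keep the aggregated snapshot independent of later updates to the per-metablock map; the stand-alone sketch below uses a stand-in struct (not the real processedMb type) to show the aliasing a shared pointer would cause.

```go
package main

import "fmt"

// Minimal stand-in for processedMb.ProcessedMiniBlockInfo, used only to
// illustrate the aliasing that the copy loop above avoids.
type processedInfo struct {
	FullyProcessed         bool
	IndexOfLastTxProcessed int32
}

func main() {
	current := map[string]*processedInfo{
		"mb_hash": {FullyProcessed: false, IndexOfLastTxProcessed: 3},
	}

	// Sharing the pointer would let later updates leak into the snapshot.
	aliased := map[string]*processedInfo{"mb_hash": current["mb_hash"]}

	// Copying the fields, as the loop above does, keeps the snapshot stable.
	snapshot := map[string]*processedInfo{
		"mb_hash": {
			FullyProcessed:         current["mb_hash"].FullyProcessed,
			IndexOfLastTxProcessed: current["mb_hash"].IndexOfLastTxProcessed,
		},
	}

	current["mb_hash"].IndexOfLastTxProcessed = 7

	fmt.Println(aliased["mb_hash"].IndexOfLastTxProcessed)  // 7 - follows the mutation
	fmt.Println(snapshot["mb_hash"].IndexOfLastTxProcessed) // 3 - frozen copy
}
```
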
createAndProcessInfo.numTxsAdded += currNumTxsAdded @@ -1919,8 +2007,9 @@ func (sp *shardProcessor) requestMetaHeadersIfNeeded(hdrsAdded uint32, lastMetaH } } -func (sp *shardProcessor) createMiniBlocks(haveTime func() bool, randomness []byte) (*block.Body, error) { +func (sp *shardProcessor) createMiniBlocks(haveTime func() bool, randomness []byte) (*block.Body, map[string]*processedMb.ProcessedMiniBlockInfo, error) { var miniBlocks block.MiniBlockSlice + processedMiniBlocksDestMeInfo := make(map[string]*processedMb.ProcessedMiniBlockInfo) if sp.flagScheduledMiniBlocks.IsSet() { miniBlocks = sp.scheduledTxsExecutionHandler.GetScheduledMiniBlocks() @@ -1943,7 +2032,7 @@ func (sp *shardProcessor) createMiniBlocks(haveTime func() bool, randomness []by } log.Debug("creating mini blocks has been finished", "num miniblocks", len(miniBlocks)) - return &block.Body{MiniBlocks: miniBlocks}, nil + return &block.Body{MiniBlocks: miniBlocks}, processedMiniBlocksDestMeInfo, nil } if !haveTime() { @@ -1955,24 +2044,26 @@ func (sp *shardProcessor) createMiniBlocks(haveTime func() bool, randomness []by } log.Debug("creating mini blocks has been finished", "num miniblocks", len(miniBlocks)) - return &block.Body{MiniBlocks: miniBlocks}, nil + return &block.Body{MiniBlocks: miniBlocks}, processedMiniBlocksDestMeInfo, nil } startTime := time.Now() - mbsToMe, numTxs, numMetaHeaders, err := sp.createAndProcessMiniBlocksDstMe(haveTime) + createAndProcessMBsDestMeInfo, err := sp.createAndProcessMiniBlocksDstMe(haveTime) elapsedTime := time.Since(startTime) log.Debug("elapsed time to create mbs to me", "time", elapsedTime) if err != nil { log.Debug("createAndProcessCrossMiniBlocksDstMe", "error", err.Error()) } + if createAndProcessMBsDestMeInfo != nil { + processedMiniBlocksDestMeInfo = createAndProcessMBsDestMeInfo.allProcessedMiniBlocksInfo + if len(createAndProcessMBsDestMeInfo.miniBlocks) > 0 { + miniBlocks = append(miniBlocks, createAndProcessMBsDestMeInfo.miniBlocks...) - if len(mbsToMe) > 0 { - miniBlocks = append(miniBlocks, mbsToMe...) - - log.Debug("processed miniblocks and txs with destination in self shard", - "num miniblocks", len(mbsToMe), - "num txs", numTxs, - "num meta headers", numMetaHeaders) + log.Debug("processed miniblocks and txs with destination in self shard", + "num miniblocks", len(createAndProcessMBsDestMeInfo.miniBlocks), + "num txs", createAndProcessMBsDestMeInfo.numTxsAdded, + "num meta headers", createAndProcessMBsDestMeInfo.numHdrsAdded) + } } if sp.blockTracker.IsShardStuck(core.MetachainShardId) { @@ -1986,7 +2077,7 @@ func (sp *shardProcessor) createMiniBlocks(haveTime func() bool, randomness []by } log.Debug("creating mini blocks has been finished", "num miniblocks", len(miniBlocks)) - return &block.Body{MiniBlocks: miniBlocks}, nil + return &block.Body{MiniBlocks: miniBlocks}, processedMiniBlocksDestMeInfo, nil } startTime = time.Now() @@ -1997,9 +2088,9 @@ func (sp *shardProcessor) createMiniBlocks(haveTime func() bool, randomness []by if len(mbsFromMe) > 0 { miniBlocks = append(miniBlocks, mbsFromMe...) 
- numTxs = 0 + numTxs := 0 for _, mb := range mbsFromMe { - numTxs += uint32(len(mb.TxHashes)) + numTxs += len(mb.TxHashes) } log.Debug("processed miniblocks and txs from self shard", @@ -2008,11 +2099,15 @@ func (sp *shardProcessor) createMiniBlocks(haveTime func() bool, randomness []by } log.Debug("creating mini blocks has been finished", "num miniblocks", len(miniBlocks)) - return &block.Body{MiniBlocks: miniBlocks}, nil + return &block.Body{MiniBlocks: miniBlocks}, processedMiniBlocksDestMeInfo, nil } // applyBodyToHeader creates a miniblock header list given a block body -func (sp *shardProcessor) applyBodyToHeader(shardHeader data.ShardHeaderHandler, body *block.Body) (*block.Body, error) { +func (sp *shardProcessor) applyBodyToHeader( + shardHeader data.ShardHeaderHandler, + body *block.Body, + processedMiniBlocksDestMeInfo map[string]*processedMb.ProcessedMiniBlockInfo, +) (*block.Body, error) { sw := core.NewStopWatch() sw.Start("applyBodyToHeader") defer func() { @@ -2055,7 +2150,7 @@ func (sp *shardProcessor) applyBodyToHeader(shardHeader data.ShardHeaderHandler, newBody := deleteSelfReceiptsMiniBlocks(body) sw.Start("createMiniBlockHeaders") - totalTxCount, miniBlockHeaderHandlers, err := sp.createMiniBlockHeaderHandlers(newBody) + totalTxCount, miniBlockHeaderHandlers, err := sp.createMiniBlockHeaderHandlers(newBody, processedMiniBlocksDestMeInfo) sw.Stop("createMiniBlockHeaders") if err != nil { return nil, err diff --git a/process/block/shardblock_test.go b/process/block/shardblock_test.go index 3f13cc79b8b..c938cfe4086 100644 --- a/process/block/shardblock_test.go +++ b/process/block/shardblock_test.go @@ -27,6 +27,7 @@ import ( "github.com/ElrondNetwork/elrond-go/dataRetriever/blockchain" "github.com/ElrondNetwork/elrond-go/process" blproc "github.com/ElrondNetwork/elrond-go/process/block" + "github.com/ElrondNetwork/elrond-go/process/block/processedMb" "github.com/ElrondNetwork/elrond-go/process/coordinator" "github.com/ElrondNetwork/elrond-go/process/factory/shard" "github.com/ElrondNetwork/elrond-go/process/mock" @@ -277,7 +278,7 @@ func TestShardProcess_CreateNewBlockHeaderProcessHeaderExpectCheckRoundCalled(t shardProcessor, _ := blproc.NewShardProcessor(arguments) header := &block.Header{Round: round} - bodyHandler, _ := shardProcessor.CreateBlockBody(header, func() bool { return true }) + bodyHandler, _, _ := shardProcessor.CreateBlockBody(header, func() bool { return true }) headerHandler, err := shardProcessor.CreateNewHeader(round, 1) require.Nil(t, err) @@ -474,6 +475,7 @@ func TestShardProcessor_ProcessBlockWithInvalidTransactionShouldErr(t *testing.T 2, &testscommon.TxTypeHandlerMock{}, &testscommon.ScheduledTxsExecutionStub{}, + &testscommon.ProcessedMiniBlocksTrackerStub{}, ) container, _ := factory.Create() @@ -697,6 +699,7 @@ func TestShardProcessor_ProcessBlockWithErrOnProcessBlockTransactionsCallShouldR 2, &testscommon.TxTypeHandlerMock{}, &testscommon.ScheduledTxsExecutionStub{}, + &testscommon.ProcessedMiniBlocksTrackerStub{}, ) container, _ := factory.Create() @@ -2273,7 +2276,7 @@ func TestShardProcessor_CreateTxBlockBodyWithDirtyAccStateShouldReturnEmptyBody( sp, _ := blproc.NewShardProcessor(arguments) - bl, err := sp.CreateBlockBody(&block.Header{PrevRandSeed: []byte("randSeed")}, func() bool { return true }) + bl, _, err := sp.CreateBlockBody(&block.Header{PrevRandSeed: []byte("randSeed")}, func() bool { return true }) assert.Nil(t, err) assert.Equal(t, &block.Body{}, bl) } @@ -2299,7 +2302,7 @@ func 
TestShardProcessor_CreateTxBlockBodyWithNoTimeShouldReturnEmptyBody(t *test haveTimeTrue := func() bool { return false } - bl, err := sp.CreateBlockBody(&block.Header{PrevRandSeed: []byte("randSeed")}, haveTimeTrue) + bl, _, err := sp.CreateBlockBody(&block.Header{PrevRandSeed: []byte("randSeed")}, haveTimeTrue) assert.Nil(t, err) assert.Equal(t, &block.Body{}, bl) } @@ -2323,7 +2326,7 @@ func TestShardProcessor_CreateTxBlockBodyOK(t *testing.T) { } sp, _ := blproc.NewShardProcessor(arguments) - blk, err := sp.CreateBlockBody(&block.Header{PrevRandSeed: []byte("randSeed")}, haveTimeTrue) + blk, _, err := sp.CreateBlockBody(&block.Header{PrevRandSeed: []byte("randSeed")}, haveTimeTrue) assert.NotNil(t, blk) assert.Nil(t, err) } @@ -2445,7 +2448,7 @@ func TestBlockProcessor_ApplyBodyToHeaderNilBodyError(t *testing.T) { bp, _ := blproc.NewShardProcessor(arguments) hdr := &block.Header{} - _, err := bp.ApplyBodyToHeader(hdr, nil) + _, err := bp.ApplyBodyToHeader(hdr, nil, nil) assert.Equal(t, process.ErrNilBlockBody, err) } @@ -2457,7 +2460,7 @@ func TestBlockProcessor_ApplyBodyToHeaderShouldNotReturnNil(t *testing.T) { bp, _ := blproc.NewShardProcessor(arguments) hdr := &block.Header{} - _, err := bp.ApplyBodyToHeader(hdr, &block.Body{}) + _, err := bp.ApplyBodyToHeader(hdr, &block.Body{}, make(map[string]*processedMb.ProcessedMiniBlockInfo)) assert.Nil(t, err) assert.NotNil(t, hdr) } @@ -2488,7 +2491,7 @@ func TestShardProcessor_ApplyBodyToHeaderShouldErrWhenMarshalizerErrors(t *testi }, } hdr := &block.Header{} - _, err := bp.ApplyBodyToHeader(hdr, body) + _, err := bp.ApplyBodyToHeader(hdr, body, make(map[string]*processedMb.ProcessedMiniBlockInfo)) assert.NotNil(t, err) } @@ -2518,7 +2521,7 @@ func TestShardProcessor_ApplyBodyToHeaderReturnsOK(t *testing.T) { }, } hdr := &block.Header{} - _, err := bp.ApplyBodyToHeader(hdr, body) + _, err := bp.ApplyBodyToHeader(hdr, body, make(map[string]*processedMb.ProcessedMiniBlockInfo)) assert.Nil(t, err) assert.Equal(t, len(body.MiniBlocks), len(hdr.MiniBlockHeaders)) } @@ -2591,6 +2594,7 @@ func TestShardProcessor_MarshalizedDataToBroadcastShouldWork(t *testing.T) { 2, &testscommon.TxTypeHandlerMock{}, &testscommon.ScheduledTxsExecutionStub{}, + &testscommon.ProcessedMiniBlocksTrackerStub{}, ) container, _ := factory.Create() @@ -2701,6 +2705,7 @@ func TestShardProcessor_MarshalizedDataMarshalWithoutSuccess(t *testing.T) { 2, &testscommon.TxTypeHandlerMock{}, &testscommon.ScheduledTxsExecutionStub{}, + &testscommon.ProcessedMiniBlocksTrackerStub{}, ) container, _ := factory.Create() @@ -3095,6 +3100,7 @@ func TestShardProcessor_CreateMiniBlocksShouldWorkWithIntraShardTxs(t *testing.T 2, &testscommon.TxTypeHandlerMock{}, &testscommon.ScheduledTxsExecutionStub{}, + &testscommon.ProcessedMiniBlocksTrackerStub{}, ) container, _ := factory.Create() @@ -3112,7 +3118,7 @@ func TestShardProcessor_CreateMiniBlocksShouldWorkWithIntraShardTxs(t *testing.T bp, err := blproc.NewShardProcessor(arguments) require.Nil(t, err) - blockBody, err := bp.CreateMiniBlocks(func() bool { return true }) + blockBody, _, err := bp.CreateMiniBlocks(func() bool { return true }) assert.Nil(t, err) // testing execution @@ -3278,6 +3284,7 @@ func TestShardProcessor_RestoreBlockIntoPoolsShouldWork(t *testing.T) { 2, &testscommon.TxTypeHandlerMock{}, &testscommon.ScheduledTxsExecutionStub{}, + &testscommon.ProcessedMiniBlocksTrackerStub{}, ) container, _ := factory.Create() @@ -4089,7 +4096,7 @@ func TestShardProcessor_RestoreMetaBlockIntoPoolShouldPass(t *testing.T) { 
assert.Equal(t, nil, metaBlockRestored) assert.Error(t, err) - err = sp.RestoreMetaBlockIntoPool(miniblockHashes, metablockHashes) + err = sp.RestoreMetaBlockIntoPool(miniblockHashes, metablockHashes, &block.Header{}) metaBlockRestored, _ = poolFake.Headers().GetHeaderByHash(metaHash) @@ -4444,7 +4451,7 @@ func TestShardProcessor_RestoreMetaBlockIntoPoolVerifyMiniblocks(t *testing.T) { } } - err = sp.RestoreMetaBlockIntoPool(miniblockHashes, metablockHashes) + err = sp.RestoreMetaBlockIntoPool(miniblockHashes, metablockHashes, &block.Header{}) metaBlockRestored, _ = poolMock.Headers().GetHeaderByHash(metaHash) @@ -5063,7 +5070,185 @@ func TestShardProcessor_createMiniBlocks(t *testing.T) { require.Nil(t, err) sp.EpochConfirmed(1, 0) - _, err = sp.CreateMiniBlocks(func() bool { return false }) + _, _, err = sp.CreateMiniBlocks(func() bool { return false }) require.Nil(t, err) require.True(t, called.IsSet()) } + +func TestShardProcessor_RollBackProcessedMiniBlockInfo(t *testing.T) { + t.Parallel() + + arguments := CreateMockArguments(createComponentHolderMocks()) + processedMiniBlocksTracker := processedMb.NewProcessedMiniBlocksTracker() + arguments.ProcessedMiniBlocksTracker = processedMiniBlocksTracker + sp, _ := blproc.NewShardProcessor(arguments) + + metaHash := []byte("meta_hash") + mbHash := []byte("mb_hash") + mbInfo := &processedMb.ProcessedMiniBlockInfo{ + FullyProcessed: true, + IndexOfLastTxProcessed: 69, + } + miniBlockHeader := &block.MiniBlockHeader{} + + processedMiniBlocksTracker.SetProcessedMiniBlockInfo(metaHash, mbHash, mbInfo) + assert.Equal(t, 1, len(processedMiniBlocksTracker.GetProcessedMiniBlocksInfo(metaHash))) + + sp.RollBackProcessedMiniBlockInfo(miniBlockHeader, mbHash) + assert.Equal(t, 0, len(processedMiniBlocksTracker.GetProcessedMiniBlocksInfo(metaHash))) + + processedMiniBlocksTracker.SetProcessedMiniBlockInfo(metaHash, mbHash, mbInfo) + assert.Equal(t, 1, len(processedMiniBlocksTracker.GetProcessedMiniBlocksInfo(metaHash))) + + _ = miniBlockHeader.SetIndexOfFirstTxProcessed(2) + + sp.RollBackProcessedMiniBlockInfo(miniBlockHeader, []byte("mb_hash_missing")) + assert.Equal(t, 1, len(processedMiniBlocksTracker.GetProcessedMiniBlocksInfo(metaHash))) + + processedMbInfo, processedMetaHash := processedMiniBlocksTracker.GetProcessedMiniBlockInfo(mbHash) + assert.Equal(t, metaHash, processedMetaHash) + assert.Equal(t, mbInfo.FullyProcessed, processedMbInfo.FullyProcessed) + assert.Equal(t, mbInfo.IndexOfLastTxProcessed, processedMbInfo.IndexOfLastTxProcessed) + + sp.RollBackProcessedMiniBlockInfo(miniBlockHeader, mbHash) + assert.Equal(t, 1, len(processedMiniBlocksTracker.GetProcessedMiniBlocksInfo(metaHash))) + + processedMbInfo, processedMetaHash = processedMiniBlocksTracker.GetProcessedMiniBlockInfo(mbHash) + assert.Equal(t, metaHash, processedMetaHash) + assert.False(t, processedMbInfo.FullyProcessed) + assert.Equal(t, int32(1), processedMbInfo.IndexOfLastTxProcessed) +} + +func TestShardProcessor_SetProcessedMiniBlocksInfo(t *testing.T) { + t.Parallel() + + arguments := CreateMockArguments(createComponentHolderMocks()) + processedMiniBlocksTracker := processedMb.NewProcessedMiniBlocksTracker() + arguments.ProcessedMiniBlocksTracker = processedMiniBlocksTracker + sp, _ := blproc.NewShardProcessor(arguments) + + mbHash1 := []byte("mbHash1") + mbHash2 := []byte("mbHash2") + mbHash3 := []byte("mbHash3") + miniBlockHashes := [][]byte{mbHash1, mbHash2, mbHash3} + metaHash := "metaHash" + mbh1 := block.MiniBlockHeader{ + TxCount: 3, + Hash: mbHash1, + } + mbh2 := 
block.MiniBlockHeader{ + TxCount: 5, + Hash: mbHash2, + } + mbh3 := block.MiniBlockHeader{ + TxCount: 5, + Hash: mbHash3, + } + metaBlock := &block.MetaBlock{ + MiniBlockHeaders: []block.MiniBlockHeader{mbh1, mbh2, mbh3}, + } + + sp.SetProcessedMiniBlocksInfo(miniBlockHashes, metaHash, metaBlock) + mapProcessedMiniBlocksInfo := processedMiniBlocksTracker.GetProcessedMiniBlocksInfo([]byte(metaHash)) + assert.Equal(t, 3, len(mapProcessedMiniBlocksInfo)) + + mbi, ok := mapProcessedMiniBlocksInfo[string(mbHash1)] + assert.True(t, ok) + assert.True(t, mbi.FullyProcessed) + assert.Equal(t, int32(mbh1.TxCount-1), mbi.IndexOfLastTxProcessed) + + mbi, ok = mapProcessedMiniBlocksInfo[string(mbHash2)] + assert.True(t, ok) + assert.True(t, mbi.FullyProcessed) + assert.Equal(t, int32(mbh2.TxCount-1), mbi.IndexOfLastTxProcessed) + + mbi, ok = mapProcessedMiniBlocksInfo[string(mbHash3)] + assert.True(t, ok) + assert.True(t, mbi.FullyProcessed) + assert.Equal(t, int32(mbh3.TxCount-1), mbi.IndexOfLastTxProcessed) +} + +func TestShardProcessor_GetIndexOfLastTxProcessedInMiniBlock(t *testing.T) { + t.Parallel() + + arguments := CreateMockArguments(createComponentHolderMocks()) + sp, _ := blproc.NewShardProcessor(arguments) + + mbHash1 := []byte("mbHash1") + mbHash2 := []byte("mbHash2") + mbHash3 := []byte("mbHash3") + + mbh1 := block.MiniBlockHeader{ + TxCount: 3, + Hash: mbHash1, + } + mbh2 := block.MiniBlockHeader{ + TxCount: 5, + Hash: mbHash2, + } + metaBlock := &block.MetaBlock{ + MiniBlockHeaders: []block.MiniBlockHeader{mbh1}, + ShardInfo: []block.ShardData{ + {ShardMiniBlockHeaders: []block.MiniBlockHeader{mbh2}}, + }, + } + + index := sp.GetIndexOfLastTxProcessedInMiniBlock(mbHash1, metaBlock) + assert.Equal(t, int32(mbh1.TxCount-1), index) + + index = sp.GetIndexOfLastTxProcessedInMiniBlock(mbHash2, metaBlock) + assert.Equal(t, int32(mbh2.TxCount-1), index) + + index = sp.GetIndexOfLastTxProcessedInMiniBlock(mbHash3, metaBlock) + assert.Equal(t, common.MaxIndexOfTxInMiniBlock, index) +} + +func TestShardProcessor_RollBackProcessedMiniBlocksInfo(t *testing.T) { + t.Parallel() + + arguments := CreateMockArguments(createComponentHolderMocks()) + processedMiniBlocksTracker := processedMb.NewProcessedMiniBlocksTracker() + arguments.ProcessedMiniBlocksTracker = processedMiniBlocksTracker + sp, _ := blproc.NewShardProcessor(arguments) + + metaHash := []byte("metaHash") + mbHash1 := []byte("mbHash1") + mbHash2 := []byte("mbHash2") + mbHash3 := []byte("mbHash3") + + mbInfo := &processedMb.ProcessedMiniBlockInfo{ + FullyProcessed: true, + IndexOfLastTxProcessed: 69, + } + + processedMiniBlocksTracker.SetProcessedMiniBlockInfo(metaHash, mbHash3, mbInfo) + + mbh2 := block.MiniBlockHeader{ + SenderShardID: 0, + TxCount: 5, + Hash: mbHash2, + } + mbh3 := block.MiniBlockHeader{ + SenderShardID: 2, + TxCount: 5, + Hash: mbHash3, + } + indexOfFirstTxProcessed := int32(3) + _ = mbh3.SetIndexOfFirstTxProcessed(indexOfFirstTxProcessed) + + mapMiniBlockHashes := make(map[string]uint32) + mapMiniBlockHashes[string(mbHash1)] = 1 + mapMiniBlockHashes[string(mbHash2)] = 0 + mapMiniBlockHashes[string(mbHash3)] = 2 + + header := &block.Header{ + MiniBlockHeaders: []block.MiniBlockHeader{mbh2, mbh3}, + } + + sp.RollBackProcessedMiniBlocksInfo(header, mapMiniBlockHashes) + + processedMbInfo, processedMetaHash := processedMiniBlocksTracker.GetProcessedMiniBlockInfo(mbHash3) + assert.Equal(t, metaHash, processedMetaHash) + assert.False(t, processedMbInfo.FullyProcessed) + assert.Equal(t, indexOfFirstTxProcessed-1, 
processedMbInfo.IndexOfLastTxProcessed) +} diff --git a/process/common.go b/process/common.go index 42dd884b38d..9b10cba7774 100644 --- a/process/common.go +++ b/process/common.go @@ -815,3 +815,28 @@ func GetMiniBlockHeaderWithHash(header data.HeaderHandler, miniBlockHash []byte) } return nil } + +// CheckIfIndexesAreOutOfBound checks if the given indexes are out of bound for the given mini block +func CheckIfIndexesAreOutOfBound( + indexOfFirstTxToBeProcessed int32, + indexOfLastTxToBeProcessed int32, + miniBlock *block.MiniBlock, +) error { + maxIndex := int32(len(miniBlock.TxHashes)) - 1 + + isFirstIndexHigherThanLastIndex := indexOfFirstTxToBeProcessed > indexOfLastTxToBeProcessed + isFirstIndexOutOfRange := indexOfFirstTxToBeProcessed < 0 || indexOfFirstTxToBeProcessed > maxIndex + isLastIndexOutOfRange := indexOfLastTxToBeProcessed < 0 || indexOfLastTxToBeProcessed > maxIndex + + isIndexOutOfBound := isFirstIndexHigherThanLastIndex || isFirstIndexOutOfRange || isLastIndexOutOfRange + if isIndexOutOfBound { + return fmt.Errorf("%w: indexOfFirstTxToBeProcessed: %d, indexOfLastTxToBeProcessed = %d, maxIndex: %d", + ErrIndexIsOutOfBound, + indexOfFirstTxToBeProcessed, + indexOfLastTxToBeProcessed, + maxIndex, + ) + } + + return nil +} diff --git a/process/common_test.go b/process/common_test.go index 175a60a152d..67bcf4a332c 100644 --- a/process/common_test.go +++ b/process/common_test.go @@ -1961,3 +1961,28 @@ func TestGetMiniBlockHeaderWithHash(t *testing.T) { assert.Equal(t, expectedMbh, mbh) }) } + +func TestCheckIfIndexesAreOutOfBound(t *testing.T) { + txHashes := [][]byte{[]byte("txHash1"), []byte("txHash2"), []byte("txHash3")} + miniBlock := &block.MiniBlock{TxHashes: txHashes} + + indexOfFirstTxToBeProcessed := int32(1) + indexOfLastTxToBeProcessed := int32(0) + err := process.CheckIfIndexesAreOutOfBound(indexOfFirstTxToBeProcessed, indexOfLastTxToBeProcessed, miniBlock) + assert.True(t, errors.Is(err, process.ErrIndexIsOutOfBound)) + + indexOfFirstTxToBeProcessed = int32(-1) + indexOfLastTxToBeProcessed = int32(0) + err = process.CheckIfIndexesAreOutOfBound(indexOfFirstTxToBeProcessed, indexOfLastTxToBeProcessed, miniBlock) + assert.True(t, errors.Is(err, process.ErrIndexIsOutOfBound)) + + indexOfFirstTxToBeProcessed = int32(0) + indexOfLastTxToBeProcessed = int32(len(txHashes)) + err = process.CheckIfIndexesAreOutOfBound(indexOfFirstTxToBeProcessed, indexOfLastTxToBeProcessed, miniBlock) + assert.True(t, errors.Is(err, process.ErrIndexIsOutOfBound)) + + indexOfFirstTxToBeProcessed = int32(0) + indexOfLastTxToBeProcessed = int32(len(txHashes) - 1) + err = process.CheckIfIndexesAreOutOfBound(indexOfFirstTxToBeProcessed, indexOfLastTxToBeProcessed, miniBlock) + assert.Nil(t, err) +} diff --git a/process/coordinator/process.go b/process/coordinator/process.go index 0e6fca0f741..d1d13e0c85a 100644 --- a/process/coordinator/process.go +++ b/process/coordinator/process.go @@ -8,19 +8,19 @@ import ( "sync" "time" - "github.com/ElrondNetwork/elrond-go-core/core/atomic" - "github.com/ElrondNetwork/elrond-go-core/core" + "github.com/ElrondNetwork/elrond-go-core/core/atomic" "github.com/ElrondNetwork/elrond-go-core/core/check" "github.com/ElrondNetwork/elrond-go-core/data" "github.com/ElrondNetwork/elrond-go-core/data/batch" "github.com/ElrondNetwork/elrond-go-core/data/block" "github.com/ElrondNetwork/elrond-go-core/hashing" "github.com/ElrondNetwork/elrond-go-core/marshal" - "github.com/ElrondNetwork/elrond-go-logger" + logger "github.com/ElrondNetwork/elrond-go-logger" 
"github.com/ElrondNetwork/elrond-go/common" "github.com/ElrondNetwork/elrond-go/process" "github.com/ElrondNetwork/elrond-go/process/block/preprocess" + "github.com/ElrondNetwork/elrond-go/process/block/processedMb" "github.com/ElrondNetwork/elrond-go/process/factory" "github.com/ElrondNetwork/elrond-go/sharding" "github.com/ElrondNetwork/elrond-go/state" @@ -32,28 +32,43 @@ var _ process.TransactionCoordinator = (*transactionCoordinator)(nil) var log = logger.GetOrCreate("process/coordinator") +type createMiniBlockDestMeExecutionInfo struct { + processedTxHashes [][]byte + miniBlocks block.MiniBlockSlice + numTxAdded uint32 + numNewMiniBlocksProcessed int + numAlreadyMiniBlocksProcessed int +} + +type processedIndexes struct { + indexOfLastTxProcessed int32 + indexOfLastTxProcessedByProposer int32 +} + // ArgTransactionCoordinator holds all dependencies required by the transaction coordinator factory in order to create new instances type ArgTransactionCoordinator struct { - Hasher hashing.Hasher - Marshalizer marshal.Marshalizer - ShardCoordinator sharding.Coordinator - Accounts state.AccountsAdapter - MiniBlockPool storage.Cacher - RequestHandler process.RequestHandler - PreProcessors process.PreProcessorsContainer - InterProcessors process.IntermediateProcessorContainer - GasHandler process.GasHandler - FeeHandler process.TransactionFeeHandler - BlockSizeComputation preprocess.BlockSizeComputationHandler - BalanceComputation preprocess.BalanceComputationHandler - EconomicsFee process.FeeHandler - TxTypeHandler process.TxTypeHandler - TransactionsLogProcessor process.TransactionLogProcessor - BlockGasAndFeesReCheckEnableEpoch uint32 - EpochNotifier process.EpochNotifier - ScheduledTxsExecutionHandler process.ScheduledTxsExecutionHandler - ScheduledMiniBlocksEnableEpoch uint32 - DoubleTransactionsDetector process.DoubleTransactionDetector + Hasher hashing.Hasher + Marshalizer marshal.Marshalizer + ShardCoordinator sharding.Coordinator + Accounts state.AccountsAdapter + MiniBlockPool storage.Cacher + RequestHandler process.RequestHandler + PreProcessors process.PreProcessorsContainer + InterProcessors process.IntermediateProcessorContainer + GasHandler process.GasHandler + FeeHandler process.TransactionFeeHandler + BlockSizeComputation preprocess.BlockSizeComputationHandler + BalanceComputation preprocess.BalanceComputationHandler + EconomicsFee process.FeeHandler + TxTypeHandler process.TxTypeHandler + TransactionsLogProcessor process.TransactionLogProcessor + BlockGasAndFeesReCheckEnableEpoch uint32 + EpochNotifier process.EpochNotifier + ScheduledTxsExecutionHandler process.ScheduledTxsExecutionHandler + ScheduledMiniBlocksEnableEpoch uint32 + DoubleTransactionsDetector process.DoubleTransactionDetector + MiniBlockPartialExecutionEnableEpoch uint32 + ProcessedMiniBlocksTracker process.ProcessedMiniBlocksTracker } type transactionCoordinator struct { @@ -74,20 +89,23 @@ type transactionCoordinator struct { mutRequestedTxs sync.RWMutex requestedTxs map[block.Type]int - onRequestMiniBlock func(shardId uint32, mbHash []byte) - gasHandler process.GasHandler - feeHandler process.TransactionFeeHandler - blockSizeComputation preprocess.BlockSizeComputationHandler - balanceComputation preprocess.BalanceComputationHandler - requestedItemsHandler process.TimeCacher - economicsFee process.FeeHandler - txTypeHandler process.TxTypeHandler - transactionsLogProcessor process.TransactionLogProcessor - blockGasAndFeesReCheckEnableEpoch uint32 - scheduledTxsExecutionHandler 
process.ScheduledTxsExecutionHandler - scheduledMiniBlocksEnableEpoch uint32 - flagScheduledMiniBlocks atomic.Flag - doubleTransactionsDetector process.DoubleTransactionDetector + onRequestMiniBlock func(shardId uint32, mbHash []byte) + gasHandler process.GasHandler + feeHandler process.TransactionFeeHandler + blockSizeComputation preprocess.BlockSizeComputationHandler + balanceComputation preprocess.BalanceComputationHandler + requestedItemsHandler process.TimeCacher + economicsFee process.FeeHandler + txTypeHandler process.TxTypeHandler + transactionsLogProcessor process.TransactionLogProcessor + blockGasAndFeesReCheckEnableEpoch uint32 + scheduledTxsExecutionHandler process.ScheduledTxsExecutionHandler + scheduledMiniBlocksEnableEpoch uint32 + flagScheduledMiniBlocks atomic.Flag + doubleTransactionsDetector process.DoubleTransactionDetector + miniBlockPartialExecutionEnableEpoch uint32 + flagMiniBlockPartialExecution atomic.Flag + processedMiniBlocksTracker process.ProcessedMiniBlocksTracker } // NewTransactionCoordinator creates a transaction coordinator to run and coordinate preprocessors and processors @@ -98,23 +116,27 @@ func NewTransactionCoordinator(args ArgTransactionCoordinator) (*transactionCoor } tc := &transactionCoordinator{ - shardCoordinator: args.ShardCoordinator, - accounts: args.Accounts, - gasHandler: args.GasHandler, - hasher: args.Hasher, - marshalizer: args.Marshalizer, - feeHandler: args.FeeHandler, - blockSizeComputation: args.BlockSizeComputation, - balanceComputation: args.BalanceComputation, - economicsFee: args.EconomicsFee, - txTypeHandler: args.TxTypeHandler, - blockGasAndFeesReCheckEnableEpoch: args.BlockGasAndFeesReCheckEnableEpoch, - transactionsLogProcessor: args.TransactionsLogProcessor, - scheduledTxsExecutionHandler: args.ScheduledTxsExecutionHandler, - scheduledMiniBlocksEnableEpoch: args.ScheduledMiniBlocksEnableEpoch, - doubleTransactionsDetector: args.DoubleTransactionsDetector, + shardCoordinator: args.ShardCoordinator, + accounts: args.Accounts, + gasHandler: args.GasHandler, + hasher: args.Hasher, + marshalizer: args.Marshalizer, + feeHandler: args.FeeHandler, + blockSizeComputation: args.BlockSizeComputation, + balanceComputation: args.BalanceComputation, + economicsFee: args.EconomicsFee, + txTypeHandler: args.TxTypeHandler, + blockGasAndFeesReCheckEnableEpoch: args.BlockGasAndFeesReCheckEnableEpoch, + transactionsLogProcessor: args.TransactionsLogProcessor, + scheduledTxsExecutionHandler: args.ScheduledTxsExecutionHandler, + scheduledMiniBlocksEnableEpoch: args.ScheduledMiniBlocksEnableEpoch, + doubleTransactionsDetector: args.DoubleTransactionsDetector, + miniBlockPartialExecutionEnableEpoch: args.MiniBlockPartialExecutionEnableEpoch, + processedMiniBlocksTracker: args.ProcessedMiniBlocksTracker, } log.Debug("coordinator/process: enable epoch for block gas and fees re-check", "epoch", tc.blockGasAndFeesReCheckEnableEpoch) + log.Debug("coordinator/process: enable epoch for scheduled txs execution", "epoch", tc.scheduledMiniBlocksEnableEpoch) + log.Debug("coordinator/process: enable epoch for mini block partial execution", "epoch", tc.miniBlockPartialExecutionEnableEpoch) tc.miniBlockPool = args.MiniBlockPool tc.onRequestMiniBlock = args.RequestHandler.RequestMiniBlock @@ -550,29 +572,27 @@ func (tc *transactionCoordinator) processMiniBlocksToMe( // with destination of current shard func (tc *transactionCoordinator) CreateMbsAndProcessCrossShardTransactionsDstMe( hdr data.HeaderHandler, - processedMiniBlocksHashes map[string]struct{}, + 
processedMiniBlocksInfo map[string]*processedMb.ProcessedMiniBlockInfo, haveTime func() bool, haveAdditionalTime func() bool, scheduledMode bool, ) (block.MiniBlockSlice, uint32, bool, error) { - miniBlocks := make(block.MiniBlockSlice, 0) - numTxAdded := uint32(0) - numAlreadyMiniBlocksProcessed := 0 - numNewMiniBlocksProcessed := 0 - processedTxHashes := make([][]byte, 0) + createMBDestMeExecutionInfo := initMiniBlockDestMeExecutionInfo() if check.IfNil(hdr) { - return miniBlocks, numTxAdded, false, nil + return createMBDestMeExecutionInfo.miniBlocks, createMBDestMeExecutionInfo.numTxAdded, false, nil } shouldSkipShard := make(map[uint32]bool) - if tc.shardCoordinator.SelfId() == core.MetachainShardId { - tc.initProcessedTxsResults() - tc.gasHandler.Reset() + headerHash, err := core.CalculateHash(tc.marshalizer, tc.hasher, hdr) + if err != nil { + return createMBDestMeExecutionInfo.miniBlocks, createMBDestMeExecutionInfo.numTxAdded, false, nil } + tc.handleCreateMiniBlocksDestMeInit(headerHash) + finalCrossMiniBlockInfos := tc.getFinalCrossMiniBlockInfos(hdr.GetOrderedCrossMiniblocksWithDst(tc.shardCoordinator.SelfId()), hdr) defer func() { @@ -580,8 +600,8 @@ func (tc *transactionCoordinator) CreateMbsAndProcessCrossShardTransactionsDstMe "header round", hdr.GetRound(), "header nonce", hdr.GetNonce(), "num mini blocks to be processed", len(finalCrossMiniBlockInfos), - "num already mini blocks processed", numAlreadyMiniBlocksProcessed, - "num new mini blocks processed", numNewMiniBlocksProcessed, + "num already mini blocks processed", createMBDestMeExecutionInfo.numAlreadyMiniBlocksProcessed, + "num new mini blocks processed", createMBDestMeExecutionInfo.numNewMiniBlocksProcessed, "total gas provided", tc.gasHandler.TotalGasProvided(), "total gas provided as scheduled", tc.gasHandler.TotalGasProvidedAsScheduled(), "total gas refunded", tc.gasHandler.TotalGasRefunded(), @@ -613,9 +633,9 @@ func (tc *transactionCoordinator) CreateMbsAndProcessCrossShardTransactionsDstMe continue } - _, ok := processedMiniBlocksHashes[string(miniBlockInfo.Hash)] - if ok { - numAlreadyMiniBlocksProcessed++ + processedMbInfo := getProcessedMiniBlockInfo(processedMiniBlocksInfo, miniBlockInfo.Hash) + if processedMbInfo.FullyProcessed { + createMBDestMeExecutionInfo.numAlreadyMiniBlocksProcessed++ log.Trace("transactionCoordinator.CreateMbsAndProcessCrossShardTransactionsDstMe: mini block already processed", "scheduled mode", scheduledMode, "sender shard", miniBlockInfo.SenderShardID, @@ -650,7 +670,7 @@ func (tc *transactionCoordinator) CreateMbsAndProcessCrossShardTransactionsDstMe continue } - if scheduledMode && miniBlock.Type != block.TxBlock { + if scheduledMode && (miniBlock.Type != block.TxBlock || processedMbInfo.IndexOfLastTxProcessed > -1) { shouldSkipShard[miniBlockInfo.SenderShardID] = true log.Debug("transactionCoordinator.CreateMbsAndProcessCrossShardTransactionsDstMe: mini block can not be processed in scheduled mode", "scheduled mode", scheduledMode, @@ -658,6 +678,7 @@ func (tc *transactionCoordinator) CreateMbsAndProcessCrossShardTransactionsDstMe "sender shard", miniBlockInfo.SenderShardID, "hash", miniBlockInfo.Hash, "round", miniBlockInfo.Round, + "index of last tx processed", processedMbInfo.IndexOfLastTxProcessed, ) continue } @@ -680,7 +701,10 @@ func (tc *transactionCoordinator) CreateMbsAndProcessCrossShardTransactionsDstMe continue } - err := tc.processCompleteMiniBlock(preproc, miniBlock, miniBlockInfo.Hash, haveTime, haveAdditionalTime, scheduledMode) + oldIndexOfLastTxProcessed := 
processedMbInfo.IndexOfLastTxProcessed + + err := tc.processCompleteMiniBlock(preproc, miniBlock, miniBlockInfo.Hash, haveTime, haveAdditionalTime, scheduledMode, processedMbInfo) + tc.handleProcessMiniBlockExecution(oldIndexOfLastTxProcessed, miniBlock, processedMbInfo, createMBDestMeExecutionInfo) if err != nil { shouldSkipShard[miniBlockInfo.SenderShardID] = true log.Debug("transactionCoordinator.CreateMbsAndProcessCrossShardTransactionsDstMe: processed complete mini block failed", @@ -690,6 +714,9 @@ func (tc *transactionCoordinator) CreateMbsAndProcessCrossShardTransactionsDstMe "type", miniBlock.Type, "round", miniBlockInfo.Round, "num txs", len(miniBlock.TxHashes), + "num all txs processed", processedMbInfo.IndexOfLastTxProcessed+1, + "num current txs processed", processedMbInfo.IndexOfLastTxProcessed-oldIndexOfLastTxProcessed, + "fully processed", processedMbInfo.FullyProcessed, "total gas provided", tc.gasHandler.TotalGasProvided(), "total gas provided as scheduled", tc.gasHandler.TotalGasProvidedAsScheduled(), "total gas refunded", tc.gasHandler.TotalGasRefunded(), @@ -705,30 +732,86 @@ func (tc *transactionCoordinator) CreateMbsAndProcessCrossShardTransactionsDstMe "type", miniBlock.Type, "round", miniBlockInfo.Round, "num txs", len(miniBlock.TxHashes), + "num all txs processed", processedMbInfo.IndexOfLastTxProcessed+1, + "num current txs processed", processedMbInfo.IndexOfLastTxProcessed-oldIndexOfLastTxProcessed, + "fully processed", processedMbInfo.FullyProcessed, "total gas provided", tc.gasHandler.TotalGasProvided(), "total gas provided as scheduled", tc.gasHandler.TotalGasProvidedAsScheduled(), "total gas refunded", tc.gasHandler.TotalGasRefunded(), "total gas penalized", tc.gasHandler.TotalGasPenalized(), ) + } + + numTotalMiniBlocksProcessed := createMBDestMeExecutionInfo.numAlreadyMiniBlocksProcessed + createMBDestMeExecutionInfo.numNewMiniBlocksProcessed + allMBsProcessed := numTotalMiniBlocksProcessed == len(finalCrossMiniBlockInfos) + if !allMBsProcessed { + tc.revertIfNeeded(createMBDestMeExecutionInfo, headerHash) + } - processedTxHashes = append(processedTxHashes, miniBlock.TxHashes...) 
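
The helpers introduced just below (getProcessedMiniBlockInfo and handleProcessMiniBlockExecution) rely on a simple convention: IndexOfLastTxProcessed starts at -1 when nothing from the mini block has been executed, and the hashes newly processed in the current pass are TxHashes[oldIndex+1 : newIndex+1]. The stand-alone sketch below walks through that arithmetic with illustrative values only.

```go
package main

import "fmt"

func main() {
	// Illustrative mini block with five tx hashes.
	txHashes := [][]byte{
		[]byte("tx0"), []byte("tx1"), []byte("tx2"), []byte("tx3"), []byte("tx4"),
	}

	// -1 is the default returned by getProcessedMiniBlockInfo: nothing processed yet.
	oldIndexOfLastTxProcessed := int32(-1)

	// Suppose this pass managed to execute txs 0..2 before stopping.
	newIndexOfLastTxProcessed := int32(2)

	// Same slice arithmetic as handleProcessMiniBlockExecution.
	newlyProcessed := txHashes[oldIndexOfLastTxProcessed+1 : newIndexOfLastTxProcessed+1]
	fullyProcessed := int(newIndexOfLastTxProcessed) == len(txHashes)-1

	fmt.Println(len(newlyProcessed)) // 3
	fmt.Println(fullyProcessed)      // false - tx3 and tx4 remain for a later pass
}
```

When the new index reaches len(TxHashes)-1, the mini block is recorded as fully processed, which is what processCompleteMiniBlock does further down once no error and no revert occurred.
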
+ return createMBDestMeExecutionInfo.miniBlocks, createMBDestMeExecutionInfo.numTxAdded, allMBsProcessed, nil +} - // all txs processed, add to processed miniblocks - miniBlocks = append(miniBlocks, miniBlock) - numTxAdded = numTxAdded + uint32(len(miniBlock.TxHashes)) - numNewMiniBlocksProcessed++ - if processedMiniBlocksHashes != nil { - processedMiniBlocksHashes[string(miniBlockInfo.Hash)] = struct{}{} +func initMiniBlockDestMeExecutionInfo() *createMiniBlockDestMeExecutionInfo { + return &createMiniBlockDestMeExecutionInfo{ + processedTxHashes: make([][]byte, 0), + miniBlocks: make(block.MiniBlockSlice, 0), + numTxAdded: 0, + numNewMiniBlocksProcessed: 0, + numAlreadyMiniBlocksProcessed: 0, + } +} + +func (tc *transactionCoordinator) handleCreateMiniBlocksDestMeInit(headerHash []byte) { + if tc.shardCoordinator.SelfId() != core.MetachainShardId { + return + } + + tc.InitProcessedTxsResults(headerHash) + tc.gasHandler.Reset(headerHash) +} + +func (tc *transactionCoordinator) handleProcessMiniBlockExecution( + oldIndexOfLastTxProcessed int32, + miniBlock *block.MiniBlock, + processedMbInfo *processedMb.ProcessedMiniBlockInfo, + createMBDestMeExecutionInfo *createMiniBlockDestMeExecutionInfo, +) { + if oldIndexOfLastTxProcessed >= processedMbInfo.IndexOfLastTxProcessed { + return + } + + newProcessedTxHashes := miniBlock.TxHashes[oldIndexOfLastTxProcessed+1 : processedMbInfo.IndexOfLastTxProcessed+1] + createMBDestMeExecutionInfo.processedTxHashes = append(createMBDestMeExecutionInfo.processedTxHashes, newProcessedTxHashes...) + createMBDestMeExecutionInfo.miniBlocks = append(createMBDestMeExecutionInfo.miniBlocks, miniBlock) + createMBDestMeExecutionInfo.numTxAdded = createMBDestMeExecutionInfo.numTxAdded + uint32(len(newProcessedTxHashes)) + + if processedMbInfo.FullyProcessed { + createMBDestMeExecutionInfo.numNewMiniBlocksProcessed++ + } +} + +func getProcessedMiniBlockInfo( + processedMiniBlocksInfo map[string]*processedMb.ProcessedMiniBlockInfo, + miniBlockHash []byte, +) *processedMb.ProcessedMiniBlockInfo { + + if processedMiniBlocksInfo == nil { + return &processedMb.ProcessedMiniBlockInfo{ + IndexOfLastTxProcessed: -1, + FullyProcessed: false, } } - numTotalMiniBlocksProcessed := numAlreadyMiniBlocksProcessed + numNewMiniBlocksProcessed - allMBsProcessed := numTotalMiniBlocksProcessed == len(finalCrossMiniBlockInfos) - if !allMBsProcessed { - tc.revertIfNeeded(processedTxHashes) + processedMbInfo, ok := processedMiniBlocksInfo[string(miniBlockHash)] + if !ok { + processedMbInfo = &processedMb.ProcessedMiniBlockInfo{ + IndexOfLastTxProcessed: -1, + FullyProcessed: false, + } + processedMiniBlocksInfo[string(miniBlockHash)] = processedMbInfo } - return miniBlocks, numTxAdded, allMBsProcessed, nil + return processedMbInfo } func (tc *transactionCoordinator) getFinalCrossMiniBlockInfos( @@ -754,14 +837,17 @@ func (tc *transactionCoordinator) getFinalCrossMiniBlockInfos( return miniBlockInfos } -func (tc *transactionCoordinator) revertIfNeeded(txsToBeReverted [][]byte) { - shouldRevert := tc.shardCoordinator.SelfId() == core.MetachainShardId && len(txsToBeReverted) > 0 +func (tc *transactionCoordinator) revertIfNeeded(createMBDestMeExecutionInfo *createMiniBlockDestMeExecutionInfo, key []byte) { + shouldRevert := tc.shardCoordinator.SelfId() == core.MetachainShardId && len(createMBDestMeExecutionInfo.processedTxHashes) > 0 if !shouldRevert { return } - tc.gasHandler.RestoreGasSinceLastReset() - tc.revertProcessedTxsResults(txsToBeReverted) + 
tc.gasHandler.RestoreGasSinceLastReset(key) + tc.RevertProcessedTxsResults(createMBDestMeExecutionInfo.processedTxHashes, key) + + createMBDestMeExecutionInfo.miniBlocks = make(block.MiniBlockSlice, 0) + createMBDestMeExecutionInfo.numTxAdded = 0 } // CreateMbsAndProcessTransactionsFromMe creates miniblocks and processes transactions from pool @@ -1057,13 +1143,10 @@ func (tc *transactionCoordinator) processCompleteMiniBlock( haveTime func() bool, haveAdditionalTime func() bool, scheduledMode bool, + processedMbInfo *processedMb.ProcessedMiniBlockInfo, ) error { - snapshot := tc.accounts.JournalLen() - if tc.shardCoordinator.SelfId() != core.MetachainShardId { - tc.initProcessedTxsResults() - tc.gasHandler.Reset() - } + snapshot := tc.handleProcessMiniBlockInit(miniBlockHash) log.Debug("transactionsCoordinator.processCompleteMiniBlock: before processing", "scheduled mode", scheduledMode, @@ -1077,10 +1160,19 @@ func (tc *transactionCoordinator) processCompleteMiniBlock( "total gas penalized", tc.gasHandler.TotalGasPenalized(), ) - txsToBeReverted, numTxsProcessed, err := preproc.ProcessMiniBlock(miniBlock, haveTime, haveAdditionalTime, tc.getNumOfCrossInterMbsAndTxs, scheduledMode) + txsToBeReverted, indexOfLastTxProcessed, shouldRevert, err := preproc.ProcessMiniBlock( + miniBlock, + haveTime, + haveAdditionalTime, + scheduledMode, + tc.flagMiniBlockPartialExecution.IsSet(), + int(processedMbInfo.IndexOfLastTxProcessed), + tc, + ) log.Debug("transactionsCoordinator.processCompleteMiniBlock: after processing", - "num txs processed", numTxsProcessed, + "num all txs processed", indexOfLastTxProcessed+1, + "num current txs processed", indexOfLastTxProcessed-int(processedMbInfo.IndexOfLastTxProcessed), "txs to be reverted", len(txsToBeReverted), "total gas provided", tc.gasHandler.TotalGasProvided(), "total gas provided as scheduled", tc.gasHandler.TotalGasProvidedAsScheduled(), @@ -1097,27 +1189,56 @@ func (tc *transactionCoordinator) processCompleteMiniBlock( "rcv shard", miniBlock.ReceiverShardID, "num txs", len(miniBlock.TxHashes), "txs to be reverted", len(txsToBeReverted), - "num txs processed", numTxsProcessed, + "num all txs processed", indexOfLastTxProcessed+1, + "num current txs processed", indexOfLastTxProcessed-int(processedMbInfo.IndexOfLastTxProcessed), + "should revert", shouldRevert, "error", err.Error(), ) - errAccountState := tc.accounts.RevertToSnapshot(snapshot) - if errAccountState != nil { - // TODO: evaluate if reloading the trie from disk will might solve the problem - log.Debug("RevertToSnapshot", "error", errAccountState.Error()) - } - - if len(txsToBeReverted) > 0 { - tc.revertProcessedTxsResults(txsToBeReverted) + if shouldRevert { + tc.handleProcessTransactionError(snapshot, miniBlockHash, txsToBeReverted) + } else { + if tc.flagMiniBlockPartialExecution.IsSet() { + processedMbInfo.IndexOfLastTxProcessed = int32(indexOfLastTxProcessed) + processedMbInfo.FullyProcessed = false + } } return err } + processedMbInfo.IndexOfLastTxProcessed = int32(indexOfLastTxProcessed) + processedMbInfo.FullyProcessed = true + return nil } -func (tc *transactionCoordinator) initProcessedTxsResults() { +func (tc *transactionCoordinator) handleProcessMiniBlockInit(miniBlockHash []byte) int { + snapshot := tc.accounts.JournalLen() + if tc.shardCoordinator.SelfId() != core.MetachainShardId { + tc.InitProcessedTxsResults(miniBlockHash) + tc.gasHandler.Reset(miniBlockHash) + } + + return snapshot +} + +func (tc *transactionCoordinator) handleProcessTransactionError(snapshot int, 
miniBlockHash []byte, txsToBeReverted [][]byte) { + tc.gasHandler.RestoreGasSinceLastReset(miniBlockHash) + + err := tc.accounts.RevertToSnapshot(snapshot) + if err != nil { + // TODO: evaluate if reloading the trie from disk will might solve the problem + log.Debug("transactionCoordinator.handleProcessTransactionError: RevertToSnapshot", "error", err.Error()) + } + + if len(txsToBeReverted) > 0 { + tc.RevertProcessedTxsResults(txsToBeReverted, miniBlockHash) + } +} + +// InitProcessedTxsResults inits processed txs results for the given key +func (tc *transactionCoordinator) InitProcessedTxsResults(key []byte) { tc.mutInterimProcessors.RLock() defer tc.mutInterimProcessors.RUnlock() @@ -1126,11 +1247,12 @@ func (tc *transactionCoordinator) initProcessedTxsResults() { if !ok { continue } - interProc.InitProcessedResults() + interProc.InitProcessedResults(key) } } -func (tc *transactionCoordinator) revertProcessedTxsResults(txHashes [][]byte) { +// RevertProcessedTxsResults reverts processed txs results for the given hashes and key +func (tc *transactionCoordinator) RevertProcessedTxsResults(txHashes [][]byte, key []byte) { tc.mutInterimProcessors.RLock() defer tc.mutInterimProcessors.RUnlock() @@ -1139,7 +1261,7 @@ func (tc *transactionCoordinator) revertProcessedTxsResults(txHashes [][]byte) { if !ok { continue } - resultHashes := interProc.RemoveProcessedResults() + resultHashes := interProc.RemoveProcessedResults(key) accFeesBeforeRevert := tc.feeHandler.GetAccumulatedFees() tc.feeHandler.RevertFees(resultHashes) accFeesAfterRevert := tc.feeHandler.GetAccumulatedFees() @@ -1286,7 +1408,8 @@ func (tc *transactionCoordinator) CreateMarshalizedReceipts() ([]byte, error) { return tc.marshalizer.Marshal(receiptsBatch) } -func (tc *transactionCoordinator) getNumOfCrossInterMbsAndTxs() (int, int) { +// GetNumOfCrossInterMbsAndTxs gets the number of cross intermediate transactions and mini blocks +func (tc *transactionCoordinator) GetNumOfCrossInterMbsAndTxs() (int, int) { totalNumMbs := 0 totalNumTxs := 0 @@ -1374,7 +1497,10 @@ func getNumOfCrossShardScCallsOrSpecialTxs( } // VerifyCreatedMiniBlocks re-checks gas used and generated fees in the given block -func (tc *transactionCoordinator) VerifyCreatedMiniBlocks(header data.HeaderHandler, body *block.Body) error { +func (tc *transactionCoordinator) VerifyCreatedMiniBlocks( + header data.HeaderHandler, + body *block.Body, +) error { if header.GetEpoch() < tc.blockGasAndFeesReCheckEnableEpoch { return nil } @@ -1517,6 +1643,7 @@ func (tc *transactionCoordinator) verifyFees( } maxAccumulatedFeesFromMiniBlock, maxDeveloperFeesFromMiniBlock, err := tc.getMaxAccumulatedAndDeveloperFees( + header.GetMiniBlockHeaderHandlers()[index], miniBlock, mapMiniBlockTypeAllTxs[miniBlock.Type], ) @@ -1539,17 +1666,30 @@ func (tc *transactionCoordinator) verifyFees( } func (tc *transactionCoordinator) getMaxAccumulatedAndDeveloperFees( + miniBlockHeaderHandler data.MiniBlockHeaderHandler, miniBlock *block.MiniBlock, mapHashTx map[string]data.TransactionHandler, ) (*big.Int, *big.Int, error) { maxAccumulatedFeesFromMiniBlock := big.NewInt(0) maxDeveloperFeesFromMiniBlock := big.NewInt(0) - for _, txHash := range miniBlock.TxHashes { + pi, err := tc.getIndexesOfLastTxProcessed(miniBlock, miniBlockHeaderHandler) + if err != nil { + return nil, nil, err + } + + indexOfFirstTxToBeProcessed := pi.indexOfLastTxProcessed + 1 + err = process.CheckIfIndexesAreOutOfBound(indexOfFirstTxToBeProcessed, pi.indexOfLastTxProcessedByProposer, miniBlock) + if err != nil { + return 
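InitProcessedTxsResults and RevertProcessedTxsResults (now exported) take a key, typically the header or mini block hash, so each interim processor can scope its intermediate results and a revert only discards what was produced under that key. A rough sketch of such a keyed store, with assumed internals since the interim processors themselves are not shown in this diff:

```go
package main

import "fmt"

// keyedResults is an illustrative stand-in for an intermediate processor that
// scopes its processed results per key, so a revert only discards what was
// recorded under that key.
type keyedResults struct {
	resultsPerKey map[string][][]byte
}

func newKeyedResults() *keyedResults {
	return &keyedResults{resultsPerKey: make(map[string][][]byte)}
}

// InitProcessedResults resets the slot for the given key, as InitProcessedTxsResults
// does for every interim processor it holds.
func (k *keyedResults) InitProcessedResults(key []byte) {
	k.resultsPerKey[string(key)] = make([][]byte, 0)
}

// AddProcessedResult records an intermediate result hash under the given key.
func (k *keyedResults) AddProcessedResult(key []byte, resultHash []byte) {
	k.resultsPerKey[string(key)] = append(k.resultsPerKey[string(key)], resultHash)
}

// RemoveProcessedResults drops and returns the result hashes for one key only,
// mirroring the RemoveProcessedResults(key) call used when reverting fees.
func (k *keyedResults) RemoveProcessedResults(key []byte) [][]byte {
	removed := k.resultsPerKey[string(key)]
	delete(k.resultsPerKey, string(key))
	return removed
}

func main() {
	proc := newKeyedResults()
	proc.InitProcessedResults([]byte("mb1"))
	proc.InitProcessedResults([]byte("mb2"))
	proc.AddProcessedResult([]byte("mb1"), []byte("scr1"))
	proc.AddProcessedResult([]byte("mb2"), []byte("scr2"))

	reverted := proc.RemoveProcessedResults([]byte("mb1"))
	fmt.Println(len(reverted), len(proc.resultsPerKey["mb2"])) // 1 1, results under mb2 stay untouched
}
```

Keying by hash is what allows one mini block to be reverted without disturbing fees or results accumulated for the others in the same block.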
nil, nil, err + } + + for index := indexOfFirstTxToBeProcessed; index <= pi.indexOfLastTxProcessedByProposer; index++ { + txHash := miniBlock.TxHashes[index] txHandler, ok := mapHashTx[string(txHash)] if !ok { log.Debug("missing transaction in getMaxAccumulatedFeesAndDeveloperFees ", "type", miniBlock.Type, "txHash", txHash) - return big.NewInt(0), big.NewInt(0), process.ErrMissingTransaction + return nil, nil, process.ErrMissingTransaction } maxAccumulatedFeesFromTx := core.SafeMul(txHandler.GetGasLimit(), txHandler.GetGasPrice()) @@ -1562,6 +1702,25 @@ func (tc *transactionCoordinator) getMaxAccumulatedAndDeveloperFees( return maxAccumulatedFeesFromMiniBlock, maxDeveloperFeesFromMiniBlock, nil } +func (tc *transactionCoordinator) getIndexesOfLastTxProcessed( + miniBlock *block.MiniBlock, + miniBlockHeaderHandler data.MiniBlockHeaderHandler, +) (*processedIndexes, error) { + + miniBlockHash, err := core.CalculateHash(tc.marshalizer, tc.hasher, miniBlock) + if err != nil { + return nil, err + } + + pi := &processedIndexes{} + + processedMiniBlockInfo, _ := tc.processedMiniBlocksTracker.GetProcessedMiniBlockInfo(miniBlockHash) + pi.indexOfLastTxProcessed = processedMiniBlockInfo.IndexOfLastTxProcessed + pi.indexOfLastTxProcessedByProposer = miniBlockHeaderHandler.GetIndexOfLastTxProcessed() + + return pi, nil +} + func checkTransactionCoordinatorNilParameters(arguments ArgTransactionCoordinator) error { if check.IfNil(arguments.ShardCoordinator) { return process.ErrNilShardCoordinator @@ -1617,6 +1776,9 @@ func checkTransactionCoordinatorNilParameters(arguments ArgTransactionCoordinato if check.IfNil(arguments.DoubleTransactionsDetector) { return process.ErrNilDoubleTransactionsDetector } + if check.IfNil(arguments.ProcessedMiniBlocksTracker) { + return process.ErrNilProcessedMiniBlocksTracker + } return nil } @@ -1657,6 +1819,9 @@ func (tc *transactionCoordinator) GetAllIntermediateTxs() map[block.Type]map[str func (tc *transactionCoordinator) EpochConfirmed(epoch uint32, _ uint64) { tc.flagScheduledMiniBlocks.SetValue(epoch >= tc.scheduledMiniBlocksEnableEpoch) log.Debug("transactionCoordinator: scheduled mini blocks", "enabled", tc.flagScheduledMiniBlocks.IsSet()) + + tc.flagMiniBlockPartialExecution.SetValue(epoch >= tc.miniBlockPartialExecutionEnableEpoch) + log.Debug("transactionCoordinator: mini block partial execution", "enabled", tc.flagMiniBlockPartialExecution.IsSet()) } // AddTxsFromMiniBlocks adds transactions from given mini blocks needed by the current block diff --git a/process/coordinator/process_test.go b/process/coordinator/process_test.go index a7327bea348..a229b53553b 100644 --- a/process/coordinator/process_test.go +++ b/process/coordinator/process_test.go @@ -24,6 +24,7 @@ import ( "github.com/ElrondNetwork/elrond-go-core/data/transaction" "github.com/ElrondNetwork/elrond-go/dataRetriever" "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/process/block/processedMb" "github.com/ElrondNetwork/elrond-go/process/factory" "github.com/ElrondNetwork/elrond-go/process/factory/shard" "github.com/ElrondNetwork/elrond-go/process/mock" @@ -220,26 +221,28 @@ func initAccountsMock() *stateMock.AccountsStub { func createMockTransactionCoordinatorArguments() ArgTransactionCoordinator { argsTransactionCoordinator := ArgTransactionCoordinator{ - Hasher: &hashingMocks.HasherMock{}, - Marshalizer: &mock.MarshalizerMock{}, - ShardCoordinator: mock.NewMultiShardsCoordinatorMock(5), - Accounts: &stateMock.AccountsStub{}, - MiniBlockPool: 
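getIndexesOfLastTxProcessed combines two positions: the index this node has already processed (from the processed mini blocks tracker, -1 when nothing was executed yet) and the index the proposer declared in the mini block header. The fee check then only walks the transactions between those two, after validating the range. A small sketch of that arithmetic, with an illustrative bounds check standing in for process.CheckIfIndexesAreOutOfBound:

```go
package main

import (
	"errors"
	"fmt"
)

var errIndexIsOutOfBound = errors.New("index is out of bound")

// txRangeToVerify mirrors the index arithmetic used by getMaxAccumulatedAndDeveloperFees:
// start right after the last transaction already processed locally and stop at the last
// index the proposer declared in the mini block header.
func txRangeToVerify(lastProcessedLocally int32, lastProcessedByProposer int32, numTxs int) (first int32, last int32, err error) {
	first = lastProcessedLocally + 1
	last = lastProcessedByProposer

	// an assumed bounds check, standing in for process.CheckIfIndexesAreOutOfBound
	if first < 0 || first > last || last >= int32(numTxs) {
		return 0, 0, errIndexIsOutOfBound
	}

	return first, last, nil
}

func main() {
	// nothing processed locally yet, proposer executed the whole 3-tx mini block
	first, last, err := txRangeToVerify(-1, 2, 3)
	fmt.Println(first, last, err) // 0 2 <nil>

	// a proposer index beyond the mini block size is rejected
	_, _, err = txRangeToVerify(-1, 5, 3)
	fmt.Println(err) // index is out of bound
}
```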
dataRetrieverMock.NewPoolsHolderMock().MiniBlocks(), - RequestHandler: &testscommon.RequestHandlerStub{}, - PreProcessors: &mock.PreProcessorContainerMock{}, - InterProcessors: &mock.InterimProcessorContainerMock{}, - GasHandler: &testscommon.GasHandlerStub{}, - FeeHandler: &mock.FeeAccumulatorStub{}, - BlockSizeComputation: &testscommon.BlockSizeComputationStub{}, - BalanceComputation: &testscommon.BalanceComputationStub{}, - EconomicsFee: &mock.FeeHandlerStub{}, - TxTypeHandler: &testscommon.TxTypeHandlerMock{}, - BlockGasAndFeesReCheckEnableEpoch: 0, - TransactionsLogProcessor: &mock.TxLogsProcessorStub{}, - EpochNotifier: &epochNotifier.EpochNotifierStub{}, - ScheduledTxsExecutionHandler: &testscommon.ScheduledTxsExecutionStub{}, - ScheduledMiniBlocksEnableEpoch: 2, - DoubleTransactionsDetector: &testscommon.PanicDoubleTransactionsDetector{}, + Hasher: &hashingMocks.HasherMock{}, + Marshalizer: &mock.MarshalizerMock{}, + ShardCoordinator: mock.NewMultiShardsCoordinatorMock(5), + Accounts: &stateMock.AccountsStub{}, + MiniBlockPool: dataRetrieverMock.NewPoolsHolderMock().MiniBlocks(), + RequestHandler: &testscommon.RequestHandlerStub{}, + PreProcessors: &mock.PreProcessorContainerMock{}, + InterProcessors: &mock.InterimProcessorContainerMock{}, + GasHandler: &testscommon.GasHandlerStub{}, + FeeHandler: &mock.FeeAccumulatorStub{}, + BlockSizeComputation: &testscommon.BlockSizeComputationStub{}, + BalanceComputation: &testscommon.BalanceComputationStub{}, + EconomicsFee: &mock.FeeHandlerStub{}, + TxTypeHandler: &testscommon.TxTypeHandlerMock{}, + BlockGasAndFeesReCheckEnableEpoch: 0, + TransactionsLogProcessor: &mock.TxLogsProcessorStub{}, + EpochNotifier: &epochNotifier.EpochNotifierStub{}, + ScheduledTxsExecutionHandler: &testscommon.ScheduledTxsExecutionStub{}, + ScheduledMiniBlocksEnableEpoch: 2, + DoubleTransactionsDetector: &testscommon.PanicDoubleTransactionsDetector{}, + MiniBlockPartialExecutionEnableEpoch: 2, + ProcessedMiniBlocksTracker: &testscommon.ProcessedMiniBlocksTrackerStub{}, } return argsTransactionCoordinator @@ -454,6 +457,17 @@ func TestNewTransactionCoordinator_NilDoubleTransactionsDetector(t *testing.T) { assert.Equal(t, process.ErrNilDoubleTransactionsDetector, err) } +func TestNewTransactionCoordinator_NilProcessedMiniBlocksTracker(t *testing.T) { + t.Parallel() + + argsTransactionCoordinator := createMockTransactionCoordinatorArguments() + argsTransactionCoordinator.ProcessedMiniBlocksTracker = nil + tc, err := NewTransactionCoordinator(argsTransactionCoordinator) + + assert.True(t, check.IfNil(tc)) + assert.Equal(t, process.ErrNilProcessedMiniBlocksTracker, err) +} + func TestNewTransactionCoordinator_OK(t *testing.T) { t.Parallel() @@ -532,6 +546,7 @@ func createPreProcessorContainer() process.PreProcessorsContainer { 2, &testscommon.TxTypeHandlerMock{}, &testscommon.ScheduledTxsExecutionStub{}, + &testscommon.ProcessedMiniBlocksTrackerStub{}, ) container, _ := preFactory.Create() @@ -629,6 +644,7 @@ func createPreProcessorContainerWithDataPool( 2, &testscommon.TxTypeHandlerMock{}, &testscommon.ScheduledTxsExecutionStub{}, + &testscommon.ProcessedMiniBlocksTrackerStub{}, ) container, _ := preFactory.Create() @@ -899,6 +915,7 @@ func TestTransactionCoordinator_CreateMbsAndProcessCrossShardTransactions(t *tes 2, &testscommon.TxTypeHandlerMock{}, &testscommon.ScheduledTxsExecutionStub{}, + &testscommon.ProcessedMiniBlocksTrackerStub{}, ) container, _ := preFactory.Create() @@ -1040,6 +1057,7 @@ func 
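The test arguments now set MiniBlockPartialExecutionEnableEpoch to 2 and supply a ProcessedMiniBlocksTrackerStub; at runtime, EpochConfirmed flips flagMiniBlockPartialExecution once that epoch is reached. A tiny sketch of that activation pattern, with a simplified atomic flag standing in for the coordinator's flag type:

```go
package main

import (
	"fmt"
	"sync/atomic"
)

// epochFlag is a minimal stand-in for the atomic flag the coordinator keeps
// (flagMiniBlockPartialExecution); SetValue and IsSet keep it safe for concurrent use.
type epochFlag struct {
	value uint32
}

func (f *epochFlag) SetValue(enabled bool) {
	var v uint32
	if enabled {
		v = 1
	}
	atomic.StoreUint32(&f.value, v)
}

func (f *epochFlag) IsSet() bool {
	return atomic.LoadUint32(&f.value) == 1
}

// epochConfirmed mirrors how EpochConfirmed enables partial mini block execution
// once the configured activation epoch is reached.
func epochConfirmed(flag *epochFlag, currentEpoch uint32, enableEpoch uint32) {
	flag.SetValue(currentEpoch >= enableEpoch)
}

func main() {
	flag := &epochFlag{}
	const miniBlockPartialExecutionEnableEpoch = 2 // the value used by the updated test arguments

	epochConfirmed(flag, 1, miniBlockPartialExecutionEnableEpoch)
	fmt.Println(flag.IsSet()) // false

	epochConfirmed(flag, 2, miniBlockPartialExecutionEnableEpoch)
	fmt.Println(flag.IsSet()) // true
}
```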
TestTransactionCoordinator_CreateMbsAndProcessCrossShardTransactionsNilPreP 2, &testscommon.TxTypeHandlerMock{}, &testscommon.ScheduledTxsExecutionStub{}, + &testscommon.ProcessedMiniBlocksTrackerStub{}, ) container, _ := preFactory.Create() @@ -1150,6 +1168,7 @@ func TestTransactionCoordinator_CreateMbsAndProcessTransactionsFromMeNothingToPr 2, &testscommon.TxTypeHandlerMock{}, &testscommon.ScheduledTxsExecutionStub{}, + &testscommon.ProcessedMiniBlocksTrackerStub{}, ) container, _ := preFactory.Create() @@ -1689,6 +1708,7 @@ func TestTransactionCoordinator_ProcessBlockTransactionProcessTxError(t *testing 2, &testscommon.TxTypeHandlerMock{}, &testscommon.ScheduledTxsExecutionStub{}, + &testscommon.ProcessedMiniBlocksTrackerStub{}, ) container, _ := preFactory.Create() @@ -1709,22 +1729,24 @@ func TestTransactionCoordinator_ProcessBlockTransactionProcessTxError(t *testing body := &block.Body{} miniBlock := &block.MiniBlock{SenderShardID: 1, ReceiverShardID: 0, Type: block.TxBlock, TxHashes: [][]byte{txHash}} + miniBlockHash1, _ := core.CalculateHash(tc.marshalizer, tc.hasher, miniBlock) body.MiniBlocks = append(body.MiniBlocks, miniBlock) tc.RequestBlockTransactions(body) - err = tc.ProcessBlockTransaction(&block.Header{MiniBlockHeaders: []block.MiniBlockHeader{{Hash: []byte("mbHash")}}}, body, haveTime) + err = tc.ProcessBlockTransaction(&block.Header{MiniBlockHeaders: []block.MiniBlockHeader{{Hash: miniBlockHash1, TxCount: 1}}}, body, haveTime) assert.Equal(t, process.ErrHigherNonceInTransaction, err) noTime := func() time.Duration { return 0 } - err = tc.ProcessBlockTransaction(&block.Header{MiniBlockHeaders: []block.MiniBlockHeader{{Hash: []byte("mbHash")}}}, body, noTime) + err = tc.ProcessBlockTransaction(&block.Header{MiniBlockHeaders: []block.MiniBlockHeader{{Hash: miniBlockHash1, TxCount: 1}}}, body, noTime) assert.Equal(t, process.ErrHigherNonceInTransaction, err) txHashToAsk := []byte("tx_hashnotinPool") miniBlock = &block.MiniBlock{SenderShardID: 0, ReceiverShardID: 0, Type: block.TxBlock, TxHashes: [][]byte{txHashToAsk}} + miniBlockHash2, _ := core.CalculateHash(tc.marshalizer, tc.hasher, miniBlock) body.MiniBlocks = append(body.MiniBlocks, miniBlock) - err = tc.ProcessBlockTransaction(&block.Header{MiniBlockHeaders: []block.MiniBlockHeader{{Hash: []byte("mbHash")}}}, body, haveTime) + err = tc.ProcessBlockTransaction(&block.Header{MiniBlockHeaders: []block.MiniBlockHeader{{Hash: miniBlockHash1, TxCount: 1}, {Hash: miniBlockHash2, TxCount: 1}}}, body, haveTime) assert.Equal(t, process.ErrHigherNonceInTransaction, err) } @@ -1749,22 +1771,24 @@ func TestTransactionCoordinator_ProcessBlockTransaction(t *testing.T) { body := &block.Body{} miniBlock := &block.MiniBlock{SenderShardID: 1, ReceiverShardID: 0, Type: block.TxBlock, TxHashes: [][]byte{txHash}} + miniBlockHash1, _ := core.CalculateHash(tc.marshalizer, tc.hasher, miniBlock) body.MiniBlocks = append(body.MiniBlocks, miniBlock) tc.RequestBlockTransactions(body) - err = tc.ProcessBlockTransaction(&block.Header{MiniBlockHeaders: []block.MiniBlockHeader{{Hash: []byte("mbHash")}}}, body, haveTime) + err = tc.ProcessBlockTransaction(&block.Header{MiniBlockHeaders: []block.MiniBlockHeader{{Hash: miniBlockHash1, TxCount: 1}}}, body, haveTime) assert.Nil(t, err) noTime := func() time.Duration { return -1 } - err = tc.ProcessBlockTransaction(&block.Header{MiniBlockHeaders: []block.MiniBlockHeader{{Hash: []byte("mbHash")}}}, body, noTime) + err = tc.ProcessBlockTransaction(&block.Header{MiniBlockHeaders: 
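The ProcessBlockTransaction tests now hash the actual mini block with core.CalculateHash instead of using a literal "mbHash", because the coordinator looks mini block headers up by the real hash (and by TxCount). A stand-alone sketch of the marshal-then-hash pattern, with JSON and SHA-256 standing in for the node's marshalizer and hasher:

```go
package main

import (
	"crypto/sha256"
	"encoding/json"
	"fmt"
)

// miniBlock is a trimmed-down stand-in for block.MiniBlock, enough to show the idea.
type miniBlock struct {
	TxHashes        [][]byte
	SenderShardID   uint32
	ReceiverShardID uint32
}

// calculateHash mirrors the core.CalculateHash pattern used by the updated tests:
// marshal the object, then hash the resulting bytes.
func calculateHash(obj interface{}) ([]byte, error) {
	buff, err := json.Marshal(obj)
	if err != nil {
		return nil, err
	}
	hash := sha256.Sum256(buff)
	return hash[:], nil
}

func main() {
	mb := &miniBlock{SenderShardID: 1, ReceiverShardID: 0, TxHashes: [][]byte{[]byte("tx_hash1")}}

	// the header entry must reference the real hash of the mini block it describes
	mbHash, _ := calculateHash(mb)
	fmt.Printf("%x\n", mbHash)
}
```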
[]block.MiniBlockHeader{{Hash: miniBlockHash1, TxCount: 1}}}, body, noTime) assert.Equal(t, process.ErrTimeIsOut, err) txHashToAsk := []byte("tx_hashnotinPool") miniBlock = &block.MiniBlock{SenderShardID: 0, ReceiverShardID: 0, Type: block.TxBlock, TxHashes: [][]byte{txHashToAsk}} + miniBlockHash2, _ := core.CalculateHash(tc.marshalizer, tc.hasher, miniBlock) body.MiniBlocks = append(body.MiniBlocks, miniBlock) - err = tc.ProcessBlockTransaction(&block.Header{MiniBlockHeaders: []block.MiniBlockHeader{{Hash: []byte("mbHash")}}}, body, haveTime) + err = tc.ProcessBlockTransaction(&block.Header{MiniBlockHeaders: []block.MiniBlockHeader{{Hash: miniBlockHash1, TxCount: 1}, {Hash: miniBlockHash2, TxCount: 1}}}, body, haveTime) assert.Equal(t, process.ErrMissingTransaction, err) } @@ -1813,6 +1837,7 @@ func TestTransactionCoordinator_RequestMiniblocks(t *testing.T) { 2, &testscommon.TxTypeHandlerMock{}, &testscommon.ScheduledTxsExecutionStub{}, + &testscommon.ProcessedMiniBlocksTrackerStub{}, ) container, _ := preFactory.Create() @@ -1954,6 +1979,7 @@ func TestShardProcessor_ProcessMiniBlockCompleteWithOkTxsShouldExecuteThemAndNot 2, &testscommon.TxTypeHandlerMock{}, &testscommon.ScheduledTxsExecutionStub{}, + &testscommon.ProcessedMiniBlocksTrackerStub{}, ) container, _ := preFactory.Create() @@ -1978,7 +2004,11 @@ func TestShardProcessor_ProcessMiniBlockCompleteWithOkTxsShouldExecuteThemAndNot return false } preproc := tc.getPreProcessor(block.TxBlock) - err = tc.processCompleteMiniBlock(preproc, &miniBlock, []byte("hash"), haveTime, haveAdditionalTime, false) + processedMbInfo := &processedMb.ProcessedMiniBlockInfo{ + IndexOfLastTxProcessed: -1, + FullyProcessed: false, + } + err = tc.processCompleteMiniBlock(preproc, &miniBlock, []byte("hash"), haveTime, haveAdditionalTime, false, processedMbInfo) assert.Nil(t, err) assert.Equal(t, tx1Nonce, tx1ExecutionResult) @@ -2092,6 +2122,7 @@ func TestShardProcessor_ProcessMiniBlockCompleteWithErrorWhileProcessShouldCallR 2, &testscommon.TxTypeHandlerMock{}, &testscommon.ScheduledTxsExecutionStub{}, + &testscommon.ProcessedMiniBlocksTrackerStub{}, ) container, _ := preFactory.Create() @@ -2120,7 +2151,11 @@ func TestShardProcessor_ProcessMiniBlockCompleteWithErrorWhileProcessShouldCallR return false } preproc := tc.getPreProcessor(block.TxBlock) - err = tc.processCompleteMiniBlock(preproc, &miniBlock, []byte("hash"), haveTime, haveAdditionalTime, false) + processedMbInfo := &processedMb.ProcessedMiniBlockInfo{ + IndexOfLastTxProcessed: -1, + FullyProcessed: false, + } + err = tc.processCompleteMiniBlock(preproc, &miniBlock, []byte("hash"), haveTime, haveAdditionalTime, false, processedMbInfo) assert.Equal(t, process.ErrHigherNonceInTransaction, err) assert.True(t, revertAccntStateCalled) @@ -2385,7 +2420,7 @@ func TestTransactionCoordinator_GetNumOfCrossInterMbsAndTxsShouldWork(t *testing }, } - numMbs, numTxs := tc.getNumOfCrossInterMbsAndTxs() + numMbs, numTxs := tc.GetNumOfCrossInterMbsAndTxs() assert.Equal(t, 5, numMbs) assert.Equal(t, 10, numTxs) @@ -2507,26 +2542,28 @@ func TestTransactionCoordinator_VerifyCreatedMiniBlocksShouldReturnWhenEpochIsNo dataPool := initDataPool(txHash) txCoordinatorArgs := ArgTransactionCoordinator{ - Hasher: &hashingMocks.HasherMock{}, - Marshalizer: &mock.MarshalizerMock{}, - ShardCoordinator: mock.NewMultiShardsCoordinatorMock(3), - Accounts: initAccountsMock(), - MiniBlockPool: dataPool.MiniBlocks(), - RequestHandler: &testscommon.RequestHandlerStub{}, - PreProcessors: 
createPreProcessorContainerWithDataPool(dataPool, FeeHandlerMock()), - InterProcessors: createInterimProcessorContainer(), - GasHandler: &testscommon.GasHandlerStub{}, - FeeHandler: &mock.FeeAccumulatorStub{}, - BlockSizeComputation: &testscommon.BlockSizeComputationStub{}, - BalanceComputation: &testscommon.BalanceComputationStub{}, - EconomicsFee: &mock.FeeHandlerStub{}, - TxTypeHandler: &testscommon.TxTypeHandlerMock{}, - BlockGasAndFeesReCheckEnableEpoch: 1, - TransactionsLogProcessor: &mock.TxLogsProcessorStub{}, - EpochNotifier: &epochNotifier.EpochNotifierStub{}, - ScheduledTxsExecutionHandler: &testscommon.ScheduledTxsExecutionStub{}, - ScheduledMiniBlocksEnableEpoch: 2, - DoubleTransactionsDetector: &testscommon.PanicDoubleTransactionsDetector{}, + Hasher: &hashingMocks.HasherMock{}, + Marshalizer: &mock.MarshalizerMock{}, + ShardCoordinator: mock.NewMultiShardsCoordinatorMock(3), + Accounts: initAccountsMock(), + MiniBlockPool: dataPool.MiniBlocks(), + RequestHandler: &testscommon.RequestHandlerStub{}, + PreProcessors: createPreProcessorContainerWithDataPool(dataPool, FeeHandlerMock()), + InterProcessors: createInterimProcessorContainer(), + GasHandler: &testscommon.GasHandlerStub{}, + FeeHandler: &mock.FeeAccumulatorStub{}, + BlockSizeComputation: &testscommon.BlockSizeComputationStub{}, + BalanceComputation: &testscommon.BalanceComputationStub{}, + EconomicsFee: &mock.FeeHandlerStub{}, + TxTypeHandler: &testscommon.TxTypeHandlerMock{}, + BlockGasAndFeesReCheckEnableEpoch: 1, + TransactionsLogProcessor: &mock.TxLogsProcessorStub{}, + EpochNotifier: &epochNotifier.EpochNotifierStub{}, + ScheduledTxsExecutionHandler: &testscommon.ScheduledTxsExecutionStub{}, + ScheduledMiniBlocksEnableEpoch: 2, + DoubleTransactionsDetector: &testscommon.PanicDoubleTransactionsDetector{}, + MiniBlockPartialExecutionEnableEpoch: 2, + ProcessedMiniBlocksTracker: &testscommon.ProcessedMiniBlocksTrackerStub{}, } tc, err := NewTransactionCoordinator(txCoordinatorArgs) assert.Nil(t, err) @@ -2568,13 +2605,15 @@ func TestTransactionCoordinator_VerifyCreatedMiniBlocksShouldErrMaxGasLimitPerMi return maxGasLimitPerBlock }, }, - TxTypeHandler: &testscommon.TxTypeHandlerMock{}, - BlockGasAndFeesReCheckEnableEpoch: 0, - TransactionsLogProcessor: &mock.TxLogsProcessorStub{}, - EpochNotifier: &epochNotifier.EpochNotifierStub{}, - ScheduledTxsExecutionHandler: &testscommon.ScheduledTxsExecutionStub{}, - ScheduledMiniBlocksEnableEpoch: 2, - DoubleTransactionsDetector: &testscommon.PanicDoubleTransactionsDetector{}, + TxTypeHandler: &testscommon.TxTypeHandlerMock{}, + BlockGasAndFeesReCheckEnableEpoch: 0, + TransactionsLogProcessor: &mock.TxLogsProcessorStub{}, + EpochNotifier: &epochNotifier.EpochNotifierStub{}, + ScheduledTxsExecutionHandler: &testscommon.ScheduledTxsExecutionStub{}, + ScheduledMiniBlocksEnableEpoch: 2, + DoubleTransactionsDetector: &testscommon.PanicDoubleTransactionsDetector{}, + MiniBlockPartialExecutionEnableEpoch: 2, + ProcessedMiniBlocksTracker: &testscommon.ProcessedMiniBlocksTrackerStub{}, } tc, err := NewTransactionCoordinator(txCoordinatorArgs) @@ -2640,13 +2679,15 @@ func TestTransactionCoordinator_VerifyCreatedMiniBlocksShouldErrMaxAccumulatedFe return 0.1 }, }, - TxTypeHandler: &testscommon.TxTypeHandlerMock{}, - BlockGasAndFeesReCheckEnableEpoch: 0, - TransactionsLogProcessor: &mock.TxLogsProcessorStub{}, - EpochNotifier: &epochNotifier.EpochNotifierStub{}, - ScheduledTxsExecutionHandler: &testscommon.ScheduledTxsExecutionStub{}, - ScheduledMiniBlocksEnableEpoch: 2, - 
DoubleTransactionsDetector: &testscommon.PanicDoubleTransactionsDetector{}, + TxTypeHandler: &testscommon.TxTypeHandlerMock{}, + BlockGasAndFeesReCheckEnableEpoch: 0, + TransactionsLogProcessor: &mock.TxLogsProcessorStub{}, + EpochNotifier: &epochNotifier.EpochNotifierStub{}, + ScheduledTxsExecutionHandler: &testscommon.ScheduledTxsExecutionStub{}, + ScheduledMiniBlocksEnableEpoch: 2, + DoubleTransactionsDetector: &testscommon.PanicDoubleTransactionsDetector{}, + MiniBlockPartialExecutionEnableEpoch: 2, + ProcessedMiniBlocksTracker: &testscommon.ProcessedMiniBlocksTrackerStub{}, } tc, err := NewTransactionCoordinator(txCoordinatorArgs) @@ -2667,7 +2708,7 @@ func TestTransactionCoordinator_VerifyCreatedMiniBlocksShouldErrMaxAccumulatedFe header := &block.Header{ AccumulatedFees: big.NewInt(101), DeveloperFees: big.NewInt(10), - MiniBlockHeaders: []block.MiniBlockHeader{{}}, + MiniBlockHeaders: []block.MiniBlockHeader{{TxCount: 1}}, } body := &block.Body{ MiniBlocks: []*block.MiniBlock{ @@ -2717,13 +2758,15 @@ func TestTransactionCoordinator_VerifyCreatedMiniBlocksShouldErrMaxDeveloperFees return 0.1 }, }, - TxTypeHandler: &testscommon.TxTypeHandlerMock{}, - BlockGasAndFeesReCheckEnableEpoch: 0, - TransactionsLogProcessor: &mock.TxLogsProcessorStub{}, - EpochNotifier: &epochNotifier.EpochNotifierStub{}, - ScheduledTxsExecutionHandler: &testscommon.ScheduledTxsExecutionStub{}, - ScheduledMiniBlocksEnableEpoch: 2, - DoubleTransactionsDetector: &testscommon.PanicDoubleTransactionsDetector{}, + TxTypeHandler: &testscommon.TxTypeHandlerMock{}, + BlockGasAndFeesReCheckEnableEpoch: 0, + TransactionsLogProcessor: &mock.TxLogsProcessorStub{}, + EpochNotifier: &epochNotifier.EpochNotifierStub{}, + ScheduledTxsExecutionHandler: &testscommon.ScheduledTxsExecutionStub{}, + ScheduledMiniBlocksEnableEpoch: 2, + DoubleTransactionsDetector: &testscommon.PanicDoubleTransactionsDetector{}, + MiniBlockPartialExecutionEnableEpoch: 2, + ProcessedMiniBlocksTracker: &testscommon.ProcessedMiniBlocksTrackerStub{}, } tc, err := NewTransactionCoordinator(txCoordinatorArgs) @@ -2744,7 +2787,7 @@ func TestTransactionCoordinator_VerifyCreatedMiniBlocksShouldErrMaxDeveloperFees header := &block.Header{ AccumulatedFees: big.NewInt(100), DeveloperFees: big.NewInt(11), - MiniBlockHeaders: []block.MiniBlockHeader{{}}, + MiniBlockHeaders: []block.MiniBlockHeader{{TxCount: 1}}, } body := &block.Body{ MiniBlocks: []*block.MiniBlock{ @@ -2794,13 +2837,15 @@ func TestTransactionCoordinator_VerifyCreatedMiniBlocksShouldWork(t *testing.T) return 0.1 }, }, - TxTypeHandler: &testscommon.TxTypeHandlerMock{}, - BlockGasAndFeesReCheckEnableEpoch: 0, - TransactionsLogProcessor: &mock.TxLogsProcessorStub{}, - EpochNotifier: &epochNotifier.EpochNotifierStub{}, - ScheduledTxsExecutionHandler: &testscommon.ScheduledTxsExecutionStub{}, - ScheduledMiniBlocksEnableEpoch: 2, - DoubleTransactionsDetector: &testscommon.PanicDoubleTransactionsDetector{}, + TxTypeHandler: &testscommon.TxTypeHandlerMock{}, + BlockGasAndFeesReCheckEnableEpoch: 0, + TransactionsLogProcessor: &mock.TxLogsProcessorStub{}, + EpochNotifier: &epochNotifier.EpochNotifierStub{}, + ScheduledTxsExecutionHandler: &testscommon.ScheduledTxsExecutionStub{}, + ScheduledMiniBlocksEnableEpoch: 2, + DoubleTransactionsDetector: &testscommon.PanicDoubleTransactionsDetector{}, + MiniBlockPartialExecutionEnableEpoch: 2, + ProcessedMiniBlocksTracker: &testscommon.ProcessedMiniBlocksTrackerStub{}, } tc, err := NewTransactionCoordinator(txCoordinatorArgs) @@ -2821,7 +2866,7 @@ func 
TestTransactionCoordinator_VerifyCreatedMiniBlocksShouldWork(t *testing.T) header := &block.Header{ AccumulatedFees: big.NewInt(100), DeveloperFees: big.NewInt(10), - MiniBlockHeaders: []block.MiniBlockHeader{{}}, + MiniBlockHeaders: []block.MiniBlockHeader{{TxCount: 1}}, } body := &block.Body{ MiniBlocks: []*block.MiniBlock{ @@ -2841,26 +2886,28 @@ func TestTransactionCoordinator_GetAllTransactionsShouldWork(t *testing.T) { dataPool := initDataPool(txHash) txCoordinatorArgs := ArgTransactionCoordinator{ - Hasher: &hashingMocks.HasherMock{}, - Marshalizer: &mock.MarshalizerMock{}, - ShardCoordinator: mock.NewMultiShardsCoordinatorMock(3), - Accounts: initAccountsMock(), - MiniBlockPool: dataPool.MiniBlocks(), - RequestHandler: &testscommon.RequestHandlerStub{}, - PreProcessors: createPreProcessorContainerWithDataPool(dataPool, FeeHandlerMock()), - InterProcessors: createInterimProcessorContainer(), - GasHandler: &testscommon.GasHandlerStub{}, - FeeHandler: &mock.FeeAccumulatorStub{}, - BlockSizeComputation: &testscommon.BlockSizeComputationStub{}, - BalanceComputation: &testscommon.BalanceComputationStub{}, - EconomicsFee: &mock.FeeHandlerStub{}, - TxTypeHandler: &testscommon.TxTypeHandlerMock{}, - BlockGasAndFeesReCheckEnableEpoch: 0, - TransactionsLogProcessor: &mock.TxLogsProcessorStub{}, - EpochNotifier: &epochNotifier.EpochNotifierStub{}, - ScheduledTxsExecutionHandler: &testscommon.ScheduledTxsExecutionStub{}, - ScheduledMiniBlocksEnableEpoch: 2, - DoubleTransactionsDetector: &testscommon.PanicDoubleTransactionsDetector{}, + Hasher: &hashingMocks.HasherMock{}, + Marshalizer: &mock.MarshalizerMock{}, + ShardCoordinator: mock.NewMultiShardsCoordinatorMock(3), + Accounts: initAccountsMock(), + MiniBlockPool: dataPool.MiniBlocks(), + RequestHandler: &testscommon.RequestHandlerStub{}, + PreProcessors: createPreProcessorContainerWithDataPool(dataPool, FeeHandlerMock()), + InterProcessors: createInterimProcessorContainer(), + GasHandler: &testscommon.GasHandlerStub{}, + FeeHandler: &mock.FeeAccumulatorStub{}, + BlockSizeComputation: &testscommon.BlockSizeComputationStub{}, + BalanceComputation: &testscommon.BalanceComputationStub{}, + EconomicsFee: &mock.FeeHandlerStub{}, + TxTypeHandler: &testscommon.TxTypeHandlerMock{}, + BlockGasAndFeesReCheckEnableEpoch: 0, + TransactionsLogProcessor: &mock.TxLogsProcessorStub{}, + EpochNotifier: &epochNotifier.EpochNotifierStub{}, + ScheduledTxsExecutionHandler: &testscommon.ScheduledTxsExecutionStub{}, + ScheduledMiniBlocksEnableEpoch: 2, + DoubleTransactionsDetector: &testscommon.PanicDoubleTransactionsDetector{}, + MiniBlockPartialExecutionEnableEpoch: 2, + ProcessedMiniBlocksTracker: &testscommon.ProcessedMiniBlocksTrackerStub{}, } tc, err := NewTransactionCoordinator(txCoordinatorArgs) assert.Nil(t, err) @@ -2937,13 +2984,15 @@ func TestTransactionCoordinator_VerifyGasLimitShouldErrMaxGasLimitPerMiniBlockIn return tx.GetGasLimit() }, }, - TxTypeHandler: &testscommon.TxTypeHandlerMock{}, - BlockGasAndFeesReCheckEnableEpoch: 0, - TransactionsLogProcessor: &mock.TxLogsProcessorStub{}, - EpochNotifier: &epochNotifier.EpochNotifierStub{}, - ScheduledTxsExecutionHandler: &testscommon.ScheduledTxsExecutionStub{}, - ScheduledMiniBlocksEnableEpoch: 2, - DoubleTransactionsDetector: &testscommon.PanicDoubleTransactionsDetector{}, + TxTypeHandler: &testscommon.TxTypeHandlerMock{}, + BlockGasAndFeesReCheckEnableEpoch: 0, + TransactionsLogProcessor: &mock.TxLogsProcessorStub{}, + EpochNotifier: &epochNotifier.EpochNotifierStub{}, + ScheduledTxsExecutionHandler: 
&testscommon.ScheduledTxsExecutionStub{}, + ScheduledMiniBlocksEnableEpoch: 2, + DoubleTransactionsDetector: &testscommon.PanicDoubleTransactionsDetector{}, + MiniBlockPartialExecutionEnableEpoch: 2, + ProcessedMiniBlocksTracker: &testscommon.ProcessedMiniBlocksTrackerStub{}, } tc, err := NewTransactionCoordinator(txCoordinatorArgs) assert.Nil(t, err) @@ -3030,13 +3079,15 @@ func TestTransactionCoordinator_VerifyGasLimitShouldWork(t *testing.T) { return tx.GetGasLimit() }, }, - TxTypeHandler: &testscommon.TxTypeHandlerMock{}, - BlockGasAndFeesReCheckEnableEpoch: 0, - TransactionsLogProcessor: &mock.TxLogsProcessorStub{}, - EpochNotifier: &epochNotifier.EpochNotifierStub{}, - ScheduledTxsExecutionHandler: &testscommon.ScheduledTxsExecutionStub{}, - ScheduledMiniBlocksEnableEpoch: 2, - DoubleTransactionsDetector: &testscommon.PanicDoubleTransactionsDetector{}, + TxTypeHandler: &testscommon.TxTypeHandlerMock{}, + BlockGasAndFeesReCheckEnableEpoch: 0, + TransactionsLogProcessor: &mock.TxLogsProcessorStub{}, + EpochNotifier: &epochNotifier.EpochNotifierStub{}, + ScheduledTxsExecutionHandler: &testscommon.ScheduledTxsExecutionStub{}, + ScheduledMiniBlocksEnableEpoch: 2, + DoubleTransactionsDetector: &testscommon.PanicDoubleTransactionsDetector{}, + MiniBlockPartialExecutionEnableEpoch: 2, + ProcessedMiniBlocksTracker: &testscommon.ProcessedMiniBlocksTrackerStub{}, } tc, err := NewTransactionCoordinator(txCoordinatorArgs) assert.Nil(t, err) @@ -3096,26 +3147,28 @@ func TestTransactionCoordinator_CheckGasProvidedByMiniBlockInReceiverShardShould dataPool := initDataPool(txHash) txCoordinatorArgs := ArgTransactionCoordinator{ - Hasher: &hashingMocks.HasherMock{}, - Marshalizer: &mock.MarshalizerMock{}, - ShardCoordinator: mock.NewMultiShardsCoordinatorMock(3), - Accounts: initAccountsMock(), - MiniBlockPool: dataPool.MiniBlocks(), - RequestHandler: &testscommon.RequestHandlerStub{}, - PreProcessors: createPreProcessorContainerWithDataPool(dataPool, FeeHandlerMock()), - InterProcessors: createInterimProcessorContainer(), - GasHandler: &testscommon.GasHandlerStub{}, - FeeHandler: &mock.FeeAccumulatorStub{}, - BlockSizeComputation: &testscommon.BlockSizeComputationStub{}, - BalanceComputation: &testscommon.BalanceComputationStub{}, - EconomicsFee: &mock.FeeHandlerStub{}, - TxTypeHandler: &testscommon.TxTypeHandlerMock{}, - BlockGasAndFeesReCheckEnableEpoch: 0, - TransactionsLogProcessor: &mock.TxLogsProcessorStub{}, - EpochNotifier: &epochNotifier.EpochNotifierStub{}, - ScheduledTxsExecutionHandler: &testscommon.ScheduledTxsExecutionStub{}, - ScheduledMiniBlocksEnableEpoch: 2, - DoubleTransactionsDetector: &testscommon.PanicDoubleTransactionsDetector{}, + Hasher: &hashingMocks.HasherMock{}, + Marshalizer: &mock.MarshalizerMock{}, + ShardCoordinator: mock.NewMultiShardsCoordinatorMock(3), + Accounts: initAccountsMock(), + MiniBlockPool: dataPool.MiniBlocks(), + RequestHandler: &testscommon.RequestHandlerStub{}, + PreProcessors: createPreProcessorContainerWithDataPool(dataPool, FeeHandlerMock()), + InterProcessors: createInterimProcessorContainer(), + GasHandler: &testscommon.GasHandlerStub{}, + FeeHandler: &mock.FeeAccumulatorStub{}, + BlockSizeComputation: &testscommon.BlockSizeComputationStub{}, + BalanceComputation: &testscommon.BalanceComputationStub{}, + EconomicsFee: &mock.FeeHandlerStub{}, + TxTypeHandler: &testscommon.TxTypeHandlerMock{}, + BlockGasAndFeesReCheckEnableEpoch: 0, + TransactionsLogProcessor: &mock.TxLogsProcessorStub{}, + EpochNotifier: &epochNotifier.EpochNotifierStub{}, + 
ScheduledTxsExecutionHandler: &testscommon.ScheduledTxsExecutionStub{}, + ScheduledMiniBlocksEnableEpoch: 2, + DoubleTransactionsDetector: &testscommon.PanicDoubleTransactionsDetector{}, + MiniBlockPartialExecutionEnableEpoch: 2, + ProcessedMiniBlocksTracker: &testscommon.ProcessedMiniBlocksTrackerStub{}, } tc, err := NewTransactionCoordinator(txCoordinatorArgs) assert.Nil(t, err) @@ -3160,12 +3213,14 @@ func TestTransactionCoordinator_CheckGasProvidedByMiniBlockInReceiverShardShould return process.MoveBalance, process.SCInvoking }, }, - BlockGasAndFeesReCheckEnableEpoch: 0, - TransactionsLogProcessor: &mock.TxLogsProcessorStub{}, - EpochNotifier: &epochNotifier.EpochNotifierStub{}, - ScheduledTxsExecutionHandler: &testscommon.ScheduledTxsExecutionStub{}, - ScheduledMiniBlocksEnableEpoch: 2, - DoubleTransactionsDetector: &testscommon.PanicDoubleTransactionsDetector{}, + BlockGasAndFeesReCheckEnableEpoch: 0, + TransactionsLogProcessor: &mock.TxLogsProcessorStub{}, + EpochNotifier: &epochNotifier.EpochNotifierStub{}, + ScheduledTxsExecutionHandler: &testscommon.ScheduledTxsExecutionStub{}, + ScheduledMiniBlocksEnableEpoch: 2, + DoubleTransactionsDetector: &testscommon.PanicDoubleTransactionsDetector{}, + MiniBlockPartialExecutionEnableEpoch: 2, + ProcessedMiniBlocksTracker: &testscommon.ProcessedMiniBlocksTrackerStub{}, } tc, err := NewTransactionCoordinator(txCoordinatorArgs) assert.Nil(t, err) @@ -3217,12 +3272,14 @@ func TestTransactionCoordinator_CheckGasProvidedByMiniBlockInReceiverShardShould return process.MoveBalance, process.SCInvoking }, }, - BlockGasAndFeesReCheckEnableEpoch: 0, - TransactionsLogProcessor: &mock.TxLogsProcessorStub{}, - EpochNotifier: &epochNotifier.EpochNotifierStub{}, - ScheduledTxsExecutionHandler: &testscommon.ScheduledTxsExecutionStub{}, - ScheduledMiniBlocksEnableEpoch: 2, - DoubleTransactionsDetector: &testscommon.PanicDoubleTransactionsDetector{}, + BlockGasAndFeesReCheckEnableEpoch: 0, + TransactionsLogProcessor: &mock.TxLogsProcessorStub{}, + EpochNotifier: &epochNotifier.EpochNotifierStub{}, + ScheduledTxsExecutionHandler: &testscommon.ScheduledTxsExecutionStub{}, + ScheduledMiniBlocksEnableEpoch: 2, + DoubleTransactionsDetector: &testscommon.PanicDoubleTransactionsDetector{}, + MiniBlockPartialExecutionEnableEpoch: 2, + ProcessedMiniBlocksTracker: &testscommon.ProcessedMiniBlocksTrackerStub{}, } tc, err := NewTransactionCoordinator(txCoordinatorArgs) assert.Nil(t, err) @@ -3280,13 +3337,15 @@ func TestTransactionCoordinator_CheckGasProvidedByMiniBlockInReceiverShardShould return tx.GetGasLimit() }, }, - TxTypeHandler: &testscommon.TxTypeHandlerMock{}, - BlockGasAndFeesReCheckEnableEpoch: 0, - TransactionsLogProcessor: &mock.TxLogsProcessorStub{}, - EpochNotifier: &epochNotifier.EpochNotifierStub{}, - ScheduledTxsExecutionHandler: &testscommon.ScheduledTxsExecutionStub{}, - ScheduledMiniBlocksEnableEpoch: 2, - DoubleTransactionsDetector: &testscommon.PanicDoubleTransactionsDetector{}, + TxTypeHandler: &testscommon.TxTypeHandlerMock{}, + BlockGasAndFeesReCheckEnableEpoch: 0, + TransactionsLogProcessor: &mock.TxLogsProcessorStub{}, + EpochNotifier: &epochNotifier.EpochNotifierStub{}, + ScheduledTxsExecutionHandler: &testscommon.ScheduledTxsExecutionStub{}, + ScheduledMiniBlocksEnableEpoch: 2, + DoubleTransactionsDetector: &testscommon.PanicDoubleTransactionsDetector{}, + MiniBlockPartialExecutionEnableEpoch: 2, + ProcessedMiniBlocksTracker: &testscommon.ProcessedMiniBlocksTrackerStub{}, } tc, err := NewTransactionCoordinator(txCoordinatorArgs) 
assert.Nil(t, err) @@ -3347,13 +3406,15 @@ func TestTransactionCoordinator_CheckGasProvidedByMiniBlockInReceiverShardShould return tx.GetGasLimit() }, }, - TxTypeHandler: &testscommon.TxTypeHandlerMock{}, - BlockGasAndFeesReCheckEnableEpoch: 0, - TransactionsLogProcessor: &mock.TxLogsProcessorStub{}, - EpochNotifier: &epochNotifier.EpochNotifierStub{}, - ScheduledTxsExecutionHandler: &testscommon.ScheduledTxsExecutionStub{}, - ScheduledMiniBlocksEnableEpoch: 2, - DoubleTransactionsDetector: &testscommon.PanicDoubleTransactionsDetector{}, + TxTypeHandler: &testscommon.TxTypeHandlerMock{}, + BlockGasAndFeesReCheckEnableEpoch: 0, + TransactionsLogProcessor: &mock.TxLogsProcessorStub{}, + EpochNotifier: &epochNotifier.EpochNotifierStub{}, + ScheduledTxsExecutionHandler: &testscommon.ScheduledTxsExecutionStub{}, + ScheduledMiniBlocksEnableEpoch: 2, + DoubleTransactionsDetector: &testscommon.PanicDoubleTransactionsDetector{}, + MiniBlockPartialExecutionEnableEpoch: 2, + ProcessedMiniBlocksTracker: &testscommon.ProcessedMiniBlocksTrackerStub{}, } tc, err := NewTransactionCoordinator(txCoordinatorArgs) @@ -3388,26 +3449,28 @@ func TestTransactionCoordinator_VerifyFeesShouldErrMissingTransaction(t *testing dataPool := initDataPool(txHash) txCoordinatorArgs := ArgTransactionCoordinator{ - Hasher: &hashingMocks.HasherMock{}, - Marshalizer: &mock.MarshalizerMock{}, - ShardCoordinator: mock.NewMultiShardsCoordinatorMock(3), - Accounts: initAccountsMock(), - MiniBlockPool: dataPool.MiniBlocks(), - RequestHandler: &testscommon.RequestHandlerStub{}, - PreProcessors: createPreProcessorContainerWithDataPool(dataPool, FeeHandlerMock()), - InterProcessors: createInterimProcessorContainer(), - GasHandler: &testscommon.GasHandlerStub{}, - FeeHandler: &mock.FeeAccumulatorStub{}, - BlockSizeComputation: &testscommon.BlockSizeComputationStub{}, - BalanceComputation: &testscommon.BalanceComputationStub{}, - EconomicsFee: &mock.FeeHandlerStub{}, - TxTypeHandler: &testscommon.TxTypeHandlerMock{}, - BlockGasAndFeesReCheckEnableEpoch: 0, - TransactionsLogProcessor: &mock.TxLogsProcessorStub{}, - EpochNotifier: &epochNotifier.EpochNotifierStub{}, - ScheduledTxsExecutionHandler: &testscommon.ScheduledTxsExecutionStub{}, - ScheduledMiniBlocksEnableEpoch: 2, - DoubleTransactionsDetector: &testscommon.PanicDoubleTransactionsDetector{}, + Hasher: &hashingMocks.HasherMock{}, + Marshalizer: &mock.MarshalizerMock{}, + ShardCoordinator: mock.NewMultiShardsCoordinatorMock(3), + Accounts: initAccountsMock(), + MiniBlockPool: dataPool.MiniBlocks(), + RequestHandler: &testscommon.RequestHandlerStub{}, + PreProcessors: createPreProcessorContainerWithDataPool(dataPool, FeeHandlerMock()), + InterProcessors: createInterimProcessorContainer(), + GasHandler: &testscommon.GasHandlerStub{}, + FeeHandler: &mock.FeeAccumulatorStub{}, + BlockSizeComputation: &testscommon.BlockSizeComputationStub{}, + BalanceComputation: &testscommon.BalanceComputationStub{}, + EconomicsFee: &mock.FeeHandlerStub{}, + TxTypeHandler: &testscommon.TxTypeHandlerMock{}, + BlockGasAndFeesReCheckEnableEpoch: 0, + TransactionsLogProcessor: &mock.TxLogsProcessorStub{}, + EpochNotifier: &epochNotifier.EpochNotifierStub{}, + ScheduledTxsExecutionHandler: &testscommon.ScheduledTxsExecutionStub{}, + ScheduledMiniBlocksEnableEpoch: 2, + DoubleTransactionsDetector: &testscommon.PanicDoubleTransactionsDetector{}, + MiniBlockPartialExecutionEnableEpoch: 2, + ProcessedMiniBlocksTracker: &testscommon.ProcessedMiniBlocksTrackerStub{}, } tc, err := 
NewTransactionCoordinator(txCoordinatorArgs) @@ -3419,7 +3482,7 @@ func TestTransactionCoordinator_VerifyFeesShouldErrMissingTransaction(t *testing header := &block.Header{ AccumulatedFees: big.NewInt(100), DeveloperFees: big.NewInt(10), - MiniBlockHeaders: []block.MiniBlockHeader{{}}, + MiniBlockHeaders: []block.MiniBlockHeader{{TxCount: 1}}, } body := &block.Body{ @@ -3460,13 +3523,15 @@ func TestTransactionCoordinator_VerifyFeesShouldErrMaxAccumulatedFeesExceeded(t return 0.1 }, }, - TxTypeHandler: &testscommon.TxTypeHandlerMock{}, - BlockGasAndFeesReCheckEnableEpoch: 0, - TransactionsLogProcessor: &mock.TxLogsProcessorStub{}, - EpochNotifier: &epochNotifier.EpochNotifierStub{}, - ScheduledTxsExecutionHandler: &testscommon.ScheduledTxsExecutionStub{}, - ScheduledMiniBlocksEnableEpoch: 2, - DoubleTransactionsDetector: &testscommon.PanicDoubleTransactionsDetector{}, + TxTypeHandler: &testscommon.TxTypeHandlerMock{}, + BlockGasAndFeesReCheckEnableEpoch: 0, + TransactionsLogProcessor: &mock.TxLogsProcessorStub{}, + EpochNotifier: &epochNotifier.EpochNotifierStub{}, + ScheduledTxsExecutionHandler: &testscommon.ScheduledTxsExecutionStub{}, + ScheduledMiniBlocksEnableEpoch: 2, + DoubleTransactionsDetector: &testscommon.PanicDoubleTransactionsDetector{}, + MiniBlockPartialExecutionEnableEpoch: 2, + ProcessedMiniBlocksTracker: &testscommon.ProcessedMiniBlocksTrackerStub{}, } tc, err := NewTransactionCoordinator(txCoordinatorArgs) @@ -3485,7 +3550,7 @@ func TestTransactionCoordinator_VerifyFeesShouldErrMaxAccumulatedFeesExceeded(t header := &block.Header{ AccumulatedFees: big.NewInt(101), DeveloperFees: big.NewInt(10), - MiniBlockHeaders: []block.MiniBlockHeader{{}, {}}, + MiniBlockHeaders: []block.MiniBlockHeader{{TxCount: 1}, {TxCount: 1}}, } body := &block.Body{ @@ -3529,13 +3594,15 @@ func TestTransactionCoordinator_VerifyFeesShouldErrMaxDeveloperFeesExceeded(t *t return 0.1 }, }, - TxTypeHandler: &testscommon.TxTypeHandlerMock{}, - BlockGasAndFeesReCheckEnableEpoch: 0, - TransactionsLogProcessor: &mock.TxLogsProcessorStub{}, - EpochNotifier: &epochNotifier.EpochNotifierStub{}, - ScheduledTxsExecutionHandler: &testscommon.ScheduledTxsExecutionStub{}, - ScheduledMiniBlocksEnableEpoch: 2, - DoubleTransactionsDetector: &testscommon.PanicDoubleTransactionsDetector{}, + TxTypeHandler: &testscommon.TxTypeHandlerMock{}, + BlockGasAndFeesReCheckEnableEpoch: 0, + TransactionsLogProcessor: &mock.TxLogsProcessorStub{}, + EpochNotifier: &epochNotifier.EpochNotifierStub{}, + ScheduledTxsExecutionHandler: &testscommon.ScheduledTxsExecutionStub{}, + ScheduledMiniBlocksEnableEpoch: 2, + DoubleTransactionsDetector: &testscommon.PanicDoubleTransactionsDetector{}, + MiniBlockPartialExecutionEnableEpoch: 2, + ProcessedMiniBlocksTracker: &testscommon.ProcessedMiniBlocksTrackerStub{}, } tc, err := NewTransactionCoordinator(txCoordinatorArgs) assert.Nil(t, err) @@ -3553,7 +3620,7 @@ func TestTransactionCoordinator_VerifyFeesShouldErrMaxDeveloperFeesExceeded(t *t header := &block.Header{ AccumulatedFees: big.NewInt(100), DeveloperFees: big.NewInt(11), - MiniBlockHeaders: []block.MiniBlockHeader{{}, {}}, + MiniBlockHeaders: []block.MiniBlockHeader{{}, {TxCount: 1}}, } body := &block.Body{ @@ -3609,8 +3676,10 @@ func TestTransactionCoordinator_VerifyFeesShouldErrMaxAccumulatedFeesExceededWhe } }, }, - ScheduledMiniBlocksEnableEpoch: 2, - DoubleTransactionsDetector: &testscommon.PanicDoubleTransactionsDetector{}, + ScheduledMiniBlocksEnableEpoch: 2, + DoubleTransactionsDetector: 
&testscommon.PanicDoubleTransactionsDetector{}, + MiniBlockPartialExecutionEnableEpoch: 2, + ProcessedMiniBlocksTracker: &testscommon.ProcessedMiniBlocksTrackerStub{}, } tc, err := NewTransactionCoordinator(txCoordinatorArgs) assert.Nil(t, err) @@ -3628,7 +3697,7 @@ func TestTransactionCoordinator_VerifyFeesShouldErrMaxAccumulatedFeesExceededWhe header := &block.Header{ AccumulatedFees: big.NewInt(101), DeveloperFees: big.NewInt(10), - MiniBlockHeaders: []block.MiniBlockHeader{{}, {}}, + MiniBlockHeaders: []block.MiniBlockHeader{{}, {TxCount: 1}}, } for index := range header.MiniBlockHeaders { _ = header.MiniBlockHeaders[index].SetProcessingType(int32(block.Normal)) @@ -3692,8 +3761,10 @@ func TestTransactionCoordinator_VerifyFeesShouldErrMaxDeveloperFeesExceededWhenS } }, }, - ScheduledMiniBlocksEnableEpoch: 2, - DoubleTransactionsDetector: &testscommon.PanicDoubleTransactionsDetector{}, + ScheduledMiniBlocksEnableEpoch: 2, + DoubleTransactionsDetector: &testscommon.PanicDoubleTransactionsDetector{}, + MiniBlockPartialExecutionEnableEpoch: 2, + ProcessedMiniBlocksTracker: &testscommon.ProcessedMiniBlocksTrackerStub{}, } tc, err := NewTransactionCoordinator(txCoordinatorArgs) assert.Nil(t, err) @@ -3711,7 +3782,7 @@ func TestTransactionCoordinator_VerifyFeesShouldErrMaxDeveloperFeesExceededWhenS header := &block.Header{ AccumulatedFees: big.NewInt(100), DeveloperFees: big.NewInt(11), - MiniBlockHeaders: []block.MiniBlockHeader{{}, {}}, + MiniBlockHeaders: []block.MiniBlockHeader{{}, {TxCount: 1}}, } for index := range header.MiniBlockHeaders { _ = header.MiniBlockHeaders[index].SetProcessingType(int32(block.Normal)) @@ -3775,8 +3846,10 @@ func TestTransactionCoordinator_VerifyFeesShouldWork(t *testing.T) { } }, }, - ScheduledMiniBlocksEnableEpoch: 2, - DoubleTransactionsDetector: &testscommon.PanicDoubleTransactionsDetector{}, + ScheduledMiniBlocksEnableEpoch: 2, + DoubleTransactionsDetector: &testscommon.PanicDoubleTransactionsDetector{}, + MiniBlockPartialExecutionEnableEpoch: 2, + ProcessedMiniBlocksTracker: &testscommon.ProcessedMiniBlocksTrackerStub{}, } tc, err := NewTransactionCoordinator(txCoordinatorArgs) assert.Nil(t, err) @@ -3794,7 +3867,7 @@ func TestTransactionCoordinator_VerifyFeesShouldWork(t *testing.T) { header := &block.Header{ AccumulatedFees: big.NewInt(100), DeveloperFees: big.NewInt(10), - MiniBlockHeaders: []block.MiniBlockHeader{{}, {}}, + MiniBlockHeaders: []block.MiniBlockHeader{{}, {TxCount: 1}}, } body := &block.Body{ @@ -3818,7 +3891,7 @@ func TestTransactionCoordinator_VerifyFeesShouldWork(t *testing.T) { header = &block.Header{ AccumulatedFees: big.NewInt(101), DeveloperFees: big.NewInt(11), - MiniBlockHeaders: []block.MiniBlockHeader{{}, {}}, + MiniBlockHeaders: []block.MiniBlockHeader{{}, {TxCount: 1}}, } for index := range header.MiniBlockHeaders { _ = header.MiniBlockHeaders[index].SetProcessingType(int32(block.Normal)) @@ -3833,26 +3906,28 @@ func TestTransactionCoordinator_GetMaxAccumulatedAndDeveloperFeesShouldErr(t *te dataPool := initDataPool(txHash) txCoordinatorArgs := ArgTransactionCoordinator{ - Hasher: &hashingMocks.HasherMock{}, - Marshalizer: &mock.MarshalizerMock{}, - ShardCoordinator: mock.NewMultiShardsCoordinatorMock(3), - Accounts: initAccountsMock(), - MiniBlockPool: dataPool.MiniBlocks(), - RequestHandler: &testscommon.RequestHandlerStub{}, - PreProcessors: createPreProcessorContainerWithDataPool(dataPool, FeeHandlerMock()), - InterProcessors: createInterimProcessorContainer(), - GasHandler: &testscommon.GasHandlerStub{}, - FeeHandler: 
&mock.FeeAccumulatorStub{}, - BlockSizeComputation: &testscommon.BlockSizeComputationStub{}, - BalanceComputation: &testscommon.BalanceComputationStub{}, - EconomicsFee: &mock.FeeHandlerStub{}, - TxTypeHandler: &testscommon.TxTypeHandlerMock{}, - BlockGasAndFeesReCheckEnableEpoch: 0, - TransactionsLogProcessor: &mock.TxLogsProcessorStub{}, - EpochNotifier: &epochNotifier.EpochNotifierStub{}, - ScheduledTxsExecutionHandler: &testscommon.ScheduledTxsExecutionStub{}, - ScheduledMiniBlocksEnableEpoch: 2, - DoubleTransactionsDetector: &testscommon.PanicDoubleTransactionsDetector{}, + Hasher: &hashingMocks.HasherMock{}, + Marshalizer: &mock.MarshalizerMock{}, + ShardCoordinator: mock.NewMultiShardsCoordinatorMock(3), + Accounts: initAccountsMock(), + MiniBlockPool: dataPool.MiniBlocks(), + RequestHandler: &testscommon.RequestHandlerStub{}, + PreProcessors: createPreProcessorContainerWithDataPool(dataPool, FeeHandlerMock()), + InterProcessors: createInterimProcessorContainer(), + GasHandler: &testscommon.GasHandlerStub{}, + FeeHandler: &mock.FeeAccumulatorStub{}, + BlockSizeComputation: &testscommon.BlockSizeComputationStub{}, + BalanceComputation: &testscommon.BalanceComputationStub{}, + EconomicsFee: &mock.FeeHandlerStub{}, + TxTypeHandler: &testscommon.TxTypeHandlerMock{}, + BlockGasAndFeesReCheckEnableEpoch: 0, + TransactionsLogProcessor: &mock.TxLogsProcessorStub{}, + EpochNotifier: &epochNotifier.EpochNotifierStub{}, + ScheduledTxsExecutionHandler: &testscommon.ScheduledTxsExecutionStub{}, + ScheduledMiniBlocksEnableEpoch: 2, + DoubleTransactionsDetector: &testscommon.PanicDoubleTransactionsDetector{}, + MiniBlockPartialExecutionEnableEpoch: 2, + ProcessedMiniBlocksTracker: &testscommon.ProcessedMiniBlocksTrackerStub{}, } tc, err := NewTransactionCoordinator(txCoordinatorArgs) assert.Nil(t, err) @@ -3866,10 +3941,14 @@ func TestTransactionCoordinator_GetMaxAccumulatedAndDeveloperFeesShouldErr(t *te ReceiverShardID: 1, } - accumulatedFees, developerFees, errGetMaxFees := tc.getMaxAccumulatedAndDeveloperFees(mb, nil) + mbh := &block.MiniBlockHeader{ + TxCount: 1, + } + + accumulatedFees, developerFees, errGetMaxFees := tc.getMaxAccumulatedAndDeveloperFees(mbh, mb, nil) assert.Equal(t, process.ErrMissingTransaction, errGetMaxFees) - assert.Equal(t, big.NewInt(0), accumulatedFees) - assert.Equal(t, big.NewInt(0), developerFees) + assert.Nil(t, accumulatedFees) + assert.Nil(t, developerFees) } func TestTransactionCoordinator_GetMaxAccumulatedAndDeveloperFeesShouldWork(t *testing.T) { @@ -3898,13 +3977,15 @@ func TestTransactionCoordinator_GetMaxAccumulatedAndDeveloperFeesShouldWork(t *t return 0.1 }, }, - TxTypeHandler: &testscommon.TxTypeHandlerMock{}, - BlockGasAndFeesReCheckEnableEpoch: 0, - TransactionsLogProcessor: &mock.TxLogsProcessorStub{}, - EpochNotifier: &epochNotifier.EpochNotifierStub{}, - ScheduledTxsExecutionHandler: &testscommon.ScheduledTxsExecutionStub{}, - ScheduledMiniBlocksEnableEpoch: 2, - DoubleTransactionsDetector: &testscommon.PanicDoubleTransactionsDetector{}, + TxTypeHandler: &testscommon.TxTypeHandlerMock{}, + BlockGasAndFeesReCheckEnableEpoch: 0, + TransactionsLogProcessor: &mock.TxLogsProcessorStub{}, + EpochNotifier: &epochNotifier.EpochNotifierStub{}, + ScheduledTxsExecutionHandler: &testscommon.ScheduledTxsExecutionStub{}, + ScheduledMiniBlocksEnableEpoch: 2, + DoubleTransactionsDetector: &testscommon.PanicDoubleTransactionsDetector{}, + MiniBlockPartialExecutionEnableEpoch: 2, + ProcessedMiniBlocksTracker: &testscommon.ProcessedMiniBlocksTrackerStub{}, } tc, err 
:= NewTransactionCoordinator(txCoordinatorArgs) assert.Nil(t, err) @@ -3929,7 +4010,11 @@ func TestTransactionCoordinator_GetMaxAccumulatedAndDeveloperFeesShouldWork(t *t ReceiverShardID: 1, } - accumulatedFees, developerFees, errGetMaxFees := tc.getMaxAccumulatedAndDeveloperFees(mb, mapAllTxs) + mbh := &block.MiniBlockHeader{ + TxCount: 3, + } + + accumulatedFees, developerFees, errGetMaxFees := tc.getMaxAccumulatedAndDeveloperFees(mbh, mb, mapAllTxs) assert.Nil(t, errGetMaxFees) assert.Equal(t, big.NewInt(600), accumulatedFees) assert.Equal(t, big.NewInt(60), developerFees) @@ -3952,7 +4037,7 @@ func TestTransactionCoordinator_RevertIfNeededShouldWork(t *testing.T) { PreProcessors: createPreProcessorContainerWithDataPool(dataPool, FeeHandlerMock()), InterProcessors: createInterimProcessorContainer(), GasHandler: &mock.GasHandlerMock{ - RestoreGasSinceLastResetCalled: func() { + RestoreGasSinceLastResetCalled: func(key []byte) { restoreGasSinceLastResetCalled = true }, }, @@ -3961,16 +4046,18 @@ func TestTransactionCoordinator_RevertIfNeededShouldWork(t *testing.T) { numTxsFeesReverted += len(txHashes) }, }, - BlockSizeComputation: &testscommon.BlockSizeComputationStub{}, - BalanceComputation: &testscommon.BalanceComputationStub{}, - EconomicsFee: &mock.FeeHandlerStub{}, - TxTypeHandler: &testscommon.TxTypeHandlerMock{}, - BlockGasAndFeesReCheckEnableEpoch: 0, - TransactionsLogProcessor: &mock.TxLogsProcessorStub{}, - EpochNotifier: &epochNotifier.EpochNotifierStub{}, - ScheduledTxsExecutionHandler: &testscommon.ScheduledTxsExecutionStub{}, - ScheduledMiniBlocksEnableEpoch: 2, - DoubleTransactionsDetector: &testscommon.PanicDoubleTransactionsDetector{}, + BlockSizeComputation: &testscommon.BlockSizeComputationStub{}, + BalanceComputation: &testscommon.BalanceComputationStub{}, + EconomicsFee: &mock.FeeHandlerStub{}, + TxTypeHandler: &testscommon.TxTypeHandlerMock{}, + BlockGasAndFeesReCheckEnableEpoch: 0, + TransactionsLogProcessor: &mock.TxLogsProcessorStub{}, + EpochNotifier: &epochNotifier.EpochNotifierStub{}, + ScheduledTxsExecutionHandler: &testscommon.ScheduledTxsExecutionStub{}, + ScheduledMiniBlocksEnableEpoch: 2, + DoubleTransactionsDetector: &testscommon.PanicDoubleTransactionsDetector{}, + MiniBlockPartialExecutionEnableEpoch: 2, + ProcessedMiniBlocksTracker: &testscommon.ProcessedMiniBlocksTrackerStub{}, } txHashes := make([][]byte, 0) @@ -3982,7 +4069,10 @@ func TestTransactionCoordinator_RevertIfNeededShouldWork(t *testing.T) { } tc, _ := NewTransactionCoordinator(txCoordinatorArgs) - tc.revertIfNeeded(txHashes) + createMBDestMeExecutionInfo := &createMiniBlockDestMeExecutionInfo{ + processedTxHashes: txHashes, + } + tc.revertIfNeeded(createMBDestMeExecutionInfo, []byte("key")) assert.False(t, restoreGasSinceLastResetCalled) assert.Equal(t, 0, numTxsFeesReverted) @@ -3993,7 +4083,10 @@ func TestTransactionCoordinator_RevertIfNeededShouldWork(t *testing.T) { } tc, _ = NewTransactionCoordinator(txCoordinatorArgs) - tc.revertIfNeeded(txHashes) + createMBDestMeExecutionInfo = &createMiniBlockDestMeExecutionInfo{ + processedTxHashes: txHashes, + } + tc.revertIfNeeded(createMBDestMeExecutionInfo, []byte("key")) assert.False(t, restoreGasSinceLastResetCalled) assert.Equal(t, 0, numTxsFeesReverted) @@ -4002,7 +4095,10 @@ func TestTransactionCoordinator_RevertIfNeededShouldWork(t *testing.T) { txHashes = append(txHashes, txHash1) txHashes = append(txHashes, txHash2) - tc.revertIfNeeded(txHashes) + createMBDestMeExecutionInfo = &createMiniBlockDestMeExecutionInfo{ + processedTxHashes: 
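In TestTransactionCoordinator_GetMaxAccumulatedAndDeveloperFeesShouldWork, the 600 and 60 expectations follow from summing gasLimit times gasPrice per transaction and taking the 10% developer share returned by the stubbed DeveloperPercentage. The sketch below reproduces that arithmetic; the per-transaction gas values are assumptions chosen only to match the asserted totals, since the actual test transactions are not shown in this hunk:

```go
package main

import (
	"fmt"
	"math/big"
)

// tx is a minimal stand-in for a transaction handler exposing gas limit and gas price.
type tx struct {
	gasLimit uint64
	gasPrice uint64
}

// maxFeesForMiniBlock mirrors the accumulation done by getMaxAccumulatedAndDeveloperFees
// over the still-to-verify index range: each transaction contributes gasLimit * gasPrice,
// and the developer share is taken as a percentage of that amount.
func maxFeesForMiniBlock(txs []tx, developerPercent int64) (*big.Int, *big.Int) {
	accumulated := big.NewInt(0)
	developer := big.NewInt(0)

	for _, t := range txs {
		feeFromTx := new(big.Int).Mul(new(big.Int).SetUint64(t.gasLimit), new(big.Int).SetUint64(t.gasPrice))
		devFromTx := new(big.Int).Div(new(big.Int).Mul(feeFromTx, big.NewInt(developerPercent)), big.NewInt(100))

		accumulated.Add(accumulated, feeFromTx)
		developer.Add(developer, devFromTx)
	}

	return accumulated, developer
}

func main() {
	// assumed values: three transactions with gasLimit 100 and gasPrice 2, chosen only to
	// reproduce the 600 / 60 totals asserted by the test
	txs := []tx{{100, 2}, {100, 2}, {100, 2}}

	accumulated, developer := maxFeesForMiniBlock(txs, 10) // 10%, as the stubbed DeveloperPercentage returns 0.1
	fmt.Println(accumulated, developer) // 600 60
}
```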
txHashes, + } + tc.revertIfNeeded(createMBDestMeExecutionInfo, []byte("key")) assert.True(t, restoreGasSinceLastResetCalled) assert.Equal(t, len(txHashes), numTxsFeesReverted) } @@ -4273,3 +4369,71 @@ func TestTransactionCoordinator_AddTransactions(t *testing.T) { require.True(t, addTransactionsCalled.IsSet()) }) } + +func TestGetProcessedMiniBlockInfo_ShouldWork(t *testing.T) { + processedMiniBlocksInfo := make(map[string]*processedMb.ProcessedMiniBlockInfo) + + processedMbInfo := getProcessedMiniBlockInfo(nil, []byte("hash1")) + assert.False(t, processedMbInfo.FullyProcessed) + assert.Equal(t, int32(-1), processedMbInfo.IndexOfLastTxProcessed) + + processedMbInfo = getProcessedMiniBlockInfo(processedMiniBlocksInfo, []byte("hash1")) + assert.False(t, processedMbInfo.FullyProcessed) + assert.Equal(t, int32(-1), processedMbInfo.IndexOfLastTxProcessed) + assert.Equal(t, 1, len(processedMiniBlocksInfo)) + + processedMbInfo.IndexOfLastTxProcessed = 69 + processedMbInfo.FullyProcessed = true + + processedMbInfo = getProcessedMiniBlockInfo(processedMiniBlocksInfo, []byte("hash1")) + assert.True(t, processedMbInfo.FullyProcessed) + assert.Equal(t, int32(69), processedMbInfo.IndexOfLastTxProcessed) + assert.Equal(t, 1, len(processedMiniBlocksInfo)) + assert.True(t, processedMiniBlocksInfo["hash1"].FullyProcessed) + assert.Equal(t, int32(69), processedMiniBlocksInfo["hash1"].IndexOfLastTxProcessed) +} + +func TestTransactionCoordinator_getIndexesOfLastTxProcessed(t *testing.T) { + t.Parallel() + + t.Run("calculating hash error should not get indexes", func(t *testing.T) { + t.Parallel() + + args := createMockTransactionCoordinatorArguments() + args.Marshalizer = &testscommon.MarshalizerMock{ + Fail: true, + } + tc, _ := NewTransactionCoordinator(args) + + miniBlock := &block.MiniBlock{} + miniBlockHeader := &block.MiniBlockHeader{} + + pi, err := tc.getIndexesOfLastTxProcessed(miniBlock, miniBlockHeader) + assert.Nil(t, pi) + assert.Equal(t, testscommon.ErrMockMarshalizer, err) + }) + + t.Run("should get indexes", func(t *testing.T) { + t.Parallel() + + args := createMockTransactionCoordinatorArguments() + args.Marshalizer = &testscommon.MarshalizerMock{ + Fail: false, + } + tc, _ := NewTransactionCoordinator(args) + + miniBlock := &block.MiniBlock{} + miniBlockHash, _ := core.CalculateHash(tc.marshalizer, tc.hasher, miniBlock) + mbh := &block.MiniBlockHeader{ + Hash: miniBlockHash, + TxCount: 6, + } + _ = mbh.SetIndexOfFirstTxProcessed(2) + _ = mbh.SetIndexOfLastTxProcessed(4) + + pi, err := tc.getIndexesOfLastTxProcessed(miniBlock, mbh) + assert.Nil(t, err) + assert.Equal(t, int32(-1), pi.indexOfLastTxProcessed) + assert.Equal(t, mbh.GetIndexOfLastTxProcessed(), pi.indexOfLastTxProcessedByProposer) + }) +} diff --git a/process/coordinator/transactionType_test.go b/process/coordinator/transactionType_test.go index f7979370a16..00050586557 100644 --- a/process/coordinator/transactionType_test.go +++ b/process/coordinator/transactionType_test.go @@ -12,8 +12,8 @@ import ( vmData "github.com/ElrondNetwork/elrond-go-core/data/vm" "github.com/ElrondNetwork/elrond-go/process" "github.com/ElrondNetwork/elrond-go/process/mock" - "github.com/ElrondNetwork/elrond-go/vm" "github.com/ElrondNetwork/elrond-go/testscommon/epochNotifier" + "github.com/ElrondNetwork/elrond-go/vm" "github.com/ElrondNetwork/elrond-vm-common/builtInFunctions" "github.com/ElrondNetwork/elrond-vm-common/parsers" "github.com/stretchr/testify/assert" diff --git a/process/errors.go b/process/errors.go index fd71c776246..5d33445479b 100644 
--- a/process/errors.go +++ b/process/errors.go @@ -1077,5 +1077,56 @@ var ErrNilDoubleTransactionsDetector = errors.New("nil double transactions detec // ErrNoTxToProcess signals that no transaction were sent for processing var ErrNoTxToProcess = errors.New("no transaction to process") +// ErrPropertyTooLong signals that a heartbeat property was too long +var ErrPropertyTooLong = errors.New("property too long") + +// ErrPropertyTooShort signals that a heartbeat property was too short +var ErrPropertyTooShort = errors.New("property too short") + +// ErrInvalidPeerSubType signals that an invalid peer subtype was provided +var ErrInvalidPeerSubType = errors.New("invalid peer subtype") + +// ErrNilSignaturesHandler signals that a nil signatures handler was provided +var ErrNilSignaturesHandler = errors.New("nil signatures handler") + +// ErrMessageExpired signals that a received message is expired +var ErrMessageExpired = errors.New("message expired") + +// ErrInvalidExpiryTimespan signals that an invalid expiry timespan was provided +var ErrInvalidExpiryTimespan = errors.New("invalid expiry timespan") + +// ErrNilPeerSignatureHandler signals that a nil peer signature handler was provided +var ErrNilPeerSignatureHandler = errors.New("nil peer signature handler") + +// ErrNilPeerAuthenticationCacher signals that a nil peer authentication cacher was provided +var ErrNilPeerAuthenticationCacher = errors.New("nil peer authentication cacher") + +// ErrNilHeartbeatCacher signals that a nil heartbeat cacher was provided +var ErrNilHeartbeatCacher = errors.New("nil heartbeat cacher") + // ErrInvalidProcessWaitTime signals that an invalid process wait time was provided var ErrInvalidProcessWaitTime = errors.New("invalid process wait time") + +// ErrMetaHeaderEpochOutOfRange signals that the given header is out of accepted range +var ErrMetaHeaderEpochOutOfRange = errors.New("epoch out of range for meta block header") + +// ErrNilHardforkTrigger signals that a nil hardfork trigger has been provided +var ErrNilHardforkTrigger = errors.New("nil hardfork trigger") + +// ErrMissingMiniBlockHeader signals that mini block header is missing +var ErrMissingMiniBlockHeader = errors.New("missing mini block header") + +// ErrMissingMiniBlock signals that mini block is missing +var ErrMissingMiniBlock = errors.New("missing mini block") + +// ErrIndexIsOutOfBound signals that the given index is out of bound +var ErrIndexIsOutOfBound = errors.New("index is out of bound") + +// ErrIndexDoesNotMatchWithPartialExecuted signals that the given index does not match with a partial executed mini block +var ErrIndexDoesNotMatchWithPartialExecutedMiniBlock = errors.New("index does not match with a partial executed mini block") + +// ErrIndexDoesNotMatchWithFullyExecuted signals that the given index does not match with a fully executed mini block +var ErrIndexDoesNotMatchWithFullyExecutedMiniBlock = errors.New("index does not match with a fully executed mini block") + +// ErrNilProcessedMiniBlocksTracker signals that a nil processed mini blocks tracker has been provided +var ErrNilProcessedMiniBlocksTracker = errors.New("nil processed mini blocks tracker") diff --git a/process/factory/interceptorscontainer/args.go b/process/factory/interceptorscontainer/args.go index 5fde5860dee..8e3509181be 100644 --- a/process/factory/interceptorscontainer/args.go +++ b/process/factory/interceptorscontainer/args.go @@ -1,7 +1,9 @@ package interceptorscontainer import ( + crypto "github.com/ElrondNetwork/elrond-go-crypto" 
"github.com/ElrondNetwork/elrond-go/dataRetriever" + "github.com/ElrondNetwork/elrond-go/heartbeat" "github.com/ElrondNetwork/elrond-go/process" "github.com/ElrondNetwork/elrond-go/sharding" "github.com/ElrondNetwork/elrond-go/sharding/nodesCoordinator" @@ -10,27 +12,32 @@ import ( // CommonInterceptorsContainerFactoryArgs holds the arguments needed for the metachain/shard interceptors factories type CommonInterceptorsContainerFactoryArgs struct { - CoreComponents process.CoreComponentsHolder - CryptoComponents process.CryptoComponentsHolder - Accounts state.AccountsAdapter - ShardCoordinator sharding.Coordinator - NodesCoordinator nodesCoordinator.NodesCoordinator - Messenger process.TopicHandler - Store dataRetriever.StorageService - DataPool dataRetriever.PoolsHolder - MaxTxNonceDeltaAllowed int - TxFeeHandler process.FeeHandler - BlockBlackList process.TimeCacher - HeaderSigVerifier process.InterceptedHeaderSigVerifier - HeaderIntegrityVerifier process.HeaderIntegrityVerifier - ValidityAttester process.ValidityAttester - EpochStartTrigger process.EpochStartTriggerHandler - WhiteListHandler process.WhiteListHandler - WhiteListerVerifiedTxs process.WhiteListHandler - AntifloodHandler process.P2PAntifloodHandler - ArgumentsParser process.ArgumentsParser - PreferredPeersHolder process.PreferredPeersHolderHandler - SizeCheckDelta uint32 - EnableSignTxWithHashEpoch uint32 - RequestHandler process.RequestHandler + CoreComponents process.CoreComponentsHolder + CryptoComponents process.CryptoComponentsHolder + Accounts state.AccountsAdapter + ShardCoordinator sharding.Coordinator + NodesCoordinator nodesCoordinator.NodesCoordinator + Messenger process.TopicHandler + Store dataRetriever.StorageService + DataPool dataRetriever.PoolsHolder + MaxTxNonceDeltaAllowed int + TxFeeHandler process.FeeHandler + BlockBlackList process.TimeCacher + HeaderSigVerifier process.InterceptedHeaderSigVerifier + HeaderIntegrityVerifier process.HeaderIntegrityVerifier + ValidityAttester process.ValidityAttester + EpochStartTrigger process.EpochStartTriggerHandler + WhiteListHandler process.WhiteListHandler + WhiteListerVerifiedTxs process.WhiteListHandler + AntifloodHandler process.P2PAntifloodHandler + ArgumentsParser process.ArgumentsParser + PreferredPeersHolder process.PreferredPeersHolderHandler + SizeCheckDelta uint32 + EnableSignTxWithHashEpoch uint32 + RequestHandler process.RequestHandler + PeerSignatureHandler crypto.PeerSignatureHandler + SignaturesHandler process.SignaturesHandler + HeartbeatExpiryTimespanInSec int64 + PeerShardMapper process.PeerShardMapper + HardforkTrigger heartbeat.HardforkTrigger } diff --git a/process/factory/interceptorscontainer/baseInterceptorsContainerFactory.go b/process/factory/interceptorscontainer/baseInterceptorsContainerFactory.go index 2355400f349..8a3abe780c0 100644 --- a/process/factory/interceptorscontainer/baseInterceptorsContainerFactory.go +++ b/process/factory/interceptorscontainer/baseInterceptorsContainerFactory.go @@ -8,6 +8,7 @@ import ( "github.com/ElrondNetwork/elrond-go-core/hashing" "github.com/ElrondNetwork/elrond-go/common" "github.com/ElrondNetwork/elrond-go/dataRetriever" + "github.com/ElrondNetwork/elrond-go/heartbeat" "github.com/ElrondNetwork/elrond-go/process" "github.com/ElrondNetwork/elrond-go/process/dataValidators" "github.com/ElrondNetwork/elrond-go/process/factory" @@ -21,6 +22,7 @@ import ( const numGoRoutines = 100 const chunksProcessorRequestInterval = time.Millisecond * 400 +const minTimespanDurationInSec = int64(1) type 
baseInterceptorsContainerFactory struct { container process.InterceptorsContainer @@ -40,6 +42,8 @@ type baseInterceptorsContainerFactory struct { preferredPeersHolder process.PreferredPeersHolderHandler hasher hashing.Hasher requestHandler process.RequestHandler + peerShardMapper process.PeerShardMapper + hardforkTrigger heartbeat.HardforkTrigger } func checkBaseParams( @@ -57,6 +61,8 @@ func checkBaseParams( whiteListerVerifiedTxs process.WhiteListHandler, preferredPeersHolder process.PreferredPeersHolderHandler, requestHandler process.RequestHandler, + peerShardMapper process.PeerShardMapper, + hardforkTrigger heartbeat.HardforkTrigger, ) error { if check.IfNil(coreComponents) { return process.ErrNilCoreComponentsHolder @@ -139,6 +145,12 @@ func checkBaseParams( if check.IfNil(requestHandler) { return process.ErrNilRequestHandler } + if check.IfNil(peerShardMapper) { + return process.ErrNilPeerShardMapper + } + if check.IfNil(hardforkTrigger) { + return process.ErrNilHardforkTrigger + } return nil } @@ -589,3 +601,139 @@ func (bicf *baseInterceptorsContainerFactory) generateUnsignedTxsInterceptors() return bicf.container.AddMultiple(keys, interceptorsSlice) } + +//------- PeerAuthentication interceptor + +func (bicf *baseInterceptorsContainerFactory) generatePeerAuthenticationInterceptor() error { + identifierPeerAuthentication := common.PeerAuthenticationTopic + + internalMarshaller := bicf.argInterceptorFactory.CoreComponents.InternalMarshalizer() + argProcessor := processor.ArgPeerAuthenticationInterceptorProcessor{ + PeerAuthenticationCacher: bicf.dataPool.PeerAuthentications(), + PeerShardMapper: bicf.peerShardMapper, + Marshaller: internalMarshaller, + HardforkTrigger: bicf.hardforkTrigger, + } + peerAuthenticationProcessor, err := processor.NewPeerAuthenticationInterceptorProcessor(argProcessor) + if err != nil { + return err + } + + peerAuthenticationFactory, err := interceptorFactory.NewInterceptedPeerAuthenticationDataFactory(*bicf.argInterceptorFactory) + if err != nil { + return err + } + + mdInterceptor, err := interceptors.NewMultiDataInterceptor( + interceptors.ArgMultiDataInterceptor{ + Topic: identifierPeerAuthentication, + Marshalizer: internalMarshaller, + DataFactory: peerAuthenticationFactory, + Processor: peerAuthenticationProcessor, + Throttler: bicf.globalThrottler, + AntifloodHandler: bicf.antifloodHandler, + WhiteListRequest: bicf.whiteListHandler, + PreferredPeersHolder: bicf.preferredPeersHolder, + CurrentPeerId: bicf.messenger.ID(), + }, + ) + if err != nil { + return err + } + + interceptor, err := bicf.createTopicAndAssignHandler(identifierPeerAuthentication, mdInterceptor, true) + if err != nil { + return err + } + + return bicf.container.Add(identifierPeerAuthentication, interceptor) +} + +//------- Heartbeat interceptor + +func (bicf *baseInterceptorsContainerFactory) generateHeartbeatInterceptor() error { + shardC := bicf.shardCoordinator + identifierHeartbeat := common.HeartbeatV2Topic + shardC.CommunicationIdentifier(shardC.SelfId()) + + argHeartbeatProcessor := processor.ArgHeartbeatInterceptorProcessor{ + HeartbeatCacher: bicf.dataPool.Heartbeats(), + ShardCoordinator: shardC, + PeerShardMapper: bicf.peerShardMapper, + } + heartbeatProcessor, err := processor.NewHeartbeatInterceptorProcessor(argHeartbeatProcessor) + if err != nil { + return err + } + + heartbeatFactory, err := interceptorFactory.NewInterceptedHeartbeatDataFactory(*bicf.argInterceptorFactory) + if err != nil { + return err + } + + internalMarshalizer := 
bicf.argInterceptorFactory.CoreComponents.InternalMarshalizer() + mdInterceptor, err := interceptors.NewMultiDataInterceptor( + interceptors.ArgMultiDataInterceptor{ + Topic: identifierHeartbeat, + Marshalizer: internalMarshalizer, + DataFactory: heartbeatFactory, + Processor: heartbeatProcessor, + Throttler: bicf.globalThrottler, + AntifloodHandler: bicf.antifloodHandler, + WhiteListRequest: bicf.whiteListHandler, + PreferredPeersHolder: bicf.preferredPeersHolder, + CurrentPeerId: bicf.messenger.ID(), + }, + ) + if err != nil { + return err + } + + interceptor, err := bicf.createTopicAndAssignHandler(identifierHeartbeat, mdInterceptor, true) + if err != nil { + return err + } + + return bicf.container.Add(identifierHeartbeat, interceptor) +} + +// ------- DirectConnectionInfo interceptor + +func (bicf *baseInterceptorsContainerFactory) generateDirectConnectionInfoInterceptor() error { + identifier := common.ConnectionTopic + + interceptedDirectConnectionInfoFactory, err := interceptorFactory.NewInterceptedDirectConnectionInfoFactory(*bicf.argInterceptorFactory) + if err != nil { + return err + } + + argProcessor := processor.ArgDirectConnectionInfoInterceptorProcessor{ + PeerShardMapper: bicf.peerShardMapper, + } + dciProcessor, err := processor.NewDirectConnectionInfoInterceptorProcessor(argProcessor) + if err != nil { + return err + } + + interceptor, err := interceptors.NewSingleDataInterceptor( + interceptors.ArgSingleDataInterceptor{ + Topic: identifier, + DataFactory: interceptedDirectConnectionInfoFactory, + Processor: dciProcessor, + Throttler: bicf.globalThrottler, + AntifloodHandler: bicf.antifloodHandler, + WhiteListRequest: bicf.whiteListHandler, + CurrentPeerId: bicf.messenger.ID(), + PreferredPeersHolder: bicf.preferredPeersHolder, + }, + ) + if err != nil { + return err + } + + _, err = bicf.createTopicAndAssignHandler(identifier, interceptor, true) + if err != nil { + return err + } + + return bicf.container.Add(identifier, interceptor) +} diff --git a/process/factory/interceptorscontainer/metaInterceptorsContainerFactory.go b/process/factory/interceptorscontainer/metaInterceptorsContainerFactory.go index fe6a17c03bb..7aab67df6a7 100644 --- a/process/factory/interceptorscontainer/metaInterceptorsContainerFactory.go +++ b/process/factory/interceptorscontainer/metaInterceptorsContainerFactory.go @@ -39,6 +39,8 @@ func NewMetaInterceptorsContainerFactory( args.WhiteListerVerifiedTxs, args.PreferredPeersHolder, args.RequestHandler, + args.PeerShardMapper, + args.HardforkTrigger, ) if err != nil { return nil, err @@ -69,20 +71,33 @@ func NewMetaInterceptorsContainerFactory( if check.IfNil(args.ValidityAttester) { return nil, process.ErrNilValidityAttester } + if check.IfNil(args.SignaturesHandler) { + return nil, process.ErrNilSignaturesHandler + } + if check.IfNil(args.PeerSignatureHandler) { + return nil, process.ErrNilPeerSignatureHandler + } + if args.HeartbeatExpiryTimespanInSec < minTimespanDurationInSec { + return nil, process.ErrInvalidExpiryTimespan + } argInterceptorFactory := &interceptorFactory.ArgInterceptedDataFactory{ - CoreComponents: args.CoreComponents, - CryptoComponents: args.CryptoComponents, - ShardCoordinator: args.ShardCoordinator, - NodesCoordinator: args.NodesCoordinator, - FeeHandler: args.TxFeeHandler, - HeaderSigVerifier: args.HeaderSigVerifier, - HeaderIntegrityVerifier: args.HeaderIntegrityVerifier, - ValidityAttester: args.ValidityAttester, - EpochStartTrigger: args.EpochStartTrigger, - WhiteListerVerifiedTxs: args.WhiteListerVerifiedTxs, - 
ArgsParser: args.ArgumentsParser, - EnableSignTxWithHashEpoch: args.EnableSignTxWithHashEpoch, + CoreComponents: args.CoreComponents, + CryptoComponents: args.CryptoComponents, + ShardCoordinator: args.ShardCoordinator, + NodesCoordinator: args.NodesCoordinator, + FeeHandler: args.TxFeeHandler, + WhiteListerVerifiedTxs: args.WhiteListerVerifiedTxs, + HeaderSigVerifier: args.HeaderSigVerifier, + ValidityAttester: args.ValidityAttester, + HeaderIntegrityVerifier: args.HeaderIntegrityVerifier, + EpochStartTrigger: args.EpochStartTrigger, + ArgsParser: args.ArgumentsParser, + EnableSignTxWithHashEpoch: args.EnableSignTxWithHashEpoch, + PeerSignatureHandler: args.PeerSignatureHandler, + SignaturesHandler: args.SignaturesHandler, + HeartbeatExpiryTimespanInSec: args.HeartbeatExpiryTimespanInSec, + PeerID: args.Messenger.ID(), } container := containers.NewInterceptorsContainer() @@ -103,6 +118,8 @@ func NewMetaInterceptorsContainerFactory( preferredPeersHolder: args.PreferredPeersHolder, hasher: args.CoreComponents.Hasher(), requestHandler: args.RequestHandler, + peerShardMapper: args.PeerShardMapper, + hardforkTrigger: args.HardforkTrigger, } icf := &metaInterceptorsContainerFactory{ @@ -154,6 +171,21 @@ func (micf *metaInterceptorsContainerFactory) Create() (process.InterceptorsCont return nil, err } + err = micf.generatePeerAuthenticationInterceptor() + if err != nil { + return nil, err + } + + err = micf.generateHeartbeatInterceptor() + if err != nil { + return nil, err + } + + err = micf.generateDirectConnectionInfoInterceptor() + if err != nil { + return nil, err + } + return micf.container, nil } diff --git a/process/factory/interceptorscontainer/metaInterceptorsContainerFactory_test.go b/process/factory/interceptorscontainer/metaInterceptorsContainerFactory_test.go index 65c0c39bb51..dbaeaee69b2 100644 --- a/process/factory/interceptorscontainer/metaInterceptorsContainerFactory_test.go +++ b/process/factory/interceptorscontainer/metaInterceptorsContainerFactory_test.go @@ -358,6 +358,42 @@ func TestNewMetaInterceptorsContainerFactory_NilValidityAttesterShouldErr(t *tes assert.Equal(t, process.ErrNilValidityAttester, err) } +func TestNewMetaInterceptorsContainerFactory_NilSignaturesHandler(t *testing.T) { + t.Parallel() + + coreComp, cryptoComp := createMockComponentHolders() + args := getArgumentsMeta(coreComp, cryptoComp) + args.SignaturesHandler = nil + icf, err := interceptorscontainer.NewMetaInterceptorsContainerFactory(args) + + assert.Nil(t, icf) + assert.Equal(t, process.ErrNilSignaturesHandler, err) +} + +func TestNewMetaInterceptorsContainerFactory_NilPeerSignatureHandler(t *testing.T) { + t.Parallel() + + coreComp, cryptoComp := createMockComponentHolders() + args := getArgumentsMeta(coreComp, cryptoComp) + args.PeerSignatureHandler = nil + icf, err := interceptorscontainer.NewMetaInterceptorsContainerFactory(args) + + assert.Nil(t, icf) + assert.Equal(t, process.ErrNilPeerSignatureHandler, err) +} + +func TestNewMetaInterceptorsContainerFactory_InvalidExpiryTimespan(t *testing.T) { + t.Parallel() + + coreComp, cryptoComp := createMockComponentHolders() + args := getArgumentsMeta(coreComp, cryptoComp) + args.HeartbeatExpiryTimespanInSec = 0 + icf, err := interceptorscontainer.NewMetaInterceptorsContainerFactory(args) + + assert.Nil(t, icf) + assert.Equal(t, process.ErrInvalidExpiryTimespan, err) +} + func TestNewMetaInterceptorsContainerFactory_EpochStartTriggerShouldErr(t *testing.T) { t.Parallel() @@ -382,6 +418,30 @@ func 
TestNewMetaInterceptorsContainerFactory_NilRequestHandlerShouldErr(t *testi assert.Equal(t, process.ErrNilRequestHandler, err) } +func TestNewMetaInterceptorsContainerFactory_NilPeerShardMapperShouldErr(t *testing.T) { + t.Parallel() + + coreComp, cryptoComp := createMockComponentHolders() + args := getArgumentsMeta(coreComp, cryptoComp) + args.PeerShardMapper = nil + icf, err := interceptorscontainer.NewMetaInterceptorsContainerFactory(args) + + assert.Nil(t, icf) + assert.Equal(t, process.ErrNilPeerShardMapper, err) +} + +func TestNewMetaInterceptorsContainerFactory_NilHardforkTriggerShouldErr(t *testing.T) { + t.Parallel() + + coreComp, cryptoComp := createMockComponentHolders() + args := getArgumentsMeta(coreComp, cryptoComp) + args.HardforkTrigger = nil + icf, err := interceptorscontainer.NewMetaInterceptorsContainerFactory(args) + + assert.Nil(t, icf) + assert.Equal(t, process.ErrNilHardforkTrigger, err) +} + func TestNewMetaInterceptorsContainerFactory_ShouldWork(t *testing.T) { t.Parallel() @@ -539,9 +599,13 @@ func TestMetaInterceptorsContainerFactory_With4ShardsShouldWork(t *testing.T) { numInterceptorsUnsignedTxsForMetachain := noOfShards + 1 numInterceptorsRewardsTxsForMetachain := noOfShards numInterceptorsTrieNodes := 2 + numInterceptorsPeerAuthForMetachain := 1 + numInterceptorsHeartbeatForMetachain := 1 + numInterceptorsShardValidatorInfoForMetachain := 1 totalInterceptors := numInterceptorsMetablock + numInterceptorsShardHeadersForMetachain + numInterceptorsTrieNodes + numInterceptorsTransactionsForMetachain + numInterceptorsUnsignedTxsForMetachain + numInterceptorsMiniBlocksForMetachain + - numInterceptorsRewardsTxsForMetachain + numInterceptorsRewardsTxsForMetachain + numInterceptorsPeerAuthForMetachain + numInterceptorsHeartbeatForMetachain + + numInterceptorsShardValidatorInfoForMetachain assert.Nil(t, err) assert.Equal(t, totalInterceptors, container.Len()) @@ -556,27 +620,31 @@ func getArgumentsMeta( cryptoComp *mock.CryptoComponentsMock, ) interceptorscontainer.CommonInterceptorsContainerFactoryArgs { return interceptorscontainer.CommonInterceptorsContainerFactoryArgs{ - CoreComponents: coreComp, - CryptoComponents: cryptoComp, - ShardCoordinator: mock.NewOneShardCoordinatorMock(), - NodesCoordinator: shardingMocks.NewNodesCoordinatorMock(), - Messenger: &mock.TopicHandlerStub{}, - Store: createMetaStore(), - DataPool: createMetaDataPools(), - Accounts: &stateMock.AccountsStub{}, - MaxTxNonceDeltaAllowed: maxTxNonceDeltaAllowed, - TxFeeHandler: &mock.FeeHandlerStub{}, - BlockBlackList: &mock.BlackListHandlerStub{}, - HeaderSigVerifier: &mock.HeaderSigVerifierStub{}, - HeaderIntegrityVerifier: &mock.HeaderIntegrityVerifierStub{}, - SizeCheckDelta: 0, - ValidityAttester: &mock.ValidityAttesterStub{}, - EpochStartTrigger: &mock.EpochStartTriggerStub{}, - AntifloodHandler: &mock.P2PAntifloodHandlerStub{}, - WhiteListHandler: &testscommon.WhiteListHandlerStub{}, - WhiteListerVerifiedTxs: &testscommon.WhiteListHandlerStub{}, - ArgumentsParser: &mock.ArgumentParserMock{}, - PreferredPeersHolder: &p2pmocks.PeersHolderStub{}, - RequestHandler: &testscommon.RequestHandlerStub{}, + CoreComponents: coreComp, + CryptoComponents: cryptoComp, + Accounts: &stateMock.AccountsStub{}, + ShardCoordinator: mock.NewOneShardCoordinatorMock(), + NodesCoordinator: shardingMocks.NewNodesCoordinatorMock(), + Messenger: &mock.TopicHandlerStub{}, + Store: createMetaStore(), + DataPool: createMetaDataPools(), + MaxTxNonceDeltaAllowed: maxTxNonceDeltaAllowed, + TxFeeHandler: 
&mock.FeeHandlerStub{}, + BlockBlackList: &mock.BlackListHandlerStub{}, + HeaderSigVerifier: &mock.HeaderSigVerifierStub{}, + HeaderIntegrityVerifier: &mock.HeaderIntegrityVerifierStub{}, + ValidityAttester: &mock.ValidityAttesterStub{}, + EpochStartTrigger: &mock.EpochStartTriggerStub{}, + WhiteListHandler: &testscommon.WhiteListHandlerStub{}, + WhiteListerVerifiedTxs: &testscommon.WhiteListHandlerStub{}, + AntifloodHandler: &mock.P2PAntifloodHandlerStub{}, + ArgumentsParser: &mock.ArgumentParserMock{}, + PreferredPeersHolder: &p2pmocks.PeersHolderStub{}, + RequestHandler: &testscommon.RequestHandlerStub{}, + PeerSignatureHandler: &mock.PeerSignatureHandlerStub{}, + SignaturesHandler: &mock.SignaturesHandlerStub{}, + HeartbeatExpiryTimespanInSec: 30, + PeerShardMapper: &p2pmocks.NetworkShardingCollectorStub{}, + HardforkTrigger: &testscommon.HardforkTriggerStub{}, } } diff --git a/process/factory/interceptorscontainer/shardInterceptorsContainerFactory.go b/process/factory/interceptorscontainer/shardInterceptorsContainerFactory.go index 21be93af9c9..be4a326114a 100644 --- a/process/factory/interceptorscontainer/shardInterceptorsContainerFactory.go +++ b/process/factory/interceptorscontainer/shardInterceptorsContainerFactory.go @@ -37,6 +37,8 @@ func NewShardInterceptorsContainerFactory( args.WhiteListerVerifiedTxs, args.PreferredPeersHolder, args.RequestHandler, + args.PeerShardMapper, + args.HardforkTrigger, ) if err != nil { return nil, err @@ -68,20 +70,33 @@ func NewShardInterceptorsContainerFactory( if check.IfNil(args.PreferredPeersHolder) { return nil, process.ErrNilPreferredPeersHolder } + if check.IfNil(args.SignaturesHandler) { + return nil, process.ErrNilSignaturesHandler + } + if check.IfNil(args.PeerSignatureHandler) { + return nil, process.ErrNilPeerSignatureHandler + } + if args.HeartbeatExpiryTimespanInSec < minTimespanDurationInSec { + return nil, process.ErrInvalidExpiryTimespan + } argInterceptorFactory := &interceptorFactory.ArgInterceptedDataFactory{ - CoreComponents: args.CoreComponents, - CryptoComponents: args.CryptoComponents, - ShardCoordinator: args.ShardCoordinator, - NodesCoordinator: args.NodesCoordinator, - FeeHandler: args.TxFeeHandler, - HeaderSigVerifier: args.HeaderSigVerifier, - HeaderIntegrityVerifier: args.HeaderIntegrityVerifier, - ValidityAttester: args.ValidityAttester, - EpochStartTrigger: args.EpochStartTrigger, - WhiteListerVerifiedTxs: args.WhiteListerVerifiedTxs, - ArgsParser: args.ArgumentsParser, - EnableSignTxWithHashEpoch: args.EnableSignTxWithHashEpoch, + CoreComponents: args.CoreComponents, + CryptoComponents: args.CryptoComponents, + ShardCoordinator: args.ShardCoordinator, + NodesCoordinator: args.NodesCoordinator, + FeeHandler: args.TxFeeHandler, + HeaderSigVerifier: args.HeaderSigVerifier, + HeaderIntegrityVerifier: args.HeaderIntegrityVerifier, + ValidityAttester: args.ValidityAttester, + EpochStartTrigger: args.EpochStartTrigger, + WhiteListerVerifiedTxs: args.WhiteListerVerifiedTxs, + ArgsParser: args.ArgumentsParser, + EnableSignTxWithHashEpoch: args.EnableSignTxWithHashEpoch, + PeerSignatureHandler: args.PeerSignatureHandler, + SignaturesHandler: args.SignaturesHandler, + HeartbeatExpiryTimespanInSec: args.HeartbeatExpiryTimespanInSec, + PeerID: args.Messenger.ID(), } container := containers.NewInterceptorsContainer() @@ -102,6 +117,8 @@ func NewShardInterceptorsContainerFactory( preferredPeersHolder: args.PreferredPeersHolder, hasher: args.CoreComponents.Hasher(), requestHandler: args.RequestHandler, + peerShardMapper: 
args.PeerShardMapper, + hardforkTrigger: args.HardforkTrigger, } icf := &shardInterceptorsContainerFactory{ @@ -153,6 +170,21 @@ func (sicf *shardInterceptorsContainerFactory) Create() (process.InterceptorsCon return nil, err } + err = sicf.generatePeerAuthenticationInterceptor() + if err != nil { + return nil, err + } + + err = sicf.generateHeartbeatInterceptor() + if err != nil { + return nil, err + } + + err = sicf.generateDirectConnectionInfoInterceptor() + if err != nil { + return nil, err + } + return sicf.container, nil } diff --git a/process/factory/interceptorscontainer/shardInterceptorsContainerFactory_test.go b/process/factory/interceptorscontainer/shardInterceptorsContainerFactory_test.go index 9561370a58a..826c6fbb2d9 100644 --- a/process/factory/interceptorscontainer/shardInterceptorsContainerFactory_test.go +++ b/process/factory/interceptorscontainer/shardInterceptorsContainerFactory_test.go @@ -24,6 +24,8 @@ import ( "github.com/stretchr/testify/assert" ) +var providedHardforkPubKey = []byte("provided hardfork pub key") + func createShardStubTopicHandler(matchStrToErrOnCreate string, matchStrToErrOnRegister string) process.TopicHandler { return &mock.TopicHandlerStub{ CreateTopicCalled: func(name string, createChannelForTopic bool) error { @@ -375,6 +377,30 @@ func TestNewShardInterceptorsContainerFactory_EmptyEpochStartTriggerShouldErr(t assert.Equal(t, process.ErrNilEpochStartTrigger, err) } +func TestNewShardInterceptorsContainerFactory_NilPeerShardMapperShouldErr(t *testing.T) { + t.Parallel() + + coreComp, cryptoComp := createMockComponentHolders() + args := getArgumentsShard(coreComp, cryptoComp) + args.PeerShardMapper = nil + icf, err := interceptorscontainer.NewShardInterceptorsContainerFactory(args) + + assert.Nil(t, icf) + assert.Equal(t, process.ErrNilPeerShardMapper, err) +} + +func TestNewShardInterceptorsContainerFactory_NilHardforkTriggerShouldErr(t *testing.T) { + t.Parallel() + + coreComp, cryptoComp := createMockComponentHolders() + args := getArgumentsShard(coreComp, cryptoComp) + args.HardforkTrigger = nil + icf, err := interceptorscontainer.NewShardInterceptorsContainerFactory(args) + + assert.Nil(t, icf) + assert.Equal(t, process.ErrNilHardforkTrigger, err) +} + func TestNewShardInterceptorsContainerFactory_ShouldWork(t *testing.T) { t.Parallel() @@ -527,6 +553,42 @@ func TestShardInterceptorsContainerFactory_CreateRegisterTrieNodesShouldErr(t *t assert.Equal(t, errExpected, err) } +func TestShardInterceptorsContainerFactory_NilSignaturesHandler(t *testing.T) { + t.Parallel() + + coreComp, cryptoComp := createMockComponentHolders() + args := getArgumentsShard(coreComp, cryptoComp) + args.SignaturesHandler = nil + icf, err := interceptorscontainer.NewShardInterceptorsContainerFactory(args) + + assert.Nil(t, icf) + assert.Equal(t, process.ErrNilSignaturesHandler, err) +} + +func TestShardInterceptorsContainerFactory_NilPeerSignatureHandler(t *testing.T) { + t.Parallel() + + coreComp, cryptoComp := createMockComponentHolders() + args := getArgumentsShard(coreComp, cryptoComp) + args.PeerSignatureHandler = nil + icf, err := interceptorscontainer.NewShardInterceptorsContainerFactory(args) + + assert.Nil(t, icf) + assert.Equal(t, process.ErrNilPeerSignatureHandler, err) +} + +func TestShardInterceptorsContainerFactory_InvalidExpiryTimespan(t *testing.T) { + t.Parallel() + + coreComp, cryptoComp := createMockComponentHolders() + args := getArgumentsShard(coreComp, cryptoComp) + args.HeartbeatExpiryTimespanInSec = 0 + icf, err := 
interceptorscontainer.NewShardInterceptorsContainerFactory(args) + + assert.Nil(t, icf) + assert.Equal(t, process.ErrInvalidExpiryTimespan, err) +} + func TestShardInterceptorsContainerFactory_CreateShouldWork(t *testing.T) { t.Parallel() @@ -594,8 +656,12 @@ func TestShardInterceptorsContainerFactory_With4ShardsShouldWork(t *testing.T) { numInterceptorMiniBlocks := noOfShards + 2 numInterceptorMetachainHeaders := 1 numInterceptorTrieNodes := 1 + numInterceptorPeerAuth := 1 + numInterceptorHeartbeat := 1 + numInterceptorsShardValidatorInfo := 1 totalInterceptors := numInterceptorTxs + numInterceptorsUnsignedTxs + numInterceptorsRewardTxs + - numInterceptorHeaders + numInterceptorMiniBlocks + numInterceptorMetachainHeaders + numInterceptorTrieNodes + numInterceptorHeaders + numInterceptorMiniBlocks + numInterceptorMetachainHeaders + numInterceptorTrieNodes + + numInterceptorPeerAuth + numInterceptorHeartbeat + numInterceptorsShardValidatorInfo assert.Nil(t, err) assert.Equal(t, totalInterceptors, container.Len()) @@ -615,8 +681,9 @@ func createMockComponentHolders() (*mock.CoreComponentsMock, *mock.CryptoCompone MinTransactionVersionCalled: func() uint32 { return 1 }, - EpochNotifierField: &epochNotifier.EpochNotifierStub{}, - TxVersionCheckField: versioning.NewTxVersionChecker(1), + EpochNotifierField: &epochNotifier.EpochNotifierStub{}, + TxVersionCheckField: versioning.NewTxVersionChecker(1), + HardforkTriggerPubKeyField: providedHardforkPubKey, } cryptoComponents := &mock.CryptoComponentsMock{ BlockSig: &mock.SignerMock{}, @@ -634,27 +701,32 @@ func getArgumentsShard( cryptoComp *mock.CryptoComponentsMock, ) interceptorscontainer.CommonInterceptorsContainerFactoryArgs { return interceptorscontainer.CommonInterceptorsContainerFactoryArgs{ - CoreComponents: coreComp, - CryptoComponents: cryptoComp, - Accounts: &stateMock.AccountsStub{}, - ShardCoordinator: mock.NewOneShardCoordinatorMock(), - NodesCoordinator: shardingMocks.NewNodesCoordinatorMock(), - Messenger: &mock.TopicHandlerStub{}, - Store: createShardStore(), - DataPool: createShardDataPools(), - MaxTxNonceDeltaAllowed: maxTxNonceDeltaAllowed, - TxFeeHandler: &mock.FeeHandlerStub{}, - BlockBlackList: &mock.BlackListHandlerStub{}, - HeaderSigVerifier: &mock.HeaderSigVerifierStub{}, - HeaderIntegrityVerifier: &mock.HeaderIntegrityVerifierStub{}, - SizeCheckDelta: 0, - ValidityAttester: &mock.ValidityAttesterStub{}, - EpochStartTrigger: &mock.EpochStartTriggerStub{}, - AntifloodHandler: &mock.P2PAntifloodHandlerStub{}, - WhiteListHandler: &testscommon.WhiteListHandlerStub{}, - WhiteListerVerifiedTxs: &testscommon.WhiteListHandlerStub{}, - ArgumentsParser: &mock.ArgumentParserMock{}, - PreferredPeersHolder: &p2pmocks.PeersHolderStub{}, - RequestHandler: &testscommon.RequestHandlerStub{}, + CoreComponents: coreComp, + CryptoComponents: cryptoComp, + Accounts: &stateMock.AccountsStub{}, + ShardCoordinator: mock.NewOneShardCoordinatorMock(), + NodesCoordinator: shardingMocks.NewNodesCoordinatorMock(), + Messenger: &mock.TopicHandlerStub{}, + Store: createShardStore(), + DataPool: createShardDataPools(), + MaxTxNonceDeltaAllowed: maxTxNonceDeltaAllowed, + TxFeeHandler: &mock.FeeHandlerStub{}, + BlockBlackList: &mock.BlackListHandlerStub{}, + HeaderSigVerifier: &mock.HeaderSigVerifierStub{}, + HeaderIntegrityVerifier: &mock.HeaderIntegrityVerifierStub{}, + SizeCheckDelta: 0, + ValidityAttester: &mock.ValidityAttesterStub{}, + EpochStartTrigger: &mock.EpochStartTriggerStub{}, + AntifloodHandler: &mock.P2PAntifloodHandlerStub{}, + 
WhiteListHandler: &testscommon.WhiteListHandlerStub{}, + WhiteListerVerifiedTxs: &testscommon.WhiteListHandlerStub{}, + ArgumentsParser: &mock.ArgumentParserMock{}, + PreferredPeersHolder: &p2pmocks.PeersHolderStub{}, + RequestHandler: &testscommon.RequestHandlerStub{}, + PeerSignatureHandler: &mock.PeerSignatureHandlerStub{}, + SignaturesHandler: &mock.SignaturesHandlerStub{}, + HeartbeatExpiryTimespanInSec: 30, + PeerShardMapper: &p2pmocks.NetworkShardingCollectorStub{}, + HardforkTrigger: &testscommon.HardforkTriggerStub{}, } } diff --git a/process/factory/metachain/preProcessorsContainerFactory.go b/process/factory/metachain/preProcessorsContainerFactory.go index 2f59ba4287c..1a999acf17e 100644 --- a/process/factory/metachain/preProcessorsContainerFactory.go +++ b/process/factory/metachain/preProcessorsContainerFactory.go @@ -38,6 +38,7 @@ type preProcessorsContainerFactory struct { scheduledMiniBlocksEnableEpoch uint32 txTypeHandler process.TxTypeHandler scheduledTxsExecutionHandler process.ScheduledTxsExecutionHandler + processedMiniBlocksTracker process.ProcessedMiniBlocksTracker } // NewPreProcessorsContainerFactory is responsible for creating a new preProcessors factory object @@ -63,6 +64,7 @@ func NewPreProcessorsContainerFactory( scheduledMiniBlocksEnableEpoch uint32, txTypeHandler process.TxTypeHandler, scheduledTxsExecutionHandler process.ScheduledTxsExecutionHandler, + processedMiniBlocksTracker process.ProcessedMiniBlocksTracker, ) (*preProcessorsContainerFactory, error) { if check.IfNil(shardCoordinator) { @@ -119,6 +121,9 @@ func NewPreProcessorsContainerFactory( if check.IfNil(scheduledTxsExecutionHandler) { return nil, process.ErrNilScheduledTxsExecutionHandler } + if check.IfNil(processedMiniBlocksTracker) { + return nil, process.ErrNilProcessedMiniBlocksTracker + } return &preProcessorsContainerFactory{ shardCoordinator: shardCoordinator, @@ -142,6 +147,7 @@ func NewPreProcessorsContainerFactory( scheduledMiniBlocksEnableEpoch: scheduledMiniBlocksEnableEpoch, txTypeHandler: txTypeHandler, scheduledTxsExecutionHandler: scheduledTxsExecutionHandler, + processedMiniBlocksTracker: processedMiniBlocksTracker, }, nil } @@ -195,6 +201,7 @@ func (ppcm *preProcessorsContainerFactory) createTxPreProcessor() (process.PrePr ScheduledMiniBlocksEnableEpoch: ppcm.scheduledMiniBlocksEnableEpoch, TxTypeHandler: ppcm.txTypeHandler, ScheduledTxsExecutionHandler: ppcm.scheduledTxsExecutionHandler, + ProcessedMiniBlocksTracker: ppcm.processedMiniBlocksTracker, } txPreprocessor, err := preprocess.NewTransactionPreprocessor(args) @@ -219,6 +226,7 @@ func (ppcm *preProcessorsContainerFactory) createSmartContractResultPreProcessor ppcm.balanceComputation, ppcm.epochNotifier, ppcm.optimizeGasUsedInCrossMiniBlocksEnableEpoch, + ppcm.processedMiniBlocksTracker, ) return scrPreprocessor, err diff --git a/process/factory/metachain/preProcessorsContainerFactory_test.go b/process/factory/metachain/preProcessorsContainerFactory_test.go index 31ebf1d2c3a..71e2d1515ea 100644 --- a/process/factory/metachain/preProcessorsContainerFactory_test.go +++ b/process/factory/metachain/preProcessorsContainerFactory_test.go @@ -40,6 +40,7 @@ func TestNewPreProcessorsContainerFactory_NilShardCoordinator(t *testing.T) { 2, &testscommon.TxTypeHandlerMock{}, &testscommon.ScheduledTxsExecutionStub{}, + &testscommon.ProcessedMiniBlocksTrackerStub{}, ) assert.Equal(t, process.ErrNilShardCoordinator, err) @@ -71,6 +72,7 @@ func TestNewPreProcessorsContainerFactory_NilStore(t *testing.T) { 2, 
&testscommon.TxTypeHandlerMock{}, &testscommon.ScheduledTxsExecutionStub{}, + &testscommon.ProcessedMiniBlocksTrackerStub{}, ) assert.Equal(t, process.ErrNilStore, err) @@ -102,6 +104,7 @@ func TestNewPreProcessorsContainerFactory_NilMarshalizer(t *testing.T) { 2, &testscommon.TxTypeHandlerMock{}, &testscommon.ScheduledTxsExecutionStub{}, + &testscommon.ProcessedMiniBlocksTrackerStub{}, ) assert.Equal(t, process.ErrNilMarshalizer, err) @@ -133,6 +136,7 @@ func TestNewPreProcessorsContainerFactory_NilHasher(t *testing.T) { 2, &testscommon.TxTypeHandlerMock{}, &testscommon.ScheduledTxsExecutionStub{}, + &testscommon.ProcessedMiniBlocksTrackerStub{}, ) assert.Equal(t, process.ErrNilHasher, err) @@ -164,6 +168,7 @@ func TestNewPreProcessorsContainerFactory_NilDataPool(t *testing.T) { 2, &testscommon.TxTypeHandlerMock{}, &testscommon.ScheduledTxsExecutionStub{}, + &testscommon.ProcessedMiniBlocksTrackerStub{}, ) assert.Equal(t, process.ErrNilDataPoolHolder, err) @@ -195,6 +200,7 @@ func TestNewPreProcessorsContainerFactory_NilAccounts(t *testing.T) { 2, &testscommon.TxTypeHandlerMock{}, &testscommon.ScheduledTxsExecutionStub{}, + &testscommon.ProcessedMiniBlocksTrackerStub{}, ) assert.Equal(t, process.ErrNilAccountsAdapter, err) @@ -226,6 +232,7 @@ func TestNewPreProcessorsContainerFactory_NilFeeHandler(t *testing.T) { 2, &testscommon.TxTypeHandlerMock{}, &testscommon.ScheduledTxsExecutionStub{}, + &testscommon.ProcessedMiniBlocksTrackerStub{}, ) assert.Equal(t, process.ErrNilEconomicsFeeHandler, err) @@ -257,6 +264,7 @@ func TestNewPreProcessorsContainerFactory_NilTxProcessor(t *testing.T) { 2, &testscommon.TxTypeHandlerMock{}, &testscommon.ScheduledTxsExecutionStub{}, + &testscommon.ProcessedMiniBlocksTrackerStub{}, ) assert.Equal(t, process.ErrNilTxProcessor, err) @@ -288,6 +296,7 @@ func TestNewPreProcessorsContainerFactory_NilRequestHandler(t *testing.T) { 2, &testscommon.TxTypeHandlerMock{}, &testscommon.ScheduledTxsExecutionStub{}, + &testscommon.ProcessedMiniBlocksTrackerStub{}, ) assert.Equal(t, process.ErrNilRequestHandler, err) assert.Nil(t, ppcm) @@ -318,6 +327,7 @@ func TestNewPreProcessorsContainerFactory_NilGasHandler(t *testing.T) { 2, &testscommon.TxTypeHandlerMock{}, &testscommon.ScheduledTxsExecutionStub{}, + &testscommon.ProcessedMiniBlocksTrackerStub{}, ) assert.Equal(t, process.ErrNilGasHandler, err) assert.Nil(t, ppcm) @@ -348,6 +358,7 @@ func TestNewPreProcessorsContainerFactory_NilBlockTracker(t *testing.T) { 2, &testscommon.TxTypeHandlerMock{}, &testscommon.ScheduledTxsExecutionStub{}, + &testscommon.ProcessedMiniBlocksTrackerStub{}, ) assert.Equal(t, process.ErrNilBlockTracker, err) assert.Nil(t, ppcm) @@ -378,6 +389,7 @@ func TestNewPreProcessorsContainerFactory_NilPubkeyConverter(t *testing.T) { 2, &testscommon.TxTypeHandlerMock{}, &testscommon.ScheduledTxsExecutionStub{}, + &testscommon.ProcessedMiniBlocksTrackerStub{}, ) assert.Equal(t, process.ErrNilPubkeyConverter, err) assert.Nil(t, ppcm) @@ -408,6 +420,7 @@ func TestNewPreProcessorsContainerFactory_NilBlockSizeComputationHandler(t *test 2, &testscommon.TxTypeHandlerMock{}, &testscommon.ScheduledTxsExecutionStub{}, + &testscommon.ProcessedMiniBlocksTrackerStub{}, ) assert.Equal(t, process.ErrNilBlockSizeComputationHandler, err) assert.Nil(t, ppcm) @@ -438,6 +451,7 @@ func TestNewPreProcessorsContainerFactory_NilBalanceComputationHandler(t *testin 2, &testscommon.TxTypeHandlerMock{}, &testscommon.ScheduledTxsExecutionStub{}, + &testscommon.ProcessedMiniBlocksTrackerStub{}, ) assert.Equal(t, 
process.ErrNilBalanceComputationHandler, err) assert.Nil(t, ppcm) @@ -468,6 +482,7 @@ func TestNewPreProcessorsContainerFactory_NilEpochNotifier(t *testing.T) { 2, &testscommon.TxTypeHandlerMock{}, &testscommon.ScheduledTxsExecutionStub{}, + &testscommon.ProcessedMiniBlocksTrackerStub{}, ) assert.Equal(t, process.ErrNilEpochNotifier, err) assert.Nil(t, ppcm) @@ -498,6 +513,7 @@ func TestNewPreProcessorsContainerFactory_NilTxTypeHandler(t *testing.T) { 2, nil, &testscommon.ScheduledTxsExecutionStub{}, + &testscommon.ProcessedMiniBlocksTrackerStub{}, ) assert.Equal(t, process.ErrNilTxTypeHandler, err) assert.Nil(t, ppcm) @@ -528,11 +544,43 @@ func TestNewPreProcessorsContainerFactory_NilScheduledTxsExecutionHandler(t *tes 2, &testscommon.TxTypeHandlerMock{}, nil, + &testscommon.ProcessedMiniBlocksTrackerStub{}, ) assert.Equal(t, process.ErrNilScheduledTxsExecutionHandler, err) assert.Nil(t, ppcm) } +func TestNewPreProcessorsContainerFactory_NilProcessedMiniBlocksTracker(t *testing.T) { + t.Parallel() + + ppcm, err := metachain.NewPreProcessorsContainerFactory( + mock.NewMultiShardsCoordinatorMock(3), + &mock.ChainStorerMock{}, + &mock.MarshalizerMock{}, + &hashingMocks.HasherMock{}, + dataRetrieverMock.NewPoolsHolderMock(), + &stateMock.AccountsStub{}, + &testscommon.RequestHandlerStub{}, + &testscommon.TxProcessorMock{}, + &testscommon.SmartContractResultsProcessorMock{}, + &mock.FeeHandlerStub{}, + &testscommon.GasHandlerStub{}, + &mock.BlockTrackerMock{}, + createMockPubkeyConverter(), + &testscommon.BlockSizeComputationStub{}, + &testscommon.BalanceComputationStub{}, + &epochNotifier.EpochNotifierStub{}, + 2, + 2, + 2, + &testscommon.TxTypeHandlerMock{}, + &testscommon.ScheduledTxsExecutionStub{}, + nil, + ) + assert.Equal(t, process.ErrNilProcessedMiniBlocksTracker, err) + assert.Nil(t, ppcm) +} + func TestNewPreProcessorsContainerFactory(t *testing.T) { t.Parallel() @@ -558,6 +606,7 @@ func TestNewPreProcessorsContainerFactory(t *testing.T) { 2, &testscommon.TxTypeHandlerMock{}, &testscommon.ScheduledTxsExecutionStub{}, + &testscommon.ProcessedMiniBlocksTrackerStub{}, ) assert.Nil(t, err) @@ -595,6 +644,7 @@ func TestPreProcessorsContainerFactory_CreateErrTxPreproc(t *testing.T) { 2, &testscommon.TxTypeHandlerMock{}, &testscommon.ScheduledTxsExecutionStub{}, + &testscommon.ProcessedMiniBlocksTrackerStub{}, ) assert.Nil(t, err) @@ -630,6 +680,7 @@ func TestPreProcessorsContainerFactory_Create(t *testing.T) { 2, &testscommon.TxTypeHandlerMock{}, &testscommon.ScheduledTxsExecutionStub{}, + &testscommon.ProcessedMiniBlocksTrackerStub{}, ) assert.Nil(t, err) diff --git a/process/factory/shard/preProcessorsContainerFactory.go b/process/factory/shard/preProcessorsContainerFactory.go index aded2affd7d..bd099f61ac1 100644 --- a/process/factory/shard/preProcessorsContainerFactory.go +++ b/process/factory/shard/preProcessorsContainerFactory.go @@ -40,6 +40,7 @@ type preProcessorsContainerFactory struct { scheduledMiniBlocksEnableEpoch uint32 txTypeHandler process.TxTypeHandler scheduledTxsExecutionHandler process.ScheduledTxsExecutionHandler + processedMiniBlocksTracker process.ProcessedMiniBlocksTracker } // NewPreProcessorsContainerFactory is responsible for creating a new preProcessors factory object @@ -67,6 +68,7 @@ func NewPreProcessorsContainerFactory( scheduledMiniBlocksEnableEpoch uint32, txTypeHandler process.TxTypeHandler, scheduledTxsExecutionHandler process.ScheduledTxsExecutionHandler, + processedMiniBlocksTracker process.ProcessedMiniBlocksTracker, ) 
(*preProcessorsContainerFactory, error) { if check.IfNil(shardCoordinator) { @@ -129,6 +131,9 @@ func NewPreProcessorsContainerFactory( if check.IfNil(scheduledTxsExecutionHandler) { return nil, process.ErrNilScheduledTxsExecutionHandler } + if check.IfNil(processedMiniBlocksTracker) { + return nil, process.ErrNilProcessedMiniBlocksTracker + } return &preProcessorsContainerFactory{ shardCoordinator: shardCoordinator, @@ -154,6 +159,7 @@ func NewPreProcessorsContainerFactory( scheduledMiniBlocksEnableEpoch: scheduledMiniBlocksEnableEpoch, txTypeHandler: txTypeHandler, scheduledTxsExecutionHandler: scheduledTxsExecutionHandler, + processedMiniBlocksTracker: processedMiniBlocksTracker, }, nil } @@ -227,6 +233,7 @@ func (ppcm *preProcessorsContainerFactory) createTxPreProcessor() (process.PrePr ScheduledMiniBlocksEnableEpoch: ppcm.scheduledMiniBlocksEnableEpoch, TxTypeHandler: ppcm.txTypeHandler, ScheduledTxsExecutionHandler: ppcm.scheduledTxsExecutionHandler, + ProcessedMiniBlocksTracker: ppcm.processedMiniBlocksTracker, } txPreprocessor, err := preprocess.NewTransactionPreprocessor(args) @@ -251,6 +258,7 @@ func (ppcm *preProcessorsContainerFactory) createSmartContractResultPreProcessor ppcm.balanceComputation, ppcm.epochNotifier, ppcm.optimizeGasUsedInCrossMiniBlocksEnableEpoch, + ppcm.processedMiniBlocksTracker, ) return scrPreprocessor, err @@ -270,6 +278,7 @@ func (ppcm *preProcessorsContainerFactory) createRewardsTransactionPreProcessor( ppcm.pubkeyConverter, ppcm.blockSizeComputation, ppcm.balanceComputation, + ppcm.processedMiniBlocksTracker, ) return rewardTxPreprocessor, err diff --git a/process/factory/shard/preProcessorsContainerFactory_test.go b/process/factory/shard/preProcessorsContainerFactory_test.go index 6063db3e771..e785028ba6d 100644 --- a/process/factory/shard/preProcessorsContainerFactory_test.go +++ b/process/factory/shard/preProcessorsContainerFactory_test.go @@ -45,6 +45,7 @@ func TestNewPreProcessorsContainerFactory_NilShardCoordinator(t *testing.T) { 2, &testscommon.TxTypeHandlerMock{}, &testscommon.ScheduledTxsExecutionStub{}, + &testscommon.ProcessedMiniBlocksTrackerStub{}, ) assert.Equal(t, process.ErrNilShardCoordinator, err) @@ -78,6 +79,7 @@ func TestNewPreProcessorsContainerFactory_NilStore(t *testing.T) { 2, &testscommon.TxTypeHandlerMock{}, &testscommon.ScheduledTxsExecutionStub{}, + &testscommon.ProcessedMiniBlocksTrackerStub{}, ) assert.Equal(t, process.ErrNilStore, err) @@ -111,6 +113,7 @@ func TestNewPreProcessorsContainerFactory_NilMarshalizer(t *testing.T) { 2, &testscommon.TxTypeHandlerMock{}, &testscommon.ScheduledTxsExecutionStub{}, + &testscommon.ProcessedMiniBlocksTrackerStub{}, ) assert.Equal(t, process.ErrNilMarshalizer, err) @@ -144,6 +147,7 @@ func TestNewPreProcessorsContainerFactory_NilHasher(t *testing.T) { 2, &testscommon.TxTypeHandlerMock{}, &testscommon.ScheduledTxsExecutionStub{}, + &testscommon.ProcessedMiniBlocksTrackerStub{}, ) assert.Equal(t, process.ErrNilHasher, err) @@ -177,6 +181,7 @@ func TestNewPreProcessorsContainerFactory_NilDataPool(t *testing.T) { 2, &testscommon.TxTypeHandlerMock{}, &testscommon.ScheduledTxsExecutionStub{}, + &testscommon.ProcessedMiniBlocksTrackerStub{}, ) assert.Equal(t, process.ErrNilDataPoolHolder, err) @@ -210,6 +215,7 @@ func TestNewPreProcessorsContainerFactory_NilAddrConv(t *testing.T) { 2, &testscommon.TxTypeHandlerMock{}, &testscommon.ScheduledTxsExecutionStub{}, + &testscommon.ProcessedMiniBlocksTrackerStub{}, ) assert.Equal(t, process.ErrNilPubkeyConverter, err) @@ -243,6 +249,7 @@ func 
TestNewPreProcessorsContainerFactory_NilAccounts(t *testing.T) { 2, &testscommon.TxTypeHandlerMock{}, &testscommon.ScheduledTxsExecutionStub{}, + &testscommon.ProcessedMiniBlocksTrackerStub{}, ) assert.Equal(t, process.ErrNilAccountsAdapter, err) @@ -276,6 +283,7 @@ func TestNewPreProcessorsContainerFactory_NilTxProcessor(t *testing.T) { 2, &testscommon.TxTypeHandlerMock{}, &testscommon.ScheduledTxsExecutionStub{}, + &testscommon.ProcessedMiniBlocksTrackerStub{}, ) assert.Equal(t, process.ErrNilTxProcessor, err) @@ -309,6 +317,7 @@ func TestNewPreProcessorsContainerFactory_NilSCProcessor(t *testing.T) { 2, &testscommon.TxTypeHandlerMock{}, &testscommon.ScheduledTxsExecutionStub{}, + &testscommon.ProcessedMiniBlocksTrackerStub{}, ) assert.Equal(t, process.ErrNilSmartContractProcessor, err) @@ -342,6 +351,7 @@ func TestNewPreProcessorsContainerFactory_NilSCR(t *testing.T) { 2, &testscommon.TxTypeHandlerMock{}, &testscommon.ScheduledTxsExecutionStub{}, + &testscommon.ProcessedMiniBlocksTrackerStub{}, ) assert.Equal(t, process.ErrNilSmartContractResultProcessor, err) @@ -375,6 +385,7 @@ func TestNewPreProcessorsContainerFactory_NilRewardTxProcessor(t *testing.T) { 2, &testscommon.TxTypeHandlerMock{}, &testscommon.ScheduledTxsExecutionStub{}, + &testscommon.ProcessedMiniBlocksTrackerStub{}, ) assert.Equal(t, process.ErrNilRewardsTxProcessor, err) @@ -408,6 +419,7 @@ func TestNewPreProcessorsContainerFactory_NilRequestHandler(t *testing.T) { 2, &testscommon.TxTypeHandlerMock{}, &testscommon.ScheduledTxsExecutionStub{}, + &testscommon.ProcessedMiniBlocksTrackerStub{}, ) assert.Equal(t, process.ErrNilRequestHandler, err) @@ -441,6 +453,7 @@ func TestNewPreProcessorsContainerFactory_NilFeeHandler(t *testing.T) { 2, &testscommon.TxTypeHandlerMock{}, &testscommon.ScheduledTxsExecutionStub{}, + &testscommon.ProcessedMiniBlocksTrackerStub{}, ) assert.Equal(t, process.ErrNilEconomicsFeeHandler, err) @@ -474,6 +487,7 @@ func TestNewPreProcessorsContainerFactory_NilGasHandler(t *testing.T) { 2, &testscommon.TxTypeHandlerMock{}, &testscommon.ScheduledTxsExecutionStub{}, + &testscommon.ProcessedMiniBlocksTrackerStub{}, ) assert.Equal(t, process.ErrNilGasHandler, err) @@ -507,6 +521,7 @@ func TestNewPreProcessorsContainerFactory_NilBlockTracker(t *testing.T) { 2, &testscommon.TxTypeHandlerMock{}, &testscommon.ScheduledTxsExecutionStub{}, + &testscommon.ProcessedMiniBlocksTrackerStub{}, ) assert.Equal(t, process.ErrNilBlockTracker, err) @@ -540,6 +555,7 @@ func TestNewPreProcessorsContainerFactory_NilBlockSizeComputationHandler(t *test 2, &testscommon.TxTypeHandlerMock{}, &testscommon.ScheduledTxsExecutionStub{}, + &testscommon.ProcessedMiniBlocksTrackerStub{}, ) assert.Equal(t, process.ErrNilBlockSizeComputationHandler, err) @@ -573,6 +589,7 @@ func TestNewPreProcessorsContainerFactory_NilBalanceComputationHandler(t *testin 2, &testscommon.TxTypeHandlerMock{}, &testscommon.ScheduledTxsExecutionStub{}, + &testscommon.ProcessedMiniBlocksTrackerStub{}, ) assert.Equal(t, process.ErrNilBalanceComputationHandler, err) @@ -606,6 +623,7 @@ func TestNewPreProcessorsContainerFactory_NilEpochNotifier(t *testing.T) { 2, &testscommon.TxTypeHandlerMock{}, &testscommon.ScheduledTxsExecutionStub{}, + &testscommon.ProcessedMiniBlocksTrackerStub{}, ) assert.Equal(t, process.ErrNilEpochNotifier, err) @@ -639,6 +657,7 @@ func TestNewPreProcessorsContainerFactory_NilTxTypeHandler(t *testing.T) { 2, nil, &testscommon.ScheduledTxsExecutionStub{}, + &testscommon.ProcessedMiniBlocksTrackerStub{}, ) assert.Equal(t, 
process.ErrNilTxTypeHandler, err) @@ -672,12 +691,47 @@ func TestNewPreProcessorsContainerFactory_NilScheduledTxsExecutionHandler(t *tes 2, &testscommon.TxTypeHandlerMock{}, nil, + &testscommon.ProcessedMiniBlocksTrackerStub{}, ) assert.Equal(t, process.ErrNilScheduledTxsExecutionHandler, err) assert.Nil(t, ppcm) } +func TestNewPreProcessorsContainerFactory_NilProcessedMiniBlocksTracker(t *testing.T) { + t.Parallel() + + ppcm, err := NewPreProcessorsContainerFactory( + mock.NewMultiShardsCoordinatorMock(3), + &mock.ChainStorerMock{}, + &mock.MarshalizerMock{}, + &hashingMocks.HasherMock{}, + dataRetrieverMock.NewPoolsHolderMock(), + createMockPubkeyConverter(), + &stateMock.AccountsStub{}, + &testscommon.RequestHandlerStub{}, + &testscommon.TxProcessorMock{}, + &testscommon.SCProcessorMock{}, + &testscommon.SmartContractResultsProcessorMock{}, + &testscommon.RewardTxProcessorMock{}, + &mock.FeeHandlerStub{}, + &testscommon.GasHandlerStub{}, + &mock.BlockTrackerMock{}, + &testscommon.BlockSizeComputationStub{}, + &testscommon.BalanceComputationStub{}, + &epochNotifier.EpochNotifierStub{}, + 2, + 2, + 2, + &testscommon.TxTypeHandlerMock{}, + &testscommon.ScheduledTxsExecutionStub{}, + nil, + ) + + assert.Equal(t, process.ErrNilProcessedMiniBlocksTracker, err) + assert.Nil(t, ppcm) +} + func TestNewPreProcessorsContainerFactory(t *testing.T) { t.Parallel() @@ -705,6 +759,7 @@ func TestNewPreProcessorsContainerFactory(t *testing.T) { 2, &testscommon.TxTypeHandlerMock{}, &testscommon.ScheduledTxsExecutionStub{}, + &testscommon.ProcessedMiniBlocksTrackerStub{}, ) assert.Nil(t, err) @@ -743,6 +798,7 @@ func TestPreProcessorsContainerFactory_CreateErrTxPreproc(t *testing.T) { 2, &testscommon.TxTypeHandlerMock{}, &testscommon.ScheduledTxsExecutionStub{}, + &testscommon.ProcessedMiniBlocksTrackerStub{}, ) assert.Nil(t, err) @@ -787,6 +843,7 @@ func TestPreProcessorsContainerFactory_CreateErrScrPreproc(t *testing.T) { 2, &testscommon.TxTypeHandlerMock{}, &testscommon.ScheduledTxsExecutionStub{}, + &testscommon.ProcessedMiniBlocksTrackerStub{}, ) assert.Nil(t, err) @@ -834,6 +891,7 @@ func TestPreProcessorsContainerFactory_Create(t *testing.T) { 2, &testscommon.TxTypeHandlerMock{}, &testscommon.ScheduledTxsExecutionStub{}, + &testscommon.ProcessedMiniBlocksTrackerStub{}, ) assert.Nil(t, err) diff --git a/process/headerCheck/headerIntegrityVerifier.go b/process/headerCheck/headerIntegrityVerifier.go index 3cc60b47de2..4db25a1ebef 100644 --- a/process/headerCheck/headerIntegrityVerifier.go +++ b/process/headerCheck/headerIntegrityVerifier.go @@ -12,7 +12,7 @@ import ( ) type headerIntegrityVerifier struct { - referenceChainID []byte + referenceChainID []byte headerVersionHandler factory.HeaderVersionHandler } @@ -30,7 +30,7 @@ func NewHeaderIntegrityVerifier( } hdrIntVer := &headerIntegrityVerifier{ - referenceChainID: referenceChainID, + referenceChainID: referenceChainID, headerVersionHandler: headerVersionHandler, } diff --git a/process/heartbeat/constants.go b/process/heartbeat/constants.go new file mode 100644 index 00000000000..bd53eb5e265 --- /dev/null +++ b/process/heartbeat/constants.go @@ -0,0 +1,18 @@ +package heartbeat + +const ( + minSizeInBytes = 1 + maxSizeInBytes = 128 + minDurationInSec = 10 + payloadExpiryThresholdInSec = 10 + interceptedPeerAuthenticationType = "intercepted peer authentication" + interceptedHeartbeatType = "intercepted heartbeat" + publicKeyProperty = "public key" + signatureProperty = "signature" + peerIdProperty = "peer id" + payloadProperty = "payload" + 
payloadSignatureProperty = "payload signature" + versionNumberProperty = "version number" + nodeDisplayNameProperty = "node display name" + identityProperty = "identity" +) diff --git a/process/heartbeat/interceptedHeartbeat.go b/process/heartbeat/interceptedHeartbeat.go new file mode 100644 index 00000000000..1e5a1e5930a --- /dev/null +++ b/process/heartbeat/interceptedHeartbeat.go @@ -0,0 +1,165 @@ +package heartbeat + +import ( + "fmt" + + "github.com/ElrondNetwork/elrond-go-core/core" + "github.com/ElrondNetwork/elrond-go-core/core/check" + "github.com/ElrondNetwork/elrond-go-core/marshal" + logger "github.com/ElrondNetwork/elrond-go-logger" + "github.com/ElrondNetwork/elrond-go/heartbeat" + "github.com/ElrondNetwork/elrond-go/process" +) + +const uint32Size = 4 +const uint64Size = 8 + +var log = logger.GetOrCreate("process/heartbeat") + +// ArgBaseInterceptedHeartbeat is the base argument used for messages +type ArgBaseInterceptedHeartbeat struct { + DataBuff []byte + Marshaller marshal.Marshalizer +} + +// ArgInterceptedHeartbeat is the argument used in the intercepted heartbeat constructor +type ArgInterceptedHeartbeat struct { + ArgBaseInterceptedHeartbeat + PeerId core.PeerID +} + +// interceptedHeartbeat is a wrapper over HeartbeatV2 +type interceptedHeartbeat struct { + heartbeat heartbeat.HeartbeatV2 + payload heartbeat.Payload + peerId core.PeerID +} + +// NewInterceptedHeartbeat tries to create a new intercepted heartbeat instance +func NewInterceptedHeartbeat(arg ArgInterceptedHeartbeat) (*interceptedHeartbeat, error) { + err := checkBaseArg(arg.ArgBaseInterceptedHeartbeat) + if err != nil { + return nil, err + } + if len(arg.PeerId) == 0 { + return nil, process.ErrEmptyPeerID + } + + hb, payload, err := createHeartbeat(arg.Marshaller, arg.DataBuff) + if err != nil { + return nil, err + } + + intercepted := &interceptedHeartbeat{ + heartbeat: *hb, + payload: *payload, + peerId: arg.PeerId, + } + + return intercepted, nil +} + +func checkBaseArg(arg ArgBaseInterceptedHeartbeat) error { + if len(arg.DataBuff) == 0 { + return process.ErrNilBuffer + } + if check.IfNil(arg.Marshaller) { + return process.ErrNilMarshalizer + } + return nil +} + +func createHeartbeat(marshaller marshal.Marshalizer, buff []byte) (*heartbeat.HeartbeatV2, *heartbeat.Payload, error) { + hb := &heartbeat.HeartbeatV2{} + err := marshaller.Unmarshal(hb, buff) + if err != nil { + return nil, nil, err + } + payload := &heartbeat.Payload{} + err = marshaller.Unmarshal(payload, hb.Payload) + if err != nil { + return nil, nil, err + } + + log.Trace("interceptedHeartbeat successfully created") + + return hb, payload, nil +} + +// CheckValidity will check the validity of the received peer heartbeat +func (ihb *interceptedHeartbeat) CheckValidity() error { + err := verifyPropertyMinMaxLen(payloadProperty, ihb.heartbeat.Payload) + if err != nil { + return err + } + err = verifyPropertyMinMaxLen(versionNumberProperty, []byte(ihb.heartbeat.VersionNumber)) + if err != nil { + return err + } + err = verifyPropertyMaxLen(nodeDisplayNameProperty, []byte(ihb.heartbeat.NodeDisplayName)) + if err != nil { + return err + } + err = verifyPropertyMaxLen(identityProperty, []byte(ihb.heartbeat.Identity)) + if err != nil { + return err + } + if ihb.heartbeat.PeerSubType != uint32(core.RegularPeer) && ihb.heartbeat.PeerSubType != uint32(core.FullHistoryObserver) { + return process.ErrInvalidPeerSubType + } + + log.Trace("interceptedHeartbeat received valid data") + + return nil +} + +// IsForCurrentShard always returns true +func 
(ihb *interceptedHeartbeat) IsForCurrentShard() bool { + return true +} + +// Hash always returns an empty string +func (ihb *interceptedHeartbeat) Hash() []byte { + return []byte("") +} + +// Type returns the type of this intercepted data +func (ihb *interceptedHeartbeat) Type() string { + return interceptedHeartbeatType +} + +// Identifiers returns the identifiers used in requests +func (ihb *interceptedHeartbeat) Identifiers() [][]byte { + return [][]byte{ihb.peerId.Bytes()} +} + +// String returns the most important fields as string +func (ihb *interceptedHeartbeat) String() string { + return fmt.Sprintf("pid=%s, version=%s, name=%s, identity=%s, nonce=%d, subtype=%d, payload=%s", + ihb.peerId.Pretty(), + ihb.heartbeat.VersionNumber, + ihb.heartbeat.NodeDisplayName, + ihb.heartbeat.Identity, + ihb.heartbeat.Nonce, + ihb.heartbeat.PeerSubType, + logger.DisplayByteSlice(ihb.heartbeat.Payload)) +} + +// Message returns the heartbeat message +func (ihb *interceptedHeartbeat) Message() interface{} { + return &ihb.heartbeat +} + +// SizeInBytes returns the size in bytes held by this instance +func (ihb *interceptedHeartbeat) SizeInBytes() int { + return len(ihb.heartbeat.Payload) + + len(ihb.heartbeat.VersionNumber) + + len(ihb.heartbeat.NodeDisplayName) + + len(ihb.heartbeat.Identity) + + uint64Size + uint32Size +} + +// IsInterfaceNil returns true if there is no value under the interface +func (ihb *interceptedHeartbeat) IsInterfaceNil() bool { + return ihb == nil +} diff --git a/process/heartbeat/interceptedHeartbeat_test.go b/process/heartbeat/interceptedHeartbeat_test.go new file mode 100644 index 00000000000..2dcb80d0e7c --- /dev/null +++ b/process/heartbeat/interceptedHeartbeat_test.go @@ -0,0 +1,198 @@ +package heartbeat + +import ( + "strings" + "testing" + "time" + + "github.com/ElrondNetwork/elrond-go-core/core" + "github.com/ElrondNetwork/elrond-go-core/marshal" + "github.com/ElrondNetwork/elrond-go/dataRetriever/mock" + "github.com/ElrondNetwork/elrond-go/heartbeat" + "github.com/ElrondNetwork/elrond-go/process" + "github.com/stretchr/testify/assert" +) + +func createDefaultInterceptedHeartbeat() *heartbeat.HeartbeatV2 { + payload := &heartbeat.Payload{ + Timestamp: time.Now().Unix(), + HardforkMessage: "hardfork message", + } + marshaller := marshal.GogoProtoMarshalizer{} + payloadBytes, err := marshaller.Marshal(payload) + if err != nil { + return nil + } + + return &heartbeat.HeartbeatV2{ + Payload: payloadBytes, + VersionNumber: "version number", + NodeDisplayName: "node display name", + Identity: "identity", + Nonce: 123, + PeerSubType: uint32(core.RegularPeer), + } +} + +func getSizeOfHeartbeat(hb *heartbeat.HeartbeatV2) int { + return len(hb.Payload) + len(hb.VersionNumber) + + len(hb.NodeDisplayName) + len(hb.Identity) + + uint64Size + uint32Size +} + +func createMockInterceptedHeartbeatArg(interceptedData *heartbeat.HeartbeatV2) ArgInterceptedHeartbeat { + arg := ArgInterceptedHeartbeat{} + arg.Marshaller = &marshal.GogoProtoMarshalizer{} + arg.DataBuff, _ = arg.Marshaller.Marshal(interceptedData) + arg.PeerId = "pid" + + return arg +} + +func TestNewInterceptedHeartbeat(t *testing.T) { + t.Parallel() + + t.Run("nil data buff should error", func(t *testing.T) { + t.Parallel() + + arg := createMockInterceptedHeartbeatArg(createDefaultInterceptedHeartbeat()) + arg.DataBuff = nil + + ihb, err := NewInterceptedHeartbeat(arg) + assert.Nil(t, ihb) + assert.Equal(t, process.ErrNilBuffer, err) + }) + t.Run("nil marshaller should error", func(t *testing.T) { + t.Parallel() + + 
arg := createMockInterceptedHeartbeatArg(createDefaultInterceptedHeartbeat()) + arg.Marshaller = nil + + ihb, err := NewInterceptedHeartbeat(arg) + assert.Nil(t, ihb) + assert.Equal(t, process.ErrNilMarshalizer, err) + }) + t.Run("empty pid should error", func(t *testing.T) { + t.Parallel() + + arg := createMockInterceptedHeartbeatArg(createDefaultInterceptedHeartbeat()) + arg.PeerId = "" + + ihb, err := NewInterceptedHeartbeat(arg) + assert.Nil(t, ihb) + assert.Equal(t, process.ErrEmptyPeerID, err) + }) + t.Run("unmarshal returns error", func(t *testing.T) { + t.Parallel() + + arg := createMockInterceptedHeartbeatArg(createDefaultInterceptedHeartbeat()) + arg.Marshaller = &mock.MarshalizerStub{ + UnmarshalCalled: func(obj interface{}, buff []byte) error { + return expectedErr + }, + } + + ihb, err := NewInterceptedHeartbeat(arg) + assert.Nil(t, ihb) + assert.Equal(t, expectedErr, err) + }) + t.Run("unmarshalable payload returns error", func(t *testing.T) { + t.Parallel() + + interceptedData := createDefaultInterceptedHeartbeat() + interceptedData.Payload = []byte("invalid data") + arg := createMockInterceptedHeartbeatArg(interceptedData) + + ihb, err := NewInterceptedHeartbeat(arg) + assert.Nil(t, ihb) + assert.NotNil(t, err) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + arg := createMockInterceptedHeartbeatArg(createDefaultInterceptedHeartbeat()) + + ihb, err := NewInterceptedHeartbeat(arg) + assert.False(t, ihb.IsInterfaceNil()) + assert.Nil(t, err) + }) +} + +func TestInterceptedHeartbeat_CheckValidity(t *testing.T) { + t.Parallel() + t.Run("payloadProperty too short", testInterceptedHeartbeatPropertyLen(payloadProperty, false)) + t.Run("payloadProperty too long", testInterceptedHeartbeatPropertyLen(payloadProperty, true)) + + t.Run("versionNumberProperty too short", testInterceptedHeartbeatPropertyLen(versionNumberProperty, false)) + t.Run("versionNumberProperty too long", testInterceptedHeartbeatPropertyLen(versionNumberProperty, true)) + + t.Run("nodeDisplayNameProperty too long", testInterceptedHeartbeatPropertyLen(nodeDisplayNameProperty, true)) + + t.Run("identityProperty too long", testInterceptedHeartbeatPropertyLen(identityProperty, true)) + + t.Run("invalid peer subtype should error", func(t *testing.T) { + t.Parallel() + + arg := createMockInterceptedHeartbeatArg(createDefaultInterceptedHeartbeat()) + ihb, _ := NewInterceptedHeartbeat(arg) + ihb.heartbeat.PeerSubType = 123 + err := ihb.CheckValidity() + assert.Equal(t, process.ErrInvalidPeerSubType, err) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + arg := createMockInterceptedHeartbeatArg(createDefaultInterceptedHeartbeat()) + ihb, _ := NewInterceptedHeartbeat(arg) + err := ihb.CheckValidity() + assert.Nil(t, err) + }) +} + +func testInterceptedHeartbeatPropertyLen(property string, tooLong bool) func(t *testing.T) { + return func(t *testing.T) { + t.Parallel() + + value := []byte("") + expectedError := process.ErrPropertyTooShort + if tooLong { + value = make([]byte, 130) + expectedError = process.ErrPropertyTooLong + } + + arg := createMockInterceptedHeartbeatArg(createDefaultInterceptedHeartbeat()) + ihb, _ := NewInterceptedHeartbeat(arg) + switch property { + case payloadProperty: + ihb.heartbeat.Payload = value + case versionNumberProperty: + ihb.heartbeat.VersionNumber = string(value) + case nodeDisplayNameProperty: + ihb.heartbeat.NodeDisplayName = string(value) + case identityProperty: + ihb.heartbeat.Identity = string(value) + default: + assert.True(t, false) + } + + err 
:= ihb.CheckValidity() + assert.True(t, strings.Contains(err.Error(), expectedError.Error())) + } +} + +func TestInterceptedHeartbeat_Getters(t *testing.T) { + t.Parallel() + + providedHB := createDefaultInterceptedHeartbeat() + arg := createMockInterceptedHeartbeatArg(providedHB) + ihb, _ := NewInterceptedHeartbeat(arg) + expectedHeartbeat := &heartbeat.HeartbeatV2{} + err := arg.Marshaller.Unmarshal(expectedHeartbeat, arg.DataBuff) + assert.Nil(t, err) + assert.True(t, ihb.IsForCurrentShard()) + assert.Equal(t, interceptedHeartbeatType, ihb.Type()) + assert.Equal(t, []byte(""), ihb.Hash()) + assert.Equal(t, arg.PeerId.Bytes(), ihb.Identifiers()[0]) + providedHBSize := getSizeOfHeartbeat(providedHB) + assert.Equal(t, providedHBSize, ihb.SizeInBytes()) +} diff --git a/process/heartbeat/interceptedPeerAuthentication.go b/process/heartbeat/interceptedPeerAuthentication.go new file mode 100644 index 00000000000..05c17f92fb9 --- /dev/null +++ b/process/heartbeat/interceptedPeerAuthentication.go @@ -0,0 +1,277 @@ +package heartbeat + +import ( + "bytes" + "fmt" + "time" + + "github.com/ElrondNetwork/elrond-go-core/core" + "github.com/ElrondNetwork/elrond-go-core/core/check" + "github.com/ElrondNetwork/elrond-go-core/marshal" + crypto "github.com/ElrondNetwork/elrond-go-crypto" + logger "github.com/ElrondNetwork/elrond-go-logger" + "github.com/ElrondNetwork/elrond-go/heartbeat" + "github.com/ElrondNetwork/elrond-go/process" +) + +// ArgInterceptedPeerAuthentication is the argument used in the intercepted peer authentication constructor +type ArgInterceptedPeerAuthentication struct { + ArgBaseInterceptedHeartbeat + NodesCoordinator NodesCoordinator + SignaturesHandler SignaturesHandler + PeerSignatureHandler crypto.PeerSignatureHandler + ExpiryTimespanInSec int64 + HardforkTriggerPubKey []byte +} + +// interceptedPeerAuthentication is a wrapper over PeerAuthentication +type interceptedPeerAuthentication struct { + peerAuthentication heartbeat.PeerAuthentication + payload heartbeat.Payload + peerId core.PeerID + nodesCoordinator NodesCoordinator + signaturesHandler SignaturesHandler + peerSignatureHandler crypto.PeerSignatureHandler + expiryTimespanInSec int64 + hardforkTriggerPubKey []byte +} + +// NewInterceptedPeerAuthentication tries to create a new intercepted peer authentication instance +func NewInterceptedPeerAuthentication(arg ArgInterceptedPeerAuthentication) (*interceptedPeerAuthentication, error) { + err := checkArg(arg) + if err != nil { + return nil, err + } + + peerAuthentication, payload, err := createPeerAuthentication(arg.Marshaller, arg.DataBuff) + if err != nil { + return nil, err + } + + intercepted := &interceptedPeerAuthentication{ + peerAuthentication: *peerAuthentication, + payload: *payload, + nodesCoordinator: arg.NodesCoordinator, + signaturesHandler: arg.SignaturesHandler, + peerSignatureHandler: arg.PeerSignatureHandler, + expiryTimespanInSec: arg.ExpiryTimespanInSec, + hardforkTriggerPubKey: arg.HardforkTriggerPubKey, + } + intercepted.peerId = core.PeerID(intercepted.peerAuthentication.Pid) + + return intercepted, nil +} + +func checkArg(arg ArgInterceptedPeerAuthentication) error { + err := checkBaseArg(arg.ArgBaseInterceptedHeartbeat) + if err != nil { + return err + } + if check.IfNil(arg.NodesCoordinator) { + return process.ErrNilNodesCoordinator + } + if check.IfNil(arg.SignaturesHandler) { + return process.ErrNilSignaturesHandler + } + if arg.ExpiryTimespanInSec < minDurationInSec { + return process.ErrInvalidExpiryTimespan + } + if 
check.IfNil(arg.PeerSignatureHandler) { + return process.ErrNilPeerSignatureHandler + } + if len(arg.HardforkTriggerPubKey) == 0 { + return fmt.Errorf("%w hardfork trigger public key bytes length is 0", process.ErrInvalidValue) + } + + return nil +} + +func createPeerAuthentication(marshalizer marshal.Marshalizer, buff []byte) (*heartbeat.PeerAuthentication, *heartbeat.Payload, error) { + peerAuthentication := &heartbeat.PeerAuthentication{} + err := marshalizer.Unmarshal(peerAuthentication, buff) + if err != nil { + return nil, nil, err + } + payload := &heartbeat.Payload{} + err = marshalizer.Unmarshal(payload, peerAuthentication.Payload) + if err != nil { + return nil, nil, err + } + + log.Trace("interceptedPeerAuthentication successfully created") + + return peerAuthentication, payload, nil +} + +// CheckValidity checks the validity of the received peer authentication. This call won't trigger the signature validation. +func (ipa *interceptedPeerAuthentication) CheckValidity() error { + // Verify properties len + err := verifyPropertyMinMaxLen(publicKeyProperty, ipa.peerAuthentication.Pubkey) + if err != nil { + return err + } + err = verifyPropertyMinMaxLen(signatureProperty, ipa.peerAuthentication.Signature) + if err != nil { + return err + } + err = verifyPropertyMinMaxLen(peerIdProperty, ipa.peerId.Bytes()) + if err != nil { + return err + } + err = verifyPropertyMinMaxLen(payloadProperty, ipa.peerAuthentication.Payload) + if err != nil { + return err + } + err = verifyPropertyMinMaxLen(payloadSignatureProperty, ipa.peerAuthentication.PayloadSignature) + if err != nil { + return err + } + + // If the message is hardfork trigger, it should be from the expected source + if !ipa.isHardforkFromSource() { + // Verify validator + _, _, err = ipa.nodesCoordinator.GetValidatorWithPublicKey(ipa.peerAuthentication.Pubkey) + if err != nil { + return err + } + } + + // Verify payload signature + err = ipa.signaturesHandler.Verify(ipa.peerAuthentication.Payload, ipa.peerId, ipa.peerAuthentication.PayloadSignature) + if err != nil { + return err + } + + // Verify payload + err = ipa.verifyPayloadTimestamp() + if err != nil { + return err + } + + // Verify message bls signature + err = ipa.peerSignatureHandler.VerifyPeerSignature(ipa.peerAuthentication.Pubkey, ipa.peerId, ipa.peerAuthentication.Signature) + if err != nil { + return err + } + + log.Trace("interceptedPeerAuthentication received valid data") + + return nil +} + +// IsForCurrentShard always returns true +func (ipa *interceptedPeerAuthentication) IsForCurrentShard() bool { + return true +} + +// Hash always returns an empty string +func (ipa *interceptedPeerAuthentication) Hash() []byte { + return []byte("") +} + +// Type returns the type of this intercepted data +func (ipa *interceptedPeerAuthentication) Type() string { + return interceptedPeerAuthenticationType +} + +// Identifiers returns the identifiers used in requests +func (ipa *interceptedPeerAuthentication) Identifiers() [][]byte { + return [][]byte{ipa.peerAuthentication.Pubkey, ipa.peerAuthentication.Pid} +} + +// PeerID returns the peer ID +func (ipa *interceptedPeerAuthentication) PeerID() core.PeerID { + return core.PeerID(ipa.peerAuthentication.Pid) +} + +// Signature returns the signature for the peer authentication +func (ipa *interceptedPeerAuthentication) Signature() []byte { + return ipa.peerAuthentication.Signature +} + +// Payload returns the payload data +func (ipa *interceptedPeerAuthentication) Payload() []byte { + return ipa.peerAuthentication.Payload +} + +// 
PayloadSignature returns the signature done on the payload +func (ipa *interceptedPeerAuthentication) PayloadSignature() []byte { + return ipa.peerAuthentication.PayloadSignature +} + +// Message returns the peer authentication message +func (ipa *interceptedPeerAuthentication) Message() interface{} { + return &ipa.peerAuthentication +} + +// Pubkey returns the public key +func (ipa *interceptedPeerAuthentication) Pubkey() []byte { + return ipa.peerAuthentication.Pubkey +} + +// String returns the most important fields as string +func (ipa *interceptedPeerAuthentication) String() string { + return fmt.Sprintf("pk=%s, pid=%s, sig=%s, payload=%s, payloadSig=%s", + logger.DisplayByteSlice(ipa.peerAuthentication.Pubkey), + ipa.peerId.Pretty(), + logger.DisplayByteSlice(ipa.peerAuthentication.Signature), + logger.DisplayByteSlice(ipa.peerAuthentication.Payload), + logger.DisplayByteSlice(ipa.peerAuthentication.PayloadSignature), + ) +} + +func (ipa *interceptedPeerAuthentication) verifyPayloadTimestamp() error { + currentTimeStamp := time.Now().Unix() + messageTimeStamp := ipa.payload.Timestamp + minTimestampAllowed := currentTimeStamp - ipa.expiryTimespanInSec + maxTimestampAllowed := currentTimeStamp + payloadExpiryThresholdInSec + if messageTimeStamp < minTimestampAllowed || messageTimeStamp > maxTimestampAllowed { + return process.ErrMessageExpired + } + + return nil +} + +func (ipa *interceptedPeerAuthentication) isHardforkFromSource() bool { + if len(ipa.payload.HardforkMessage) == 0 { + return false + } + + return bytes.Equal(ipa.peerAuthentication.Pubkey, ipa.hardforkTriggerPubKey) +} + +// SizeInBytes returns the size in bytes held by this instance +func (ipa *interceptedPeerAuthentication) SizeInBytes() int { + return len(ipa.peerAuthentication.Pubkey) + + len(ipa.peerAuthentication.Signature) + + len(ipa.peerAuthentication.Pid) + + len(ipa.peerAuthentication.Payload) + + len(ipa.peerAuthentication.PayloadSignature) +} + +// verifyPropertyMaxLen returns an error if the provided value is longer than max accepted by the network +func verifyPropertyMaxLen(property string, value []byte) error { + if len(value) > maxSizeInBytes { + return fmt.Errorf("%w for %s", process.ErrPropertyTooLong, property) + } + + return nil +} + +// verifyPropertyMinMaxLen returns an error if the provided value is longer/shorter than max/min accepted by the network +func verifyPropertyMinMaxLen(property string, value []byte) error { + err := verifyPropertyMaxLen(property, value) + if err != nil { + return err + } + + if len(value) < minSizeInBytes { + return fmt.Errorf("%w for %s", process.ErrPropertyTooShort, property) + } + return nil +} + +// IsInterfaceNil returns true if there is no value under the interface +func (ipa *interceptedPeerAuthentication) IsInterfaceNil() bool { + return ipa == nil +} diff --git a/process/heartbeat/interceptedPeerAuthentication_test.go b/process/heartbeat/interceptedPeerAuthentication_test.go new file mode 100644 index 00000000000..6278fddf30f --- /dev/null +++ b/process/heartbeat/interceptedPeerAuthentication_test.go @@ -0,0 +1,341 @@ +package heartbeat + +import ( + "errors" + "strings" + "testing" + "time" + + "github.com/ElrondNetwork/elrond-go-core/core" + "github.com/ElrondNetwork/elrond-go-core/core/check" + "github.com/ElrondNetwork/elrond-go-core/marshal" + "github.com/ElrondNetwork/elrond-go/dataRetriever/mock" + "github.com/ElrondNetwork/elrond-go/heartbeat" + "github.com/ElrondNetwork/elrond-go/process" + processMocks "github.com/ElrondNetwork/elrond-go/process/mock" 
+ "github.com/ElrondNetwork/elrond-go/sharding/nodesCoordinator" + "github.com/ElrondNetwork/elrond-go/testscommon/cryptoMocks" + "github.com/ElrondNetwork/elrond-go/testscommon/shardingMocks" + "github.com/stretchr/testify/assert" +) + +var expectedErr = errors.New("expected error") +var providedHardforkPubKey = []byte("provided pub key") + +func createDefaultInterceptedPeerAuthentication() *heartbeat.PeerAuthentication { + payload := &heartbeat.Payload{ + Timestamp: time.Now().Unix(), + HardforkMessage: "", + } + marshaller := marshal.GogoProtoMarshalizer{} + payloadBytes, err := marshaller.Marshal(payload) + if err != nil { + return nil + } + + return &heartbeat.PeerAuthentication{ + Pubkey: []byte("public key"), + Signature: []byte("signature"), + Pid: []byte("peer id"), + Payload: payloadBytes, + PayloadSignature: []byte("payload signature"), + } +} + +func getSizeOfPA(pa *heartbeat.PeerAuthentication) int { + return len(pa.Pubkey) + len(pa.Pid) + + len(pa.Signature) + len(pa.Payload) + + len(pa.PayloadSignature) +} + +func createMockInterceptedPeerAuthenticationArg(interceptedData *heartbeat.PeerAuthentication) ArgInterceptedPeerAuthentication { + arg := ArgInterceptedPeerAuthentication{ + ArgBaseInterceptedHeartbeat: ArgBaseInterceptedHeartbeat{ + Marshaller: &marshal.GogoProtoMarshalizer{}, + }, + NodesCoordinator: &shardingMocks.NodesCoordinatorStub{}, + SignaturesHandler: &processMocks.SignaturesHandlerStub{}, + PeerSignatureHandler: &cryptoMocks.PeerSignatureHandlerStub{}, + ExpiryTimespanInSec: 30, + HardforkTriggerPubKey: providedHardforkPubKey, + } + arg.DataBuff, _ = arg.Marshaller.Marshal(interceptedData) + + return arg +} + +func TestNewInterceptedPeerAuthentication(t *testing.T) { + t.Parallel() + + t.Run("nil data buff should error", func(t *testing.T) { + t.Parallel() + + arg := createMockInterceptedPeerAuthenticationArg(createDefaultInterceptedPeerAuthentication()) + arg.DataBuff = nil + + ipa, err := NewInterceptedPeerAuthentication(arg) + assert.True(t, check.IfNil(ipa)) + assert.Equal(t, process.ErrNilBuffer, err) + }) + t.Run("nil marshaller should error", func(t *testing.T) { + t.Parallel() + + arg := createMockInterceptedPeerAuthenticationArg(createDefaultInterceptedPeerAuthentication()) + arg.Marshaller = nil + + ipa, err := NewInterceptedPeerAuthentication(arg) + assert.True(t, check.IfNil(ipa)) + assert.Equal(t, process.ErrNilMarshalizer, err) + }) + t.Run("nil nodes coordinator should error", func(t *testing.T) { + t.Parallel() + + arg := createMockInterceptedPeerAuthenticationArg(createDefaultInterceptedPeerAuthentication()) + arg.NodesCoordinator = nil + + ipa, err := NewInterceptedPeerAuthentication(arg) + assert.True(t, check.IfNil(ipa)) + assert.Equal(t, process.ErrNilNodesCoordinator, err) + }) + t.Run("nil signatures handler should error", func(t *testing.T) { + t.Parallel() + + arg := createMockInterceptedPeerAuthenticationArg(createDefaultInterceptedPeerAuthentication()) + arg.SignaturesHandler = nil + + ipa, err := NewInterceptedPeerAuthentication(arg) + assert.True(t, check.IfNil(ipa)) + assert.Equal(t, process.ErrNilSignaturesHandler, err) + }) + t.Run("invalid expiry timespan should error", func(t *testing.T) { + t.Parallel() + + arg := createMockInterceptedPeerAuthenticationArg(createDefaultInterceptedPeerAuthentication()) + arg.ExpiryTimespanInSec = 1 + + ipa, err := NewInterceptedPeerAuthentication(arg) + assert.True(t, check.IfNil(ipa)) + assert.Equal(t, process.ErrInvalidExpiryTimespan, err) + }) + t.Run("nil peer signature handler should 
error", func(t *testing.T) { + t.Parallel() + + arg := createMockInterceptedPeerAuthenticationArg(createDefaultInterceptedPeerAuthentication()) + arg.PeerSignatureHandler = nil + + ipa, err := NewInterceptedPeerAuthentication(arg) + assert.True(t, check.IfNil(ipa)) + assert.Equal(t, process.ErrNilPeerSignatureHandler, err) + }) + t.Run("unmarshal returns error", func(t *testing.T) { + t.Parallel() + + arg := createMockInterceptedPeerAuthenticationArg(createDefaultInterceptedPeerAuthentication()) + arg.Marshaller = &mock.MarshalizerStub{ + UnmarshalCalled: func(obj interface{}, buff []byte) error { + return expectedErr + }, + } + + ipa, err := NewInterceptedPeerAuthentication(arg) + assert.True(t, check.IfNil(ipa)) + assert.Equal(t, expectedErr, err) + }) + t.Run("unmarshalable payload returns error", func(t *testing.T) { + t.Parallel() + + interceptedData := createDefaultInterceptedPeerAuthentication() + interceptedData.Payload = []byte("invalid data") + arg := createMockInterceptedPeerAuthenticationArg(interceptedData) + + ipa, err := NewInterceptedPeerAuthentication(arg) + assert.True(t, check.IfNil(ipa)) + assert.NotNil(t, err) + }) + t.Run("invalid hardfork pub key should error", func(t *testing.T) { + t.Parallel() + + args := createMockInterceptedPeerAuthenticationArg(createDefaultInterceptedPeerAuthentication()) + args.HardforkTriggerPubKey = make([]byte, 0) + ipa, err := NewInterceptedPeerAuthentication(args) + + assert.True(t, check.IfNil(ipa)) + assert.True(t, errors.Is(err, process.ErrInvalidValue)) + assert.True(t, strings.Contains(err.Error(), "hardfork")) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + arg := createMockInterceptedPeerAuthenticationArg(createDefaultInterceptedPeerAuthentication()) + + ipa, err := NewInterceptedPeerAuthentication(arg) + assert.False(t, check.IfNil(ipa)) + assert.Nil(t, err) + }) +} + +func TestInterceptedPeerAuthentication_CheckValidity(t *testing.T) { + t.Parallel() + t.Run("publicKeyProperty too short", testInterceptedPeerAuthenticationPropertyLen(publicKeyProperty, false)) + t.Run("publicKeyProperty too short", testInterceptedPeerAuthenticationPropertyLen(publicKeyProperty, true)) + + t.Run("signatureProperty too short", testInterceptedPeerAuthenticationPropertyLen(signatureProperty, false)) + t.Run("signatureProperty too short", testInterceptedPeerAuthenticationPropertyLen(signatureProperty, true)) + + t.Run("peerIdProperty too short", testInterceptedPeerAuthenticationPropertyLen(peerIdProperty, false)) + t.Run("peerIdProperty too short", testInterceptedPeerAuthenticationPropertyLen(peerIdProperty, true)) + + t.Run("payloadProperty too short", testInterceptedPeerAuthenticationPropertyLen(payloadProperty, false)) + t.Run("payloadProperty too short", testInterceptedPeerAuthenticationPropertyLen(payloadProperty, true)) + + t.Run("payloadSignatureProperty too short", testInterceptedPeerAuthenticationPropertyLen(payloadSignatureProperty, false)) + t.Run("payloadSignatureProperty too short", testInterceptedPeerAuthenticationPropertyLen(payloadSignatureProperty, true)) + + t.Run("nodesCoordinator.GetValidatorWithPublicKey returns error", func(t *testing.T) { + t.Parallel() + + arg := createMockInterceptedPeerAuthenticationArg(createDefaultInterceptedPeerAuthentication()) + arg.NodesCoordinator = &processMocks.NodesCoordinatorStub{ + GetValidatorWithPublicKeyCalled: func(publicKey []byte) (validator nodesCoordinator.Validator, shardId uint32, err error) { + return nil, 0, expectedErr + }, + } + ipa, _ := 
NewInterceptedPeerAuthentication(arg) + err := ipa.CheckValidity() + assert.Equal(t, expectedErr, err) + }) + t.Run("signaturesHandler.Verify returns error", func(t *testing.T) { + t.Parallel() + + arg := createMockInterceptedPeerAuthenticationArg(createDefaultInterceptedPeerAuthentication()) + arg.SignaturesHandler = &processMocks.SignaturesHandlerStub{ + VerifyCalled: func(payload []byte, pid core.PeerID, signature []byte) error { + return expectedErr + }, + } + ipa, _ := NewInterceptedPeerAuthentication(arg) + err := ipa.CheckValidity() + assert.Equal(t, expectedErr, err) + }) + t.Run("peerSignatureHandler.VerifyPeerSignature returns error", func(t *testing.T) { + t.Parallel() + + arg := createMockInterceptedPeerAuthenticationArg(createDefaultInterceptedPeerAuthentication()) + arg.PeerSignatureHandler = &processMocks.PeerSignatureHandlerStub{ + VerifyPeerSignatureCalled: func(pk []byte, pid core.PeerID, signature []byte) error { + return expectedErr + }, + } + ipa, _ := NewInterceptedPeerAuthentication(arg) + err := ipa.CheckValidity() + assert.Equal(t, expectedErr, err) + }) + t.Run("message is expired", func(t *testing.T) { + t.Parallel() + + marshaller := &marshal.GogoProtoMarshalizer{} + expiryTimespanInSec := int64(30) + interceptedData := createDefaultInterceptedPeerAuthentication() + expiredTimestamp := time.Now().Unix() - expiryTimespanInSec - 1 + payload := &heartbeat.Payload{ + Timestamp: expiredTimestamp, + } + payloadBytes, err := marshaller.Marshal(payload) + assert.Nil(t, err) + + interceptedData.Payload = payloadBytes + arg := createMockInterceptedPeerAuthenticationArg(interceptedData) + arg.Marshaller = marshaller + arg.ExpiryTimespanInSec = expiryTimespanInSec + + ipa, _ := NewInterceptedPeerAuthentication(arg) + + err = ipa.CheckValidity() + assert.Equal(t, process.ErrMessageExpired, err) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + arg := createMockInterceptedPeerAuthenticationArg(createDefaultInterceptedPeerAuthentication()) + ipa, _ := NewInterceptedPeerAuthentication(arg) + err := ipa.CheckValidity() + assert.Nil(t, err) + }) + t.Run("should work - hardfork from source", func(t *testing.T) { + t.Parallel() + + peerAuth := createDefaultInterceptedPeerAuthentication() + peerAuth.Pubkey = providedHardforkPubKey + payload := &heartbeat.Payload{ + Timestamp: time.Now().Unix(), + HardforkMessage: "hardfork message", + } + marshaller := marshal.GogoProtoMarshalizer{} + payloadBytes, _ := marshaller.Marshal(payload) + peerAuth.Payload = payloadBytes + + arg := createMockInterceptedPeerAuthenticationArg(peerAuth) + ipa, _ := NewInterceptedPeerAuthentication(arg) + err := ipa.CheckValidity() + assert.Nil(t, err) + }) +} + +func testInterceptedPeerAuthenticationPropertyLen(property string, tooLong bool) func(t *testing.T) { + return func(t *testing.T) { + t.Parallel() + + value := []byte("") + expectedError := process.ErrPropertyTooShort + if tooLong { + value = make([]byte, 130) + expectedError = process.ErrPropertyTooLong + } + + arg := createMockInterceptedPeerAuthenticationArg(createDefaultInterceptedPeerAuthentication()) + ipa, _ := NewInterceptedPeerAuthentication(arg) + switch property { + case publicKeyProperty: + ipa.peerAuthentication.Pubkey = value + case signatureProperty: + ipa.peerAuthentication.Signature = value + case peerIdProperty: + ipa.peerId = core.PeerID(value) + case payloadProperty: + ipa.peerAuthentication.Payload = value + case payloadSignatureProperty: + ipa.peerAuthentication.PayloadSignature = value + default: + 
assert.True(t, false) + } + + err := ipa.CheckValidity() + assert.True(t, strings.Contains(err.Error(), expectedError.Error())) + } +} + +func TestInterceptedPeerAuthentication_Getters(t *testing.T) { + t.Parallel() + + providedPA := createDefaultInterceptedPeerAuthentication() + arg := createMockInterceptedPeerAuthenticationArg(providedPA) + ipa, _ := NewInterceptedPeerAuthentication(arg) + expectedPeerAuthentication := &heartbeat.PeerAuthentication{} + err := arg.Marshaller.Unmarshal(expectedPeerAuthentication, arg.DataBuff) + assert.Nil(t, err) + assert.True(t, ipa.IsForCurrentShard()) + assert.Equal(t, interceptedPeerAuthenticationType, ipa.Type()) + assert.Equal(t, expectedPeerAuthentication.Pid, []byte(ipa.PeerID())) + assert.Equal(t, expectedPeerAuthentication.Signature, ipa.Signature()) + assert.Equal(t, expectedPeerAuthentication.Payload, ipa.Payload()) + assert.Equal(t, expectedPeerAuthentication.PayloadSignature, ipa.PayloadSignature()) + assert.Equal(t, []byte(""), ipa.Hash()) + assert.Equal(t, expectedPeerAuthentication.Pubkey, ipa.Pubkey()) + + identifiers := ipa.Identifiers() + assert.Equal(t, 2, len(identifiers)) + assert.Equal(t, expectedPeerAuthentication.Pubkey, identifiers[0]) + assert.Equal(t, expectedPeerAuthentication.Pid, identifiers[1]) + providedPASize := getSizeOfPA(providedPA) + assert.Equal(t, providedPASize, ipa.SizeInBytes()) +} diff --git a/process/heartbeat/interface.go b/process/heartbeat/interface.go new file mode 100644 index 00000000000..20fae58e41b --- /dev/null +++ b/process/heartbeat/interface.go @@ -0,0 +1,18 @@ +package heartbeat + +import ( + "github.com/ElrondNetwork/elrond-go-core/core" + "github.com/ElrondNetwork/elrond-go/sharding/nodesCoordinator" +) + +// NodesCoordinator defines the behavior of a struct able to do validator selection +type NodesCoordinator interface { + GetValidatorWithPublicKey(publicKey []byte) (validator nodesCoordinator.Validator, shardId uint32, err error) + IsInterfaceNil() bool +} + +// SignaturesHandler defines the behavior of a struct able to handle signatures +type SignaturesHandler interface { + Verify(payload []byte, pid core.PeerID, signature []byte) error + IsInterfaceNil() bool +} diff --git a/process/interceptors/factory/argInterceptedDataFactory.go b/process/interceptors/factory/argInterceptedDataFactory.go index 002b4220cf2..3222230eba0 100644 --- a/process/interceptors/factory/argInterceptedDataFactory.go +++ b/process/interceptors/factory/argInterceptedDataFactory.go @@ -24,6 +24,7 @@ type interceptedDataCoreComponentsHolder interface { MinTransactionVersion() uint32 IsInterfaceNil() bool EpochNotifier() process.EpochNotifier + HardforkTriggerPubKey() []byte } // interceptedDataCryptoComponentsHolder holds the crypto components required by the intercepted data factory @@ -40,16 +41,20 @@ type interceptedDataCryptoComponentsHolder interface { // ArgInterceptedDataFactory holds all dependencies required by the shard and meta intercepted data factory in order to create // new instances type ArgInterceptedDataFactory struct { - CoreComponents interceptedDataCoreComponentsHolder - CryptoComponents interceptedDataCryptoComponentsHolder - ShardCoordinator sharding.Coordinator - NodesCoordinator nodesCoordinator.NodesCoordinator - FeeHandler process.FeeHandler - WhiteListerVerifiedTxs process.WhiteListHandler - HeaderSigVerifier process.InterceptedHeaderSigVerifier - ValidityAttester process.ValidityAttester - HeaderIntegrityVerifier process.HeaderIntegrityVerifier - EpochStartTrigger 
process.EpochStartTriggerHandler - ArgsParser process.ArgumentsParser - EnableSignTxWithHashEpoch uint32 + CoreComponents interceptedDataCoreComponentsHolder + CryptoComponents interceptedDataCryptoComponentsHolder + ShardCoordinator sharding.Coordinator + NodesCoordinator nodesCoordinator.NodesCoordinator + FeeHandler process.FeeHandler + WhiteListerVerifiedTxs process.WhiteListHandler + HeaderSigVerifier process.InterceptedHeaderSigVerifier + ValidityAttester process.ValidityAttester + HeaderIntegrityVerifier process.HeaderIntegrityVerifier + EpochStartTrigger process.EpochStartTriggerHandler + ArgsParser process.ArgumentsParser + EnableSignTxWithHashEpoch uint32 + PeerSignatureHandler crypto.PeerSignatureHandler + SignaturesHandler process.SignaturesHandler + HeartbeatExpiryTimespanInSec int64 + PeerID core.PeerID } diff --git a/process/interceptors/factory/interceptedDirectConnectionInfoFactory.go b/process/interceptors/factory/interceptedDirectConnectionInfoFactory.go new file mode 100644 index 00000000000..de81b20cb45 --- /dev/null +++ b/process/interceptors/factory/interceptedDirectConnectionInfoFactory.go @@ -0,0 +1,57 @@ +package factory + +import ( + "github.com/ElrondNetwork/elrond-go-core/core/check" + "github.com/ElrondNetwork/elrond-go-core/marshal" + "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/process/p2p" + "github.com/ElrondNetwork/elrond-go/sharding" +) + +type interceptedDirectConnectionInfoFactory struct { + marshaller marshal.Marshalizer + shardCoordinator sharding.Coordinator +} + +// NewInterceptedDirectConnectionInfoFactory creates an instance of interceptedDirectConnectionInfoFactory +func NewInterceptedDirectConnectionInfoFactory(args ArgInterceptedDataFactory) (*interceptedDirectConnectionInfoFactory, error) { + err := checkArgs(args) + if err != nil { + return nil, err + } + + return &interceptedDirectConnectionInfoFactory{ + marshaller: args.CoreComponents.InternalMarshalizer(), + shardCoordinator: args.ShardCoordinator, + }, nil +} + +func checkArgs(args ArgInterceptedDataFactory) error { + if check.IfNil(args.CoreComponents) { + return process.ErrNilCoreComponentsHolder + } + if check.IfNil(args.CoreComponents.InternalMarshalizer()) { + return process.ErrNilMarshalizer + } + if check.IfNil(args.ShardCoordinator) { + return process.ErrNilShardCoordinator + } + + return nil +} + +// Create creates instances of InterceptedData by unmarshalling provided buffer +func (idcif *interceptedDirectConnectionInfoFactory) Create(buff []byte) (process.InterceptedData, error) { + args := p2p.ArgInterceptedDirectConnectionInfo{ + Marshaller: idcif.marshaller, + DataBuff: buff, + NumOfShards: idcif.shardCoordinator.NumberOfShards(), + } + + return p2p.NewInterceptedDirectConnectionInfo(args) +} + +// IsInterfaceNil returns true if there is no value under the interface +func (idcif *interceptedDirectConnectionInfoFactory) IsInterfaceNil() bool { + return idcif == nil +} diff --git a/process/interceptors/factory/interceptedDirectConnectionInfoFactory_test.go b/process/interceptors/factory/interceptedDirectConnectionInfoFactory_test.go new file mode 100644 index 00000000000..ac2b4ab5cac --- /dev/null +++ b/process/interceptors/factory/interceptedDirectConnectionInfoFactory_test.go @@ -0,0 +1,68 @@ +package factory + +import ( + "fmt" + "strings" + "testing" + + "github.com/ElrondNetwork/elrond-go-core/core/check" + "github.com/ElrondNetwork/elrond-go/p2p/message" + "github.com/ElrondNetwork/elrond-go/process" + 
"github.com/stretchr/testify/assert" +) + +func TestNewInterceptedDirectConnectionInfoFactory(t *testing.T) { + t.Parallel() + + t.Run("nil core comp should error", func(t *testing.T) { + t.Parallel() + + _, cryptoComp := createMockComponentHolders() + arg := createMockArgument(nil, cryptoComp) + + idcif, err := NewInterceptedDirectConnectionInfoFactory(*arg) + assert.Equal(t, process.ErrNilCoreComponentsHolder, err) + assert.True(t, check.IfNil(idcif)) + }) + t.Run("nil marshaller should error", func(t *testing.T) { + t.Parallel() + + coreComp, cryptoComp := createMockComponentHolders() + coreComp.IntMarsh = nil + arg := createMockArgument(coreComp, cryptoComp) + + idcif, err := NewInterceptedDirectConnectionInfoFactory(*arg) + assert.Equal(t, process.ErrNilMarshalizer, err) + assert.True(t, check.IfNil(idcif)) + }) + t.Run("nil shard coordinator should error", func(t *testing.T) { + t.Parallel() + + coreComp, cryptoComp := createMockComponentHolders() + arg := createMockArgument(coreComp, cryptoComp) + arg.ShardCoordinator = nil + + idcif, err := NewInterceptedDirectConnectionInfoFactory(*arg) + assert.Equal(t, process.ErrNilShardCoordinator, err) + assert.True(t, check.IfNil(idcif)) + }) + t.Run("should work and create", func(t *testing.T) { + t.Parallel() + + coreComp, cryptoComp := createMockComponentHolders() + arg := createMockArgument(coreComp, cryptoComp) + + idcif, err := NewInterceptedDirectConnectionInfoFactory(*arg) + assert.Nil(t, err) + assert.False(t, check.IfNil(idcif)) + + msg := &message.DirectConnectionInfo{ + ShardId: "5", + } + msgBuff, _ := arg.CoreComponents.InternalMarshalizer().Marshal(msg) + interceptedData, err := idcif.Create(msgBuff) + assert.Nil(t, err) + assert.False(t, check.IfNil(interceptedData)) + assert.True(t, strings.Contains(fmt.Sprintf("%T", interceptedData), "*p2p.interceptedDirectConnectionInfo")) + }) +} diff --git a/process/interceptors/factory/interceptedHeartbeatDataFactory.go b/process/interceptors/factory/interceptedHeartbeatDataFactory.go new file mode 100644 index 00000000000..cd321abc480 --- /dev/null +++ b/process/interceptors/factory/interceptedHeartbeatDataFactory.go @@ -0,0 +1,47 @@ +package factory + +import ( + "github.com/ElrondNetwork/elrond-go-core/core" + "github.com/ElrondNetwork/elrond-go-core/core/check" + "github.com/ElrondNetwork/elrond-go-core/marshal" + "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/process/heartbeat" +) + +type interceptedHeartbeatDataFactory struct { + marshalizer marshal.Marshalizer + peerID core.PeerID +} + +// NewInterceptedHeartbeatDataFactory creates an instance of interceptedHeartbeatDataFactory +func NewInterceptedHeartbeatDataFactory(arg ArgInterceptedDataFactory) (*interceptedHeartbeatDataFactory, error) { + if check.IfNil(arg.CoreComponents.InternalMarshalizer()) { + return nil, process.ErrNilMarshalizer + } + if len(arg.PeerID) == 0 { + return nil, process.ErrEmptyPeerID + } + + return &interceptedHeartbeatDataFactory{ + marshalizer: arg.CoreComponents.InternalMarshalizer(), + peerID: arg.PeerID, + }, nil +} + +// Create creates instances of InterceptedData by unmarshalling provided buffer +func (ihdf *interceptedHeartbeatDataFactory) Create(buff []byte) (process.InterceptedData, error) { + arg := heartbeat.ArgInterceptedHeartbeat{ + ArgBaseInterceptedHeartbeat: heartbeat.ArgBaseInterceptedHeartbeat{ + DataBuff: buff, + Marshaller: ihdf.marshalizer, + }, + PeerId: ihdf.peerID, + } + + return heartbeat.NewInterceptedHeartbeat(arg) +} + +// IsInterfaceNil 
returns true if there is no value under the interface +func (ihdf *interceptedHeartbeatDataFactory) IsInterfaceNil() bool { + return ihdf == nil +} diff --git a/process/interceptors/factory/interceptedHeartbeatDataFactory_test.go b/process/interceptors/factory/interceptedHeartbeatDataFactory_test.go new file mode 100644 index 00000000000..990e7ad274f --- /dev/null +++ b/process/interceptors/factory/interceptedHeartbeatDataFactory_test.go @@ -0,0 +1,74 @@ +package factory + +import ( + "fmt" + "strings" + "testing" + "time" + + "github.com/ElrondNetwork/elrond-go/heartbeat" + "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/process/mock" + "github.com/stretchr/testify/assert" +) + +func TestNewInterceptedHeartbeatDataFactory(t *testing.T) { + t.Parallel() + + t.Run("nil InternalMarshalizer should error", func(t *testing.T) { + t.Parallel() + + coreComp, cryptoComp := createMockComponentHolders() + coreComp.IntMarsh = nil + arg := createMockArgument(coreComp, cryptoComp) + + ihdf, err := NewInterceptedHeartbeatDataFactory(*arg) + assert.Nil(t, ihdf) + assert.Equal(t, process.ErrNilMarshalizer, err) + }) + t.Run("empty peer id should error", func(t *testing.T) { + t.Parallel() + + coreComp, cryptoComp := createMockComponentHolders() + arg := createMockArgument(coreComp, cryptoComp) + arg.PeerID = "" + + ihdf, err := NewInterceptedHeartbeatDataFactory(*arg) + assert.Nil(t, ihdf) + assert.Equal(t, process.ErrEmptyPeerID, err) + }) + t.Run("should work and create", func(t *testing.T) { + t.Parallel() + + coreComp, cryptoComp := createMockComponentHolders() + arg := createMockArgument(coreComp, cryptoComp) + + ihdf, err := NewInterceptedHeartbeatDataFactory(*arg) + assert.False(t, ihdf.IsInterfaceNil()) + assert.Nil(t, err) + + payload := &heartbeat.Payload{ + Timestamp: time.Now().Unix(), + HardforkMessage: "hardfork message", + } + marshaller := mock.MarshalizerMock{} + payloadBytes, err := marshaller.Marshal(payload) + assert.Nil(t, err) + + hb := &heartbeat.HeartbeatV2{ + Payload: payloadBytes, + VersionNumber: "version number", + NodeDisplayName: "node display name", + Identity: "identity", + Nonce: 10, + PeerSubType: 0, + } + marshaledHeartbeat, err := marshaller.Marshal(hb) + assert.Nil(t, err) + + interceptedData, err := ihdf.Create(marshaledHeartbeat) + assert.NotNil(t, interceptedData) + assert.Nil(t, err) + assert.True(t, strings.Contains(fmt.Sprintf("%T", interceptedData), "*heartbeat.interceptedHeartbeat")) + }) +} diff --git a/process/interceptors/factory/interceptedMetaHeaderDataFactory_test.go b/process/interceptors/factory/interceptedMetaHeaderDataFactory_test.go index 437c13e4be8..0ea3eacb074 100644 --- a/process/interceptors/factory/interceptedMetaHeaderDataFactory_test.go +++ b/process/interceptors/factory/interceptedMetaHeaderDataFactory_test.go @@ -13,6 +13,7 @@ import ( "github.com/ElrondNetwork/elrond-go/process" "github.com/ElrondNetwork/elrond-go/process/block/interceptedBlocks" "github.com/ElrondNetwork/elrond-go/process/mock" + processMocks "github.com/ElrondNetwork/elrond-go/process/mock" "github.com/ElrondNetwork/elrond-go/testscommon" "github.com/ElrondNetwork/elrond-go/testscommon/cryptoMocks" "github.com/ElrondNetwork/elrond-go/testscommon/epochNotifier" @@ -67,8 +68,9 @@ func createMockComponentHolders() (*mock.CoreComponentsMock, *mock.CryptoCompone ChainIdCalled: func() string { return "chainID" }, - TxVersionCheckField: versioning.NewTxVersionChecker(1), - EpochNotifierField: &epochNotifier.EpochNotifierStub{}, + 
TxVersionCheckField: versioning.NewTxVersionChecker(1), + EpochNotifierField: &epochNotifier.EpochNotifierStub{}, + HardforkTriggerPubKeyField: []byte("provided hardfork pub key"), } cryptoComponents := &mock.CryptoComponentsMock{ BlockSig: createMockSigner(), @@ -86,17 +88,21 @@ func createMockArgument( cryptoComponents *mock.CryptoComponentsMock, ) *ArgInterceptedDataFactory { return &ArgInterceptedDataFactory{ - CoreComponents: coreComponents, - CryptoComponents: cryptoComponents, - ShardCoordinator: mock.NewOneShardCoordinatorMock(), - NodesCoordinator: shardingMocks.NewNodesCoordinatorMock(), - FeeHandler: createMockFeeHandler(), - HeaderSigVerifier: &mock.HeaderSigVerifierStub{}, - HeaderIntegrityVerifier: &mock.HeaderIntegrityVerifierStub{}, - ValidityAttester: &mock.ValidityAttesterStub{}, - EpochStartTrigger: &mock.EpochStartTriggerStub{}, - WhiteListerVerifiedTxs: &testscommon.WhiteListHandlerStub{}, - ArgsParser: &mock.ArgumentParserMock{}, + CoreComponents: coreComponents, + CryptoComponents: cryptoComponents, + ShardCoordinator: mock.NewOneShardCoordinatorMock(), + NodesCoordinator: shardingMocks.NewNodesCoordinatorMock(), + FeeHandler: createMockFeeHandler(), + WhiteListerVerifiedTxs: &testscommon.WhiteListHandlerStub{}, + HeaderSigVerifier: &mock.HeaderSigVerifierStub{}, + ValidityAttester: &mock.ValidityAttesterStub{}, + HeaderIntegrityVerifier: &mock.HeaderIntegrityVerifierStub{}, + EpochStartTrigger: &mock.EpochStartTriggerStub{}, + ArgsParser: &mock.ArgumentParserMock{}, + PeerSignatureHandler: &processMocks.PeerSignatureHandlerStub{}, + SignaturesHandler: &processMocks.SignaturesHandlerStub{}, + HeartbeatExpiryTimespanInSec: 30, + PeerID: "pid", } } diff --git a/process/interceptors/factory/interceptedPeerAuthenticationDataFactory.go b/process/interceptors/factory/interceptedPeerAuthenticationDataFactory.go new file mode 100644 index 00000000000..12496a63acc --- /dev/null +++ b/process/interceptors/factory/interceptedPeerAuthenticationDataFactory.go @@ -0,0 +1,87 @@ +package factory + +import ( + "fmt" + + "github.com/ElrondNetwork/elrond-go-core/core/check" + "github.com/ElrondNetwork/elrond-go-core/marshal" + crypto "github.com/ElrondNetwork/elrond-go-crypto" + "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/process/heartbeat" +) + +const minDurationInSec = 10 + +type interceptedPeerAuthenticationDataFactory struct { + marshalizer marshal.Marshalizer + nodesCoordinator heartbeat.NodesCoordinator + signaturesHandler heartbeat.SignaturesHandler + peerSignatureHandler crypto.PeerSignatureHandler + expiryTimespanInSec int64 + hardforkTriggerPubKey []byte +} + +// NewInterceptedPeerAuthenticationDataFactory creates an instance of interceptedPeerAuthenticationDataFactory +func NewInterceptedPeerAuthenticationDataFactory(arg ArgInterceptedDataFactory) (*interceptedPeerAuthenticationDataFactory, error) { + err := checkArgInterceptedDataFactory(arg) + if err != nil { + return nil, err + } + + return &interceptedPeerAuthenticationDataFactory{ + marshalizer: arg.CoreComponents.InternalMarshalizer(), + nodesCoordinator: arg.NodesCoordinator, + signaturesHandler: arg.SignaturesHandler, + peerSignatureHandler: arg.PeerSignatureHandler, + expiryTimespanInSec: arg.HeartbeatExpiryTimespanInSec, + hardforkTriggerPubKey: arg.CoreComponents.HardforkTriggerPubKey(), + }, nil +} + +func checkArgInterceptedDataFactory(args ArgInterceptedDataFactory) error { + if check.IfNil(args.CoreComponents) { + return process.ErrNilCoreComponentsHolder + } + if 
check.IfNil(args.CoreComponents.InternalMarshalizer()) { + return process.ErrNilMarshalizer + } + if check.IfNil(args.NodesCoordinator) { + return process.ErrNilNodesCoordinator + } + if check.IfNil(args.SignaturesHandler) { + return process.ErrNilSignaturesHandler + } + if check.IfNil(args.PeerSignatureHandler) { + return process.ErrNilPeerSignatureHandler + } + if args.HeartbeatExpiryTimespanInSec < minDurationInSec { + return process.ErrInvalidExpiryTimespan + } + if len(args.CoreComponents.HardforkTriggerPubKey()) == 0 { + return fmt.Errorf("%w hardfork trigger public key bytes length is 0", process.ErrInvalidValue) + } + + return nil +} + +// Create creates instances of InterceptedData by unmarshalling provided buffer +func (ipadf *interceptedPeerAuthenticationDataFactory) Create(buff []byte) (process.InterceptedData, error) { + arg := heartbeat.ArgInterceptedPeerAuthentication{ + ArgBaseInterceptedHeartbeat: heartbeat.ArgBaseInterceptedHeartbeat{ + DataBuff: buff, + Marshaller: ipadf.marshalizer, + }, + NodesCoordinator: ipadf.nodesCoordinator, + SignaturesHandler: ipadf.signaturesHandler, + PeerSignatureHandler: ipadf.peerSignatureHandler, + ExpiryTimespanInSec: ipadf.expiryTimespanInSec, + HardforkTriggerPubKey: ipadf.hardforkTriggerPubKey, + } + + return heartbeat.NewInterceptedPeerAuthentication(arg) +} + +// IsInterfaceNil returns true if there is no value under the interface +func (ipadf *interceptedPeerAuthenticationDataFactory) IsInterfaceNil() bool { + return ipadf == nil +} diff --git a/process/interceptors/factory/interceptedPeerAuthenticationDataFactory_test.go b/process/interceptors/factory/interceptedPeerAuthenticationDataFactory_test.go new file mode 100644 index 00000000000..294f1e6efb4 --- /dev/null +++ b/process/interceptors/factory/interceptedPeerAuthenticationDataFactory_test.go @@ -0,0 +1,129 @@ +package factory + +import ( + "errors" + "fmt" + "strings" + "testing" + "time" + + "github.com/ElrondNetwork/elrond-go/heartbeat" + "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/process/mock" + "github.com/stretchr/testify/assert" +) + +func TestNewInterceptedPeerAuthenticationDataFactory(t *testing.T) { + t.Parallel() + + t.Run("nil CoreComponents should error", func(t *testing.T) { + t.Parallel() + + coreComp, cryptoComp := createMockComponentHolders() + arg := createMockArgument(coreComp, cryptoComp) + arg.CoreComponents = nil + + ipadf, err := NewInterceptedPeerAuthenticationDataFactory(*arg) + assert.Nil(t, ipadf) + assert.Equal(t, process.ErrNilCoreComponentsHolder, err) + }) + t.Run("nil InternalMarshalizer should error", func(t *testing.T) { + t.Parallel() + + coreComp, cryptoComp := createMockComponentHolders() + coreComp.IntMarsh = nil + arg := createMockArgument(coreComp, cryptoComp) + + ipadf, err := NewInterceptedPeerAuthenticationDataFactory(*arg) + assert.Nil(t, ipadf) + assert.Equal(t, process.ErrNilMarshalizer, err) + }) + t.Run("nil NodesCoordinator should error", func(t *testing.T) { + t.Parallel() + + coreComp, cryptoComp := createMockComponentHolders() + arg := createMockArgument(coreComp, cryptoComp) + arg.NodesCoordinator = nil + + ipadf, err := NewInterceptedPeerAuthenticationDataFactory(*arg) + assert.Nil(t, ipadf) + assert.Equal(t, process.ErrNilNodesCoordinator, err) + }) + t.Run("nil SignaturesHandler should error", func(t *testing.T) { + t.Parallel() + + coreComp, cryptoComp := createMockComponentHolders() + arg := createMockArgument(coreComp, cryptoComp) + arg.SignaturesHandler = nil + + ipadf, err := 
NewInterceptedPeerAuthenticationDataFactory(*arg) + assert.Nil(t, ipadf) + assert.Equal(t, process.ErrNilSignaturesHandler, err) + }) + t.Run("nil PeerSignatureHandler should error", func(t *testing.T) { + t.Parallel() + + coreComp, cryptoComp := createMockComponentHolders() + arg := createMockArgument(coreComp, cryptoComp) + arg.PeerSignatureHandler = nil + + ipadf, err := NewInterceptedPeerAuthenticationDataFactory(*arg) + assert.Nil(t, ipadf) + assert.Equal(t, process.ErrNilPeerSignatureHandler, err) + }) + t.Run("invalid expiry timespan should error", func(t *testing.T) { + t.Parallel() + + coreComp, cryptoComp := createMockComponentHolders() + arg := createMockArgument(coreComp, cryptoComp) + arg.HeartbeatExpiryTimespanInSec = 1 + + ipadf, err := NewInterceptedPeerAuthenticationDataFactory(*arg) + assert.Nil(t, ipadf) + assert.Equal(t, process.ErrInvalidExpiryTimespan, err) + }) + t.Run("invalid hardfork pub key should error", func(t *testing.T) { + t.Parallel() + + coreComp, cryptoComp := createMockComponentHolders() + coreComp.HardforkTriggerPubKeyField = make([]byte, 0) + arg := createMockArgument(coreComp, cryptoComp) + + ipadf, err := NewInterceptedPeerAuthenticationDataFactory(*arg) + assert.Nil(t, ipadf) + assert.True(t, errors.Is(err, process.ErrInvalidValue)) + }) + t.Run("should work and create", func(t *testing.T) { + t.Parallel() + + coreComp, cryptoComp := createMockComponentHolders() + arg := createMockArgument(coreComp, cryptoComp) + + ipadf, err := NewInterceptedPeerAuthenticationDataFactory(*arg) + assert.False(t, ipadf.IsInterfaceNil()) + assert.Nil(t, err) + + payload := &heartbeat.Payload{ + Timestamp: time.Now().Unix(), + HardforkMessage: "hardfork message", + } + marshaller := mock.MarshalizerMock{} + payloadBytes, err := marshaller.Marshal(payload) + assert.Nil(t, err) + + peerAuthentication := &heartbeat.PeerAuthentication{ + Pubkey: []byte("public key"), + Signature: []byte("signature"), + Pid: []byte("peer id"), + Payload: payloadBytes, + PayloadSignature: []byte("payload signature"), + } + marshaledPeerAuthentication, err := marshaller.Marshal(peerAuthentication) + assert.Nil(t, err) + + interceptedData, err := ipadf.Create(marshaledPeerAuthentication) + assert.NotNil(t, interceptedData) + assert.Nil(t, err) + assert.True(t, strings.Contains(fmt.Sprintf("%T", interceptedData), "*heartbeat.interceptedPeerAuthentication")) + }) +} diff --git a/process/interceptors/processor/directConnectionInfoInterceptorProcessor.go b/process/interceptors/processor/directConnectionInfoInterceptorProcessor.go new file mode 100644 index 00000000000..f845723ae9b --- /dev/null +++ b/process/interceptors/processor/directConnectionInfoInterceptorProcessor.go @@ -0,0 +1,66 @@ +package processor + +import ( + "strconv" + + "github.com/ElrondNetwork/elrond-go-core/core" + "github.com/ElrondNetwork/elrond-go-core/core/check" + "github.com/ElrondNetwork/elrond-go/process" +) + +type shardProvider interface { + ShardID() string +} + +// ArgDirectConnectionInfoInterceptorProcessor is the argument for the interceptor processor used for direct connection info +type ArgDirectConnectionInfoInterceptorProcessor struct { + PeerShardMapper process.PeerShardMapper +} + +type directConnectionInfoInterceptorProcessor struct { + peerShardMapper process.PeerShardMapper +} + +// NewDirectConnectionInfoInterceptorProcessor creates an instance of directConnectionInfoInterceptorProcessor +func NewDirectConnectionInfoInterceptorProcessor(args ArgDirectConnectionInfoInterceptorProcessor) 
(*directConnectionInfoInterceptorProcessor, error) {
+	if check.IfNil(args.PeerShardMapper) {
+		return nil, process.ErrNilPeerShardMapper
+	}
+
+	return &directConnectionInfoInterceptorProcessor{
+		peerShardMapper: args.PeerShardMapper,
+	}, nil
+}
+
+// Validate checks if the intercepted data can be processed
+// returns nil as proper validity checks are done at intercepted data level
+func (processor *directConnectionInfoInterceptorProcessor) Validate(_ process.InterceptedData, _ core.PeerID) error {
+	return nil
+}
+
+// Save will save the intercepted direct connection info into the peer shard mapper
+func (processor *directConnectionInfoInterceptorProcessor) Save(data process.InterceptedData, fromConnectedPeer core.PeerID, _ string) error {
+	shardDirectConnectionInfo, ok := data.(shardProvider)
+	if !ok {
+		return process.ErrWrongTypeAssertion
+	}
+
+	shardID, err := strconv.Atoi(shardDirectConnectionInfo.ShardID())
+	if err != nil {
+		return err
+	}
+
+	processor.peerShardMapper.PutPeerIdShardId(fromConnectedPeer, uint32(shardID))
+
+	return nil
+}
+
+// RegisterHandler registers a callback function to be notified of incoming direct connection info, currently not implemented
+func (processor *directConnectionInfoInterceptorProcessor) RegisterHandler(_ func(topic string, hash []byte, data interface{})) {
+	log.Error("directConnectionInfoInterceptorProcessor.RegisterHandler", "error", "not implemented")
+}
+
+// IsInterfaceNil returns true if there is no value under the interface
+func (processor *directConnectionInfoInterceptorProcessor) IsInterfaceNil() bool {
+	return processor == nil
+}
diff --git a/process/interceptors/processor/directConnectionInfoInterceptorProcessor_test.go b/process/interceptors/processor/directConnectionInfoInterceptorProcessor_test.go
new file mode 100644
index 00000000000..6724f1b2320
--- /dev/null
+++ b/process/interceptors/processor/directConnectionInfoInterceptorProcessor_test.go
@@ -0,0 +1,161 @@
+package processor
+
+import (
+	"testing"
+
+	"github.com/ElrondNetwork/elrond-go-core/core"
+	"github.com/ElrondNetwork/elrond-go-core/core/check"
+	"github.com/ElrondNetwork/elrond-go-core/marshal"
+	heartbeatMessages "github.com/ElrondNetwork/elrond-go/heartbeat"
+	"github.com/ElrondNetwork/elrond-go/p2p/message"
+	"github.com/ElrondNetwork/elrond-go/process"
+	"github.com/ElrondNetwork/elrond-go/process/heartbeat"
+	"github.com/ElrondNetwork/elrond-go/process/mock"
+	"github.com/ElrondNetwork/elrond-go/process/p2p"
+	"github.com/stretchr/testify/assert"
+)
+
+func createMockArgDirectConnectionInfoInterceptorProcessor() ArgDirectConnectionInfoInterceptorProcessor {
+	return ArgDirectConnectionInfoInterceptorProcessor{
+		PeerShardMapper: &mock.PeerShardMapperStub{},
+	}
+}
+
+func TestNewDirectConnectionInfoInterceptorProcessor(t *testing.T) {
+	t.Parallel()
+
+	t.Run("nil peer shard mapper should error", func(t *testing.T) {
+		t.Parallel()
+
+		args := createMockArgDirectConnectionInfoInterceptorProcessor()
+		args.PeerShardMapper = nil
+
+		processor, err := NewDirectConnectionInfoInterceptorProcessor(args)
+		assert.Equal(t, process.ErrNilPeerShardMapper, err)
+		assert.True(t, check.IfNil(processor))
+	})
+	t.Run("should work", func(t *testing.T) {
+		t.Parallel()
+
+		processor, err := NewDirectConnectionInfoInterceptorProcessor(createMockArgDirectConnectionInfoInterceptorProcessor())
+		assert.Nil(t, err)
+		assert.False(t, check.IfNil(processor))
+	})
+}
+
+func TestDirectConnectionInfoInterceptorProcessor_Save(t *testing.T) {
+	t.Parallel()
+
+	t.Run("invalid message 
should error", func(t *testing.T) { + t.Parallel() + + wasCalled := false + args := createMockArgDirectConnectionInfoInterceptorProcessor() + args.PeerShardMapper = &mock.PeerShardMapperStub{ + PutPeerIdShardIdCalled: func(pid core.PeerID, shardId uint32) { + wasCalled = true + }, + } + + processor, err := NewDirectConnectionInfoInterceptorProcessor(args) + assert.Nil(t, err) + assert.False(t, check.IfNil(processor)) + + // provide heartbeat as intercepted data + arg := heartbeat.ArgInterceptedHeartbeat{ + ArgBaseInterceptedHeartbeat: heartbeat.ArgBaseInterceptedHeartbeat{ + Marshaller: &marshal.GogoProtoMarshalizer{}, + }, + PeerId: "pid", + } + arg.DataBuff, _ = arg.Marshaller.Marshal(&heartbeatMessages.HeartbeatV2{}) + ihb, _ := heartbeat.NewInterceptedHeartbeat(arg) + + err = processor.Save(ihb, "", "") + assert.Equal(t, process.ErrWrongTypeAssertion, err) + assert.False(t, wasCalled) + }) + t.Run("invalid shard should error", func(t *testing.T) { + t.Parallel() + + wasCalled := false + args := createMockArgDirectConnectionInfoInterceptorProcessor() + args.PeerShardMapper = &mock.PeerShardMapperStub{ + PutPeerIdShardIdCalled: func(pid core.PeerID, shardId uint32) { + wasCalled = true + }, + } + + processor, err := NewDirectConnectionInfoInterceptorProcessor(args) + assert.Nil(t, err) + assert.False(t, check.IfNil(processor)) + + msg := &message.DirectConnectionInfo{ + ShardId: "invalid shard", + } + marshaller := marshal.GogoProtoMarshalizer{} + dataBuff, _ := marshaller.Marshal(msg) + arg := p2p.ArgInterceptedDirectConnectionInfo{ + Marshaller: &marshaller, + DataBuff: dataBuff, + NumOfShards: 10, + } + data, _ := p2p.NewInterceptedDirectConnectionInfo(arg) + + err = processor.Save(data, "", "") + assert.NotNil(t, err) + assert.False(t, wasCalled) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + wasCalled := false + args := createMockArgDirectConnectionInfoInterceptorProcessor() + args.PeerShardMapper = &mock.PeerShardMapperStub{ + PutPeerIdShardIdCalled: func(pid core.PeerID, shardId uint32) { + wasCalled = true + }, + } + + processor, err := NewDirectConnectionInfoInterceptorProcessor(args) + assert.Nil(t, err) + assert.False(t, check.IfNil(processor)) + + msg := &message.DirectConnectionInfo{ + ShardId: "5", + } + marshaller := marshal.GogoProtoMarshalizer{} + dataBuff, _ := marshaller.Marshal(msg) + arg := p2p.ArgInterceptedDirectConnectionInfo{ + Marshaller: &marshaller, + DataBuff: dataBuff, + NumOfShards: 10, + } + data, _ := p2p.NewInterceptedDirectConnectionInfo(arg) + + err = processor.Save(data, "", "") + assert.Nil(t, err) + assert.True(t, wasCalled) + }) +} + +func TestDirectConnectionInfoInterceptorProcessor_DisabledMethod(t *testing.T) { + t.Parallel() + + defer func() { + r := recover() + if r != nil { + assert.Fail(t, "should not panic") + } + }() + + processor, err := NewDirectConnectionInfoInterceptorProcessor(createMockArgDirectConnectionInfoInterceptorProcessor()) + assert.Nil(t, err) + assert.False(t, check.IfNil(processor)) + + err = processor.Validate(nil, "") + assert.Nil(t, err) + + processor.RegisterHandler(nil) + +} diff --git a/process/interceptors/processor/heartbeatInterceptorProcessor.go b/process/interceptors/processor/heartbeatInterceptorProcessor.go new file mode 100644 index 00000000000..1e7d3b68c17 --- /dev/null +++ b/process/interceptors/processor/heartbeatInterceptorProcessor.go @@ -0,0 +1,94 @@ +package processor + +import ( + "github.com/ElrondNetwork/elrond-go-core/core" + "github.com/ElrondNetwork/elrond-go-core/core/check" 
+ "github.com/ElrondNetwork/elrond-go/heartbeat" + "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/sharding" + "github.com/ElrondNetwork/elrond-go/storage" +) + +// ArgHeartbeatInterceptorProcessor is the argument for the interceptor processor used for heartbeat +type ArgHeartbeatInterceptorProcessor struct { + HeartbeatCacher storage.Cacher + ShardCoordinator sharding.Coordinator + PeerShardMapper process.PeerShardMapper +} + +// heartbeatInterceptorProcessor is the processor used when intercepting heartbeat +type heartbeatInterceptorProcessor struct { + heartbeatCacher storage.Cacher + shardCoordinator sharding.Coordinator + peerShardMapper process.PeerShardMapper +} + +// NewHeartbeatInterceptorProcessor creates a new heartbeatInterceptorProcessor +func NewHeartbeatInterceptorProcessor(args ArgHeartbeatInterceptorProcessor) (*heartbeatInterceptorProcessor, error) { + err := checkArgsHeartbeat(args) + if err != nil { + return nil, err + } + + return &heartbeatInterceptorProcessor{ + heartbeatCacher: args.HeartbeatCacher, + shardCoordinator: args.ShardCoordinator, + peerShardMapper: args.PeerShardMapper, + }, nil +} + +func checkArgsHeartbeat(args ArgHeartbeatInterceptorProcessor) error { + if check.IfNil(args.HeartbeatCacher) { + return process.ErrNilHeartbeatCacher + } + if check.IfNil(args.ShardCoordinator) { + return process.ErrNilShardCoordinator + } + if check.IfNil(args.PeerShardMapper) { + return process.ErrNilPeerShardMapper + } + + return nil +} + +// Validate checks if the intercepted data can be processed +// returns nil as proper validity checks are done at intercepted data level +func (hip *heartbeatInterceptorProcessor) Validate(_ process.InterceptedData, _ core.PeerID) error { + return nil +} + +// Save will save the intercepted heartbeat inside the heartbeat cacher +func (hip *heartbeatInterceptorProcessor) Save(data process.InterceptedData, fromConnectedPeer core.PeerID, _ string) error { + interceptedHeartbeat, ok := data.(interceptedHeartbeatMessageHandler) + if !ok { + return process.ErrWrongTypeAssertion + } + + hip.heartbeatCacher.Put(fromConnectedPeer.Bytes(), interceptedHeartbeat.Message(), interceptedHeartbeat.SizeInBytes()) + + return hip.updatePeerInfo(interceptedHeartbeat.Message(), fromConnectedPeer) +} + +func (hip *heartbeatInterceptorProcessor) updatePeerInfo(message interface{}, fromConnectedPeer core.PeerID) error { + heartbeatData, ok := message.(*heartbeat.HeartbeatV2) + if !ok { + return process.ErrWrongTypeAssertion + } + + hip.peerShardMapper.PutPeerIdShardId(fromConnectedPeer, hip.shardCoordinator.SelfId()) + hip.peerShardMapper.PutPeerIdSubType(fromConnectedPeer, core.P2PPeerSubType(heartbeatData.GetPeerSubType())) + + log.Trace("Heartbeat message saved") + + return nil +} + +// RegisterHandler registers a callback function to be notified of incoming hearbeat +func (hip *heartbeatInterceptorProcessor) RegisterHandler(_ func(topic string, hash []byte, data interface{})) { + log.Error("heartbeatInterceptorProcessor.RegisterHandler", "error", "not implemented") +} + +// IsInterfaceNil returns true if there is no value under the interface +func (hip *heartbeatInterceptorProcessor) IsInterfaceNil() bool { + return hip == nil +} diff --git a/process/interceptors/processor/heartbeatInterceptorProcessor_test.go b/process/interceptors/processor/heartbeatInterceptorProcessor_test.go new file mode 100644 index 00000000000..82582c10aa4 --- /dev/null +++ b/process/interceptors/processor/heartbeatInterceptorProcessor_test.go 
@@ -0,0 +1,201 @@ +package processor_test + +import ( + "bytes" + "testing" + "time" + + "github.com/ElrondNetwork/elrond-go-core/core" + heartbeatMessages "github.com/ElrondNetwork/elrond-go/heartbeat" + "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/process/heartbeat" + "github.com/ElrondNetwork/elrond-go/process/interceptors/processor" + "github.com/ElrondNetwork/elrond-go/process/mock" + "github.com/ElrondNetwork/elrond-go/testscommon" + "github.com/ElrondNetwork/elrond-go/testscommon/p2pmocks" + "github.com/stretchr/testify/assert" +) + +func createHeartbeatInterceptorProcessArg() processor.ArgHeartbeatInterceptorProcessor { + return processor.ArgHeartbeatInterceptorProcessor{ + HeartbeatCacher: testscommon.NewCacherStub(), + ShardCoordinator: &testscommon.ShardsCoordinatorMock{}, + PeerShardMapper: &p2pmocks.NetworkShardingCollectorStub{}, + } +} + +func createInterceptedHeartbeat() *heartbeatMessages.HeartbeatV2 { + payload := &heartbeatMessages.Payload{ + Timestamp: time.Now().Unix(), + HardforkMessage: "hardfork message", + } + marshaller := mock.MarshalizerMock{} + payloadBytes, _ := marshaller.Marshal(payload) + + return &heartbeatMessages.HeartbeatV2{ + Payload: payloadBytes, + VersionNumber: "version number", + NodeDisplayName: "node display name", + Identity: "identity", + Nonce: 123, + PeerSubType: uint32(core.RegularPeer), + } +} + +func createMockInterceptedHeartbeat() process.InterceptedData { + arg := heartbeat.ArgInterceptedHeartbeat{ + ArgBaseInterceptedHeartbeat: heartbeat.ArgBaseInterceptedHeartbeat{ + Marshaller: &mock.MarshalizerMock{}, + }, + PeerId: "pid", + } + arg.DataBuff, _ = arg.Marshaller.Marshal(createInterceptedHeartbeat()) + ihb, _ := heartbeat.NewInterceptedHeartbeat(arg) + + return ihb +} + +func TestNewHeartbeatInterceptorProcessor(t *testing.T) { + t.Parallel() + + t.Run("nil cacher should error", func(t *testing.T) { + t.Parallel() + + arg := createHeartbeatInterceptorProcessArg() + arg.HeartbeatCacher = nil + hip, err := processor.NewHeartbeatInterceptorProcessor(arg) + assert.Equal(t, process.ErrNilHeartbeatCacher, err) + assert.Nil(t, hip) + }) + t.Run("nil shard coordinator should error", func(t *testing.T) { + t.Parallel() + + arg := createHeartbeatInterceptorProcessArg() + arg.ShardCoordinator = nil + hip, err := processor.NewHeartbeatInterceptorProcessor(arg) + assert.Equal(t, process.ErrNilShardCoordinator, err) + assert.Nil(t, hip) + }) + t.Run("nil peer shard mapper should error", func(t *testing.T) { + t.Parallel() + + arg := createHeartbeatInterceptorProcessArg() + arg.PeerShardMapper = nil + hip, err := processor.NewHeartbeatInterceptorProcessor(arg) + assert.Equal(t, process.ErrNilPeerShardMapper, err) + assert.Nil(t, hip) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + hip, err := processor.NewHeartbeatInterceptorProcessor(createHeartbeatInterceptorProcessArg()) + assert.Nil(t, err) + assert.False(t, hip.IsInterfaceNil()) + }) +} + +func TestHeartbeatInterceptorProcessor_Save(t *testing.T) { + t.Parallel() + + t.Run("invalid data should error", func(t *testing.T) { + t.Parallel() + + hip, err := processor.NewHeartbeatInterceptorProcessor(createHeartbeatInterceptorProcessArg()) + assert.Nil(t, err) + assert.False(t, hip.IsInterfaceNil()) + assert.Equal(t, process.ErrWrongTypeAssertion, hip.Save(nil, "", "")) + }) + t.Run("invalid heartbeat data should error", func(t *testing.T) { + t.Parallel() + + providedData := createMockInterceptedPeerAuthentication() // unable to cast to 
intercepted heartbeat + wasPutPeerIdShardIdCalled := false + wasPutPeerIdSubTypeCalled := false + args := createHeartbeatInterceptorProcessArg() + args.PeerShardMapper = &p2pmocks.NetworkShardingCollectorStub{ + PutPeerIdShardIdCalled: func(pid core.PeerID, shardId uint32) { + wasPutPeerIdShardIdCalled = true + }, + PutPeerIdSubTypeCalled: func(pid core.PeerID, peerSubType core.P2PPeerSubType) { + wasPutPeerIdSubTypeCalled = true + }, + } + + paip, err := processor.NewHeartbeatInterceptorProcessor(args) + assert.Nil(t, err) + assert.False(t, paip.IsInterfaceNil()) + assert.Equal(t, process.ErrWrongTypeAssertion, paip.Save(providedData, "", "")) + assert.False(t, wasPutPeerIdShardIdCalled) + assert.False(t, wasPutPeerIdSubTypeCalled) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + providedHb := createMockInterceptedHeartbeat() + wasCalled := false + providedPid := core.PeerID("pid") + arg := createHeartbeatInterceptorProcessArg() + arg.HeartbeatCacher = &testscommon.CacherStub{ + PutCalled: func(key []byte, value interface{}, sizeInBytes int) (evicted bool) { + assert.True(t, bytes.Equal(providedPid.Bytes(), key)) + ihb := value.(*heartbeatMessages.HeartbeatV2) + providedHbHandler := providedHb.(interceptedDataHandler) + providedHbMessage := providedHbHandler.Message().(*heartbeatMessages.HeartbeatV2) + assert.Equal(t, providedHbMessage.Identity, ihb.Identity) + assert.Equal(t, providedHbMessage.Payload, ihb.Payload) + assert.Equal(t, providedHbMessage.NodeDisplayName, ihb.NodeDisplayName) + assert.Equal(t, providedHbMessage.PeerSubType, ihb.PeerSubType) + assert.Equal(t, providedHbMessage.VersionNumber, ihb.VersionNumber) + assert.Equal(t, providedHbMessage.Nonce, ihb.Nonce) + wasCalled = true + return false + }, + } + wasPutPeerIdShardIdCalled := false + wasPutPeerIdSubTypeCalled := false + arg.PeerShardMapper = &p2pmocks.NetworkShardingCollectorStub{ + PutPeerIdShardIdCalled: func(pid core.PeerID, shardId uint32) { + wasPutPeerIdShardIdCalled = true + assert.Equal(t, providedPid, pid) + }, + PutPeerIdSubTypeCalled: func(pid core.PeerID, peerSubType core.P2PPeerSubType) { + wasPutPeerIdSubTypeCalled = true + assert.Equal(t, providedPid, pid) + }, + } + + hip, err := processor.NewHeartbeatInterceptorProcessor(arg) + assert.Nil(t, err) + assert.False(t, hip.IsInterfaceNil()) + + err = hip.Save(providedHb, providedPid, "") + assert.Nil(t, err) + assert.True(t, wasCalled) + assert.True(t, wasPutPeerIdShardIdCalled) + assert.True(t, wasPutPeerIdSubTypeCalled) + }) +} + +func TestHeartbeatInterceptorProcessor_Validate(t *testing.T) { + t.Parallel() + + hip, err := processor.NewHeartbeatInterceptorProcessor(createHeartbeatInterceptorProcessArg()) + assert.Nil(t, err) + assert.False(t, hip.IsInterfaceNil()) + assert.Nil(t, hip.Validate(nil, "")) +} + +func TestHeartbeatInterceptorProcessor_RegisterHandler(t *testing.T) { + t.Parallel() + + defer func() { + if r := recover(); r != nil { + assert.Fail(t, "should not panic") + } + }() + + hip, err := processor.NewHeartbeatInterceptorProcessor(createHeartbeatInterceptorProcessArg()) + assert.Nil(t, err) + assert.False(t, hip.IsInterfaceNil()) + hip.RegisterHandler(nil) +} diff --git a/process/interceptors/processor/interface.go b/process/interceptors/processor/interface.go index 435c97df887..e4f8a818a5f 100644 --- a/process/interceptors/processor/interface.go +++ b/process/interceptors/processor/interface.go @@ -21,3 +21,19 @@ type InterceptedTransactionHandler interface { type ShardedPool interface { AddData(key []byte, data 
interface{}, sizeInBytes int, cacheID string) } + +type interceptedDataSizeHandler interface { + SizeInBytes() int +} + +type interceptedHeartbeatMessageHandler interface { + interceptedDataSizeHandler + Message() interface{} +} + +type interceptedPeerAuthenticationMessageHandler interface { + interceptedDataSizeHandler + Message() interface{} + Payload() []byte + Pubkey() []byte +} diff --git a/process/interceptors/processor/peerAuthenticationInterceptorProcessor.go b/process/interceptors/processor/peerAuthenticationInterceptorProcessor.go new file mode 100644 index 00000000000..85ed509f232 --- /dev/null +++ b/process/interceptors/processor/peerAuthenticationInterceptorProcessor.go @@ -0,0 +1,111 @@ +package processor + +import ( + "github.com/ElrondNetwork/elrond-go-core/core" + "github.com/ElrondNetwork/elrond-go-core/core/check" + "github.com/ElrondNetwork/elrond-go-core/marshal" + "github.com/ElrondNetwork/elrond-go/heartbeat" + "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/storage" +) + +// ArgPeerAuthenticationInterceptorProcessor is the argument for the interceptor processor used for peer authentication +type ArgPeerAuthenticationInterceptorProcessor struct { + PeerAuthenticationCacher storage.Cacher + PeerShardMapper process.PeerShardMapper + Marshaller marshal.Marshalizer + HardforkTrigger heartbeat.HardforkTrigger +} + +// peerAuthenticationInterceptorProcessor is the processor used when intercepting peer authentication +type peerAuthenticationInterceptorProcessor struct { + peerAuthenticationCacher storage.Cacher + peerShardMapper process.PeerShardMapper + marshaller marshal.Marshalizer + hardforkTrigger heartbeat.HardforkTrigger +} + +// NewPeerAuthenticationInterceptorProcessor creates a new peerAuthenticationInterceptorProcessor +func NewPeerAuthenticationInterceptorProcessor(args ArgPeerAuthenticationInterceptorProcessor) (*peerAuthenticationInterceptorProcessor, error) { + err := checkArgsPeerAuthentication(args) + if err != nil { + return nil, err + } + + return &peerAuthenticationInterceptorProcessor{ + peerAuthenticationCacher: args.PeerAuthenticationCacher, + peerShardMapper: args.PeerShardMapper, + marshaller: args.Marshaller, + hardforkTrigger: args.HardforkTrigger, + }, nil +} + +func checkArgsPeerAuthentication(args ArgPeerAuthenticationInterceptorProcessor) error { + if check.IfNil(args.PeerAuthenticationCacher) { + return process.ErrNilPeerAuthenticationCacher + } + if check.IfNil(args.PeerShardMapper) { + return process.ErrNilPeerShardMapper + } + if check.IfNil(args.Marshaller) { + return heartbeat.ErrNilMarshaller + } + if check.IfNil(args.HardforkTrigger) { + return heartbeat.ErrNilHardforkTrigger + } + + return nil +} + +// Validate checks if the intercepted data can be processed +// returns nil as proper validity checks are done at intercepted data level +func (paip *peerAuthenticationInterceptorProcessor) Validate(_ process.InterceptedData, _ core.PeerID) error { + return nil +} + +// Save will save the intercepted peer authentication inside the peer authentication cacher +func (paip *peerAuthenticationInterceptorProcessor) Save(data process.InterceptedData, _ core.PeerID, _ string) error { + interceptedPeerAuthenticationData, ok := data.(interceptedPeerAuthenticationMessageHandler) + if !ok { + return process.ErrWrongTypeAssertion + } + + payloadBuff := interceptedPeerAuthenticationData.Payload() + payload := &heartbeat.Payload{} + err := paip.marshaller.Unmarshal(payload, payloadBuff) + if err != nil { + return err + 
} + + isHardforkTrigger, err := paip.hardforkTrigger.TriggerReceived(nil, []byte(payload.HardforkMessage), interceptedPeerAuthenticationData.Pubkey()) + if isHardforkTrigger { + return err + } + + return paip.updatePeerInfo(interceptedPeerAuthenticationData.Message(), interceptedPeerAuthenticationData.SizeInBytes()) +} + +func (paip *peerAuthenticationInterceptorProcessor) updatePeerInfo(message interface{}, messageSize int) error { + peerAuthenticationData, ok := message.(*heartbeat.PeerAuthentication) + if !ok { + return process.ErrWrongTypeAssertion + } + + pidBytes := peerAuthenticationData.GetPid() + paip.peerAuthenticationCacher.Put(pidBytes, message, messageSize) + paip.peerShardMapper.UpdatePeerIDPublicKeyPair(core.PeerID(pidBytes), peerAuthenticationData.GetPubkey()) + + log.Trace("PeerAuthentication message saved") + + return nil +} + +// RegisterHandler registers a callback function to be notified of incoming peer authentication +func (paip *peerAuthenticationInterceptorProcessor) RegisterHandler(_ func(topic string, hash []byte, data interface{})) { + log.Error("peerAuthenticationInterceptorProcessor.RegisterHandler", "error", "not implemented") +} + +// IsInterfaceNil returns true if there is no value under the interface +func (paip *peerAuthenticationInterceptorProcessor) IsInterfaceNil() bool { + return paip == nil +} diff --git a/process/interceptors/processor/peerAuthenticationInterceptorProcessor_test.go b/process/interceptors/processor/peerAuthenticationInterceptorProcessor_test.go new file mode 100644 index 00000000000..d43c61875c8 --- /dev/null +++ b/process/interceptors/processor/peerAuthenticationInterceptorProcessor_test.go @@ -0,0 +1,243 @@ +package processor_test + +import ( + "bytes" + "errors" + "testing" + "time" + + "github.com/ElrondNetwork/elrond-go-core/core" + heartbeatMessages "github.com/ElrondNetwork/elrond-go/heartbeat" + "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/process/heartbeat" + "github.com/ElrondNetwork/elrond-go/process/interceptors/processor" + "github.com/ElrondNetwork/elrond-go/process/mock" + "github.com/ElrondNetwork/elrond-go/testscommon" + "github.com/ElrondNetwork/elrond-go/testscommon/p2pmocks" + "github.com/stretchr/testify/assert" +) + +type interceptedDataHandler interface { + SizeInBytes() int + Message() interface{} +} + +func createPeerAuthenticationInterceptorProcessArg() processor.ArgPeerAuthenticationInterceptorProcessor { + return processor.ArgPeerAuthenticationInterceptorProcessor{ + PeerAuthenticationCacher: testscommon.NewCacherStub(), + PeerShardMapper: &p2pmocks.NetworkShardingCollectorStub{}, + Marshaller: testscommon.MarshalizerMock{}, + HardforkTrigger: &testscommon.HardforkTriggerStub{}, + } +} + +func createInterceptedPeerAuthentication() *heartbeatMessages.PeerAuthentication { + payload := &heartbeatMessages.Payload{ + Timestamp: time.Now().Unix(), + HardforkMessage: "hardfork message", + } + marshaller := mock.MarshalizerMock{} + payloadBytes, _ := marshaller.Marshal(payload) + + return &heartbeatMessages.PeerAuthentication{ + Pubkey: []byte("public key"), + Signature: []byte("signature"), + Pid: []byte("peer id"), + Payload: payloadBytes, + PayloadSignature: []byte("payload signature"), + } +} + +func createMockInterceptedPeerAuthentication() process.InterceptedData { + arg := heartbeat.ArgInterceptedPeerAuthentication{ + ArgBaseInterceptedHeartbeat: heartbeat.ArgBaseInterceptedHeartbeat{ + Marshaller: &mock.MarshalizerMock{}, + }, + NodesCoordinator: 
&mock.NodesCoordinatorStub{}, + SignaturesHandler: &mock.SignaturesHandlerStub{}, + PeerSignatureHandler: &mock.PeerSignatureHandlerStub{}, + ExpiryTimespanInSec: 30, + HardforkTriggerPubKey: []byte("provided hardfork pub key"), + } + arg.DataBuff, _ = arg.Marshaller.Marshal(createInterceptedPeerAuthentication()) + ipa, _ := heartbeat.NewInterceptedPeerAuthentication(arg) + + return ipa +} + +func TestNewPeerAuthenticationInterceptorProcessor(t *testing.T) { + t.Parallel() + + t.Run("nil cacher should error", func(t *testing.T) { + t.Parallel() + + arg := createPeerAuthenticationInterceptorProcessArg() + arg.PeerAuthenticationCacher = nil + paip, err := processor.NewPeerAuthenticationInterceptorProcessor(arg) + assert.Equal(t, process.ErrNilPeerAuthenticationCacher, err) + assert.Nil(t, paip) + }) + t.Run("nil peer shard mapper should error", func(t *testing.T) { + t.Parallel() + + arg := createPeerAuthenticationInterceptorProcessArg() + arg.PeerShardMapper = nil + paip, err := processor.NewPeerAuthenticationInterceptorProcessor(arg) + assert.Equal(t, process.ErrNilPeerShardMapper, err) + assert.Nil(t, paip) + }) + t.Run("nil marshaller should error", func(t *testing.T) { + t.Parallel() + + arg := createPeerAuthenticationInterceptorProcessArg() + arg.Marshaller = nil + paip, err := processor.NewPeerAuthenticationInterceptorProcessor(arg) + assert.Equal(t, heartbeatMessages.ErrNilMarshaller, err) + assert.Nil(t, paip) + }) + t.Run("nil hardfork trigger should error", func(t *testing.T) { + t.Parallel() + + arg := createPeerAuthenticationInterceptorProcessArg() + arg.HardforkTrigger = nil + paip, err := processor.NewPeerAuthenticationInterceptorProcessor(arg) + assert.Equal(t, heartbeatMessages.ErrNilHardforkTrigger, err) + assert.Nil(t, paip) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + paip, err := processor.NewPeerAuthenticationInterceptorProcessor(createPeerAuthenticationInterceptorProcessArg()) + assert.Nil(t, err) + assert.False(t, paip.IsInterfaceNil()) + }) +} + +func TestPeerAuthenticationInterceptorProcessor_Save(t *testing.T) { + t.Parallel() + + t.Run("invalid data should error", func(t *testing.T) { + t.Parallel() + + paip, err := processor.NewPeerAuthenticationInterceptorProcessor(createPeerAuthenticationInterceptorProcessArg()) + assert.Nil(t, err) + assert.False(t, paip.IsInterfaceNil()) + assert.Equal(t, process.ErrWrongTypeAssertion, paip.Save(nil, "", "")) + }) + t.Run("invalid peer auth data should error", func(t *testing.T) { + t.Parallel() + + providedData := createMockInterceptedHeartbeat() // unable to cast to intercepted peer auth + wasCalled := false + args := createPeerAuthenticationInterceptorProcessArg() + args.PeerShardMapper = &p2pmocks.NetworkShardingCollectorStub{ + UpdatePeerIDPublicKeyPairCalled: func(pid core.PeerID, pk []byte) { + wasCalled = true + }, + } + + paip, err := processor.NewPeerAuthenticationInterceptorProcessor(args) + assert.Nil(t, err) + assert.False(t, paip.IsInterfaceNil()) + assert.Equal(t, process.ErrWrongTypeAssertion, paip.Save(providedData, "", "")) + assert.False(t, wasCalled) + }) + t.Run("unmarshal returns error", func(t *testing.T) { + t.Parallel() + + expectedError := errors.New("expected error") + args := createPeerAuthenticationInterceptorProcessArg() + args.Marshaller = &testscommon.MarshalizerStub{ + UnmarshalCalled: func(obj interface{}, buff []byte) error { + return expectedError + }, + } + paip, err := processor.NewPeerAuthenticationInterceptorProcessor(args) + assert.Nil(t, err) + assert.False(t, 
paip.IsInterfaceNil()) + + err = paip.Save(createMockInterceptedPeerAuthentication(), "", "") + assert.Equal(t, expectedError, err) + }) + t.Run("trigger received returns error", func(t *testing.T) { + t.Parallel() + + expectedError := errors.New("expected error") + args := createPeerAuthenticationInterceptorProcessArg() + args.HardforkTrigger = &testscommon.HardforkTriggerStub{ + TriggerReceivedCalled: func(payload []byte, data []byte, pkBytes []byte) (bool, error) { + return true, expectedError + }, + } + paip, err := processor.NewPeerAuthenticationInterceptorProcessor(args) + assert.Nil(t, err) + assert.False(t, paip.IsInterfaceNil()) + + err = paip.Save(createMockInterceptedPeerAuthentication(), "", "") + assert.Equal(t, expectedError, err) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + providedIPA := createMockInterceptedPeerAuthentication() + providedIPAHandler := providedIPA.(interceptedDataHandler) + providedIPAMessage := providedIPAHandler.Message().(*heartbeatMessages.PeerAuthentication) + wasPutCalled := false + providedPid := core.PeerID("pid") + arg := createPeerAuthenticationInterceptorProcessArg() + arg.PeerAuthenticationCacher = &testscommon.CacherStub{ + PutCalled: func(key []byte, value interface{}, sizeInBytes int) (evicted bool) { + assert.True(t, bytes.Equal(providedIPAMessage.Pid, key)) + ipa := value.(*heartbeatMessages.PeerAuthentication) + assert.Equal(t, providedIPAMessage.Pid, ipa.Pid) + assert.Equal(t, providedIPAMessage.Payload, ipa.Payload) + assert.Equal(t, providedIPAMessage.Signature, ipa.Signature) + assert.Equal(t, providedIPAMessage.PayloadSignature, ipa.PayloadSignature) + assert.Equal(t, providedIPAMessage.Pubkey, ipa.Pubkey) + wasPutCalled = true + return false + }, + } + wasUpdatePeerIDPublicKeyPairCalled := false + arg.PeerShardMapper = &p2pmocks.NetworkShardingCollectorStub{ + UpdatePeerIDPublicKeyPairCalled: func(pid core.PeerID, pk []byte) { + wasUpdatePeerIDPublicKeyPairCalled = true + assert.Equal(t, providedIPAMessage.Pid, pid.Bytes()) + assert.Equal(t, providedIPAMessage.Pubkey, pk) + }, + } + + paip, err := processor.NewPeerAuthenticationInterceptorProcessor(arg) + assert.Nil(t, err) + assert.False(t, paip.IsInterfaceNil()) + + err = paip.Save(providedIPA, providedPid, "") + assert.Nil(t, err) + assert.True(t, wasPutCalled) + assert.True(t, wasUpdatePeerIDPublicKeyPairCalled) + }) +} + +func TestPeerAuthenticationInterceptorProcessor_Validate(t *testing.T) { + t.Parallel() + + paip, err := processor.NewPeerAuthenticationInterceptorProcessor(createPeerAuthenticationInterceptorProcessArg()) + assert.Nil(t, err) + assert.False(t, paip.IsInterfaceNil()) + assert.Nil(t, paip.Validate(nil, "")) +} + +func TestPeerAuthenticationInterceptorProcessor_RegisterHandler(t *testing.T) { + t.Parallel() + + defer func() { + if r := recover(); r != nil { + assert.Fail(t, "should not panic") + } + }() + + paip, err := processor.NewPeerAuthenticationInterceptorProcessor(createPeerAuthenticationInterceptorProcessArg()) + assert.Nil(t, err) + assert.False(t, paip.IsInterfaceNil()) + paip.RegisterHandler(nil) +} diff --git a/process/interceptors/processor/trieNodeInterceptorProcessor.go b/process/interceptors/processor/trieNodeInterceptorProcessor.go index b58e9834891..3f0208a60bb 100644 --- a/process/interceptors/processor/trieNodeInterceptorProcessor.go +++ b/process/interceptors/processor/trieNodeInterceptorProcessor.go @@ -9,10 +9,6 @@ import ( var _ process.InterceptorProcessor = (*TrieNodeInterceptorProcessor)(nil) -type 
interceptedTrieNodeHandler interface { - SizeInBytes() int -} - // TrieNodeInterceptorProcessor is the processor used when intercepting trie nodes type TrieNodeInterceptorProcessor struct { interceptedNodes storage.Cacher @@ -36,7 +32,7 @@ func (tnip *TrieNodeInterceptorProcessor) Validate(_ process.InterceptedData, _ // Save saves the intercepted trie node in the intercepted nodes cacher func (tnip *TrieNodeInterceptorProcessor) Save(data process.InterceptedData, _ core.PeerID, _ string) error { - nodeData, ok := data.(interceptedTrieNodeHandler) + nodeData, ok := data.(interceptedDataSizeHandler) if !ok { return process.ErrWrongTypeAssertion } diff --git a/process/interceptors/singleDataInterceptor.go b/process/interceptors/singleDataInterceptor.go index 31be1d2cb0e..08a45d646dd 100644 --- a/process/interceptors/singleDataInterceptor.go +++ b/process/interceptors/singleDataInterceptor.go @@ -87,7 +87,7 @@ func (sdi *SingleDataInterceptor) ProcessReceivedMessage(message p2p.MessageP2P, if err != nil { sdi.throttler.EndProcessing() - //this situation is so severe that we need to black list the peers + // this situation is so severe that we need to black list the peers reason := "can not create object from received bytes, topic " + sdi.topic + ", error " + err.Error() sdi.antifloodHandler.BlacklistPeer(message.Peer(), reason, common.InvalidMessageBlacklistDuration) sdi.antifloodHandler.BlacklistPeer(fromConnectedPeer, reason, common.InvalidMessageBlacklistDuration) @@ -104,7 +104,7 @@ func (sdi *SingleDataInterceptor) ProcessReceivedMessage(message p2p.MessageP2P, isWrongVersion := err == process.ErrInvalidTransactionVersion || err == process.ErrInvalidChainID if isWrongVersion { - //this situation is so severe that we need to black list de peers + // this situation is so severe that we need to black list the peers reason := "wrong version of received intercepted data, topic " + sdi.topic + ", error " + err.Error() sdi.antifloodHandler.BlacklistPeer(message.Peer(), reason, common.InvalidMessageBlacklistDuration) sdi.antifloodHandler.BlacklistPeer(fromConnectedPeer, reason, common.InvalidMessageBlacklistDuration) diff --git a/process/interface.go b/process/interface.go index 296fa194193..d0a0bdd1cdf 100644 --- a/process/interface.go +++ b/process/interface.go @@ -137,7 +137,7 @@ type TransactionCoordinator interface { ProcessBlockTransaction(header data.HeaderHandler, body *block.Body, haveTime func() time.Duration) error CreateBlockStarted() - CreateMbsAndProcessCrossShardTransactionsDstMe(header data.HeaderHandler, processedMiniBlocksHashes map[string]struct{}, haveTime func() bool, haveAdditionalTime func() bool, scheduledMode bool) (block.MiniBlockSlice, uint32, bool, error) + CreateMbsAndProcessCrossShardTransactionsDstMe(header data.HeaderHandler, processedMiniBlocksInfo map[string]*processedMb.ProcessedMiniBlockInfo, haveTime func() bool, haveAdditionalTime func() bool, scheduledMode bool) (block.MiniBlockSlice, uint32, bool, error) CreateMbsAndProcessTransactionsFromMe(haveTime func() bool, randomness []byte) block.MiniBlockSlice CreatePostProcessMiniBlocks() block.MiniBlockSlice CreateMarshalizedData(body *block.Body) map[string][][]byte @@ -175,8 +175,8 @@ type IntermediateTransactionHandler interface { GetAllCurrentFinishedTxs() map[string]data.TransactionHandler CreateBlockStarted() GetCreatedInShardMiniBlock() *block.MiniBlock - RemoveProcessedResults() [][]byte - InitProcessedResults() + RemoveProcessedResults(key []byte) [][]byte + InitProcessedResults(key []byte)
IsInterfaceNil() bool } @@ -215,7 +215,7 @@ type PreProcessor interface { RequestBlockTransactions(body *block.Body) int RequestTransactionsForMiniBlock(miniBlock *block.MiniBlock) int - ProcessMiniBlock(miniBlock *block.MiniBlock, haveTime func() bool, haveAdditionalTime func() bool, getNumOfCrossInterMbsAndTxs func() (int, int), scheduledMode bool) ([][]byte, int, error) + ProcessMiniBlock(miniBlock *block.MiniBlock, haveTime func() bool, haveAdditionalTime func() bool, scheduledMode bool, partialMbExecutionMode bool, indexOfLastTxProcessed int, preProcessorExecutionInfoHandler PreProcessorExecutionInfoHandler) ([][]byte, int, bool, error) CreateAndProcessMiniBlocks(haveTime func() bool, randomness []byte) (block.MiniBlockSlice, error) GetAllCurrentUsedTxs() map[string]data.TransactionHandler @@ -235,7 +235,6 @@ type BlockProcessor interface { CreateNewHeader(round uint64, nonce uint64) (data.HeaderHandler, error) RestoreBlockIntoPools(header data.HeaderHandler, body data.BodyHandler) error CreateBlock(initialHdr data.HeaderHandler, haveTime func() bool) (data.HeaderHandler, data.BodyHandler, error) - ApplyProcessedMiniBlocks(processedMiniBlocks *processedMb.ProcessedMiniBlockTracker) MarshalizedDataToBroadcast(header data.HeaderHandler, body data.BodyHandler) (map[uint32][]byte, map[string][][]byte, error) DecodeBlockBody(dta []byte) data.BodyHandler DecodeBlockHeader(dta []byte) data.HeaderHandler @@ -523,6 +522,12 @@ type TopicHandler interface { IsInterfaceNil() bool } +// SignaturesHandler defines the behavior of a struct able to handle signatures +type SignaturesHandler interface { + Verify(payload []byte, pid core.PeerID, signature []byte) error + IsInterfaceNil() bool +} + // DataPacker can split a large slice of byte slices in smaller packets type DataPacker interface { PackDataInChunks(data [][]byte, limit int) ([][]byte, error) @@ -548,6 +553,8 @@ type RequestHandler interface { GetNumPeersToQuery(key string) (int, int, error) RequestTrieNode(requestHash []byte, topic string, chunkIndex uint32) CreateTrieNodeIdentifier(requestHash []byte, chunkIndex uint32) []byte + RequestPeerAuthenticationsChunk(destShardID uint32, chunkIndex uint32) + RequestPeerAuthenticationsByHashes(destShardID uint32, hashes [][]byte) IsInterfaceNil() bool } @@ -698,14 +705,21 @@ type PeerBlackListCacher interface { // PeerShardMapper can return the public key of a provided peer ID type PeerShardMapper interface { + UpdatePeerIDPublicKeyPair(pid core.PeerID, pk []byte) + PutPeerIdShardId(pid core.PeerID, shardID uint32) + PutPeerIdSubType(pid core.PeerID, peerSubType core.P2PPeerSubType) + GetLastKnownPeerID(pk []byte) (*core.PeerID, bool) GetPeerInfo(pid core.PeerID) core.P2PPeerInfo IsInterfaceNil() bool } // NetworkShardingCollector defines the updating methods used by the network sharding component type NetworkShardingCollector interface { + UpdatePeerIDPublicKeyPair(pid core.PeerID, pk []byte) UpdatePeerIDInfo(pid core.PeerID, pk []byte, shardID uint32) - UpdatePeerIdSubType(pid core.PeerID, peerSubType core.P2PPeerSubType) + PutPeerIdShardId(pid core.PeerID, shardID uint32) + PutPeerIdSubType(pid core.PeerID, peerSubType core.P2PPeerSubType) + GetLastKnownPeerID(pk []byte) (*core.PeerID, bool) GetPeerInfo(pid core.PeerID) core.P2PPeerInfo IsInterfaceNil() bool } @@ -731,7 +745,7 @@ type SCQuery struct { // GasHandler is able to perform some gas calculation type GasHandler interface { Init() - Reset() + Reset(key []byte) SetGasProvided(gasProvided uint64, hash []byte) 
SetGasProvidedAsScheduled(gasProvided uint64, hash []byte) SetGasRefunded(gasRefunded uint64, hash []byte) @@ -749,7 +763,7 @@ type GasHandler interface { RemoveGasProvidedAsScheduled(hashes [][]byte) RemoveGasRefunded(hashes [][]byte) RemoveGasPenalized(hashes [][]byte) - RestoreGasSinceLastReset() + RestoreGasSinceLastReset(key []byte) ComputeGasProvidedByMiniBlock(*block.MiniBlock, map[string]data.TransactionHandler) (uint64, uint64, error) ComputeGasProvidedByTx(txSenderShardId uint32, txReceiverShardId uint32, txHandler data.TransactionHandler) (uint64, uint64, error) IsInterfaceNil() bool @@ -1122,6 +1136,7 @@ type CoreComponentsHolder interface { ChanStopNodeProcess() chan endProcess.ArgEndProcess NodeTypeProvider() core.NodeTypeProviderHandler ProcessStatusHandler() common.ProcessStatusHandler + HardforkTriggerPubKey() []byte IsInterfaceNil() bool } @@ -1133,6 +1148,7 @@ type CryptoComponentsHolder interface { BlockSigner() crypto.SingleSigner MultiSigner() crypto.MultiSigner SetMultiSigner(ms crypto.MultiSigner) error + PeerSignatureHandler() crypto.PeerSignatureHandler PublicKey() crypto.PublicKey Clone() interface{} IsInterfaceNil() bool @@ -1207,3 +1223,24 @@ type TxsSenderHandler interface { Close() error IsInterfaceNil() bool } + +// PreProcessorExecutionInfoHandler handles pre processor execution info needed by the transactions preprocessors +type PreProcessorExecutionInfoHandler interface { + GetNumOfCrossInterMbsAndTxs() (int, int) + InitProcessedTxsResults(key []byte) + RevertProcessedTxsResults(txHashes [][]byte, key []byte) +} + +// ProcessedMiniBlocksTracker handles tracking of processed mini blocks +type ProcessedMiniBlocksTracker interface { + SetProcessedMiniBlockInfo(metaBlockHash []byte, miniBlockHash []byte, processedMbInfo *processedMb.ProcessedMiniBlockInfo) + RemoveMetaBlockHash(metaBlockHash []byte) + RemoveMiniBlockHash(miniBlockHash []byte) + GetProcessedMiniBlocksInfo(metaBlockHash []byte) map[string]*processedMb.ProcessedMiniBlockInfo + GetProcessedMiniBlockInfo(miniBlockHash []byte) (*processedMb.ProcessedMiniBlockInfo, []byte) + IsMiniBlockFullyProcessed(metaBlockHash []byte, miniBlockHash []byte) bool + ConvertProcessedMiniBlocksMapToSlice() []bootstrapStorage.MiniBlocksInMeta + ConvertSliceToProcessedMiniBlocksMap(miniBlocksInMetaBlocks []bootstrapStorage.MiniBlocksInMeta) + DisplayProcessedMiniBlocks() + IsInterfaceNil() bool +} diff --git a/process/mock/blockProcessorMock.go b/process/mock/blockProcessorMock.go index 03b1ee339d9..904378c7a97 100644 --- a/process/mock/blockProcessorMock.go +++ b/process/mock/blockProcessorMock.go @@ -5,7 +5,6 @@ import ( "time" "github.com/ElrondNetwork/elrond-go-core/data" - "github.com/ElrondNetwork/elrond-go/process/block/processedMb" ) // BlockProcessorMock - @@ -29,10 +28,6 @@ type BlockProcessorMock struct { RevertIndexedBlockCalled func(header data.HeaderHandler) } -// ApplyProcessedMiniBlocks - -func (bpm *BlockProcessorMock) ApplyProcessedMiniBlocks(*processedMb.ProcessedMiniBlockTracker) { -} - // RestoreLastNotarizedHrdsToGenesis - func (bpm *BlockProcessorMock) RestoreLastNotarizedHrdsToGenesis() { } diff --git a/process/mock/coreComponentsMock.go b/process/mock/coreComponentsMock.go index 53fdba9ff06..82bf53decea 100644 --- a/process/mock/coreComponentsMock.go +++ b/process/mock/coreComponentsMock.go @@ -36,6 +36,7 @@ type CoreComponentsMock struct { NodeTypeProviderField core.NodeTypeProviderHandler EconomicsDataField process.EconomicsDataHandler ProcessStatusHandlerField common.ProcessStatusHandler + 
HardforkTriggerPubKeyField []byte } // ChanStopNodeProcess - @@ -156,6 +157,11 @@ func (ccm *CoreComponentsMock) ProcessStatusHandler() common.ProcessStatusHandle return ccm.ProcessStatusHandlerField } +// HardforkTriggerPubKey - +func (ccm *CoreComponentsMock) HardforkTriggerPubKey() []byte { + return ccm.HardforkTriggerPubKeyField +} + // IsInterfaceNil - func (ccm *CoreComponentsMock) IsInterfaceNil() bool { return ccm == nil diff --git a/process/mock/cryptoComponentsMock.go b/process/mock/cryptoComponentsMock.go index 3720c6a6093..7c74300b2e1 100644 --- a/process/mock/cryptoComponentsMock.go +++ b/process/mock/cryptoComponentsMock.go @@ -8,13 +8,14 @@ import ( // CryptoComponentsMock - type CryptoComponentsMock struct { - BlockSig crypto.SingleSigner - TxSig crypto.SingleSigner - MultiSig crypto.MultiSigner - BlKeyGen crypto.KeyGenerator - TxKeyGen crypto.KeyGenerator - PubKey crypto.PublicKey - mutMultiSig sync.RWMutex + BlockSig crypto.SingleSigner + TxSig crypto.SingleSigner + MultiSig crypto.MultiSigner + PeerSignHandler crypto.PeerSignatureHandler + BlKeyGen crypto.KeyGenerator + TxKeyGen crypto.KeyGenerator + PubKey crypto.PublicKey + mutMultiSig sync.RWMutex } // BlockSigner - @@ -42,6 +43,14 @@ func (ccm *CryptoComponentsMock) SetMultiSigner(multiSigner crypto.MultiSigner) return nil } +// PeerSignatureHandler returns the peer signature handler +func (ccm *CryptoComponentsMock) PeerSignatureHandler() crypto.PeerSignatureHandler { + ccm.mutMultiSig.RLock() + defer ccm.mutMultiSig.RUnlock() + + return ccm.PeerSignHandler +} + // BlockSignKeyGen - func (ccm *CryptoComponentsMock) BlockSignKeyGen() crypto.KeyGenerator { return ccm.BlKeyGen @@ -60,13 +69,14 @@ func (ccm *CryptoComponentsMock) PublicKey() crypto.PublicKey { // Clone - func (ccm *CryptoComponentsMock) Clone() interface{} { return &CryptoComponentsMock{ - BlockSig: ccm.BlockSig, - TxSig: ccm.TxSig, - MultiSig: ccm.MultiSig, - BlKeyGen: ccm.BlKeyGen, - TxKeyGen: ccm.TxKeyGen, - PubKey: ccm.PubKey, - mutMultiSig: sync.RWMutex{}, + BlockSig: ccm.BlockSig, + TxSig: ccm.TxSig, + MultiSig: ccm.MultiSig, + PeerSignHandler: ccm.PeerSignHandler, + BlKeyGen: ccm.BlKeyGen, + TxKeyGen: ccm.TxKeyGen, + PubKey: ccm.PubKey, + mutMultiSig: sync.RWMutex{}, } } diff --git a/process/mock/gasHandlerMock.go b/process/mock/gasHandlerMock.go index 45714492a35..0de4a266889 100644 --- a/process/mock/gasHandlerMock.go +++ b/process/mock/gasHandlerMock.go @@ -8,7 +8,7 @@ import ( // GasHandlerMock - type GasHandlerMock struct { InitCalled func() - ResetCalled func() + ResetCalled func(key []byte) SetGasProvidedCalled func(gasProvided uint64, hash []byte) SetGasProvidedAsScheduledCalled func(gasProvided uint64, hash []byte) SetGasRefundedCalled func(gasRefunded uint64, hash []byte) @@ -26,7 +26,7 @@ type GasHandlerMock struct { RemoveGasProvidedAsScheduledCalled func(hashes [][]byte) RemoveGasRefundedCalled func(hashes [][]byte) RemoveGasPenalizedCalled func(hashes [][]byte) - RestoreGasSinceLastResetCalled func() + RestoreGasSinceLastResetCalled func(key []byte) ComputeGasProvidedByMiniBlockCalled func(miniBlock *block.MiniBlock, mapHashTx map[string]data.TransactionHandler) (uint64, uint64, error) ComputeGasProvidedByTxCalled func(txSenderShardId uint32, txReceiverSharedId uint32, txHandler data.TransactionHandler) (uint64, uint64, error) } @@ -39,9 +39,9 @@ func (ghm *GasHandlerMock) Init() { } // Reset - -func (ghm *GasHandlerMock) Reset() { +func (ghm *GasHandlerMock) Reset(key []byte) { if ghm.ResetCalled != nil { - ghm.ResetCalled() + 
ghm.ResetCalled(key) } } @@ -174,9 +174,9 @@ func (ghm *GasHandlerMock) RemoveGasPenalized(hashes [][]byte) { } // RestoreGasSinceLastReset - -func (ghm *GasHandlerMock) RestoreGasSinceLastReset() { +func (ghm *GasHandlerMock) RestoreGasSinceLastReset(key []byte) { if ghm.RestoreGasSinceLastResetCalled != nil { - ghm.RestoreGasSinceLastResetCalled() + ghm.RestoreGasSinceLastResetCalled(key) } } diff --git a/process/mock/intermProcessorStub.go b/process/mock/intermProcessorStub.go index 12fa0e7899d..b4818bdc14f 100644 --- a/process/mock/intermProcessorStub.go +++ b/process/mock/intermProcessorStub.go @@ -15,23 +15,23 @@ type IntermediateTransactionHandlerStub struct { CreateBlockStartedCalled func() CreateMarshalizedDataCalled func(txHashes [][]byte) ([][]byte, error) GetAllCurrentFinishedTxsCalled func() map[string]data.TransactionHandler - RemoveProcessedResultsCalled func() [][]byte - InitProcessedResultsCalled func() + RemoveProcessedResultsCalled func(key []byte) [][]byte + InitProcessedResultsCalled func(key []byte) intermediateTransactions []data.TransactionHandler } // RemoveProcessedResults - -func (ith *IntermediateTransactionHandlerStub) RemoveProcessedResults() [][]byte { +func (ith *IntermediateTransactionHandlerStub) RemoveProcessedResults(key []byte) [][]byte { if ith.RemoveProcessedResultsCalled != nil { - return ith.RemoveProcessedResultsCalled() + return ith.RemoveProcessedResultsCalled(key) } return nil } // InitProcessedResults - -func (ith *IntermediateTransactionHandlerStub) InitProcessedResults() { +func (ith *IntermediateTransactionHandlerStub) InitProcessedResults(key []byte) { if ith.InitProcessedResultsCalled != nil { - ith.InitProcessedResultsCalled() + ith.InitProcessedResultsCalled(key) } } diff --git a/process/mock/intermediateTransactionHandlerMock.go b/process/mock/intermediateTransactionHandlerMock.go index ecce957815d..c2eee768622 100644 --- a/process/mock/intermediateTransactionHandlerMock.go +++ b/process/mock/intermediateTransactionHandlerMock.go @@ -15,24 +15,24 @@ type IntermediateTransactionHandlerMock struct { CreateBlockStartedCalled func() CreateMarshalizedDataCalled func(txHashes [][]byte) ([][]byte, error) GetAllCurrentFinishedTxsCalled func() map[string]data.TransactionHandler - RemoveProcessedResultsCalled func() [][]byte - InitProcessedResultsCalled func() + RemoveProcessedResultsCalled func(key []byte) [][]byte + InitProcessedResultsCalled func(key []byte) GetCreatedInShardMiniBlockCalled func() *block.MiniBlock intermediateTransactions []data.TransactionHandler } // RemoveProcessedResults - -func (ith *IntermediateTransactionHandlerMock) RemoveProcessedResults() [][]byte { +func (ith *IntermediateTransactionHandlerMock) RemoveProcessedResults(key []byte) [][]byte { if ith.RemoveProcessedResultsCalled != nil { - return ith.RemoveProcessedResultsCalled() + return ith.RemoveProcessedResultsCalled(key) } return nil } // InitProcessedResults - -func (ith *IntermediateTransactionHandlerMock) InitProcessedResults() { +func (ith *IntermediateTransactionHandlerMock) InitProcessedResults(key []byte) { if ith.InitProcessedResultsCalled != nil { - ith.InitProcessedResultsCalled() + ith.InitProcessedResultsCalled(key) } } diff --git a/process/mock/nodesCoordinatorStub.go b/process/mock/nodesCoordinatorStub.go new file mode 100644 index 00000000000..722d2d090b0 --- /dev/null +++ b/process/mock/nodesCoordinatorStub.go @@ -0,0 +1,23 @@ +package mock + +import ( + "github.com/ElrondNetwork/elrond-go/sharding/nodesCoordinator" +) + +// NodesCoordinatorStub - 
+type NodesCoordinatorStub struct { + GetValidatorWithPublicKeyCalled func(publicKey []byte) (validator nodesCoordinator.Validator, shardId uint32, err error) +} + +// GetValidatorWithPublicKey - +func (nc *NodesCoordinatorStub) GetValidatorWithPublicKey(publicKey []byte) (validator nodesCoordinator.Validator, shardId uint32, err error) { + if nc.GetValidatorWithPublicKeyCalled != nil { + return nc.GetValidatorWithPublicKeyCalled(publicKey) + } + return nil, 0, nil +} + +// IsInterfaceNil - +func (nc *NodesCoordinatorStub) IsInterfaceNil() bool { + return false +} diff --git a/process/mock/peerShardMapperStub.go b/process/mock/peerShardMapperStub.go index d16162a9b09..5edf7e46df5 100644 --- a/process/mock/peerShardMapperStub.go +++ b/process/mock/peerShardMapperStub.go @@ -4,10 +4,22 @@ import "github.com/ElrondNetwork/elrond-go-core/core" // PeerShardMapperStub - type PeerShardMapperStub struct { - GetPeerInfoCalled func(pid core.PeerID) core.P2PPeerInfo - UpdatePeerIdPublicKeyCalled func(pid core.PeerID, pk []byte) - UpdatePublicKeyShardIdCalled func(pk []byte, shardId uint32) - UpdatePeerIdShardIdCalled func(pid core.PeerID, shardId uint32) + GetLastKnownPeerIDCalled func(pk []byte) (*core.PeerID, bool) + GetPeerInfoCalled func(pid core.PeerID) core.P2PPeerInfo + UpdatePeerIdPublicKeyCalled func(pid core.PeerID, pk []byte) + UpdatePublicKeyShardIdCalled func(pk []byte, shardId uint32) + PutPeerIdShardIdCalled func(pid core.PeerID, shardId uint32) + UpdatePeerIDPublicKeyPairCalled func(pid core.PeerID, pk []byte) + PutPeerIdSubTypeCalled func(pid core.PeerID, peerSubType core.P2PPeerSubType) +} + +// GetLastKnownPeerID - +func (psms *PeerShardMapperStub) GetLastKnownPeerID(pk []byte) (*core.PeerID, bool) { + if psms.GetLastKnownPeerIDCalled != nil { + return psms.GetLastKnownPeerIDCalled(pk) + } + + return nil, false } // GetPeerInfo - @@ -19,6 +31,13 @@ func (psms *PeerShardMapperStub) GetPeerInfo(pid core.PeerID) core.P2PPeerInfo { return core.P2PPeerInfo{} } +// UpdatePeerIDPublicKeyPair - +func (psms *PeerShardMapperStub) UpdatePeerIDPublicKeyPair(pid core.PeerID, pk []byte) { + if psms.UpdatePeerIDPublicKeyPairCalled != nil { + psms.UpdatePeerIDPublicKeyPairCalled(pid, pk) + } +} + // UpdatePeerIdPublicKey - func (psms *PeerShardMapperStub) UpdatePeerIdPublicKey(pid core.PeerID, pk []byte) { if psms.UpdatePeerIdPublicKeyCalled != nil { @@ -33,10 +52,17 @@ func (psms *PeerShardMapperStub) UpdatePublicKeyShardId(pk []byte, shardId uint3 } } -// UpdatePeerIdShardId - -func (psms *PeerShardMapperStub) UpdatePeerIdShardId(pid core.PeerID, shardId uint32) { - if psms.UpdatePeerIdShardIdCalled != nil { - psms.UpdatePeerIdShardIdCalled(pid, shardId) +// PutPeerIdShardId - +func (psms *PeerShardMapperStub) PutPeerIdShardId(pid core.PeerID, shardId uint32) { + if psms.PutPeerIdShardIdCalled != nil { + psms.PutPeerIdShardIdCalled(pid, shardId) + } +} + +// PutPeerIdSubType - +func (psms *PeerShardMapperStub) PutPeerIdSubType(pid core.PeerID, peerSubType core.P2PPeerSubType) { + if psms.PutPeerIdSubTypeCalled != nil { + psms.PutPeerIdSubTypeCalled(pid, peerSubType) } } diff --git a/process/mock/peerSignatureHandlerStub.go b/process/mock/peerSignatureHandlerStub.go new file mode 100644 index 00000000000..87f8d78d774 --- /dev/null +++ b/process/mock/peerSignatureHandlerStub.go @@ -0,0 +1,33 @@ +package mock + +import ( + "github.com/ElrondNetwork/elrond-go-core/core" + crypto "github.com/ElrondNetwork/elrond-go-crypto" +) + +// PeerSignatureHandlerStub - +type PeerSignatureHandlerStub struct { + 
VerifyPeerSignatureCalled func(pk []byte, pid core.PeerID, signature []byte) error + GetPeerSignatureCalled func(key crypto.PrivateKey, pid []byte) ([]byte, error) +} + +// VerifyPeerSignature - +func (pshs *PeerSignatureHandlerStub) VerifyPeerSignature(pk []byte, pid core.PeerID, signature []byte) error { + if pshs.VerifyPeerSignatureCalled != nil { + return pshs.VerifyPeerSignatureCalled(pk, pid, signature) + } + return nil +} + +// GetPeerSignature - +func (pshs *PeerSignatureHandlerStub) GetPeerSignature(key crypto.PrivateKey, pid []byte) ([]byte, error) { + if pshs.GetPeerSignatureCalled != nil { + return pshs.GetPeerSignatureCalled(key, pid) + } + return nil, nil +} + +// IsInterfaceNil - +func (pshs *PeerSignatureHandlerStub) IsInterfaceNil() bool { + return false +} diff --git a/process/mock/preprocessorMock.go b/process/mock/preprocessorMock.go index 633bea57b34..7f08bb7e21e 100644 --- a/process/mock/preprocessorMock.go +++ b/process/mock/preprocessorMock.go @@ -5,6 +5,7 @@ import ( "github.com/ElrondNetwork/elrond-go-core/data" "github.com/ElrondNetwork/elrond-go-core/data/block" + "github.com/ElrondNetwork/elrond-go/process" "github.com/ElrondNetwork/elrond-go/storage" ) @@ -20,7 +21,7 @@ type PreProcessorMock struct { RequestBlockTransactionsCalled func(body *block.Body) int CreateMarshalizedDataCalled func(txHashes [][]byte) ([][]byte, error) RequestTransactionsForMiniBlockCalled func(miniBlock *block.MiniBlock) int - ProcessMiniBlockCalled func(miniBlock *block.MiniBlock, haveTime func() bool, haveAdditionalTime func() bool, getNumOfCrossInterMbsAndTxs func() (int, int), scheduledMode bool) ([][]byte, int, error) + ProcessMiniBlockCalled func(miniBlock *block.MiniBlock, haveTime func() bool, haveAdditionalTime func() bool, scheduledMode bool, partialMbExecutionMode bool, indexOfLastTxProcessed int, preProcessorExecutionInfoHandler process.PreProcessorExecutionInfoHandler) ([][]byte, int, bool, error) CreateAndProcessMiniBlocksCalled func(haveTime func() bool) (block.MiniBlockSlice, error) GetAllCurrentUsedTxsCalled func() map[string]data.TransactionHandler AddTxsFromMiniBlocksCalled func(miniBlocks block.MiniBlockSlice) @@ -108,11 +109,19 @@ func (ppm *PreProcessorMock) RequestTransactionsForMiniBlock(miniBlock *block.Mi } // ProcessMiniBlock - -func (ppm *PreProcessorMock) ProcessMiniBlock(miniBlock *block.MiniBlock, haveTime func() bool, haveAdditionalTime func() bool, getNumOfCrossInterMbsAndTxs func() (int, int), scheduledMode bool) ([][]byte, int, error) { +func (ppm *PreProcessorMock) ProcessMiniBlock( + miniBlock *block.MiniBlock, + haveTime func() bool, + haveAdditionalTime func() bool, + scheduledMode bool, + partialMbExecutionMode bool, + indexOfLastTxProcessed int, + preProcessorExecutionInfoHandler process.PreProcessorExecutionInfoHandler, +) ([][]byte, int, bool, error) { if ppm.ProcessMiniBlockCalled == nil { - return nil, 0, nil + return nil, 0, false, nil } - return ppm.ProcessMiniBlockCalled(miniBlock, haveTime, haveAdditionalTime, getNumOfCrossInterMbsAndTxs, scheduledMode) + return ppm.ProcessMiniBlockCalled(miniBlock, haveTime, haveAdditionalTime, scheduledMode, partialMbExecutionMode, indexOfLastTxProcessed, preProcessorExecutionInfoHandler) } // CreateAndProcessMiniBlocks creates miniblocks from storage and processes the reward transactions added into the miniblocks diff --git a/process/mock/signaturesHandlerStub.go b/process/mock/signaturesHandlerStub.go new file mode 100644 index 00000000000..02b583deb21 --- /dev/null +++ 
b/process/mock/signaturesHandlerStub.go @@ -0,0 +1,21 @@ +package mock + +import "github.com/ElrondNetwork/elrond-go-core/core" + +// SignaturesHandlerStub - +type SignaturesHandlerStub struct { + VerifyCalled func(payload []byte, pid core.PeerID, signature []byte) error +} + +// Verify - +func (s *SignaturesHandlerStub) Verify(payload []byte, pid core.PeerID, signature []byte) error { + if s.VerifyCalled != nil { + return s.VerifyCalled(payload, pid, signature) + } + return nil +} + +// IsInterfaceNil - +func (s *SignaturesHandlerStub) IsInterfaceNil() bool { + return false +} diff --git a/process/mock/transactionCoordinatorMock.go b/process/mock/transactionCoordinatorMock.go index befbcefb053..76352da47c7 100644 --- a/process/mock/transactionCoordinatorMock.go +++ b/process/mock/transactionCoordinatorMock.go @@ -6,6 +6,7 @@ import ( "github.com/ElrondNetwork/elrond-go-core/data" "github.com/ElrondNetwork/elrond-go-core/data/block" "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/process/block/processedMb" ) // TransactionCoordinatorMock - @@ -20,7 +21,7 @@ type TransactionCoordinatorMock struct { RemoveTxsFromPoolCalled func(body *block.Body) error ProcessBlockTransactionCalled func(header data.HeaderHandler, body *block.Body, haveTime func() time.Duration) error CreateBlockStartedCalled func() - CreateMbsAndProcessCrossShardTransactionsDstMeCalled func(header data.HeaderHandler, processedMiniBlocksHashes map[string]struct{}, haveTime func() bool, haveAdditionalTime func() bool, scheduledMode bool) (block.MiniBlockSlice, uint32, bool, error) + CreateMbsAndProcessCrossShardTransactionsDstMeCalled func(header data.HeaderHandler, processedMiniBlocksInfo map[string]*processedMb.ProcessedMiniBlockInfo, haveTime func() bool, haveAdditionalTime func() bool, scheduledMode bool) (block.MiniBlockSlice, uint32, bool, error) CreateMbsAndProcessTransactionsFromMeCalled func(haveTime func() bool) block.MiniBlockSlice CreateMarshalizedDataCalled func(body *block.Body) map[string][][]byte GetAllCurrentUsedTxsCalled func(blockType block.Type) map[string]data.TransactionHandler @@ -147,7 +148,7 @@ func (tcm *TransactionCoordinatorMock) CreateBlockStarted() { // CreateMbsAndProcessCrossShardTransactionsDstMe - func (tcm *TransactionCoordinatorMock) CreateMbsAndProcessCrossShardTransactionsDstMe( header data.HeaderHandler, - processedMiniBlocksHashes map[string]struct{}, + processedMiniBlocksInfo map[string]*processedMb.ProcessedMiniBlockInfo, haveTime func() bool, haveAdditionalTime func() bool, scheduledMode bool, @@ -156,7 +157,7 @@ func (tcm *TransactionCoordinatorMock) CreateMbsAndProcessCrossShardTransactions return nil, 0, false, nil } - return tcm.CreateMbsAndProcessCrossShardTransactionsDstMeCalled(header, processedMiniBlocksHashes, haveTime, haveAdditionalTime, scheduledMode) + return tcm.CreateMbsAndProcessCrossShardTransactionsDstMeCalled(header, processedMiniBlocksInfo, haveTime, haveAdditionalTime, scheduledMode) } // CreateMbsAndProcessTransactionsFromMe - diff --git a/process/p2p/interceptedDirectConnectionInfo.go b/process/p2p/interceptedDirectConnectionInfo.go new file mode 100644 index 00000000000..1b5ec693565 --- /dev/null +++ b/process/p2p/interceptedDirectConnectionInfo.go @@ -0,0 +1,118 @@ +package p2p + +import ( + "fmt" + "strconv" + + "github.com/ElrondNetwork/elrond-go-core/core/check" + "github.com/ElrondNetwork/elrond-go-core/marshal" + "github.com/ElrondNetwork/elrond-go/common" + "github.com/ElrondNetwork/elrond-go/p2p/message" + 
"github.com/ElrondNetwork/elrond-go/process" +) + +const interceptedDirectConnectionInfoType = "intercepted direct connection info" + +// ArgInterceptedDirectConnectionInfo is the argument used in the intercepted direct connection info constructor +type ArgInterceptedDirectConnectionInfo struct { + Marshaller marshal.Marshalizer + DataBuff []byte + NumOfShards uint32 +} + +// interceptedDirectConnectionInfo is a wrapper over DirectConnectionInfo +type interceptedDirectConnectionInfo struct { + directConnectionInfo message.DirectConnectionInfo + numOfShards uint32 +} + +// NewInterceptedDirectConnectionInfo creates a new intercepted direct connection info instance +func NewInterceptedDirectConnectionInfo(args ArgInterceptedDirectConnectionInfo) (*interceptedDirectConnectionInfo, error) { + err := checkArgs(args) + if err != nil { + return nil, err + } + + directConnectionInfo, err := createDirectConnectionInfo(args.Marshaller, args.DataBuff) + if err != nil { + return nil, err + } + + return &interceptedDirectConnectionInfo{ + directConnectionInfo: *directConnectionInfo, + numOfShards: args.NumOfShards, + }, nil +} + +func checkArgs(args ArgInterceptedDirectConnectionInfo) error { + if check.IfNil(args.Marshaller) { + return process.ErrNilMarshalizer + } + if len(args.DataBuff) == 0 { + return process.ErrNilBuffer + } + if args.NumOfShards == 0 { + return process.ErrInvalidValue + } + + return nil +} + +func createDirectConnectionInfo(marshaller marshal.Marshalizer, buff []byte) (*message.DirectConnectionInfo, error) { + directConnectionInfo := &message.DirectConnectionInfo{} + err := marshaller.Unmarshal(directConnectionInfo, buff) + if err != nil { + return nil, err + } + + return directConnectionInfo, nil +} + +// CheckValidity checks the validity of the received direct connection info +func (idci *interceptedDirectConnectionInfo) CheckValidity() error { + shardId, err := strconv.ParseUint(idci.directConnectionInfo.ShardId, 10, 32) + if err != nil { + return err + } + if uint32(shardId) != common.MetachainShardId && + uint32(shardId) >= idci.numOfShards { + return process.ErrInvalidValue + } + + return nil +} + +// IsForCurrentShard always returns true +func (idci *interceptedDirectConnectionInfo) IsForCurrentShard() bool { + return true +} + +// Hash always returns an empty string +func (idci *interceptedDirectConnectionInfo) Hash() []byte { + return []byte("") +} + +// Type returns the type of this intercepted data +func (idci *interceptedDirectConnectionInfo) Type() string { + return interceptedDirectConnectionInfoType +} + +// Identifiers always returns an array with an empty string +func (idci *interceptedDirectConnectionInfo) Identifiers() [][]byte { + return [][]byte{make([]byte, 0)} +} + +// String returns the most important fields as string +func (idci *interceptedDirectConnectionInfo) String() string { + return fmt.Sprintf("shard=%s", idci.directConnectionInfo.ShardId) +} + +// ShardID returns the shard id +func (idci *interceptedDirectConnectionInfo) ShardID() string { + return idci.directConnectionInfo.ShardId +} + +// IsInterfaceNil returns true if there is no value under the interface +func (idci *interceptedDirectConnectionInfo) IsInterfaceNil() bool { + return idci == nil +} diff --git a/process/p2p/interceptedDirectConnectionInfo_test.go b/process/p2p/interceptedDirectConnectionInfo_test.go new file mode 100644 index 00000000000..ce3338df3da --- /dev/null +++ b/process/p2p/interceptedDirectConnectionInfo_test.go @@ -0,0 +1,143 @@ +package p2p + +import ( + "bytes" + 
"fmt" + "strconv" + "testing" + + "github.com/ElrondNetwork/elrond-go-core/core/check" + "github.com/ElrondNetwork/elrond-go-core/marshal" + "github.com/ElrondNetwork/elrond-go/p2p/message" + "github.com/ElrondNetwork/elrond-go/process" + "github.com/stretchr/testify/assert" +) + +const providedShard = "5" + +func createMockArgInterceptedDirectConnectionInfo() ArgInterceptedDirectConnectionInfo { + marshaller := &marshal.GogoProtoMarshalizer{} + msg := &message.DirectConnectionInfo{ + ShardId: providedShard, + } + msgBuff, _ := marshaller.Marshal(msg) + + return ArgInterceptedDirectConnectionInfo{ + Marshaller: marshaller, + DataBuff: msgBuff, + NumOfShards: 10, + } +} +func TestNewInterceptedDirectConnectionInfo(t *testing.T) { + t.Parallel() + + t.Run("nil marshaller should error", func(t *testing.T) { + t.Parallel() + + args := createMockArgInterceptedDirectConnectionInfo() + args.Marshaller = nil + + idci, err := NewInterceptedDirectConnectionInfo(args) + assert.Equal(t, process.ErrNilMarshalizer, err) + assert.True(t, check.IfNil(idci)) + }) + t.Run("nil data buff should error", func(t *testing.T) { + t.Parallel() + + args := createMockArgInterceptedDirectConnectionInfo() + args.DataBuff = nil + + idci, err := NewInterceptedDirectConnectionInfo(args) + assert.Equal(t, process.ErrNilBuffer, err) + assert.True(t, check.IfNil(idci)) + }) + t.Run("invalid num of shards should error", func(t *testing.T) { + t.Parallel() + + args := createMockArgInterceptedDirectConnectionInfo() + args.NumOfShards = 0 + + idci, err := NewInterceptedDirectConnectionInfo(args) + assert.Equal(t, process.ErrInvalidValue, err) + assert.True(t, check.IfNil(idci)) + }) + t.Run("unmarshal returns error", func(t *testing.T) { + t.Parallel() + + args := createMockArgInterceptedDirectConnectionInfo() + args.DataBuff = []byte("invalid data") + + idci, err := NewInterceptedDirectConnectionInfo(args) + assert.NotNil(t, err) + assert.True(t, check.IfNil(idci)) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + idci, err := NewInterceptedDirectConnectionInfo(createMockArgInterceptedDirectConnectionInfo()) + assert.Nil(t, err) + assert.False(t, check.IfNil(idci)) + }) +} + +func Test_interceptedDirectConnectionInfo_CheckValidity(t *testing.T) { + t.Parallel() + + t.Run("invalid shard string should error", func(t *testing.T) { + t.Parallel() + + args := createMockArgInterceptedDirectConnectionInfo() + msg := &message.DirectConnectionInfo{ + ShardId: "invalid shard", + } + msgBuff, _ := args.Marshaller.Marshal(msg) + args.DataBuff = msgBuff + idci, err := NewInterceptedDirectConnectionInfo(args) + assert.Nil(t, err) + assert.False(t, check.IfNil(idci)) + + err = idci.CheckValidity() + assert.NotNil(t, err) + }) + t.Run("invalid shard should error", func(t *testing.T) { + t.Parallel() + + args := createMockArgInterceptedDirectConnectionInfo() + ps, _ := strconv.ParseInt(providedShard, 10, 32) + args.NumOfShards = uint32(ps - 1) + + idci, err := NewInterceptedDirectConnectionInfo(args) + assert.Nil(t, err) + assert.False(t, check.IfNil(idci)) + + err = idci.CheckValidity() + assert.Equal(t, process.ErrInvalidValue, err) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + idci, err := NewInterceptedDirectConnectionInfo(createMockArgInterceptedDirectConnectionInfo()) + assert.Nil(t, err) + assert.False(t, check.IfNil(idci)) + + err = idci.CheckValidity() + assert.Nil(t, err) + }) +} + +func Test_interceptedDirectConnectionInfo_Getters(t *testing.T) { + t.Parallel() + + idci, err := 
NewInterceptedDirectConnectionInfo(createMockArgInterceptedDirectConnectionInfo()) + assert.Nil(t, err) + assert.False(t, check.IfNil(idci)) + + assert.True(t, idci.IsForCurrentShard()) + assert.True(t, bytes.Equal([]byte(""), idci.Hash())) + assert.Equal(t, interceptedDirectConnectionInfoType, idci.Type()) + identifiers := idci.Identifiers() + assert.Equal(t, 1, len(identifiers)) + assert.True(t, bytes.Equal([]byte(""), identifiers[0])) + assert.Equal(t, fmt.Sprintf("shard=%s", providedShard), idci.String()) + assert.Equal(t, providedShard, idci.ShardID()) +} diff --git a/process/rating/peerHonesty/peerHonesty_test.go b/process/rating/peerHonesty/peerHonesty_test.go index 77e7ffcd308..d9b61003194 100644 --- a/process/rating/peerHonesty/peerHonesty_test.go +++ b/process/rating/peerHonesty/peerHonesty_test.go @@ -178,7 +178,8 @@ func TestP2pPeerHonesty_Close(t *testing.T) { assert.Nil(t, err) time.Sleep(time.Second*2 + time.Millisecond*100) - assert.Equal(t, int32(2), atomic.LoadInt32(&numCalls)) + calls := atomic.LoadInt32(&numCalls) + assert.Equal(t, int32(2), calls) } func TestP2pPeerHonesty_ChangeScoreShouldWork(t *testing.T) { diff --git a/process/smartContract/builtInFunctions/factory.go b/process/smartContract/builtInFunctions/factory.go index a1d2b593e26..70c84f8c1a4 100644 --- a/process/smartContract/builtInFunctions/factory.go +++ b/process/smartContract/builtInFunctions/factory.go @@ -13,18 +13,19 @@ import ( // ArgsCreateBuiltInFunctionContainer defines the argument structure to create new built in function container type ArgsCreateBuiltInFunctionContainer struct { - GasSchedule core.GasScheduleNotifier - MapDNSAddresses map[string]struct{} - EnableUserNameChange bool - Marshalizer marshal.Marshalizer - Accounts state.AccountsAdapter - ShardCoordinator sharding.Coordinator - EpochNotifier vmcommon.EpochNotifier - ESDTMultiTransferEnableEpoch uint32 - ESDTTransferRoleEnableEpoch uint32 - GlobalMintBurnDisableEpoch uint32 - ESDTTransferMetaEnableEpoch uint32 - OptimizeNFTStoreEnableEpoch uint32 + GasSchedule core.GasScheduleNotifier + MapDNSAddresses map[string]struct{} + EnableUserNameChange bool + Marshalizer marshal.Marshalizer + Accounts state.AccountsAdapter + ShardCoordinator sharding.Coordinator + EpochNotifier vmcommon.EpochNotifier + ESDTMultiTransferEnableEpoch uint32 + ESDTTransferRoleEnableEpoch uint32 + GlobalMintBurnDisableEpoch uint32 + ESDTTransferMetaEnableEpoch uint32 + OptimizeNFTStoreEnableEpoch uint32 + CheckCorrectTokenIDEnableEpoch uint32 } // CreateBuiltInFuncContainerAndNFTStorageHandler creates a container that will hold all the available built in functions @@ -66,6 +67,7 @@ func CreateBuiltInFuncContainerAndNFTStorageHandler(args ArgsCreateBuiltInFuncti ESDTTransferRoleEnableEpoch: args.ESDTTransferRoleEnableEpoch, GlobalMintBurnDisableEpoch: args.GlobalMintBurnDisableEpoch, SaveNFTToSystemAccountEnableEpoch: args.OptimizeNFTStoreEnableEpoch, + CheckCorrectTokenIDEnableEpoch: args.CheckCorrectTokenIDEnableEpoch, } bContainerFactory, err := vmcommonBuiltInFunctions.NewBuiltInFunctionsCreator(modifiedArgs) diff --git a/process/smartContract/process.go b/process/smartContract/process.go index 2c745191eb8..b3356e1835f 100644 --- a/process/smartContract/process.go +++ b/process/smartContract/process.go @@ -1055,7 +1055,8 @@ func mergeVMOutputLogs(newVMOutput *vmcommon.VMOutput, vmOutput *vmcommon.VMOutp if newVMOutput.Logs == nil { newVMOutput.Logs = make([]*vmcommon.LogEntry, 0, len(vmOutput.Logs)) } - newVMOutput.Logs = append(newVMOutput.Logs, 
vmOutput.Logs...) + + newVMOutput.Logs = append(vmOutput.Logs, newVMOutput.Logs...) } func (sc *scProcessor) processSCRForSenderAfterBuiltIn( diff --git a/process/smartContract/process_test.go b/process/smartContract/process_test.go index 1e2f000069f..91fc9f826dd 100644 --- a/process/smartContract/process_test.go +++ b/process/smartContract/process_test.go @@ -4245,6 +4245,27 @@ func TestMergeVmOutputLogs(t *testing.T) { mergeVMOutputLogs(vmOutput1, vmOutput2) require.Len(t, vmOutput1.Logs, 2) + + vmOutput1 = &vmcommon.VMOutput{ + Logs: []*vmcommon.LogEntry{ + { + Identifier: []byte("identifier2"), + }, + }, + } + + vmOutput2 = &vmcommon.VMOutput{ + Logs: []*vmcommon.LogEntry{ + { + Identifier: []byte("identifier1"), + }, + }, + } + + mergeVMOutputLogs(vmOutput1, vmOutput2) + require.Len(t, vmOutput1.Logs, 2) + require.Equal(t, []byte("identifier1"), vmOutput1.Logs[0].Identifier) + require.Equal(t, []byte("identifier2"), vmOutput1.Logs[1].Identifier) } func TestScProcessor_TooMuchGasProvidedMessage(t *testing.T) { diff --git a/process/smartContract/scQueryService.go b/process/smartContract/scQueryService.go index 2faa7b51dc4..d8831ea41cb 100644 --- a/process/smartContract/scQueryService.go +++ b/process/smartContract/scQueryService.go @@ -42,6 +42,7 @@ type ArgsNewSCQueryService struct { ArwenChangeLocker common.Locker Bootstrapper process.Bootstrapper AllowExternalQueriesChan chan struct{} + MaxGasLimitPerQuery uint64 } // NewSCQueryService returns a new instance of SCQueryService @@ -70,6 +71,10 @@ func NewSCQueryService( return nil, process.ErrNilAllowExternalQueriesChan } + gasForQuery := uint64(math.MaxUint64) + if args.MaxGasLimitPerQuery > 0 { + gasForQuery = args.MaxGasLimitPerQuery + } return &SCQueryService{ vmContainer: args.VmContainer, economicsFee: args.EconomicsFee, @@ -77,7 +82,7 @@ func NewSCQueryService( blockChainHook: args.BlockChainHook, arwenChangeLocker: args.ArwenChangeLocker, bootstrapper: args.Bootstrapper, - gasForQuery: math.MaxUint64, + gasForQuery: gasForQuery, allowExternalQueriesChan: args.AllowExternalQueriesChan, }, nil } diff --git a/process/smartContract/scQueryService_test.go b/process/smartContract/scQueryService_test.go index 5fbfbad49fd..20ebce0ca88 100644 --- a/process/smartContract/scQueryService_test.go +++ b/process/smartContract/scQueryService_test.go @@ -308,6 +308,85 @@ func TestExecuteQuery_ReturnsCorrectly(t *testing.T) { assert.Equal(t, d[1], vmOutput.ReturnData[1]) } +func TestExecuteQuery_GasProvidedShouldBeApplied(t *testing.T) { + t.Parallel() + + t.Run("no gas defined, should use max uint64", func(t *testing.T) { + t.Parallel() + + runSCWasCalled := false + mockVM := &mock.VMExecutionHandlerStub{ + RunSmartContractCallCalled: func(input *vmcommon.ContractCallInput) (output *vmcommon.VMOutput, e error) { + require.Equal(t, uint64(math.MaxUint64), input.GasProvided) + runSCWasCalled = true + return &vmcommon.VMOutput{}, nil + }, + } + argsNewSCQuery := createMockArgumentsForSCQuery() + argsNewSCQuery.VmContainer = &mock.VMContainerMock{ + GetCalled: func(key []byte) (handler vmcommon.VMExecutionHandler, e error) { + return mockVM, nil + }, + } + argsNewSCQuery.EconomicsFee = &mock.FeeHandlerStub{ + MaxGasLimitPerBlockCalled: func() uint64 { + return uint64(math.MaxUint64) + }, + } + + target, _ := NewSCQueryService(argsNewSCQuery) + + query := process.SCQuery{ + ScAddress: []byte(DummyScAddress), + FuncName: "function", + Arguments: [][]byte{}, + } + + _, err := target.ExecuteQuery(&query) + require.Nil(t, err) + require.True(t, 
runSCWasCalled) + }) + + t.Run("custom gas defined, should use it", func(t *testing.T) { + t.Parallel() + + maxGasLimit := uint64(1_500_000_000) + runSCWasCalled := false + mockVM := &mock.VMExecutionHandlerStub{ + RunSmartContractCallCalled: func(input *vmcommon.ContractCallInput) (output *vmcommon.VMOutput, e error) { + require.Equal(t, maxGasLimit, input.GasProvided) + runSCWasCalled = true + return &vmcommon.VMOutput{}, nil + }, + } + argsNewSCQuery := createMockArgumentsForSCQuery() + argsNewSCQuery.VmContainer = &mock.VMContainerMock{ + GetCalled: func(key []byte) (handler vmcommon.VMExecutionHandler, e error) { + return mockVM, nil + }, + } + argsNewSCQuery.EconomicsFee = &mock.FeeHandlerStub{ + MaxGasLimitPerBlockCalled: func() uint64 { + return uint64(math.MaxUint64) + }, + } + + argsNewSCQuery.MaxGasLimitPerQuery = maxGasLimit + + target, _ := NewSCQueryService(argsNewSCQuery) + + query := process.SCQuery{ + ScAddress: []byte(DummyScAddress), + FuncName: "function", + Arguments: [][]byte{}, + } + + _, err := target.ExecuteQuery(&query) + require.Nil(t, err) + require.True(t, runSCWasCalled) + }) +} + func TestExecuteQuery_WhenNotOkCodeShouldNotErr(t *testing.T) { t.Parallel() diff --git a/process/sync/baseSync.go b/process/sync/baseSync.go index a4c9ab87344..a4e22e5d9b4 100644 --- a/process/sync/baseSync.go +++ b/process/sync/baseSync.go @@ -750,7 +750,7 @@ func (boot *baseBootstrap) rollBack(revertUsingForkNonce bool) error { return err } - allowRollBack := boot.shouldAllowRollback(currHeader) + allowRollBack := boot.shouldAllowRollback(currHeader, currHeaderHash) if !revertUsingForkNonce && !allowRollBack { return ErrRollBackBehindFinalHeader } @@ -834,17 +834,22 @@ func (boot *baseBootstrap) rollBack(revertUsingForkNonce bool) error { return nil } -func (boot *baseBootstrap) shouldAllowRollback(currHeader data.HeaderHandler) bool { +func (boot *baseBootstrap) shouldAllowRollback(currHeader data.HeaderHandler, currHeaderHash []byte) bool { finalBlockNonce := boot.forkDetector.GetHighestFinalBlockNonce() + finalBlockHash := boot.forkDetector.GetHighestFinalBlockHash() isRollBackBehindFinal := currHeader.GetNonce() <= finalBlockNonce isFinalBlockRollBack := currHeader.GetNonce() == finalBlockNonce headerWithScheduledMiniBlocks := currHeader.HasScheduledMiniBlocks() - allowFinalBlockRollBack := headerWithScheduledMiniBlocks && isFinalBlockRollBack + headerHashDoesNotMatchWithFinalBlockHash := !bytes.Equal(currHeaderHash, finalBlockHash) + allowFinalBlockRollBack := (headerWithScheduledMiniBlocks || headerHashDoesNotMatchWithFinalBlockHash) && isFinalBlockRollBack allowRollBack := !isRollBackBehindFinal || allowFinalBlockRollBack log.Debug("baseBootstrap.shouldAllowRollback", "isRollBackBehindFinal", isRollBackBehindFinal, + "isFinalBlockRollBack", isFinalBlockRollBack, + "headerWithScheduledMiniBlocks", headerWithScheduledMiniBlocks, + "headerHashDoesNotMatchWithFinalBlockHash", headerHashDoesNotMatchWithFinalBlockHash, "allowFinalBlockRollBack", allowFinalBlockRollBack, "allowRollBack", allowRollBack, ) diff --git a/process/sync/baseSync_test.go b/process/sync/baseSync_test.go index 90f35ae6c47..e15cc76e07d 100644 --- a/process/sync/baseSync_test.go +++ b/process/sync/baseSync_test.go @@ -230,11 +230,16 @@ func TestBaseSync_getEpochOfCurrentBlockHeader(t *testing.T) { func TestBaseSync_shouldAllowRollback(t *testing.T) { t.Parallel() + finalBlockHash := []byte("final block hash") + notFinalBlockHash := []byte("not final block hash") boot := &baseBootstrap{ forkDetector: 
&mock.ForkDetectorMock{ GetHighestFinalBlockNonceCalled: func() uint64 { return 10 }, + GetHighestFinalBlockHashCalled: func() []byte { + return finalBlockHash + }, }, } @@ -247,15 +252,17 @@ func TestBaseSync_shouldAllowRollback(t *testing.T) { return false }, } - require.True(t, boot.shouldAllowRollback(header)) + require.True(t, boot.shouldAllowRollback(header, finalBlockHash)) + require.True(t, boot.shouldAllowRollback(header, notFinalBlockHash)) header.HasScheduledMiniBlocksCalled = func() bool { return true } - require.True(t, boot.shouldAllowRollback(header)) + require.True(t, boot.shouldAllowRollback(header, finalBlockHash)) + require.True(t, boot.shouldAllowRollback(header, notFinalBlockHash)) }) - t.Run("should not allow rollback of a final header if it doesn't have scheduled miniBlocks", func(t *testing.T) { + t.Run("should not allow rollback of a final header with the same final hash if it doesn't have scheduled miniBlocks", func(t *testing.T) { header := &testscommon.HeaderHandlerStub{ GetNonceCalled: func() uint64 { return 10 @@ -264,10 +271,19 @@ func TestBaseSync_shouldAllowRollback(t *testing.T) { return false }, } - header.GetNonceCalled = func() uint64 { - return 10 + require.False(t, boot.shouldAllowRollback(header, finalBlockHash)) + }) + + t.Run("should allow rollback of a final header without the same final hash", func(t *testing.T) { + header := &testscommon.HeaderHandlerStub{ + GetNonceCalled: func() uint64 { + return 10 + }, + HasScheduledMiniBlocksCalled: func() bool { + return false + }, } - require.False(t, boot.shouldAllowRollback(header)) + require.True(t, boot.shouldAllowRollback(header, notFinalBlockHash)) }) t.Run("should allow rollback of a final header if it holds scheduled miniBlocks", func(t *testing.T) { @@ -279,10 +295,10 @@ func TestBaseSync_shouldAllowRollback(t *testing.T) { return true }, } - require.True(t, boot.shouldAllowRollback(header)) + require.True(t, boot.shouldAllowRollback(header, finalBlockHash)) }) - t.Run("should not allow any rollBack of a header if nonce is behind final", func(t *testing.T){ + t.Run("should not allow any rollBack of a header if nonce is behind final", func(t *testing.T) { header := &testscommon.HeaderHandlerStub{ GetNonceCalled: func() uint64 { return 9 @@ -291,11 +307,13 @@ func TestBaseSync_shouldAllowRollback(t *testing.T) { return true }, } - require.False(t, boot.shouldAllowRollback(header)) + require.False(t, boot.shouldAllowRollback(header, finalBlockHash)) + require.False(t, boot.shouldAllowRollback(header, notFinalBlockHash)) header.HasScheduledMiniBlocksCalled = func() bool { return false } - require.False(t, boot.shouldAllowRollback(header)) + require.False(t, boot.shouldAllowRollback(header, finalBlockHash)) + require.False(t, boot.shouldAllowRollback(header, notFinalBlockHash)) }) } diff --git a/process/sync/errors.go b/process/sync/errors.go index a3993a90ac3..c33db506b65 100644 --- a/process/sync/errors.go +++ b/process/sync/errors.go @@ -44,3 +44,6 @@ var ErrRollBackBehindForkNonce = errors.New("roll back behind fork nonce is not // ErrGenesisTimeMissmatch signals that a received header has a genesis time missmatch var ErrGenesisTimeMissmatch = errors.New("genesis time missmatch") + +// ErrHeaderNotFound signals that the needed header is not found +var ErrHeaderNotFound = errors.New("header is not found") diff --git a/process/sync/loadPersistentMetrics.go b/process/sync/loadPersistentMetrics.go index 545410be416..c917f6450e8 100644 --- a/process/sync/loadPersistentMetrics.go +++ 
b/process/sync/loadPersistentMetrics.go @@ -91,7 +91,8 @@ func prepareMetricMaps(metricsMap map[string]interface{}) (map[string]uint64, ma uint64Map[common.MetricNumProcessedTxs] = persister.GetUint64(metricsMap[common.MetricNumProcessedTxs]) uint64Map[common.MetricNumShardHeadersProcessed] = persister.GetUint64(metricsMap[common.MetricNumShardHeadersProcessed]) uint64Map[common.MetricEpochForEconomicsData] = persister.GetUint64(metricsMap[common.MetricEpochForEconomicsData]) - + uint64Map[common.MetricNonceAtEpochStart] = persister.GetUint64(metricsMap[common.MetricNonceAtEpochStart]) + uint64Map[common.MetricRoundAtEpochStart] = persister.GetUint64(metricsMap[common.MetricRoundAtEpochStart]) stringMap[common.MetricTotalSupply] = persister.GetString(metricsMap[common.MetricTotalSupply]) stringMap[common.MetricTotalFees] = persister.GetString(metricsMap[common.MetricTotalFees]) stringMap[common.MetricDevRewardsInEpoch] = persister.GetString(metricsMap[common.MetricDevRewardsInEpoch]) diff --git a/process/sync/metablock_test.go b/process/sync/metablock_test.go index 8eea995b6df..c7845e868d3 100644 --- a/process/sync/metablock_test.go +++ b/process/sync/metablock_test.go @@ -746,6 +746,9 @@ func TestMetaBootstrap_SyncBlockShouldReturnErrorWhenProcessBlockFailed(t *testi forkDetector.GetHighestFinalBlockNonceCalled = func() uint64 { return hdr.Nonce } + forkDetector.GetHighestFinalBlockHashCalled = func() []byte { + return []byte("hash") + } forkDetector.ProbableHighestNonceCalled = func() uint64 { return 2 } @@ -801,6 +804,9 @@ func TestMetaBootstrap_GetNodeStateShouldReturnNotSynchronizedWhenCurrentBlockIs forkDetector.ProbableHighestNonceCalled = func() uint64 { return 1 } + forkDetector.GetHighestFinalBlockHashCalled = func() []byte { + return []byte("hash") + } args.ForkDetector = forkDetector args.RoundHandler, _ = round.NewRound(time.Now(), time.Now().Add(100*time.Millisecond), 100*time.Millisecond, &mock.SyncTimerMock{}, 0) @@ -1243,8 +1249,11 @@ func TestMetaBootstrap_RollBackIsNotEmptyShouldErr(t *testing.T) { Nonce: newHdrNonce, } } + blkc.GetCurrentBlockHeaderHashCalled = func() []byte { + return newHdrHash + } args.ChainHandler = blkc - args.ForkDetector = createForkDetector(newHdrNonce, remFlags) + args.ForkDetector = createForkDetector(newHdrNonce, newHdrHash, remFlags) bs, _ := sync.NewMetaBootstrap(args) err := bs.RollBack(false) @@ -1370,7 +1379,7 @@ func TestMetaBootstrap_RollBackIsEmptyCallRollBackOneBlockOkValsShouldWork(t *te return nil }, } - args.ForkDetector = createForkDetector(currentHdrNonce, remFlags) + args.ForkDetector = createForkDetector(currentHdrNonce, currentHdrHash, remFlags) args.Accounts = &stateMock.AccountsStub{ RecreateTrieCalled: func(rootHash []byte) error { return nil @@ -1511,7 +1520,7 @@ func TestMetaBootstrap_RollBackIsEmptyCallRollBackOneBlockToGenesisShouldWork(t return nil }, } - args.ForkDetector = createForkDetector(currentHdrNonce, remFlags) + args.ForkDetector = createForkDetector(currentHdrNonce, currentHdrHash, remFlags) args.Accounts = &stateMock.AccountsStub{ RecreateTrieCalled: func(rootHash []byte) error { return nil @@ -1655,6 +1664,9 @@ func TestMetaBootstrap_SyncBlockErrGetNodeDBShouldSyncAccounts(t *testing.T) { forkDetector.GetHighestFinalBlockNonceCalled = func() uint64 { return hdr.Nonce } + forkDetector.GetHighestFinalBlockHashCalled = func() []byte { + return []byte("hash") + } forkDetector.ProbableHighestNonceCalled = func() uint64 { return 2 } diff --git a/process/sync/shardblock_test.go b/process/sync/shardblock_test.go 
index a918daf4d33..018eb9ff8cc 100644 --- a/process/sync/shardblock_test.go +++ b/process/sync/shardblock_test.go @@ -123,7 +123,7 @@ func createBlockProcessor(blk data.ChainHandler) *mock.BlockProcessorMock { return blockProcessorMock } -func createForkDetector(removedNonce uint64, remFlags *removedFlags) process.ForkDetector { +func createForkDetector(removedNonce uint64, removedHash []byte, remFlags *removedFlags) process.ForkDetector { return &mock.ForkDetectorMock{ RemoveHeaderCalled: func(nonce uint64, hash []byte) { if nonce == removedNonce { @@ -133,6 +133,9 @@ func createForkDetector(removedNonce uint64, remFlags *removedFlags) process.For GetHighestFinalBlockNonceCalled: func() uint64 { return removedNonce }, + GetHighestFinalBlockHashCalled: func() []byte { + return removedHash + }, ProbableHighestNonceCalled: func() uint64 { return uint64(0) }, @@ -917,6 +920,9 @@ func TestBootstrap_SyncBlockShouldReturnErrorWhenProcessBlockFailed(t *testing.T forkDetector.GetHighestFinalBlockNonceCalled = func() uint64 { return hdr.Nonce } + forkDetector.GetHighestFinalBlockHashCalled = func() []byte { + return []byte("hash") + } forkDetector.ProbableHighestNonceCalled = func() uint64 { return 2 } @@ -1400,8 +1406,11 @@ func TestBootstrap_RollBackIsNotEmptyShouldErr(t *testing.T) { Nonce: newHdrNonce, } } + blkc.GetCurrentBlockHeaderHashCalled = func() []byte { + return newHdrHash + } args.ChainHandler = blkc - args.ForkDetector = createForkDetector(newHdrNonce, remFlags) + args.ForkDetector = createForkDetector(newHdrNonce, newHdrHash, remFlags) bs, _ := sync.NewShardBootstrap(args) err := bs.RollBack(false) @@ -1527,7 +1536,7 @@ func TestBootstrap_RollBackIsEmptyCallRollBackOneBlockOkValsShouldWork(t *testin return nil }, } - args.ForkDetector = createForkDetector(currentHdrNonce, remFlags) + args.ForkDetector = createForkDetector(currentHdrNonce, currentHdrHash, remFlags) args.Accounts = &stateMock.AccountsStub{ RecreateTrieCalled: func(rootHash []byte) error { return nil @@ -1670,7 +1679,7 @@ func TestBootstrap_RollbackIsEmptyCallRollBackOneBlockToGenesisShouldWork(t *tes return nil }, } - args.ForkDetector = createForkDetector(currentHdrNonce, remFlags) + args.ForkDetector = createForkDetector(currentHdrNonce, currentHdrHash, remFlags) args.Accounts = &stateMock.AccountsStub{ RecreateTrieCalled: func(rootHash []byte) error { return nil @@ -2005,6 +2014,9 @@ func TestShardBootstrap_DoJobOnSyncBlockFailShouldResetProbableHighestNonce(t *t GetHighestFinalBlockNonceCalled: func() uint64 { return 1 }, + GetHighestFinalBlockHashCalled: func() []byte { + return []byte("hash") + }, ResetProbableHighestNonceCalled: func() { wasCalled = true }, @@ -2115,6 +2127,9 @@ func TestShardBootstrap_SyncBlockGetNodeDBErrorShouldSync(t *testing.T) { forkDetector.GetHighestFinalBlockNonceCalled = func() uint64 { return hdr.Nonce } + forkDetector.GetHighestFinalBlockHashCalled = func() []byte { + return []byte("hash") + } forkDetector.ProbableHighestNonceCalled = func() uint64 { return 2 } diff --git a/process/sync/storageBootstrap/baseStorageBootstrapper.go b/process/sync/storageBootstrap/baseStorageBootstrapper.go index 6fd1f4f4e81..35d33f3d679 100644 --- a/process/sync/storageBootstrap/baseStorageBootstrapper.go +++ b/process/sync/storageBootstrap/baseStorageBootstrapper.go @@ -13,7 +13,6 @@ import ( "github.com/ElrondNetwork/elrond-go/dataRetriever" "github.com/ElrondNetwork/elrond-go/process" "github.com/ElrondNetwork/elrond-go/process/block/bootstrapStorage" - 
"github.com/ElrondNetwork/elrond-go/process/block/processedMb" "github.com/ElrondNetwork/elrond-go/process/sync" "github.com/ElrondNetwork/elrond-go/sharding" "github.com/ElrondNetwork/elrond-go/sharding/nodesCoordinator" @@ -22,6 +21,8 @@ import ( var log = logger.GetOrCreate("process/sync") +const maxNumOfConsecutiveNoncesNotFoundAccepted = 10 + // ArgsBaseStorageBootstrapper is structure used to create a new storage bootstrapper type ArgsBaseStorageBootstrapper struct { BootStorer process.BootStorer @@ -40,6 +41,7 @@ type ArgsBaseStorageBootstrapper struct { ScheduledTxsExecutionHandler process.ScheduledTxsExecutionHandler MiniblocksProvider process.MiniBlockProvider EpochNotifier process.EpochNotifier + ProcessedMiniBlocksTracker process.ProcessedMiniBlocksTracker } // ArgsShardStorageBootstrapper is structure used to create a new storage bootstrapper for shard @@ -73,6 +75,7 @@ type storageBootstrapper struct { scheduledTxsExecutionHandler process.ScheduledTxsExecutionHandler miniBlocksProvider process.MiniBlockProvider epochNotifier process.EpochNotifier + processedMiniBlocksTracker process.ProcessedMiniBlocksTracker } func (st *storageBootstrapper) loadBlocks() error { @@ -164,13 +167,11 @@ func (st *storageBootstrapper) loadBlocks() error { st.bootstrapper.applyNumPendingMiniBlocks(headerInfo.PendingMiniBlocks) - processedMiniBlocks := processedMb.NewProcessedMiniBlocks() - processedMiniBlocks.ConvertSliceToProcessedMiniBlocksMap(headerInfo.ProcessedMiniBlocks) - processedMiniBlocks.DisplayProcessedMiniBlocks() - - st.blkExecutor.ApplyProcessedMiniBlocks(processedMiniBlocks) + st.processedMiniBlocksTracker.ConvertSliceToProcessedMiniBlocksMap(headerInfo.ProcessedMiniBlocks) + st.processedMiniBlocksTracker.DisplayProcessedMiniBlocks() st.cleanupStorageForHigherNonceIfExist() + st.bootstrapper.cleanupNotarizedStorageForHigherNoncesIfExist(headerInfo.LastCrossNotarizedHeaders) for i := 0; i < len(storageHeadersInfo)-1; i++ { st.cleanupStorage(storageHeadersInfo[i].LastHeader) @@ -500,6 +501,9 @@ func checkBaseStorageBootstrapperArguments(args ArgsBaseStorageBootstrapper) err if check.IfNil(args.EpochNotifier) { return process.ErrNilEpochNotifier } + if check.IfNil(args.ProcessedMiniBlocksTracker) { + return process.ErrNilProcessedMiniBlocksTracker + } return nil } diff --git a/process/sync/storageBootstrap/baseStorageBootstrapper_test.go b/process/sync/storageBootstrap/baseStorageBootstrapper_test.go index f72c2ab340a..fe950aaccf3 100644 --- a/process/sync/storageBootstrap/baseStorageBootstrapper_test.go +++ b/process/sync/storageBootstrap/baseStorageBootstrapper_test.go @@ -35,6 +35,7 @@ func createMockShardStorageBoostrapperArgs() ArgsBaseStorageBootstrapper { ScheduledTxsExecutionHandler: &testscommon.ScheduledTxsExecutionStub{}, MiniblocksProvider: &mock.MiniBlocksProviderStub{}, EpochNotifier: &epochNotifierMock.EpochNotifierStub{}, + ProcessedMiniBlocksTracker: &testscommon.ProcessedMiniBlocksTrackerStub{}, } return argsBaseBootstrapper @@ -169,6 +170,16 @@ func TestBaseStorageBootstrapper_CheckBaseStorageBootstrapperArguments(t *testin err := checkBaseStorageBootstrapperArguments(args) assert.Equal(t, process.ErrNilEpochNotifier, err) }) + + t.Run("nil processed mini blocks tracker should error", func(t *testing.T) { + t.Parallel() + + args := createMockShardStorageBoostrapperArgs() + args.ProcessedMiniBlocksTracker = nil + + err := checkBaseStorageBootstrapperArguments(args) + assert.Equal(t, process.ErrNilProcessedMiniBlocksTracker, err) + }) } func 
TestBaseStorageBootstrapper_RestoreBlockBodyIntoPoolsShouldErrMissingHeader(t *testing.T) { diff --git a/process/sync/storageBootstrap/interface.go b/process/sync/storageBootstrap/interface.go index 25d5374a90d..84acabf3671 100644 --- a/process/sync/storageBootstrap/interface.go +++ b/process/sync/storageBootstrap/interface.go @@ -13,6 +13,7 @@ type storageBootstrapperHandler interface { applyNumPendingMiniBlocks(pendingMiniBlocks []bootstrapStorage.PendingMiniBlocksInfo) applySelfNotarizedHeaders(selfNotarizedHeaders []bootstrapStorage.BootstrapHeaderInfo) ([]data.HeaderHandler, [][]byte, error) cleanupNotarizedStorage(hash []byte) + cleanupNotarizedStorageForHigherNoncesIfExist(crossNotarizedHeaders []bootstrapStorage.BootstrapHeaderInfo) getRootHash(hash []byte) []byte IsInterfaceNil() bool } diff --git a/process/sync/storageBootstrap/metaStorageBootstrapper.go b/process/sync/storageBootstrap/metaStorageBootstrapper.go index 0b358976272..5d76622a401 100644 --- a/process/sync/storageBootstrap/metaStorageBootstrapper.go +++ b/process/sync/storageBootstrap/metaStorageBootstrapper.go @@ -39,6 +39,7 @@ func NewMetaStorageBootstrapper(arguments ArgsMetaStorageBootstrapper) (*metaSto scheduledTxsExecutionHandler: arguments.ScheduledTxsExecutionHandler, miniBlocksProvider: arguments.MiniblocksProvider, epochNotifier: arguments.EpochNotifier, + processedMiniBlocksTracker: arguments.ProcessedMiniBlocksTracker, } boot := metaStorageBootstrapper{ @@ -133,6 +134,9 @@ func (msb *metaStorageBootstrapper) cleanupNotarizedStorage(metaBlockHash []byte } } +func (msb *metaStorageBootstrapper) cleanupNotarizedStorageForHigherNoncesIfExist(_ []bootstrapStorage.BootstrapHeaderInfo) { +} + func (msb *metaStorageBootstrapper) applySelfNotarizedHeaders( bootstrapHeadersInfo []bootstrapStorage.BootstrapHeaderInfo, ) ([]data.HeaderHandler, [][]byte, error) { diff --git a/process/sync/storageBootstrap/shardStorageBootstrapper.go b/process/sync/storageBootstrap/shardStorageBootstrapper.go index f228bf87f20..758cbda123d 100644 --- a/process/sync/storageBootstrap/shardStorageBootstrapper.go +++ b/process/sync/storageBootstrap/shardStorageBootstrapper.go @@ -7,6 +7,7 @@ import ( "github.com/ElrondNetwork/elrond-go/dataRetriever" "github.com/ElrondNetwork/elrond-go/process" "github.com/ElrondNetwork/elrond-go/process/block/bootstrapStorage" + "github.com/ElrondNetwork/elrond-go/process/sync" ) var _ process.BootstrapperFromStorage = (*shardStorageBootstrapper)(nil) @@ -39,6 +40,7 @@ func NewShardStorageBootstrapper(arguments ArgsShardStorageBootstrapper) (*shard scheduledTxsExecutionHandler: arguments.ScheduledTxsExecutionHandler, miniBlocksProvider: arguments.MiniblocksProvider, epochNotifier: arguments.EpochNotifier, + processedMiniBlocksTracker: arguments.ProcessedMiniBlocksTracker, } boot := shardStorageBootstrapper{ @@ -118,25 +120,100 @@ func (ssb *shardStorageBootstrapper) cleanupNotarizedStorage(shardHeaderHash []b "nonce", metaBlock.GetNonce(), "hash", metaBlockHash) - nonceToByteSlice := ssb.uint64Converter.ToByteSlice(metaBlock.GetNonce()) - err = ssb.store.GetStorer(dataRetriever.MetaHdrNonceHashDataUnit).Remove(nonceToByteSlice) + ssb.removeMetaFromMetaHeaderNonceToHashUnit(metaBlock, metaBlockHash) + ssb.removeMetaFromMetaBlockUnit(metaBlock, metaBlockHash) + } +} + +func (ssb *shardStorageBootstrapper) cleanupNotarizedStorageForHigherNoncesIfExist( + crossNotarizedHeaders []bootstrapStorage.BootstrapHeaderInfo, +) { + var numConsecutiveNoncesNotFound int + + lastCrossNotarizedNonce, err := 
getLastCrossNotarizedHeaderNonce(crossNotarizedHeaders) + if err != nil { + log.Warn("cleanupNotarizedStorageForHigherNoncesIfExist", "error", err.Error()) + return + } + + log.Debug("cleanup notarized storage has been started", "from nonce", lastCrossNotarizedNonce+1) + nonce := lastCrossNotarizedNonce + + for { + nonce++ + + metaBlock, metaBlockHash, err := process.GetMetaHeaderFromStorageWithNonce( + nonce, + ssb.store, + ssb.uint64Converter, + ssb.marshalizer, + ) if err != nil { - log.Debug("meta block was not removed from MetaHdrNonceHashDataUnit storage", - "shardId", metaBlock.GetShardID(), - "nonce", metaBlock.GetNonce(), - "hash", metaBlockHash, - "error", err.Error()) + log.Debug("meta block is not found in MetaHdrNonceHashDataUnit storage", + "nonce", nonce, "error", err.Error()) + + numConsecutiveNoncesNotFound++ + if numConsecutiveNoncesNotFound > maxNumOfConsecutiveNoncesNotFoundAccepted { + log.Debug("cleanup notarized storage has been finished", + "from nonce", lastCrossNotarizedNonce+1, + "to nonce", nonce) + break + } + + continue } - err = ssb.store.GetStorer(dataRetriever.MetaBlockUnit).Remove(metaBlockHash) - if err != nil { - log.Debug("meta block was not removed from MetaBlockUnit storage", - "shardId", metaBlock.GetShardID(), - "nonce", metaBlock.GetNonce(), - "hash", metaBlockHash, - "error", err.Error()) + numConsecutiveNoncesNotFound = 0 + + log.Debug("removing meta block from storage", + "shardId", metaBlock.GetShardID(), + "nonce", metaBlock.GetNonce(), + "hash", metaBlockHash) + + ssb.removeMetaFromMetaHeaderNonceToHashUnit(metaBlock, metaBlockHash) + ssb.removeMetaFromMetaBlockUnit(metaBlock, metaBlockHash) + } +} + +func (ssb *shardStorageBootstrapper) removeMetaFromMetaHeaderNonceToHashUnit(metaBlock *block.MetaBlock, metaBlockHash []byte) { + nonceToByteSlice := ssb.uint64Converter.ToByteSlice(metaBlock.GetNonce()) + err := ssb.store.GetStorer(dataRetriever.MetaHdrNonceHashDataUnit).Remove(nonceToByteSlice) + if err != nil { + log.Debug("meta block was not removed from MetaHdrNonceHashDataUnit storage", + "shardId", metaBlock.GetShardID(), + "nonce", metaBlock.GetNonce(), + "hash", metaBlockHash, + "error", err.Error()) + } +} + +func (ssb *shardStorageBootstrapper) removeMetaFromMetaBlockUnit(metaBlock *block.MetaBlock, metaBlockHash []byte) { + err := ssb.store.GetStorer(dataRetriever.MetaBlockUnit).Remove(metaBlockHash) + if err != nil { + log.Debug("meta block was not removed from MetaBlockUnit storage", + "shardId", metaBlock.GetShardID(), + "nonce", metaBlock.GetNonce(), + "hash", metaBlockHash, + "error", err.Error()) + } +} + +func getLastCrossNotarizedHeaderNonce(crossNotarizedHeaders []bootstrapStorage.BootstrapHeaderInfo) (uint64, error) { + for _, crossNotarizedHeader := range crossNotarizedHeaders { + if crossNotarizedHeader.ShardId != core.MetachainShardId { + continue } + + log.Debug("last cross notarized header", + "shard", crossNotarizedHeader.ShardId, + "epoch", crossNotarizedHeader.Epoch, + "nonce", crossNotarizedHeader.Nonce, + "hash", crossNotarizedHeader.Hash) + + return crossNotarizedHeader.Nonce, nil } + + return 0, sync.ErrHeaderNotFound } func (ssb *shardStorageBootstrapper) applySelfNotarizedHeaders( diff --git a/process/sync/storageBootstrap/shardStorageBootstrapper_test.go b/process/sync/storageBootstrap/shardStorageBootstrapper_test.go index 5ff316b94a0..26d4c4becf1 100644 --- a/process/sync/storageBootstrap/shardStorageBootstrapper_test.go +++ b/process/sync/storageBootstrap/shardStorageBootstrapper_test.go @@ -1,18 +1,23 @@ 
package storageBootstrap import ( + "bytes" + "errors" "testing" + "github.com/ElrondNetwork/elrond-go-core/core" "github.com/ElrondNetwork/elrond-go-core/data" "github.com/ElrondNetwork/elrond-go-core/data/block" "github.com/ElrondNetwork/elrond-go/dataRetriever" "github.com/ElrondNetwork/elrond-go/process" "github.com/ElrondNetwork/elrond-go/process/block/bootstrapStorage" "github.com/ElrondNetwork/elrond-go/process/mock" + "github.com/ElrondNetwork/elrond-go/process/sync" "github.com/ElrondNetwork/elrond-go/storage" "github.com/ElrondNetwork/elrond-go/testscommon" epochNotifierMock "github.com/ElrondNetwork/elrond-go/testscommon/epochNotifier" "github.com/ElrondNetwork/elrond-go/testscommon/shardingMocks" + storageMock "github.com/ElrondNetwork/elrond-go/testscommon/storage" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -126,6 +131,7 @@ func TestShardStorageBootstrapper_LoadFromStorageShouldWork(t *testing.T) { wasCalledEpochNotifier = true }, }, + ProcessedMiniBlocksTracker: &testscommon.ProcessedMiniBlocksTrackerStub{}, }, } @@ -141,3 +147,105 @@ func TestShardStorageBootstrapper_LoadFromStorageShouldWork(t *testing.T) { assert.Equal(t, int64(3999), savedLastRound) assert.True(t, wasCalledEpochNotifier) } + +func TestShardStorageBootstrapper_CleanupNotarizedStorageForHigherNoncesIfExist(t *testing.T) { + baseArgs := createMockShardStorageBoostrapperArgs() + + bForceError := true + numCalled := 0 + numKeysNotFound := 0 + metaNonce := uint64(2) + nonceToByteSlice := []byte("nonceToByteSlice") + metaHash := []byte("meta_hash") + + metaNonceToDelete := metaNonce + maxNumOfConsecutiveNoncesNotFoundAccepted + 2 + metaBlock := &block.MetaBlock{Nonce: metaNonceToDelete} + marshalledMetaBlock, _ := baseArgs.Marshalizer.Marshal(metaBlock) + + baseArgs.Uint64Converter = &mock.Uint64ByteSliceConverterMock{ + ToByteSliceCalled: func(u uint64) []byte { + if u == metaNonceToDelete { + return nonceToByteSlice + } + return []byte("") + }, + } + baseArgs.Store = &mock.ChainStorerMock{ + GetStorerCalled: func(unitType dataRetriever.UnitType) storage.Storer { + return &storageMock.StorerStub{ + RemoveCalled: func(key []byte) error { + if bForceError { + return errors.New("forced error") + } + + if bytes.Equal(key, nonceToByteSlice) { + numCalled++ + return nil + } + if bytes.Equal(key, metaHash) { + numCalled++ + return nil + } + + return errors.New("error") + }, + GetCalled: func(key []byte) ([]byte, error) { + if bytes.Equal(key, nonceToByteSlice) { + return metaHash, nil + } + if bytes.Equal(key, metaHash) { + return marshalledMetaBlock, nil + } + numKeysNotFound++ + return nil, errors.New("error") + }, + } + }, + } + + args := ArgsShardStorageBootstrapper{ + ArgsBaseStorageBootstrapper: baseArgs, + } + ssb, _ := NewShardStorageBootstrapper(args) + + crossNotarizedHeaders := make([]bootstrapStorage.BootstrapHeaderInfo, 0) + + crossNotarizedHeaders = append(crossNotarizedHeaders, bootstrapStorage.BootstrapHeaderInfo{ShardId: 0, Nonce: 1}) + ssb.cleanupNotarizedStorageForHigherNoncesIfExist(crossNotarizedHeaders) + assert.Equal(t, 0, numCalled) + + crossNotarizedHeaders = append(crossNotarizedHeaders, bootstrapStorage.BootstrapHeaderInfo{ShardId: core.MetachainShardId, Nonce: metaNonce}) + ssb.cleanupNotarizedStorageForHigherNoncesIfExist(crossNotarizedHeaders) + assert.Equal(t, 0, numCalled) + assert.Equal(t, maxNumOfConsecutiveNoncesNotFoundAccepted, numKeysNotFound-1) + + numKeysNotFound = 0 + metaNonceToDelete = metaNonce + maxNumOfConsecutiveNoncesNotFoundAccepted 
+ 1 + metaBlock = &block.MetaBlock{Nonce: metaNonceToDelete} + marshalledMetaBlock, _ = baseArgs.Marshalizer.Marshal(metaBlock) + + ssb.cleanupNotarizedStorageForHigherNoncesIfExist(crossNotarizedHeaders) + assert.Equal(t, 0, numCalled) + assert.Equal(t, maxNumOfConsecutiveNoncesNotFoundAccepted*2, numKeysNotFound-1) + + numKeysNotFound = 0 + bForceError = false + + ssb.cleanupNotarizedStorageForHigherNoncesIfExist(crossNotarizedHeaders) + assert.Equal(t, 2, numCalled) + assert.Equal(t, maxNumOfConsecutiveNoncesNotFoundAccepted*2, numKeysNotFound-1) +} + +func TestShardStorageBootstrapper_GetCrossNotarizedHeaderNonceShouldWork(t *testing.T) { + crossNotarizedHeaders := make([]bootstrapStorage.BootstrapHeaderInfo, 0) + + crossNotarizedHeaders = append(crossNotarizedHeaders, bootstrapStorage.BootstrapHeaderInfo{ShardId: 0, Nonce: 1}) + nonce, err := getLastCrossNotarizedHeaderNonce(crossNotarizedHeaders) + assert.Equal(t, sync.ErrHeaderNotFound, err) + assert.Equal(t, uint64(0), nonce) + + crossNotarizedHeaders = append(crossNotarizedHeaders, bootstrapStorage.BootstrapHeaderInfo{ShardId: core.MetachainShardId, Nonce: 2}) + nonce, err = getLastCrossNotarizedHeaderNonce(crossNotarizedHeaders) + assert.Nil(t, err) + assert.Equal(t, uint64(2), nonce) +} diff --git a/process/transaction/metaProcess_test.go b/process/transaction/metaProcess_test.go index f6589c3396a..c1b15d8fdee 100644 --- a/process/transaction/metaProcess_test.go +++ b/process/transaction/metaProcess_test.go @@ -13,9 +13,9 @@ import ( txproc "github.com/ElrondNetwork/elrond-go/process/transaction" "github.com/ElrondNetwork/elrond-go/state" "github.com/ElrondNetwork/elrond-go/testscommon" - stateMock "github.com/ElrondNetwork/elrond-go/testscommon/state" "github.com/ElrondNetwork/elrond-go/testscommon/epochNotifier" "github.com/ElrondNetwork/elrond-go/testscommon/hashingMocks" + stateMock "github.com/ElrondNetwork/elrond-go/testscommon/state" vmcommon "github.com/ElrondNetwork/elrond-vm-common" "github.com/ElrondNetwork/elrond-vm-common/builtInFunctions" "github.com/ElrondNetwork/elrond-vm-common/parsers" diff --git a/sharding/networksharding/peerShardMapper.go b/sharding/networksharding/peerShardMapper.go index a332db6f1ef..625596c874a 100644 --- a/sharding/networksharding/peerShardMapper.go +++ b/sharding/networksharding/peerShardMapper.go @@ -41,7 +41,7 @@ type PeerShardMapper struct { fallbackPkShardCache storage.Cacher fallbackPidShardCache storage.Cacher peerIdSubTypeCache storage.Cacher - mutUpdatePeerIdPublicKey sync.Mutex + mutUpdatePeerIdPublicKey sync.RWMutex mutEpoch sync.RWMutex epoch uint32 @@ -149,7 +149,7 @@ func (psm *PeerShardMapper) getPeerInfoWithNodesCoordinator(pid core.PeerID) (*c pkBuff, ok := pkObj.([]byte) if !ok { - log.Warn("PeerShardMapper.getShardIDWithNodesCoordinator: the contained element should have been of type []byte") + log.Warn("PeerShardMapper.getPeerInfoWithNodesCoordinator: the contained element should have been of type []byte") return &core.P2PPeerInfo{ PeerType: core.UnknownPeer, @@ -201,7 +201,7 @@ func (psm *PeerShardMapper) getPeerSubType(pid core.PeerID) core.P2PPeerSubType subType, ok := subTypeObj.(core.P2PPeerSubType) if !ok { - log.Warn("PeerShardMapper.getPeerInfoSearchingPidInFallbackCache: the contained element should have been of type core.P2PPeerSubType") + log.Warn("PeerShardMapper.getPeerSubType: the contained element should have been of type core.P2PPeerSubType") return core.RegularPeer } @@ -219,7 +219,7 @@ func (psm *PeerShardMapper) 
getPeerInfoSearchingPidInFallbackCache(pid core.Peer shard, ok := shardObj.(uint32) if !ok { - log.Warn("PeerShardMapper.getShardIDSearchingPidInFallbackCache: the contained element should have been of type uint32") + log.Warn("PeerShardMapper.getPeerInfoSearchingPidInFallbackCache: the contained element should have been of type uint32") return &core.P2PPeerInfo{ PeerType: core.UnknownPeer, @@ -234,7 +234,42 @@ func (psm *PeerShardMapper) getPeerInfoSearchingPidInFallbackCache(pid core.Peer } } -// UpdatePeerIDInfo updates the public keys and the shard ID for the peer IDin the corresponding maps +// GetLastKnownPeerID returns the newest updated peer id for the given public key +func (psm *PeerShardMapper) GetLastKnownPeerID(pk []byte) (*core.PeerID, bool) { + psm.mutUpdatePeerIdPublicKey.RLock() + defer psm.mutUpdatePeerIdPublicKey.RUnlock() + + objPidsQueue, found := psm.pkPeerIdCache.Get(pk) + if !found { + return nil, false + } + + pq, ok := objPidsQueue.(*pidQueue) + if !ok { + log.Warn("PeerShardMapper.GetLastKnownPeerID: the contained element should have been of type pidQueue") + return nil, false + } + + if len(pq.data) == 0 { + log.Warn("PeerShardMapper.GetLastKnownPeerID: empty pidQueue element") + return nil, false + } + + latestPeerId := &pq.data[len(pq.data)-1] + return latestPeerId, true +} + +// UpdatePeerIDPublicKeyPair updates the public key - peer ID pair in the corresponding maps +// It also uses the intermediate pkPeerId cache that will prevent having thousands of peer ID's with +// the same Elrond PK that will make the node prone to an eclipse attack +func (psm *PeerShardMapper) UpdatePeerIDPublicKeyPair(pid core.PeerID, pk []byte) { + isNew := psm.updatePeerIDPublicKey(pid, pk) + if isNew { + peerLog.Trace("new peer mapping", "pid", pid.Pretty(), "pk", pk) + } +} + +// UpdatePeerIDInfo updates the public keys and the shard ID for the peer ID in the corresponding maps // It also uses the intermediate pkPeerId cache that will prevent having thousands of peer ID's with // the same Elrond PK that will make the node prone to an eclipse attack func (psm *PeerShardMapper) UpdatePeerIDInfo(pid core.PeerID, pk []byte, shardID uint32) { @@ -246,17 +281,18 @@ func (psm *PeerShardMapper) UpdatePeerIDInfo(pid core.PeerID, pk []byte, shardID if shardID == core.AllShardId { return } - psm.updatePublicKeyShardId(pk, shardID) - psm.updatePeerIdShardId(pid, shardID) - psm.preferredPeersHolder.Put(pk, pid, shardID) + psm.putPublicKeyShardId(pk, shardID) + psm.PutPeerIdShardId(pid, shardID) } -func (psm *PeerShardMapper) updatePublicKeyShardId(pk []byte, shardId uint32) { - psm.fallbackPkShardCache.HasOrAdd(pk, shardId, uint32Size) +func (psm *PeerShardMapper) putPublicKeyShardId(pk []byte, shardId uint32) { + psm.fallbackPkShardCache.Put(pk, shardId, uint32Size) } -func (psm *PeerShardMapper) updatePeerIdShardId(pid core.PeerID, shardId uint32) { - psm.fallbackPidShardCache.HasOrAdd([]byte(pid), shardId, uint32Size) +// PutPeerIdShardId puts the peer ID and shard ID into fallback cache in case it does not exists +func (psm *PeerShardMapper) PutPeerIdShardId(pid core.PeerID, shardId uint32) { + psm.fallbackPidShardCache.Put([]byte(pid), shardId, uint32Size) + psm.preferredPeersHolder.PutShardID(pid, shardId) } // updatePeerIDPublicKey will update the pid <-> pk mapping, returning true if the pair is a new known pair @@ -299,7 +335,7 @@ func (psm *PeerShardMapper) updatePeerIDPublicKey(pid core.PeerID, pk []byte) bo psm.peerIdPkCache.Remove([]byte(evictedPid)) 
psm.fallbackPidShardCache.Remove([]byte(evictedPid)) } - psm.pkPeerIdCache.Put(pk, pq, pq.size()) + psm.pkPeerIdCache.Put(pk, pq, pq.dataSizeInBytes()) psm.peerIdPkCache.Put([]byte(pid), pk, len(pk)) return isNew @@ -335,13 +371,13 @@ func (psm *PeerShardMapper) removePidAssociation(pid core.PeerID) []byte { return oldPkBuff } - psm.pkPeerIdCache.Put(oldPkBuff, pq, pq.size()) + psm.pkPeerIdCache.Put(oldPkBuff, pq, pq.dataSizeInBytes()) return oldPkBuff } -// UpdatePeerIdSubType updates the peerIdSubType search map containing peer IDs and peer subtypes -func (psm *PeerShardMapper) UpdatePeerIdSubType(pid core.PeerID, peerSubType core.P2PPeerSubType) { - psm.peerIdSubTypeCache.HasOrAdd([]byte(pid), peerSubType, uint32Size) +// PutPeerIdSubType puts the peerIdSubType search map containing peer IDs and peer subtypes +func (psm *PeerShardMapper) PutPeerIdSubType(pid core.PeerID, peerSubType core.P2PPeerSubType) { + psm.peerIdSubTypeCache.Put([]byte(pid), peerSubType, uint32Size) } // EpochStartAction is the method called whenever an action needs to be undertaken in respect to the epoch change diff --git a/sharding/networksharding/peerShardMapper_test.go b/sharding/networksharding/peerShardMapper_test.go index 343570172f2..b6bd8e8c572 100644 --- a/sharding/networksharding/peerShardMapper_test.go +++ b/sharding/networksharding/peerShardMapper_test.go @@ -17,7 +17,6 @@ import ( "github.com/ElrondNetwork/elrond-go/testscommon/p2pmocks" "github.com/ElrondNetwork/elrond-go/testscommon/shardingMocks" "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) const epochZero = uint32(0) @@ -139,28 +138,6 @@ func TestPeerShardMapper_UpdatePeerIDInfoShouldWork(t *testing.T) { peerInfo) } -func TestPeerShardMapper_UpdatePeerIDInfoShouldAddInPreferredPeers(t *testing.T) { - t.Parallel() - - expectedPid := core.PeerID("dummy peer ID") - expectedPk := []byte("dummy pk") - expectedShardID := uint32(3737) - putWasCalled := false - arg := createMockArgumentForPeerShardMapper() - arg.PreferredPeersHolder = &p2pmocks.PeersHolderStub{ - PutCalled: func(publicKey []byte, peerID core.PeerID, shardID uint32) { - putWasCalled = true - require.Equal(t, expectedPid, peerID) - require.Equal(t, expectedPk, publicKey) - require.Equal(t, expectedShardID, shardID) - }, - } - psm, _ := networksharding.NewPeerShardMapper(arg) - - psm.UpdatePeerIDInfo(expectedPid, expectedPk, expectedShardID) - require.True(t, putWasCalled) -} - func TestPeerShardMapper_UpdatePeerIDInfoMorePidsThanAllowedShouldTrim(t *testing.T) { t.Parallel() @@ -250,6 +227,24 @@ func TestPeerShardMapper_UpdatePeerIDInfoShouldWorkConcurrently(t *testing.T) { assert.Equal(t, shardId, shardidRecovered) } +// ------- UpdatePeerIDPublicKeyPair + +func TestPeerShardMapper_UpdatePeerIDPublicKeyPairShouldWork(t *testing.T) { + t.Parallel() + + psm := createPeerShardMapper() + pid := core.PeerID("dummy peer ID") + pk := []byte("dummy pk") + + psm.UpdatePeerIDPublicKeyPair(pid, pk) + + pkRecovered := psm.GetPkFromPidPk(pid) + assert.Equal(t, pk, pkRecovered) + + pidRecovered := psm.GetFromPkPeerId(pk) + assert.Equal(t, []core.PeerID{pid}, pidRecovered) +} + // ------- GetPeerInfo func TestPeerShardMapper_GetPeerInfoPkNotFoundShouldReturnUnknown(t *testing.T) { @@ -582,3 +577,67 @@ func TestPeerShardMapper_UpdatePeerIDPublicKey(t *testing.T) { assert.False(t, psm.UpdatePeerIDPublicKey(pid2, pk1)) }) } + +func TestPeerShardMapper_GetLastKnownPeerID(t *testing.T) { + t.Parallel() + + pid1 := core.PeerID("pid1") + pid2 := core.PeerID("pid2") + pk1 := 
[]byte("pk1") + pk2 := []byte("pk2") + + t.Run("no pk in cache should return false", func(t *testing.T) { + t.Parallel() + + psm := createPeerShardMapper() + pid, ok := psm.GetLastKnownPeerID(pk1) + assert.Nil(t, pid) + assert.False(t, ok) + }) + t.Run("cast error should return false", func(t *testing.T) { + t.Parallel() + + psm := createPeerShardMapper() + dummyData := "dummy data" + psm.PkPeerId().Put(pk1, dummyData, len(dummyData)) + + pid, ok := psm.GetLastKnownPeerID(pk1) + assert.Nil(t, pid) + assert.False(t, ok) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + psm := createPeerShardMapper() + psm.UpdatePeerIDPublicKeyPair(pid1, pk1) + pid, ok := psm.GetLastKnownPeerID(pk1) + assert.True(t, ok) + assert.Equal(t, &pid1, pid) + + psm.UpdatePeerIDPublicKeyPair(pid2, pk2) + pid, ok = psm.GetLastKnownPeerID(pk2) + assert.True(t, ok) + assert.Equal(t, &pid2, pid) + }) +} + +func TestPeerShardMapper_PutPeerIdShardId(t *testing.T) { + t.Parallel() + + providedPid := core.PeerID("provided pid") + providedShardID := uint32(123) + wasCalled := false + args := createMockArgumentForPeerShardMapper() + args.PreferredPeersHolder = &p2pmocks.PeersHolderStub{ + PutShardIDCalled: func(peerID core.PeerID, shardID uint32) { + wasCalled = true + assert.Equal(t, providedPid, peerID) + assert.Equal(t, providedShardID, shardID) + }, + } + psm, _ := networksharding.NewPeerShardMapper(args) + assert.False(t, check.IfNil(psm)) + + psm.PutPeerIdShardId(providedPid, providedShardID) + assert.True(t, wasCalled) +} diff --git a/sharding/networksharding/pidQueue.go b/sharding/networksharding/pidQueue.go index 7a5bd395181..ef4291f1a2b 100644 --- a/sharding/networksharding/pidQueue.go +++ b/sharding/networksharding/pidQueue.go @@ -61,7 +61,7 @@ func (pq *pidQueue) remove(pid core.PeerID) { pq.data = newData } -func (pq *pidQueue) size() int { +func (pq *pidQueue) dataSizeInBytes() int { sum := 0 for _, pid := range pq.data { sum += len(pid) diff --git a/sharding/networksharding/pidQueue_test.go b/sharding/networksharding/pidQueue_test.go index 1d08d314311..ef31a591979 100644 --- a/sharding/networksharding/pidQueue_test.go +++ b/sharding/networksharding/pidQueue_test.go @@ -138,18 +138,18 @@ func TestPidQueue_RemoveShouldWork(t *testing.T) { assert.Equal(t, 1, pq.indexOf(pid2)) } -func TestPidQueue_Size(t *testing.T) { +func TestPidQueue_dataSizeInBytes(t *testing.T) { t.Parallel() pq := newPidQueue() - assert.Equal(t, 0, pq.size()) + assert.Equal(t, 0, pq.dataSizeInBytes()) pq.push("pid 0") - assert.Equal(t, 5, pq.size()) + assert.Equal(t, 5, pq.dataSizeInBytes()) pq.push("pid 1") - assert.Equal(t, 10, pq.size()) + assert.Equal(t, 10, pq.dataSizeInBytes()) pq.push("0") - assert.Equal(t, 11, pq.size()) + assert.Equal(t, 11, pq.dataSizeInBytes()) } diff --git a/statusHandler/persister/persistentHandler.go b/statusHandler/persister/persistentHandler.go index c86ffabd626..f714bb3c390 100644 --- a/statusHandler/persister/persistentHandler.go +++ b/statusHandler/persister/persistentHandler.go @@ -139,6 +139,11 @@ func (psh *PersistentStatusHandler) SetUInt64Value(key string, value uint64) { return } + if value == 0 { + // do not write in database when the metrics are initialized. 
as a side effect, metrics for genesis block won't be saved + return + } + psh.saveMetricsInDb(value) } diff --git a/statusHandler/statusMetricsProvider.go b/statusHandler/statusMetricsProvider.go index 51def41be71..670cff3e721 100644 --- a/statusHandler/statusMetricsProvider.go +++ b/statusHandler/statusMetricsProvider.go @@ -262,6 +262,7 @@ func (sm *statusMetrics) EnableEpochsMetrics() (map[string]interface{}, error) { enableEpochsMetrics[common.MetricIncrementSCRNonceInMultiTransferEnableEpoch] = sm.uint64Metrics[common.MetricIncrementSCRNonceInMultiTransferEnableEpoch] enableEpochsMetrics[common.MetricBalanceWaitingListsEnableEpoch] = sm.uint64Metrics[common.MetricBalanceWaitingListsEnableEpoch] enableEpochsMetrics[common.MetricWaitingListFixEnableEpoch] = sm.uint64Metrics[common.MetricWaitingListFixEnableEpoch] + enableEpochsMetrics[common.MetricHeartbeatDisableEpoch] = sm.uint64Metrics[common.MetricHeartbeatDisableEpoch] numNodesChangeConfig := sm.uint64Metrics[common.MetricMaxNodesChangeEnableEpoch+"_count"] @@ -288,11 +289,18 @@ func (sm *statusMetrics) EnableEpochsMetrics() (map[string]interface{}, error) { // NetworkMetrics will return metrics related to current configuration func (sm *statusMetrics) NetworkMetrics() (map[string]interface{}, error) { + networkMetrics := make(map[string]interface{}) + + sm.saveUint64MetricsInMap(networkMetrics) + sm.saveStringMetricsInMap(networkMetrics) + + return networkMetrics, nil +} + +func (sm *statusMetrics) saveUint64MetricsInMap(networkMetrics map[string]interface{}) { sm.mutUint64Operations.RLock() defer sm.mutUint64Operations.RUnlock() - networkMetrics := make(map[string]interface{}) - currentRound := sm.uint64Metrics[common.MetricCurrentRound] roundNumberAtEpochStart := sm.uint64Metrics[common.MetricRoundAtEpochStart] @@ -316,8 +324,16 @@ func (sm *statusMetrics) NetworkMetrics() (map[string]interface{}, error) { noncesPassedInEpoch = currentNonce - nonceAtEpochStart } networkMetrics[common.MetricNoncesPassedInCurrentEpoch] = noncesPassedInEpoch +} - return networkMetrics, nil +func (sm *statusMetrics) saveStringMetricsInMap(networkMetrics map[string]interface{}) { + sm.mutStringOperations.RLock() + defer sm.mutStringOperations.RUnlock() + + crossCheckValue := sm.stringMetrics[common.MetricCrossCheckBlockHeight] + if len(crossCheckValue) > 0 { + networkMetrics[common.MetricCrossCheckBlockHeight] = crossCheckValue + } } // RatingsMetrics will return metrics related to current configuration diff --git a/statusHandler/statusMetricsProvider_test.go b/statusHandler/statusMetricsProvider_test.go index cbc7a0fe0a7..360b7624e65 100644 --- a/statusHandler/statusMetricsProvider_test.go +++ b/statusHandler/statusMetricsProvider_test.go @@ -227,8 +227,18 @@ func TestStatusMetrics_NetworkMetrics(t *testing.T) { "erd_nonces_passed_in_current_epoch": uint64(85), } - configMetrics, _ := sm.NetworkMetrics() - assert.Equal(t, expectedConfig, configMetrics) + t.Run("no cross check value", func(t *testing.T) { + configMetrics, _ := sm.NetworkMetrics() + assert.Equal(t, expectedConfig, configMetrics) + }) + t.Run("with cross check value", func(t *testing.T) { + crossCheckValue := "0: 9169897, 1: 9166353, 2: 9170524, " + sm.SetStringValue(common.MetricCrossCheckBlockHeight, crossCheckValue) + + configMetrics, _ := sm.NetworkMetrics() + expectedConfig[common.MetricCrossCheckBlockHeight] = crossCheckValue + assert.Equal(t, expectedConfig, configMetrics) + }) } func TestStatusMetrics_EnableEpochMetrics(t *testing.T) { @@ -259,6 +269,7 @@ func 
TestStatusMetrics_EnableEpochMetrics(t *testing.T) { sm.SetUInt64Value(common.MetricIncrementSCRNonceInMultiTransferEnableEpoch, 3) sm.SetUInt64Value(common.MetricBalanceWaitingListsEnableEpoch, 4) sm.SetUInt64Value(common.MetricWaitingListFixEnableEpoch, 1) + sm.SetUInt64Value(common.MetricHeartbeatDisableEpoch, 5) maxNodesChangeConfig := []map[string]uint64{ { @@ -321,6 +332,7 @@ func TestStatusMetrics_EnableEpochMetrics(t *testing.T) { common.MetricNodesToShufflePerShard: uint64(5), }, }, + common.MetricHeartbeatDisableEpoch: uint64(5), } epochsMetrics, _ := sm.EnableEpochsMetrics() diff --git a/storage/databaseremover/disabled/disabledCustomDatabaseRemover.go b/storage/databaseremover/disabled/disabledCustomDatabaseRemover.go index d14bdd08500..7ca977d87ba 100644 --- a/storage/databaseremover/disabled/disabledCustomDatabaseRemover.go +++ b/storage/databaseremover/disabled/disabledCustomDatabaseRemover.go @@ -1,6 +1,6 @@ package disabled -type disabledCustomDatabaseRemover struct {} +type disabledCustomDatabaseRemover struct{} // NewDisabledCustomDatabaseRemover returns a new instance of disabledCustomDatabaseRemover func NewDisabledCustomDatabaseRemover() *disabledCustomDatabaseRemover { diff --git a/storage/errors.go b/storage/errors.go index 3d8e4dff34e..765f7c3d333 100644 --- a/storage/errors.go +++ b/storage/errors.go @@ -147,3 +147,9 @@ var ErrNilOldDataCleanerProvider = errors.New("nil old data cleaner provider") // ErrNilStoredDataFactory signals that a nil stored data factory has been provided var ErrNilStoredDataFactory = errors.New("nil stored data factory") + +// ErrInvalidDefaultSpan signals that an invalid default span was provided +var ErrInvalidDefaultSpan = errors.New("invalid default span") + +// ErrInvalidCacheExpiry signals that an invalid cache expiry was provided +var ErrInvalidCacheExpiry = errors.New("invalid cache expiry") diff --git a/storage/interface.go b/storage/interface.go index f5fe3cf157b..f5abfc09f78 100644 --- a/storage/interface.go +++ b/storage/interface.go @@ -202,12 +202,19 @@ type SizedLRUCacheHandler interface { // TimeCacher defines the cache that can keep a record for a bounded time type TimeCacher interface { + Add(key string) error Upsert(key string, span time.Duration) error Has(key string) bool Sweep() + RegisterEvictionHandler(handler EvictionHandler) IsInterfaceNil() bool } +// EvictionHandler defines a component which can be registered on TimeCaher +type EvictionHandler interface { + Evicted(key []byte) +} + // AdaptedSizedLRUCache defines a cache that returns the evicted value type AdaptedSizedLRUCache interface { SizedLRUCacheHandler diff --git a/storage/mapTimeCache/mapTimeCache.go b/storage/mapTimeCache/mapTimeCache.go new file mode 100644 index 00000000000..77d61c46c2a --- /dev/null +++ b/storage/mapTimeCache/mapTimeCache.go @@ -0,0 +1,264 @@ +package mapTimeCache + +import ( + "bytes" + "context" + "encoding/gob" + "math" + "sync" + "time" + + logger "github.com/ElrondNetwork/elrond-go-logger" + "github.com/ElrondNetwork/elrond-go/storage" + "github.com/ElrondNetwork/elrond-go/storage/timecache" +) + +var log = logger.GetOrCreate("storage/maptimecache") + +const minDuration = time.Second + +// ArgMapTimeCacher is the argument used to create a new mapTimeCacher +type ArgMapTimeCacher struct { + DefaultSpan time.Duration + CacheExpiry time.Duration +} + +// mapTimeCacher implements a map cache with eviction and inner TimeCacher +type mapTimeCacher struct { + sync.RWMutex + dataMap map[string]interface{} + timeCache storage.TimeCacher 
+ cacheExpiry time.Duration + defaultTimeSpan time.Duration + cancelFunc func() + sizeInBytesContained uint64 +} + +// NewMapTimeCache creates a new mapTimeCacher +func NewMapTimeCache(arg ArgMapTimeCacher) (*mapTimeCacher, error) { + err := checkArg(arg) + if err != nil { + return nil, err + } + + mtc := &mapTimeCacher{ + dataMap: make(map[string]interface{}), + timeCache: timecache.NewTimeCache(arg.DefaultSpan), + cacheExpiry: arg.CacheExpiry, + defaultTimeSpan: arg.DefaultSpan, + } + + mtc.timeCache.RegisterEvictionHandler(mtc) + + var ctx context.Context + ctx, mtc.cancelFunc = context.WithCancel(context.Background()) + go mtc.startSweeping(ctx) + + return mtc, nil +} + +func checkArg(arg ArgMapTimeCacher) error { + if arg.DefaultSpan < minDuration { + return storage.ErrInvalidDefaultSpan + } + if arg.CacheExpiry < minDuration { + return storage.ErrInvalidCacheExpiry + } + + return nil +} + +// startSweeping handles sweeping the time cache +func (mtc *mapTimeCacher) startSweeping(ctx context.Context) { + timer := time.NewTimer(mtc.cacheExpiry) + defer timer.Stop() + + for { + timer.Reset(mtc.cacheExpiry) + + select { + case <-timer.C: + mtc.timeCache.Sweep() + case <-ctx.Done(): + log.Info("closing mapTimeCacher's sweep go routine...") + return + } + } +} + +// Evicted is the handler called on Sweep method +func (mtc *mapTimeCacher) Evicted(key []byte) { + mtc.Remove(key) +} + +// Clear deletes all stored data +func (mtc *mapTimeCacher) Clear() { + mtc.Lock() + defer mtc.Unlock() + + mtc.dataMap = make(map[string]interface{}) + mtc.sizeInBytesContained = 0 +} + +// Put adds a value to the cache. Returns true if an eviction occurred +func (mtc *mapTimeCacher) Put(key []byte, value interface{}, _ int) (evicted bool) { + mtc.Lock() + defer mtc.Unlock() + + oldValue, found := mtc.dataMap[string(key)] + mtc.dataMap[string(key)] = value + mtc.addSizeContained(value) + if found { + mtc.subtractSizeContained(oldValue) + mtc.upsertToTimeCache(key) + return false + } + + mtc.addToTimeCache(key) + return false +} + +// Get returns a key's value from the cache +func (mtc *mapTimeCacher) Get(key []byte) (value interface{}, ok bool) { + mtc.RLock() + defer mtc.RUnlock() + + v, ok := mtc.dataMap[string(key)] + return v, ok +} + +// Has checks if a key is in the cache +func (mtc *mapTimeCacher) Has(key []byte) bool { + mtc.RLock() + defer mtc.RUnlock() + + _, ok := mtc.dataMap[string(key)] + return ok +} + +// Peek returns a key's value from the cache +func (mtc *mapTimeCacher) Peek(key []byte) (value interface{}, ok bool) { + return mtc.Get(key) +} + +// HasOrAdd checks if a key is in the cache. +// If key exists, does not update the value. 
Otherwise, adds the key-value in the cache +func (mtc *mapTimeCacher) HasOrAdd(key []byte, value interface{}, _ int) (has, added bool) { + mtc.Lock() + defer mtc.Unlock() + + _, ok := mtc.dataMap[string(key)] + if ok { + return true, false + } + + mtc.dataMap[string(key)] = value + mtc.addSizeContained(value) + mtc.upsertToTimeCache(key) + + return false, true +} + +// Remove removes the key from cache +func (mtc *mapTimeCacher) Remove(key []byte) { + if key == nil { + return + } + + mtc.Lock() + defer mtc.Unlock() + + mtc.subtractSizeContained(mtc.dataMap[string(key)]) + delete(mtc.dataMap, string(key)) +} + +// Keys returns all keys from cache +func (mtc *mapTimeCacher) Keys() [][]byte { + mtc.RLock() + defer mtc.RUnlock() + + keys := make([][]byte, len(mtc.dataMap)) + idx := 0 + for k := range mtc.dataMap { + keys[idx] = []byte(k) + idx++ + } + + return keys +} + +// Len returns the size of the cache +func (mtc *mapTimeCacher) Len() int { + mtc.RLock() + defer mtc.RUnlock() + + return len(mtc.dataMap) +} + +// SizeInBytesContained returns the size in bytes of all contained elements +func (mtc *mapTimeCacher) SizeInBytesContained() uint64 { + mtc.RLock() + defer mtc.RUnlock() + + return mtc.sizeInBytesContained +} + +// MaxSize returns the maximum number of items which can be stored in cache. +func (mtc *mapTimeCacher) MaxSize() int { + return math.MaxInt32 +} + +// RegisterHandler registers a handler, currently not needed +func (mtc *mapTimeCacher) RegisterHandler(_ func(key []byte, value interface{}), _ string) { +} + +// UnRegisterHandler unregisters a handler, currently not needed +func (mtc *mapTimeCacher) UnRegisterHandler(_ string) { +} + +// Close will close the internal sweep go routine +func (mtc *mapTimeCacher) Close() error { + if mtc.cancelFunc != nil { + mtc.cancelFunc() + } + + return nil +} + +func (mtc *mapTimeCacher) addToTimeCache(key []byte) { + err := mtc.timeCache.Add(string(key)) + if err != nil { + log.Error("could not add key", "key", string(key)) + } +} + +func (mtc *mapTimeCacher) upsertToTimeCache(key []byte) { + err := mtc.timeCache.Upsert(string(key), mtc.defaultTimeSpan) + if err != nil { + log.Error("could not upsert timestamp for key", "key", string(key)) + } +} + +func (mtc *mapTimeCacher) addSizeContained(value interface{}) { + mtc.sizeInBytesContained += mtc.computeSize(value) +} + +func (mtc *mapTimeCacher) subtractSizeContained(value interface{}) { + mtc.sizeInBytesContained -= mtc.computeSize(value) +} + +func (mtc *mapTimeCacher) computeSize(value interface{}) uint64 { + b := new(bytes.Buffer) + err := gob.NewEncoder(b).Encode(value) + if err != nil { + log.Error(err.Error()) + return 0 + } + + return uint64(b.Len()) +} + +// IsInterfaceNil returns true if there is no value under the interface +func (mtc *mapTimeCacher) IsInterfaceNil() bool { + return mtc == nil +} diff --git a/storage/mapTimeCache/mapTimeCache_test.go b/storage/mapTimeCache/mapTimeCache_test.go new file mode 100644 index 00000000000..23a3ed3b1b8 --- /dev/null +++ b/storage/mapTimeCache/mapTimeCache_test.go @@ -0,0 +1,292 @@ +package mapTimeCache_test + +import ( + "bytes" + "encoding/gob" + "math" + "sort" + "testing" + "time" + + "github.com/ElrondNetwork/elrond-go/storage" + "github.com/ElrondNetwork/elrond-go/storage/mapTimeCache" + "github.com/stretchr/testify/assert" +) + +func createArgMapTimeCache() mapTimeCache.ArgMapTimeCacher { + return mapTimeCache.ArgMapTimeCacher{ + DefaultSpan: time.Minute, + CacheExpiry: time.Minute, + } +} + +func createKeysVals(numOfPairs int) 
([][]byte, [][]byte) { + keys := make([][]byte, numOfPairs) + vals := make([][]byte, numOfPairs) + for i := 0; i < numOfPairs; i++ { + keys[i] = []byte("k" + string(rune(i))) + vals[i] = []byte("v" + string(rune(i))) + } + + return keys, vals +} + +func TestNewMapTimeCache(t *testing.T) { + t.Parallel() + + t.Run("invalid DefaultSpan should error", func(t *testing.T) { + t.Parallel() + + arg := createArgMapTimeCache() + arg.DefaultSpan = time.Second - time.Nanosecond + cacher, err := mapTimeCache.NewMapTimeCache(arg) + assert.Nil(t, cacher) + assert.Equal(t, storage.ErrInvalidDefaultSpan, err) + }) + t.Run("invalid CacheExpiry should error", func(t *testing.T) { + t.Parallel() + + arg := createArgMapTimeCache() + arg.CacheExpiry = time.Second - time.Nanosecond + cacher, err := mapTimeCache.NewMapTimeCache(arg) + assert.Nil(t, cacher) + assert.Equal(t, storage.ErrInvalidCacheExpiry, err) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + cacher, err := mapTimeCache.NewMapTimeCache(createArgMapTimeCache()) + assert.Nil(t, err) + assert.False(t, cacher.IsInterfaceNil()) + }) +} + +func TestMapTimeCacher_Clear(t *testing.T) { + t.Parallel() + + cacher, _ := mapTimeCache.NewMapTimeCache(createArgMapTimeCache()) + assert.False(t, cacher.IsInterfaceNil()) + + numOfPairs := 3 + providedKeys, providedVals := createKeysVals(numOfPairs) + for i := 0; i < numOfPairs; i++ { + cacher.Put(providedKeys[i], providedVals[i], len(providedVals[i])) + } + assert.Equal(t, numOfPairs, cacher.Len()) + + cacher.Clear() + assert.Equal(t, 0, cacher.Len()) +} + +func TestMapTimeCacher_Close(t *testing.T) { + t.Parallel() + + cacher, _ := mapTimeCache.NewMapTimeCache(createArgMapTimeCache()) + assert.False(t, cacher.IsInterfaceNil()) + + err := cacher.Close() + assert.Nil(t, err) +} + +func TestMapTimeCacher_Get(t *testing.T) { + t.Parallel() + + cacher, _ := mapTimeCache.NewMapTimeCache(createArgMapTimeCache()) + assert.False(t, cacher.IsInterfaceNil()) + + providedKey, providedVal := []byte("key"), []byte("val") + cacher.Put(providedKey, providedVal, len(providedVal)) + + v, ok := cacher.Get(providedKey) + assert.True(t, ok) + assert.Equal(t, providedVal, v) + + v, ok = cacher.Get([]byte("missing key")) + assert.False(t, ok) + assert.Nil(t, v) +} + +func TestMapTimeCacher_Has(t *testing.T) { + t.Parallel() + + cacher, _ := mapTimeCache.NewMapTimeCache(createArgMapTimeCache()) + assert.False(t, cacher.IsInterfaceNil()) + + providedKey, providedVal := []byte("key"), []byte("val") + cacher.Put(providedKey, providedVal, len(providedVal)) + + assert.True(t, cacher.Has(providedKey)) + assert.False(t, cacher.Has([]byte("missing key"))) +} + +func TestMapTimeCacher_HasOrAdd(t *testing.T) { + t.Parallel() + + cacher, _ := mapTimeCache.NewMapTimeCache(createArgMapTimeCache()) + assert.False(t, cacher.IsInterfaceNil()) + + providedKey, providedVal := []byte("key"), []byte("val") + has, added := cacher.HasOrAdd(providedKey, providedVal, len(providedVal)) + assert.False(t, has) + assert.True(t, added) + + has, added = cacher.HasOrAdd(providedKey, providedVal, len(providedVal)) + assert.True(t, has) + assert.False(t, added) +} + +func TestMapTimeCacher_Keys(t *testing.T) { + t.Parallel() + + cacher, _ := mapTimeCache.NewMapTimeCache(createArgMapTimeCache()) + assert.False(t, cacher.IsInterfaceNil()) + + numOfPairs := 10 + providedKeys, providedVals := createKeysVals(numOfPairs) + for i := 0; i < numOfPairs; i++ { + cacher.Put(providedKeys[i], providedVals[i], len(providedVals[i])) + } + + receivedKeys := 
cacher.Keys() + assert.Equal(t, numOfPairs, len(receivedKeys)) + + sort.Slice(providedKeys, func(i, j int) bool { + return bytes.Compare(providedKeys[i], providedKeys[j]) < 0 + }) + sort.Slice(receivedKeys, func(i, j int) bool { + return bytes.Compare(receivedKeys[i], receivedKeys[j]) < 0 + }) + assert.Equal(t, providedKeys, receivedKeys) +} + +func TestMapTimeCacher_Evicted(t *testing.T) { + t.Parallel() + + arg := createArgMapTimeCache() + arg.CacheExpiry = 2 * time.Second + arg.DefaultSpan = time.Second + cacher, _ := mapTimeCache.NewMapTimeCache(arg) + assert.False(t, cacher.IsInterfaceNil()) + + numOfPairs := 2 + providedKeys, providedVals := createKeysVals(numOfPairs) + for i := 0; i < numOfPairs; i++ { + cacher.Put(providedKeys[i], providedVals[i], len(providedVals[i])) + } + assert.Equal(t, numOfPairs, cacher.Len()) + + time.Sleep(2 * arg.CacheExpiry) + assert.Equal(t, 0, cacher.Len()) + err := cacher.Close() + assert.Nil(t, err) +} + +func TestMapTimeCacher_Peek(t *testing.T) { + t.Parallel() + + cacher, _ := mapTimeCache.NewMapTimeCache(createArgMapTimeCache()) + assert.False(t, cacher.IsInterfaceNil()) + + providedKey, providedVal := []byte("key"), []byte("val") + cacher.Put(providedKey, providedVal, len(providedVal)) + + v, ok := cacher.Peek(providedKey) + assert.True(t, ok) + assert.Equal(t, providedVal, v) + + v, ok = cacher.Peek([]byte("missing key")) + assert.False(t, ok) + assert.Nil(t, v) +} + +func TestMapTimeCacher_Put(t *testing.T) { + t.Parallel() + + cacher, _ := mapTimeCache.NewMapTimeCache(createArgMapTimeCache()) + assert.False(t, cacher.IsInterfaceNil()) + + numOfPairs := 2 + keys, vals := createKeysVals(numOfPairs) + evicted := cacher.Put(keys[0], vals[0], len(vals[0])) + assert.False(t, evicted) + assert.Equal(t, 1, cacher.Len()) + evicted = cacher.Put(keys[0], vals[1], len(vals[1])) + assert.False(t, evicted) + assert.Equal(t, 1, cacher.Len()) +} + +func TestMapTimeCacher_Remove(t *testing.T) { + t.Parallel() + + defer func() { + if r := recover(); r != nil { + assert.Fail(t, "should not panic") + } + }() + + cacher, _ := mapTimeCache.NewMapTimeCache(createArgMapTimeCache()) + assert.False(t, cacher.IsInterfaceNil()) + + providedKey, providedVal := []byte("key"), []byte("val") + cacher.Put(providedKey, providedVal, len(providedVal)) + assert.Equal(t, 1, cacher.Len()) + + cacher.Remove(nil) + assert.Equal(t, 1, cacher.Len()) + + cacher.Remove(providedKey) + assert.Equal(t, 0, cacher.Len()) + + cacher.Remove(providedKey) +} + +func TestMapTimeCacher_SizeInBytesContained(t *testing.T) { + t.Parallel() + + cacher, _ := mapTimeCache.NewMapTimeCache(createArgMapTimeCache()) + assert.False(t, cacher.IsInterfaceNil()) + + providedKey, providedVal := []byte("key"), []byte("val") + cacher.Put(providedKey, providedVal, len(providedVal)) + + b := new(bytes.Buffer) + err := gob.NewEncoder(b).Encode(providedVal) + assert.Nil(t, err) + assert.Equal(t, uint64(b.Len()), cacher.SizeInBytesContained()) +} + +func TestMapTimeCacher_RegisterHandler(t *testing.T) { + t.Parallel() + + defer func() { + if r := recover(); r != nil { + assert.Fail(t, "should not panic") + } + }() + + cacher, _ := mapTimeCache.NewMapTimeCache(createArgMapTimeCache()) + assert.False(t, cacher.IsInterfaceNil()) + cacher.RegisterHandler(func(key []byte, value interface{}) {}, "0") +} + +func TestMapTimeCacher_UnRegisterHandler(t *testing.T) { + t.Parallel() + + defer func() { + if r := recover(); r != nil { + assert.Fail(t, "should not panic") + } + }() + + cacher, _ := 
mapTimeCache.NewMapTimeCache(createArgMapTimeCache()) + assert.False(t, cacher.IsInterfaceNil()) + cacher.UnRegisterHandler("0") +} + +func TestMapTimeCacher_MaxSize(t *testing.T) { + t.Parallel() + + cacher, _ := mapTimeCache.NewMapTimeCache(createArgMapTimeCache()) + assert.False(t, cacher.IsInterfaceNil()) + assert.Equal(t, math.MaxInt32, cacher.MaxSize()) +} diff --git a/storage/mock/sweepHandlerStub.go b/storage/mock/sweepHandlerStub.go new file mode 100644 index 00000000000..dd8001b6c53 --- /dev/null +++ b/storage/mock/sweepHandlerStub.go @@ -0,0 +1,13 @@ +package mock + +// EvictionHandlerStub - +type EvictionHandlerStub struct { + EvictedCalled func(key []byte) +} + +// Evicted - +func (sh *EvictionHandlerStub) Evicted(key []byte) { + if sh.EvictedCalled != nil { + sh.EvictedCalled(key) + } +} diff --git a/storage/mock/timeCacheStub.go b/storage/mock/timeCacheStub.go index 5d05da07c15..047fb8e7b5c 100644 --- a/storage/mock/timeCacheStub.go +++ b/storage/mock/timeCacheStub.go @@ -1,12 +1,27 @@ package mock -import "time" +import ( + "time" + + "github.com/ElrondNetwork/elrond-go/storage" +) // TimeCacheStub - type TimeCacheStub struct { - UpsertCalled func(key string, span time.Duration) error - HasCalled func(key string) bool - SweepCalled func() + AddCalled func(key string) error + UpsertCalled func(key string, span time.Duration) error + HasCalled func(key string) bool + SweepCalled func() + RegisterEvictionHandlerCalled func(handler storage.EvictionHandler) +} + +// Add - +func (tcs *TimeCacheStub) Add(key string) error { + if tcs.AddCalled != nil { + return tcs.AddCalled(key) + } + + return nil } // Upsert - @@ -34,6 +49,13 @@ func (tcs *TimeCacheStub) Sweep() { } } +// RegisterEvictionHandler - +func (tcs *TimeCacheStub) RegisterEvictionHandler(handler storage.EvictionHandler) { + if tcs.RegisterEvictionHandlerCalled != nil { + tcs.RegisterEvictionHandlerCalled(handler) + } +} + // IsInterfaceNil - func (tcs *TimeCacheStub) IsInterfaceNil() bool { return tcs == nil diff --git a/storage/pruning/export_test.go b/storage/pruning/export_test.go index 40e03226b99..a5d34af2e33 100644 --- a/storage/pruning/export_test.go +++ b/storage/pruning/export_test.go @@ -30,7 +30,6 @@ func (ps *PruningStorer) AddMockActivePersisters(epochs []uint32, ordered bool, ps.activePersisters = append(ps.activePersisters, pd) } - if withMap { ps.persistersMapByEpoch[e] = pd } diff --git a/storage/pruning/pruningStorer_test.go b/storage/pruning/pruningStorer_test.go index af524ae4b6a..43ec5b82fc8 100644 --- a/storage/pruning/pruningStorer_test.go +++ b/storage/pruning/pruningStorer_test.go @@ -839,17 +839,17 @@ func TestPruningStorer_ClosePersisters(t *testing.T) { ps, _ := pruning.NewPruningStorer(args) ps.ClearPersisters() - ps.AddMockActivePersisters([]uint32{0, 1}, true,true) + ps.AddMockActivePersisters([]uint32{0, 1}, true, true) err := ps.ClosePersisters(1) require.NoError(t, err) require.Equal(t, []uint32{0, 1}, ps.PersistersMapByEpochToSlice()) - ps.AddMockActivePersisters([]uint32{2, 3}, true,true) + ps.AddMockActivePersisters([]uint32{2, 3}, true, true) err = ps.ClosePersisters(3) require.NoError(t, err) require.Equal(t, []uint32{1, 2, 3}, ps.PersistersMapByEpochToSlice()) - ps.AddMockActivePersisters([]uint32{4, 5, 6}, true,true) + ps.AddMockActivePersisters([]uint32{4, 5, 6}, true, true) err = ps.ClosePersisters(6) require.NoError(t, err) require.Equal(t, []uint32{4, 5, 6}, ps.PersistersMapByEpochToSlice()) diff --git a/storage/timecache/timeCache.go b/storage/timecache/timeCache.go index 
70d71553fe4..90addfb7133 100644 --- a/storage/timecache/timeCache.go +++ b/storage/timecache/timeCache.go @@ -19,16 +19,18 @@ type span struct { // sweeping (clean-up) is triggered each time a new item is added or a key is present in the time cache // This data structure is concurrent safe. type TimeCache struct { - mut sync.RWMutex - data map[string]*span - defaultSpan time.Duration + mut sync.RWMutex + data map[string]*span + defaultSpan time.Duration + evictionHandlers []storage.EvictionHandler } // NewTimeCache creates a new time cache data structure instance func NewTimeCache(defaultSpan time.Duration) *TimeCache { return &TimeCache{ - data: make(map[string]*span), - defaultSpan: defaultSpan, + data: make(map[string]*span), + defaultSpan: defaultSpan, + evictionHandlers: make([]storage.EvictionHandler, 0), } } @@ -97,6 +99,7 @@ func (tc *TimeCache) Sweep() { isOldElement := time.Since(element.timestamp) > element.span if isOldElement { delete(tc.data, key) + tc.notifyHandlers([]byte(key)) } } } @@ -119,6 +122,23 @@ func (tc *TimeCache) Len() int { return len(tc.data) } +// RegisterEvictionHandler adds a handler to the handlers slice +func (tc *TimeCache) RegisterEvictionHandler(handler storage.EvictionHandler) { + if handler == nil { + return + } + + tc.mut.Lock() + tc.evictionHandlers = append(tc.evictionHandlers, handler) + tc.mut.Unlock() +} + +func (tc *TimeCache) notifyHandlers(key []byte) { + for _, handler := range tc.evictionHandlers { + handler.Evicted(key) + } +} + // IsInterfaceNil returns true if there is no value under the interface func (tc *TimeCache) IsInterfaceNil() bool { return tc == nil diff --git a/storage/timecache/timeCache_test.go b/storage/timecache/timeCache_test.go index 73bda3af81d..942d312b8da 100644 --- a/storage/timecache/timeCache_test.go +++ b/storage/timecache/timeCache_test.go @@ -1,11 +1,13 @@ package timecache import ( + "bytes" "testing" "time" "github.com/ElrondNetwork/elrond-go-core/core/check" "github.com/ElrondNetwork/elrond-go/storage" + "github.com/ElrondNetwork/elrond-go/storage/mock" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -215,6 +217,41 @@ func TestTimeCache_UpsertmoreSpanShouldUpdate(t *testing.T) { assert.Equal(t, highSpan, recovered.span) } +//------- RegisterHandler + +func TestTimeCache_RegisterNilHandler(t *testing.T) { + t.Parallel() + + tc := NewTimeCache(time.Second) + tc.RegisterEvictionHandler(nil) + assert.Equal(t, 0, len(tc.evictionHandlers)) +} + +func TestTimeCache_RegisterHandlerShouldWork(t *testing.T) { + t.Parallel() + + providedKey := "key1" + wasCalled := false + eh := &mock.EvictionHandlerStub{ + EvictedCalled: func(key []byte) { + assert.True(t, bytes.Equal([]byte(providedKey), key)) + wasCalled = true + }, + } + tc := NewTimeCache(time.Second) + tc.RegisterEvictionHandler(eh) + assert.Equal(t, 1, len(tc.evictionHandlers)) + _ = tc.Add(providedKey) + time.Sleep(time.Second) + tc.Sweep() + + exists := tc.Has(providedKey) + + assert.False(t, exists) + assert.Equal(t, 0, len(tc.Keys())) + assert.True(t, wasCalled) +} + //------- IsInterfaceNil func TestTimeCache_IsInterfaceNilNotNil(t *testing.T) { diff --git a/storage/txcache/disabledCache.go b/storage/txcache/disabledCache.go index bd214c96003..841d11ae18e 100644 --- a/storage/txcache/disabledCache.go +++ b/storage/txcache/disabledCache.go @@ -26,7 +26,7 @@ func (cache *DisabledCache) GetByTxHash(_ []byte) (*WrappedTransaction, bool) { } // SelectTransactionsWithBandwidth returns an empty slice -func (cache *DisabledCache) 
SelectTransactionsWithBandwidth(_ int, _ int, _ uint64) []*WrappedTransaction { +func (cache *DisabledCache) SelectTransactionsWithBandwidth(_ int, _ int, _ uint64) []*WrappedTransaction { return make([]*WrappedTransaction, 0) } diff --git a/storage/txcache/testutils_test.go b/storage/txcache/testutils_test.go index f7f49c0f556..76382eb7676 100644 --- a/storage/txcache/testutils_test.go +++ b/storage/txcache/testutils_test.go @@ -107,8 +107,8 @@ func createTx(hash []byte, sender string, nonce uint64) *WrappedTransaction { } func createTxWithGasLimit(hash []byte, sender string, nonce uint64, gasLimit uint64) *WrappedTransaction { tx := &transaction.Transaction{ - SndAddr: []byte(sender), - Nonce: nonce, + SndAddr: []byte(sender), + Nonce: nonce, GasLimit: gasLimit, } @@ -119,7 +119,6 @@ func createTxWithGasLimit(hash []byte, sender string, nonce uint64, gasLimit uin } } - func createTxWithParams(hash []byte, sender string, nonce uint64, size uint64, gasLimit uint64, gasPrice uint64) *WrappedTransaction { dataLength := int(size) - int(estimatedSizeOfBoundedTxFields) if dataLength < 0 { diff --git a/storage/txcache/txCache_test.go b/storage/txcache/txCache_test.go index 19aba447e8c..9014aceb9c1 100644 --- a/storage/txcache/txCache_test.go +++ b/storage/txcache/txCache_test.go @@ -284,7 +284,7 @@ func Test_SelectTransactionsWithBandwidth_Dummy(t *testing.T) { cache.AddTx(createTxWithGasLimit([]byte("hash-carol-1"), "carol", 1, 50000)) sorted := cache.SelectTransactionsWithBandwidth(5, 2, 200000) - numSelected := 1+1+3 // 1 alice, 1 carol, 3 bob + numSelected := 1 + 1 + 3 // 1 alice, 1 carol, 3 bob require.Len(t, sorted, numSelected) } diff --git a/testscommon/cacherStub.go b/testscommon/cacherStub.go index bbf7e33f032..e3e11dd811f 100644 --- a/testscommon/cacherStub.go +++ b/testscommon/cacherStub.go @@ -138,5 +138,6 @@ func (cacher *CacherStub) Close() error { if cacher.CloseCalled != nil { return cacher.CloseCalled() } + return nil } diff --git a/testscommon/cryptoMocks/peerSignatureHandlerStub.go b/testscommon/cryptoMocks/peerSignatureHandlerStub.go new file mode 100644 index 00000000000..a6bb3c04633 --- /dev/null +++ b/testscommon/cryptoMocks/peerSignatureHandlerStub.go @@ -0,0 +1,33 @@ +package cryptoMocks + +import ( + "github.com/ElrondNetwork/elrond-go-core/core" + crypto "github.com/ElrondNetwork/elrond-go-crypto" +) + +// PeerSignatureHandlerStub - +type PeerSignatureHandlerStub struct { + VerifyPeerSignatureCalled func(pk []byte, pid core.PeerID, signature []byte) error + GetPeerSignatureCalled func(key crypto.PrivateKey, pid []byte) ([]byte, error) +} + +// VerifyPeerSignature - +func (pshs *PeerSignatureHandlerStub) VerifyPeerSignature(pk []byte, pid core.PeerID, signature []byte) error { + if pshs.VerifyPeerSignatureCalled != nil { + return pshs.VerifyPeerSignatureCalled(pk, pid, signature) + } + return nil +} + +// GetPeerSignature - +func (pshs *PeerSignatureHandlerStub) GetPeerSignature(key crypto.PrivateKey, pid []byte) ([]byte, error) { + if pshs.GetPeerSignatureCalled != nil { + return pshs.GetPeerSignatureCalled(key, pid) + } + return nil, nil +} + +// IsInterfaceNil - +func (pshs *PeerSignatureHandlerStub) IsInterfaceNil() bool { + return false +} diff --git a/testscommon/dataRetriever/poolFactory.go b/testscommon/dataRetriever/poolFactory.go index be7bd68578f..14f2c4ee4a8 100644 --- a/testscommon/dataRetriever/poolFactory.go +++ b/testscommon/dataRetriever/poolFactory.go @@ -3,6 +3,7 @@ package dataRetriever import ( "fmt" "io/ioutil" + "time" 
"github.com/ElrondNetwork/elrond-go-core/marshal" "github.com/ElrondNetwork/elrond-go/config" @@ -12,12 +13,15 @@ import ( "github.com/ElrondNetwork/elrond-go/dataRetriever/shardedData" "github.com/ElrondNetwork/elrond-go/dataRetriever/txpool" "github.com/ElrondNetwork/elrond-go/storage/lrucache/capacity" + "github.com/ElrondNetwork/elrond-go/storage/mapTimeCache" "github.com/ElrondNetwork/elrond-go/storage/storageCacherAdapter" "github.com/ElrondNetwork/elrond-go/storage/storageUnit" "github.com/ElrondNetwork/elrond-go/testscommon/txcachemocks" "github.com/ElrondNetwork/elrond-go/trie/factory" ) +var peerAuthDuration = 10 * time.Second + func panicIfError(message string, err error) { if err != nil { panic(fmt.Sprintf("%s: %s", message, err)) @@ -112,6 +116,16 @@ func CreatePoolsHolder(numShards uint32, selfShard uint32) dataRetriever.PoolsHo smartContracts, err := storageUnit.NewCache(cacherConfig) panicIfError("CreatePoolsHolder", err) + peerAuthPool, err := mapTimeCache.NewMapTimeCache(mapTimeCache.ArgMapTimeCacher{ + DefaultSpan: 60 * time.Second, + CacheExpiry: 60 * time.Second, + }) + panicIfError("CreatePoolsHolder", err) + + cacherConfig = storageUnit.CacheConfig{Capacity: 50000, Type: storageUnit.LRUCache} + heartbeatPool, err := storageUnit.NewCache(cacherConfig) + panicIfError("CreatePoolsHolder", err) + currentTx := dataPool.NewCurrentBlockPool() dataPoolArgs := dataPool.DataPoolArgs{ Transactions: txPool, @@ -124,6 +138,8 @@ func CreatePoolsHolder(numShards uint32, selfShard uint32) dataRetriever.PoolsHo TrieNodesChunks: trieNodesChunks, CurrentBlockTransactions: currentTx, SmartContracts: smartContracts, + PeerAuthentications: peerAuthPool, + Heartbeats: heartbeatPool, } holder, err := dataPool.NewDataPool(dataPoolArgs) panicIfError("CreatePoolsHolder", err) @@ -174,6 +190,16 @@ func CreatePoolsHolderWithTxPool(txPool dataRetriever.ShardedDataCacherNotifier) smartContracts, err := storageUnit.NewCache(cacherConfig) panicIfError("CreatePoolsHolderWithTxPool", err) + peerAuthPool, err := mapTimeCache.NewMapTimeCache(mapTimeCache.ArgMapTimeCacher{ + DefaultSpan: peerAuthDuration, + CacheExpiry: peerAuthDuration, + }) + panicIfError("CreatePoolsHolderWithTxPool", err) + + cacherConfig = storageUnit.CacheConfig{Capacity: 50000, Type: storageUnit.LRUCache} + heartbeatPool, err := storageUnit.NewCache(cacherConfig) + panicIfError("CreatePoolsHolderWithTxPool", err) + currentTx := dataPool.NewCurrentBlockPool() dataPoolArgs := dataPool.DataPoolArgs{ Transactions: txPool, @@ -186,6 +212,8 @@ func CreatePoolsHolderWithTxPool(txPool dataRetriever.ShardedDataCacherNotifier) TrieNodesChunks: trieNodesChunks, CurrentBlockTransactions: currentTx, SmartContracts: smartContracts, + PeerAuthentications: peerAuthPool, + Heartbeats: heartbeatPool, } holder, err := dataPool.NewDataPool(dataPoolArgs) panicIfError("CreatePoolsHolderWithTxPool", err) diff --git a/testscommon/dataRetriever/poolsHolderMock.go b/testscommon/dataRetriever/poolsHolderMock.go index 112ada62273..c33716ee959 100644 --- a/testscommon/dataRetriever/poolsHolderMock.go +++ b/testscommon/dataRetriever/poolsHolderMock.go @@ -1,6 +1,9 @@ package dataRetriever import ( + "time" + + "github.com/ElrondNetwork/elrond-go-core/core/check" "github.com/ElrondNetwork/elrond-go/config" "github.com/ElrondNetwork/elrond-go/dataRetriever" "github.com/ElrondNetwork/elrond-go/dataRetriever/dataPool" @@ -8,6 +11,7 @@ import ( "github.com/ElrondNetwork/elrond-go/dataRetriever/shardedData" "github.com/ElrondNetwork/elrond-go/dataRetriever/txpool" 
"github.com/ElrondNetwork/elrond-go/storage" + "github.com/ElrondNetwork/elrond-go/storage/mapTimeCache" "github.com/ElrondNetwork/elrond-go/storage/storageUnit" "github.com/ElrondNetwork/elrond-go/testscommon/txcachemocks" ) @@ -24,6 +28,8 @@ type PoolsHolderMock struct { trieNodesChunks storage.Cacher smartContracts storage.Cacher currBlockTxs dataRetriever.TransactionCacher + peerAuthentications storage.Cacher + heartbeats storage.Cacher } // NewPoolsHolderMock - @@ -84,6 +90,15 @@ func NewPoolsHolderMock() *PoolsHolderMock { holder.smartContracts, err = storageUnit.NewCache(storageUnit.CacheConfig{Type: storageUnit.LRUCache, Capacity: 10000, Shards: 1, SizeInBytes: 0}) panicIfError("NewPoolsHolderMock", err) + holder.peerAuthentications, err = mapTimeCache.NewMapTimeCache(mapTimeCache.ArgMapTimeCacher{ + DefaultSpan: 10 * time.Second, + CacheExpiry: 10 * time.Second, + }) + panicIfError("NewPoolsHolderMock", err) + + holder.heartbeats, err = storageUnit.NewCache(storageUnit.CacheConfig{Type: storageUnit.LRUCache, Capacity: 10000, Shards: 1, SizeInBytes: 0}) + panicIfError("NewPoolsHolderMock", err) + return holder } @@ -147,6 +162,36 @@ func (holder *PoolsHolderMock) SmartContracts() storage.Cacher { return holder.smartContracts } +// PeerAuthentications - +func (holder *PoolsHolderMock) PeerAuthentications() storage.Cacher { + return holder.peerAuthentications +} + +// Heartbeats - +func (holder *PoolsHolderMock) Heartbeats() storage.Cacher { + return holder.heartbeats +} + +// Close - +func (holder *PoolsHolderMock) Close() error { + var lastError error + if !check.IfNil(holder.trieNodes) { + err := holder.trieNodes.Close() + if err != nil { + lastError = err + } + } + + if !check.IfNil(holder.peerAuthentications) { + err := holder.peerAuthentications.Close() + if err != nil { + lastError = err + } + } + + return lastError +} + // IsInterfaceNil returns true if there is no value under the interface func (holder *PoolsHolderMock) IsInterfaceNil() bool { return holder == nil diff --git a/testscommon/dataRetriever/poolsHolderStub.go b/testscommon/dataRetriever/poolsHolderStub.go index 7d6f7976f5e..a8dd89a04c5 100644 --- a/testscommon/dataRetriever/poolsHolderStub.go +++ b/testscommon/dataRetriever/poolsHolderStub.go @@ -19,6 +19,9 @@ type PoolsHolderStub struct { TrieNodesChunksCalled func() storage.Cacher PeerChangesBlocksCalled func() storage.Cacher SmartContractsCalled func() storage.Cacher + PeerAuthenticationsCalled func() storage.Cacher + HeartbeatsCalled func() storage.Cacher + CloseCalled func() error } // NewPoolsHolderStub - @@ -125,6 +128,33 @@ func (holder *PoolsHolderStub) SmartContracts() storage.Cacher { return testscommon.NewCacherStub() } +// PeerAuthentications - +func (holder *PoolsHolderStub) PeerAuthentications() storage.Cacher { + if holder.PeerAuthenticationsCalled != nil { + return holder.PeerAuthenticationsCalled() + } + + return testscommon.NewCacherStub() +} + +// Heartbeats - +func (holder *PoolsHolderStub) Heartbeats() storage.Cacher { + if holder.HeartbeatsCalled != nil { + return holder.HeartbeatsCalled() + } + + return testscommon.NewCacherStub() +} + +// Close - +func (holder *PoolsHolderStub) Close() error { + if holder.CloseCalled != nil { + return holder.CloseCalled() + } + + return nil +} + // IsInterfaceNil returns true if there is no value under the interface func (holder *PoolsHolderStub) IsInterfaceNil() bool { return holder == nil diff --git a/testscommon/gasHandlerStub.go b/testscommon/gasHandlerStub.go index b5dcd20672e..f6394d7a0fc 100644 --- 
a/testscommon/gasHandlerStub.go +++ b/testscommon/gasHandlerStub.go @@ -8,7 +8,7 @@ import ( // GasHandlerStub - type GasHandlerStub struct { InitCalled func() - ResetCalled func() + ResetCalled func(key []byte) SetGasProvidedCalled func(gasProvided uint64, hash []byte) SetGasProvidedAsScheduledCalled func(gasProvided uint64, hash []byte) SetGasRefundedCalled func(gasRefunded uint64, hash []byte) @@ -26,7 +26,7 @@ type GasHandlerStub struct { RemoveGasProvidedAsScheduledCalled func(hashes [][]byte) RemoveGasRefundedCalled func(hashes [][]byte) RemoveGasPenalizedCalled func(hashes [][]byte) - RestoreGasSinceLastResetCalled func() + RestoreGasSinceLastResetCalled func(key []byte) ComputeGasProvidedByMiniBlockCalled func(miniBlock *block.MiniBlock, mapHashTx map[string]data.TransactionHandler) (uint64, uint64, error) ComputeGasProvidedByTxCalled func(txSenderShardId uint32, txReceiverSharedId uint32, txHandler data.TransactionHandler) (uint64, uint64, error) } @@ -39,9 +39,9 @@ func (ghs *GasHandlerStub) Init() { } // Reset - -func (ghs *GasHandlerStub) Reset() { +func (ghs *GasHandlerStub) Reset(key []byte) { if ghs.ResetCalled != nil { - ghs.ResetCalled() + ghs.ResetCalled(key) } } @@ -174,9 +174,9 @@ func (ghs *GasHandlerStub) RemoveGasPenalized(hashes [][]byte) { } // RestoreGasSinceLastReset - -func (ghs *GasHandlerStub) RestoreGasSinceLastReset() { +func (ghs *GasHandlerStub) RestoreGasSinceLastReset(key []byte) { if ghs.RestoreGasSinceLastResetCalled != nil { - ghs.RestoreGasSinceLastResetCalled() + ghs.RestoreGasSinceLastResetCalled(key) } } diff --git a/testscommon/generalConfig.go b/testscommon/generalConfig.go index 4ca7b49727d..d22c2f529c3 100644 --- a/testscommon/generalConfig.go +++ b/testscommon/generalConfig.go @@ -8,6 +8,10 @@ import ( // GetGeneralConfig returns the common configuration used for testing func GetGeneralConfig() config.Config { return config.Config{ + Hardfork: config.HardforkConfig{ + PublicKeyToListenFrom: "153dae6cb3963260f309959bf285537b77ae16d82e9933147be7827f7394de8dc97d9d9af41e970bc72aecb44b77e819621081658c37f7000d21e2d0e8963df83233407bde9f46369ba4fcd03b57f40b80b06c191a428cfb5c447ec510e79307", + CloseAfterExportInMinutes: 2, + }, PublicKeyPeerId: config.CacheConfig{ Type: "LRU", Capacity: 5000, @@ -257,6 +261,25 @@ func GetGeneralConfig() config.Config { }, }, }, + HeartbeatV2: config.HeartbeatV2Config{ + PeerAuthenticationTimeBetweenSendsInSec: 1, + PeerAuthenticationTimeBetweenSendsWhenErrorInSec: 1, + PeerAuthenticationThresholdBetweenSends: 0.1, + HeartbeatTimeBetweenSendsInSec: 1, + HeartbeatTimeBetweenSendsWhenErrorInSec: 1, + HeartbeatThresholdBetweenSends: 0.1, + MaxNumOfPeerAuthenticationInResponse: 5, + DelayBetweenConnectionNotificationsInSec: 5, + HeartbeatExpiryTimespanInSec: 30, + MaxDurationPeerUnresponsiveInSec: 10, + HideInactiveValidatorIntervalInSec: 60, + HardforkTimeBetweenSendsInSec: 5, + PeerAuthenticationPool: config.PeerAuthenticationPoolConfig{ + DefaultSpanInSec: 30, + CacheExpiryInSec: 30, + }, + HeartbeatPool: getLRUCacheConfig(), + }, StatusMetricsStorage: config.StorageConfig{ Cache: getLRUCacheConfig(), DB: config.DBConfig{ @@ -371,7 +394,7 @@ func GetGeneralConfig() config.Config { }, Resolvers: config.ResolverConfig{ NumCrossShardPeers: 2, - NumIntraShardPeers: 1, + NumTotalPeers: 3, NumFullHistoryPeers: 3, }, VirtualMachine: config.VirtualMachineServicesConfig{ @@ -394,6 +417,10 @@ func GetGeneralConfig() config.Config { Capacity: 10000, Name: "VMOutputCacher", }, + PeersRatingConfig: config.PeersRatingConfig{ + 
TopRatedCacheCapacity: 1000, + BadRatedCacheCapacity: 1000, + }, } } diff --git a/factory/mock/hardforkTriggerStub.go b/testscommon/hardforkTriggerStub.go similarity index 59% rename from factory/mock/hardforkTriggerStub.go rename to testscommon/hardforkTriggerStub.go index 6858c666c16..5775ac32329 100644 --- a/factory/mock/hardforkTriggerStub.go +++ b/testscommon/hardforkTriggerStub.go @@ -1,16 +1,27 @@ -package mock +package testscommon import "github.com/ElrondNetwork/elrond-go/update" // HardforkTriggerStub - type HardforkTriggerStub struct { - TriggerCalled func(epoch uint32, withEarlyEndOfEpoch bool) error - IsSelfTriggerCalled func() bool - TriggerReceivedCalled func(payload []byte, data []byte, pkBytes []byte) (bool, error) - RecordedTriggerMessageCalled func() ([]byte, bool) - CreateDataCalled func() []byte - AddCloserCalled func(closer update.Closer) error - NotifyTriggerReceivedCalled func() <-chan struct{} + SetExportFactoryHandlerCalled func(exportFactoryHandler update.ExportFactoryHandler) error + TriggerCalled func(epoch uint32, withEarlyEndOfEpoch bool) error + IsSelfTriggerCalled func() bool + TriggerReceivedCalled func(payload []byte, data []byte, pkBytes []byte) (bool, error) + RecordedTriggerMessageCalled func() ([]byte, bool) + CreateDataCalled func() []byte + AddCloserCalled func(closer update.Closer) error + NotifyTriggerReceivedCalled func() <-chan struct{} + NotifyTriggerReceivedV2Called func() <-chan struct{} +} + +// SetExportFactoryHandler - +func (hts *HardforkTriggerStub) SetExportFactoryHandler(exportFactoryHandler update.ExportFactoryHandler) error { + if hts.SetExportFactoryHandlerCalled != nil { + return hts.SetExportFactoryHandlerCalled(exportFactoryHandler) + } + + return nil } // Trigger - @@ -76,6 +87,15 @@ func (hts *HardforkTriggerStub) NotifyTriggerReceived() <-chan struct{} { return make(chan struct{}) } +// NotifyTriggerReceivedV2 - +func (hts *HardforkTriggerStub) NotifyTriggerReceivedV2() <-chan struct{} { + if hts.NotifyTriggerReceivedV2Called != nil { + return hts.NotifyTriggerReceivedV2Called() + } + + return make(chan struct{}) +} + // IsInterfaceNil - func (hts *HardforkTriggerStub) IsInterfaceNil() bool { return hts == nil diff --git a/testscommon/loggerStub.go b/testscommon/loggerStub.go index 478d899e3cf..10ffd3d4c68 100644 --- a/testscommon/loggerStub.go +++ b/testscommon/loggerStub.go @@ -16,6 +16,20 @@ type LoggerStub struct { GetLevelCalled func() logger.LogLevel } +// Log - +func (stub *LoggerStub) Log(logLevel logger.LogLevel, message string, args ...interface{}) { + if stub.LogCalled != nil { + stub.LogCalled(logLevel, message, args...) + } +} + +// LogLine - +func (stub *LoggerStub) LogLine(line *logger.LogLine) { + if stub.LogLineCalled != nil { + stub.LogLineCalled(line) + } +} + // Trace - func (stub *LoggerStub) Trace(message string, args ...interface{}) { if stub.TraceCalled != nil { @@ -58,20 +72,6 @@ func (stub *LoggerStub) LogIfError(err error, args ...interface{}) { } } -// Log - -func (stub *LoggerStub) Log(logLevel logger.LogLevel, message string, args ...interface{}) { - if stub.LogCalled != nil { - stub.LogCalled(logLevel, message, args...) 
- } -} - -// LogLine - -func (stub *LoggerStub) LogLine(line *logger.LogLine) { - if stub.LogLineCalled != nil { - stub.LogLineCalled(line) - } -} - // SetLevel - func (stub *LoggerStub) SetLevel(logLevel logger.LogLevel) { if stub.SetLevelCalled != nil { diff --git a/testscommon/marshalizerStub.go b/testscommon/marshalizerStub.go index b29904d02d6..18b42297b1e 100644 --- a/testscommon/marshalizerStub.go +++ b/testscommon/marshalizerStub.go @@ -8,12 +8,18 @@ type MarshalizerStub struct { // Marshal - func (ms *MarshalizerStub) Marshal(obj interface{}) ([]byte, error) { - return ms.MarshalCalled(obj) + if ms.MarshalCalled != nil { + return ms.MarshalCalled(obj) + } + return nil, nil } // Unmarshal - func (ms *MarshalizerStub) Unmarshal(obj interface{}, buff []byte) error { - return ms.UnmarshalCalled(obj, buff) + if ms.UnmarshalCalled != nil { + return ms.UnmarshalCalled(obj, buff) + } + return nil } // IsInterfaceNil - diff --git a/testscommon/p2pmocks/messengerStub.go b/testscommon/p2pmocks/messengerStub.go index ad45e3192a0..28d6f430c90 100644 --- a/testscommon/p2pmocks/messengerStub.go +++ b/testscommon/p2pmocks/messengerStub.go @@ -40,6 +40,8 @@ type MessengerStub struct { UnjoinAllTopicsCalled func() error PortCalled func() int WaitForConnectionsCalled func(maxWaitingTime time.Duration, minNumOfPeers uint32) + SignCalled func(payload []byte) ([]byte, error) + VerifyCalled func(payload []byte, pid core.PeerID, signature []byte) error } // ConnectedFullHistoryPeersOnTopic - @@ -315,6 +317,24 @@ func (ms *MessengerStub) WaitForConnections(maxWaitingTime time.Duration, minNum } } +// Sign - +func (ms *MessengerStub) Sign(payload []byte) ([]byte, error) { + if ms.SignCalled != nil { + return ms.SignCalled(payload) + } + + return make([]byte, 0), nil +} + +// Verify - +func (ms *MessengerStub) Verify(payload []byte, pid core.PeerID, signature []byte) error { + if ms.VerifyCalled != nil { + return ms.VerifyCalled(payload, pid, signature) + } + + return nil +} + // IsInterfaceNil returns true if there is no value under the interface func (ms *MessengerStub) IsInterfaceNil() bool { return ms == nil diff --git a/testscommon/p2pmocks/networkShardingCollectorStub.go b/testscommon/p2pmocks/networkShardingCollectorStub.go index 5d87bb2af49..a8626caa35b 100644 --- a/testscommon/p2pmocks/networkShardingCollectorStub.go +++ b/testscommon/p2pmocks/networkShardingCollectorStub.go @@ -6,9 +6,26 @@ import ( // NetworkShardingCollectorStub - type NetworkShardingCollectorStub struct { - UpdatePeerIDInfoCalled func(pid core.PeerID, pk []byte, shardID uint32) - UpdatePeerIdSubTypeCalled func(pid core.PeerID, peerSubType core.P2PPeerSubType) - GetPeerInfoCalled func(pid core.PeerID) core.P2PPeerInfo + UpdatePeerIDPublicKeyPairCalled func(pid core.PeerID, pk []byte) + UpdatePeerIDInfoCalled func(pid core.PeerID, pk []byte, shardID uint32) + PutPeerIdShardIdCalled func(pid core.PeerID, shardId uint32) + PutPeerIdSubTypeCalled func(pid core.PeerID, peerSubType core.P2PPeerSubType) + GetLastKnownPeerIDCalled func(pk []byte) (*core.PeerID, bool) + GetPeerInfoCalled func(pid core.PeerID) core.P2PPeerInfo +} + +// UpdatePeerIDPublicKeyPair - +func (nscs *NetworkShardingCollectorStub) UpdatePeerIDPublicKeyPair(pid core.PeerID, pk []byte) { + if nscs.UpdatePeerIDPublicKeyPairCalled != nil { + nscs.UpdatePeerIDPublicKeyPairCalled(pid, pk) + } +} + +// PutPeerIdShardId - +func (nscs *NetworkShardingCollectorStub) PutPeerIdShardId(pid core.PeerID, shardID uint32) { + if nscs.PutPeerIdShardIdCalled != nil { + 
nscs.PutPeerIdShardIdCalled(pid, shardID) + } } // UpdatePeerIDInfo - @@ -18,13 +35,22 @@ func (nscs *NetworkShardingCollectorStub) UpdatePeerIDInfo(pid core.PeerID, pk [ } } -// UpdatePeerIdSubType -func (nscs *NetworkShardingCollectorStub) UpdatePeerIdSubType(pid core.PeerID, peerSubType core.P2PPeerSubType) { - if nscs.UpdatePeerIdSubTypeCalled != nil { - nscs.UpdatePeerIdSubTypeCalled(pid, peerSubType) +// PutPeerIdSubType - +func (nscs *NetworkShardingCollectorStub) PutPeerIdSubType(pid core.PeerID, peerSubType core.P2PPeerSubType) { + if nscs.PutPeerIdSubTypeCalled != nil { + nscs.PutPeerIdSubTypeCalled(pid, peerSubType) } } +// GetLastKnownPeerID - +func (nscs *NetworkShardingCollectorStub) GetLastKnownPeerID(pk []byte) (*core.PeerID, bool) { + if nscs.GetLastKnownPeerIDCalled != nil { + return nscs.GetLastKnownPeerIDCalled(pk) + } + + return nil, false +} + // GetPeerInfo - func (nscs *NetworkShardingCollectorStub) GetPeerInfo(pid core.PeerID) core.P2PPeerInfo { if nscs.GetPeerInfoCalled != nil { diff --git a/testscommon/p2pmocks/peersHolderStub.go b/testscommon/p2pmocks/peersHolderStub.go index c1e805efb34..8749ca792b7 100644 --- a/testscommon/p2pmocks/peersHolderStub.go +++ b/testscommon/p2pmocks/peersHolderStub.go @@ -4,17 +4,25 @@ import "github.com/ElrondNetwork/elrond-go-core/core" // PeersHolderStub - type PeersHolderStub struct { - PutCalled func(publicKey []byte, peerID core.PeerID, shardID uint32) - GetCalled func() map[uint32][]core.PeerID - ContainsCalled func(peerID core.PeerID) bool - RemoveCalled func(peerID core.PeerID) - ClearCalled func() + PutConnectionAddressCalled func(peerID core.PeerID, address string) + PutShardIDCalled func(peerID core.PeerID, shardID uint32) + GetCalled func() map[uint32][]core.PeerID + ContainsCalled func(peerID core.PeerID) bool + RemoveCalled func(peerID core.PeerID) + ClearCalled func() } -// Put - -func (p *PeersHolderStub) Put(publicKey []byte, peerID core.PeerID, shardID uint32) { - if p.PutCalled != nil { - p.PutCalled(publicKey, peerID, shardID) +// PutConnectionAddress - +func (p *PeersHolderStub) PutConnectionAddress(peerID core.PeerID, address string) { + if p.PutConnectionAddressCalled != nil { + p.PutConnectionAddressCalled(peerID, address) + } +} + +// PutShardID - +func (p *PeersHolderStub) PutShardID(peerID core.PeerID, shardID uint32) { + if p.PutShardIDCalled != nil { + p.PutShardIDCalled(peerID, shardID) } } diff --git a/testscommon/p2pmocks/peersRatingHandlerStub.go b/testscommon/p2pmocks/peersRatingHandlerStub.go new file mode 100644 index 00000000000..cf150a26c31 --- /dev/null +++ b/testscommon/p2pmocks/peersRatingHandlerStub.go @@ -0,0 +1,46 @@ +package p2pmocks + +import "github.com/ElrondNetwork/elrond-go-core/core" + +// PeersRatingHandlerStub - +type PeersRatingHandlerStub struct { + AddPeerCalled func(pid core.PeerID) + IncreaseRatingCalled func(pid core.PeerID) + DecreaseRatingCalled func(pid core.PeerID) + GetTopRatedPeersFromListCalled func(peers []core.PeerID, numOfPeers int) []core.PeerID +} + +// AddPeer - +func (stub *PeersRatingHandlerStub) AddPeer(pid core.PeerID) { + if stub.AddPeerCalled != nil { + stub.AddPeerCalled(pid) + } +} + +// IncreaseRating - +func (stub *PeersRatingHandlerStub) IncreaseRating(pid core.PeerID) { + if stub.IncreaseRatingCalled != nil { + stub.IncreaseRatingCalled(pid) + } +} + +// DecreaseRating - +func (stub *PeersRatingHandlerStub) DecreaseRating(pid core.PeerID) { + if stub.DecreaseRatingCalled != nil { + stub.DecreaseRatingCalled(pid) + } +} + +// GetTopRatedPeersFromList 
- +func (stub *PeersRatingHandlerStub) GetTopRatedPeersFromList(peers []core.PeerID, numOfPeers int) []core.PeerID { + if stub.GetTopRatedPeersFromListCalled != nil { + return stub.GetTopRatedPeersFromListCalled(peers, numOfPeers) + } + + return peers +} + +// IsInterfaceNil returns true if there is no value under the interface +func (stub *PeersRatingHandlerStub) IsInterfaceNil() bool { + return stub == nil +} diff --git a/testscommon/preProcessorExecutionInfoHandlerMock.go b/testscommon/preProcessorExecutionInfoHandlerMock.go new file mode 100644 index 00000000000..116f58f7d88 --- /dev/null +++ b/testscommon/preProcessorExecutionInfoHandlerMock.go @@ -0,0 +1,30 @@ +package testscommon + +// PreProcessorExecutionInfoHandlerMock - +type PreProcessorExecutionInfoHandlerMock struct { + GetNumOfCrossInterMbsAndTxsCalled func() (int, int) + InitProcessedTxsResultsCalled func(key []byte) + RevertProcessedTxsResultsCalled func(txHashes [][]byte, key []byte) +} + +// GetNumOfCrossInterMbsAndTxs - +func (ppeihm *PreProcessorExecutionInfoHandlerMock) GetNumOfCrossInterMbsAndTxs() (int, int) { + if ppeihm.GetNumOfCrossInterMbsAndTxsCalled != nil { + return ppeihm.GetNumOfCrossInterMbsAndTxsCalled() + } + return 0, 0 +} + +// InitProcessedTxsResults - +func (ppeihm *PreProcessorExecutionInfoHandlerMock) InitProcessedTxsResults(key []byte) { + if ppeihm.InitProcessedTxsResultsCalled != nil { + ppeihm.InitProcessedTxsResultsCalled(key) + } +} + +// RevertProcessedTxsResults - +func (ppeihm *PreProcessorExecutionInfoHandlerMock) RevertProcessedTxsResults(txHashes [][]byte, key []byte) { + if ppeihm.RevertProcessedTxsResultsCalled != nil { + ppeihm.RevertProcessedTxsResultsCalled(txHashes, key) + } +} diff --git a/testscommon/processedMiniBlocksTrackerStub.go b/testscommon/processedMiniBlocksTrackerStub.go new file mode 100644 index 00000000000..82c064c92f7 --- /dev/null +++ b/testscommon/processedMiniBlocksTrackerStub.go @@ -0,0 +1,94 @@ +package testscommon + +import ( + "github.com/ElrondNetwork/elrond-go/process/block/bootstrapStorage" + "github.com/ElrondNetwork/elrond-go/process/block/processedMb" +) + +// ProcessedMiniBlocksTrackerStub - +type ProcessedMiniBlocksTrackerStub struct { + SetProcessedMiniBlockInfoCalled func(metaBlockHash []byte, miniBlockHash []byte, processedMbInfo *processedMb.ProcessedMiniBlockInfo) + RemoveMetaBlockHashCalled func(metaBlockHash []byte) + RemoveMiniBlockHashCalled func(miniBlockHash []byte) + GetProcessedMiniBlocksInfoCalled func(metaBlockHash []byte) map[string]*processedMb.ProcessedMiniBlockInfo + GetProcessedMiniBlockInfoCalled func(miniBlockHash []byte) (*processedMb.ProcessedMiniBlockInfo, []byte) + IsMiniBlockFullyProcessedCalled func(metaBlockHash []byte, miniBlockHash []byte) bool + ConvertProcessedMiniBlocksMapToSliceCalled func() []bootstrapStorage.MiniBlocksInMeta + ConvertSliceToProcessedMiniBlocksMapCalled func(miniBlocksInMetaBlocks []bootstrapStorage.MiniBlocksInMeta) + DisplayProcessedMiniBlocksCalled func() +} + +// SetProcessedMiniBlockInfo - +func (pmbts *ProcessedMiniBlocksTrackerStub) SetProcessedMiniBlockInfo(metaBlockHash []byte, miniBlockHash []byte, processedMbInfo *processedMb.ProcessedMiniBlockInfo) { + if pmbts.SetProcessedMiniBlockInfoCalled != nil { + pmbts.SetProcessedMiniBlockInfoCalled(metaBlockHash, miniBlockHash, processedMbInfo) + } +} + +// RemoveMetaBlockHash - +func (pmbts *ProcessedMiniBlocksTrackerStub) RemoveMetaBlockHash(metaBlockHash []byte) { + if pmbts.RemoveMetaBlockHashCalled != nil { + 
pmbts.RemoveMetaBlockHashCalled(metaBlockHash) + } +} + +// RemoveMiniBlockHash - +func (pmbts *ProcessedMiniBlocksTrackerStub) RemoveMiniBlockHash(miniBlockHash []byte) { + if pmbts.RemoveMiniBlockHashCalled != nil { + pmbts.RemoveMiniBlockHashCalled(miniBlockHash) + } +} + +// GetProcessedMiniBlocksInfo - +func (pmbts *ProcessedMiniBlocksTrackerStub) GetProcessedMiniBlocksInfo(metaBlockHash []byte) map[string]*processedMb.ProcessedMiniBlockInfo { + if pmbts.GetProcessedMiniBlocksInfoCalled != nil { + return pmbts.GetProcessedMiniBlocksInfoCalled(metaBlockHash) + } + return make(map[string]*processedMb.ProcessedMiniBlockInfo) +} + +// GetProcessedMiniBlockInfo - +func (pmbts *ProcessedMiniBlocksTrackerStub) GetProcessedMiniBlockInfo(miniBlockHash []byte) (*processedMb.ProcessedMiniBlockInfo, []byte) { + if pmbts.GetProcessedMiniBlockInfoCalled != nil { + return pmbts.GetProcessedMiniBlockInfoCalled(miniBlockHash) + } + return &processedMb.ProcessedMiniBlockInfo{ + FullyProcessed: false, + IndexOfLastTxProcessed: -1, + }, nil +} + +// IsMiniBlockFullyProcessed - +func (pmbts *ProcessedMiniBlocksTrackerStub) IsMiniBlockFullyProcessed(metaBlockHash []byte, miniBlockHash []byte) bool { + if pmbts.IsMiniBlockFullyProcessedCalled != nil { + return pmbts.IsMiniBlockFullyProcessedCalled(metaBlockHash, miniBlockHash) + } + return false +} + +// ConvertProcessedMiniBlocksMapToSlice - +func (pmbts *ProcessedMiniBlocksTrackerStub) ConvertProcessedMiniBlocksMapToSlice() []bootstrapStorage.MiniBlocksInMeta { + if pmbts.ConvertProcessedMiniBlocksMapToSliceCalled != nil { + return pmbts.ConvertProcessedMiniBlocksMapToSliceCalled() + } + return nil +} + +// ConvertSliceToProcessedMiniBlocksMap - +func (pmbts *ProcessedMiniBlocksTrackerStub) ConvertSliceToProcessedMiniBlocksMap(miniBlocksInMetaBlocks []bootstrapStorage.MiniBlocksInMeta) { + if pmbts.ConvertSliceToProcessedMiniBlocksMapCalled != nil { + pmbts.ConvertSliceToProcessedMiniBlocksMapCalled(miniBlocksInMetaBlocks) + } +} + +// DisplayProcessedMiniBlocks - +func (pmbts *ProcessedMiniBlocksTrackerStub) DisplayProcessedMiniBlocks() { + if pmbts.DisplayProcessedMiniBlocksCalled != nil { + pmbts.DisplayProcessedMiniBlocksCalled() + } +} + +// IsInterfaceNil - +func (pmbts *ProcessedMiniBlocksTrackerStub) IsInterfaceNil() bool { + return pmbts == nil +} diff --git a/testscommon/requestHandlerStub.go b/testscommon/requestHandlerStub.go index 6c2f90f0e5d..a5bc8b19901 100644 --- a/testscommon/requestHandlerStub.go +++ b/testscommon/requestHandlerStub.go @@ -4,21 +4,23 @@ import "time" // RequestHandlerStub - type RequestHandlerStub struct { - RequestShardHeaderCalled func(shardID uint32, hash []byte) - RequestMetaHeaderCalled func(hash []byte) - RequestMetaHeaderByNonceCalled func(nonce uint64) - RequestShardHeaderByNonceCalled func(shardID uint32, nonce uint64) - RequestTransactionHandlerCalled func(destShardID uint32, txHashes [][]byte) - RequestScrHandlerCalled func(destShardID uint32, txHashes [][]byte) - RequestRewardTxHandlerCalled func(destShardID uint32, txHashes [][]byte) - RequestMiniBlockHandlerCalled func(destShardID uint32, miniblockHash []byte) - RequestMiniBlocksHandlerCalled func(destShardID uint32, miniblocksHashes [][]byte) - RequestTrieNodesCalled func(destShardID uint32, hashes [][]byte, topic string) - RequestStartOfEpochMetaBlockCalled func(epoch uint32) - SetNumPeersToQueryCalled func(key string, intra int, cross int) error - GetNumPeersToQueryCalled func(key string) (int, int, error) - RequestTrieNodeCalled func(requestHash []byte, 
topic string, chunkIndex uint32) - CreateTrieNodeIdentifierCalled func(requestHash []byte, chunkIndex uint32) []byte + RequestShardHeaderCalled func(shardID uint32, hash []byte) + RequestMetaHeaderCalled func(hash []byte) + RequestMetaHeaderByNonceCalled func(nonce uint64) + RequestShardHeaderByNonceCalled func(shardID uint32, nonce uint64) + RequestTransactionHandlerCalled func(destShardID uint32, txHashes [][]byte) + RequestScrHandlerCalled func(destShardID uint32, txHashes [][]byte) + RequestRewardTxHandlerCalled func(destShardID uint32, txHashes [][]byte) + RequestMiniBlockHandlerCalled func(destShardID uint32, miniblockHash []byte) + RequestMiniBlocksHandlerCalled func(destShardID uint32, miniblocksHashes [][]byte) + RequestTrieNodesCalled func(destShardID uint32, hashes [][]byte, topic string) + RequestStartOfEpochMetaBlockCalled func(epoch uint32) + SetNumPeersToQueryCalled func(key string, intra int, cross int) error + GetNumPeersToQueryCalled func(key string) (int, int, error) + RequestTrieNodeCalled func(requestHash []byte, topic string, chunkIndex uint32) + CreateTrieNodeIdentifierCalled func(requestHash []byte, chunkIndex uint32) []byte + RequestPeerAuthenticationsChunkCalled func(destShardID uint32, chunkIndex uint32) + RequestPeerAuthenticationsByHashesCalled func(destShardID uint32, hashes [][]byte) } // SetNumPeersToQuery - @@ -152,6 +154,20 @@ func (rhs *RequestHandlerStub) RequestTrieNode(requestHash []byte, topic string, } } +// RequestPeerAuthenticationsChunk - +func (rhs *RequestHandlerStub) RequestPeerAuthenticationsChunk(destShardID uint32, chunkIndex uint32) { + if rhs.RequestPeerAuthenticationsChunkCalled != nil { + rhs.RequestPeerAuthenticationsChunkCalled(destShardID, chunkIndex) + } +} + +// RequestPeerAuthenticationsByHashes - +func (rhs *RequestHandlerStub) RequestPeerAuthenticationsByHashes(destShardID uint32, hashes [][]byte) { + if rhs.RequestPeerAuthenticationsByHashesCalled != nil { + rhs.RequestPeerAuthenticationsByHashesCalled(destShardID, hashes) + } +} + // IsInterfaceNil returns true if there is no value under the interface func (rhs *RequestHandlerStub) IsInterfaceNil() bool { return rhs == nil diff --git a/testscommon/shardingMocks/nodesCoordinatorStub.go b/testscommon/shardingMocks/nodesCoordinatorStub.go index 70ea4b61577..0bb4a0b5302 100644 --- a/testscommon/shardingMocks/nodesCoordinatorStub.go +++ b/testscommon/shardingMocks/nodesCoordinatorStub.go @@ -8,15 +8,16 @@ import ( // NodesCoordinatorStub - type NodesCoordinatorStub struct { - ComputeValidatorsGroupCalled func(randomness []byte, round uint64, shardId uint32, epoch uint32) ([]nodesCoordinator.Validator, error) - GetValidatorsPublicKeysCalled func(randomness []byte, round uint64, shardId uint32, epoch uint32) ([]string, error) - GetValidatorsRewardsAddressesCalled func(randomness []byte, round uint64, shardId uint32, epoch uint32) ([]string, error) - GetValidatorWithPublicKeyCalled func(publicKey []byte) (validator nodesCoordinator.Validator, shardId uint32, err error) - GetAllValidatorsPublicKeysCalled func() (map[uint32][][]byte, error) - ConsensusGroupSizeCalled func(shardID uint32) int - ComputeConsensusGroupCalled func(randomness []byte, round uint64, shardId uint32, epoch uint32) (validatorsGroup []nodesCoordinator.Validator, err error) - EpochStartPrepareCalled func(metaHdr data.HeaderHandler, body data.BodyHandler) - GetNumTotalEligibleCalled func() uint64 + ComputeValidatorsGroupCalled func(randomness []byte, round uint64, shardId uint32, epoch uint32) 
([]nodesCoordinator.Validator, error) + GetValidatorsPublicKeysCalled func(randomness []byte, round uint64, shardId uint32, epoch uint32) ([]string, error) + GetValidatorsRewardsAddressesCalled func(randomness []byte, round uint64, shardId uint32, epoch uint32) ([]string, error) + GetValidatorWithPublicKeyCalled func(publicKey []byte) (validator nodesCoordinator.Validator, shardId uint32, err error) + GetAllValidatorsPublicKeysCalled func() (map[uint32][][]byte, error) + GetAllEligibleValidatorsPublicKeysCalled func(epoch uint32) (map[uint32][][]byte, error) + ConsensusGroupSizeCalled func(shardID uint32) int + ComputeConsensusGroupCalled func(randomness []byte, round uint64, shardId uint32, epoch uint32) (validatorsGroup []nodesCoordinator.Validator, err error) + EpochStartPrepareCalled func(metaHdr data.HeaderHandler, body data.BodyHandler) + GetNumTotalEligibleCalled func() uint64 } // NodesCoordinatorToRegistry - @@ -57,7 +58,10 @@ func (ncm *NodesCoordinatorStub) ComputeAdditionalLeaving(_ []*state.ShardValida } // GetAllEligibleValidatorsPublicKeys - -func (ncm *NodesCoordinatorStub) GetAllEligibleValidatorsPublicKeys(_ uint32) (map[uint32][][]byte, error) { +func (ncm *NodesCoordinatorStub) GetAllEligibleValidatorsPublicKeys(epoch uint32) (map[uint32][][]byte, error) { + if ncm.GetAllEligibleValidatorsPublicKeysCalled != nil { + return ncm.GetAllEligibleValidatorsPublicKeysCalled(epoch) + } return nil, nil } diff --git a/trie/factory/trieCreator.go b/trie/factory/trieCreator.go index 3b0de97949e..c02116b4bc8 100644 --- a/trie/factory/trieCreator.go +++ b/trie/factory/trieCreator.go @@ -73,7 +73,7 @@ func (tc *trieCreator) Create(args TrieCreateArgs) (common.StorageManager, commo Hasher: tc.hasher, GeneralConfig: tc.trieStorageManagerConfig, CheckpointHashesHolder: checkpointHashesHolder, - IdleProvider: args.IdleProvider, + IdleProvider: args.IdleProvider, } log.Debug("trie checkpoints status", "enabled", args.CheckpointsEnabled) diff --git a/trie/node_test.go b/trie/node_test.go index 163682d888b..54b076a2593 100644 --- a/trie/node_test.go +++ b/trie/node_test.go @@ -5,8 +5,8 @@ import ( "testing" "time" - "github.com/ElrondNetwork/elrond-go-core/core/atomic" "github.com/ElrondNetwork/elrond-go-core/core" + "github.com/ElrondNetwork/elrond-go-core/core/atomic" "github.com/ElrondNetwork/elrond-go/common" dataMock "github.com/ElrondNetwork/elrond-go/dataRetriever/mock" "github.com/ElrondNetwork/elrond-go/testscommon" diff --git a/trie/trieStorageManager.go b/trie/trieStorageManager.go index ab1d731da38..0be7ba72a53 100644 --- a/trie/trieStorageManager.go +++ b/trie/trieStorageManager.go @@ -51,7 +51,7 @@ type NewTrieStorageManagerArgs struct { Hasher hashing.Hasher GeneralConfig config.TrieStorageManagerConfig CheckpointHashesHolder CheckpointHashesHolder - IdleProvider IdleNodeProvider + IdleProvider IdleNodeProvider } // NewTrieStorageManager creates a new instance of trieStorageManager diff --git a/update/common.go b/update/common.go index 2ab4721df25..a3a121f9bc8 100644 --- a/update/common.go +++ b/update/common.go @@ -132,7 +132,7 @@ func getAllMiniBlocksWithDst(metaBlock data.MetaHeaderHandler, destShardID uint3 } miniBlockHeaderHandlers := metaBlock.GetMiniBlockHeaderHandlers() - for i, mbHdr := range miniBlockHeaderHandlers{ + for i, mbHdr := range miniBlockHeaderHandlers { if mbHdr.GetReceiverShardID() == destShardID && mbHdr.GetSenderShardID() != destShardID { mbHdrs = append(mbHdrs, miniBlockHeaderHandlers[i]) } diff --git a/update/disabled/exportFactoryHandler.go 
b/update/disabled/exportFactoryHandler.go new file mode 100644 index 00000000000..214f9219c61 --- /dev/null +++ b/update/disabled/exportFactoryHandler.go @@ -0,0 +1,17 @@ +package disabled + +import "github.com/ElrondNetwork/elrond-go/update" + +// ExportFactoryHandler implements ExportFactoryHandler interface but does nothing +type ExportFactoryHandler struct { +} + +// Create does nothing as it is disabled +func (e *ExportFactoryHandler) Create() (update.ExportHandler, error) { + return nil, nil +} + +// IsInterfaceNil returns true if there is no value under the interface +func (e *ExportFactoryHandler) IsInterfaceNil() bool { + return e == nil +} diff --git a/update/disabled/preferredPeersHolder.go b/update/disabled/preferredPeersHolder.go index f660895d103..ad9a2823796 100644 --- a/update/disabled/preferredPeersHolder.go +++ b/update/disabled/preferredPeersHolder.go @@ -12,11 +12,15 @@ func NewPreferredPeersHolder() *disabledPreferredPeersHolder { return &disabledPreferredPeersHolder{} } -// Put won't do anything -func (d *disabledPreferredPeersHolder) Put(_ []byte, _ core.PeerID, _ uint32) { +// PutConnectionAddress does nothing as it is disabled +func (d *disabledPreferredPeersHolder) PutConnectionAddress(_ core.PeerID, _ string) { } -// Get will return an empty map +// PutShardID does nothing as it is disabled +func (d *disabledPreferredPeersHolder) PutShardID(_ core.PeerID, _ uint32) { +} + +// Get returns an empty map func (d *disabledPreferredPeersHolder) Get() map[uint32][]core.PeerID { return make(map[uint32][]core.PeerID) } @@ -26,11 +30,11 @@ func (d *disabledPreferredPeersHolder) Contains(_ core.PeerID) bool { return false } -// Remove won't do anything +// Remove does nothing as it is disabled func (d *disabledPreferredPeersHolder) Remove(_ core.PeerID) { } -// Clear won't do anything +// Clear does nothing as it is disabled func (d *disabledPreferredPeersHolder) Clear() { } diff --git a/update/errors.go b/update/errors.go index d87ea88f5b7..e5db94f1abe 100644 --- a/update/errors.go +++ b/update/errors.go @@ -277,3 +277,6 @@ var ErrInvalidMaxHardCapForMissingNodes = errors.New("invalid max hardcap for mi // ErrInvalidNumConcurrentTrieSyncers signals that the number of concurrent trie syncers is invalid var ErrInvalidNumConcurrentTrieSyncers = errors.New("invalid num concurrent trie syncers") + +// ErrNilPeersRatingHandler signals that a nil peers rating handler implementation has been provided +var ErrNilPeersRatingHandler = errors.New("nil peers rating handler") diff --git a/update/factory/exportHandlerFactory.go b/update/factory/exportHandlerFactory.go index 8e782803cb0..16ca4bea643 100644 --- a/update/factory/exportHandlerFactory.go +++ b/update/factory/exportHandlerFactory.go @@ -62,6 +62,7 @@ type ArgsExporter struct { InputAntifloodHandler process.P2PAntifloodHandler OutputAntifloodHandler process.P2PAntifloodHandler RoundHandler process.RoundHandler + PeersRatingHandler dataRetriever.PeersRatingHandler InterceptorDebugConfig config.InterceptorResolverDebugConfig EnableSignTxWithHashEpoch uint32 MaxHardCapForMissingNodes int @@ -98,6 +99,7 @@ type exportHandlerFactory struct { inputAntifloodHandler process.P2PAntifloodHandler outputAntifloodHandler process.P2PAntifloodHandler roundHandler process.RoundHandler + peersRatingHandler dataRetriever.PeersRatingHandler interceptorDebugConfig config.InterceptorResolverDebugConfig enableSignTxWithHashEpoch uint32 maxHardCapForMissingNodes int @@ -200,6 +202,9 @@ func NewExportHandlerFactory(args ArgsExporter) 
(*exportHandlerFactory, error) { if check.IfNil(args.RoundHandler) { return nil, update.ErrNilRoundHandler } + if check.IfNil(args.PeersRatingHandler) { + return nil, update.ErrNilPeersRatingHandler + } if check.IfNil(args.CoreComponents.TxSignHasher()) { return nil, update.ErrNilHasher } @@ -244,6 +249,7 @@ func NewExportHandlerFactory(args ArgsExporter) (*exportHandlerFactory, error) { outputAntifloodHandler: args.OutputAntifloodHandler, maxTrieLevelInMemory: args.MaxTrieLevelInMemory, roundHandler: args.RoundHandler, + peersRatingHandler: args.PeersRatingHandler, interceptorDebugConfig: args.InterceptorDebugConfig, enableSignTxWithHashEpoch: args.EnableSignTxWithHashEpoch, maxHardCapForMissingNodes: args.MaxHardCapForMissingNodes, @@ -333,6 +339,7 @@ func (e *exportHandlerFactory) Create() (update.ExportHandler, error) { NumConcurrentResolvingJobs: 100, InputAntifloodHandler: e.inputAntifloodHandler, OutputAntifloodHandler: e.outputAntifloodHandler, + PeersRatingHandler: e.peersRatingHandler, } resolversFactory, err := NewResolversContainerFactory(argsResolvers) if err != nil { diff --git a/update/factory/fullSyncResolversContainerFactory.go b/update/factory/fullSyncResolversContainerFactory.go index 14eff65bcc6..2b32a832509 100644 --- a/update/factory/fullSyncResolversContainerFactory.go +++ b/update/factory/fullSyncResolversContainerFactory.go @@ -33,6 +33,7 @@ type resolversContainerFactory struct { inputAntifloodHandler dataRetriever.P2PAntifloodHandler outputAntifloodHandler dataRetriever.P2PAntifloodHandler throttler dataRetriever.ResolverThrottler + peersRatingHandler dataRetriever.PeersRatingHandler } // ArgsNewResolversContainerFactory defines the arguments for the resolversContainerFactory constructor @@ -44,6 +45,7 @@ type ArgsNewResolversContainerFactory struct { ExistingResolvers dataRetriever.ResolversContainer InputAntifloodHandler dataRetriever.P2PAntifloodHandler OutputAntifloodHandler dataRetriever.P2PAntifloodHandler + PeersRatingHandler dataRetriever.PeersRatingHandler NumConcurrentResolvingJobs int32 } @@ -64,6 +66,9 @@ func NewResolversContainerFactory(args ArgsNewResolversContainerFactory) (*resol if check.IfNil(args.ExistingResolvers) { return nil, update.ErrNilResolverContainer } + if check.IfNil(args.PeersRatingHandler) { + return nil, update.ErrNilPeersRatingHandler + } thr, err := throttler.NewNumGoRoutinesThrottler(args.NumConcurrentResolvingJobs) if err != nil { @@ -79,6 +84,7 @@ func NewResolversContainerFactory(args ArgsNewResolversContainerFactory) (*resol inputAntifloodHandler: args.InputAntifloodHandler, outputAntifloodHandler: args.OutputAntifloodHandler, throttler: thr, + peersRatingHandler: args.PeersRatingHandler, }, nil } @@ -179,6 +185,7 @@ func (rcf *resolversContainerFactory) createTrieNodesResolver(baseTopic string, CurrentNetworkEpochProvider: disabled.NewCurrentNetworkEpochProviderHandler(), PreferredPeersHolder: disabled.NewPreferredPeersHolder(), SelfShardIdProvider: rcf.shardCoordinator, + PeersRatingHandler: rcf.peersRatingHandler, } resolverSender, err := topicResolverSender.NewTopicResolverSender(arg) if err != nil { @@ -187,11 +194,13 @@ func (rcf *resolversContainerFactory) createTrieNodesResolver(baseTopic string, trie := rcf.dataTrieContainer.Get([]byte(trieId)) argTrieResolver := resolvers.ArgTrieNodeResolver{ - SenderResolver: resolverSender, - TrieDataGetter: trie, - Marshalizer: rcf.marshalizer, - AntifloodHandler: rcf.inputAntifloodHandler, - Throttler: rcf.throttler, + ArgBaseResolver: resolvers.ArgBaseResolver{ + 
SenderResolver: resolverSender, + Marshaller: rcf.marshalizer, + AntifloodHandler: rcf.inputAntifloodHandler, + Throttler: rcf.throttler, + }, + TrieDataGetter: trie, } resolver, err := resolvers.NewTrieNodeResolver(argTrieResolver) if err != nil { diff --git a/update/interface.go b/update/interface.go index f1b47ece497..e2c42116a79 100644 --- a/update/interface.go +++ b/update/interface.go @@ -263,7 +263,8 @@ type RoundHandler interface { // PreferredPeersHolderHandler defines the behavior of a component able to handle preferred peers operations type PreferredPeersHolderHandler interface { - Put(publicKey []byte, peerID core.PeerID, shardID uint32) + PutConnectionAddress(peerID core.PeerID, address string) + PutShardID(peerID core.PeerID, shardID uint32) Get() map[uint32][]core.PeerID Contains(peerID core.PeerID) bool Remove(peerID core.PeerID) diff --git a/update/mock/transactionCoordinatorMock.go b/update/mock/transactionCoordinatorMock.go index db875c708de..7c6e0263284 100644 --- a/update/mock/transactionCoordinatorMock.go +++ b/update/mock/transactionCoordinatorMock.go @@ -6,6 +6,7 @@ import ( "github.com/ElrondNetwork/elrond-go-core/data" "github.com/ElrondNetwork/elrond-go-core/data/block" "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/process/block/processedMb" ) // TransactionCoordinatorMock - @@ -20,7 +21,7 @@ type TransactionCoordinatorMock struct { RemoveTxsFromPoolCalled func(body *block.Body) error ProcessBlockTransactionCalled func(header data.HeaderHandler, body *block.Body, haveTime func() time.Duration) error CreateBlockStartedCalled func() - CreateMbsAndProcessCrossShardTransactionsDstMeCalled func(header data.HeaderHandler, processedMiniBlocksHashes map[string]struct{}, haveTime func() bool, haveAdditionalTime func() bool, scheduledMode bool) (block.MiniBlockSlice, uint32, bool, error) + CreateMbsAndProcessCrossShardTransactionsDstMeCalled func(header data.HeaderHandler, processedMiniBlocksInfo map[string]*processedMb.ProcessedMiniBlockInfo, haveTime func() bool, haveAdditionalTime func() bool, scheduledMode bool) (block.MiniBlockSlice, uint32, bool, error) CreateMbsAndProcessTransactionsFromMeCalled func(haveTime func() bool) block.MiniBlockSlice CreateMarshalizedDataCalled func(body *block.Body) map[string][][]byte GetAllCurrentUsedTxsCalled func(blockType block.Type) map[string]data.TransactionHandler @@ -136,7 +137,7 @@ func (tcm *TransactionCoordinatorMock) CreateBlockStarted() { // CreateMbsAndProcessCrossShardTransactionsDstMe - func (tcm *TransactionCoordinatorMock) CreateMbsAndProcessCrossShardTransactionsDstMe( header data.HeaderHandler, - processedMiniBlocksHashes map[string]struct{}, + processedMiniBlocksInfo map[string]*processedMb.ProcessedMiniBlockInfo, haveTime func() bool, haveAdditionalTime func() bool, scheduledMode bool, @@ -145,7 +146,7 @@ func (tcm *TransactionCoordinatorMock) CreateMbsAndProcessCrossShardTransactions return nil, 0, false, nil } - return tcm.CreateMbsAndProcessCrossShardTransactionsDstMeCalled(header, processedMiniBlocksHashes, haveTime, haveAdditionalTime, scheduledMode) + return tcm.CreateMbsAndProcessCrossShardTransactionsDstMeCalled(header, processedMiniBlocksInfo, haveTime, haveAdditionalTime, scheduledMode) } // CreateMbsAndProcessTransactionsFromMe - diff --git a/update/process/metaBlock.go b/update/process/metaBlock.go index 730bc94d681..96670e209d9 100644 --- a/update/process/metaBlock.go +++ b/update/process/metaBlock.go @@ -91,8 +91,8 @@ func (m *metaBlockCreator) CreateBlock( } 
diff --git a/update/process/metaBlock.go b/update/process/metaBlock.go
index 730bc94d681..96670e209d9 100644
--- a/update/process/metaBlock.go
+++ b/update/process/metaBlock.go
@@ -91,8 +91,8 @@ func (m *metaBlockCreator) CreateBlock(
 	}
 
 	hardForkMeta := m.importHandler.GetHardForkMetaBlock()
-	epochStart, ok:= hardForkMeta.GetEpochStartHandler().(*block.EpochStart)
-	if !ok{
+	epochStart, ok := hardForkMeta.GetEpochStartHandler().(*block.EpochStart)
+	if !ok {
 		return nil, update.ErrWrongTypeAssertion
 	}
diff --git a/update/process/metaBlock_test.go b/update/process/metaBlock_test.go
index 83040905003..19c7b9c8dd9 100644
--- a/update/process/metaBlock_test.go
+++ b/update/process/metaBlock_test.go
@@ -10,8 +10,8 @@ import (
 	"github.com/ElrondNetwork/elrond-go-core/data/block"
 	"github.com/ElrondNetwork/elrond-go-core/data/transaction"
 	"github.com/ElrondNetwork/elrond-go/state"
-	stateMock "github.com/ElrondNetwork/elrond-go/testscommon/state"
 	"github.com/ElrondNetwork/elrond-go/testscommon/hashingMocks"
+	stateMock "github.com/ElrondNetwork/elrond-go/testscommon/state"
 	"github.com/ElrondNetwork/elrond-go/update"
 	"github.com/ElrondNetwork/elrond-go/update/mock"
 	"github.com/stretchr/testify/assert"
diff --git a/update/trigger/trigger.go b/update/trigger/trigger.go
index 28d65d293bd..52c328576ef 100644
--- a/update/trigger/trigger.go
+++ b/update/trigger/trigger.go
@@ -69,7 +69,8 @@ type trigger struct {
 	chanStopNodeProcess   chan endProcess.ArgEndProcess
 	mutClosers            sync.RWMutex
 	closers               []update.Closer
-	chanTriggerReceived   chan struct{}
+	chanTriggerReceived   chan struct{} // TODO: remove it with heartbeat v1 cleanup
+	chanTriggerReceivedV2 chan struct{}
 	importStartHandler    update.ImportStartHandler
 	isWithEarlyEndOfEpoch bool
 	roundHandler          update.RoundHandler
@@ -112,21 +113,22 @@ func NewTrigger(arg ArgHardforkTrigger) (*trigger, error) {
 	}
 
 	t := &trigger{
-		enabled:              arg.Enabled,
-		enabledAuthenticated: arg.EnabledAuthenticated,
-		selfPubKey:           arg.SelfPubKeyBytes,
-		triggerPubKey:        arg.TriggerPubKeyBytes,
-		triggerReceived:      false,
-		triggerExecuting:     false,
-		argumentParser:       arg.ArgumentParser,
-		epochProvider:        arg.EpochProvider,
-		exportFactoryHandler: arg.ExportFactoryHandler,
-		closeAfterInMinutes:  arg.CloseAfterExportInMinutes,
-		chanStopNodeProcess:  arg.ChanStopNodeProcess,
-		closers:              make([]update.Closer, 0),
-		chanTriggerReceived:  make(chan struct{}, 1), //buffer with one value as there might be async calls
-		importStartHandler:   arg.ImportStartHandler,
-		roundHandler:         arg.RoundHandler,
+		enabled:               arg.Enabled,
+		enabledAuthenticated:  arg.EnabledAuthenticated,
+		selfPubKey:            arg.SelfPubKeyBytes,
+		triggerPubKey:         arg.TriggerPubKeyBytes,
+		triggerReceived:       false,
+		triggerExecuting:      false,
+		argumentParser:        arg.ArgumentParser,
+		epochProvider:         arg.EpochProvider,
+		exportFactoryHandler:  arg.ExportFactoryHandler,
+		closeAfterInMinutes:   arg.CloseAfterExportInMinutes,
+		chanStopNodeProcess:   arg.ChanStopNodeProcess,
+		closers:               make([]update.Closer, 0),
+		chanTriggerReceived:   make(chan struct{}, 1), // TODO: remove it with heartbeat v1 cleanup
+		chanTriggerReceivedV2: make(chan struct{}, 1), // buffer with one value as there might be async calls
+		importStartHandler:    arg.ImportStartHandler,
+		roundHandler:          arg.RoundHandler,
 	}
 
 	t.isTriggerSelf = bytes.Equal(arg.TriggerPubKeyBytes, arg.SelfPubKeyBytes)
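chanTriggerReceivedV2 is created exactly like the v1 channel: a buffer of one element so a notification can be parked even when nobody is listening yet, combined further down with a select/default send so the writer never blocks. A self-contained sketch of that idiom, with illustrative names:

// Non-blocking notification over a 1-buffered channel: the sender never waits,
// and a second notification arriving before the consumer reads is simply dropped
// because one signal is already pending. Sketch only, not elrond-go code.
package sketch

type notifier struct {
	chanTriggerReceived chan struct{}
}

func newNotifier() *notifier {
	return &notifier{chanTriggerReceived: make(chan struct{}, 1)}
}

// notify writes on the channel without ever blocking the caller.
func (n *notifier) notify() {
	select {
	case n.chanTriggerReceived <- struct{}{}:
	default: // a notification is already pending; nothing to do
	}
}

// NotifyTriggerReceived exposes the channel read-only, as the trigger does.
func (n *notifier) NotifyTriggerReceived() <-chan struct{} {
	return n.chanTriggerReceived
}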
@@ -171,7 +173,17 @@ func (t *trigger) computeTriggerStartOfEpoch(receivedTrigger uint32) bool {
 	return true
 }
 
-// Trigger will start the hardfork process
+// SetExportFactoryHandler sets the exportFactoryHandler with the provided one
+func (t *trigger) SetExportFactoryHandler(exportFactoryHandler update.ExportFactoryHandler) error {
+	if check.IfNil(exportFactoryHandler) {
+		return update.ErrNilExportFactoryHandler
+	}
+
+	t.exportFactoryHandler = exportFactoryHandler
+	return nil
+}
+
+// Trigger starts the hardfork process
 func (t *trigger) Trigger(epoch uint32, withEarlyEndOfEpoch bool) error {
 	if !t.enabled {
 		return update.ErrTriggerNotEnabled
@@ -244,7 +256,8 @@ func (t *trigger) computeAndSetTrigger(epoch uint32, originalPayload []byte, wit
 	}
 
 	if len(originalPayload) == 0 {
-		t.writeOnNotifyChan()
+		t.writeOnNotifyChan() // TODO: remove it with heartbeat v1 cleanup
+		t.writeOnNotifyChanV2()
 	}
 
 	shouldSetTriggerFromEpochChange := epoch > t.epochProvider.MetaEpoch()
@@ -263,13 +276,22 @@ func (t *trigger) computeAndSetTrigger(epoch uint32, originalPayload []byte, wit
 }
 
 func (t *trigger) writeOnNotifyChan() {
-	//writing on the notification chan should not be blocking as to allow self to initiate the hardfork process
+	// TODO: remove it with heartbeat v1 cleanup
+	// writing on the notification chan should not be blocking as to allow self to initiate the hardfork process
 	select {
 	case t.chanTriggerReceived <- struct{}{}:
 	default:
 	}
 }
 
+func (t *trigger) writeOnNotifyChanV2() {
+	// writing on the notification chan should not be blocking as to allow self to initiate the hardfork process
+	select {
+	case t.chanTriggerReceivedV2 <- struct{}{}:
+	default:
+	}
+}
+
 func (t *trigger) doTrigger() {
 	t.callClose()
 	t.exportAll()
@@ -328,7 +350,7 @@ func (t *trigger) TriggerReceived(originalPayload []byte, data []byte, pkBytes [
 
 	isTriggerEnabled := t.enabled && t.enabledAuthenticated
 	if !isTriggerEnabled {
-		//should not return error as to allow the message to get to other peers
+		// should not return error as to allow the message to get to other peers
 		return true, nil
 	}
 
@@ -455,7 +477,7 @@ func (t *trigger) CreateData() []byte {
 	return []byte(payload)
 }
 
-// AddCloser will add a closer interface on the existing list
+// AddCloser adds a closer interface on the existing list
 func (t *trigger) AddCloser(closer update.Closer) error {
 	if check.IfNil(closer) {
 		return update.ErrNilCloser
@@ -468,12 +490,19 @@ func (t *trigger) AddCloser(closer update.Closer) error {
 	return nil
 }
 
-// NotifyTriggerReceived will write a struct{}{} on the provided channel as soon as a trigger is received
+// NotifyTriggerReceived writes a struct{}{} on the provided channel as soon as a trigger is received
 // this is done to decrease the latency of the heartbeat sending system
 func (t *trigger) NotifyTriggerReceived() <-chan struct{} {
+	// TODO: remove it with heartbeat v1 cleanup
 	return t.chanTriggerReceived
 }
 
+// NotifyTriggerReceivedV2 writes a struct{}{} on the provided channel as soon as a trigger is received
+// this is done to decrease the latency of the heartbeat sending system
+func (t *trigger) NotifyTriggerReceivedV2() <-chan struct{} {
+	return t.chanTriggerReceivedV2
+}
+
 // IsInterfaceNil returns true if there is no value under the interface
 func (t *trigger) IsInterfaceNil() bool {
 	return t == nil
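On the consuming side, a component interested in the hardfork signal would typically wait on the channel returned by NotifyTriggerReceivedV2 inside a select, alongside a cancellation path. This is a hedged sketch of one plausible consumer shape, not code from this PR; the TriggerNotifiee interface below is assumed only to keep the example self-contained.

// Consumer sketch for a read-only notification channel like the one exposed above.
package sketch

import "context"

// TriggerNotifiee mirrors the read-only channel accessor shown above (assumed shape).
type TriggerNotifiee interface {
	NotifyTriggerReceivedV2() <-chan struct{}
}

// watchTrigger blocks until a trigger notification arrives or ctx is cancelled,
// then runs onTrigger at most once.
func watchTrigger(ctx context.Context, notifiee TriggerNotifiee, onTrigger func()) {
	select {
	case <-notifiee.NotifyTriggerReceivedV2():
		onTrigger()
	case <-ctx.Done():
	}
}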
diff --git a/update/trigger/trigger_test.go b/update/trigger/trigger_test.go
index 066c85d3886..5b297dc32b0 100644
--- a/update/trigger/trigger_test.go
+++ b/update/trigger/trigger_test.go
@@ -70,6 +70,26 @@ func TestNewTrigger_ShouldWork(t *testing.T) {
 	assert.False(t, check.IfNil(trig))
 }
 
+//------- SetExportFactoryHandler
+
+func TestSetExportFactoryHandler_NilArgShouldErr(t *testing.T) {
+	t.Parallel()
+
+	trig, _ := trigger.NewTrigger(createMockArgHardforkTrigger())
+
+	err := trig.SetExportFactoryHandler(nil)
+	assert.Equal(t, update.ErrNilExportFactoryHandler, err)
+}
+
+func TestSetExportFactoryHandler_ShouldWork(t *testing.T) {
+	t.Parallel()
+
+	trig, _ := trigger.NewTrigger(createMockArgHardforkTrigger())
+
+	err := trig.SetExportFactoryHandler(&mock.ExportFactoryHandlerStub{})
+	assert.Nil(t, err)
+}
+
 //------- Trigger
 
 func TestTrigger_TriggerNotEnabledShouldErr(t *testing.T) {
diff --git a/vm/interface.go b/vm/interface.go
index 84dc4cb68c6..f5788c87e09 100644
--- a/vm/interface.go
+++ b/vm/interface.go
@@ -85,6 +85,7 @@ type ContextHandler interface {
 	AddCode(addr []byte, code []byte)
 	AddTxValueToSmartContract(value *big.Int, scAddress []byte)
 	SetGasProvided(gasProvided uint64)
+	GetReturnMessage() string
 }
 
 // MessageSignVerifier is used to verify if message was signed with given public key
diff --git a/vm/mock/systemEIStub.go b/vm/mock/systemEIStub.go
index c91147135c4..08b66c0d344 100644
--- a/vm/mock/systemEIStub.go
+++ b/vm/mock/systemEIStub.go
@@ -222,6 +222,11 @@ func (s *SystemEIStub) AddReturnMessage(msg string) {
 	}
 }
 
+// GetReturnMessage -
+func (s *SystemEIStub) GetReturnMessage() string {
+	return s.ReturnMessage
+}
+
 // GetStorage -
 func (s *SystemEIStub) GetStorage(key []byte) []byte {
 	if s.GetStorageCalled != nil {
diff --git a/vm/process/systemVM.go b/vm/process/systemVM.go
index 08e63ba00c3..549d7a2e17e 100644
--- a/vm/process/systemVM.go
+++ b/vm/process/systemVM.go
@@ -1,6 +1,7 @@
 package process
 
 import (
+	"math/big"
 	"sync"
 
 	"github.com/ElrondNetwork/elrond-go-core/core"
@@ -118,7 +119,17 @@ func (s *systemVM) RunSmartContractCall(input *vmcommon.ContractCallInput) (*vmc
 	}
 
 	returnCode := contract.Execute(input)
-	vmOutput := s.systemEI.CreateVMOutput()
+	var vmOutput *vmcommon.VMOutput
+	if returnCode == vmcommon.Ok {
+		vmOutput = s.systemEI.CreateVMOutput()
+	} else {
+		vmOutput = &vmcommon.VMOutput{
+			GasRemaining:  0,
+			GasRefund:     big.NewInt(0),
+			ReturnMessage: s.systemEI.GetReturnMessage(),
+		}
+	}
+	vmOutput.ReturnCode = returnCode
 
 	return vmOutput, nil
diff --git a/vm/systemSmartContracts/eei.go b/vm/systemSmartContracts/eei.go
index faa12a799cd..4f840d073e6 100644
--- a/vm/systemSmartContracts/eei.go
+++ b/vm/systemSmartContracts/eei.go
@@ -435,6 +435,11 @@ func (host *vmContext) AddReturnMessage(message string) {
 	host.returnMessage += "@" + message
 }
 
+// GetReturnMessage will return the accumulated return message
+func (host *vmContext) GetReturnMessage() string {
+	return host.returnMessage
+}
+
 // AddLogEntry will add a log entry
 func (host *vmContext) AddLogEntry(entry *vmcommon.LogEntry) {
 	host.logs = append(host.logs, entry)
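The systemVM change above separates the happy path from the failure path: only a successful execution asks the EEI to build a full VMOutput, while a failed call returns a minimal output with zero remaining gas, a zero refund and the accumulated return message, and the return code is set in both cases. A simplified, self-contained sketch of that shape; VMOutput and ReturnCode here are local stand-ins, not the elrond-vm-common types.

// Failure-shaped output sketch: on error, consume all gas and surface the
// accumulated return message; on success, delegate to the normal builder.
package sketch

import "math/big"

type ReturnCode int

const (
	Ok ReturnCode = iota
	UserError
)

type VMOutput struct {
	ReturnCode    ReturnCode
	ReturnMessage string
	GasRemaining  uint64
	GasRefund     *big.Int
}

// buildOutput keeps the happy path untouched and builds a minimal output on failure.
func buildOutput(returnCode ReturnCode, okOutput func() *VMOutput, returnMessage string) *VMOutput {
	var vmOutput *VMOutput
	if returnCode == Ok {
		vmOutput = okOutput()
	} else {
		vmOutput = &VMOutput{
			GasRemaining:  0,
			GasRefund:     big.NewInt(0),
			ReturnMessage: returnMessage,
		}
	}
	vmOutput.ReturnCode = returnCode
	return vmOutput
}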